Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 6–782k |
| ext | stringclasses 7 |
| lang | stringclasses 1 |
| max_stars_repo_path | stringlengths 4–237 |
| max_stars_repo_name | stringlengths 6–72 |
| max_stars_repo_head_hexsha | stringlengths 40–40 |
| max_stars_repo_licenses | list |
| max_stars_count | int64 1–53k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 4–184 |
| max_issues_repo_name | stringlengths 6–72 |
| max_issues_repo_head_hexsha | stringlengths 40–40 |
| max_issues_repo_licenses | list |
| max_issues_count | int64 1–27.1k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 4–184 |
| max_forks_repo_name | stringlengths 6–72 |
| max_forks_repo_head_hexsha | stringlengths 40–40 |
| max_forks_repo_licenses | list |
| max_forks_count | int64 1–12.2k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 6–782k |
| avg_line_length | float64 2.75–664k |
| max_line_length | int64 5–782k |
| alphanum_fraction | float64 0–1 |
9d82c9cb5024f9c9cf0bd3159b46b6ac35fd02c1 | 1,144 | py | Python | src/preprocessing/CannyFilter.py | florianletsch/kinect-juggling | f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | 7 | 2015-11-27T09:53:32.000Z | 2021-01-13T17:35:54.000Z | src/preprocessing/CannyFilter.py | florianletsch/kinect-juggling | f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | null | null | null | src/preprocessing/CannyFilter.py | florianletsch/kinect-juggling | f320cc0b55adf65d338d25986a03106a7e3f46ef | ["Unlicense", "MIT"] | null | null | null |
import numpy as np
import cv2  # legacy `import cv` dropped: the old `cv` module is referenced only in commented-out code below
try:
    import vigra as vi
except ImportError:
    pass
class CannyFilter(object):
# def filter(self, rgb, depth, args = {}):
# img = cv.fromarray(depth, cv.CV_8UC1)
# mat1 = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
# mat2 = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
# cv.Convert(img, mat1)
# cv.Canny(mat1, mat2, 50, 200) # ???
# return rgb, np.asarray(mat2)
def filter(self, rgb, depth, balls, args = {}):
img = np.asarray(depth)
# img = cv.fromarray(depth, cv.CV_8UC1)
# mat = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
# cv.Convert(img, mat)
img = cv2.GaussianBlur(img,(3,3),0)
# img = cv2.Canny(img,10,100,apertureSize=3)
# cv.Canny(img,edges,0,300,aperture_size=3)
im = vi.Image(img, dtype=np.uint8)
edgels = vi.analysis.cannyEdgelList(im,3.0,3.0)
# img = cv2.Canny(img, 50, 200)
# rgb = cv2.cvtColor(img, cv.CV_GRAY2BGR)
        print(edgels)
w, h = img.shape
rgb = np.empty((w, h, 3), dtype=np.uint8)
rgb[:, :, :] = img[:, :, np.newaxis]
return rgb, depth, balls
| 27.902439 | 62 | 0.592657 |
9de8548126c0bb0004bd92da17ad48b3fbc94ad4 | 74 | py | Python | pythonProj/FZPython/pyquant/libs/__init__.py | iHamburg/FZQuant | 86b750ec33d01badfd3f324d6f1599118b9bf8ff | ["MIT"] | null | null | null | pythonProj/FZPython/pyquant/libs/__init__.py | iHamburg/FZQuant | 86b750ec33d01badfd3f324d6f1599118b9bf8ff | ["MIT"] | null | null | null | pythonProj/FZPython/pyquant/libs/__init__.py | iHamburg/FZQuant | 86b750ec33d01badfd3f324d6f1599118b9bf8ff | ["MIT"] | 2 | 2019-04-10T10:05:00.000Z | 2021-11-24T17:17:23.000Z |
# coding: utf8
# from .loglib import logger
# from .socketioclient import
| 18.5 | 29 | 0.756757 |
ae65f90e332bf1f2c4e825ef0bd9ca41429699ca | 1,727 | py | Python | data.py | asfer/mnist_sandbox | 69be6d623e948afdf92a3ca4c7ce77f93eafc0a8 | ["MIT"] | null | null | null | data.py | asfer/mnist_sandbox | 69be6d623e948afdf92a3ca4c7ce77f93eafc0a8 | ["MIT"] | null | null | null | data.py | asfer/mnist_sandbox | 69be6d623e948afdf92a3ca4c7ce77f93eafc0a8 | ["MIT"] | null | null | null |
import array
import struct
import numpy as np
from PIL import Image
class MNIST:
""" MNIST dataset is composed of digit images of size 28x28 and its labels """
def __init__(self, data_dir):
self.train_data, self.train_labels = self.parse_images(data_dir + '/train-images-idx3-ubyte'), \
self.parse_labels(data_dir + '/train-labels-idx1-ubyte')
self.test_data, self.test_labels = self.parse_images(data_dir + '/t10k-images-idx3-ubyte'), \
self.parse_labels(data_dir + '/t10k-labels-idx1-ubyte')
@staticmethod
def parse_images(filename):
with open(filename, 'rb') as f:
magic, items, rows, cols = struct.unpack('>IIII', f.read(16))
assert magic == 2051 and rows == 28 and cols == 28
images = array.array('B', f.read())
assert items * rows * cols == len(images)
            return np.array(images, dtype=np.uint8).reshape((items, rows, cols), order='C')  # uint8: pixel values run 0..255 and would overflow int8
@staticmethod
def parse_labels(filename):
with open(filename, 'rb') as f:
magic, items = struct.unpack('>II', f.read(8))
assert magic == 2049
labels = array.array('B', f.read())
assert len(labels) == items
return np.array(labels, dtype=np.int8).reshape((items, 1))
@staticmethod
def display(array):
image = Image.fromarray(array)
scaled_shape = tuple([8 * i for i in array.shape])
image = image.resize(scaled_shape)
image.show()
mnist = MNIST('./data')
if __name__ == '__main__':
example = mnist.train_data[41, :, :]
print(example.shape)
mnist.display(example)
| 34.54 | 105 | 0.588303 |
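
For reference, a minimal sketch of the IDX header layout that parse_images above relies on; the packed header here is synthetic, but the '>IIII' format and the 2051 magic number match the code:

import struct

# An IDX image file starts with four big-endian uint32s: magic, item count, rows, cols.
# Magic 2051 (0x00000803) marks an unsigned-byte tensor with 3 dimensions.
header = struct.pack('>IIII', 2051, 60000, 28, 28)  # hypothetical train-set header
magic, items, rows, cols = struct.unpack('>IIII', header)
assert magic == 2051 and rows == 28 and cols == 28
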
c9ba834056a8519b7c1744c6f2bf6724bc877345 | 61 | py | Python | ex1/evento.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | ["MIT"] | null | null | null | ex1/evento.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | ["MIT"] | null | null | null | ex1/evento.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | ["MIT"] | null | null | null |
class Evento():
def __init__(self, s):
self.s = s
| 20.333333 | 26 | 0.540984 |
01165dc10d67af40d30bb37cc14258aed30e386a | 1,766 | py | Python | rc4.py | gamegrd/YY | 39fca68d11956508fdbc094c9c319b7a7082722f | ["Apache-2.0"] | 9 | 2016-10-17T04:57:47.000Z | 2021-03-08T15:50:03.000Z | rc4.py | SkyblueMr/YY | 39fca68d11956508fdbc094c9c319b7a7082722f | ["Apache-2.0"] | 1 | 2017-10-13T09:27:34.000Z | 2017-10-13T09:27:34.000Z | rc4.py | SkyblueMr/YY | 39fca68d11956508fdbc094c9c319b7a7082722f | ["Apache-2.0"] | 6 | 2017-06-07T01:27:42.000Z | 2020-02-10T04:08:51.000Z |
import binascii
import hexdump
import M2Crypto.RC4  # used in the reference check under __main__ below; missing in the original
class RC4:
def __init__(self, key = ""):
if key:
self._rc4_init(key)
def _rc4_init(self, key):
(self.x,self.y) = (0,0)
key_len = len(key)
if key_len > 256 or key_len < 1:
            raise IndexError('Invalid key length: %d' % key_len)  # original concatenated str + int, which itself raises TypeError
self.state_array = [i for i in xrange(0,256)] #self.stat_array = range(0,256)
for i in xrange(0,256):
self.x = ((ord(key[i%key_len]) & 0xff) + self.state_array[i] + self.x) & 0xff
self.state_array[i], self.state_array[self.x] = self.state_array[self.x], self.state_array[i]
self.x = 0
def update(self, input):
self.out = []
for i in xrange(0,len(input)):
self.x = (self.x + 1) & 0xff
self.y = (self.state_array[self.x] + self.y) & 0xff
self.state_array[self.x], self.state_array[self.y] = self.state_array[self.y], self.state_array[self.x]
self.out.append(chr((ord(input[i]) ^ self.state_array[(self.state_array[self.x] + self.state_array[self.y]) & 0xff])))
return "".join(self.out)
if __name__ == '__main__':
key = 'F091172542D066E5F848E4BEAD43ACE1'
data = 'F91C884344E7D3D1BA8DAEBF6E55A0ED4A60313D34555048F22441102DCB352602FE8F1478773FBF58FBE6D1E6F56DFB87E1F5B55901F24738444BE32191977F9CDFCEAD18DCDE65021F3303CBC3EC0545C939AA02016E04315FEAF90F07C7BF4841A0C886279EAAC9F4AD78576A74'
key = binascii.unhexlify(key)
data = binascii.unhexlify(data)
rc4 = RC4(key)
hexdump.hexdump(rc4.update(data))
rc4 = RC4(key)
hexdump.hexdump(rc4.update(data[:32]))
hexdump.hexdump(rc4.update(data[32:]))
rc4 = M2Crypto.RC4.RC4(key)
hexdump.hexdump(rc4.update(data[:32]))
hexdump.hexdump(rc4.update(data[32:]))
| 40.136364 | 235 | 0.656285 |
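
For contrast with the Python 2 implementation above, a self-contained Python 3 sketch of the same RC4 keystream logic over bytes (illustrative names, not the file's API):

def rc4_crypt(key: bytes, data: bytes) -> bytes:
    # Key-scheduling algorithm (KSA): permute 0..255 using the key bytes.
    s = list(range(256))
    j = 0
    for i in range(256):
        j = (j + s[i] + key[i % len(key)]) & 0xff
        s[i], s[j] = s[j], s[i]
    # Pseudo-random generation algorithm (PRGA): XOR the keystream into the data.
    out = bytearray()
    i = j = 0
    for byte in data:
        i = (i + 1) & 0xff
        j = (j + s[i]) & 0xff
        s[i], s[j] = s[j], s[i]
        out.append(byte ^ s[(s[i] + s[j]) & 0xff])
    return bytes(out)

Because RC4 is symmetric, rc4_crypt(key, rc4_crypt(key, data)) returns data unchanged.
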
6d7bc5844acf310633bdacd59548fe19e1e887dd | 28,982 | py | Python | qdviewer/QDViewer.py | FlorianTimm/quer2qgis | 4ff6542fa9d26e4e37170b3437408f00547b7efd | ["MIT"] | null | null | null | qdviewer/QDViewer.py | FlorianTimm/quer2qgis | 4ff6542fa9d26e4e37170b3437408f00547b7efd | ["MIT"] | null | null | null | qdviewer/QDViewer.py | FlorianTimm/quer2qgis | 4ff6542fa9d26e4e37170b3437408f00547b7efd | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QDViewer
A QGIS plugin
 Display cross-section data (Querschnittsdaten anzeigen)
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-05-11
git sha : $Format:%H$
copyright : (C) 2018 by Lehmkuhl/Timm
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QVariant
from PyQt5.QtGui import QIcon
from qgis.gui import QgsFileWidget
from qgis.core import QgsVectorLayer, QgsField, QgsProject, QgsFeature, QgsPointXY, QgsGeometry, QgsPoint, \
QgsCoordinateReferenceSystem, QgsVectorFileWriter
from PyQt5.QtWidgets import QAction
import sqlite3
import math
import time
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .QDViewer_dialog import QDViewerDialog
import os.path
class QDViewer:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'QDViewer_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = QDViewerDialog()
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&QDViewer')
self.toolbar = self.iface.addToolBar(u'QDViewer')
self.toolbar.setObjectName(u'QDViewer')
self.achsen = None
self.achsenData = None
self.querschnitte = None
self.querschnittData = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('QDViewer', message)
def add_action(self, icon_path, text, callback, enabled_flag=True, add_to_menu=True, add_to_toolbar=True,
status_tip=None, whats_this=None, parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/QDViewer/icon.png'
self.add_action(
icon_path,
text=self.tr(u'QDViewer'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&QDViewer'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
@staticmethod
def sqrt(t):
return math.sqrt(t)
    # compute a direction vector between two points
@staticmethod
def dist(p_x1, p_x2, p_y1, p_y2):
p_dx = p_x2 - p_x1
p_dy = p_y2 - p_y1
return QDViewer.einheit(p_dx, p_dy)
    # compute a unit vector
@staticmethod
def einheit(p_dx, p_dy):
p_strecke = math.sqrt(p_dx * p_dx + p_dy * p_dy)
if p_strecke > 0:
p_dx = p_dx / p_strecke
p_dy = p_dy / p_strecke
return p_dx, p_dy, p_strecke
    # write a polygon feature
def write_polygon(self, p_x, p_y, row, quer):
feat = QgsFeature(self.querschnitte.fields())
feat.setAttributes([row[0], row[1], quer[0], quer[1], quer[2], quer[3], quer[8], quer[9], quer[10], quer[11],
quer[12], quer[13], quer[14], quer[15]])
if len(p_x) > 0:
points = []
for i in range(len(p_x)):
points.append(QgsPointXY(p_x[i], p_y[i]))
feat.setGeometry(QgsGeometry.fromPolygonXY([points]))
self.querschnittData.addFeatures([feat])
# text = "POLYGON (("
# for i in range(len(p_x)):
# text += str(p_x[i]) + " " + str(p_y[i]) + ", "
# if len(p_x) > 0:
# text = text[:-2]
# text += ")); " + str(row[0]) + "; " + str(row[1]) + "; " + str(quer[0]) + "; " + str(quer[1]) + "; " + \
# str(quer[2]) + "; " + str(quer[3]) + "; " + str(quer[4]) + "; " + str(quer[5]) + "; " + \
# str(quer[6]) + "; " + str(quer[7]) + "; " + str(quer[8])
# print(text)
# self.txt.write(text + "\n")
def write_linestring(self, p_punkte, row):
feat = QgsFeature(self.achsen.fields())
feat.setAttributes([row[0], row[1]])
if len(p_punkte) > 0:
points = []
for p_p in p_punkte:
points.append(QgsPointXY(p_p[1], p_p[2]))
feat.setGeometry(QgsGeometry.fromPolylineXY(points))
self.achsenData.addFeatures([feat])
# text = "LINESTRING ("
# for p_p in p_punkte:
# text += str(p_p[1]) + " " + str(p_p[2]) + ", "
# if len(p_punkte) > 0:
# text = text[:-2]
# text += "); " + str(row[0]) + "; " + str(row[1])
# print(text)
# self.txt2.write(text + "\n")
    # normalizes vectors
@staticmethod
def norm(p_dx, p_dy):
p_s = math.sqrt(p_dx * p_dx + p_dy * p_dy)
if p_s > 0:
p_dx = p_dx / p_s
p_dy = p_dy / p_s
return p_dx, p_dy, p_s
def make_layers(self):
        # create WKT files
# self.txt = open(self.dlg.fw_speichern.filePath(), "w")
# self.txt.write("Geometrie; VNK; NNK; VST; BST; STREIFEN; STREIFENNR; ABSTAND_VST1; " +
# "ABSTAND_VST2; ABSTAND_BST1; ABSTAND_BST2; ART\n")
# self.txt2 = open(self.dlg.fw_speichern.filePath() + "_achsen.wkt", "w")
# self.txt2.write("Geometrie; VNK; NNK\n")
# from qgis.core import QgsMapLayerRegistry
self.querschnitte = QgsVectorLayer("polygon?crs=epsg:25832", "Querschnitte", "memory")
self.querschnitte.startEditing()
self.querschnittData = self.querschnitte.dataProvider()
self.querschnittData.addAttributes([QgsField("VNK", QVariant.String),
QgsField("NNK", QVariant.String),
QgsField("VST", QVariant.Int),
QgsField("BST", QVariant.Int),
QgsField("STREIFEN", QVariant.String),
QgsField("STREIFENNR", QVariant.Int),
QgsField("ART", QVariant.String),
QgsField("ARTOBER", QVariant.String),
QgsField("BREITE", QVariant.Int),
QgsField("BISBREITE", QVariant.Int),
QgsField("BLPART", QVariant.String),
QgsField("BLPART3", QVariant.String),
QgsField("UIPART", QVariant.String),
QgsField("UIPART3", QVariant.String),
QgsField("LAENGE", QVariant.Int),
QgsField("FLAECHE", QVariant.Double),
QgsField("BAUJAHRGEW", QVariant.Date),
QgsField("ABNAHMEGEW", QVariant.Date),
QgsField("DAUERGEW", QVariant.Int),
QgsField("ABLAUFGEW", QVariant.Date),
QgsField("ERFART", QVariant.String),
QgsField("QUELLE", QVariant.String),
QgsField("ADATUM", QVariant.Date),
QgsField("BEMERKUNG", QVariant.String),
QgsField("BEARBEITER", QVariant.String),
QgsField("STAND", QVariant.Date),
QgsField("PARENTID", QVariant.String),
QgsField("OBJEKTID", QVariant.String),
QgsField("FLAG", QVariant.String)])
self.querschnitte.commitChanges()
QgsProject.instance().addMapLayer(self.querschnitte)
self.achsen = QgsVectorLayer("linestring?crs=epsg:25832", "Achsen", "memory")
self.achsen.startEditing()
self.achsenData = self.achsen.dataProvider()
self.achsenData.addAttributes([QgsField("VNK", QVariant.String), QgsField("NNK", QVariant.String)]) #
self.achsen.commitChanges()
QgsProject.instance().addMapLayer(self.achsen)
def arbeite(self):
# QgsVectorFileWriter.writeAsVectorFormat(vlyr, newSQLiteFilePath, "utf-8", crs, "SQLite", False, None,
# ["SPATIALITE=YES"])
self.make_layers()
print("Connect DB and copy...")
dbabschn = QgsVectorLayer(self.dlg.fw_abschn.filePath(), "dbabschn", "ogr")
db1030 = QgsVectorLayer(self.dlg.fw_1030.filePath(), "db1030", "ogr")
db255 = QgsVectorLayer(self.dlg.fw_255.filePath(), "db255", "ogr")
QgsVectorFileWriter.writeAsVectorFormat(dbabschn, self.dlg.fw_speichern.filePath()+"\dbabschn.sqlite", "utf-8",
QgsCoordinateReferenceSystem(), "SQLite")
QgsVectorFileWriter.writeAsVectorFormat(db255, self.dlg.fw_speichern.filePath()+"\db000255.sqlite", "utf-8",
QgsCoordinateReferenceSystem(), "SQLite")
QgsVectorFileWriter.writeAsVectorFormat(db1030, self.dlg.fw_speichern.filePath()+"\db001030.sqlite", "utf-8",
QgsCoordinateReferenceSystem(), "SQLite")
mem_db = sqlite3.connect(':memory:') # create a memory database
for old_db_pfad in [self.dlg.fw_speichern.filePath() + "\dbabschn.sqlite",
self.dlg.fw_speichern.filePath() + "\db000255.sqlite",
self.dlg.fw_speichern.filePath() + "\db001030.sqlite"]:
old_db = sqlite3.connect(old_db_pfad)
c = old_db.cursor()
c.execute('''DROP TABLE geometry_columns''')
c.execute('''DROP TABLE spatial_ref_sys''')
query = "".join(line for line in old_db.iterdump())
mem_db.executescript(query)
daten = mem_db.cursor()
mem_db.create_function("sqrt", 1, QDViewer.sqrt)
print("Tabellen anlegen...")
daten.execute('''CREATE TABLE "tmp11" ("VNK" varchar(10), "NNK" varchar(10), "SORT" float, "XKOORD" float,
"YKOORD" float, "STAND" text, "FLAG" varchar(1), DX float default 0, DY float default 0,
laenge float default NULL, STATION float default NULL, ABSTAND float default NULL)''')
daten.execute('''CREATE TABLE "tmp12" ("VNK" varchar(10), "NNK" varchar(10), "SORT" float, "XKOORD" float,
"YKOORD" float, "STAND" text, "FLAG" varchar(1), DX float default 0, DY float default 0,
laenge float default NULL, STATION float default NULL, ABSTAND float default NULL)''')
daten.execute('''CREATE TABLE "tmp13" ("VNK" varchar(10), "NNK" varchar(10), "SORT" float, "XKOORD" float,
"YKOORD" float, "STAND" text, "FLAG" varchar(1), DX float default 0, DY float default 0,
laenge float default NULL, STATION float default NULL, ABSTAND float default NULL)''')
daten.execute('''CREATE TABLE "tmp14" ("VNK" varchar(10), "NNK" varchar(10), "SORT" float, "XKOORD" float,
"YKOORD" float, "STAND" text, "FLAG" varchar(1), DX float default 0, DY float default 0,
laenge float default NULL, STATION float default NULL, ABSTAND float default NULL,
DXP float default 0, DYP float default 0)''')
        # compute DX/DY
print("DX/DY berechnen...")
daten.execute('''INSERT INTO tmp11
SELECT a.VNK, a.NNK, a.SORT, a.XKOORD, a.YKOORD, a.STAND, a.FLAG, (b.XKOORD - a.XKOORD) DX,
(b.YKOORD - a.YKOORD) DY, NULL, NULL, NULL FROM DB000255 a, DB000255 b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK AND a.SORT = (b.SORT + 1);''')
        # compute the length and normalize the vector DX/DY to a unit vector
print("Laenge berechnen und Vektor DX/DY zu Einheitsvektor...")
daten.execute('''UPDATE tmp11 SET laenge = sqrt(DX*DX+DY*DY), DX = DX / sqrt(DX*DX+DY*DY),
DY = DY / sqrt(DX*DX+DY*DY) WHERE DX != 0 OR DY != 0;''')
        # copy DX/DY from the second point to the first
print("DX/DY vom zweiten Punkt für ersten übernehmen...")
daten.execute('''INSERT INTO tmp11
SELECT a.VNK, a.NNK, a.SORT, a.XKOORD, a.YKOORD, a.STAND, a.FLAG, b.DX DX, b.DY DY, 0, 0, 0
FROM DB000255 a, tmp11 b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK AND a.SORT = 1 AND b.SORT = 2;''')
        # compute ABSTAND (running sum of the lengths)
print("ABSTAND berechnen (Summe der Laenge)...")
daten.execute('''INSERT INTO tmp12
SELECT a.VNK, a.NNK, a.SORT, a.XKOORD, a.YKOORD, a.STAND, a.FLAG, a.DX, a.DY, a.laenge, NULL, SUM(b.laenge)
FROM tmp11 a, tmp11 b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK AND a.SORT >= b.SORT GROUP BY a.VNK, a.NNK, a.SORT;''')
        # compute the station
print("Station berechnen...")
daten.execute('''INSERT INTO tmp13
SELECT a.VNK, a.NNK, a.SORT, a.XKOORD, a.YKOORD, a.STAND, a.FLAG, a.DX, a.DY, a.laenge,
a.ABSTAND * b.faktor , a.ABSTAND
FROM tmp12 a,
(SELECT b.VNK, b.NNK, c.LEN / max(b.ABSTAND) faktor FROM tmp12 b, DBABSCHN c
WHERE b.VNK = c.VNK AND b.NNK = c.NNK GROUP BY b.VNK, b.NNK) b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK
GROUP BY a.VNK, a.NNK, a.SORT;''')
        # compute DXP/DYP (displacement vector at the vertices)
print("DXP/DYP berechnen (Verschiebe-Vektor an Stützpunkten)...")
daten.execute('''INSERT INTO tmp14
SELECT a.*, a.DX + b.DX, a.DY + b.DY
FROM tmp13 a, tmp13 b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK AND a.SORT = (b.SORT - 1);''')
        # carry over the last point
print("Letzten Punkt übernehmen...")
daten.execute('''INSERT INTO tmp14
SELECT a.*, a.DX, a.DY
FROM tmp13 a, (SELECT VNK, NNK, max(SORT) SORT FROM tmp13 GROUP BY VNK, NNK) b
WHERE a.VNK = b.VNK AND a.NNK = b.NNK AND a.SORT = b.SORT;''')
        # normalize DXP/DYP to a unit vector
print("DXP/DYP zu Einheitsvektor...")
daten.execute('''UPDATE tmp14 SET DXP = DXP / sqrt(DXP*DXP+DYP*DYP), DYP = DYP / sqrt(DXP*DXP+DYP*DYP)
WHERE DXP != 0 OR DYP != 0;''')
mem_db.commit()
print("Querschnitte summieren:")
print("Tabellen anlegen...")
daten.execute('''CREATE TABLE tmp1 ("VNK" varchar(10), "NNK" varchar(10), "VST" float,
"BST" float, "STREIFEN" varchar(1), "STREIFENNR" float, "ART" varchar(3),
"ARTOBER" varchar(3), "BREITE" float, "BISBREITE" float, "BLPART" varchar(2),
"BLPART3" varchar(5), "UIPART" varchar(2), "UIPART3" varchar(5), "LAENGE" float,
"FLAECHE" float, "BAUJAHRGEW" text, "ABNAHMEGEW" text, "DAUERGEW" float, "ABLAUFGEW" text,
"ERFART" varchar(2), "QUELLE" varchar(2), "ADATUM" text, "BEMERKUNG" varchar(254),
"BEARBEITER" varchar(64), "STAND" text, "PARENTID" varchar(32), "OBJEKTID" varchar(32),
"FLAG" varchar(1), ABSTAND_VST1 float default 0, ABSTAND_VST2 float default 0, ABSTAND_BST1 float default 0,
ABSTAND_BST2 float default 0);''')
daten.execute('''CREATE TABLE tmp2 ("VNK" varchar(10), "NNK" varchar(10), "VST" float,
"BST" float, "STREIFEN" varchar(1), "STREIFENNR" float, "ART" varchar(3),
"ARTOBER" varchar(3), "BREITE" float, "BISBREITE" float, "BLPART" varchar(2),
"BLPART3" varchar(5), "UIPART" varchar(2), "UIPART3" varchar(5), "LAENGE" float,
"FLAECHE" float, "BAUJAHRGEW" text, "ABNAHMEGEW" text, "DAUERGEW" float, "ABLAUFGEW" text,
"ERFART" varchar(2), "QUELLE" varchar(2), "ADATUM" text, "BEMERKUNG" varchar(254),
"BEARBEITER" varchar(64), "STAND" text, "PARENTID" varchar(32), "OBJEKTID" varchar(32),
"FLAG" varchar(1), ABSTAND_VST1 float default 0, ABSTAND_VST2 float default 0, ABSTAND_BST1 float default 0,
ABSTAND_BST2 float default 0);''')
print("Summe links und rechts...")
daten.execute('''INSERT INTO tmp1 SELECT a.VNK, a.NNK, a.VST, a.BST, a.STREIFEN, a.STREIFENNR, a.ART, a.ARTOBER,
a.BREITE, a.BISBREITE, a.BLPART, a.BLPART3, a.UIPART, a.UIPART3, a.LAENGE, a.FLAECHE, a.BAUJAHRGEW,
a.ABNAHMEGEW, a.DAUERGEW, a.ABLAUFGEW, a.ERFART, a.QUELLE, a.ADATUM, a.BEMERKUNG, a.BEARBEITER, a.STAND,
a.PARENTID, a.OBJEKTID, a.FLAG,
(SUM(b.BREITE)) ABSTAND_VST1,
NULL,
(SUM(b.BISBREITE)) ABSTAND_BST1,
NULL
FROM DB001030 a, DB001030 b WHERE a.VNK = b.VNK
AND b.NNK = a.NNK AND a.VST = b.VST AND a.STREIFEN = b.STREIFEN AND a.STREIFENNR > b.STREIFENNR
and a.STREIFEN != "M"
GROUP BY a.VNK, a.NNK, a.VST, a.STREIFEN, a.STREIFENNR;''')
print("Streifen 1 kopieren...")
daten.execute('''INSERT INTO tmp1
SELECT VNK, NNK, VST,BST, STREIFEN, STREIFENNR, ART, ARTOBER, BREITE, BISBREITE, BLPART, BLPART3, UIPART,
UIPART3, LAENGE, FLAECHE, BAUJAHRGEW, ABNAHMEGEW, DAUERGEW, ABLAUFGEW, ERFART, QUELLE, ADATUM, BEMERKUNG,
BEARBEITER, STAND, PARENTID, OBJEKTID, FLAG, 0 ABSTAND_VST1, NULL ABSTAND_VST2, 0 ABSTAND_BST1,
NULL ABSTAND_BST2 FROM DB001030 WHERE STREIFENNR = 1;''')
print("Mittelstreifen addieren...")
daten.execute('''INSERT INTO tmp2
SELECT a.VNK, a.NNK, a.VST,a.BST,a.STREIFEN,a.STREIFENNR,a.ART,a.ARTOBER,a.BREITE,a.BISBREITE,a.BLPART,
a.BLPART3, a.UIPART,a.UIPART3,a.LAENGE,a.FLAECHE,a.BAUJAHRGEW,a.ABNAHMEGEW,a.DAUERGEW,a.ABLAUFGEW,a.ERFART,
a.QUELLE, a.ADATUM, a.BEMERKUNG,a.BEARBEITER,a.STAND,a.PARENTID,a.OBJEKTID,a.FLAG,
(a.ABSTAND_VST1+round(IFNULL(m.BREITE,0) / 2)) ABSTAND_VST1,
NULL ABSTAND_VST2,
(a.ABSTAND_BST1+round(IFNULL(m.BISBREITE,0) / 2)) ABSTAND_BST1,
NULL ABSTAND_BST2
FROM tmp1 a left join (select * from DB001030 where STREIFEN = "M") m
on a.VNK = m.VNK AND a.NNK = m.NNK AND a.VST = m.VST;''')
print("Abstand der Außenkante berechnen...")
daten.execute('''UPDATE tmp2
SET ABSTAND_VST2 = ABSTAND_VST1 + BREITE,
ABSTAND_BST2 = ABSTAND_BST1 + BISBREITE;''')
print("Linke drehen...")
daten.execute('''UPDATE tmp2 SET
ABSTAND_VST1 = - ABSTAND_VST1,
ABSTAND_VST2 = - ABSTAND_VST2,
ABSTAND_BST1 = - ABSTAND_BST1,
ABSTAND_BST2 = - ABSTAND_BST2 WHERE STREIFEN = "L";''')
print("Mittelstreifen berechnen...")
daten.execute('''INSERT INTO tmp2
SELECT VNK, NNK, VST, BST, STREIFEN, STREIFENNR, ART, ARTOBER, BREITE, BISBREITE, BLPART, BLPART3, UIPART,
UIPART3, LAENGE, FLAECHE, BAUJAHRGEW, ABNAHMEGEW, DAUERGEW, ABLAUFGEW, ERFART, QUELLE, ADATUM, BEMERKUNG,
BEARBEITER, STAND, PARENTID, OBJEKTID, FLAG,
ROUND(- BREITE / 2) ABSTAND_VST1,
ROUND(BREITE / 2) ABSTAND_VST2,
ROUND(- BISBREITE / 2) ABSTAND_BST1,
ROUND(BISBREITE / 2) ABSTAND_BST2 FROM DB001030 WHERE STREIFEN = "M";''')
print("Nicht benötigte Tabellen löschen...")
daten.execute('''DROP TABLE tmp11''')
daten.execute('''DROP TABLE tmp12''')
daten.execute('''DROP TABLE tmp13''')
daten.execute('''DROP TABLE tmp1''')
mem_db.commit()
print("Arbeiten...")
        # select the sections and iterate over them
daten.execute('SELECT VNK, NNK, LEN FROM DBABSCHN')
abschn = daten.fetchall()
for row in abschn:
            # query the coordinates of the axis
sql = 'SELECT STATION, XKOORD, YKOORD, DXP, DYP, DX, DY FROM tmp14 WHERE VNK = "' + row[0] + '" AND ' \
'NNK = "' + row[1] + '" AND STATION IS NOT NULL AND XKOORD IS NOT NULL AND YKOORD IS NOT NULL AND ' \
'DX IS NOT NULL AND DY IS NOT NULL ORDER BY SORT'
daten.execute(sql)
punkte = daten.fetchall()
            # store the axis as WKT
self.write_linestring(punkte, row)
            # load the cross-sections
sql = 'SELECT VST, BST, STREIFEN, STREIFENNR, ABSTAND_VST1, ABSTAND_VST2, ABSTAND_BST1, ABSTAND_BST2,' + \
' ART,ARTOBER, BREITE, BISBREITE, BLPART, BLPART3, UIPART, UIPART3, LAENGE, FLAECHE, BAUJAHRGEW,' + \
'ABNAHMEGEW, DAUERGEW, ABLAUFGEW, ERFART, QUELLE, ADATUM, BEMERKUNG, BEARBEITER, STAND, PARENTID, ' \
'OBJEKTID, FLAG FROM tmp2 WHERE VNK = "' + row[0] + '" AND NNK = "' + row[1] + '" AND ' + \
'ABSTAND_VST1 IS NOT NULL AND ABSTAND_VST2 IS NOT NULL AND ABSTAND_BST1 IS NOT NULL AND ' \
'ABSTAND_BST2 IS NOT NULL ORDER BY VST, STREIFEN, STREIFENNR'
daten.execute(sql)
for quer in daten.fetchall():
# print(quer)
x = []
y = []
c = 0
pa = None
for p in punkte:
if p[0] >= quer[0] and c == 0 and pa is not None:
                        # compute the start point
dx = p[1] - pa[1]
dy = p[2] - pa[2]
diff = p[0] - pa[0]
f = 0
if diff > 0:
f = (quer[0] - pa[0]) / diff
# print(f)
dxn, dyn, s = QDViewer.norm(dx, dy)
# print("P1")
# print(quer[4])
if quer[4] is not None:
x.append(pa[1] + dx * f + dyn * quer[4] / 100)
y.append(pa[2] + dy * f - dxn * quer[4] / 100)
x.append(pa[1] + dx * f + dyn * quer[5] / 100)
y.append(pa[2] + dy * f - dxn * quer[5] / 100)
c = 1
if c == 1 and p[0] <= quer[1]:
# print("P2")
                        # proportional distance along the section
f = (p[0]-quer[0])/(quer[1]-quer[0])
# print(f)
                        # interpolate the first offset
a = quer[4]+f*(quer[6]-quer[4])
# print(a)
                        # interpolate the second offset
b = quer[5]+f*(quer[7]-quer[5])
# print(b)
try:
x.insert(0, p[1] - p[4] * a / 100)
y.insert(0, p[2] + p[3] * a / 100)
x.append(p[1] - p[4] * b / 100)
y.append(p[2] + p[3] * b / 100)
except TypeError:
break
if c == 1 and p[0] > quer[1]:
# print("P3")
                        # compute the end point
dx = p[1] - pa[1]
dy = p[2] - pa[2]
if (p[0] - pa[0]) != 0:
f = (quer[1] - pa[0]) / (p[0] - pa[0])
else:
f = 1
# print(p[0])
# print(f)
dxn, dyn, s = QDViewer.norm(dx, dy)
if quer[6] is not None:
x.insert(0, pa[1] + dx * f + dyn * quer[6] / 100)
y.insert(0, pa[2] + dy * f - dxn * quer[6] / 100)
if quer[7] is not None:
x.append(pa[1] + dx * f + dyn * quer[7] / 100)
y.append(pa[2] + dy * f - dxn * quer[7] / 100)
break
pa = p
self.write_polygon(x, y, row, quer)
print("Fertig")
def run(self):
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
self.dlg.fw_speichern.setStorageMode(QgsFileWidget.GetDirectory)
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
self.arbeite()
pass
| 46.520064 | 121 | 0.530364 |
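
The polygon construction in arbeite offsets each axis point perpendicular to its direction vector (the "+ dyn * a, - dxn * a" pattern above). A minimal sketch of that step, with hypothetical names:

import math

def offset_point(x, y, dx, dy, dist):
    # Normalize the direction vector, then shift the point by `dist` along
    # the right-hand perpendicular (dy, -dx), as the cross-section code does.
    ux, uy = dx / math.hypot(dx, dy), dy / math.hypot(dx, dy)
    return x + uy * dist, y - ux * dist

print(offset_point(0.0, 0.0, 1.0, 0.0, 2.0))  # -> (0.0, -2.0): 2 m to the right of +x
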
096fcd42c6d9f08d5d089e2c6170449d67a02488 | 4,290 | py | Python | deprecated/examples/simnet_bow/py_reader_generator.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | ["Apache-2.0"] | 170 | 2020-08-12T12:07:01.000Z | 2022-03-07T02:38:26.000Z | deprecated/examples/simnet_bow/py_reader_generator.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | ["Apache-2.0"] | 195 | 2020-08-13T03:22:15.000Z | 2022-03-30T07:40:25.000Z | deprecated/examples/simnet_bow/py_reader_generator.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | ["Apache-2.0"] | 67 | 2020-08-14T02:07:46.000Z | 2022-03-28T10:05:33.000Z |
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are 13 integer features and 26 categorical features
import random
def combination(x, y):
res = [[[xi, yi] for yi in y] for xi in x]
return res[0]
def get_one_data(file_list, sample_rate):
for file in file_list:
contents = []
with open(file, "r") as fin:
for q in fin.readlines():
"""query_ids, pos_title_ids, neg_title_ids, label"""
one_data = q.split(";")[:-1]
if len(one_data) < 4:
print("data format error!, please check!", q)
continue
label = int(one_data[0])
pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1])
query_ids = [int(x) for x in one_data[2].split(" ")]
if pos_title_num + neg_title_num != len(one_data) - 3:
print("data format error, pos_title_num={}, neg_title_num={}, one_data={}"
.format(pos_title_num, neg_title_num, len(one_data)))
continue
for x in range(pos_title_num):
pos_title_ids = [ int(i) for i in one_data[3+x].split(" ")]
for y in range(neg_title_num):
if random.random() > sample_rate:
continue
neg_title_ids = [int(i) for i in one_data[3+pos_title_num+y].split(" ")]
yield [query_ids, pos_title_ids, neg_title_ids, [label]]
fin.close()
def get_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1):
def batch_reader():
res = []
idx = 0
for i in get_one_data(file_list, sample_rate):
res.append(i)
idx += 1
if len(res) >= batch_size:
yield res
res = []
return batch_reader
def get_infer_data(file_list, sample_rate):
for file in file_list:
contents = []
with open(file, "r") as fin:
for q in fin.readlines():
"""query_ids, pos_title_ids, neg_title_ids, label"""
one_data = q.split(";")[:-1]
if len(one_data) < 4:
print("data format error!, please check!",q)
continue
label = int(one_data[0])
pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1])
query_ids = [int(x) for x in one_data[2].split(" ")]
if pos_title_num + neg_title_num != len(one_data) - 3:
print("data format error, pos_title_num={}, neg_title_num={}, one_data={}"
.format(pos_title_num,neg_title_num,len(one_data)))
continue
for x in range(pos_title_num):
pos_title_ids = [int(i) for i in one_data[3 + x].split(" ")]
for y in range(neg_title_num):
if random.random() > sample_rate:
continue
neg_title_ids = [int(i) for i in one_data[3 + pos_title_num + y].split(" ")]
yield [query_ids, pos_title_ids, neg_title_ids]
fin.close()
def get_infer_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1):
def batch_reader():
res = []
idx = 0
for i in get_infer_data(file_list, sample_rate):
res.append(i)
idx += 1
if len(res) >= batch_size:
yield res
res = []
return batch_reader
| 39 | 109 | 0.545221 |
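
The two readers above share one pattern: wrap a sample generator in a closure that accumulates fixed-size batches. A generic, self-contained sketch of the pattern; note that, unlike the readers above, this version also flushes the final partial batch:

def batched(samples, batch_size):
    batch = []
    for item in samples:
        batch.append(item)
        if len(batch) >= batch_size:
            yield batch
            batch = []
    if batch:  # the readers above silently drop this remainder
        yield batch

print(list(batched(range(7), 3)))  # -> [[0, 1, 2], [3, 4, 5], [6]]
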
111d89dc3fa97f05216826d77249b04673a33d5f | 381 | py | Python | examples/seq2seq/att_all_need/__init__.py | yym6472/transformers | abd01205561e5caec167c1fbb20bccea24d7ba46 | ["Apache-2.0"] | 1 | 2021-12-30T05:41:37.000Z | 2021-12-30T05:41:37.000Z | examples/seq2seq/att_all_need/__init__.py | yym6472/transformers | abd01205561e5caec167c1fbb20bccea24d7ba46 | ["Apache-2.0"] | null | null | null | examples/seq2seq/att_all_need/__init__.py | yym6472/transformers | abd01205561e5caec167c1fbb20bccea24d7ba46 | ["Apache-2.0"] | null | null | null |
import att_all_need.Constants
import att_all_need.Modules
import att_all_need.Layers
import att_all_need.SubLayers
import att_all_need.Models
import att_all_need.Translator
import att_all_need.Optim
__all__ = [
    "Constants", "Modules", "Layers",
    "SubLayers", "Models", "Optim",
    "Translator"]  # __all__ entries must be strings, not module objects
| 29.307692 | 70 | 0.84252 |
6e79cb55306cf44f641a4fcd8e7ab51391678716 | 517 | py | Python | Datastructures/linearSearch.py | BALAVIGNESHDOSTRIX/pyexpert | 300498f66a3a4f6b3060d51b3d6643d8e63cf746 | ["CC0-1.0"] | null | null | null | Datastructures/linearSearch.py | BALAVIGNESHDOSTRIX/pyexpert | 300498f66a3a4f6b3060d51b3d6643d8e63cf746 | ["CC0-1.0"] | null | null | null | Datastructures/linearSearch.py | BALAVIGNESHDOSTRIX/pyexpert | 300498f66a3a4f6b3060d51b3d6643d8e63cf746 | ["CC0-1.0"] | null | null | null |
#Linear Search
class LinearSearch:
def __init__(self):
self.elements = [10,52,14,8,1,400,900,200,2,0]
def SearchEm(self,elem):
y = 0
if elem in self.elements:
print("{x} is in the position of {y}".format(x = elem,y = self.elements.index(elem)))
else:
print("The element {x} not presented in the list".format(x = elem))
linear = LinearSearch()
task_elem = int(input("Enter the element:"))
linear.SearchEm(task_elem)
| 21.541667 | 97 | 0.576402 |
2853cd0347b667a039186eec376666b20fb855dc | 467 | py | Python | Python/M01_ProgrammingBasics/L06_NestedLoops/Exercises/Solutions/P04_TrainTheTrainers.py | todorkrastev/softuni-software-engineering | cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84 | ["MIT"] | null | null | null | Python/M01_ProgrammingBasics/L06_NestedLoops/Exercises/Solutions/P04_TrainTheTrainers.py | todorkrastev/softuni-software-engineering | cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84 | ["MIT"] | null | null | null | Python/M01_ProgrammingBasics/L06_NestedLoops/Exercises/Solutions/P04_TrainTheTrainers.py | todorkrastev/softuni-software-engineering | cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84 | ["MIT"] | 1 | 2022-02-23T13:03:14.000Z | 2022-02-23T13:03:14.000Z |
people = int(input())
name_doc = input()
grade_sum = 0
average_grade = 0
total_grade = 0
numbers = 0
while name_doc != "Finish":
for x in range(people):
grade = float(input())
grade_sum += grade
average_grade = grade_sum / people
print(f"{name_doc} - {average_grade:.2f}.")
name_doc = input()
grade_sum = 0
total_grade += average_grade
numbers += 1
print(f"Student's final assessment is {total_grade / numbers:.2f}.")
| 24.578947 | 68 | 0.640257 |
95be5f081349f73ff5bdd3615dda1dea6e95803e | 218 | py | Python | Algorithms/Implementation/designer_pdf_viewer.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null | Algorithms/Implementation/designer_pdf_viewer.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null | Algorithms/Implementation/designer_pdf_viewer.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import sys
import string
h = list(map(int, input().strip().split(' ')))
word = input().strip()
L = list(string.ascii_lowercase)
wi = [h[L.index(w)] for w in word]
print(len(word) * max(wi))
| 15.571429 | 46 | 0.642202 |
c2864f7e6f1b9b8bc01c8cc1bc2f59a6a1c5eb5a | 2,659 | py | Python | resources/mechanics_lib/FixedLegsBuilder.py | PRECISE/ROSLab | 2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d | ["Apache-2.0"] | 7 | 2016-01-20T02:33:00.000Z | 2021-02-04T04:06:57.000Z | resources/mechanics_lib/FixedLegsBuilder.py | PRECISE/ROSLab | 2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d | ["Apache-2.0"] | null | null | null | resources/mechanics_lib/FixedLegsBuilder.py | PRECISE/ROSLab | 2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d | ["Apache-2.0"] | 3 | 2016-10-05T07:20:30.000Z | 2017-11-20T10:36:50.000Z |
from api.component import Component
###################
# Component builder
###################
c = Component()
### Subcomponents used in this assembly
c.addSubcomponent("bar", "RectBeam")
c.addSubcomponent("leg1", "PointedLeg")
c.addSubcomponent("leg2", "PointedLeg")
c.addSubcomponent("split", "SplitEdge")
### New free parameters specific to this assembly
c.newParameter("depth")
c.newParameter("height")
c.newParameter("length")
c.newParameter("leg.beamwidth")
### Subcomponent parameter inheritance
# Constrain one length of the RectBeam object based on new FixedLegs parameter
c.addConstraint(("bar", "length"), "length")
# Constrain one parameter of the RectBeam object based on PointedLeg parameter
c.addConstraint(("bar", "depth"), "leg.beamwidth")
# Constrain other parameter of the RectBeam object based on new FixedLegs parameter
c.addConstraint(("bar", "width"), "depth")
# Constrain one parameter of the PointedLeg object based on new FixedLegs parameter
c.addConstraint(("leg1", "length"), "height")
# Constrain one parameter of the RectBeam object based on PointedLeg parameter
c.addConstraint(("leg1", "beamwidth"), "leg.beamwidth")
# Constrain one parameter of the PointedLeg object based on new FixedLegs parameter
c.addConstraint(("leg2", "length"), "height")
# Constrain one parameter of the RectBeam object based on PointedLeg parameter
c.addConstraint(("leg2", "beamwidth"), "leg.beamwidth")
# Break apart the edge where the two PointedLegs will connect
c.addConstraint(("split", "botlength"), ("length", "leg.beamwidth"), "(x[0],)")
c.addConstraint(("split", "toplength"), ("length", "leg.beamwidth"), "(x[1], x[0] - 2*x[1], x[1])")
### Subcomponents connections
# SplitEdge component to define multiple attachment points
c.addConnection(("bar", "tabedge"),
("split", "botedge"),
"Flat")
# Attach one leg
c.addConnection(("split", "topedge.2"),
("leg1", "front"),
"Flat")
# Attach other leg
c.addConnection(("split", "topedge.0"),
("leg2", "right"),
"Flat")
# Add tabs for rigid attachment
c.addConnection(("leg1", "right"),
("bar", "botedge.1"),
"Tab",
name="tab1", angle=90, depth=6)
c.addConnection(("leg2", "front"),
("bar", "topedge.1"),
"Tab",
name="tab2", angle=90, depth=6)
### Exposed interfaces
# Locations on FixedLegs component that higher order components can use for assembly
c.inheritInterface("topedge", ("bar", "topedge"))
c.inheritInterface("botedge", ("bar", "botedge"))
c.toYaml("FixedLegs.yaml")
| 35.453333 | 99 | 0.657766 |
b0b8286a112f6cc255d6f141cac0e14ff0ca06ff | 37 | py | Python | app/others/__init__.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | 2 | 2019-06-24T08:34:39.000Z | 2019-06-27T12:23:47.000Z | app/user/__init__.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | null | null | null | app/user/__init__.py | pushyzheng/docker-oj-web | 119abae3763cd2e53c686a320af7f4f5af1f16ca | ["MIT"] | null | null | null |
# encoding:utf-8
from . import views
| 12.333333 | 19 | 0.72973 |
b0362586ab24eacfd3e6ed4b0d747c1e52f21716 | 1,532 | py | Python | src/entity/GridSampler.py | dreaming-coder/RadarSet | c912298d0d6058c6647986524e5d95a205b51c1d | ["MIT"] | null | null | null | src/entity/GridSampler.py | dreaming-coder/RadarSet | c912298d0d6058c6647986524e5d95a205b51c1d | ["MIT"] | null | null | null | src/entity/GridSampler.py | dreaming-coder/RadarSet | c912298d0d6058c6647986524e5d95a205b51c1d | ["MIT"] | null | null | null |
import numpy as np
from numpy import ndarray
from sklearn.neighbors import KNeighborsRegressor
__all__ = ["GridSampler"]
class GridSampler(object):
def __init__(self, data: ndarray, longitude: ndarray, latitude: ndarray, k: int = 5):
"""
:param data: dbz
:param longitude: 经度
:param latitude: 维度
:param k: 取几个临近点
※※※※※※※※※※※※※※※※※※※※
※※ ※※
※※ 所有的参数都是一维的数据 ※※
※※ ※※
※※※※※※※※※※※※※※※※※※※※
"""
assert data.ndim == 1 and longitude.ndim == 1 and latitude.ndim == 1 and k > 1
longitude, self.lon_avg, self.lon_std = _normalize(longitude)
latitude, self.lat_avg, self.lat_std = _normalize(latitude)
inputs = np.dstack([longitude, latitude])[0]
self.knn = KNeighborsRegressor(n_neighbors=k, weights="distance")
        data[np.isnan(data)] = 0  # fill missing radar values with 0, otherwise KNN raises an error
self.knn.fit(inputs, data)
def map_data(self, lon: ndarray, lat: ndarray) -> ndarray:
shape = lon.shape
lon = lon.ravel()
lat = lat.ravel()
lon = (lon - self.lon_avg) / self.lon_std
lat = (lat - self.lat_avg) / self.lat_std
inputs = np.dstack([lon, lat])[0]
outputs = self.knn.predict(inputs).reshape(shape)
return outputs
def _normalize(data: ndarray):
avg = data.mean()
std = data.std()
data = (data - avg) / std
return data, avg, std
| 30.64 | 89 | 0.535901 |
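
A minimal, self-contained sketch of the same resampling idea: fit scikit-learn's KNeighborsRegressor on scattered (longitude, latitude) samples, then predict onto a regular grid (synthetic data, not the module's API):

import numpy as np
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.default_rng(0)
coords = rng.uniform(0.0, 1.0, size=(200, 2))      # scattered sample positions
values = np.sin(3 * coords[:, 0]) + coords[:, 1]   # synthetic field values
knn = KNeighborsRegressor(n_neighbors=5, weights="distance").fit(coords, values)

lon, lat = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
grid = knn.predict(np.column_stack([lon.ravel(), lat.ravel()])).reshape(lon.shape)
print(grid.shape)  # -> (10, 10)
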
05f54f0884c8616422ea49c91577829790703cff | 3,717 | py | Python | tools/generate-defines.py | lurch/Pinout2 | bd3b39de2c053607b9415dd9eb0297a401ba7162 | ["CC-BY-4.0"] | null | null | null | tools/generate-defines.py | lurch/Pinout2 | bd3b39de2c053607b9415dd9eb0297a401ba7162 | ["CC-BY-4.0"] | null | null | null | tools/generate-defines.py | lurch/Pinout2 | bd3b39de2c053607b9415dd9eb0297a401ba7162 | ["CC-BY-4.0"] | null | null | null |
#!/usr/bin/env python
import json
import markdown
import unicodedata
import re
import os
import time
import sys
lang = 'en'
pins = None
key_template = {
'python': '{board_name}_{name}',
'ruby': '{board_name}_{name}',
'spin': ' {board_name}_{name}',
'c': '#define {board_name}_{name}'
}
value_template = {
'python': ' = {value}',
'ruby': ' = {value}',
'spin': ' = {value}',
'c': ' {value}'
}
comment_prefix = {
'python': '#',
'ruby': '',
'spin': '\'',
'c': '//'
}
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = value.replace('+','PLUS')
value = value.replace('-','MINUS')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '_', value)
def bcm_to_physical(pin):
pin = pin[3:]
for idx in pins:
compare_pin = pins[idx]
if 'scheme' in compare_pin:
if 'bcm' in compare_pin['scheme']:
if compare_pin['scheme']['bcm'] == int(pin):
print("Mapping BCM{} to {}".format(pin, str(idx)))
return str(idx)
def physical_to_bcm(pin):
pin = pins[pin]
if 'scheme' in pin:
if 'bcm' in pin['scheme']:
return str(pin['scheme']['bcm'])
return None
def physical_to_wiringpi(pin):
pin = pins[pin]
if 'scheme' in pin:
if 'wiringpi' in pin['scheme']:
return str(pin['scheme']['wiringpi'])
return None
def physical_to(pin, scheme='bcm'):
if scheme in ['bcm','wiringpi']:
pin = pins[pin]
if 'scheme' in pin:
if scheme in pin['scheme']:
return str(pin['scheme'][scheme])
elif scheme == 'physical':
return pin
return None
db = json.load(open('../src/{}/pi-pinout.json'.format(lang)))
pins = db['pins']
define = {}
keys = []
def add_define(key, value):
global keys, define
keys.append(key)
define[key] = value
if len(sys.argv) >= 4:  # the script reads sys.argv[1..3]: overlay file, pin scheme, output language
overlay_file = sys.argv[1]
pin_scheme = sys.argv[2]
output_lang = sys.argv[3]
overlay = json.load(open('../src/{}/overlay/{}.json'.format(lang,overlay_file)))
if 'i2c' in overlay:
for addr in overlay['i2c']:
info = overlay['i2c'][addr]
add_define('ADDR_' + slugify(info['name']).upper(), addr)
if 'pin' in overlay:
for pin in overlay['pin']:
info = overlay['pin'][pin]
if str(pin).startswith('bcm'):
pin = bcm_to_physical(pin)
if 'name' in info:
name = slugify(info['name']).upper()
else:
name = slugify(pins[pin]['name']).upper()
add_define(name, physical_to(pin, pin_scheme))
board_name = slugify(overlay['name']).upper()
print(comment_prefix[output_lang] + ' Pin definitions for ' + overlay['name'])
print(comment_prefix[output_lang] + ' Using the {} pin numbering scheme'.format(pin_scheme))
row_length = 0
for name in define:
key = key_template[output_lang].format(
board_name = board_name,
name = name
)
row_length = max(len(key),row_length)
for name in keys:
key = key_template[output_lang].format(
board_name = board_name,
name = name
)
value = value_template[output_lang].format(value = define[name])
value = value.rjust(row_length - len(key) + len(value),' ')
print(key+value)
| 25.458904 | 96 | 0.552596 |
afc4a3f0be4d9ea808205f06cf570014790fdc8d | 203 | py | Python | Algorithms/Sorting/CountingSort2.py | baby5/HackerRank | 1e68a85f40499adb9b52a4da16936f85ac231233 | ["MIT"] | null | null | null | Algorithms/Sorting/CountingSort2.py | baby5/HackerRank | 1e68a85f40499adb9b52a4da16936f85ac231233 | ["MIT"] | null | null | null | Algorithms/Sorting/CountingSort2.py | baby5/HackerRank | 1e68a85f40499adb9b52a4da16936f85ac231233 | ["MIT"] | null | null | null |
#coding:utf-8
from collections import Counter
n = int(raw_input())
counter = Counter(raw_input().split())
ar = []
for i in xrange(100):
ar += [str(i)] * counter.get(str(i), 0)
print ' '.join(ar)
| 15.615385 | 43 | 0.630542 |
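
The same counting-sort idea in Python 3 without Counter, for clarity (a sketch assuming values in 0..99, as in the HackerRank task above):

def counting_sort(values, max_value=99):
    counts = [0] * (max_value + 1)   # one slot per possible value
    for v in values:
        counts[v] += 1
    out = []
    for v, c in enumerate(counts):   # emit each value as often as it was seen
        out.extend([v] * c)
    return out

print(counting_sort([5, 3, 5, 1]))  # -> [1, 3, 5, 5]
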
bb7101ada8ce544953bb0426ef09e562f84cae47 | 529 | py | Python | pyntcloud/neighbors/r_neighbors.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | pyntcloud/neighbors/r_neighbors.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | pyntcloud/neighbors/r_neighbors.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z |
import numpy as np
def r_neighbors(kdtree, r):
""" Get indices of all neartest neighbors with a distance < r for each point
Parameters
----------
    kdtree: pyntcloud.structures.KDTree
The KDTree built on top of the points in point cloud
r: float
Maximum distance to consider a neighbor
Returns
-------
r_neighbors: (N, X) ndarray of lists
Where N = kdtree.data.shape[0]
len(X) varies for each point
"""
return np.array(kdtree.query_ball_tree(kdtree, r))
| 24.045455 | 80 | 0.635161 |
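
A hedged usage sketch with SciPy's cKDTree standing in for the pyntcloud KDTree wrapper (synthetic points; the wrapper exposes the same SciPy query interface):

import numpy as np
from scipy.spatial import cKDTree

points = np.random.default_rng(0).random((100, 3))
kdtree = cKDTree(points)
# query_ball_tree returns, for every point, the indices of all points within r;
# note that each point's own index is always included in its list.
neighbors = kdtree.query_ball_tree(kdtree, r=0.2)
print(len(neighbors), neighbors[0][:5])
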
3c873fcdcd88197cf9e2a49adb8d73709128b60d | 2,571 | py | Python | src/bo4e/bo/standorteigenschaften.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | 1 | 2022-03-02T12:49:44.000Z | 2022-03-02T12:49:44.000Z | src/bo4e/bo/standorteigenschaften.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | 21 | 2022-02-04T07:38:46.000Z | 2022-03-28T14:01:53.000Z | src/bo4e/bo/standorteigenschaften.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | ["MIT"] | null | null | null |
"""
Contains Standorteigenschaften class
and corresponding marshmallow schema for de-/serialization
"""
from typing import List, Optional
import attr
from marshmallow import fields
from bo4e.bo.geschaeftsobjekt import Geschaeftsobjekt, GeschaeftsobjektSchema
from bo4e.com.standorteigenschaftenallgemein import StandorteigenschaftenAllgemein, StandorteigenschaftenAllgemeinSchema
from bo4e.com.standorteigenschaftengas import StandorteigenschaftenGas, StandorteigenschaftenGasSchema
from bo4e.com.standorteigenschaftenstrom import StandorteigenschaftenStrom, StandorteigenschaftenStromSchema
from bo4e.enum.botyp import BoTyp
from bo4e.validators import check_list_length_at_least_one
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Standorteigenschaften(Geschaeftsobjekt):
"""
Modelliert die regionalen und spartenspezifischen Eigenschaften einer gegebenen Adresse.
.. HINT::
`Standorteigenschaften JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/bo/StandorteigenschaftenSchema.json>`_
"""
# required attributes
bo_typ: BoTyp = attr.ib(default=BoTyp.STANDORTEIGENSCHAFTEN)
    #: General properties
eigenschaften_allgemein: StandorteigenschaftenAllgemein = attr.ib(
validator=attr.validators.instance_of(StandorteigenschaftenAllgemein)
)
    #: Properties for the electricity (Strom) division
eigenschaften_strom: List[StandorteigenschaftenStrom] = attr.ib(
validator=attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(StandorteigenschaftenStrom),
iterable_validator=check_list_length_at_least_one,
)
)
# optional attributes
    #: Properties for the gas division
eigenschaften_gas: Optional[StandorteigenschaftenGas] = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(StandorteigenschaftenGas)), default=None
)
class StandorteigenschaftenSchema(GeschaeftsobjektSchema):
"""
Schema for de-/serialization of Standorteigenschaften
"""
class_name = Standorteigenschaften
# required attributes
eigenschaften_allgemein = fields.Nested(StandorteigenschaftenAllgemeinSchema, data_key="eigenschaftenAllgemein")
eigenschaften_strom = fields.List(fields.Nested(StandorteigenschaftenStromSchema), data_key="eigenschaftenStrom")
# optional attributes
eigenschaften_gas = fields.Nested(StandorteigenschaftenGasSchema, load_default=None, data_key="eigenschaftenGas")
| 41.467742 | 196 | 0.801634 |
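
A minimal sketch of the attrs validator pattern the model above is built on (toy class, not part of BO4E):

import attr

@attr.s(auto_attribs=True, kw_only=True)
class Messwert:
    # instance_of rejects wrongly typed values at construction time
    name: str = attr.ib(validator=attr.validators.instance_of(str))
    # deep_iterable validates every member of the list attribute
    werte: list = attr.ib(
        validator=attr.validators.deep_iterable(
            member_validator=attr.validators.instance_of(float),
        )
    )

ok = Messwert(name="a", werte=[1.0, 2.5])   # passes both validators
# Messwert(name=1, werte=[1.0]) would raise TypeError
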
b1f8a1cb4b21c3994787c2e39a860fa22f68d47b | 3,738 | py | Python | pocketthrone/managers/filemanager.py | herrschr/pocket-throne | 819ebae250f45b0a4b15a8320e2836c0b5113528 | ["BSD-2-Clause"] | 4 | 2016-06-05T16:48:04.000Z | 2020-03-23T20:06:06.000Z | pocketthrone/managers/filemanager.py | herrschr/pocket-throne | 819ebae250f45b0a4b15a8320e2836c0b5113528 | ["BSD-2-Clause"] | null | null | null | pocketthrone/managers/filemanager.py | herrschr/pocket-throne | 819ebae250f45b0a4b15a8320e2836c0b5113528 | ["BSD-2-Clause"] | null | null | null |
__all__ = ['FileManager']
import os
from kivy.core.image import Image as CoreImage
from kivy.cache import Cache
# static class for accessing game paths and os methods
class FileManager:
_tag = "[FileManager] "
_initialized = False
IMAGE_FORMAT = ".png"
# to be set diectly after creation
root = {"game": None, "img": None, "mod": None}
# texture cache
TEXTURE_CACHE = "textures"
_texture_none = None
@classmethod
def set_game_root(self, path):
'''sets the game folder paths considering root path'''
self.root["game"] = path
self.root["img"] = path + "/img/"
self.root["mod"] = path + "/mods/"
self.initialize()
@classmethod
def initialize(self):
'''flag FileManager as initialized'''
# make texture cache
Cache.register(self.TEXTURE_CACHE)
# flag FileManager as initialized
self._initialized = True
@classmethod
def check_if_initialized(self):
'''aborts method if manager is uninitialized'''
if not self._initialized:
return False
@classmethod
def game_root(self):
		'''returns the root folder path'''
self.check_if_initialized()
return self.root["game"]
@classmethod
def img_root(self):
'''returns the image resource folder path'''
self.check_if_initialized()
return self.root["img"]
@classmethod
def img_format(self):
'''returns accepted image file extension'''
return ".png"
@classmethod
def mod_root(self):
'''returns the mod folder path'''
self.check_if_initialized()
return self.root["mod"]
@classmethod
def exists(self, file_name, is_image=False, auto_ext=True):
'''returns whether a file exists'''
if is_image:
file_path = self.img_root() + file_name
if auto_ext:
file_path += ".png"
return os.path.isfile(file_path)
return os.path.isfile(file_name)
@classmethod
def read_file(self, file_path):
'''returns content of file under path file_path'''
print(self._tag + "READ " + file_path)
content = ""
file = open(file_path, "r")
content = file.read()
file.close()
return content
@classmethod
def get_texture(self, name, type="", use_cache=True):
		'''returns a kivy Texture loaded from the image directory'''
# argument rel_path is relative this games image directory
texture = None
# when manager is uninitialized
if not self._initialized:
print(self._tag + "ERROR manager is not initialized")
return None
# when parameter rel_path is None -> return default texture
elif name == None or name == "none":
print(self._tag + "ERROR while loading texture; is none.")
return
# when manager is initialized AND rel_path =/= None OR "none"
elif use_cache == True:
return self.get_texture_from_cache(name)
elif use_cache == False:
return self.get_texture_from_file(name)
@classmethod
def get_texture_from_file(self, name):
'''returns texture class from file name'''
full_name = str(name) + ".png"
abs_path = self.img_root() + full_name
texture = None
try:
if abs_path:
print(self._tag + "trying to load " + str(abs_path) + " from file")
image = CoreImage(abs_path)
if image:
texture = image.texture
except:
print(self._tag + "ABORT; can't load texture")
return
return texture
@classmethod
def add_texture_to_cache(self, name):
		'''adds a texture to the texture cache'''
texture = self.get_texture_from_file(name)
Cache.append(self.TEXTURE_CACHE, name, texture)
@classmethod
def get_texture_from_cache(self, name):
'''returns a texture under name from cache'''
#try to load texture from cache
texture = Cache.get(self.TEXTURE_CACHE, name)
if texture != None:
return texture
if texture == None:
# load from file & add to cache
texture = self.get_texture_from_file(name)
self.add_texture_to_cache(name)
# return texture
return texture
| 26.323944 | 71 | 0.706528 |
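
A tiny sketch of the Kivy Cache calls the manager above relies on (category and key names are illustrative):

from kivy.cache import Cache

Cache.register('textures')                   # create a named cache category once
Cache.append('textures', 'grass', object())  # store any object under a key
tex = Cache.get('textures', 'grass')         # returns the object, or None on a miss
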
3ca3476d9be341c7d55701d15be9a0ebb8d9184e | 2,156 | py | Python | tests/onegov/election_day/views/test_views_upload_parties.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | tests/onegov/election_day/views/test_views_upload_parties.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | tests/onegov/election_day/views/test_views_upload_parties.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null |
from datetime import date
from tests.onegov.election_day.common import create_election_compound
from tests.onegov.election_day.common import login
from tests.onegov.election_day.common import upload_election_compound
from tests.onegov.election_day.common import upload_party_results
from tests.onegov.election_day.common import upload_proporz_election
from unittest.mock import patch
from webtest import TestApp as Client
from webtest.forms import Upload
def test_upload_parties_invalidate_cache(election_day_app_gr):
anonymous = Client(election_day_app_gr)
anonymous.get('/locale/de_CH').follow()
client = Client(election_day_app_gr)
client.get('/locale/de_CH').follow()
login(client)
upload_proporz_election(client)
upload_election_compound(client)
urls = (
'/election/proporz-election/party-strengths',
'/elections/elections/party-strengths'
)
for url in urls:
assert '49117' not in client.get(url)
assert '49117' not in anonymous.get(url)
upload_party_results(client)
upload_party_results(client, slug='elections/elections')
for url in urls:
assert '49117' in client.get(url)
assert '49117' in anonymous.get(url)
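# the actual import is mocked below; this test only checks that the upload
# form hands the file through to import_party_results, for both an election
# and an election compound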
def test_upload_parties_submit(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
create_election_compound(client)
for slug in ('election/election', 'elections/elections'):
with patch(
'onegov.election_day.views.upload.parties.import_party_results'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get(f'/{slug}/upload-party-results')
upload.form['parties'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
| 32.666667 | 75 | 0.699443 |
a7362c94db122a53c8a49772b5f9f48ba739cbeb
| 700 |
py
|
Python
|
Problem Solving/Mathematics/Probability/Assignment Problem/assignment_problem.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Problem Solving/Mathematics/Probability/Assignment Problem/assignment_problem.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Problem Solving/Mathematics/Probability/Assignment Problem/assignment_problem.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from decimal import Decimal
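# partial(M, N, k) appears to compute, by inclusion-exclusion over
# stars-and-bars compositions, the probability that every one of the N bins
# receives fewer than k of the M items; each binomial-coefficient ratio is
# built up incrementally, and Decimal keeps the alternating terms exact
# until the final conversion to float.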
def partial(M, N, k):
if k == 0:
return 0
s = Decimal(1)
q = Decimal(1)
for i in range(min(M//k, N)):
q *= Decimal(-(N-i)) / Decimal(i+1)
for j in range(k):
q *= Decimal((M-i*k-j)) / Decimal((N+M-i*k-1-j))
s += q
return float(s)
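# E[max] = sum over k of k * P(max == k), with P(max == k) obtained as the
# difference of consecutive "all bins below the threshold" probabilities.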
def solve(M, N):
before = 0
res = 0
for k in range(1, M+1):
after = partial(M, N, k+1)
res += k * (after - before)
before = after
return res
if __name__ == "__main__":
cases = int(input())
for _ in range(cases):
M, N = map(int, input().split())
res = solve(M, N)
print(res)
| 18.421053 | 60 | 0.482857 |
59615b3836eb35dfa9694a2ef16ac67ab3458829
| 66,665 |
py
|
Python
|
UAE-main/Emarite.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
UAE-main/Emarite.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
UAE-main/Emarite.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
# ENCRYPTED BY Boy HamzaH
# Subscribe to my YouTube channel
# And don't forget to follow my GitHub
# The rest of the file is a single obfuscated statement. The character-code
# list inside the wrapper decodes to `__import__("zlib").decompress`, so the
# script boils down to exec(<fn>(zlib.decompress(payload), "<haMzah>",
# "exec")), where <fn> -- cut off together with the payload in this dump --
# is almost certainly `compile`. The multi-kilobyte zlib-compressed bytes
# payload (it begins with the zlib header b'x\x9c') is truncated here and is
# not reproduced; only the wrapper is kept:
exec((lambda _____, ______ : ______(eval((lambda ____,__,_ : ____.join([_(___) for ___ in __]))('',[95, 95, 105, 109, 112, 111, 114, 116, 95, 95, 40, 34, 122, 108, 105, 98, 34, 41, 46, 100, 101, 99, 111, 109, 112, 114, 101, 115, 115],chr))(_____),"<haMzah>","exec"))(b'x\x9c...'))  # payload truncated
\xf6\xed\xcf9\xd3\xe4\x0b\xb8\xde\x85\xfchbY*\x871.}]\xb74\x9a\xc5\x83uy\x95\xccB\n\xeec\xa6)\xdfl\x13\xa8\xe1\x1c\x92/\xe4\x9b\xe24_\xf3Jqi\xba\xd9$\xfe\x8f\xe9W\xa7\xdb\xd7\x0c\xff\xc4\x07\xed\x04\x89\xb7Ql\xab@\xcb\x06\x05\x08\xb7>\xc2\xbe}4\xf4\x84\x91\x15\x04\x91\xcf@Bnx\xb2\x0e\xa9s\xe7&\x99\xd4r\xb1p\x02\xb4o5H2H\xb1)~\x8c>\x019\xcd-\xdf\x98\xbaC\xb0,!T2+sJW\xd3\x82\xb9RL\xf4\x87\x85\xf9\xe6\x8e\xe2\r\xe2\x88\x9b\nD\x18\xe1,W:S\xb9\xd4\t\xb8\xe4\x1b\x13=\xf0\xb2F\xb5\xe0\x7f\x98C\x1b\xfcd^\x9fz\x87\x85\x88^"\xa6\'\xd5\r4\x166\x98\x97\x7fM9H)\xc1{5\xcf\x91\xbf\xc5\xa8\xa4s^L\xf1\x17q\xb6\xda79\xd9\xfd\x96\xd4\xc9\xaf\xfc\xfa\x0b+6\xa7\x03\xe8Z@R\x90\x04\x9e\t\x8bJ4\r\x9br\xff\xc6\x1d\xdc\xbc0\xd7YGo\xccE3\x16\xc4\x95l\xaa\x1d\x8b\xe7\xc9xu\xbcgJ\xe1\xf2\x9fe\xf4\xa3_?\x9d\x86\x1a\n\x064\xa6\xf8en\xcaW\xfa\xe1\xca\xdaA\x08S\xf3\xb47j\xef7\xb3\x03m6\x12\xfeK\xb3:\x8f\xd3\x98\t\xa7\x94\xf5\xe8\xb2\x19\x93\xc6hm\'\xdf5Y(H^T\xb7\xa3y\xd8\xa3\xe0\x95\xe3\xf7\xde\x90i\x15N\x0c\xea\xa2O\x1b\xca\x8d\xec\xe8\xdeSX \xe4[\xf5\x98\x10\'B\xc4z\x84\x02\xc3\x8e]\xb0+T\x0f2Z\xa3\xd9\xaf\xaa\xac\x03TL=\xda\xdc\xe6\xee\xe5\xb0P\xa7\xe2\xe1\xfc\xe9!d\x14\xac\xd6|\x13\xbe\x9f\x8c\xa4X\x909)\xbb\xaf\xe5\xf3\x8av\xcev\xde(\x11\xeb\xff\x90\x9b\xb0\xbb\xf9]\xfd\xeb\x94\x89\x0f\xedua\x1d}\x89"\x1c:\xa7\xfa\xb1D\xc4\x13\x8a\x7fU\xb16\xbb\x8b\x7f\xacZ\xd0\x10\x84\xaf\xdd\xd4\x95\x01\xf5\xecq\xc7`\x15\x16v\xc4\xf3\xe01\xbd(\x04\r2\xf0e3H\x845b\x93N\x177\xca\xea\xb8\xdb\xa0%\xd3\x076\xe6Q\xbb\xe8\xfc\xe5\xae\xd6\x9f\x9dL\x1dq\xc2uS_\x87\xa1\xe4\x1c\n\xde\x98\x12\x90\xeb\x87\xfe.2\xcf\x8c\xe5P\x19\xa0K\x12\x0c\xa4(\xe2a\x96\x87\xf62\xce]\x9eT\xb7C\x8a\x03\xa5\x9be\xfb\xe3\x04Z\x02\x8aq\xd2\x9d\x1f\x99!f\xa3\x99xcvB\xf2Ib6\x99&\xcbW\xc0\xa4\xc9\xc8\xc6;\xfa\x0f\xcbf\xc3]\x821\xb1v\xbe\xbc\xa2\xe2\x0eSD\xea\xdc\xd7\xef\x8fw\xcd\xe73\xf1\x92W\xa6\xd6\xa7\x1b\xe0:6Sg=\x1e\xb2f\xbb\xc1\n\x1eKz"\xb60\xc3\x8c\xa7\xcc\xd40e,\xf4\x8b\x9a3\tT\x0b\xe7lw\xd6\xd7\xf77\xff{\xaa\\Tz\xc4\x89\x8d01]w\xb2\xf4\xe2\x98\x94\x17\xb8\x8cd\xf3\xd3l\xa5\xb1\xcc\x0fm\xdb\\\xf0\x17P3N\xd7\xa4G\xc1v`6Z\x9d\xcc\xac+"U\xb0\x08\x17\xdc\xa6SU\xf5\xb9\x87z\xea\x9d1\xa4l\x94\xa3\xe8\xde\xb6}\xbbHQ\xf0m\xf98\x85<Oh\xb6\xb5\x16\xd4\xe1\x84R\xf20\xd4\xa6\xd2`\xb63\x0b;H\xc5IO\x9f,ko\x10&\x03*b\xbflf\xe9\x00I\x1a\x90`\x12\xa1\x13\xda\xaa\x1b[\x02f\x96\x84e\xbfaO\xba\xda\x89k\n\r\xe2xP\xd9\xa7\x0e\x08rI\x1fmz\xf9\xa0\xd2`P\xdc\xf0\xee\xb8,\x13\xdb\xf4r\xdbKR,S\xa5\xb9*\x0f0h\r\xed\xdd\xdaB\r\x82N\x85\xfc\xae\x9f,Q\xa6\xe0\xd1\xb7\xcd\xc6\xeb\xb7\x9b\x9f\x0596\xf9au\xb2\x85\x0fo\xce\x12\xe4\xbfO\x97\x0f\xa3\x14,\x99\x81d\xe3\n\x0es\xe0\x1b\xe5G\xf6S5Y\x98:\xe4\xb6"t\x84\x14\xb9\xbf&,\xae\xaf\xab\xe2Q@\x86eS\xb3\xc20v?\xb5r^\x91\x8d\xdc~RW\x9al\xfc\xac\xd2\xe1W\xe5\xc8\x81)\xa8\xebpTt(\x16\xd3`\xfb\xc0\x00-\xae\x16\xe7\x19F\xf7\x12\xee\xcd\x02uB\t:EX\xb7\x90o\x88xtxh\xd9\xde+\xccf\xd6\x84M1\xea<5\x8dn9\x07\x9fi\xdd\xd2\xcc"x\xed8\x14\x80\\?\x11J|r\xcf\x96]J:0\xf2:\xc5s\x10CiJw\x97=\xe2\xc1\x80\x97x3\x8c\xd3\xb1\xc1j2\xac\xa9\xa0\xb5\xce\\\x7f\x1b\xf0\xde/\xab\xdb3\xe4C\xd7\xab\xe1\xcf\xeav\xd8\x11\x1bv\x01+\x96}\x90\xed\xbf\xceq\xa1\xf7\x89\xb4!%d\xf4\x14\x9a\xa4\xf4\xb3\x19\x8bl\xf2\xba\xb0\xcc\xa0?\x07\xbeA\x17Kk\x05\x9c\x1b3\xc8<n\x01\xa7\xa9t\xeej\xdb$\xd1\t\x80\xa4\xa5\x11\x05\x9d\xc7\xfb\x14\x80\xfdHC\x8c\xcd\xe8\xf1\xa8X\xd8\x84\xfc\xd7\xd5\xbf,/D+\xbc\x1d\xf3se!\x1c\x0f\xc0\x0b\xea\xb5\xe7\xc8\x1b\xfc\xfa\xbb\xb6R\xb3\\\xa4\xd0\x16\'\x08\xb1^\xc7\x87\xfc\xa9c\xc2\n\x89\x86\x13\t\x8d`\x12\xce6e\x08+n(\'\xcc\xb6\xc8\xd7\x88h\xc3w\xb3\x19B\xd1#}$\xad
\t\xd8W\x05\xf6$%\xe5\xf3\xbf\xb3\xc7\x82\xf9\x10_\xa2\xc7\xdc\x06\xbe\xacK\xea\x83\x85\xb3\xee<\x13~G\x94H\x15\xc6@e\x0c\xfa\x8d\x14G\xc1c\n\x8c\x8bW\x05\'\x15\xde\xe3\x8co\xb6\t :e6\xc6\xe4\xeb\x9b,\xaf \xe5\xb0{\xfe\x9f]\xb2\xd0\x9f\xceOc\xf0`\x8e\xfe4\x1fc\x86\tC\x9c\xbee\x05h!\x03|\x1cRb\xb7\x0em\xa5\xa8\x9a\xcd"\xa03\x14\xd9\x98\x99$\x9c\x99I\xe6\x97\xabni\xc0\x0c\xf3,s\xd9\x1d\xb70\x82e\xff\xc8\xa4\xc4\x05 \xf8\xf85 \xbd\x18(\xf7\xbf\xae\x8a\x93\xf8\x95\xcc\xb5n\xd8_\x8e\x9b\xa7\'A\xb8\x15~L\xf2\x02\xf6\x8a\xcd\xd5\xa0\xf83\xf4\x88\x83Xe\x08\x16\x9f\xcbc\x8cz-\x84\xf93\x9d\xc0\xe6e/\x97\x04\x8f\xb1\xc0\xed&V\xde\xd8\xe3y\x16\xe8<XZA\x16*?\xa8\x8f:{\xa7\xb2xe\x89 \xe4!\x84=s\x19\xca\xbc\xdd\xb3km1\xb6\xad\x1ec\x12\xf8q(\xa8D}\x92\x10\xa7\x12[\x92\x83\xea\xcd\xef\xf6\xc6\x1c\x0bB\x06\x03\x0e\xb3#\x08\xc4\x01\x9d$=\x8f\x9c\xf6\xf6\xb5\xb2i\x8fT\xba\x80\xc5\x9c\xcfb\xdc:x\x16\x9f\xe1Ps\x16\x17\xba\xe6J\xf1{\xd1\x14\xd3\xd054\xa2:`\x86\xadX7egw\x1a\x84\xde\xc7&\x98a\x9f\xb2\xc8\xd0\xf9\x06\xfb\x13\xbd\xa2\x9d\x7f \xd0\x18\x02\x86\xd9\x10=\x16Y\x8a\xc7\xca\x87\x0e\x97\xefx-on.\x8500\xb1\xd9\x8d\xc8,\x11\x1b7\xdc\x9fq\t55\xfb\x8eE\xb1l4l\xb2\xdf\x0f\x7f?4\xdd\xef0\xd1\xac.\xc0\xcd\x85\x8f"_\x16\x01{\xe3\x87\x87\xf5\xf48\xac\x8d!\x82\xce/N\xae\xc7,\xf8p\xe0ps6)\xfe\x87\x83:ka\xba\xae>\xc0GS\x94\xb6\x90\xcep\x961N^\x0f\xb2O\xe6\x8d\xd9_J\xfe\xda=\xb3<\xa5?\x12\x8c\xae\x04\xb7U\xd2\xde\n\x95g\x116\xb4\xa0\xc1\xd8\x19gK\xf6\x93\xfeT:\x18\x91\x00g\xe7\xf4}\x86\xb5\x02\x0f\xdf<\x86\xb5\xc5\xf9t\x0e\x13%\x18\xd0\xa5\xe7?\x94"\xdb<\x92u3\xb4$\x00\x84g\x16"\xf3\xdc\xef\xfe\x94\x11k\xa2\xf9\x17\xd3\xfaH\xb0\xb85\xd7\xceoC\x01A^t\x0b\x9fl\x17\xfas\xf8\x18\xff7\x1f-\x8e+\xf8\xb2\xf4P,\x04F\xf6\x02\xcc\xc5xP=\xf8=,\x9c\x12g\xfaM\xa4\x16\xb1\x14\x8e\xb3\xa5?\xe9L19\x1e\xbd|\xfc\x8d~\x14\x9f\x8f\xd1\x9a\x0e\xdaG\xc8\x92\x15\xabm\xb1U\xb2\x98H\xb6f\x15H\xde\xcb\xe5\xcb\xfbX\x08\\+NPjlM\xc8Y7\x9e7R\xb2\xfaY\xa5\xe3K\xcf\xecf!g=C\xb9\xbb>.\x03\xf8\xd8\x80CC\x98\x82\xb5\x9c\x19h2\x07\x00\x17z\xe6\xecxB\xe1\x9c\xeb\x81\x91\x05\xb3\x1b,\x83\x03\x93nq(G\x83\x11\xeblL\xf0\x05O\x9bY\xdf\xd5\xd1X\xadv<\xc0h\x80\xdb\x86\r\xceKA\xbb~\xe3E\xf9\x1ddN7O\xcf\x86L\xabHj`3\xd4O\xe1G\xaa2\x96`\xb90Gh\x15:\x94\xf7\x9f\x8d\xd8;:\x86\x95\xf9Z{\x7f\xde\x84\xcb\xa6d\xff\x99\xe2\xfb`\x98\x0cG|\xd7\xa6v\xdcHH5?\x95/+q\xcaX\x8e\xaf\xf6\xff\x9e-\xe3\xa11\x96\xb5\x94\x1b\x08\x84\x05\x11\xac\x8b,\xa0)\n\x96\xfd\x1f\xf5\x99\xc7k\xbc\xfb\xf8\xee\xa4\xdd\x87Q\x90\x80,/\x90\x88m\x02\x84_g\x95/\xb7!\x8e\xc4lD\xc6\xe7\xb4\xb8v\xe6`\xdfT\xcbr@S\\\xf6\xf8\xfa\xad\xbfG\xdf\xa6\xde\xfe\xae\xfe\x9d\xbd\x88\xa2\x04\xd9-\xb5r\x07\xc0\x0b\xa5\x88N<eT\x9c\x1a\xf2y\xfd/V\x1e^\xa1x\\\xfc\xaa\xfe\x9d\\A0\xe3\xad\x12P\x9e\x8b\x80I\x0b\x83\xa8\xa0"\x1f\xeb\xbfms\x80\xd4\x10T\t\x00\\xy\x1d\x8e\x9dn\x14\xba\xb0\xea\xcbR&\x983\xdd\xe4\'\xc3nxp/\r;\xce\x1d\x0c\x10\xfc\x10\xd5\xa3\xdd}a\xa9H\xd7>\x18\xab\x0f\xf6\x0ewv\xdb\x87\x0f\xff\x1f@\x94\x99e',compile))
| 13,333 | 66,563 | 0.734133 |
ab9ca67f277a91dbee47ada211bbf6c5c38e0130
| 4,379 |
bzl
|
Python
|
kythe/docs/asciidoc.bzl
|
wcalandro/kythe
|
64969a853711c228b4e3cfc3ce91b84b5bb853d7
|
[
"Apache-2.0"
] | 1,168 |
2015-01-27T10:19:25.000Z
|
2018-10-30T15:07:11.000Z
|
kythe/docs/asciidoc.bzl
|
wcalandro/kythe
|
64969a853711c228b4e3cfc3ce91b84b5bb853d7
|
[
"Apache-2.0"
] | 2,811 |
2015-01-29T16:19:04.000Z
|
2018-11-01T19:48:06.000Z
|
kythe/docs/asciidoc.bzl
|
wcalandro/kythe
|
64969a853711c228b4e3cfc3ce91b84b5bb853d7
|
[
"Apache-2.0"
] | 165 |
2015-01-27T19:06:27.000Z
|
2018-10-30T17:31:10.000Z
|
load("@bazel_skylib//lib:shell.bzl", "shell")
load("@bazel_skylib//lib:paths.bzl", "paths")
AsciidocInfo = provider(
doc = "Information about the asciidoc-generated files.",
fields = {
"primary_output_path": "Path of the primary output file beneath {resource_dir}.",
"resource_dir": "File for the directory containing all of the generated resources.",
},
)
_toolchain_type = "//tools/build_rules/external_tools:external_tools_toolchain_type"
def _asciidoc_impl(ctx):
resource_dir = ctx.actions.declare_directory(ctx.label.name + ".d")
primary_output = "{name}.html".format(name = ctx.label.name)
# Declared as an output, but not saved as part of the default output group.
# Build with --output_groups=+asciidoc_logfile to retain.
logfile = ctx.actions.declare_file(ctx.label.name + ".logfile")
# Locate the asciidoc binary from the toolchain and construct its args.
asciidoc = ctx.toolchains[_toolchain_type].asciidoc
args = ["--backend", "html", "--no-header-footer"]
for key, value in ctx.attr.attrs.items():
if value:
args.append("--attribute=%s=%s" % (key, value))
else:
args.append("--attribute=%s!" % (key,))
if ctx.attr.example_script:
args.append("--attribute=example_script=" + ctx.file.example_script.path)
args += ["--conf-file=%s" % c.path for c in ctx.files.confs]
args += ["-o", paths.join(resource_dir.path, primary_output)]
args.append(ctx.file.src.path)
# Get the path where all our necessary tools are located so it can be set
# to PATH in our run_shell command.
tool_path = ctx.toolchains[_toolchain_type].path
# Resolve data targets to get input files and runfiles manifests.
data, _, manifests = ctx.resolve_command(tools = ctx.attr.data)
# Run asciidoc and capture stderr to logfile. If it succeeds, look in the
# captured log for error messages and fail if we find any.
ctx.actions.run_shell(
inputs = ([ctx.file.src] +
ctx.files.confs +
([ctx.file.example_script] if ctx.file.example_script else []) +
data),
input_manifests = manifests,
outputs = [resource_dir, logfile],
arguments = args,
command = "\n".join([
"set -e",
"mkdir -p {resource_dir}".format(resource_dir = shell.quote(resource_dir.path)),
# Run asciidoc itself, and fail if it returns nonzero.
"{asciidoc} \"$@\" 2> >(tee -a {logfile} >&2)".format(
logfile = shell.quote(logfile.path),
asciidoc = shell.quote(asciidoc),
),
# The tool succeeded, but now check for error diagnostics.
'if grep -q -e "filter non-zero exit code" -e "no output from filter" {logfile}; then'.format(
logfile = shell.quote(logfile.path),
),
"exit 1",
"fi",
# Move SVGs to the appropriate directory.
"find . -name '*.svg' -maxdepth 1 -exec mv '{{}}' {out}/ \\;".format(out = shell.quote(resource_dir.path)),
]),
env = {"PATH": tool_path},
mnemonic = "RunAsciidoc",
)
return [
DefaultInfo(files = depset([resource_dir])),
OutputGroupInfo(asciidoc_logfile = depset([logfile])),
AsciidocInfo(primary_output_path = primary_output, resource_dir = resource_dir),
]
asciidoc = rule(
implementation = _asciidoc_impl,
toolchains = [_toolchain_type],
attrs = {
"src": attr.label(
doc = "asciidoc file to process",
allow_single_file = True,
),
"attrs": attr.string_dict(
doc = "Dict of attributes to pass to asciidoc as --attribute=KEY=VALUE",
),
"confs": attr.label_list(
doc = "`conf-file`s to pass to asciidoc",
allow_files = True,
),
"data": attr.label_list(
doc = "Files/targets used during asciidoc generation. Only needed for tools used in example_script.",
allow_files = True,
),
"example_script": attr.label(
doc = "Script to pass to asciidoc as --attribute=example_script=VALUE.",
allow_single_file = True,
),
},
doc = "Generate asciidoc",
)
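# Example usage (editor's sketch, not part of the original file; the target,
# source file, and conf file names below are hypothetical):
#
# load("//kythe/docs:asciidoc.bzl", "asciidoc")
#
# asciidoc(
#     name = "schema",
#     src = "schema.txt",
#     attrs = {"toc": "True"},
#     confs = ["asciidoc.conf"],
# )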
| 41.704762 | 119 | 0.611784 |
e624c79117e6ad5ea768a2205621a752851da45a
| 1,345 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/intro/decorator_utils.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/intro/decorator_utils.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/intro/decorator_utils.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import functools
# hand-rolled
def decorate_with_memo(func):
lookup_map = dict()
@functools.wraps(func)
def helper(n):
# MEMOIZATION: check whether a precomputed result exists
if n in lookup_map:
return lookup_map[n]
result = func(n)
# MEMOIZATION: store the computed result
lookup_map[n] = result
return result
return helper
# memoization not as clearly visible here
def decorate_with_memo_shorter_one_param(func):
lookup_map = dict()
@functools.wraps(func)
def helper(n):
if n not in lookup_map:
lookup_map[n] = func(n)
return lookup_map[n]
return helper
def decorate_with_memo_shorter_orig(func):
lookup_map = dict()
@functools.wraps(func)
def helper(*args):
if args in lookup_map:
return lookup_map[args]
else:
result = func(*args)
lookup_map[args] = result
return result
return helper
def decorate_with_memo_shorter(func):
lookup_map = dict()
@functools.wraps(func)
def helper(*args):
if args not in lookup_map:
lookup_map[args] = func(*args)
return lookup_map[args]
return helper
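# Example usage (editor's sketch, not part of the original file): any of the
# decorators above can wrap a recursive function, e.g. a naive Fibonacci:
#
# @decorate_with_memo
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# fib(70)  # returns immediately, since each subresult is computed only once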
| 20.074627 | 66 | 0.633457 |
fb07bb97aefdc6435635740fdd659d5ee7cf5533
| 646 |
py
|
Python
|
tests/onegov/pay/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/pay/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/pay/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import pytest
import transaction
from onegov.pay.models import Payment
@pytest.fixture(scope='function', autouse=True)
def reset_payment():
yield
# during testing we need to reset the links created on the payment
# model - in reality this is not an issue as we don't define the same
# models over and over
classes = [Payment]
while classes:
cls = classes.pop()
for key in (Payment.registered_links or tuple()):
del cls.__mapper__._props[key]
classes.extend(cls.__subclasses__())
if Payment.registered_links:
Payment.registered_links.clear()
transaction.abort()
| 23.071429 | 73 | 0.682663 |
fd1bb520973ae33e6e5d32eeaf2b640688ce4e41
| 449 |
py
|
Python
|
Curso-Em-Video-Python/2Exercicios/017_Catetos_e_Hipotenusa.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/017_Catetos_e_Hipotenusa.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/017_Catetos_e_Hipotenusa.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
from math import hypot
co = float(input('How long is the opposite leg: '))
ca = float(input('How long is the adjacent leg: '))
hipotenusa = hypot(co, ca)
'''hipotenusa = (ca**2 + co**2) ** (1/2)  # the mathematical formula:
the sum of the squares of the legs of a right triangle equals the square of its hypotenuse
print('The hypotenuse is: {:.2f}'.format(hipotenusa))'''
'''The easier formula uses math.hypot, as above'''
print('The hypotenuse is {:.2f}'.format(hipotenusa))
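# Quick check (editor's note): for the classic 3-4-5 right triangle,
# hypot(3.0, 4.0) == 5.0, matching (3**2 + 4**2) ** 0.5.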
| 44.9 | 136 | 0.703786 |
239f39d43572c6dd8b67d39196a62f04b93137ae
| 527 |
py
|
Python
|
IVTa/2014/ALEKSEEV_I_S/task_3_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/ALEKSEEV_I_S/task_3_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTa/2014/ALEKSEEV_I_S/task_3_50.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 3. Variant 50.
# Write a program that prints the name "Саид-Мурадзола Садриддин" and asks for his pen name.
# The program must concatenate these two strings and print the result,
# separating the name and the pen name with a dash.
# Alekseev I.S.
# 15.05.2016
name = "Саид-Мурадзола Садриддин"
print("The hero of today's program is " + name)
alias = input("By what name do we know this person? Your answer: ")
# expected input: Айни (Aini)
print("That's right: " + name + " - " + alias)
input("\n\nPress Enter to exit.")
| 32.9375 | 99 | 0.732448 |
23baafab3de4686aa7f0b8672a42c0db1845e871
| 256 |
py
|
Python
|
Problems/Stack/Easy/RemoveAllAdjacentDuplicatesInString/test_remove_duplicates.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Stack/Easy/RemoveAllAdjacentDuplicatesInString/test_remove_duplicates.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Stack/Easy/RemoveAllAdjacentDuplicatesInString/test_remove_duplicates.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from remove_duplicates import removeDuplicates
class Test(TestCase):
def test_remove_duplicates(self):
self.assertEqual(removeDuplicates("abbaca"), "ca")
self.assertEqual(removeDuplicates("azxxzy"), "ay")
| 32 | 58 | 0.753906 |
7bca982bd072e67d2d8cbc89ef9e46683acc55dc
| 7,527 |
py
|
Python
|
backend/cctalk/tools.py
|
bytebang/ppvm
|
07a0650c618b9dd6abc7563070a80c132034f532
|
[
"MIT"
] | null | null | null |
backend/cctalk/tools.py
|
bytebang/ppvm
|
07a0650c618b9dd6abc7563070a80c132034f532
|
[
"MIT"
] | null | null | null |
backend/cctalk/tools.py
|
bytebang/ppvm
|
07a0650c618b9dd6abc7563070a80c132034f532
|
[
"MIT"
] | null | null | null |
""" Provides tools for general use.
Module content
--------------
"""
# The python-cctalk package allows one to send ccTalk messages and decode replies from a coin validator.
license_text = "(C) 2011 David Schryer GNU GPLv3 or later."
__copyright__ = license_text
__autodoc__ = ['make_serial_object', 'drop_to_ipython', 'make_msg', 'send_packet_and_get_reply', 'interpret_reply']
__all__ = __autodoc__
from IPython.terminal.embed import InteractiveShellEmbed
import os
import serial
import time
import subprocess
from struct import unpack
def make_msg(code, data=None, to_slave_addr=2, from_host_addr=1):
"""Makes a ccTalk message from a ccTalk code and data to be sent with this packet.
Parameters
----------
code : int
ccTalk code for this message.
data : list of integers
Data to be sent in this message.
to_slave_addr : int
Address of slave to be sent to. Defaults to 2.
from_host_addr : int
Address of host that is sending the message. Defaults to 1.
Returns
-------
packet : list of integers
An integer equivalent of the ccTalk packet.
This needs to be converted to a byte packet prior to sending.
"""
if not data:
seq = [to_slave_addr, 0, from_host_addr, code]
else:
seq = [to_slave_addr, len(data), from_host_addr, code] + data
packet_sum = 0
for i in seq:
packet_sum += i
end_byte = 256 - (packet_sum%256)
packet = seq + [end_byte]
return packet
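# Worked example (editor's note, not in the original source): a ccTalk simple
# poll has header code 254 and no data bytes, so make_msg(254) returns
# [2, 0, 1, 254, 255] -- the final 255 brings the byte-sum of the packet
# (2 + 0 + 1 + 254 + 255 = 512) to a multiple of 256.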
def send_packet_and_get_reply(serial_object, packet_holder, initial_wait=0.05, total_wait=1,
debug=True, verbose=True):
"""Sends a packet and gets a reply.
Parameters
----------
serial_object : object made with :py:func:`cctalk.tools.make_serial_object`
Serial communication object.
packet_holder : Holder
Holder containing the packet and extended information about the packet being send.
See :py:class:`cctalk.coin_messenger.CoinMessenger` for Holder construction.
initial_wait : float
Time in seconds before probing for a reply. Defaults to 0.05 seconds.
total_wait : float
Time in seconds before giving up. Defaults to 1 second.
debug : bool
Flag to send out debug messages.
verbose : bool
Flag to be more verbose.
Returns
-------
reply_msg : message received from :py:func:`cctalk.tools.interpret_reply`
if reply_msg is False, no reply was obtained.
Raises
------
UserWarning
If a reply was obtained but :py:func:`cctalk.tools.interpret_reply` returned False.
"""
h = packet_holder
packet = h.packet
byte_msg = h.byte_message
s = time.time()
serial_object.write(packet)
time.sleep(initial_wait)
while True:
t = time.time() - s
if t > total_wait: break
raw = serial_object.read(serial_object.inWaiting())
if len(raw) > 1:
len_raw = len(raw)
out_byte = unpack('={0}c'.format(int(len_raw)), raw)
out_int = list(map(ord, out_byte))
if verbose:
print('Received original packet int: {0} byte:{1}'.format(out_int, out_byte))
if len(out_byte) == len(byte_msg) and debug:
print('Received original packet int: {0} byte:{1}'.format(out_int, byte_msg))
elif len(out_byte) < len(byte_msg) and debug:
print('Received small packet int: {0} byte:{1}'.format(out_int, byte_msg))
else:
# The first part of the return is the echo in the line
# (a repeat of the message sent).
start_index = 5 + h.bytes_sent
reply_packet = out_byte[start_index:]
reply_msg = interpret_reply(reply_packet, packet_holder, verbose=verbose)
if reply_msg:
return reply_msg
else:
msg = "A bad reply was recieved."
raise UserWarning(msg, (reply_packet, reply_msg))
return False
def interpret_reply(reply_byte, packet_holder, verbose=False):
"""Interprets a reply byte message.
Parameters
----------
reply_byte : byte message returned from serial_object.read(serial_object.inWaiting())
Often the reply contains an echo of the message sent. This part should be removed first.
packet_holder : Holder
Holder containing the packet and extended information about the packet that was originally sent.
See :py:class:`cctalk.coin_messenger.CoinMessenger` for Holder construction.
verbose : bool
Flag to be more verbose.
Returns
-------
reply : The type depends on the type expected.
Reply to the message in the type expected.
Raises
------
UserWarning
If a simple poll did not return the expected message.
Assumes addresses 1 and 2 for the sending and receiving hosts.
"""
h = packet_holder
reply_length = h.bytes_returned
reply_type = h.type_returned
reply_int = list(map(ord, reply_byte))
if len(reply_int) < 2:
print('Received small packet int: {0} byte:{1}'.format(reply_int, reply_byte))
return False
msg_length = reply_int[1]
if verbose:
print("Recieved {0} bytes:".format(msg_length))
if msg_length != reply_length:
print('Message length != return_length. ml: {0} rl:{1}'.format(msg_length, reply_length))
return False
if h.request_code == 254:
expected_reply = [1, 0, 2, 0, 253]
if reply_int != expected_reply:
msg = "Simple pool did not return expected message."
raise UserWarning(msg, (reply_int, expected_reply))
reply_msg_int = reply_int[4:-1]
reply_msg_byte = reply_byte[4:-1]
if reply_type is str:
return str().join(reply_msg_byte)
elif reply_type is int:
return reply_msg_int
elif reply_type is bool:
return True
else:
return reply_msg_byte
def make_serial_object(port_name):
"""Makes a serial object that can be used for talking with the coin validator.
Parameters
----------
port_name : str
Name of the serial port to connect to, e.g. 'COM3'.
Returns
-------
serial_object : object made by :py:class:`serial.Serial`
"""
return serial.Serial(port=port_name,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
xonxoff=True,
)
def drop_to_ipython(local_variables, *variables_to_inspect):
'''
Drops to ipython at the point in the code where it is called to inspect the variables passed to it.
Parameters
----------
local_variables : list
Usually one would pass the output of locals().
variables_to_inspect: tuple
All variables passed to this routine are wrapped into a tuple.
'''
try:
call_name = local_variables['self'].__module__
except Exception:
call_name = "Module"
b = 'Dropping into IPython'
em = 'Leaving Interpreter, back to program.'
msg = '***Called from %s. Hit Ctrl-D to exit interpreter and continue program.'
ipshell = InteractiveShellEmbed([], banner1=b, exit_msg=em)
ipshell(msg %(call_name))
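# Example session (editor's sketch, not part of the original module; the port
# name is hypothetical -- pick one that actually exists on your machine):
#
# ser = make_serial_object('/dev/ttyUSB0')  # open the validator's port
# packet = make_msg(254)                    # simple poll, see above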
| 32.868996 | 115 | 0.634649 |
a99ad5868185e1054df2d7b8b74429cdab82171a
| 1,533 |
py
|
Python
|
frds/mktstructure/measures/price_impact.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 1 |
2022-03-06T20:36:06.000Z
|
2022-03-06T20:36:06.000Z
|
mktstructure/measures/price_impact.py
|
mgao6767/mktstructure
|
5432c1bed163f838209d34b74c09629bea620ba8
|
[
"MIT"
] | null | null | null |
mktstructure/measures/price_impact.py
|
mgao6767/mktstructure
|
5432c1bed163f838209d34b74c09629bea620ba8
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from .exceptions import *
name = "PriceImpact"
description = """
The price impact is
2 * q * (midpoint_5min - midpoint) / midpoint
where q is the trade direction (1 for buys and -1 for sells),
midpoint is bid-ask midpoint, and midpoint_5min is the bid-ask midpoint 5min later.
"""
vars_needed = {"Price", "Volume", "Mid Point", "Direction"}
def estimate(data: pd.DataFrame) -> np.ndarray:
if not vars_needed.issubset(data.columns):
raise MissingVariableError(name, vars_needed.difference(data.columns))
midpt = data["Mid Point"].to_numpy()
timestamps = np.array(data.index, dtype="datetime64")
# Find the Quote Mid Point 5 min later than each trade.
matched_midpt = []
for idx, ts1 in enumerate(timestamps):
for i, ts2 in enumerate(timestamps[idx:]):
if ts2 - ts1 >= np.timedelta64(5, "m"):
matched_midpt.append(midpt[idx + i])
break
matched = len(matched_midpt)
directions = data["Direction"].to_numpy()[:matched]
pimpact = 2 * directions * (matched_midpt - midpt[:matched]) / midpt[:matched]
# Daily price impact is the dollar-volume-weighted average
# of the price impact computed over all trades in the day.
price = data["Price"].to_numpy()
volume = data["Volume"].to_numpy()
dollar_volume = np.multiply(volume, price)[:matched]
pimpact = np.sum(np.multiply(pimpact, dollar_volume) / np.sum(dollar_volume))
return np.nan if np.isnan(pimpact) else pimpact
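# Worked example (editor's note): a buy (q = 1) at a quote midpoint of 100.0
# whose midpoint five minutes later is 100.5 has a per-trade price impact of
# 2 * 1 * (100.5 - 100.0) / 100.0 = 0.01, i.e. one percent.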
| 38.325 | 84 | 0.679713 |
a5ebb807ff741ef8001b2a69f8057b5972fd93d4
| 1,878 |
py
|
Python
|
tarefas-poo/lista-03/balde/view/interface_com_usuario.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-03/balde/view/interface_com_usuario.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
tarefas-poo/lista-03/balde/view/interface_com_usuario.py
|
victoriaduarte/POO_UFSC
|
0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a
|
[
"MIT"
] | null | null | null |
# --------------------------
# UFSC - CTC - INE - INE5663
# Bucket Exercise
# --------------------------
# Class that represents the user interface
#
from view.menu import Menu
from view.paineis.painel_cria_baldes import PainelCriaBaldes
from view.paineis.painel_destroi_baldes import PainelDestroiBaldes
from view.paineis.painel_manipula_baldes import PainelManipulaBaldes
class InterfaceComUsuario:
def __init__(self):
self._baldes = None # initially there are no buckets defined
opcoes_sem_balde = {
0: 'Exit',
1: 'Create Buckets'
}
opcoes_com_balde = {
0: 'Exit',
1: 'Destroy Buckets',
2: 'Manipulate Buckets'
}
self._menu_sem_baldes = Menu('Bucket Program', opcoes_sem_balde)
self._menu_com_baldes = Menu('Bucket Program', opcoes_com_balde)
def interaja(self):
terminar = False
while not terminar:
if self._baldes is None:
menu = self._menu_sem_baldes
opcao = menu.pergunte()
if opcao == 0:
terminar = True
elif opcao == 1:
criador = PainelCriaBaldes()
criador.crie(self)
else:
menu = self._menu_com_baldes
opcao = menu.pergunte()
if opcao == 0:
terminar = True
elif opcao == 1:
destruidor = PainelDestroiBaldes()
destruidor.destrua(self)
elif opcao == 2:
manipulador = PainelManipulaBaldes()
manipulador.manipule(self._baldes[0], self._baldes[1])
def armazene_baldes(self, baldeA, baldeB):
self._baldes = (baldeA, baldeB)
def destrua_baldes(self):
self._baldes = None
| 32.947368 | 74 | 0.546858 |
93a51902285ff3561d20e3b1bfd2d2a16c459863
| 3,337 |
py
|
Python
|
2018/finals/web-mitigator/app/main.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 2,757 |
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
2018/finals/web-mitigator/app/main.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 20 |
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
2018/finals/web-mitigator/app/main.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 449 |
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, redirect, request, make_response, send_from_directory
from google.cloud import tasks_v2beta3
import logging
from urllib.parse import urlencode, quote_plus
client = tasks_v2beta3.CloudTasksClient()
parent = client.queue_path('ctf-web-kuqo48d', 'europe-west1', 'xss')
SECRET_URL = '917fh3h5721'
app = Flask(__name__, static_url_path='')
app.url_map.host_matching = (__name__ != '__main__')
@app.route('/', host="bountyplz-mitigator.web.ctfcompetition.com", methods=['GET', 'POST'])
def bountyplz():
"""Return XSS and BugBounty Form (for PoCs/xss bot)"""
poc = request.form.get('poc', '')
if poc.startswith("http"):
task = {
'app_engine_http_request': {
'app_engine_routing': {
'service': 'uxssbot',
},
'http_method': 'POST',
'relative_uri': '/submit?%s'%urlencode({'service': 'mitigator', 'url': poc}, quote_via=quote_plus),
'body': urlencode({'service': 'mitigator', 'url': poc}, quote_via=quote_plus).encode(),
}
}
res = client.create_task(parent, task)
logging.info('Created uxssbot task: ' + res.name)
return """We have received your bug report."""
return """
<html>
<script src='https://www.google.com/recaptcha/api.js'></script>
<form method=post action=/>
<div class="g-recaptcha" data-sitekey="6LfR1XUUAAAAAPGx2NejNMYImdoRW9PZLQcFtBN_"></div>
<input type=url name=poc placeholder="PoC URL" value=%s>
<input type=submit>
</form>
</html>"""%poc
@app.route('/' + SECRET_URL, host="mitigator.web.ctfcompetition.com")
def login():
"""Redirect the user to the SEM"""
token = request.cookies.get('flag', 'public-demo')
return redirect("/" + SECRET_URL + ".html?flag=" + token)
@app.route('/', host="mitigator.web.ctfcompetition.com")
def backtopwn():
"""Redirect the user to pwnable"""
return redirect("http://mitigator.ctfcompetition.com:1337/index.html")
@app.route('/' + SECRET_URL + '.html', host="mitigator.web.ctfcompetition.com")
def sem():
response = make_response(app.send_static_file('sem.html'))
response.headers['content-security-policy'] = "default-src 'none'; style-src 'sha256-a6K5yWfSJ1D3n7JPrbZVrFADjNGla8XNjwqREOH1FFs='; script-src 'sha256-hJezPHmyLh3996xSSmcHvy0la57OWfPoGhLKvt40LGA=' 'sha256-9TaiPuyxl5StNVGXWFGVh2SHM62NJ9KT462mtr8Jd7Q=' https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js; object-src 'none'; connect-src 'self'; report-uri https://bountyplz-mitigator.web.ctfcompetition.com/";
return response
@app.route('/secret/<path:path>', host="mitigator.web.ctfcompetition.com")
def proxy(path):
return app.send_static_file('secret/' + path)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| 42.782051 | 422 | 0.706023 |
9e13297bcad332cae21c3a3453309ecd98fd058b
| 956 |
py
|
Python
|
data/data_loader.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | 1 |
2020-06-04T13:20:45.000Z
|
2020-06-04T13:20:45.000Z
|
data/data_loader.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | 6 |
2020-06-03T18:45:11.000Z
|
2022-02-10T01:51:03.000Z
|
data/data_loader.py
|
florianfricke/Bachelor_Thesis_Sentiment_Analyse
|
aa1fa95cfbc13115ee60baaf79eab0d1940998ab
|
[
"MIT"
] | null | null | null |
"""
Created by Florian Fricke.
"""
import os
from utilities.utilities import clean_text
from os.path import join
class DataLoader:
def __init__(self):
self._dirname = os.path.dirname(__file__)
def get_data(self, filename, separator="\t", sentiment_column_number=0, text_column_number=1):
data = list()
# use a context manager so the file handle is closed reliably
with open(join(self._dirname, filename), "r", encoding="utf-8") as file:
for line_id, line in enumerate(file):
try:
columns = line.rstrip().split(separator)
sentiment = columns[sentiment_column_number]
text = clean_text(" ".join(columns[text_column_number:]))
if text != "Not Available":
data.append((sentiment, text))
except Exception:
print("\nWrong format in line:{} in file:{}".format(
line_id, filename))
# re-raise the original exception rather than a bare Exception
raise
return data
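# Example usage (editor's sketch, not part of the original file; the file
# name and column layout are hypothetical):
#
# loader = DataLoader()
# samples = loader.get_data("train.tsv")  # -> [(sentiment, text), ...]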
| 30.83871 | 98 | 0.58159 |
1923d29de7080a5e45efd66b8d2643600e031f38
| 664 |
py
|
Python
|
source/test/test_templatehandler.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
source/test/test_templatehandler.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
source/test/test_templatehandler.py
|
marctrommen/bloggenerator
|
5b4fed4cd12a899829261d492328dfc39a78153d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import templatehandler
class TemplateHandlerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TemplateHandlerTest, self).__init__(*args, **kwargs)
def test_getPage(self):
th = templatehandler.TemplateHandler()
self.assertEqual(len(th.getPage()), 1924, "incorrect file size")
def test_getBlogpost(self):
th = templatehandler.TemplateHandler()
self.assertEqual(len(th.getBlogpost()), 656, "incorrect file size")
def test_getBlogpostKeyword(self):
th = templatehandler.TemplateHandler()
self.assertEqual(len(th.getBlogpostKeyword()), 79, "incorrect file size")
| 27.666667 | 75 | 0.746988 |
2709581564c8e67ff6eb7794622e5fc13124691b
| 4,482 |
py
|
Python
|
convertx/cli.py
|
VolkerBergen/convertx
|
3d11f94e5ff20b17381c626848fe26e82a5c9cb7
|
[
"BSD-2-Clause"
] | null | null | null |
convertx/cli.py
|
VolkerBergen/convertx
|
3d11f94e5ff20b17381c626848fe26e82a5c9cb7
|
[
"BSD-2-Clause"
] | 5 |
2022-01-06T20:34:22.000Z
|
2022-01-17T14:58:05.000Z
|
convertx/cli.py
|
VolkerBergen/convertx
|
3d11f94e5ff20b17381c626848fe26e82a5c9cb7
|
[
"BSD-2-Clause"
] | 1 |
2022-01-05T13:17:57.000Z
|
2022-01-05T13:17:57.000Z
|
import argparse
import io
import os
import sys
from datetime import date, datetime
from mammoth import convert, writers
from html2text import html2text
from .styles import style_mappings, style_mappings_md, check_comments
def main():
argv = [arg for arg in sys.argv if not arg.startswith('--')]
argv_dir = ' '.join([arg for arg in sys.argv if arg.startswith('--')])
command = 'find -s . -name "*docx*" -print0 | while IFS= read -r -d "" filename; do\n' # find docx files
command += 'convertx "$filename" "${filename//docx/html}"' # execute convertx command
command += ' {}\ndone'.format(argv_dir) # add input/output directories
# `convertx` to loop through directory for conversion
if (len(argv) == 1):
os.system(command)
# `convertx html` to loop through directory for conversion
elif (len(argv) == 2) and ('html' in argv[-1]):
os.system(command)
# `convertx markdown` to loop through directory for conversion
elif (len(argv) == 2) and ('markdown' in argv[-1]):
os.system(command.replace('html', 'md'))
# `convertx filename.docx` for html conversion into filename.html
elif len(argv) == 2:
filename_docx = argv[-1]
filename_html = filename_docx.replace("docx", "html")
os.system('convertx "{}" "{}"'.format(filename_docx, filename_html))
# actual html or markdown conversion
else:
args = _parse_args()
outdir = args.output_dir
if outdir is not None and not os.path.exists(outdir):
os.makedirs(outdir)
is_valid = (not '~$' in args.path) and (not '/._' in args.path)
is_selected = (args.input_dir is None) or (args.input_dir in args.path)
mtime = datetime.fromtimestamp(os.path.getmtime(args.path)).strftime('%Y-%m-%d')
#is_selected &= (mtime >= '2021-05-01')
if is_valid and is_selected:
with open(args.path, "rb") as docx_fileobj:
if outdir is None:
path, file = os.path.split(args.output)
else:
path, file = outdir, os.path.basename(args.output)
output_path = os.path.join(path, file.replace(' ', ''))
if args.stdout_write in ['true', 'only']:
output_file = os.path.join(path, "format_errors.txt")
if not os.path.exists(output_file):
sys.stdout = open(output_file, 'a')
if str(date.today()) not in open(output_file).readline():
sys.stdout = open(output_file, 'w') # reset
print('{}\n'.format(date.today()))
sys.stdout = open(output_file, 'a')
print('see --> https://youtu.be/9HIv-R8lg9I <-- for explanations.\n\n')
sys.stdout = open(output_file, 'a')
result = convert(docx_fileobj).value
if args.output.endswith('html'):
# cut the extension off by length (str.strip would remove a *set* of characters)
title = args.output.split('/')[-1][:-len('.html')]
result = style_mappings(result, title)
elif args.output.endswith('md'):
title = args.output.split('/')[-1][:-len('.md')]
result = style_mappings(result, title)
result = html2text(result)
result = style_mappings_md(result)
else:
raise ValueError('File format not supported.')
check_comments(args.path, title)
if args.stdout_write != 'only':
_write_output(output_path, result)
def _write_output(path, contents):
with io.open(path, "w", encoding="utf-8") as fileobj:
fileobj.write(contents)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"path",
metavar="docx-path",
help="Path to the .docx file to convert.")
parser.add_argument(
"output",
nargs="?",
metavar="output-path",
help="Output path for the generated document.")
parser.add_argument(
"--input-dir",
help="Input directory for generated HTML.")
parser.add_argument(
"--output-dir",
help="Output directory for generated HTML.")
parser.add_argument(
"--stdout-write",
help="Output filepath for generated error messages.")
return parser.parse_args()
if __name__ == "__main__":
main()
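# Example invocations (editor's sketch, not part of the original file; the
# file names are hypothetical):
#
#   convertx report.docx report.html    # convert a single file to HTML
#   convertx markdown                   # batch-convert every *.docx to *.md
#   convertx --output-dir out/          # batch-convert, writing into out/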
| 33.699248 | 109 | 0.572735 |
e30c668146f5292adf9d8941078d876c3b4b49d3
| 5,250 |
py
|
Python
|
ArduinoToPiDataTransfer/PiDataReceiverGeneric.py
|
Robsen01/Brain_Interface
|
2525dc5e90bc5ac6b67f8f596fdf26a9ae6b391e
|
[
"Apache-2.0"
] | null | null | null |
ArduinoToPiDataTransfer/PiDataReceiverGeneric.py
|
Robsen01/Brain_Interface
|
2525dc5e90bc5ac6b67f8f596fdf26a9ae6b391e
|
[
"Apache-2.0"
] | null | null | null |
ArduinoToPiDataTransfer/PiDataReceiverGeneric.py
|
Robsen01/Brain_Interface
|
2525dc5e90bc5ac6b67f8f596fdf26a9ae6b391e
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep, time_ns
import serial
import serial.tools.list_ports
'''
PiDataReceiverGeneric has everything you need to communicate with the Arduino.
'''
class PiDataReceiverGeneric:
'''
port must be a string like 'COM3'. Retrieve possible ports with PiDataReceiver.list_possible_ports.
'''
def __init__(self, port, threshold, baudrate=115200, timeout=.1, send_raw_data=False, send_filtered_data=False, send_envlope=True, data_separation=",") -> None:
self.arduino = serial.Serial(port=port, baudrate=baudrate, timeout=timeout)
self.send_raw_data = send_raw_data
self.send_filtered_data = send_filtered_data
self.send_envlope = send_envlope
self.data_separation = data_separation
self.threshold = threshold
'''
Closes the potentially open port to the Arduino that may be connected.
Should be called before disposing of this object.
'''
def close_arduino_port(self):
if hasattr(self, "arduino"):
if not self.arduino.closed:
self.arduino.close()
'''
Wait 2-3 seconds after PiDataReceiverGeneric was initiated before calling this function.
This sends a string to the Arduino to configure it.
Returns True if the Arduino responded that all is well.
'''
def init_arduino(self) -> bool:
self.clear_arduino_buffer()
d = self.data_separation
if(self.send_raw_data):
d += "1"
else:
d += "0"
if(self.send_filtered_data):
d += "1"
else:
d += "0"
if(self.send_envlope):
d += "1"
else:
d += "0"
if(self.threshold and type(self.threshold) == int and self.threshold > 0):
d += str(self.threshold)
else:
d+= "0"
self.write(d)
self.arduino.flushOutput()
# wait for the response; there will be some empty lines before it, and the loop reads them out until the response arrives
time = time_ns()
data = ""
while data != "all good":
data = self.arduino.readline()
data = data.decode('utf-8')
data = data.strip()
# wait max 1.5 s for response (takes almost exactly 1s on my machine)
if time_ns() - time > 1500000000:
break
if data == "all good":
return True
return False
'''
This function writes a string to the Arduino.
'''
def write(self, s) -> None:
self.arduino.write(bytes(s, 'utf-8'))
'''
Reads the last line that the Arduino sent.
Returns list of int.
List may contain raw_data, filtered_data, envlope, depending on send_raw_data and the other attributes.
The last value is a Timestamp of the moment when the analoge value was read.
'''
def read(self):
try:
data = self.arduino.readline()
data = data.decode('utf-8')
data = data.split(self.data_separation)
data = list(map(int, data)) # convert strings-list to int-list
except:
# on startup the Arduino sends some setup lines; this helps to ignore them
data = []
return data
'''
Should be called before you start to read or write values for the first time after a connect, in case there are still old values in the buffer.
'''
def clear_arduino_buffer(self) -> None:
self.arduino.reset_input_buffer()
self.arduino.reset_output_buffer()
'''
staticmethod
Returns list with items like this:
('COM3', 'Arduino Due Programming Port (COM3)', 'USB VID:PID=2341:003D SNR=75330303035351300230')
The first String in this list that represents the Arduino must be used for the port-parameter of PiDataReceiverGeneric
'''
@staticmethod
def list_possible_ports():
return list(serial.tools.list_ports.comports())
'''
staticmethod
should be used to convert the received timestamp to seconds
'''
@staticmethod
def time_to_s(ns_value) -> float:
# /1000000 converts the timestamp to seconds (editor's assumption: the Arduino reports microseconds, not nanoseconds, hence the factor)
return ns_value/1000000
'''
staticmethod
converts the raw integer to the measured mV value
'''
@staticmethod
def raw_to_mV(value) -> float:
# https://www.arduino.cc/reference/en/language/functions/analog-io/analogread/
# value*(5/1024) - 1.5
# value [total steps measured by the ADC] * (5/1024) [one ADC step in V] - 1.5 [the sensor maps its +/- 1.5 mV detection range onto a positive 0-3 V range; we reverse that here]
# the result is a value between -1.5 mV and +1.5 mV
return value*5/1024 - 1.5
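# Worked example (editor's note): a raw ADC reading of 512 gives
# 512 * 5 / 1024 - 1.5 = 2.5 - 1.5 = 1.0, i.e. +1.0 mV.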
'''
staticmethod
converts the raw integer to the measured mV value
'''
@staticmethod
def filtered_to_mV(value) -> float:
# https://www.arduino.cc/reference/en/language/functions/analog-io/analogread/
# value*(5/1024)
# value [total ADC steps, already filtered] * (5/1024) [one ADC step in V]
# the result is a value roughly between -1.5 mV and +1.5 mV.
return value*5/1024
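# Example session (editor's sketch, not part of the original module; the port
# index and threshold are hypothetical -- pick the entry naming your Arduino):
#
# ports = PiDataReceiverGeneric.list_possible_ports()
# receiver = PiDataReceiverGeneric(port=ports[0][0], threshold=100)
# sleep(2.5)                      # give the Arduino time to reset
# if receiver.init_arduino():
#     values = receiver.read()    # [envlope, timestamp] with the defaults
# receiver.close_arduino_port()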
| 34.313725 | 195 | 0.62419 |
8bc4e1cbae7913cb418e025dd446d4bbf461bd0f
| 46,469 |
py
|
Python
|
MachineTranslation/src_bak/attention/v1/machine_translation_with_attention_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | 2 |
2021-08-25T01:13:29.000Z
|
2021-10-10T14:49:59.000Z
|
MachineTranslation/src_bak/attention/v1/machine_translation_with_attention_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | null | null | null |
MachineTranslation/src_bak/attention/v1/machine_translation_with_attention_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Works with tensorflow >= 2.0, where Keras is integrated directly into tensorflow
# ref: https://keras.io/about/
from tensorflow.keras.layers import Bidirectional, Concatenate, Dot, Input, LSTM
from tensorflow.keras.layers import RepeatVector, Dense, Lambda, Softmax, Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
from src_bak.attention.v1.lib.nmt_utils import *
from src_bak.attention.v1.lib.utils_xrh import *
from sklearn.model_selection import train_test_split
from lib.bleu_xrh import *
from deprecated import deprecated
@deprecated(version='0.0', reason="You should use class MachineTranslationV1")
class BasicMachineTranslation:
"""
Translation model based on LSTM + seq2seq + attention
Basic procedural implementation
Author: xrh
Date: 2019-12-16
ref: https://github.com/enggen/Deep-Learning-Coursera
"""
def model_implementation_naive(self, Tx, Ty, machine_vocab, human_vocab, Xoh, Yoh, m):
# for the attention module, use global layer (Keras Layers) objects so that weights are shared across multiple models
repeator = RepeatVector(Tx)
concatenator = Concatenate(axis=2)
densor = Dense(1, activation="relu")
activator = Softmax(axis=1)
dotor = Dot(axes=1)
def one_step_attention(a, s_prev): # similar to an RNN, this is a recurrent structure
"""
Implementation of the attention module
Uses global layer (Keras Layers) objects so that weights are shared across multiple models
:param a: outputs of the encoder-lstm over all time steps, shape (m, Tx, 2*n_a)
:param s_prev: hidden state of the previous time step of the decoder-lstm, shape (m, n_s)
:return:
"""
s_prev = repeator(s_prev)
concat = concatenator([a, s_prev]) # shape: (m, Tx, 2*n_a+n_s)
e = densor(concat) # shape: (m, Tx, 1)
alphas = activator(e) # shape: (m, Tx, 1)
context = dotor([alphas, a]) # shape: (m, 1, 2*n_a)
return context
n_a = 64
n_s = 128
pre_activation_LSTM_cell = Bidirectional(LSTM(n_a, return_sequences=True, return_state=True))
# ref: https://keras.io/api/layers/recurrent_layers/lstm/#lstm-class
# ref: https://keras.io/api/layers/recurrent_layers/bidirectional/
post_activation_LSTM_cell = LSTM(n_s, return_state=True)
output_layer = Dense(len(machine_vocab), activation='softmax')
def model(Tx, Ty, n_a, n_s, human_vocab_size):
"""
Implements the encoder and the decoder
:param Tx: length of the input sequence
:param Ty: length of the output sequence
:param n_a: dimension of the hidden state in the encoder-lstm
:param n_s: dimension of the hidden state in the decoder-lstm
:param human_vocab_size: vocabulary size of the input sequence
:return: Keras model instance
"""
# define the model inputs
X = Input(shape=(Tx, human_vocab_size)) # shape: (m, Tx, human_vocab_size), m = batch size
# initial states of the decoder-LSTM
# hidden state vector
s0 = Input(shape=(n_s,), name='s0') # shape of s: (m, n_s)
# cell state vector
c0 = Input(shape=(n_s,), name='c0') # shape of c: (m, n_s)
s = s0
c = c0
outputs = []
# lstm-encoder implementation
a, forward_h, forward_c, backward_h, backward_c = pre_activation_LSTM_cell(
inputs=X) # shape of a : (m,Tx, 2*n_a)
# lstm-decoder implementation
# unroll over the Ty time steps; since only one time step runs at a time, the hidden state and cell state must be output and fed into the LSTM at the next time step
for t in range(Ty):
context = one_step_attention(a, s) # shape: (m, 1, 2*n_a)
# shape of s: (m, n_s)
s, _, c = post_activation_LSTM_cell(inputs=context, initial_state=[s, c]) # the input has only one time step
out = output_layer(s) # shape: (m, machine_vocab)
outputs.append(out)
model = Model(inputs=[X, s0, c0], outputs=outputs)
# shape of outs : ( Ty, m, machine_vocab)
return model
model = model(Tx, Ty, n_a, n_s, len(human_vocab))
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# tips :
# n_a = 64
# n_s = 128
# m = 10000
outputs = list(Yoh.swapaxes(0, 1)) # Yoh.swapaxes(0,1) swaps axes 0 and 1: shape (m, T_y, 11) becomes (T_y, m, 11)
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
history = model.fit([Xoh, s0, c0], outputs, epochs=40, batch_size=2048, validation_split=0.1)
class MachineTranslationV1:
"""
Translation model based on LSTM + seq2seq + attention
Improvements over class BasicMachineTranslation:
1. Object-oriented implementation
2. Modified attention mechanism
(1) For the decoder, the hidden state vector (s) and cell state vector (c) fed into the first time step are the outputs of the encoder's last time step
(2) For the decoder, the input at each time step includes, besides the attention-weighted sum over all encoder time steps,
the decoder's output from the previous time step
3. Inference based on beam search
(1) Decouples the encoder and the decoder, improving inference speed
(2) The decoder decodes step by step, i.e. one model inference per time step yields that step's decoding result
Author: xrh
Date: 2019-12-16
ref:
1. https://github.com/enggen/Deep-Learning-Coursera
2. Paper: Neural machine translation by jointly learning to align and translate
"""
def __init__(self, Tx, Ty, n_a, n_s, machine_vocab, inv_machine_vocab, human_vocab,
use_pretrain=False, model_path='models/lstm_seq2seq_attention.h5'):
"""
Model initialization
:param Tx: length of the input sequence
:param Ty: length of the output sequence
:param n_a: dimension of the hidden state in the encoder-lstm, n_a = 64
:param n_s: dimension of the hidden state in the decoder-lstm, n_s = 128
:param machine_vocab: vocabulary of the output sequence
:param inv_machine_vocab: inverse vocabulary of the output sequence
:param human_vocab: vocabulary of the input sequence
:param use_pretrain: use an already trained model
:param model_path: path of the pretrained model
"""
self.Tx = Tx
self.Ty = Ty
self.n_a = n_a
self.n_s = n_s
self.machine_vocab = machine_vocab
self.inv_machine_vocab = inv_machine_vocab
self.human_vocab = human_vocab
self.model_path = model_path
# vocabulary size of the output sequence
self.machine_vocab_size = len(machine_vocab)
# vocabulary size of the input sequence
self.human_vocab_size = len(human_vocab)
# declare and initialize all layers that make up the computation graph
self.__init_computation_graph()
# computation graph used for training
self.model_train = self.__encoder_decoder_model(Tx=self.Tx, Ty=self.Ty, human_vocab_size=self.human_vocab_size)
if use_pretrain: # load the trained model
print('load pretrained model success ')
self.model_train.load_weights(self.model_path)
def __init_computation_graph(self):
"""
Declare and initialize all layers that make up the computation graph
:return:
"""
# a Model can also be used as a layer
self.model_one_step_attention = self.__one_step_attention_model(self.Tx, self.n_a, self.n_s)
self.pre_activation_LSTM_cell = Bidirectional(LSTM(self.n_a, return_sequences=True, return_state=True),
name='encoder_lstm')
self.concatenate_s = Concatenate(name='concatenate_s')
self.concatenate_c = Concatenate(name='concatenate_c')
self.concatenate_context = Concatenate()
self.post_activation_LSTM_cell = LSTM(self.n_s, return_state=True, name='decoder_lstm')
self.output_layer = Dense(len(self.machine_vocab), activation='softmax', name='decoder_output')
self.lambda_argmax = Lambda(TensorUtils.argmax_tensor, arguments={'axis': -1}, name='argmax_tensor')
self.lambda_one_hot = Lambda(TensorUtils.one_hot_tensor, arguments={'num_classes': len(self.machine_vocab)},
name='one_hot_tensor')
self.reshape = Reshape(target_shape=(1, len(self.machine_vocab)))
def __one_step_attention_model(self, Tx, n_a, n_s):
"""
Implementation of the attention module
Wraps several keras layers into a model; redefining the model's inputs allows the layer weights to be shared
:param Tx: length of the input sequence
:param n_a: dimension of the hidden state in the encoder-lstm
:param n_s: dimension of the hidden state in the decoder-lstm
:return: keras model
model inputs: [a0, s_prev0]
model outputs: the context vector, used as input of the decoder-lstm, shape: (m, 1, 2*n_a)
"""
repeator = RepeatVector(Tx)
concatenator = Concatenate(axis=2)
densor = Dense(1, activation="relu")
activator = Softmax(axis=1)
dotor = Dot(axes=1)
a0 = Input(shape=(Tx, 2 * n_a), name='a') # shape: (m, Tx, 2 * n_a)
s_prev0 = Input(shape=(n_s,), name='s_prev') # shape: (m, n_s)
a = a0 # otherwise it raises: ValueError: Graph disconnected: cannot obtain value for tensor Tensor ....
# The following previous layers were accessed without issue: []
s_prev = s_prev0
s_prev = repeator(s_prev) # shape: (m, Tx, n_s)
concat = concatenator([a, s_prev]) # shape: (m, Tx, 2*n_a+n_s)
e = densor(concat) # shape: (m, Tx, 1)
alphas = activator(e) # shape: (m, Tx, 1)
context = dotor([alphas, a]) # shape: (m, 1, 2*n_a)
model = Model(inputs=[a0, s_prev0], outputs=context) # a Model can also be used as a layer
return model
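# In equation form (editor's note): for each encoder step t,
# e_t = relu(W [a_t ; s_prev] + b), alpha = softmax(e) over the Tx axis, and
# context = sum_t alpha_t * a_t -- the additive attention of Bahdanau et al.
# cited in the class docstring above.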
def __encoder_decoder_model(self, Tx, Ty, human_vocab_size):
"""
Implements the encoder and the decoder
1. During decoding, the previous time step's output word is fed in;
consider this scenario: if the previous word is '-', the current word must be a digit.
In the decoder, the one-hot vector of the character with the largest softmax output is concatenated with the context and fed into the decoder-lstm
2. Changes the decoder-LSTM's initial hidden-state input from the original zero vector to the hidden state of the encoder-LSTM's last time step (note the concatenation)
3. Declares all keras layer objects as class attributes so that the decoder rebuilt later can reuse the trained network structure
:param Tx: length of the input sequence
:param Ty: length of the output sequence
:param human_vocab_size: vocabulary size of the input sequence
:return: Keras model instance
"""
X = Input(shape=(Tx, human_vocab_size)) # shape: (m,Tx,human_vocab_size)
pred0 = Input(shape=(1, self.machine_vocab_size), name='pred0') # shape: (m ,1, 11)
pred = pred0
# print('pred: after Input', pred)
outputs = []
# lstm-encoder
a, forward_h, forward_c, backward_h, backward_c = self.pre_activation_LSTM_cell(
inputs=X) # shape of a : (m,Tx, 2*n_a)
# hidden-state vector of the last time step
s = self.concatenate_s(inputs=[forward_h, backward_h]) # shape (m, 2*n_a=128)
# cell-state vector of the last time step
c = self.concatenate_c(inputs=[forward_c, backward_c]) # shape (m, 2*n_a=128)
# lstm-decoder
for t in range(Ty): # iterate over the Ty time steps
context = self.model_one_step_attention(inputs=[a, s]) # shape of context : (m, 1, 2*n_a=128)
# print('context after one_step_attention: ', context)
context = self.concatenate_context(inputs=[context, pred]) # shape of context: (m,1,128+11=139)
# print('context after Concatenate: ', context)
s, _, c = self.post_activation_LSTM_cell(inputs=context, initial_state=[s, c]) # the input context has only one time step
out = self.output_layer(inputs=s) # shape (m, machine_vocab)
pred = self.lambda_argmax(inputs=out) # shape (m, 1)
pred = self.lambda_one_hot(inputs=pred) # shape (m, machine_vocab_size)
# print(pred)
pred = self.reshape(inputs=pred) # shape: (m ,1, machine_vocab_size)
# print(pred)
outputs.append(out) # shape : (Ty, m, machine_vocab_size)
model = Model(inputs=[X, pred0], outputs=outputs)
# shape of outputs : (Ty, m, machine_vocab)
return model
def fit(self, Xoh, Yoh, epoch_num=120, batch_size=2048):
"""
训练模型
:param Xoh: 输入序列 (one-hot化)
:param Yoh: 输出序列 (one-hot化)
:param epoch_num: 模型训练的 epoch 个数, 一般训练集所有的样本模型都见过一遍才算一个 epoch
:param batch_size: 选择 min-Batch梯度下降时, 每一次输入模型的样本个数 (默认 = 2048)
:return:
"""
m = np.shape(Xoh)[0] # 训练样本总数
model = self.__encoder_decoder_model(self.Tx, self.Ty, self.human_vocab_size)
# 打印 模型(计算图) 的所有网络层
# print(model.summary())
# 画出计算图
# plot_model(model, to_file='doc/model2.png')
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
outputs = list(Yoh.swapaxes(0, 1)) # Yoh.swapaxes(0,1) 第0维度 和 第1 维度交换,原来为(m,T_y,11) 变换后 为:(T_y,m,11)
pred0 = np.zeros((m, 1, self.machine_vocab_size))
history = model.fit([Xoh, pred0], outputs, epochs=epoch_num, batch_size=batch_size, validation_split=0.1)
# 将训练好的模型保存到文件
model.save(self.model_path)
print('save model sucess ')
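# A small sketch of what the swapaxes call above produces (illustrative):
# import numpy as np
# Yoh = np.zeros((4, 10, 11)) # (m, T_y, vocab)
# outputs = list(Yoh.swapaxes(0, 1)) # 10 arrays, each of shape (4, 11)
# Keras expects one target array per output head, i.e. one per decoder time step.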
def __inference_encoder_model(self, Tx, human_vocab_size):
"""
推理过程中 解耦后的 encoder
:param Tx: 输入序列的长度
:param human_vocab_size: 输入序列的字典大小
:return: model -- Keras model instance
"""
X = Input(shape=(Tx, human_vocab_size)) # shape: (N,Tx,human_vocab_size)
# 使用 已经训练好的 网络层
a, forward_h, forward_c, backward_h, backward_c = self.pre_activation_LSTM_cell(
inputs=X) # shape of a : (N, Tx, 2*n_a)
# encoder 最后一个时间步的 隐藏状态向量
s = self.concatenate_s([forward_h, backward_h]) # shape of s: (N, n_a+n_a) = (m,128)
# encoder 最后一个时间步的 细胞状态向量
c = self.concatenate_c([forward_c, backward_c])
# context = self.model_one_step_attention(inputs=[a, s]) # shape of context : (N, 1, 128)
outputs = [a, s, c]
model = Model(inputs=[X], outputs=outputs) # 生成计算图(模型)
return model
def __inference_onestep_decoder_model(self, n_s, Tx):
"""
Decoupled single-time-step decoder used during inference.
:param n_s: hidden-state dimension of the decoder LSTM
:return: model -- Keras model instance
"""
a = Input(shape=(Tx, n_s), name='a') # shape (N, Tx, 2*n_a)
# incoming hidden-state vector
s0 = Input(shape=(n_s,), name='s') # shape of s: (m, n_s=64)
# incoming cell-state vector
c0 = Input(shape=(n_s,), name='c') # shape of c: (m, n_s=64)
# output of the decoder's previous time step
pred = Input(shape=(1, self.machine_vocab_size), name='pred') # shape of pred (m ,1, 11)
s = s0 # immutable object: a new tensor is generated
c = c0
# print('pred: after Input',pred)
context = self.model_one_step_attention(inputs=[a, s]) # shape of context : (m, 1, 2*n_a=128)
# print('context after one_step_attention: ', context)
context = self.concatenate_context(inputs=[context, pred]) # shape of context: (m,128+11=139)
s, _, c = self.post_activation_LSTM_cell(inputs=context, initial_state=[s, c])
# s: hidden-state vector of the last time step, shape (m, n_s)
# c: cell-state vector of the last time step, shape (m, n_s)
out = self.output_layer(inputs=s) # shape (m, machine_vocab_size)
outputs = [s, c, out] # s, c and out are passed on to the next time step
model = Model(inputs=[a, s0, c0, pred], outputs=outputs) # build the computation graph (model)
return model
def __inference_beamsearch(self, source_oh, Tx, Ty, n_s, human_vocab_size, machine_vocab_size, k=3):
"""
实现 beamsearch (带窗口的贪心搜索)
np.array -> tensor 很自然, 但是 tensor -> np.array 的方式: K.eval(tensor) 需要启动框架计算 计算图, 非常耗费时间;
尽量多用 numpy 的函数,以减少 tensor 到 np.array 的转换的次数
:param source_oh: 输入序列(one-hot化)
:param Tx: 输入序列的长度
:param Ty: 输出序列的长度
:param n_s: decoder-lstm 中隐藏状态的维度
:param human_vocab_size: 输入序列的字典大小
:param machine_vocab_size: 输出序列的字典大小
:param k: 窗口大小
:return: decoder_result
"""
pred0 = np.zeros((k, 1, self.machine_vocab_size))
pred = pred0
decoder_result = []
# decouple the encoder from the decoder
encoder = self.__inference_encoder_model(Tx, human_vocab_size)
context, s, c = encoder.predict([source_oh])
# source_oh shape:(m=3, Tx=30, human_vocab_size=37)
# shape of context : (m, 1, 2*n_a=128)
# shape of s: (m, 2*n_a)
# shape of c: (m, 2*n_a)
# beam-search decoder implementation
for timestep in range(Ty):
# print('timestep :', timestep)
onestep_decoder = self.__inference_onestep_decoder_model(n_s, Tx)
s, c, out = onestep_decoder.predict([context, s, c, pred])
# out: probabilities of the 11 classes from the softmax layer, shape (m=3, machine_vocab_size=11); 3 samples are fed in
# s: hidden-state vector of the last time step, shape (m, n_s)
# c: cell-state vector of the last time step, shape (m, n_s)
# print('out: \n', out) # shape:(3, 11)
# each step runs inference on 3 identical samples (k=3), but each sample has a different pred;
# at every time step, beam search picks the best k of the previous onestep_decoder's outputs and feeds them into this step's onestep_decoder
if timestep == 0:
out_top_K = ArrayUtils.partition_topk_array(out,
k) # shape:(3,3); each row holds k indices, the 3 most probable of the 11 classes for that sample
# print('out_top_K: \n', out_top_K)
top_K_indices = out_top_K
r0 = top_K_indices[0] # shape:(1,3) the 3 input samples are identical, so one row suffices
r0 = np.reshape(r0, (k, 1)) # shape:(3,1)
decoder_result = r0
one_hot = ArrayUtils.one_hot_array(r0.reshape(-1), machine_vocab_size) # shape:(3,11)
# turn the last axis of r0.reshape(-1), shape:(3,), into one-hot vectors
# print(one_hot)
one_hot = np.reshape(one_hot, (1, one_hot.shape[0], one_hot.shape[1])) # shape:(1,3,11)
one_hot_permute = one_hot.transpose((1, 0, 2)) # shape: (3,1,11) ;
# swap axes 0 and 1, so the 3 different preds enter the next time step's onestep_decoder together
pred = one_hot_permute
else:
out_top_K = ArrayUtils.whole_topk_array(out,
k) # shape:(3, 2): indices of the k largest of the 33 elements of out (shape (3,11))
# out shape:(3, 11)
r = out_top_K
# print('r: \n', r)
# [[1 1]
# [0 1]
# [2 1]] # a top-k index pair such as [2,1] means class 1 of the 11 outputs produced by input pred 2
r_pre = decoder_result # shape:(k,timestep): the previous step's decoding result is this step's input
# [[2]
# [1]
# [3]]
rt = np.zeros((k, timestep + 1), dtype=np.int32) # this step appends one decoded position to the existing sequences
for i in range(k):
rt[i, :] = np.concatenate((r_pre[r[i][0]], [r[i][1]]), axis=0)
# i=2:
# r[2][0]=2 means input pred 2, whose previous decoding is r_pre[r[2][0]]=[3];
# appending this step's decoded position r[2][1]=1 yields the sequence [3,1]
# the k decoded sequences together form rt
decoder_result = rt
# decoder_result:
# [[1 1]
# [2 1]
# [3 1]]
one_hot = ArrayUtils.one_hot_array(decoder_result[:, -1], machine_vocab_size) # shape:(3, 11)
# print(one_hot.shape)
one_hot = np.reshape(one_hot, (1, one_hot.shape[0], one_hot.shape[1])) # shape:(1,3, 11)
one_hot_permute = one_hot.transpose((1, 0, 2)) # shape: (3,1,11)
pred = one_hot_permute
# print('decoder_result: \n', decoder_result)
return decoder_result
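# Worked example of the index bookkeeping above (illustrative, k=3):
# suppose whole_topk_array returns [[1, 4], [0, 7], [2, 9]]; row i means:
# beam r[i][0] from the previous step extends with word r[i][1].
# With previous sequences r_pre = [[2], [1], [3]], the new beams are
# [1]+[4] -> [1, 4], [2]+[7] -> [2, 7], [3]+[9] -> [3, 9].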
def inference(self, example):
"""
使用训练好的模型进行推理
:param example: 样本序列
:return:
"""
source = np.array(string_to_int(example, self.Tx, self.human_vocab))
source_oh = ArrayUtils.one_hot_array(source, nb_classes=self.human_vocab_size)
# beam width for beam search
k = 3
source_oh = source_oh.reshape(1, source_oh.shape[0], source_oh.shape[1])
# print(source_oh.shape)
source_oh = np.repeat(source_oh, k, axis=0)
# print(source_oh.shape) # (3, 30, 37): m=k=3, the sample is copied 3 times and fed to the model for inference
decoder_result = self.__inference_beamsearch(source_oh=source_oh, Tx=self.Tx, Ty=self.Ty, n_s=self.n_s,
human_vocab_size=self.human_vocab_size,
machine_vocab_size=self.machine_vocab_size, k=k)
candidates = []
for prediction in decoder_result:
output = ''.join(int_to_string(prediction, self.inv_machine_vocab))
# print("source:", example)
# print("output:", output)
candidates.append(output)
return candidates
@deprecated(version='1.0', reason="You should use another function")
def evaluate_deprecated(self, source_list, reference_list):
"""
Evaluate the translation results with BLEU.
:param source_list: list of sentences to translate
:param reference_list: reference corpus, list of human translations
:return: average_bleu_score : average score of the best translation of every source;
best_result_list : the best translation of every source
best_result = (max_bleu_score, source, reference, best_candidate)
"""
bleu_score_list = np.zeros(len(source_list))
best_result_list = []
for i in range(len(source_list)):
source = source_list[i] # "3rd of March 2002"
reference = reference_list[i] # "2002-03-03"
candidates = self.inference(source) # ['2002-03-03', '0002-03-03', '1002-03-03']
max_bleu_score = float('-inf') # best score
best_candidate = None # best translation
reference_arr = reference.split('-')
# split reference on '-' into ['2002','03','03']
for candidate in candidates: # scan all candidates for the highest score
candidate_arr = candidate.split('-')
# split candidate on '-' into ['2002','03','03']
bleu_score = BleuScore.compute_bleu_corpus([[reference_arr]], [candidate_arr], N=2)[0]
# bleu_score = corpus_bleu([[reference_arr]], [candidate_arr], weights=(0.5, 0.5, 0, 0))
# reference_arr has length 3, so the 3-gram precision is easily 0;
# by the BLEU formula log(0) -> -inf, which forces bleu=0;
# weights=(0.5, 0.5, 0, 0) uses only the weighted log sum of the 1-gram and 2-gram precisions
if bleu_score > max_bleu_score:
max_bleu_score = bleu_score
best_candidate = candidate
bleu_score_list[i] = max_bleu_score
best_result = (max_bleu_score, source, reference, best_candidate)
print('i:{}, best_result:{}'.format(i, best_result))
best_result_list.append(best_result)
average_bleu_score = np.average(bleu_score_list)
return average_bleu_score, best_result_list
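# A hedged sketch of the 2-gram-capped BLEU used above; BleuScore is project-specific,
# but nltk's corpus_bleu (if available) behaves similarly with weights=(0.5, 0.5):
# from nltk.translate.bleu_score import corpus_bleu
# ref = ['2002', '03', '03']
# cand = ['2002', '03', '03']
# score = corpus_bleu([[ref]], [cand], weights=(0.5, 0.5)) # -> 1.0 for an exact match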
def evaluate(self, source_list, reference_list):
"""
使用 bleu 对翻译结果进行评价
1.推理时采用 beamsearch (窗口大小 k = 3), 我们取 bleu 得分最高的作为此样本的预测序列
2.词元(term)的粒度
(1) '1990-09-23' 使用分隔符 '-' 切分为 3个 term ['1990','09','23'],
计算 bleu 时设置 N_gram 的长度上限为 2(仅仅考虑 1-gram, 2-gram)
(2) '1978-12-21' 切分为 10个 term ['1', '9', '7', '8', '-', '1', '2', '-', '2', '1']
:param source_list: 待翻译的句子的列表
:param reference_list: 对照语料, 人工翻译的句子列表
:return: average_bleu_score : 所有 source 的最佳翻译结果的平均分数 ;
best_result_list : 所有 source 的最佳翻译结果
best_result = (max_bleu_score, source, reference, best_candidate)
"""
bleu_score_list = np.zeros(len(source_list))
best_result_list = []
for i in range(len(source_list)):
source = source_list[i] # "3rd of March 2002"
reference = reference_list[i] # "2002-03-03"
candidates = self.inference(source) # ['2002-03-03', '0002-03-03', '1002-03-03']
# reference_arr = reference.split('-')
# split reference on '-' into ['2002','03','03']
# candidate_arr_list = [candidate.split('-') for candidate in candidates]
reference_arr = list(reference)
candidate_arr_list = [list(candidate) for candidate in candidates]
candidates_bleu_score = BleuScore.compute_bleu_corpus([[reference_arr]] * len(candidates), candidate_arr_list, N=4)
max_bleu_score = np.max(candidates_bleu_score)
best_idx = np.argmax(candidates_bleu_score)
bleu_score_list[i] = max_bleu_score
best_candidate = candidates[best_idx]
best_result = (max_bleu_score, source, reference, best_candidate)
print('i:{}, best_result:{}'.format(i, best_result))
best_result_list.append(best_result)
average_bleu_score = np.average(bleu_score_list)
return average_bleu_score, best_result_list
class MachineTranslationV2:
"""
基于 LSTM + seq2seq + attention 的翻译模型
对比 class BasicMachineTranslation 改进如下:
1. 采用面向对象实现
2. 更改了 attention 机制
(1) 对于 decoder, 第一个时间步的输入的 隐藏状态向量(s) 和 细胞状态向量(c) 为 encoder 最后一个时间步的输出
(2) 对于 decoder, 每一个时间步的输入除了 attention 模块对 encoder 所有时间步的输出的加权和 之外,
还引入了 decoder 上一个时间步的输出
3. 基于 beamsearch 的推理
(1) decoder 采用一体化模型解码的方式, 即构建推理计算图, 一次推理拿到所有时间步的结果
Author: xrh
Date: 2019-12-16
ref:
1.https://github.com/enggen/Deep-Learning-Coursera
2.论文 Neural machine translation by jointly learning to align and translate
"""
def __init__(self, Tx, Ty, n_a, n_h, vocab_target, inv_vocab_target, vocab_source, k=3,
use_pretrain=False, model_path='models/lstm_seq2seq_attention.h5'):
"""
Initialize the model.
:param Tx: length of the encoder input sequence
:param Ty: length of the decoder output sequence
:param n_a: hidden-state dimension of the encoder LSTM, n_a = 64
:param n_h: hidden-state dimension of the decoder LSTM, n_h = 128
:param vocab_target: vocabulary of the output sequence
:param inv_vocab_target: inverse vocabulary of the output sequence
:param vocab_source: vocabulary of the input sequence
:param k: beam width used during inference
:param use_pretrain: load a previously trained model
:param model_path: path of the pretrained model
"""
self.Tx = Tx
self.Ty = Ty
self.n_a = n_a
self.n_h = n_h
self.vocab_target = vocab_target
self.inv_vocab_target = inv_vocab_target
self.vocab_source = vocab_source
self.k = k
self.model_path = model_path
# size of the output vocabulary
self.vocab_target_size = len(vocab_target)
# size of the input vocabulary
self.vocab_source_size = len(vocab_source)
# declare and initialize all layers that make up the computation graph
self.__init_computation_graph()
# computation graph used for training
self.model_train = self.train_model()
# computation graph used for inference
self.infer_model = self.inference_model()
if use_pretrain: # load a previously trained model
self.model_train.load_weights(self.model_path)
print('load pretrained model success')
def __init_computation_graph(self):
"""
对组成计算图的所有网络层进行声明和初始化
:return:
"""
# Model 也可以作为一个网络层
self.model_one_step_attention = self.__one_step_attention_model(self.Tx, self.n_a, self.n_h)
self.pre_activation_LSTM_cell = Bidirectional(LSTM(self.n_a, return_sequences=True, return_state=True),
name='encoder_lstm')
self.concatenate_h = Concatenate(name='concatenate_h')
self.concatenate_c = Concatenate(name='concatenate_c')
self.concatenate_context = Concatenate()
self.post_activation_LSTM_cell = LSTM(self.n_h, return_state=True, name='decoder_lstm')
self.output_layer = Dense(len(self.vocab_target), activation='softmax', name='decoder_output')
self.lambda_argmax = Lambda(K.argmax, arguments={'axis': -1}, name='argmax_tensor')
self.lambda_one_hot = Lambda(TensorUtils.one_hot_tensor, arguments={'num_classes': len(self.vocab_target)},
name='one_hot_tensor')
# self.reshape = Reshape(target_shape=(1, len(self.vocab_target)))
self.lambda_expand_dims = Lambda(K.expand_dims, arguments={'axis': 1}, name='expand_dims')
self.lambda_whole_top_k = Lambda(TensorUtils.whole_top_k_tensor, arguments={'k': self.k},
name='whole_top_k_tensor')
def __one_step_attention_model(self, Tx, n_a, n_h):
"""
attention 模块的实现
把 几个网络层(keras layer) 包装为 model ,并通过重新定义 model 的输入的方式 来共享 layer 的权重
:param Tx: 输入序列的长度
:param n_a: encoder-lstm 中隐藏状态的维度
:param n_h: decoder-lstm 中隐藏状态的维度
:return: keras model
model inputs: [a0, s_prev0]
model outputs : context 向量, 作为 decoder-lstm 的输入 shape: (N, 1, 2*n_a)
"""
repeator = RepeatVector(Tx)
concatenator = Concatenate(axis=2)
densor = Dense(1, activation="relu")
activator = Softmax(axis=1)
dotor = Dot(axes=1)
a0 = Input(shape=(Tx, 2 * n_a), name='a') # shape: (N, Tx, 2 * n_a)
s_prev0 = Input(shape=(n_h,), name='s_prev') # shape: (N, Tx, n_h)
a = a0 # otherwise keras raises: ValueError: Graph disconnected: cannot obtain value for tensor Tensor ....
# The following previous layers were accessed without issue: []
s_prev = s_prev0
s_prev = repeator(s_prev) # shape: (N, Tx, n_h)
concat = concatenator([a, s_prev]) # shape: (N, Tx, 2*n_a+n_h)
e = densor(concat) # shape: (N, Tx, 1)
alphas = activator(e) # shape: (N, Tx, 1)
context = dotor([alphas, a]) # shape: (N, 1, 2*n_a)
model = Model(inputs=[a0, s_prev0], outputs=context) # a Model can also be used as a layer
return model
def train_model(self,):
"""
将各个 网络层(layer) 拼接为训练计算图, 包括 encoder 和 decoder
1. 解码时加入上一个时刻的输出单词,
考虑场景: 若前一个时刻的词是 '-', 则当前词必须为数字
在 decoder中, 经过 softmax 输出后 取最大的 那一个字符的 one-hot 向量 与 context 拼接后输入 decoder-lstm
2. 修改 decoder-LSTM 的初始 隐藏状态的输入 ,由原来的 0 向量,改为 encoder-LSTM 最后一个时间步的隐状态(注意进行拼接)
3. 把所有的 keras layer object 声明为类变量,以便 后面重构 decoder 可以使用训练好的网络结构
:return: Keras model instance
"""
X = Input(shape=(self.Tx, self.vocab_source_size)) # shape: (N,Tx,vocab_source_size)
pred0 = Input(shape=(1, self.vocab_target_size), name='pred0') # shape: (m ,1, 11)
pred = pred0
# print('pred: after Input', pred)
outputs = []
# lstm-encoder
a, forward_h, forward_c, backward_h, backward_c = self.pre_activation_LSTM_cell(
inputs=X) # shape of a : (N,Tx, 2*n_a)
# hidden-state vector of the last time step
h = self.concatenate_h(inputs=[forward_h, backward_h]) # shape (N, 2*n_a=128)
# cell-state vector of the last time step
c = self.concatenate_c(inputs=[forward_c, backward_c]) # shape (N, 2*n_a=128)
# lstm-decoder
for t in range(self.Ty): # iterate over the Ty time steps # TODO: this loop can blow up memory (OOM); avoid it in the training graph
context = self.model_one_step_attention(inputs=[a, h]) # shape of context : (N, 1, 2*n_a=128)
# print('context after one_step_attention: ', context)
context = self.concatenate_context(inputs=[context, pred]) # shape of context: (N,1,128+11=139)
# print('context after Concatenate: ', context)
h, _, c = self.post_activation_LSTM_cell(inputs=context, initial_state=[h, c]) # the input context has only one time step
out = self.output_layer(inputs=h) # shape (N, vocab_target)
pred = self.lambda_argmax(inputs=out) # shape (N, )
pred = self.lambda_one_hot(inputs=pred) # shape (N, vocab_target_size)
# print(pred)
pred = self.lambda_expand_dims(inputs=pred) # shape: (N ,1, vocab_target_size)
# print(pred)
outputs.append(out) # shape : (Ty, N, vocab_target_size)
model = Model(inputs=[X, pred0], outputs=outputs)
# shape of outputs : (Ty, N, vocab_target)
return model
def fit(self, Xoh, Yoh, epoch_num=120, batch_size=2048):
"""
训练模型
:param Xoh: 输入序列 (one-hot化)
:param Yoh: 输出序列 (one-hot化)
:param epoch_num: 模型训练的 epoch 个数, 一般训练集所有的样本模型都见过一遍才算一个 epoch
:param batch_size: 选择 min-Batch梯度下降时, 每一次输入模型的样本个数 (默认 = 2048)
:return:
"""
N = np.shape(Xoh)[0] # 训练样本总数
# 打印 模型(计算图) 的所有网络层
# print(self.model_train.summary())
# 画出计算图
# plot_model(self.model_train, to_file='docs/images/model_train_attention.png', show_layer_names=True, show_shapes=True)
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01/epoch_num)
self.model_train.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
outputs = list(Yoh.swapaxes(0, 1)) # Yoh.swapaxes(0,1) 第0维度 和 第1 维度交换,原来为(N,T_y,11) 变换后 为:(T_y,N,11)
pred0 = np.zeros((N, 1, self.vocab_target_size))
history = self.model_train.fit([Xoh, pred0], outputs, epochs=epoch_num, batch_size=batch_size, validation_split=0.1)
# 将训练好的模型保存到文件
self.model_train.save(self.model_path)
print('save model sucess ')
def inference_model(self):
"""
将各个 网络层(layer) 拼接为推理计算图
并实现 beamsearch (带窗口的贪心搜索)
:return:
"""
X = Input(shape=(self.Tx, self.vocab_source_size)) # shape: (k, Tx, vocab_source_size)
pred0 = Input(shape=(1, self.vocab_target_size), name='pred0') # shape: (k ,1, vocab_target_size)
pred = pred0
# lstm-encoder
a, forward_h, forward_c, backward_h, backward_c = self.pre_activation_LSTM_cell(
inputs=X) # shape of a : (k, Tx, 2*n_a)
# hidden-state vector of the encoder's last time step
h = self.concatenate_h(inputs=[forward_h, backward_h]) # shape (k, 2*n_a=128)
# cell-state vector of the encoder's last time step
c = self.concatenate_c(inputs=[forward_c, backward_c]) # shape (k, 2*n_a=128)
outputs = [] # record the top-k window emitted by the decoder at every time step
# lstm-decoder
for t in range(self.Ty): # iterate over the Ty time steps
context = self.model_one_step_attention(inputs=[a, h]) # shape of context : (k, 1, 2*n_a=128)
# print('context after one_step_attention: ', context)
context = self.concatenate_context(inputs=[context, pred]) # shape of context: (k, 1, 128+11=139)
# print('context after Concatenate: ', context)
h, _, c = self.post_activation_LSTM_cell(inputs=context, initial_state=[h, c]) # the input context has only one time step
out = self.output_layer(inputs=h) # shape (k, vocab_target)
# for beamsearch
top_k_index = self.lambda_whole_top_k(inputs=out) # matrix indices of the top-k elements, shape (k, 2)
# TODO: feeding data raises an error; suspected cause: the layer's input has shape (None, ...) while its output has shape (k, ...)
# sample indices of the top-k entries, shape (k, )
# sample_id = tf.gather(top_k_index, indices=[0], axis=1) # indexing into the array, equivalent to top_k_index[:, 0]
# word indices of the top-k entries, shape (k, )
# word_id = tf.gather(top_k_index, indices=[1], axis=1) # equivalent to top_k_index[:, 1]
sample_id = Lambda(tf.gather, arguments={'indices': [0], 'axis': 1})(top_k_index)
word_id = Lambda(tf.gather, arguments={'indices': [1], 'axis': 1})(top_k_index)
sample_id = Lambda(tf.squeeze)(sample_id)
word_id = Lambda(tf.squeeze)(word_id)
# h = K.gather(h, sample_id) # indexing into the array, equivalent to h = h[sample_id, :]
# c = K.gather(c, sample_id)
h = Lambda(K.gather, arguments={'indices': sample_id})(h)
c = Lambda(K.gather, arguments={'indices': sample_id})(c)
word_id_one_hot = self.lambda_one_hot(inputs=word_id) # shape (k, vocab_target_size)
pred = self.lambda_expand_dims(inputs=word_id_one_hot) # shape: (k ,1, vocab_target_size)
outputs.append(top_k_index) # shape : (Ty, k, 2)
model = Model(inputs=[X, pred0], outputs=outputs)
# shape of outputs : (Ty, k, 2)
return model
def beam_search_seq_gen(self, top_k_index_list):
"""
根据每一个时间步的 beamsearch 的预测结果, 生成解码序列
:param top_k_index_list: topk 个元素在矩阵中的标号, shape (Ty, k, 2)
:return:
"""
# decoder_seq shape (k, Ty)
decoder_seq = [] # 解码序列
for t in range(self.Ty): # iterate over all time steps
top_k_index = top_k_index_list[t] # shape (k, 2)
sample_id = top_k_index[:, 0] # sample indices of the top-k entries, shape (k, )
word_id = top_k_index[:, 1] # word indices of the top-k entries, shape (k, )
if t == 0:
decoder_seq = np.expand_dims(word_id, axis=1) # shape (k, 1)
# [[20]
# [11]
# [32]]
else:
pre_decoder_seq = decoder_seq # shape:(k, t): the previous step's decoded sequences
# [[20]
# [11]
# [32]]
decoder_seq = np.zeros((self.k, t + 1), dtype=np.int32) # this step appends one decoded position to the existing sequences
# top_k_index:
# [[1 10]
# [0 12]
# [0 21]]
# sample_id: [1, 0, 0]
# word_id: [10, 12, 21]
for i in range(self.k): # iterate over all beams
# the previous step's decoding serves as the prefix
prefix = pre_decoder_seq[sample_id[i]] # sample_id[0]: 1 , prefix: [11]
# this step's decoded position
c = word_id[i] # c: 10
# append it to the prefix
decoder_seq[i, :] = np.concatenate((prefix, [c]), axis=0) # [11, 10]
return decoder_seq
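# Worked example (illustrative, k=3, Ty=2): with top_k_index_list =
# [[[0, 20], [0, 11], [0, 32]], [[1, 10], [0, 12], [0, 21]]]
# step 0 gives decoder_seq = [[20], [11], [32]]; step 1 extends beam 1 with 10,
# beam 0 with 12 and beam 0 with 21, yielding [[11, 10], [20, 12], [20, 21]].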
def inference(self, Xoh):
"""
使用训练好的模型进行推理
:param Xoh: 输入序列 (one-hot化) shape: (N, Tx, vocab_source_size)
:return: candidate_group_list
"""
# print all layers of the model (computation graph)
print(self.infer_model.summary())
# draw the computation graph
plot_model(self.infer_model, to_file='images/infer_model_attention.png', show_layer_names=True, show_shapes=True)
candidate_group_list = []
# beam width k = 3
pred0 = np.zeros((self.k, 1, self.vocab_target_size))
for source_oh in Xoh: # iterate over all input sequences
# source_oh shape (Tx, vocab_source_size)
source_oh = np.expand_dims(source_oh, axis=0) # shape (1, Tx, vocab_source_size)
source_oh_batch = np.repeat(source_oh, self.k, axis=0)
# print(source_oh.shape) # (3, 30, 37): k=3, the sample is copied 3 times and fed to the model for inference
top_k_index = self.infer_model.predict([source_oh_batch, pred0])
decoder_result = self.beam_search_seq_gen(top_k_index)
candidate_group = []
for prediction in decoder_result:
candidate = ''.join(int_to_string(prediction, self.inv_vocab_target))
# print("output:", output)
candidate_group.append(candidate)
candidate_group_list.append(candidate_group)
return candidate_group_list
def evaluate(self, source_list, reference_list):
"""
使用 bleu 对翻译结果进行评价
1.推理时采用 beamsearch (窗口大小 k = 3), 我们取 bleu 得分最高的作为此样本的预测序列
2.词元(term)的粒度
(1) '1990-09-23' 使用分隔符 '-' 切分为 3个 term ['1990','09','23'],
计算 bleu 时设置 N_gram 的长度上限为 2(仅仅考虑 1-graN, 2-gram)
(2) '1978-12-21' 切分为 10个 term ['1', '9', '7', '8', '-', '1', '2', '-', '2', '1']
:param source_list: 待翻译的句子的列表
:param reference_list: 对照语料, 人工翻译的句子列表
:return: average_bleu_score : 所有 source 的最佳翻译结果的平均分数 ;
best_result_list : 所有 source 的最佳翻译结果
best_result = (max_bleu_score, source, reference, best_candidate)
"""
bleu_score_list = np.zeros(len(source_list))
best_result_list = []
for i in range(len(source_list)):
source = source_list[i] # "3rd of March 2002"
reference = reference_list[i] # "2002-03-03"
candidates = self.inference(source) # ['2002-03-03', '0002-03-03', '1002-03-03']
# reference_arr = reference.split('-')
# split reference on '-' into ['2002','03','03']
# candidate_arr_list = [candidate.split('-') for candidate in candidates]
reference_arr = list(reference)
candidate_arr_list = [list(candidate) for candidate in candidates]
candidates_bleu_score = BleuScore.compute_bleu_corpus([[reference_arr]] * len(candidates), candidate_arr_list, N=4)
max_bleu_score = np.max(candidates_bleu_score)
best_idx = np.argmax(candidates_bleu_score)
bleu_score_list[i] = max_bleu_score
best_candidate = candidates[best_idx]
best_result = (max_bleu_score, source, reference, best_candidate)
print('i:{}, best_result:{}'.format(i, best_result))
best_result_list.append(best_result)
average_bleu_score = np.average(bleu_score_list)
return average_bleu_score, best_result_list
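# A minimal, hypothetical sketch of the whole-matrix top-k used by the beam search above;
# the real ArrayUtils.whole_topk_array is project-specific, this only illustrates the idea.
def _whole_topk_sketch(out, k):
    """Return the (row, col) indices of the k largest entries of a 2-D array."""
    import numpy as np
    flat = np.argpartition(out.ravel(), -k)[-k:]  # indices of the k largest values, unordered
    flat = flat[np.argsort(-out.ravel()[flat])]  # sort them by descending value
    return np.stack(np.unravel_index(flat, out.shape), axis=1)  # shape (k, 2)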
class Test:
def test_BasicMachineTranslation(self):
# keep TensorFlow from grabbing all GPU memory while the program runs
# physical_devices = tf.config.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
m = 10000 # total number of samples in the dataset
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
# 0. get to know the dataset
print(dataset[:5])
# human date machine date
# date to translate normalized date
# ('9 may 1998', '1998-05-09'),
# ('10.11.19', '2019-11-10'),
# ('9/10/70', '1970-09-10'),
# ('saturday april 28 1990', '1990-04-28'),
# ('thursday january 26 1995', '1995-01-26'),
print(human_vocab)
print(machine_vocab)
Tx = 30
Ty = 10
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
# X holds the dates to translate, Y the normalized translated dates
# Xoh is the one-hot encoding of X, Yoh the one-hot encoding of Y
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
sol = BasicMachineTranslation()
sol.model_implementation_naive(Tx=Tx, Ty=Ty, machine_vocab=machine_vocab, human_vocab=human_vocab, Xoh=Xoh,
Yoh=Yoh, m=m)
# sol.model_implementation_v2(Tx=Tx, Ty=Ty, machine_vocab=machine_vocab, inv_machine_vocab=inv_machine_vocab,
# human_vocab=human_vocab, Xoh=Xoh, Yoh=Yoh, m=m)
def test_MachineTranslationV1(self):
m = 10000 # total number of samples in the dataset
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
# 0. get to know the dataset
print(dataset[:5])
# human date machine date
# date to translate normalized date
# ('9 may 1998', '1998-05-09'),
# ('10.11.19', '2019-11-10'),
# ('9/10/70', '1970-09-10'),
# ('saturday april 28 1990', '1990-04-28'),
# ('thursday january 26 1995', '1995-01-26'),
print(human_vocab)
print(machine_vocab)
# split into training and test sets
train_dataset, test_dataset = train_test_split(dataset, test_size=0.2, random_state=1024)
Tx = 30
Ty = 10
X, Y, Xoh, Yoh = preprocess_data(train_dataset, human_vocab, machine_vocab, Tx, Ty)
# X holds the dates to translate (characters as indices), Y the normalized translated dates (characters as indices)
# Xoh is the one-hot encoding of X, Yoh the one-hot encoding of Y
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
# trainer = MachineTranslationV1(Tx=Tx, Ty=Ty, n_a=64, n_s=128, machine_vocab=machine_vocab,
# inv_machine_vocab=inv_machine_vocab, human_vocab=human_vocab)
# trainer.fit(Xoh=Xoh, Yoh=Yoh, epoch_num=200, batch_size=2048)
infer = MachineTranslationV1(Tx=Tx, Ty=Ty, n_a=64, n_s=128, machine_vocab=machine_vocab,
inv_machine_vocab=inv_machine_vocab, human_vocab=human_vocab, use_pretrain=True)
example = "december 21 1978"
candidates = infer.inference(example)
print("source:", example) # 待翻译的句子
print("beam search candidates: \n", candidates) # 模型翻译的句子列表
# ['9987-12-21', '1988-12-21', '2987-12-21']
test_dataset_arr = np.array(test_dataset[:200]) # 测试数据全部跑一遍太慢了
X_test = test_dataset_arr[:, 0] # 待翻译的日期(源序列)
Y_test = test_dataset_arr[:, 1] # 翻译后的日期(目标序列)
average_bleu_score, best_result_list = infer.evaluate(X_test,Y_test)
print('average_bleu_score:', average_bleu_score)
def test_MachineTranslationV2(self):
m = 10000 # total number of samples in the dataset
dataset, vocab_source, vocab_target, inv_vocab_target = load_dataset(m)
# 0. get to know the dataset
print(dataset[:5])
# human date machine date
# date to translate normalized date
# ('9 may 1998', '1998-05-09'),
# ('10.11.19', '2019-11-10'),
# ('9/10/70', '1970-09-10'),
# ('saturday april 28 1990', '1990-04-28'),
# ('thursday january 26 1995', '1995-01-26'),
print(vocab_source)
print(vocab_target)
# split into training and test sets
train_dataset, test_dataset = train_test_split(dataset, test_size=0.2, random_state=1024)
Tx = 30
Ty = 10
X, Y, Xoh, Yoh = preprocess_data(train_dataset, vocab_source, vocab_target, Tx, Ty)
# X holds the dates to translate (characters as indices), Y the normalized translated dates (characters as indices)
# Xoh is the one-hot encoding of X, Yoh the one-hot encoding of Y
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
# 1. model training
n_a = 64
n_h = 128
# trainer = MachineTranslationV2(Tx=Tx, Ty=Ty, n_a=n_a, n_h=n_h, vocab_target=vocab_target,
# inv_vocab_target=inv_vocab_target, vocab_source=vocab_source,
# use_pretrain=False)
# trainer.fit(Xoh=Xoh, Yoh=Yoh, epoch_num=50, batch_size=512)
# 2. model inference
X_test, Y_test, Xoh_test, Yoh_test = preprocess_data(test_dataset, vocab_source, vocab_target, Tx, Ty)
# tf.python.framework_ops.disable_eager_execution()
infer = MachineTranslationV2(Tx=Tx, Ty=Ty, n_a=n_a, n_h=n_h, vocab_target=vocab_target,
inv_vocab_target=inv_vocab_target, vocab_source=vocab_source,
use_pretrain=True)
candidate_group_list = infer.inference(Xoh_test[:10])
print(candidate_group_list)
# average_bleu_score, best_result_list = infer.evaluate(X_test,Y_test)
#
# print('average_bleu_score:', average_bleu_score)
if __name__ == '__main__':
test = Test()
# test.test_BasicMachineTranslation()
test.test_MachineTranslationV1()
# test.test_MachineTranslationV2()
| 34.600894 | 128 | 0.586628 |
8bd06748bac62ea851dbcc3b99fe1b5f1ca60408
| 4,998 |
py
|
Python
|
ChunkServer.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | 1 |
2015-08-02T09:53:18.000Z
|
2015-08-02T09:53:18.000Z
|
ChunkServer.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | null | null | null |
ChunkServer.py
|
lihuiba/SoftSAN
|
1b8ab2cae92b7aac34211909b27d4ebe595275d7
|
[
"Apache-2.0"
] | 2 |
2018-03-21T04:59:50.000Z
|
2019-12-03T15:54:17.000Z
|
import rpc, logging
import messages_pb2 as msg
import guid as Guid
import mds, config, util
import gevent.server
import Backend
from pytgt.tgt_ctrl import *
import random
class ChunkServer:
def __init__(self, prefix_vol='lv_softsan_', vgname='VolGroup'):
self.lvm = Backend.LVM_SOFTSAN()
self.tgt = Tgt()
self.prefix_vol = prefix_vol
self.vgname = vgname
# always use lun_index=1.
def AssembleVolume(self, req):
self.tgt.reload()
ret = msg.AssembleVolume_Response()
str_guid = Guid.toStr(req.volume.guid)
lv_name = self.prefix_vol+str_guid
if not self.lvm.haslv(lv_name):
ret.error = "Chunk {0} does not exist!".format(str_guid)
return ret
target_name = "iqn:softsan_"+str_guid
target_id = self.tgt.target_name2target_id(target_name)
if target_id != None:
ret.access_point = target_name
return ret
while True:
target_id = str(random.randint(0,1024*1024))
if not self.tgt.is_in_targetlist(target_id):
break
lun_path = '/dev/'+self.vgname+'/'+lv_name
if self.tgt.new_target_lun(target_id, target_name, lun_path, 'ALL')!=None:
ret.error = "Failed to export chunk {0} with tgt".format(str_guid)
return ret
ret.access_point = target_name
return ret
def DisassembleVolume(self, req):
self.tgt.reload()
ret = msg.DisassembleVolume_Response()
target_name = req.access_point
target_id = self.tgt.target_name2target_id(target_name)
if target_id==None:
ret.error='No such access_point'
return ret
if self.tgt.delete_target(target_id)!=None:
ret.error=('failed to Disassemble Volume'+target_name)
return ret
# try to create every requested chunk. however, if some chunk can not be created, fill the ret.error with the output of lvcreate
def NewChunk(self, req):
self.lvm.reload()
ret = msg.NewChunk_Response()
size = str(req.size)+'M'
for i in range(req.count):
a_guid = Guid.generate()
lv_name = self.prefix_vol+Guid.toStr(a_guid)
lv_size = size
output = self.lvm.lv_create(self.vgname, lv_name, lv_size)
if output!=None:
ret.error = str(i) + ':' + output + ' '
break
t=ret.guids.add()
Guid.assign(t, a_guid)
return ret
# try to delete every requested chunk. if it can not delete, fill the ret.error with output of lvremove
def DeleteChunk(self, req):
self.lvm.reload()
ret = msg.DeleteChunk_Response()
print 'ChunkServer: DeleteChunk'
for a_guid in req.guids:
str_guid=Guid.toStr(a_guid)
lv_name = self.prefix_vol+str_guid
lv_path = '/dev/'+self.vgname+'/'+lv_name
output = self.lvm.lv_remove(lv_path)
if output!=None:
ret.error = "Unable to delete chunk {0}:\n{1}".format(str_guid, output)
break
t=ret.guids.add()
Guid.assign(t, a_guid)
return ret
def doHeartBeat(self, serviceport, stub):
serviceip=stub.socket.getsockname()[0]
while True:
info=msg.ChunkServerInfo()
info.ServiceAddress=serviceip
info.ServicePort=serviceport
self.lvm.reload_softsan_lvs()
for lv in self.lvm.softsan_lvs:
chk=info.chunks.add()
name4guid = lv.name.split(self.prefix_vol)[1]
Guid.assign(chk.guid, Guid.fromStr(name4guid))
chk.size = int(lv.get_sizes(lv.total_extents)[2])
stub.callMethod('ChunkServerInfo', info)
print 'for test--------------------------------', random.randint(0,100)
gevent.sleep(1)
def heartBeat(self, confobj):
guid=Guid.generate()
stub=rpc.RpcStub(guid, None, mds.MDS)
while True:
try:
socket=gevent.socket.socket()
mdsEndpoint=(confobj.mdsaddress, int(confobj.mdsport))
socket.connect(mdsEndpoint)
stub.socket=socket
self.doHeartBeat(int(confobj.port), stub)
except KeyboardInterrupt:
raise
except:
logging.debug('An error occurred during heart beat, preparing to retry', exc_info=1)
gevent.sleep(2)
def test_ChunkServer():
print ' test begin '.center(100,'-')
print
server=ChunkServer()
logging.basicConfig(level=logging.DEBUG)
# mock the newchunk request from client
req_newchunk=msg.NewChunk_Request()
req_newchunk.size=32
req_newchunk.count=1
ret_newchunk = server.NewChunk(req_newchunk)
# mock the assemblevolume request from client
req_assemblevolume=msg.AssembleVolume_Request()
Guid.assign(req_assemblevolume.volume.guid, ret_newchunk.guids[-1])
req_assemblevolume.volume.size=32
ret_assemblevolume = server.AssembleVolume(req_assemblevolume)
# # mock req_disassemblevolume
req_disassemblevolume = msg.DisassembleVolume_Request()
req_disassemblevolume.access_point = ret_assemblevolume.access_point
ret_disassemblevolume = server.DisassembleVolume(req_disassemblevolume)
# mock the delchunk request from client
req_delchunk = msg.DeleteChunk_Request()
for a_guid in ret_newchunk.guids:
t=req_delchunk.guids.add()
Guid.assign(t, a_guid)
ret_delchunk = server.DeleteChunk(req_delchunk)
print
print ' test end '.center(100,'-')
if __name__=='__main__':
pass
# link_test()
#test_ChunkServer()
| 31.2375 | 130 | 0.726491 |
7bfa1761e0f14130896d9a6799049a6aae704952
| 394 |
py
|
Python
|
2015/11/look-begin-rate-table/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2015/11/look-begin-rate-table/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2015/11/look-begin-rate-table/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1qBD8PVaqBNYx3iyXuQmNJgGFtF7E8cysyGeiBlMMEEw'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
def percent(value):
return unicode(round(float(value), 3) * 100) + '%'
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS + [percent]
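# Illustrative only: how the `percent` filter above behaves on sample inputs
# (assumes Python 2, where unicode() exists, matching this file):
# percent('0.123') -> u'12.3%'
# percent(0.5) -> u'50.0%'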
| 21.888889 | 77 | 0.771574 |
efb9e57ba66eb8f54906265e8958d1d884c48c2e
| 7,084 |
py
|
Python
|
Packs/Cymulate/Integrations/Cymulate/Cymulate_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Cymulate/Integrations/Cymulate/Cymulate_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Cymulate/Integrations/Cymulate/Cymulate_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from Cymulate import cymulate_test, fetch_incidents, cymulate_get_incident_info, Client, CymulateModuleTypeEnum
BASE_URL = 'https://api.cymulate.com/v1/'
MOCK_TEST = {"success": True, "data": ["Phishing Awareness", "Web Application Firewall",
"Lateral Movement", "Data Exfiltration",
"Immediate Threats Intelligence", "Email Gateway",
"Endpoint Security", "Web Gateway", "Full Kill-Chain APT"]}
FETCH_INCIDENTS_TEST = {"success": True,
"data": [
{
"Id": "5dbeaf53a910862fa859491e",
"Name": " Ursnif infection with Dridex and Powershell Empire",
"Timestamp": "03/11/2019 05:43:31",
"InProgress": False
},
{
"Id": "5dbea88c357ca849ac41bb2e",
"Name": "Pcap and malware for an ISC diary (Emotet + Trickbot)",
"Timestamp": "03/11/2019 05:14:36",
"InProgress": False
},
{
"Id": "5d528f78705e364e9055033c",
"Name": "BlackSquid Drops XMRig Miner",
"Timestamp": "13/08/2019 06:22:48",
"InProgress": False
},
{
"Id": "5d25dc5d86d73c22203d919f",
"Name": "dll2",
"Timestamp": "10/07/2019 08:38:53",
"InProgress": False
},
{
"Id": "5cc7109ca842693cc0f15588",
"Name": "hot files test 8",
"Timestamp": "29/04/2019 10:56:28",
"InProgress": False
},
{
"Id": "5c8e6cbf3dd9fe08186d7b64",
"Name": "Hancitor malspam infections from 2018-08-13 and 2018-08-14",
"Timestamp": "17/03/2019 11:50:23",
"InProgress": False
}
]
}
CYMULATE_GET_INCIDENT_INFO_TEST = {"success": True,
"data": [
{
"Module": "Immediate Threats Intelligence",
"Penetration_Vector": "-",
"Attack_Payload": "2019-07-08-Ursnif-binary-retrieved-by-Word-macro_"
"2b999360-a3f9-11e9-980e-633d1efd31f3.exe",
"Name": " Ursnif infection with Dridex and Powershell Empire",
"Timestamp": "03/11/2019 05:45:47",
"Sha1": "ff57bfaed6db3379bbf69a19404a6e21668a7a52",
"Sha256": "0894e82d9397d909099c98fe186354591ae86a73230700f462b72ae36c700ddf",
"Md5": "ef99338df4078fab6e9a8cf6797a1d14",
"Status": "Penetrated",
"Attack_Vector": "Endpoint Security",
"Attack_Type": "Antivirus",
"Mitigation": "N/A",
"Description": "N/A",
"ID": "c1d33138a2101724889862152444ec7e",
"Related_URLS": "N/A",
"Related_Email_Addresses": "N/A"
}
]
}
TECHNICAL_INCIDENTS_IDS = ['5dbeaf53a910862fa859491e', '5dbea88c357ca849ac41bb2e', '5d528f78705e364e9055033c',
'5d25dc5d86d73c22203d919f', '5cc7109ca842693cc0f15588', '5c8e6cbf3dd9fe08186d7b64']
MOCK_TIMESTAMP = "2020-12-02T16%3A32%3A37"
ATTACK_ID = "5dbeaf53a910862fa859491e"
def local_get_last_run():
return {}
def test_test_client(requests_mock):
requests_mock.get(BASE_URL + 'user/modules', json=MOCK_TEST)
client = Client(
base_url=BASE_URL,
headers={"x-token": 'RW#fdsfds34e343rdes'},
verify=False)
cymulate_test(client=client, is_fetch=False)
def test_fetch_incidents(mocker, requests_mock):
requests_mock.get(BASE_URL + 'immediate-threats/ids?from={}'.format(MOCK_TIMESTAMP),
json=FETCH_INCIDENTS_TEST)
for incident_id in TECHNICAL_INCIDENTS_IDS:
requests_mock.get(BASE_URL + 'immediate-threats/attack/technical/' + incident_id,
json=CYMULATE_GET_INCIDENT_INFO_TEST)
mocker.patch.object(demisto, 'params',
return_value={'fetch_time': MOCK_TIMESTAMP})
mocker.patch.object(demisto, 'getLastRun', side_effect=local_get_last_run)
client = Client(
base_url=BASE_URL,
headers={"x-token": 'RW#fdsfds34e343rdes'},
verify=False)
next_run, incidents, remain_incidents = fetch_incidents(client=client,
module_type=CymulateModuleTypeEnum.IMMEDIATE_THREATS,
last_run={'last_fetch': '2020-12-02T16:32:37'},
first_fetch_time={},
only_penatrated=False,
limit=20,
integration_context=None)
assert len(incidents) == 6
def test_cymulate_get_incident_info(mocker, requests_mock):
mocker.patch.object(demisto, 'args', return_value={"module_type": CymulateModuleTypeEnum.IMMEDIATE_THREATS.name,
"attack_id": ATTACK_ID})
requests_mock.get(BASE_URL + 'immediate-threats/attack/technical/' + ATTACK_ID,
json=CYMULATE_GET_INCIDENT_INFO_TEST)
client = Client(
base_url=BASE_URL,
headers={"x-token": 'RW#fdsfds34e343rdes'},
verify=False)
# Get incident's parent id
attack_id = demisto.args().get('attack_id')
technical_info = cymulate_get_incident_info(client=client, attack_id=attack_id)
assert(technical_info[0]['ID'] == CYMULATE_GET_INCIDENT_INFO_TEST['data'][0]['ID'])
| 48.190476 | 120 | 0.449605 |
ef58a8dd9159150f5c62dc1540bb5470aed1bc7c
| 1,370 |
py
|
Python
|
kts/core/backend/io.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 18 |
2019-02-14T13:10:07.000Z
|
2021-11-26T07:10:13.000Z
|
kts/core/backend/io.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-02-17T14:06:42.000Z
|
2019-09-15T18:05:54.000Z
|
kts/core/backend/io.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-09-15T13:12:42.000Z
|
2020-04-15T14:05:54.000Z
|
import time
import kts.core.backend.signal as rs
class TextChunk(rs.Signal):
def __init__(self, timestamp, text, run_id=None):
self.timestamp = timestamp
self.text = text
self.run_id = run_id
def get_contents(self):
res = {'timestamp': self.timestamp, 'text': self.text}
if self.run_id is not None:
res['run_id'] = self.run_id
return res
class RemoteTextIO:
def __init__(self, run_id=None):
self.buf = ""
self.run_id = run_id
def write(self, b):
self.buf += b
if self.buf.find('\n') != -1:
self.flush()
def flush(self):
if self.buf:
for line in self.buf.split('\n'):
if line:
rs.send(TextChunk(time.time(), line + '\n', self.run_id))
self.buf = ""
class LocalTextIO:
def __init__(self, report, run_id):
self.buf = ""
self.report = report
self.run_id = run_id
def write(self, b):
self.buf += b
if b.find('\n') != -1:
self.flush()
def flush(self):
if self.buf:
self.report.update_text(self.run_id, timestamp=time.time(), text=self.buf)
self.buf = ""
class SuppressIO:
def __init__(self):
pass
def write(self, b):
pass
def flush(self):
pass
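# A minimal usage sketch (assumption: the rs.send signal wiring exists as above):
# redirect stdout into a RemoteTextIO so each completed line is emitted as a TextChunk.
#
# import sys
# old_stdout = sys.stdout
# sys.stdout = RemoteTextIO(run_id='demo-run')
# try:
#     print('hello')  # buffered, flushed as a TextChunk on newline
# finally:
#     sys.stdout.flush()
#     sys.stdout = old_stdout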
| 21.746032 | 86 | 0.535766 |
08e845d836398aafaf9d487f94d2d21eb1a18a13
| 802 |
py
|
Python
|
binary-tree-right-side-view/binary-tree-right-side-view.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
binary-tree-right-side-view/binary-tree-right-side-view.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
binary-tree-right-side-view/binary-tree-right-side-view.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def rightSideView(self, root: Optional[TreeNode]) -> List[int]:
if not root:
return []
output=[]
stack=[(root,0)]
prev_depth=0
while(stack):
node, depth = stack.pop(0)
if depth!=prev_depth:
output.append(prev_node.val)
if node.left:
stack.append((node.left, depth+1))
if node.right:
stack.append((node.right, depth+1))
prev_depth=depth
prev_node=node
output.append(prev_node.val)
return output
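# Worked example (illustrative): for the tree
#       1
#      / \
#     2   3
#      \   \
#       5   4
# the BFS order with depths is (1,0) (2,1) (3,1) (5,2) (4,2);
# each time the depth increases, the previously visited node was the
# rightmost of its level, so the output is [1, 3, 4].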
| 30.846154 | 67 | 0.523691 |
decba192a977ffe5ba11a40e9c31065a4fe14352
| 11,249 |
py
|
Python
|
src/austria/migrations/0001_initial.py
|
frocentus/offenewahlen_api
|
71860583890f8a4c23df67f271e8f13558fa9582
|
[
"MIT"
] | 14 |
2017-07-12T14:42:57.000Z
|
2017-09-08T20:32:53.000Z
|
src/austria/migrations/0001_initial.py
|
frocentus/offenewahlen_api
|
71860583890f8a4c23df67f271e8f13558fa9582
|
[
"MIT"
] | 209 |
2017-07-11T21:36:01.000Z
|
2017-11-05T01:10:50.000Z
|
src/austria/migrations/0001_initial.py
|
OKFNat/offenewahlen-api
|
71860583890f8a4c23df67f271e8f13558fa9582
|
[
"MIT"
] | 8 |
2017-07-15T09:40:42.000Z
|
2018-01-12T03:02:06.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-05 23:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='District',
fields=[
('short_code', models.CharField(max_length=3, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
'verbose_name_plural': 'districts',
'verbose_name': 'district',
'ordering': ['short_code'],
},
),
migrations.CreateModel(
name='Election',
fields=[
('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('short_name_text', models.CharField(max_length=50, null=True)),
('full_name', models.CharField(max_length=200)),
('election_type', models.CharField(max_length=100)),
('election_id', models.CharField(max_length=20, null=True)),
('wikidata_id', models.CharField(max_length=20, null=True, unique=True)),
('administrative_level', models.CharField(max_length=100)),
('election_day', models.DateTimeField(verbose_name='timestamp of election day')),
('status', models.CharField(default='init', max_length=200)),
],
options={
'verbose_name_plural': 'elections',
'ordering': ['short_name'],
'verbose_name': 'election',
'get_latest_by': 'election_day',
},
),
migrations.CreateModel(
name='List',
fields=[
('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('short_name_text', models.CharField(default=None, max_length=50)),
('full_name', models.CharField(default=None, max_length=200)),
],
options={
'verbose_name_plural': 'lists',
'verbose_name': 'list',
'ordering': ['short_name'],
},
),
migrations.CreateModel(
name='ListResult',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('votes', models.IntegerField(default=-1, null=True)),
],
options={
'verbose_name_plural': 'list results',
'verbose_name': 'list result',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Municipality',
fields=[
('code', models.CharField(max_length=5, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('kennzahl', models.CharField(default=None, max_length=5)),
],
options={
'verbose_name_plural': 'municipalities',
'verbose_name': 'municipality',
'ordering': ['code'],
},
),
migrations.CreateModel(
name='Party',
fields=[
('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
('short_name_text', models.CharField(default=None, max_length=50, unique=True)),
('full_name', models.CharField(max_length=200, unique=True)),
('family', models.CharField(default=None, max_length=200, null=True)),
('wikidata_id', models.CharField(default=None, max_length=20, null=True, unique=True)),
('website', models.CharField(default=None, max_length=100, null=True)),
('location', models.CharField(default=None, max_length=100, null=True)),
],
options={
'verbose_name_plural': 'parties',
'verbose_name': 'party',
'ordering': ['short_name'],
},
),
migrations.CreateModel(
name='PollingStation',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(default=None, max_length=200, null=True)),
('type', models.CharField(max_length=30)),
],
options={
'verbose_name_plural': 'polling stations',
'verbose_name': 'polling station',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='PollingStationResult',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('eligible_voters', models.IntegerField(default=-1, null=True)),
('votes', models.IntegerField(default=-1)),
('valid', models.IntegerField(default=-1)),
('invalid', models.IntegerField(default=-1)),
('ts_result', models.DateTimeField(verbose_name='timestamp of bmi result')),
],
options={
'verbose_name_plural': 'polling station results',
'ordering': ['id'],
'verbose_name': 'polling station result',
'get_latest_by': 'ts_result',
},
),
migrations.CreateModel(
name='RawData',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('ts_file', models.DateTimeField(null=True, verbose_name='creation date of original file')),
('ts_import', models.DateTimeField(default=None, verbose_name='import date of file into database')),
('hash', models.CharField(max_length=100)),
('content', models.TextField()),
('header', models.TextField(default=None, null=True)),
('dataformat', models.CharField(max_length=50)),
('description', models.TextField(default=None)),
],
options={
'verbose_name_plural': 'raw data',
'ordering': ['id'],
'verbose_name': 'raw data',
'get_latest_by': 'ts_file',
},
),
migrations.CreateModel(
name='RegionalElectoralDistrict',
fields=[
('short_code', models.CharField(max_length=2, primary_key=True, serialize=False)),
('name', models.CharField(default=None, max_length=100)),
],
options={
'verbose_name_plural': 'regional electoral districts',
'verbose_name': 'regional electoral district',
'ordering': ['short_code'],
},
),
migrations.CreateModel(
name='State',
fields=[
('short_code', models.CharField(max_length=1, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
'verbose_name_plural': 'states',
'verbose_name': 'state',
'ordering': ['short_code'],
},
),
migrations.AddIndex(
model_name='state',
index=models.Index(fields=['name', 'short_code'], name='austria_sta_name_6993c1_idx'),
),
migrations.AddIndex(
model_name='regionalelectoraldistrict',
index=models.Index(fields=['name', 'short_code'], name='austria_reg_name_b53223_idx'),
),
migrations.AddField(
model_name='rawdata',
name='election',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Election'),
),
migrations.AddField(
model_name='pollingstationresult',
name='election',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Election'),
),
migrations.AddField(
model_name='pollingstationresult',
name='polling_station',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.PollingStation'),
),
migrations.AddField(
model_name='pollingstation',
name='municipality',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Municipality'),
),
migrations.AddIndex(
model_name='party',
index=models.Index(fields=['full_name', 'short_name', 'short_name_text'], name='austria_par_full_na_2a1baa_idx'),
),
migrations.AddField(
model_name='municipality',
name='district',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.District'),
),
migrations.AddField(
model_name='municipality',
name='regional_electoral_district',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.RegionalElectoralDistrict'),
),
migrations.AddField(
model_name='listresult',
name='election_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='austria.List'),
),
migrations.AddField(
model_name='listresult',
name='polling_station_result',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='austria.PollingStationResult'),
),
migrations.AddField(
model_name='list',
name='party',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='austria.Party'),
),
migrations.AddIndex(
model_name='election',
index=models.Index(fields=['full_name', 'short_name', 'short_name_text'], name='austria_ele_full_na_bbb3c0_idx'),
),
migrations.AddField(
model_name='district',
name='state',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.State'),
),
migrations.AddIndex(
model_name='pollingstation',
index=models.Index(fields=['name'], name='austria_pol_name_56e525_idx'),
),
migrations.AddIndex(
model_name='municipality',
index=models.Index(fields=['name', 'code', 'kennzahl'], name='austria_mun_name_329503_idx'),
),
migrations.AddIndex(
model_name='list',
index=models.Index(fields=['short_name', 'full_name', 'short_name_text'], name='austria_lis_short_n_4b5871_idx'),
),
migrations.AddIndex(
model_name='district',
index=models.Index(fields=['name', 'short_code'], name='austria_dis_name_e39bb2_idx'),
),
]
| 43.265385 | 135 | 0.552938 |
3e771eb33afa0ab45444ee8bac5a61bd72d00ef3
| 11,803 |
py
|
Python
|
Packs/Microsoft365Defender/Integrations/Microsoft365DefenderEventCollector/Microsoft365DefenderEventCollector.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 1 |
2021-11-02T05:36:38.000Z
|
2021-11-02T05:36:38.000Z
|
Packs/Microsoft365Defender/Integrations/Microsoft365DefenderEventCollector/Microsoft365DefenderEventCollector.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 61 |
2021-10-07T08:54:38.000Z
|
2022-03-31T10:25:35.000Z
|
Packs/Microsoft365Defender/Integrations/Microsoft365DefenderEventCollector/Microsoft365DefenderEventCollector.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | null | null | null |
# pylint: disable=no-name-in-module
# pylint: disable=no-self-argument
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from abc import ABC
from typing import Any, Callable, Optional
from enum import Enum
from pydantic import BaseConfig, BaseModel, AnyUrl, validator # type: ignore[E0611, E0611, E0611]
from requests.auth import HTTPBasicAuth
import requests
from MicrosoftApiModule import *
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
MAX_ALERTS_PAGE_SIZE = 10000
ALERT_CREATION_TIME = 'alertCreationTime'
DEFENDER_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
SECURITY_SCOPE = 'https://securitycenter.onmicrosoft.com/windowsatpservice/.default'
AUTH_ERROR_MSG = 'Authorization Error: make sure tenant id, client id and client secret is correctly set'
''' HELPER CLASSES '''
# COPY OF SiemApiModule
class Method(str, Enum):
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
HEAD = 'HEAD'
PATCH = 'PATCH'
DELETE = 'DELETE'
def load_json(v: Any) -> dict:
if not isinstance(v, (dict, str)):
raise ValueError('headers are not dict or a valid json')
if isinstance(v, str):
try:
v = json.loads(v)
if not isinstance(v, dict):
raise ValueError('headers are not from dict type')
except json.decoder.JSONDecodeError as exc:
raise ValueError('headers are not valid Json object') from exc
if isinstance(v, dict):
return v
class IntegrationHTTPRequest(BaseModel):
method: Method
url: AnyUrl
verify: bool = True
headers: dict = dict() # type: ignore[type-arg]
auth: Optional[HTTPBasicAuth]
data: Any = None
class Config(BaseConfig):
arbitrary_types_allowed = True
_normalize_headers = validator('headers', pre=True, allow_reuse=True)(
load_json
)
class Credentials(BaseModel):
identifier: Optional[str]
password: str
def set_authorization(request: IntegrationHTTPRequest, auth_credentials):
"""Automatic authorization.
Supports {Authorization: Bearer __token__}
or Basic Auth.
"""
creds = Credentials.parse_obj(auth_credentials)
if creds.password and creds.identifier:
request.auth = HTTPBasicAuth(creds.identifier, creds.password)
auth = {'Authorization': f'Bearer {creds.password}'}
if request.headers:
request.headers |= auth # type: ignore[assignment, operator]
else:
request.headers = auth # type: ignore[assignment]
class IntegrationOptions(BaseModel):
"""Add here any option you need to add to the logic"""
proxy: bool = False
limit: int = 1000
class IntegrationEventsClient(ABC):
def __init__(
self,
request: IntegrationHTTPRequest,
options: IntegrationOptions,
session=requests.Session(),
):
self.request = request
self.options = options
self.session = session
self._set_proxy()
self._skip_cert_verification()
@abstractmethod
def set_request_filter(self, after: Any):
"""TODO: set the next request's filter.
Example:
"""
self.request.headers['after'] = after
def __del__(self):
try:
self.session.close()
except AttributeError as err:
demisto.debug(
f'ignore exceptions raised due to session not used by the client. {err=}'
)
def call(self, request: IntegrationHTTPRequest) -> requests.Response:
try:
response = self.session.request(**request.dict())
response.raise_for_status()
return response
except Exception as exc:
msg = f'something went wrong with the http call {exc}'
LOG(msg)
raise DemistoException(msg) from exc
def _skip_cert_verification(
self, skip_cert_verification: Callable = skip_cert_verification
):
if not self.request.verify:
skip_cert_verification()
def _set_proxy(self):
if self.options.proxy:
ensure_proxy_has_http_prefix()
else:
skip_proxy()
class IntegrationGetEvents(ABC):
def __init__(
self, client: IntegrationEventsClient, options: IntegrationOptions
) -> None:
self.client = client
self.options = options
def run(self):
stored = []
for logs in self._iter_events():
stored.extend(logs)
if len(stored) >= self.options.limit:
return stored[:self.options.limit]
return stored
def call(self) -> requests.Response:
return self.client.call(self.client.request)
@staticmethod
@abstractmethod
def get_last_run(events: list) -> dict:
"""Logic to get the last run from the events
Example:
"""
return {'after': events[-1]['created']}
@abstractmethod
def _iter_events(self):
"""Create iterators with Yield"""
pass
# END COPY OF SiemApiModule
class DefenderIntegrationOptions(IntegrationOptions):
first_fetch: str
class DefenderAuthenticator(BaseModel):
verify: bool
url: str
tenant_id: str
client_id: str
credentials: dict
ms_client: Any = None
def set_authorization(self, request: IntegrationHTTPRequest):
try:
if not self.ms_client:
demisto.debug('try init the ms client for the first time')
self.ms_client = MicrosoftClient(
base_url=self.url,
tenant_id=self.tenant_id,
auth_id=self.client_id,
enc_key=self.credentials.get('password'),
scope=SECURITY_SCOPE,
verify=self.verify,
self_deployed=True
)
token = self.ms_client.get_access_token()
auth = {'Authorization': f'Bearer {token}'}
if request.headers:
request.headers |= auth # type: ignore[assignment, operator]
else:
request.headers = auth # type: ignore[assignment]
demisto.debug('getting access token for Defender Authenticator - succeeded')
except BaseException as e:
# catch BaseException to catch also sys.exit via return_error
demisto.error(f'Fail to authenticate with Microsoft services: {str(e)}')
err_msg = 'Fail to authenticate with Microsoft services, see the error details in the log'
raise DemistoException(err_msg)
class DefenderHTTPRequest(IntegrationHTTPRequest):
params: dict = dict()
method: Method = Method.GET
_normalize_url = validator('url', pre=True, allow_reuse=True)(
lambda base_url: base_url + '/api/alerts'
)
class DefenderClient(IntegrationEventsClient):
authenticator: DefenderAuthenticator
request: DefenderHTTPRequest
options: DefenderIntegrationOptions
def __init__(self, request: DefenderHTTPRequest, options: IntegrationOptions, authenticator: DefenderAuthenticator):
self.authenticator = authenticator
super().__init__(request, options)
def set_request_filter(self, after: Any):
limit = min(self.options.limit, MAX_ALERTS_PAGE_SIZE)
if not after:
demisto.debug(f'lastRunObj is empty, calculate the first fetch time according {self.options.first_fetch=}')
first_fetch_date = dateparser.parse(self.options.first_fetch, settings={'TIMEZONE': 'UTC'})
after = datetime.strftime(first_fetch_date, DEFNDER_DATE_FORMAT) # type: ignore[arg-type]
self.request.params = {
'$filter': f'{ALERT_CREATION_TIME}+gt+{after}',
'$orderby': f'{ALERT_CREATION_TIME}+asc',
'$top': limit,
'$expand': 'evidence',
}
demisto.debug(f'setting the request filter to be: {self.request.params}')
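    # Illustrative request produced by the filter above (values hypothetical):
    #   GET <base_url>/api/alerts?$filter=alertCreationTime+gt+2022-01-01T00:00:00Z
    #       &$orderby=alertCreationTime+asc&$top=1000&$expand=evidence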
def authenticate(self):
self.authenticator.set_authorization(self.request)
class DefenderGetEvents(IntegrationGetEvents):
client: DefenderClient
def _iter_events(self):
self.client.authenticate()
self.client.set_request_filter(demisto.getLastRun() and demisto.getLastRun().get('after'))
response = self.client.call(self.client.request)
value = response.json().get('value', [])
demisto.debug(f'getting {len(value)} alerts from Defender Api')
return [value]
@staticmethod
def get_last_run(events: list) -> dict:
"""Logic to get the last run from the events
"""
        return {'after': events[-1]['alertCreationTime']} if events else demisto.getLastRun()
''' HELPER FUNCTIONS '''
''' COMMAND FUNCTIONS '''
def test_module(get_events: DefenderGetEvents) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type get_events: ``DefenderGetEvents``
:param get_events: the get_events instance
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
message: str = ''
try:
get_events.client.request.params = {'limit': 1}
get_events.run()
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'authenticate' in str(e):
message = AUTH_ERROR_MSG
else:
raise
return message
def main(command: str, demisto_params: dict):
demisto.debug(f'Command being called is {command}')
try:
options = DefenderIntegrationOptions.parse_obj(demisto_params)
request = DefenderHTTPRequest.parse_obj(demisto_params)
authenticator = DefenderAuthenticator.parse_obj(demisto_params)
        client = DefenderClient(request=request, options=options, authenticator=authenticator)
        get_events = DefenderGetEvents(client=client, options=options)
if command == 'test-module':
return_results(test_module(get_events=get_events))
elif command in ('fetch-events', 'microsoft-365-defender-get-events'):
events = get_events.run()
if command == 'microsoft-365-defender-get-events':
demisto.debug(f'{command=}, publishing events to the context')
human_readable = tableToMarkdown(name="Alerts:", t=events)
return_results(CommandResults('Microsoft365Defender.alerts', 'id', events, readable_output=human_readable))
elif events:
demisto.setLastRun(get_events.get_last_run(events))
demisto.debug(f'Last run set to {demisto.getLastRun()}')
if command == 'fetch-events' or argToBoolean(demisto_params.get('push_to_xsiam', False)):
# publishing events to XSIAM
vendor = demisto_params.get('vendor')
product = demisto_params.get('product')
demisto.debug(f'{command=}, publishing events to XSIAM')
send_events_to_xsiam(events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
    # Later dicts win the merge: args override params, and the stored last run overrides both.
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
main(demisto.command(), demisto_params)
| 32.425824 | 123 | 0.649581 |
e4295da2f7812b00aee90ea339be9189b4602eb0
| 352 |
py
|
Python
|
LFA/class_assembly_line.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 10 |
2020-12-08T20:18:15.000Z
|
2021-06-07T20:00:07.000Z
|
LFA/class_assembly_line.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2 |
2021-06-28T03:42:13.000Z
|
2021-06-28T16:53:13.000Z
|
LFA/class_assembly_line.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2 |
2021-01-14T19:59:20.000Z
|
2021-06-15T11:53:21.000Z
|
from utils import *
from class_moore import Moore
class Assembly_Line:
def __init__(self, assembly_type):
self.automaton = Moore(*read_7_tuple_from_data(assembly_type))
self.type = self.automaton.name
def __len__(self):
return self.automaton.capacity
def __str__(self):
return str(self.automaton.output)
| 23.466667 | 70 | 0.704545 |
39adf3d80d50ee6f6c6e6afd03e42f583ec4eb69
| 879 |
py
|
Python
|
exercises/fr/test_04_12_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/test_04_12_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/test_04_12_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert len(doc1.ents) == 2, "Attendu deux entités dans le premier exemple"
assert (
doc1.ents[0].label_ == "SITE_WEB" and doc1.ents[0].text == "Reddit"
), "Vérifie l'entité une dans le premier exemple"
assert (
doc1.ents[1].label_ == "SITE_WEB" and doc1.ents[1].text == "Patreon"
), "Vérifie l'entité deux dans le premier exemple"
assert len(doc2.ents) == 1, "Attendu une entité dans dans le deuxième exemple"
assert (
doc2.ents[0].label_ == "SITE_WEB" and doc2.ents[0].text == "YouTube"
), "Vérifie l'entité dans le deuxième exemple"
assert len(doc3.ents) == 1, "Attendu une entité dans dans le troisième exemple"
assert (
doc3.ents[0].label_ == "SITE_WEB" and doc3.ents[0].text == "Reddit"
), "Vérifie l'entité dans le troisième exemple"
__msg__.good("Joli boulot !")
| 46.263158 | 83 | 0.631399 |
0806256e761da98bcded927cddb03e948b352211
| 9,338 |
py
|
Python
|
tests/services/test_event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1 |
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
tests/services/test_event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286 |
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
tests/services/test_event.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
import pytest
def test_update_event_dates_with_recurrence_rule(client, seeder, utils, app):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import Event
from project.services.event import update_event_dates_with_recurrence_rule
event = Event.query.get(event_id)
date_definition = event.date_definitions[0]
date_definition.start = create_berlin_date(2030, 12, 31, 14, 30)
date_definition.end = create_berlin_date(2030, 12, 31, 16, 30)
update_event_dates_with_recurrence_rule(event)
len_dates = len(event.dates)
assert len_dates == 1
event_date = event.dates[0]
assert event_date.start == date_definition.start
assert event_date.end == date_definition.end
# Update again
update_event_dates_with_recurrence_rule(event)
len_dates = len(event.dates)
assert len_dates == 1
event_date = event.dates[0]
assert event_date.start == date_definition.start
assert event_date.end == date_definition.end
# All-day
date_definition.allday = True
update_event_dates_with_recurrence_rule(event)
len_dates = len(event.dates)
assert len_dates == 1
event_date = event.dates[0]
assert event_date.start == date_definition.start
assert event_date.end == date_definition.end
assert event_date.allday
        # Repeats every 1 day, ends after 7 occurrence(s)
date_definition.recurrence_rule = "RRULE:FREQ=DAILY;COUNT=7"
update_event_dates_with_recurrence_rule(event)
len_dates = len(event.dates)
assert len_dates == 7
def test_update_event_dates_with_recurrence_rule_past(
client, seeder, utils, app, mocker
):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import Event
from project.services.event import update_event_dates_with_recurrence_rule
utils.mock_now(mocker, 2020, 1, 3)
event = Event.query.get(event_id)
date_definition = event.date_definitions[0]
date_definition.start = create_berlin_date(2020, 1, 2, 14, 30)
date_definition.end = create_berlin_date(2020, 1, 2, 16, 30)
        # Repeats every 1 day, ends after 7 occurrence(s)
date_definition.recurrence_rule = "RRULE:FREQ=DAILY;COUNT=7"
update_event_dates_with_recurrence_rule(event)
        # Only 6 dates should remain (the first occurrence was yesterday)
len_dates = len(event.dates)
assert len_dates == 6
        # The first date is today
event_date = event.dates[0]
assert event_date.start == create_berlin_date(2020, 1, 3, 14, 30)
assert event_date.end == create_berlin_date(2020, 1, 3, 16, 30)
def test_update_event_dates_with_recurrence_rule_past_forever(
client, seeder, utils, app, mocker
):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import Event
from project.services.event import update_event_dates_with_recurrence_rule
utils.mock_now(mocker, 2020, 1, 3)
event = Event.query.get(event_id)
date_definition = event.date_definitions[0]
date_definition.start = create_berlin_date(2019, 1, 1, 14, 30)
date_definition.end = create_berlin_date(2019, 1, 1, 16, 30)
        # Repeats every 1 day (forever)
date_definition.recurrence_rule = "RRULE:FREQ=DAILY"
update_event_dates_with_recurrence_rule(event)
        # There should be 367 dates (leap year +1)
len_dates = len(event.dates)
assert len_dates == 367
        # The first date is today
event_date = event.dates[0]
assert event_date.start == create_berlin_date(2020, 1, 3, 14, 30)
assert event_date.end == create_berlin_date(2020, 1, 3, 16, 30)
        # The last date is one year from now
event_date = event.dates[366]
assert event_date.start == create_berlin_date(2021, 1, 3, 14, 30)
assert event_date.end == create_berlin_date(2021, 1, 3, 16, 30)
def test_update_event_dates_with_recurrence_rule_exdate(
client, seeder, utils, app, mocker
):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
with app.app_context():
from project.dateutils import create_berlin_date
from project.models import Event
from project.services.event import update_event_dates_with_recurrence_rule
utils.mock_now(mocker, 2021, 6, 1)
event = Event.query.get(event_id)
date_definition = event.date_definitions[0]
date_definition.start = create_berlin_date(2021, 4, 21, 17, 0)
date_definition.end = create_berlin_date(2021, 4, 21, 18, 0)
        # Repeats every Wednesday
date_definition.recurrence_rule = "RRULE:FREQ=WEEKLY;BYDAY=WE;UNTIL=20211231T000000\nEXDATE:20210216T000000,20210223T000000,20210602T000000"
update_event_dates_with_recurrence_rule(event)
        # The first date should not be June 2 (excluded) but June 9
event_date = event.dates[0]
assert event_date.start == create_berlin_date(2021, 6, 9, 17, 0)
assert event_date.end == create_berlin_date(2021, 6, 9, 18, 0)
def test_get_meta_data(seeder, app, db):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(admin_unit_id)
photo_id = seeder.upsert_default_image()
with app.app_context():
from project.models import Event, EventAttendanceMode, Location
from project.services.event import get_meta_data
event = Event.query.get(event_id)
event.attendance_mode = EventAttendanceMode.offline
location = Location()
location.city = "Stadt"
event.event_place.location = location
event.photo_id = photo_id
db.session.commit()
with app.test_request_context():
meta = get_meta_data(event)
assert meta is not None
def test_get_recurring_events(client, seeder, app):
user_id, admin_unit_id = seeder.setup_base()
event_id = seeder.create_event(
admin_unit_id, recurrence_rule="RRULE:FREQ=DAILY;COUNT=7"
)
seeder.create_event(admin_unit_id, recurrence_rule=None)
seeder.create_event(admin_unit_id, recurrence_rule="")
with app.app_context():
from project.services.event import get_recurring_events
recurring_events = get_recurring_events()
assert len(recurring_events) == 1
assert recurring_events[0].id == event_id
def test_get_events_query(client, seeder, app):
_, admin_unit_id = seeder.setup_base()
seeder.create_event(admin_unit_id)
seeder.upsert_event_place(admin_unit_id, "Other Place")
with app.app_context():
from project.services.event import get_events_query
from project.services.event_search import EventSearchParams
params = EventSearchParams()
params.admin_unit_id = admin_unit_id
params.can_read_private_events = True
events = get_events_query(params)
pagination = events.paginate()
assert pagination.total == 1
@pytest.mark.parametrize(
"index, event_descs, keyword, results, order",
[
(0, ("Führung durch Goslar", "Other"), "Goslar", 1, None),
(1, ("Führung durch Goslar", "Other"), "Führung", 1, None),
(2, ("Führung durch Goslar", "Other"), "Fuehrung", 0, None),
(3, ("Führung durch Goslar", "Other"), "Goslar Führung", 1, None),
(
4,
("Führung durch Goslar", "Führung durch Soest"),
"Goslar Führung",
1,
None,
),
(
5,
(
"Führung durch Goslar",
"Führung durch Soest",
"Führung durch Berlin",
),
"Führung (Goslar OR Soest)",
2,
None,
),
],
)
def test_get_events_fulltext(
client, seeder, app, index, event_descs, keyword, results, order
):
_, admin_unit_id = seeder.setup_base()
if type(event_descs) is not tuple:
event_descs = [event_descs]
event_ids = list()
for event_desc in event_descs:
event_id = seeder.create_event(admin_unit_id, name=event_desc)
event_ids.append(event_id)
with app.app_context():
from project.services.event import get_events_query
from project.services.event_search import EventSearchParams
params = EventSearchParams()
params.keyword = keyword
events = get_events_query(params)
pagination = events.paginate()
assert pagination.total == results
if not order:
order = range(0, len(event_descs) - 1)
i = 0
for item in pagination.items:
assert item.id == event_ids[order[i]]
i = i + 1
| 34.330882 | 148 | 0.668344 |
083497431e4ea34ae2484440e44fa0809d4e3205
| 2,559 |
py
|
Python
|
Common_Algorithms/HMM/Wordseg_02.py
|
xrick/WolfNLP
|
ff59be5a180813e7127c7eaf52db2478c20e8431
|
[
"MIT"
] | null | null | null |
Common_Algorithms/HMM/Wordseg_02.py
|
xrick/WolfNLP
|
ff59be5a180813e7127c7eaf52db2478c20e8431
|
[
"MIT"
] | null | null | null |
Common_Algorithms/HMM/Wordseg_02.py
|
xrick/WolfNLP
|
ff59be5a180813e7127c7eaf52db2478c20e8431
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# By tostq <[email protected]>
# Blog: blog.csdn.net/tostq
import numpy as np
import hmm
from hmmlearn.hmm import MultinomialHMM
state_M = 4
word_N = 0
state_list = {'B':0,'M':1,'E':2,'S':3}
# Get the BMES state sequence for a word
# e.g. (我:S), (你好:BE), (恭喜发财:BMME)
def getList(input_str):
    output_str = []
    if len(input_str) == 1:
        output_str.append(3)
    elif len(input_str) == 2:
        output_str = [0, 2]
    else:
        M_num = len(input_str) - 2
        M_list = [1] * M_num
        output_str.append(0)
        output_str.extend(M_list)
        output_str.append(2)
    return output_str
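# Quick sanity check of the BMES tagging (hypothetical usage):
#   getList("我")       -> [3]            # S
#   getList("你好")     -> [0, 2]         # B E
#   getList("恭喜发财")  -> [0, 1, 1, 2]   # B M M E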
# Preprocess the corpus: RenMinData.txt_utf8
def process_data():
    ifp = open("RenMinData.txt_utf8", encoding="utf-8")
    line_num = 0
    word_dic = {}
    word_ind = 0
    line_seq = []
    state_seq = []
    # Store each sentence's character sequence and per-character state
    # sequence, and build the character vocabulary along the way
    for line in ifp:
        line_num += 1
        if line_num % 10000 == 0:
            print(line_num)
        line = line.strip()
        if not line:
            continue
        word_list = []
        for i in range(len(line)):
            if line[i] == " ":
                continue
            word_list.append(line[i])
            # Build the vocabulary
            if line[i] not in word_dic:
                word_dic[line[i]] = word_ind
                word_ind += 1
        line_seq.append(word_list)
        lineArr = line.split(" ")
        line_state = []
        for item in lineArr:
            line_state += getList(item)
        state_seq.append(np.array(line_state))
    ifp.close()
    lines = []
    for i in range(len(line_seq)):
        lines.append(np.array([[word_dic[x]] for x in line_seq[i]]))
    return lines, state_seq, word_dic
# Convert a sentence into a sequence of vocabulary indices
def word_trans(wordline, word_dic):
    word_inc = []
    line = wordline.strip()
    for n in range(len(line)):
        word_inc.append([word_dic[line[n]]])
    return np.array(word_inc)
X, Z, word_dic = process_data()
wordseg_hmm = hmm.DiscreteHMM(4,len(word_dic),5)
wordseg_hmm.train_batch(X,Z)
print "startprob_prior: ", wordseg_hmm.start_prob
print "transmit: ", wordseg_hmm.transmat_prob
sentence_1 = "我要回家吃饭"
sentence_2 = "中国人民从此站起来了"
sentence_3 = "经党中央研究决定"
sentence_4 = "江主席发表重要讲话"
Z_1 = wordseg_hmm.decode(word_trans(sentence_1,word_dic))
Z_2 = wordseg_hmm.decode(word_trans(sentence_2,word_dic))
Z_3 = wordseg_hmm.decode(word_trans(sentence_3,word_dic))
Z_4 = wordseg_hmm.decode(word_trans(sentence_4,word_dic))
print u"我要回家吃饭: ", Z_1
print u"中国人民从此站起来了: ", Z_2
print u"经党中央研究决定: ", Z_3
print u"江主席发表重要讲话: ", Z_4
| 25.088235 | 68 | 0.623681 |
f25bbeef8b86b5586bcf49e392dc55c4a785e375
| 913 |
py
|
Python
|
Algorithms/Implementation/forming_a_magic_square.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/forming_a_magic_square.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/forming_a_magic_square.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
all_cases = [[[8, 1, 6], [3, 5, 7], [4, 9, 2]],
[[6, 1, 8], [7, 5, 3], [2, 9, 4]],
[[4, 9, 2], [3, 5, 7], [8, 1, 6]],
[[2, 9, 4], [7, 5, 3], [6, 1, 8]],
[[8, 3, 4], [1, 5, 9], [6, 7, 2]],
[[4, 3, 8], [9, 5, 1], [2, 7, 6]],
[[6, 7, 2], [1, 5, 9], [8, 3, 4]],
[[2, 7, 6], [9, 5, 1], [4, 3, 8]], ]
s = []
for s_i in range(3):
s_t = [int(s_temp) for s_temp in input().strip().split(' ')]
s.append(s_t)
diffs = [sum([abs(cases[i][j] - s[i][j]) for i in range(0, 3) for j in range(0, 3)]) for cases in all_cases]
print(min(diffs))
# Origin code
# for cases in all_cases:
# diff = 0
# for i in range(0, 3):
# for j in range(0, 3):
# diff += abs(cases[i][j] - s[i][j])
# diffs.append(diff)
#
# tranpose sample
# seeds.append([list(t) for t in list(zip(*seed))])
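# Worked example (hypothetical input): if s equals all_cases[0] exactly, its
# diff against that case is 0, so min(diffs) is 0 and the script prints 0.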
| 29.451613 | 108 | 0.41402 |
d9d103effb7ebd2fcd272e2fd1c6ce75e5453a53
| 8,644 |
py
|
Python
|
methods/transformers/src/transformers/configuration_electra.py
|
INK-USC/RiddleSense
|
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
|
[
"MIT"
] | 3 |
2021-07-06T20:02:31.000Z
|
2022-03-27T13:13:01.000Z
|
methods/transformers/src/transformers/configuration_electra.py
|
INK-USC/RiddleSense
|
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
|
[
"MIT"
] | null | null | null |
methods/transformers/src/transformers/configuration_electra.py
|
INK-USC/RiddleSense
|
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ELECTRA model configuration """
from .configuration_utils import PretrainedConfig
from .utils import logging
logger = logging.get_logger(__name__)
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/config.json",
"google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/config.json",
"google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/config.json",
"google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/config.json",
"google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/config.json",
"google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/config.json",
}
class ElectraConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.ElectraModel` or a
:class:`~transformers.TFElectraModel`. It is used to instantiate a ELECTRA model according to the specified
arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar
configuration to that of the ELECTRA `google/electra-small-discriminator
<https://huggingface.co/google/electra-small-discriminator>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30522):
Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.ElectraModel` or
:class:`~transformers.TFElectraModel`.
embedding_size (:obj:`int`, `optional`, defaults to 128):
            Dimensionality of the embedding layer.
hidden_size (:obj:`int`, `optional`, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.ElectraModel` or
:class:`~transformers.TFElectraModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
summary_type (:obj:`str`, `optional`, defaults to :obj:`"first"`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- :obj:`"last"`: Take the last token hidden state (like XLNet).
- :obj:`"first"`: Take the first token hidden state (like BERT).
- :obj:`"mean"`: Take the mean of all tokens hidden states.
- :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- :obj:`"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (:obj:`str`, `optional`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass :obj:`"gelu"` for a gelu activation to the output, any other value will result in no activation.
        summary_last_dropout (:obj:`float`, `optional`, defaults to 0.1):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
Examples::
>>> from transformers import ElectraModel, ElectraConfig
>>> # Initializing a ELECTRA electra-base-uncased style configuration
>>> configuration = ElectraConfig()
>>> # Initializing a model from the electra-base-uncased style configuration
>>> model = ElectraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "electra"
def __init__(
self,
vocab_size=30522,
embedding_size=128,
hidden_size=256,
num_hidden_layers=12,
num_attention_heads=4,
intermediate_size=1024,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
summary_type="first",
summary_use_proj=True,
summary_activation="gelu",
summary_last_dropout=0.1,
pad_token_id=0,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_last_dropout = summary_last_dropout
| 54.708861 | 144 | 0.686719 |
76e4aff3ee9efc9424a901af81997883bce88779
| 238 |
py
|
Python
|
FUNCTIONS CLASS 12.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
FUNCTIONS CLASS 12.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | null | null | null |
FUNCTIONS CLASS 12.py
|
sanxy/hacktoberfest-1
|
913582b310688d496602e8b1bc9166cb64866e38
|
[
"MIT"
] | 1 |
2020-09-30T18:53:05.000Z
|
2020-09-30T18:53:05.000Z
|
import mysql.connector
mydb = mysql.connector.connect(host="localhost",user='root',passwd='thebeast',database='employees21')
mycursor = mydb.cursor()
mycursor.execute('select ename from emp1')
for i in mycursor:
    print(i)
| 23.8 | 102 | 0.718487 |
0a464ba136340553b3e81399e9eab7ebc0939ffa
| 11,260 |
py
|
Python
|
rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 37 |
2019-06-07T07:39:00.000Z
|
2022-01-27T08:32:57.000Z
|
rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 209 |
2020-03-18T18:28:12.000Z
|
2022-03-01T13:42:29.000Z
|
rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 65 |
2019-05-21T12:16:53.000Z
|
2022-02-23T10:54:15.000Z
|
import logging
from collections import defaultdict, OrderedDict
from pathlib import Path
import numpy as np
from typing import Any, Dict, Optional, Text, List, Type, Union
from rasa.nlu.tokenizers.spacy_tokenizer import POS_TAG_KEY
from rasa.shared.constants import DOCS_URL_COMPONENTS
from rasa.nlu.components import Component
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.tokenizers.tokenizer import Tokenizer
from rasa.nlu.featurizers.featurizer import SparseFeaturizer
from rasa.shared.nlu.training_data.features import Features
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import TOKENS_NAMES, FEATURIZER_CLASS_ALIAS
from rasa.shared.nlu.constants import TEXT, FEATURE_TYPE_SEQUENCE
from rasa.nlu.model import Metadata
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
END_OF_SENTENCE = "EOS"
BEGIN_OF_SENTENCE = "BOS"
class LexicalSyntacticFeaturizer(SparseFeaturizer):
"""Creates features for entity extraction.
Moves with a sliding window over every token in the user message and creates
features according to the configuration.
"""
@classmethod
def required_components(cls) -> List[Type[Component]]:
return [Tokenizer]
defaults = {
# 'features' is [before, word, after] array with before, word,
# after holding keys about which features to use for each word,
# for example, 'title' in array before will have the feature
# "is the preceding word in title case?"
# POS features require 'SpacyTokenizer'.
"features": [
["low", "title", "upper"],
["BOS", "EOS", "low", "upper", "title", "digit"],
["low", "title", "upper"],
]
}
function_dict = {
"low": lambda token: token.text.islower(),
"title": lambda token: token.text.istitle(),
"prefix5": lambda token: token.text[:5],
"prefix2": lambda token: token.text[:2],
"suffix5": lambda token: token.text[-5:],
"suffix3": lambda token: token.text[-3:],
"suffix2": lambda token: token.text[-2:],
"suffix1": lambda token: token.text[-1:],
"pos": lambda token: token.data.get(POS_TAG_KEY)
if POS_TAG_KEY in token.data
else None,
"pos2": lambda token: token.data.get(POS_TAG_KEY)[:2]
if "pos" in token.data
else None,
"upper": lambda token: token.text.isupper(),
"digit": lambda token: token.text.isdigit(),
}
def __init__(
self,
component_config: Dict[Text, Any],
feature_to_idx_dict: Optional[Dict[Text, Any]] = None,
):
super().__init__(component_config)
self.feature_to_idx_dict = feature_to_idx_dict or {}
self.number_of_features = self._calculate_number_of_features()
def _calculate_number_of_features(self) -> int:
return sum(
[
len(feature_values.values())
for feature_values in self.feature_to_idx_dict.values()
]
)
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
self.feature_to_idx_dict = self._create_feature_to_idx_dict(training_data)
self.number_of_features = self._calculate_number_of_features()
for example in training_data.training_examples:
self._create_sparse_features(example)
def process(self, message: Message, **kwargs: Any) -> None:
self._create_sparse_features(message)
def _create_feature_to_idx_dict(
self, training_data: TrainingData
) -> Dict[Text, Dict[Text, int]]:
"""Create dictionary of all feature values.
Each feature key, defined in the component configuration, points to
different feature values and their indices in the overall resulting
feature vector.
"""
# get all possible feature values
all_features = []
for example in training_data.training_examples:
tokens = example.get(TOKENS_NAMES[TEXT])
if tokens:
all_features.append(self._tokens_to_features(tokens))
# build vocabulary of features
feature_vocabulary = self._build_feature_vocabulary(all_features)
# assign a unique index to each feature value
return self._map_features_to_indices(feature_vocabulary)
@staticmethod
def _map_features_to_indices(
feature_vocabulary: Dict[Text, List[Text]]
) -> Dict[Text, Dict[Text, int]]:
feature_to_idx_dict = {}
offset = 0
for feature_name, feature_values in feature_vocabulary.items():
feature_to_idx_dict[feature_name] = {
str(feature_value): feature_idx
for feature_idx, feature_value in enumerate(
sorted(feature_values), start=offset
)
}
offset += len(feature_values)
return feature_to_idx_dict
@staticmethod
def _build_feature_vocabulary(
features: List[List[Dict[Text, Any]]]
) -> Dict[Text, List[Text]]:
feature_vocabulary = defaultdict(set)
for sentence_features in features:
for token_features in sentence_features:
for feature_name, feature_value in token_features.items():
feature_vocabulary[feature_name].add(feature_value)
# sort items to ensure same order every time (for tests)
feature_vocabulary = OrderedDict(sorted(feature_vocabulary.items()))
return feature_vocabulary
def _create_sparse_features(self, message: Message) -> None:
"""Convert incoming messages into sparse features using the configured
features."""
import scipy.sparse
tokens = message.get(TOKENS_NAMES[TEXT])
# this check is required because there might be training data examples without TEXT,
# e.g., `Message("", {action_name: "action_listen"})`
if tokens:
sentence_features = self._tokens_to_features(tokens)
one_hot_seq_feature_vector = self._features_to_one_hot(sentence_features)
sequence_features = scipy.sparse.coo_matrix(one_hot_seq_feature_vector)
final_sequence_features = Features(
sequence_features,
FEATURE_TYPE_SEQUENCE,
TEXT,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sequence_features)
def _tokens_to_features(self, tokens: List[Token]) -> List[Dict[Text, Any]]:
"""Convert words into discrete features."""
configured_features = self.component_config["features"]
sentence_features = []
for token_idx in range(len(tokens)):
# get the window size (e.g. before, word, after) of the configured features
# in case of an even number we will look at one more word before,
# e.g. window size 4 will result in a window range of
# [-2, -1, 0, 1] (0 = current word in sentence)
window_size = len(configured_features)
half_window_size = window_size // 2
window_range = range(-half_window_size, half_window_size + window_size % 2)
prefixes = [str(i) for i in window_range]
token_features = {}
for pointer_position in window_range:
current_idx = token_idx + pointer_position
# skip, if current_idx is pointing to a non-existing token
if current_idx < 0 or current_idx >= len(tokens):
continue
token = tokens[token_idx + pointer_position]
current_feature_idx = pointer_position + half_window_size
prefix = prefixes[current_feature_idx]
for feature in configured_features[current_feature_idx]:
token_features[f"{prefix}:{feature}"] = self._get_feature_value(
feature, token, token_idx, pointer_position, len(tokens)
)
sentence_features.append(token_features)
return sentence_features
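    # Illustrative (hypothetical) output of _tokens_to_features with the default
    # config, for the middle token of ["We", "Love", "nlp"]; prefixes -1/0/1
    # index the previous, current and next token:
    #   {'-1:low': False, '-1:title': True, '-1:upper': False,
    #    '0:BOS': False, '0:EOS': False, '0:low': False, '0:upper': False,
    #    '0:title': True, '0:digit': False,
    #    '1:low': True, '1:title': False, '1:upper': False}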
def _features_to_one_hot(
self, sentence_features: List[Dict[Text, Any]]
) -> np.ndarray:
"""Convert the word features into a one-hot presentation using the indices
in the feature-to-idx dictionary."""
one_hot_seq_feature_vector = np.zeros(
[len(sentence_features), self.number_of_features]
)
for token_idx, token_features in enumerate(sentence_features):
for feature_name, feature_value in token_features.items():
feature_value_str = str(feature_value)
if (
feature_name in self.feature_to_idx_dict
and feature_value_str in self.feature_to_idx_dict[feature_name]
):
feature_idx = self.feature_to_idx_dict[feature_name][
feature_value_str
]
one_hot_seq_feature_vector[token_idx][feature_idx] = 1
return one_hot_seq_feature_vector
def _get_feature_value(
self,
feature: Text,
token: Token,
token_idx: int,
pointer_position: int,
token_length: int,
) -> Union[bool, int, Text]:
if feature == END_OF_SENTENCE:
return token_idx + pointer_position == token_length - 1
if feature == BEGIN_OF_SENTENCE:
return token_idx + pointer_position == 0
if feature not in self.function_dict:
raise ValueError(
f"Configured feature '{feature}' not valid. Please check "
f"'{DOCS_URL_COMPONENTS}' for valid configuration parameters."
)
value = self.function_dict[feature](token)
if value is None:
logger.debug(
f"Invalid value '{value}' for feature '{feature}'."
f" Feature is ignored."
)
return value
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional[Metadata] = None,
cached_component: Optional["LexicalSyntacticFeaturizer"] = None,
**kwargs: Any,
) -> "LexicalSyntacticFeaturizer":
file_name = meta.get("file")
feature_to_idx_file = Path(model_dir) / f"{file_name}.feature_to_idx_dict.pkl"
feature_to_idx_dict = io_utils.json_unpickle(feature_to_idx_file)
return LexicalSyntacticFeaturizer(meta, feature_to_idx_dict=feature_to_idx_dict)
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persist this model into the passed directory.
Return the metadata necessary to load the model again."""
feature_to_idx_file = Path(model_dir) / f"{file_name}.feature_to_idx_dict.pkl"
io_utils.json_pickle(feature_to_idx_file, self.feature_to_idx_dict)
return {"file": file_name}
| 37.161716 | 92 | 0.642007 |
6aadbc149a4009a745fe1c5424569f28af289cd3
| 295 |
py
|
Python
|
exercises/fr/exc_01_08_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/fr/exc_01_08_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/fr/exc_01_08_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
nlp = spacy.load("fr_core_news_sm")
text = "Apple a été créée en 1976 par Steve Wozniak, Steve Jobs et Ron Wayne."
# Process the text
doc = ____
# Iterate over the predicted entities
for ent in ____.____:
# Print the entity text and its label
print(ent.____, ____.____)
| 21.071429 | 78 | 0.715254 |
0a801caf4b2b74512bea3fecc7a23036652df9bc
| 303 |
py
|
Python
|
web/interceptors/ErrorInterceptor.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | 2 |
2019-06-10T08:57:47.000Z
|
2021-06-12T16:22:15.000Z
|
web/interceptors/ErrorInterceptor.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
web/interceptors/ErrorInterceptor.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from application import app
from common.libs.Helper import ops_render
from common.libs.LogService import LogService
@app.errorhandler(404)
def error_404(e):
LogService.add_error_log(str(e))
    return ops_render('error/error.html', {'status': 404, 'msg': 'Sorry, the page you requested does not exist'})
| 27.545455 | 82 | 0.732673 |
86e434ce86d2a788348dd975202cf547479816c8
| 47,418 |
py
|
Python
|
nz_crawl_demo/day3/demo1.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
nz_crawl_demo/day3/demo1.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
nz_crawl_demo/day3/demo1.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from lxml import etree
text = """
<ul class="item_con_list" style="display: block;">
<li class="con_list_item first_row default_list" data-index="0" data-positionid="3580958" data-salary="18k-25k" data-company="众禄金融" data-positionname="python工程师" data-companyid="50285" data-hrid="1509336" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3580958.html" target="_blank" data-index="0" data-lg-tj-id="8E00" data-lg-tj-no="
0101
" data-lg-tj-cid="3580958" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">python工程师</h3>
<span class="add">[<em>罗湖区</em>]</span>
</a>
<span class="format-time">3天前发布</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="石经理">
<input type="hidden" class="hr_position" value="招聘经理">
<input type="hidden" class="target_hr" value="1509336">
<input type="hidden" class="target_position" value="3580958">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0101" data-lg-tj-cid="50285" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
<i class="pos_icon pos_icon_12"></i></div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">18k-25k</span>
<!--<i></i>-->经验3-5年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/50285.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0101
" data-lg-tj-cid="50285" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">众禄金融</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网,金融 / 上市公司
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/50285.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0101
" data-lg-tj-cid="50285" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image2/M00/18/65/CgotOVn65YSAAu7lAAAVwDCKc5w606.png" alt="众禄金融" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>金融</span>
<span>信息安全</span>
<span>php</span>
<span>Java</span>
<span>web</span>
</div>
<div class="li_b_r">“互联网金融,高速发展,五险一金,金融中心”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="1" data-positionid="3172437" data-salary="10k-15k" data-company="乐易网络" data-positionname="python开发工程师" data-companyid="33627" data-hrid="569371" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3172437.html" target="_blank" data-index="1" data-lg-tj-id="8E00" data-lg-tj-no="
0102
" data-lg-tj-cid="3172437" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">python开发工程师</h3>
<span class="add">[<em>南山区</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image/M00/4C/88/CgpFT1lwWhiAB5C2AAD55Ttkxck626.jpg">
<input type="hidden" class="hr_name" value="Tina">
<input type="hidden" class="hr_position" value="HR">
<input type="hidden" class="target_hr" value="569371">
<input type="hidden" class="target_position" value="3172437">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0102" data-lg-tj-cid="33627" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1">
<div class="chat_pop_up">
<span class="arrow"></span>
<dl class="chat_main clearfix">
<dt><div class="chat_qrcode"><canvas width="116" height="116"></canvas></div></dt>
<dd>
<dl class="chat_head clearfix">
<dt>
<img class="hr_headpic" src="https://static.lagou.com/i/image/M00/4C/88/CgpFT1lwWhiAB5C2AAD55Ttkxck626.jpg" alt="hr头像" width="62" height="62">
</dt>
<dd>
<div class="hr_name">Tina</div>
<div class="hr_position">HR</div>
</dd>
<dd class="tips_text">Hi,对我发布的职位感兴趣?用拉勾APP扫码,直接和我聊聊吧!</dd>
</dl>
</dd>
</dl>
</div>
</div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">10k-15k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/33627.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0102
" data-lg-tj-cid="33627" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">乐易网络</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网,游戏 / A轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/33627.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0102
" data-lg-tj-cid="33627" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/57/05/CgqKkVfOIv6AQwyPAAEg94dso0Q147.png" alt="乐易网络" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>php</span>
<span>MySQL</span>
<span>后端</span>
</div>
<div class="li_b_r">“五险一金,年终奖金”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="2" data-positionid="3088129" data-salary="10k-20k" data-company="对酒当歌" data-positionname="python开发工程师" data-companyid="32901" data-hrid="557623" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3088129.html" target="_blank" data-index="2" data-lg-tj-id="8E00" data-lg-tj-no="
0103
" data-lg-tj-cid="3088129" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">python开发工程师</h3>
<span class="add">[<em>宝安区</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image2/M00/1D/93/CgoB5loJToSAXS5UAAAxRCAgR0I656.jpg">
<input type="hidden" class="hr_name" value="Fiona">
<input type="hidden" class="hr_position" value="HR">
<input type="hidden" class="target_hr" value="557623">
<input type="hidden" class="target_position" value="3088129">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0103" data-lg-tj-cid="32901" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">10k-20k</span>
<!--<i></i>-->经验1-3年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/32901.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0103
" data-lg-tj-cid="32901" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">对酒当歌</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
电子商务,O2O / A轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/32901.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0103
" data-lg-tj-cid="32901" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image2/M00/1D/93/CgoB5loJTs6AbgL2AAAxM5rVkG8611.jpg" alt="对酒当歌" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>php</span>
<span>Java</span>
<span>MySQL</span>
</div>
<div class="li_b_r">“五险一金,免费班车”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="3" data-positionid="3163697" data-salary="15k-30k" data-company="笨鸟社交" data-positionname="python工程师" data-companyid="84086" data-hrid="3628930" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3163697.html" target="_blank" data-index="3" data-lg-tj-id="8E00" data-lg-tj-no="
0104
" data-lg-tj-cid="3163697" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">python工程师</h3>
<span class="add">[<em>南山区</em>]</span>
</a>
<span class="format-time">1天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image2/M00/21/C0/CgotOVoTm-KAL5xbAAEYJ1YAVhc922.jpg">
<input type="hidden" class="hr_name" value="马小姐">
<input type="hidden" class="hr_position" value="">
<input type="hidden" class="target_hr" value="3628930">
<input type="hidden" class="target_position" value="3163697">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0104" data-lg-tj-cid="84086" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
<i class="pos_icon pos_icon_12"></i></div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-30k</span>
<!--<i></i>-->经验不限 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/84086.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0104
" data-lg-tj-cid="84086" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">笨鸟社交</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
企业服务 / B轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/84086.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0104
" data-lg-tj-cid="84086" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/10/1C/CgpFT1jwK86AVr_OAACaAuJFFLw446.png" alt="笨鸟社交" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>爬虫</span>
<span>后端</span>
<span>初级</span>
<span>中级</span>
<span>搜索</span>
</div>
<div class="li_b_r">“海量数据”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="4" data-positionid="3867111" data-salary="15k-28k" data-company="博奥特科技" data-positionname="Python工程师" data-companyid="69152" data-hrid="9207315" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3867111.html" target="_blank" data-index="4" data-lg-tj-id="8E00" data-lg-tj-no="
0105
" data-lg-tj-cid="3867111" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python工程师</h3>
<span class="add">[<em>上步</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image2/M00/1C/84/CgoB5loFXcaAMM23AABudjCbyWs865.png">
<input type="hidden" class="hr_name" value="qiuff">
<input type="hidden" class="hr_position" value="招聘部">
<input type="hidden" class="target_hr" value="9207315">
<input type="hidden" class="target_position" value="3867111">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0105" data-lg-tj-cid="69152" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-28k</span>
<!--<i></i>-->经验3-5年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/69152.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0105
" data-lg-tj-cid="69152" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">博奥特科技</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网,金融 / 未融资
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/69152.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0105
" data-lg-tj-cid="69152" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image2/M00/18/B4/CgoB5ln71t2ADbc3AABFDELpI7U021.jpg" alt="博奥特科技" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>金融</span>
<span>Java</span>
<span>cobol</span>
</div>
<div class="li_b_r">“发展前景好,双休,互联网金融”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="5" data-positionid="2889252" data-salary="15k-30k" data-company="万科物业" data-positionname="Python开发工程师" data-companyid="8350" data-hrid="7279982" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/2889252.html" target="_blank" data-index="5" data-lg-tj-id="8E00" data-lg-tj-no="
0106
" data-lg-tj-cid="2889252" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python开发工程师</h3>
<span class="add">[<em>上梅林</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="xiongx04">
<input type="hidden" class="hr_position" value="">
<input type="hidden" class="target_hr" value="7279982">
<input type="hidden" class="target_position" value="2889252">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0106" data-lg-tj-cid="8350" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-30k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/8350.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0106
" data-lg-tj-cid="8350" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">万科物业</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
O2O,生活服务 / 不需要融资
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/8350.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0106
" data-lg-tj-cid="8350" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/image2/M00/08/03/CgqLKVYBEtqAD2vhAAARnNY0kzg058.png" alt="万科物业" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>资深</span>
<span>高级</span>
<span>中级</span>
<span>后端开发</span>
<span>redis</span>
</div>
<div class="li_b_r">“平台好,空间大”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="6" data-positionid="2786718" data-salary="10k-15k" data-company="环球易购" data-positionname="高级python开发工程师" data-companyid="83025" data-hrid="2117758" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/2786718.html" target="_blank" data-index="6" data-lg-tj-id="8E00" data-lg-tj-no="
0107
" data-lg-tj-cid="2786718" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">高级python开发工程师</h3>
<span class="add">[<em>南山区</em>]</span>
</a>
<span class="format-time">1天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image2/M00/1E/F1/CgoB5loL-aGASIqDAAATxYblCtQ334.jpg">
<input type="hidden" class="hr_name" value="Hipson">
<input type="hidden" class="hr_position" value="首席神秘官">
<input type="hidden" class="target_hr" value="2117758">
<input type="hidden" class="target_position" value="2786718">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0107" data-lg-tj-cid="83025" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">10k-15k</span>
<!--<i></i>-->经验3-5年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/83025.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0107
" data-lg-tj-cid="83025" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">环球易购</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
电子商务 / 上市公司
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/83025.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0107
" data-lg-tj-cid="83025" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/image1/M00/38/9F/CgYXBlWmIt6Af8k5AABvK21LZWM490.jpg?cc=0.211507520172745" alt="环球易购" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>软件开发</span>
</div>
<div class="li_b_r">“上市公司 公司规模大 发展机会多”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="7" data-positionid="3325916" data-salary="15k-23k" data-company="晶泰科技" data-positionname="高级Python开发工程师" data-companyid="76066" data-hrid="5281055" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3325916.html" target="_blank" data-index="7" data-lg-tj-id="8E00" data-lg-tj-no="
0108
" data-lg-tj-cid="3325916" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">高级Python开发工程师</h3>
<span class="add">[<em>香蜜湖</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image/M00/48/46/CgpFT1loITKAe1YZAAD7YMAGvnI602.jpg">
<input type="hidden" class="hr_name" value="李丹慧">
<input type="hidden" class="hr_position" value="HR经理">
<input type="hidden" class="target_hr" value="5281055">
<input type="hidden" class="target_position" value="3325916">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0108" data-lg-tj-cid="76066" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-23k</span>
<!--<i></i>-->经验1-3年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/76066.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0108
" data-lg-tj-cid="76066" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">晶泰科技</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
企业服务,医疗健康 / B轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/76066.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0108
" data-lg-tj-cid="76066" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/46/CE/CgqKkVeNuE-AMgmGAACJzPxVN30374.jpg" alt="晶泰科技" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>云计算</span>
<span>后端开发</span>
<span>web</span>
<span>Go</span>
<span>django</span>
</div>
<div class="li_b_r">“大牛多,公司氛围好,周末双休,成长快”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="8" data-positionid="3727987" data-salary="14k-20k" data-company="盖威" data-positionname="Python开发工程师" data-companyid="3956" data-hrid="5713991" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3727987.html" target="_blank" data-index="8" data-lg-tj-id="8E00" data-lg-tj-no="
0109
" data-lg-tj-cid="3727987" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python开发工程师</h3>
<span class="add">[<em>科技园</em>]</span>
</a>
<span class="format-time">3天前发布</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="hr-gw">
<input type="hidden" class="hr_position" value="HR经理">
<input type="hidden" class="target_hr" value="5713991">
<input type="hidden" class="target_position" value="3727987">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0109" data-lg-tj-cid="3956" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">14k-20k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/3956.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0109
" data-lg-tj-cid="3956" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">盖威</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
金融 / 不需要融资
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/3956.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0109
" data-lg-tj-cid="3956" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/image1/M00/00/0A/Cgo8PFTUWBWAJ-eQAAA1pEtnnYo973.png" alt="盖威" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>linux</span>
<span>C++</span>
</div>
<div class="li_b_r">“福利待遇好”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="9" data-positionid="2010228" data-salary="15k-25k" data-company="极光" data-positionname="高级Python开发工程师" data-companyid="917" data-hrid="2153176" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/2010228.html" target="_blank" data-index="9" data-lg-tj-id="8E00" data-lg-tj-no="
0110
" data-lg-tj-cid="2010228" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">高级Python开发工程师</h3>
<span class="add">[<em>南头</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="i/image2/M00/0A/38/CgotOVncPOGAR2E7AABUArhVY0U298.jpg">
<input type="hidden" class="hr_name" value="Maggie">
<input type="hidden" class="hr_position" value="HRBP">
<input type="hidden" class="target_hr" value="2153176">
<input type="hidden" class="target_position" value="2010228">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0110" data-lg-tj-cid="917" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
<i class="pos_icon pos_icon_12"></i></div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-25k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/917.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0110
" data-lg-tj-cid="917" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">极光</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网,数据服务 / D轮及以上
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/917.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0110
" data-lg-tj-cid="917" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/37/20/CgqKkVdfms6Ac6dNAABY3gBvuqI944.jpg" alt="极光" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>云计算</span>
<span>linux</span>
<span>云平台</span>
<span>django</span>
</div>
<div class="li_b_r">“五险一金、14薪/年、下午茶、工作餐等等”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="10" data-positionid="2022151" data-salary="20k-35k" data-company="航仕科技" data-positionname="Python开发" data-companyid="129877" data-hrid="5095526" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/2022151.html" target="_blank" data-index="10" data-lg-tj-id="8E00" data-lg-tj-no="
0111
" data-lg-tj-cid="2022151" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python开发</h3>
<span class="add">[<em>科技园</em>]</span>
</a>
<span class="format-time">2017-11-21</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="hr">
<input type="hidden" class="hr_position" value="高级招聘经理">
<input type="hidden" class="target_hr" value="5095526">
<input type="hidden" class="target_position" value="2022151">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0111" data-lg-tj-cid="129877" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">20k-35k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/129877.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0111
" data-lg-tj-cid="129877" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">航仕科技</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网,O2O / C轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/129877.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0111
" data-lg-tj-cid="129877" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/2C/9A/Cgp3O1c5oTuAVfXXAABDG8Kg38w820.jpg" alt="航仕科技" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>年底双薪</span>
<span>午餐补助</span>
<span>专项奖金</span>
<span>绩效奖金</span>
</div>
<div class="li_b_r">“BAT的薪资福利待遇,更有技术大牛传授技术”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="11" data-positionid="3759498" data-salary="15k-30k" data-company="Minieye" data-positionname="python工程师" data-companyid="124262" data-hrid="4784940" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3759498.html" target="_blank" data-index="11" data-lg-tj-id="8E00" data-lg-tj-no="
0112
" data-lg-tj-cid="3759498" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">python工程师</h3>
<span class="add">[<em>南山区</em>]</span>
</a>
<span class="format-time">12:39发布</span>
<input type="hidden" class="hr_portrait" value="i/image/M00/6C/B8/CgpEMlmtQ92AOPnVAACbmIFfngQ360.jpg">
<input type="hidden" class="hr_name" value="HRM">
<input type="hidden" class="hr_position" value="">
<input type="hidden" class="target_hr" value="4784940">
<input type="hidden" class="target_position" value="3759498">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0112" data-lg-tj-cid="124262" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-30k</span>
<!--<i></i>-->经验1-3年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/124262.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0112
" data-lg-tj-cid="124262" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">Minieye</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
硬件,其他 / A轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/124262.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0112
" data-lg-tj-cid="124262" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/B1/8D/CgqKkVi5Jd6AA65GAAAmAaWjB9U423.png" alt="Minieye" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>无人驾驶</span>
<span>国际标准</span>
<span>车厂合作</span>
<span>海归团队</span>
</div>
<div class="li_b_r">“无人驾驶,车厂合作,扁平化管理,股票期权”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="12" data-positionid="2268991" data-salary="15k-30k" data-company="糗事百科" data-positionname="Python" data-companyid="1015" data-hrid="94398" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/2268991.html" target="_blank" data-index="12" data-lg-tj-id="8E00" data-lg-tj-no="
0113
" data-lg-tj-cid="2268991" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python</h3>
<span class="add">[<em>科技园</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="image1/M00/1C/89/Cgo8PFUmLZ-AJRIkAAAgw3YTWBk532.png">
<input type="hidden" class="hr_name" value="糗百招聘">
<input type="hidden" class="hr_position" value="招聘HR">
<input type="hidden" class="target_hr" value="94398">
<input type="hidden" class="target_position" value="2268991">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0113" data-lg-tj-cid="1015" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
<i class="pos_icon pos_icon_12"></i></div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-30k</span>
<!--<i></i>-->经验3-5年 / 本科
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/1015.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0113
" data-lg-tj-cid="1015" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">糗事百科</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
移动互联网 / A轮
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/1015.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0113
" data-lg-tj-cid="1015" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/image1/M00/00/05/CgYXBlTUWAGAY0KwAABsvAoi2t4880.png" alt="糗事百科" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>文化娱乐</span>
</div>
<div class="li_b_r">“老司机来开车”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="13" data-positionid="3834269" data-salary="15k-25k" data-company="通力互联" data-positionname="Python开发工程师" data-companyid="123777" data-hrid="4752170" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3834269.html" target="_blank" data-index="13" data-lg-tj-id="8E00" data-lg-tj-no="
0114
" data-lg-tj-cid="3834269" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python开发工程师</h3>
<span class="add">[<em>草埔</em>]</span>
</a>
<span class="format-time">1天前发布</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="李龙辉">
<input type="hidden" class="hr_position" value="招聘经理">
<input type="hidden" class="target_hr" value="4752170">
<input type="hidden" class="target_position" value="3834269">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0114" data-lg-tj-cid="123777" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
</div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">15k-25k</span>
<!--<i></i>-->经验5-10年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/123777.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0114
" data-lg-tj-cid="123777" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">通力互联</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
电子商务,企业服务 / 不需要融资
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/123777.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0114
" data-lg-tj-cid="123777" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/i/image/M00/2E/03/CgqKkVc9jmCAI8DpAAD09YLPnBk157.png" alt="通力互联" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>云计算</span>
<span>Java</span>
<span>SVN</span>
</div>
<div class="li_b_r">“高薪高福利”</div>
</div>
</li>
<li class="con_list_item default_list" data-index="14" data-positionid="3836101" data-salary="12k-18k" data-company="金证股份" data-positionname="Python开发" data-companyid="25317" data-hrid="5715150" data-tpladword="0">
<div class="list_item_top">
<div class="position">
<div class="p_top">
<a class="position_link" href="https://www.lagou.com/jobs/3836101.html" target="_blank" data-index="14" data-lg-tj-id="8E00" data-lg-tj-no="
0115
" data-lg-tj-cid="3836101" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">
<h3 style="max-width: 180px;">Python开发</h3>
<span class="add">[<em>福田区</em>]</span>
</a>
<span class="format-time">2天前发布</span>
<input type="hidden" class="hr_portrait" value="">
<input type="hidden" class="hr_name" value="lis">
<input type="hidden" class="hr_position" value="运营总监">
<input type="hidden" class="target_hr" value="5715150">
<input type="hidden" class="target_position" value="3836101">
<div class="chat_me" data-lg-tj-id="1WI0" data-lg-tj-no="0115" data-lg-tj-cid="25317" data-lg-tj-track-code="search_code" data-lg-tj-track-type="1"></div>
<i class="pos_icon pos_icon_12"></i></div>
<div class="p_bot">
<div class="li_b_l">
<span class="money">12k-18k</span>
<!--<i></i>-->经验3-5年 / 大专
</div>
</div>
</div>
<div class="company">
<div class="company_name">
<a href="https://www.lagou.com/gongsi/25317.html" target="_blank" data-lg-tj-id="8F00" data-lg-tj-no="
0115
" data-lg-tj-cid="25317" data-lg-tj-abt="dm-csearch-useUserAllInterest|0">金证股份</a><i class="company_mark"><span>该企业已上传营业执照并通过资质验证审核</span></i>
</div>
<div class="industry">
金融 / 上市公司
</div>
</div>
<div class="com_logo">
<a href="https://www.lagou.com/gongsi/25317.html" target="_blank" data-lg-tj-id="8G00" data-lg-tj-no="
0115
" data-lg-tj-cid="25317" data-lg-tj-abt="dm-csearch-useUserAllInterest|0"><img src="//static.lagou.com/thumbnail_120x120/image1/M00/00/34/Cgo8PFTUXJOAMEEpAAAroeFn454603.jpg" alt="金证股份" width="60" height="60"></a>
</div>
</div>
<div class="list_item_bot">
<div class="li_b_l">
<span>统计</span>
<span>财经</span>
<span>Perl</span>
<span>自然语言处理</span>
</div>
<div class="li_b_r">“上市公司,大型项目,年度调薪,薪资福利好”</div>
</div>
</li>
</ul>
"""
html = etree.HTML(text)  # parse the string into an HTML document
# serialize the HTML document back to a byte string
result = etree.tostring(html)
print(result)
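# Follow-up sketch (not from the original): each <li class="con_list_item"> above also
# carries the posting as data-* attributes, so the fields can be read straight off the tree.
for job in html.xpath('//li[contains(@class, "con_list_item")]'):
    print(job.get('data-positionname'), job.get('data-company'), job.get('data-salary'))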
| 26.759594 | 249 | 0.485006 |
86f862c37f1fc4105b32506c2611e1984f6d6818
| 162 |
py
|
Python
|
sentinel/vpn/__init__.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
sentinel/vpn/__init__.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
sentinel/vpn/__init__.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from .helpers import disconnect_client
from .helpers import get_sessions
from .helpers import update_session_data
from .wireguard import wireguard
| 27 | 40 | 0.845679 |
86fb659f884277de17b93c2afa6b5604f63fec64
| 30,692 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_template.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_template.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/zabbix_template.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, sookido
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: zabbix_template
short_description: Create/update/delete/dump Zabbix template
description:
- This module allows you to create, modify, delete and dump Zabbix templates.
- Multiple templates can be created or modified at once if passing JSON or XML to module.
author:
- "sookido (@sookido)"
- "Logan Vig (@logan2211)"
- "Dusan Matejka (@D3DeFi)"
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.4"
options:
template_name:
description:
- Name of Zabbix template.
- Required when I(template_json) or I(template_xml) are not used.
- Mutually exclusive with I(template_json) and I(template_xml).
required: false
type: str
template_json:
description:
- JSON dump of templates to import.
- Multiple templates can be imported this way.
- Mutually exclusive with I(template_name) and I(template_xml).
required: false
type: json
template_xml:
description:
- XML dump of templates to import.
- Multiple templates can be imported this way.
- You are advised to pass XML structure matching the structure used by your version of Zabbix server.
- Custom XML structure can be imported as long as it is valid, but may not yield consistent idempotent
results on subsequent runs.
- Mutually exclusive with I(template_name) and I(template_json).
required: false
type: str
template_groups:
description:
- List of host groups to add template to when template is created.
- Replaces the current host groups the template belongs to if the template is already present.
- Required when creating a new template with C(state=present) and I(template_name) is used.
Not required when updating an existing template.
required: false
type: list
elements: str
link_templates:
description:
- List of template names to be linked to the template.
- Templates that are not specified and are linked to the existing template will be only unlinked and not
cleared from the template.
required: false
type: list
elements: str
clear_templates:
description:
- List of template names to be unlinked and cleared from the template.
- This option is ignored if template is being created for the first time.
required: false
type: list
elements: str
macros:
description:
- List of user macros to create for the template.
- Macros that are not specified and are present on the existing template will be replaced.
- See examples on how to pass macros.
required: false
type: list
elements: dict
suboptions:
name:
description:
- Name of the macro.
- Must be specified in {$NAME} format.
type: str
value:
description:
- Value of the macro.
type: str
dump_format:
description:
- Format to use when dumping template with C(state=dump).
- This option is deprecated and will eventually be removed in 2.14.
required: false
choices: [json, xml]
default: "json"
type: str
omit_date:
description:
            - Removes the date field from the exported/dumped template.
            - Requires C(state=dump).
required: false
type: bool
default: false
state:
description:
- Required state of the template.
- On C(state=present) template will be created/imported or updated depending if it is already present.
- On C(state=dump) template content will get dumped into required format specified in I(dump_format).
- On C(state=absent) template will be deleted.
- The C(state=dump) is deprecated and will eventually be removed in 2.14. The M(zabbix_template_info) module should be used instead.
required: false
choices: [present, absent, dump]
default: "present"
type: str
extends_documentation_fragment:
- community.general.zabbix
'''
EXAMPLES = r'''
---
- name: Create a new Zabbix template linked to groups, macros and templates
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: ExampleHost
template_groups:
- Role
- Role2
link_templates:
- Example template1
- Example template2
macros:
- macro: '{$EXAMPLE_MACRO1}'
value: 30000
- macro: '{$EXAMPLE_MACRO2}'
value: 3
- macro: '{$EXAMPLE_MACRO3}'
value: 'Example'
state: present
- name: Unlink and clear templates from the existing Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: ExampleHost
clear_templates:
- Example template3
- Example template4
state: present
- name: Import Zabbix templates from JSON
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_json: "{{ lookup('file', 'zabbix_apache2.json') }}"
state: present
- name: Import Zabbix templates from XML
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
    template_xml: "{{ lookup('file', 'zabbix_apache2.xml') }}"
state: present
- name: Import Zabbix template from Ansible dict variable
zabbix_template:
login_user: username
login_password: password
server_url: http://127.0.0.1
template_json:
zabbix_export:
version: '3.2'
templates:
- name: Template for Testing
description: 'Testing template import'
template: Test Template
groups:
- name: Templates
applications:
- name: Test Application
state: present
- name: Configure macros on the existing Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
macros:
- macro: '{$TEST_MACRO}'
value: 'Example'
state: present
- name: Delete Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
state: absent
- name: Dump Zabbix template as JSON
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
omit_date: yes
state: dump
register: template_dump
- name: Dump Zabbix template as XML
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
dump_format: xml
omit_date: false
state: dump
register: template_dump
'''
RETURN = r'''
---
template_json:
description: The JSON dump of the template
returned: when state is dump and omit_date is no
type: str
sample: {
"zabbix_export":{
"date":"2017-11-29T16:37:24Z",
"templates":[{
"templates":[],
"description":"",
"httptests":[],
"screens":[],
"applications":[],
"discovery_rules":[],
"groups":[{"name":"Templates"}],
"name":"Test Template",
"items":[],
"macros":[],
"template":"test"
}],
"version":"3.2",
"groups":[{
"name":"Templates"
}]
}
}
template_xml:
description: dump of the template in XML representation
returned: when state is dump, dump_format is xml and omit_date is yes
type: str
sample: |-
<?xml version="1.0" ?>
<zabbix_export>
<version>4.2</version>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>test</template>
<name>Test Template</name>
<description/>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications/>
<items/>
<discovery_rules/>
<httptests/>
<macros/>
<templates/>
<screens/>
<tags/>
</template>
</templates>
</zabbix_export>
'''
import atexit
import json
import traceback
import xml.etree.ElementTree as ET
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
try:
from zabbix_api import ZabbixAPI, ZabbixAPIException
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
class Template(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" %
group_name)
return True
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if group_names is None or len(group_names) == 0:
return group_ids
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get(
{'output': 'extend',
'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
        for template in template_list:
            matches = self._zapi.template.get(
                {'output': 'extend',
                 'filter': {'host': template}})
            if len(matches) < 1:
                continue
            template_ids.append(matches[0]['templateid'])
return template_ids
def add_template(self, template_name, group_ids, link_template_ids, macros):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.template.create({'host': template_name, 'groups': group_ids, 'templates': link_template_ids,
'macros': macros})
def check_template_changed(self, template_ids, template_groups, link_templates, clear_templates,
template_macros, template_content, template_type):
"""Compares template parameters to already existing values if any are found.
template_json - JSON structures are compared as deep sorted dictionaries,
template_xml - XML structures are compared as strings, but filtered and formatted first,
If none above is used, all the other arguments are compared to their existing counterparts
retrieved from Zabbix API."""
changed = False
# Compare filtered and formatted XMLs strings for any changes. It is expected that provided
# XML has same structure as Zabbix uses (e.g. it was optimally exported via Zabbix GUI or API)
if template_content is not None and template_type == 'xml':
existing_template = self.dump_template(template_ids, template_type='xml')
if self.filter_xml_template(template_content) != self.filter_xml_template(existing_template):
changed = True
return changed
existing_template = self.dump_template(template_ids, template_type='json')
# Compare JSON objects as deep sorted python dictionaries
if template_content is not None and template_type == 'json':
parsed_template_json = self.load_json_template(template_content)
if self.diff_template(parsed_template_json, existing_template):
changed = True
return changed
# If neither template_json or template_xml were used, user provided all parameters via module options
if template_groups is not None:
existing_groups = [g['name'] for g in existing_template['zabbix_export']['groups']]
if set(template_groups) != set(existing_groups):
changed = True
if 'templates' not in existing_template['zabbix_export']['templates'][0]:
existing_template['zabbix_export']['templates'][0]['templates'] = []
# Check if any new templates would be linked or any existing would be unlinked
exist_child_templates = [t['name'] for t in existing_template['zabbix_export']['templates'][0]['templates']]
if link_templates is not None:
if set(link_templates) != set(exist_child_templates):
changed = True
else:
if set([]) != set(exist_child_templates):
changed = True
# Mark that there will be changes when at least one existing template will be unlinked
if clear_templates is not None:
for t in clear_templates:
if t in exist_child_templates:
changed = True
break
if 'macros' not in existing_template['zabbix_export']['templates'][0]:
existing_template['zabbix_export']['templates'][0]['macros'] = []
if template_macros is not None:
existing_macros = existing_template['zabbix_export']['templates'][0]['macros']
if template_macros != existing_macros:
changed = True
return changed
def update_template(self, template_ids, group_ids, link_template_ids, clear_template_ids, template_macros):
template_changes = {}
if group_ids is not None:
template_changes.update({'groups': group_ids})
if link_template_ids is not None:
template_changes.update({'templates': link_template_ids})
else:
template_changes.update({'templates': []})
if clear_template_ids is not None:
template_changes.update({'templates_clear': clear_template_ids})
if template_macros is not None:
template_changes.update({'macros': template_macros})
if template_changes:
# If we got here we know that only one template was provided via template_name
template_changes.update({'templateid': template_ids[0]})
self._zapi.template.update(template_changes)
def delete_template(self, templateids):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.template.delete(templateids)
def ordered_json(self, obj):
# Deep sort json dicts for comparison
if isinstance(obj, dict):
return sorted((k, self.ordered_json(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(self.ordered_json(x) for x in obj)
else:
return obj
def dump_template(self, template_ids, template_type='json', omit_date=False):
if self._module.check_mode:
self._module.exit_json(changed=True)
try:
dump = self._zapi.configuration.export({'format': template_type, 'options': {'templates': template_ids}})
if template_type == 'xml':
xmlroot = ET.fromstring(dump.encode('utf-8'))
# remove date field if requested
if omit_date:
date = xmlroot.find(".date")
if date is not None:
xmlroot.remove(date)
return str(ET.tostring(xmlroot, encoding='utf-8').decode('utf-8'))
else:
return self.load_json_template(dump, omit_date=omit_date)
except ZabbixAPIException as e:
self._module.fail_json(msg='Unable to export template: %s' % e)
def diff_template(self, template_json_a, template_json_b):
# Compare 2 zabbix templates and return True if they differ.
template_json_a = self.filter_template(template_json_a)
template_json_b = self.filter_template(template_json_b)
if self.ordered_json(template_json_a) == self.ordered_json(template_json_b):
return False
return True
def filter_template(self, template_json):
# Filter the template json to contain only the keys we will update
keep_keys = set(['graphs', 'templates', 'triggers', 'value_maps'])
unwanted_keys = set(template_json['zabbix_export']) - keep_keys
for unwanted_key in unwanted_keys:
del template_json['zabbix_export'][unwanted_key]
# Versions older than 2.4 do not support description field within template
desc_not_supported = False
if LooseVersion(self._zapi.api_version()).version[:2] < LooseVersion('2.4').version:
desc_not_supported = True
# Filter empty attributes from template object to allow accurate comparison
for template in template_json['zabbix_export']['templates']:
for key in list(template.keys()):
if not template[key] or (key == 'description' and desc_not_supported):
template.pop(key)
return template_json
def filter_xml_template(self, template_xml):
"""Filters out keys from XML template that may wary between exports (e.g date or version) and
keys that are not imported via this module.
It is advised that provided XML template exactly matches XML structure used by Zabbix"""
# Strip last new line and convert string to ElementTree
parsed_xml_root = self.load_xml_template(template_xml.strip())
keep_keys = ['graphs', 'templates', 'triggers', 'value_maps']
# Remove unwanted XML nodes
for node in list(parsed_xml_root):
if node.tag not in keep_keys:
parsed_xml_root.remove(node)
# Filter empty attributes from template objects to allow accurate comparison
for template in list(parsed_xml_root.find('templates')):
for element in list(template):
if element.text is None and len(list(element)) == 0:
template.remove(element)
# Filter new lines and indentation
xml_root_text = list(line.strip() for line in ET.tostring(parsed_xml_root, encoding='utf8', method='xml').decode().split('\n'))
return ''.join(xml_root_text)
def load_json_template(self, template_json, omit_date=False):
try:
jsondoc = json.loads(template_json)
if omit_date and 'date' in jsondoc['zabbix_export']:
del jsondoc['zabbix_export']['date']
return jsondoc
except ValueError as e:
self._module.fail_json(msg='Invalid JSON provided', details=to_native(e), exception=traceback.format_exc())
def load_xml_template(self, template_xml):
try:
return ET.fromstring(template_xml)
except ET.ParseError as e:
self._module.fail_json(msg='Invalid XML provided', details=to_native(e), exception=traceback.format_exc())
def import_template(self, template_content, template_type='json'):
# rules schema latest version
update_rules = {
'applications': {
'createMissing': True,
'deleteMissing': True
},
'discoveryRules': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'graphs': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'groups': {
'createMissing': True
},
'httptests': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'items': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'templates': {
'createMissing': True,
'updateExisting': True
},
'templateLinkage': {
'createMissing': True
},
'templateScreens': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'triggers': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'valueMaps': {
'createMissing': True,
'updateExisting': True
}
}
try:
# old api version support here
api_version = self._zapi.api_version()
# updateExisting for application removed from zabbix api after 3.2
if LooseVersion(api_version).version[:2] <= LooseVersion('3.2').version:
update_rules['applications']['updateExisting'] = True
            # templateLinkage.deleteMissing is only available in the 4.0 branch from 4.0.16
            # onwards and in 4.4.4+; it is not available in the 4.2 branch or below 4.0.16
if LooseVersion(api_version).version[:2] == LooseVersion('4.0').version and \
LooseVersion(api_version).version[:3] >= LooseVersion('4.0.16').version:
update_rules['templateLinkage']['deleteMissing'] = True
if LooseVersion(api_version).version[:3] >= LooseVersion('4.4.4').version:
update_rules['templateLinkage']['deleteMissing'] = True
import_data = {'format': template_type, 'source': template_content, 'rules': update_rules}
self._zapi.configuration.import_(import_data)
except ZabbixAPIException as e:
self._module.fail_json(msg='Unable to import template', details=to_native(e),
exception=traceback.format_exc())
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
template_name=dict(type='str', required=False),
template_json=dict(type='json', required=False),
template_xml=dict(type='str', required=False),
template_groups=dict(type='list', required=False),
link_templates=dict(type='list', required=False),
clear_templates=dict(type='list', required=False),
macros=dict(type='list', required=False),
omit_date=dict(type='bool', required=False, default=False),
dump_format=dict(type='str', required=False, default='json', choices=['json', 'xml']),
state=dict(type='str', default="present", choices=['present', 'absent', 'dump']),
timeout=dict(type='int', default=10)
),
required_one_of=[
['template_name', 'template_json', 'template_xml']
],
mutually_exclusive=[
['template_name', 'template_json', 'template_xml']
],
required_if=[
['state', 'absent', ['template_name']],
['state', 'dump', ['template_name']]
],
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
template_name = module.params['template_name']
template_json = module.params['template_json']
template_xml = module.params['template_xml']
template_groups = module.params['template_groups']
link_templates = module.params['link_templates']
clear_templates = module.params['clear_templates']
template_macros = module.params['macros']
omit_date = module.params['omit_date']
dump_format = module.params['dump_format']
state = module.params['state']
timeout = module.params['timeout']
zbx = None
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except ZabbixAPIException as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
template = Template(module, zbx)
# Identify template names for IDs retrieval
# Template names are expected to reside in ['zabbix_export']['templates'][*]['template'] for both data types
template_content, template_type = None, None
if template_json is not None:
template_type = 'json'
template_content = template_json
json_parsed = template.load_json_template(template_content)
template_names = list(t['template'] for t in json_parsed['zabbix_export']['templates'])
elif template_xml is not None:
template_type = 'xml'
template_content = template_xml
xml_parsed = template.load_xml_template(template_content)
template_names = list(t.find('template').text for t in list(xml_parsed.find('templates')))
else:
template_names = [template_name]
template_ids = template.get_template_ids(template_names)
if state == "absent":
if not template_ids:
module.exit_json(changed=False, msg="Template not found. No changed: %s" % template_name)
template.delete_template(template_ids)
module.exit_json(changed=True, result="Successfully deleted template %s" % template_name)
elif state == "dump":
module.deprecate("The 'dump' state has been deprecated and will be removed, use 'zabbix_template_info' module instead.", version='2.14')
if not template_ids:
module.fail_json(msg='Template not found: %s' % template_name)
if dump_format == 'json':
module.exit_json(changed=False, template_json=template.dump_template(template_ids, template_type='json', omit_date=omit_date))
elif dump_format == 'xml':
module.exit_json(changed=False, template_xml=template.dump_template(template_ids, template_type='xml', omit_date=omit_date))
elif state == "present":
# Load all subelements for template that were provided by user
group_ids = None
if template_groups is not None:
group_ids = template.get_group_ids_by_group_names(template_groups)
link_template_ids = None
if link_templates is not None:
link_template_ids = template.get_template_ids(link_templates)
clear_template_ids = None
if clear_templates is not None:
clear_template_ids = template.get_template_ids(clear_templates)
if template_macros is not None:
# Zabbix configuration.export does not differentiate python types (numbers are returned as strings)
for macroitem in template_macros:
for key in macroitem:
macroitem[key] = str(macroitem[key])
if not template_ids:
            # Assume new templates are being added when no IDs were found
if template_content is not None:
template.import_template(template_content, template_type)
module.exit_json(changed=True, result="Template import successful")
else:
if group_ids is None:
module.fail_json(msg='template_groups are required when creating a new Zabbix template')
template.add_template(template_name, group_ids, link_template_ids, template_macros)
module.exit_json(changed=True, result="Successfully added template: %s" % template_name)
else:
changed = template.check_template_changed(template_ids, template_groups, link_templates, clear_templates,
template_macros, template_content, template_type)
if module.check_mode:
module.exit_json(changed=changed)
if changed:
if template_type is not None:
template.import_template(template_content, template_type)
else:
template.update_template(template_ids, group_ids, link_template_ids, clear_template_ids,
template_macros)
module.exit_json(changed=changed, result="Template successfully updated")
if __name__ == '__main__':
main()
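# Illustration only (not part of the module): the deep-sort comparison that
# Template.ordered_json and diff_template rely on can be sketched standalone as
#
#     def _ordered(obj):
#         if isinstance(obj, dict):
#             return sorted((k, _ordered(v)) for k, v in obj.items())
#         if isinstance(obj, list):
#             return sorted(_ordered(x) for x in obj)
#         return obj
#
#     _ordered({'a': [2, 1]}) == _ordered({'a': [1, 2]})   # True: ordering differences are ignored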
| 38.557789 | 144 | 0.615893 |
9c18f8315105f2144d7afb8563f73d992aff655e
| 2,648 |
py
|
Python
|
tools/pythonpkg/tests/fast/arrow/test_timestamps.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 2,816 |
2018-06-26T18:52:52.000Z
|
2021-04-06T10:39:15.000Z
|
tools/pythonpkg/tests/fast/arrow/test_timestamps.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310 |
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
tools/pythonpkg/tests/fast/arrow/test_timestamps.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270 |
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
import duckdb
import os
import datetime
import pytest
try:
import pyarrow as pa
import pandas as pd
can_run = True
except ImportError:
can_run = False
class TestArrowTimestamps(object):
def test_timestamp_types(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([datetime.datetime.now()], type=pa.timestamp('ns')),pa.array([datetime.datetime.now()], type=pa.timestamp('us')),pa.array([datetime.datetime.now()], pa.timestamp('ms')),pa.array([datetime.datetime.now()], pa.timestamp('s')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2],data[3]],['a','b','c','d'])
rel = duckdb.from_arrow_table(arrow_table).arrow()
assert (rel['a'] == arrow_table['a'])
assert (rel['b'] == arrow_table['b'])
assert (rel['c'] == arrow_table['c'])
assert (rel['d'] == arrow_table['d'])
def test_timestamp_nulls(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([None], type=pa.timestamp('ns')),pa.array([None], type=pa.timestamp('us')),pa.array([None], pa.timestamp('ms')),pa.array([None], pa.timestamp('s')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2],data[3]],['a','b','c','d'])
rel = duckdb.from_arrow_table(arrow_table).arrow()
assert (rel['a'] == arrow_table['a'])
assert (rel['b'] == arrow_table['b'])
assert (rel['c'] == arrow_table['c'])
assert (rel['d'] == arrow_table['d'])
def test_timestamp_overflow(self, duckdb_cursor):
if not can_run:
return
data = (pa.array([9223372036854775807], pa.timestamp('s')),pa.array([9223372036854775807], pa.timestamp('ms')),pa.array([9223372036854775807], pa.timestamp('us')))
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2]],['a','b','c'])
arrow_from_duck = duckdb.from_arrow_table(arrow_table).arrow()
assert (arrow_from_duck['a'] == arrow_table['a'])
assert (arrow_from_duck['b'] == arrow_table['b'])
assert (arrow_from_duck['c'] == arrow_table['c'])
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('a::TIMESTAMP_US')
res.fetchone()
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('b::TIMESTAMP_US')
res.fetchone()
with pytest.raises(Exception):
duck_rel = duckdb.from_arrow_table(arrow_table)
res = duck_rel.project('c::TIMESTAMP_NS')
res.fetchone()
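# Note (illustration, not from the original tests): 9223372036854775807 is INT64_MAX,
# so converting it from coarse units (s/ms/us) to a finer timestamp resolution must
# overflow, which is why the projections above are expected to raise.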
| 43.409836 | 249 | 0.604985 |
9c343a573df4394fcd53c5e1037b636600240360
| 13,378 |
py
|
Python
|
CodeAnalyzing/resnet.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | null | null | null |
CodeAnalyzing/resnet.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | null | null | null |
CodeAnalyzing/resnet.py
|
gian1312/suchen
|
df863140fd8df1ac2e195cbdfa4756f09f962270
|
[
"Apache-2.0"
] | 1 |
2019-11-29T12:28:33.000Z
|
2019-11-29T12:28:33.000Z
|
#!/usr/bin/env python
#taken from https://github.com/raghakot/keras-resnet/blob/master/resnet.py
from __future__ import division
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.core import Lambda
from keras.layers.merge import (dot, concatenate)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
NUM_EMBEDDING = 512       # previously tried: 256, 1024
TOP_HIDDEN = 4            # previously tried: 1
NORMALIZATION_ON = False  # previously tried: True
def _bn_relu(input):
"""Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu block
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut(input, residual):
"""Adds a shortcut between input and residual block and merges them with "sum"
"""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(input)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(input)
return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
return _shortcut(input, residual)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
def _bn_relu_for_dense(input):
norm = BatchNormalization(axis=1)(input)
return Activation('relu')(norm)
def _top_network(input):
raw_result = _bn_relu_for_dense(input)
    for _ in range(TOP_HIDDEN):  # range: xrange is Python 2 only
raw_result = Dense(units=NUM_EMBEDDING, kernel_initializer='he_normal')(raw_result)
raw_result = _bn_relu_for_dense(raw_result)
output = Dense(units=2, activation='softmax', kernel_initializer='he_normal')(raw_result)
return output
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, is_classification):
"""Builds a custom ResNet like architecture.
Args:
input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units.
At each block unit, the number of filters are doubled and the input size is halved
Returns:
The keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'tf':
input_shape = (input_shape[1], input_shape[2], input_shape[0])
# Load function from str if needed.
block_fn = _get_block(block_fn)
input = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
last_activation = None
if is_classification:
last_activation = "softmax"
dense = Dense(units=num_outputs, kernel_initializer="he_normal",
activation=last_activation)(flatten1)
model = Model(inputs=input, outputs=dense)
return model
@staticmethod
def build_resnet_18(input_shape, num_outputs, is_classification=True):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2], is_classification)
@staticmethod
def build_resnet_34(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3])
@staticmethod
def build_resnet_50(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3])
@staticmethod
def build_resnet_101(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3])
@staticmethod
def build_resnet_152(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])
@staticmethod
def build_top_network(edge_model):
number_of_top_layers = 3 + TOP_HIDDEN * 3
input = Input(shape=(2 * NUM_EMBEDDING,))
output = edge_model.layers[-number_of_top_layers](input) #_top_network(input)
        for index in range(-number_of_top_layers + 1, 0):
output = edge_model.layers[index](output)
return Model(inputs=input, outputs=output)
@staticmethod
def build_bottom_network(edge_model, input_shape):
channels, height, width = input_shape
input = Input(shape=(height, width, channels))
branch = edge_model.layers[3]
output = branch(input)
if NORMALIZATION_ON:
output = Lambda(lambda x: K.l2_normalize(x, axis=1))(output)
return Model(inputs=input, outputs=output)
@staticmethod
def build_siamese_resnet_18(input_shape, num_outputs):
channels, height, width = input_shape
branch_channels = 3 #channels / 2
branch_input_shape = (branch_channels, height, width)
branch = ResnetBuilder.build_resnet_18(branch_input_shape, NUM_EMBEDDING, False)
input = Input(shape=(height, width, channels))
first_branch = branch(Lambda(lambda x: x[:, :, :, :3])(input))
second_branch = branch(Lambda(lambda x: x[:, :, :, 3:])(input))
if NORMALIZATION_ON:
first_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_branch)
second_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_branch)
raw_result = concatenate([first_branch, second_branch])
output = _top_network(raw_result)
# raw_result = dot([first_branch, second_branch], axes=1)
# result = Lambda(lambda x: (K.clip(x, 0.5, 1) - 0.5) * 2.0)(raw_result)
# negated_result = Lambda(lambda x: 1 - x)(result)
# output = concatenate([negated_result, result])
return Model(inputs=input, outputs=output)
@staticmethod
def build_pixel_comparison_network(input_shape):
channels, height, width = input_shape
input = Input(shape=(height, width, channels))
first = Flatten()(Lambda(lambda x: x[:, :, :, :1])(input))
second = Flatten()(Lambda(lambda x: x[:, :, :, 1:])(input))
# second = Lambda(lambda x: -x)(second)
# difference = add([first, second])
# raw_result = Lambda(lambda x: K.mean(K.abs(x), axis=1, keepdims=True))(difference)
# prob_zero = Lambda(lambda x: x / 255.0)(raw_result)
# prob_one = Lambda(lambda x: 1.0 - x)(prob_zero)
prob_one = dot([first, second], axes=1, normalize=True)
prob_zero = Lambda(lambda x: 1.0 - x)(prob_one)
output = concatenate([prob_zero, prob_one])
return Model(inputs=input, outputs=output)
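if __name__ == '__main__':
    # Minimal usage sketch (not from the original; assumes a working Keras install and
    # an input shape given as (nb_channels, nb_rows, nb_cols), as build() expects):
    model = ResnetBuilder.build_resnet_18((3, 224, 224), num_outputs=10)
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()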
| 20.362253 | 109 | 0.627897 |
92e714dacc8acd7270c7578f9487d0faf292923f
| 2,324 |
py
|
Python
|
dataset/utils/data_deal.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
dataset/utils/data_deal.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
dataset/utils/data_deal.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
"""
@Description: 加载数据,保存数据
@Author: Bao Wenjie
@Email: [email protected]
@Date: 2020/10/27
"""
import csv
import os
import json
class DataDeal:
def __init__(self):
pass
@classmethod
def load_data(cls, path, delimiter='\t', encoding="utf-8-sig", quotechar=None):
""" 从文件读取数据\n
@param:\n
:path: 路径\n
:cls: 分隔符(一行内)\n
:encoding: 编码方式\n
:quotechar: 引用符\n
@return:\n
:lines: list(),读取的数据\n
"""
with open(path, "r", encoding=encoding) as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
    def save_data(cls, data, path, head=None, encoding='utf-8', delimiter='\t'):
        """ Save multi-column data to a file [[str, ...], [str, ...]]
        @param:
        :data: the data
        :head: optional header row
        :path: output path
        :delimiter: field separator
        :encoding: text encoding
        """
        # newline='' lets the csv module control line endings; the original
        # passed dialect='tsv', which is not a registered csv dialect and
        # would raise csv.Error, so a plain delimiter is used instead.
        with open(path, mode='w+', encoding=encoding, newline='') as file:
            writer = csv.writer(file, delimiter=delimiter)
            if head is not None:
                writer.writerow(head)
            writer.writerows(data)
return
@classmethod
    def save_single(cls, data, path, encoding='utf-8'):
        """ Save a single column of data [str]
        @param:
        :data: the data
        :path: output path
        :encoding: text encoding
        """
        if not os.path.exists(os.path.dirname(path)):
            raise Exception("Path does not exist")
        with open(path, mode='w+', encoding=encoding) as file:
            for line in data:
                file.write(line)
                file.write('\n')
        print('-'*8, 'write succeeded', '-'*8)
return
@staticmethod
    def save_dict_json(path, dict_, encoding='utf-8'):
        """ Save a dict to a JSON file
        @param:
        :path: (str) output path
        :dict_: (dict) the dictionary
        """
        # if not os.path.exists(os.path.dirname(path)):
        #     raise Exception("Path does not exist")
        with open(path, 'w+', encoding=encoding) as json_file:
            json.dump(dict_, json_file, indent=2)
if __name__ == "__main__":
    # data = DataDeal.load_data(path='dataset/CONLL2003/test.txt', delimiter=' ')
# print(data[:9])
data = {2:3, 4:5}
DataDeal.save_dict_json(dict_=data, path='a.out')
| 27.666667 | 83 | 0.527108 |
1363bbef712d0e4f7a4dec0992a594a5b0be200d
| 252 |
py
|
Python
|
Utils/py/naoth/setup.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/naoth/setup.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/naoth/setup.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from setuptools import setup
setup(name='naoth',
version='0.2',
description='Python Utils for the NaoTH toolchain',
packages=["naoth"],
install_requires=[
'protobuf',
],
zip_safe=False)
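# Hypothetical usage: from this directory the package is typically installed
# in editable mode with
#   pip install -e .
# which also pulls in the 'protobuf' dependency declared above.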
| 19.384615 | 57 | 0.603175 |
bc57b27a0d51013b4cd0c8401a627b982c16ee53
| 1,173 |
py
|
Python
|
src/test/tests/simulation/domainbounds.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/simulation/domainbounds.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/simulation/domainbounds.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: domainbounds.py
#
# Tests: libsim - connecting to simulation and retrieving data from it.
# mesh - 3D rectilinear mesh
#
# Programmer: Kathleen Biagas
# Date: June 17, 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
# Create our simulation object.
sim = TestSimulation("domainbounds", "domainbounds.sim2")
# Test that we can start and connect to the simulation.
started, connected = TestSimStartAndConnect("domainbounds00", sim)
# Perform our tests.
if connected:
# Make sure the metadata is right.
TestSimMetaData("domainbounds01", sim.metadata())
AddPlot("Subset", "Domains")
DrawPlots()
v = GetView3D()
v.viewNormal = (0.672727, 0.569817, 0.471961)
v.viewUp = (-0.252634, 0.776445, -0.57733)
SetView3D(v)
Test("domainbounds02")
DeleteAllPlots()
AddPlot("Pseudocolor", "zonal")
DrawPlots()
Test("domainbounds03")
DeleteAllPlots()
# Close down the simulation.
if started:
sim.endsim()
Exit()
| 24.957447 | 78 | 0.57289 |
4c5d24570ac7916f7fdc26625fc48412aa89e4a5
| 18,542 |
py
|
Python
|
MAIN/STM32F405_C/NORMAL/V38/DataBase.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/V38/DataBase.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
MAIN/STM32F405_C/NORMAL/V38/DataBase.py
|
ozturkahmetcevdet/VSenst
|
07c068fefcbd66ae4d8ec0480b4da10d6b5c7410
|
[
"MIT"
] | null | null | null |
import gc
from micropython import const
from PXSensor import PxHub, key, T
import os
import peripheral
import binascii
import ujson as js
from machine import RTC
DEBUG = False
OPEN_SCENE_SHOW_TIME = const(5000)
CLOSING_TIME = const(10000)
CLOSE_SCENE_SHOW_TIME = const(1000)
class TextColour:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
HUBDB_JSON_FILE_NAME = "HUB_DB.json"
COORDINATEDB_JSON_FILE_NAME = "jf/COORDINATE_DB.json"
INSTRUCTIONDB_JSON_FILE_NAME = "jf/INSTRUCTION_DB.json"
LANGUAGEDB_JSON_FILE_NAME = "jf/LANGUAGE_DB.json"
MENUDB_JSON_FILE_NAME = "jf/MENU_DB.json"
NOTIFICATIONDB_JSON_FILE_NAME = "jf/NOTIFICATION_DB.json"
class DataBase(RTC):
def __init__(self):
self.HubList = []
self.HubJson = {}
self.HubJsonIsExist = False
self.CoordinateJson = {}
self.CoordinateJsonIsExist = False
self.CoordinateJsonRefresh = False
self.InstructionJson = {}
self.InstructionJsonIsExist = False
self.InstructionJsonRefresh = False
self.LanguageJson = {}
self.LanguageJsonIsExist = False
self.LanguageJsonRefresh = False
self.MenuJson = {}
self.MenuJsonIsExist = False
        self.MenuJsonRefresh = False
self.NotificationJson = {}
self.NotificationJsonIsExist = False
self.NotificationJsonRefresh = False
self.init()
self.Setup()
def Setup(self):
self.CreateAndCheckJsonFiles()
self.ImportRawDataFromCoordinateJson()
self.ImportRawDataFromInstructionJson()
self.ImportRawDataFromHubJson()
self.UnzipRawData()
def Process(self, fullData=None):
if fullData != None:
for data in fullData:
if self.HubList != []:
for item in self.HubList:
if item.Process(data=data):
self.InstructionJsonRefresh = True
break
if self.InstructionJsonRefresh:
self.InstructionJson['DateTime'] = self.datetime()
if self.HubList != []:
self.InstructionJson['Counter'] = sum([item.GetPassangerCount() for item in self.HubList])
if DEBUG:
print("\f***RF Data:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.OKCYAN, [binascii.hexlify(d, ',') for d in fullData], TextColour.ENDC))
print("***Seat Count:\t{0}{1:2}{2}".format(TextColour.BOLD + TextColour.OKCYAN, self.InstructionJson['Counter'], TextColour.ENDC))
print("***CRC Err:\t{0}{1}{2}\n\r".format(TextColour.BOLD + TextColour.FAIL, sum([crc.features[key.hub.crcErrorCount] for crc in self.HubList]), TextColour.ENDC))
if self.HubList != []:
for item in self.HubList:
debugLog = ""
if item.features[key.hub.px1]:
debugLog += "{tc0}{0:2}-{tc1} ID:{id_c1}{1:3}{id_c2}, PxV:{re_c1}{2:5}{re_c2}, PxBV:{rv_c1}{3:5}{rv_c2}, Loop:{lo_c1}{4:3}{lo_c2}, Px Cable:{se_c1}{5}{se_c2}, Seatbelt:{be_c1}{6}{be_c2}, Count:{co_c1}{7:4}{co_c2} --> RF Count:{rf_c1}{8:6}{rf_c2}, RSSI:{rs_c1}{9:4}dBm{rs_c2}, CRCErr:{cc_c1}{10:6}{cc_c2}, Battery:{bt_c1}%{11:3}{bt_c2}" \
.format(item.features[key.hub.px1][key.px.number], binascii.hexlify(item.features[key.hub.idNumber], '-'), item.features[key.hub.px1][key.px.currentValue], item.features[key.hub.px1][key.px.baseLine], item.features[key.hub.loopCount], bool(item.features[key.hub.px1][key.px.cableStatus]), bool(item.features[key.hub.px1][key.px.beltStatus]), item.features[key.hub.px1][key.px.seatCount], item.features[key.hub.dataCount], item.features[key.hub.rssi], item.features[key.hub.crcErrorCount], item.features[key.hub.battery] \
, tc0 = TextColour.BOLD + TextColour.OKCYAN , tc1 = TextColour.ENDC \
, id_c1 = TextColour.OKBLUE , id_c2 = TextColour.ENDC \
, re_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.seatStatus] else TextColour.WARNING , re_c2 = TextColour.ENDC \
, rv_c1 = TextColour.BOLD + TextColour.OKCYAN , rv_c2 = TextColour.ENDC \
, lo_c1 = TextColour.BOLD + TextColour.OKGREEN , lo_c2 = TextColour.ENDC \
, se_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.cableStatus] else TextColour.FAIL , se_c2 = TextColour.ENDC \
, be_c1 = TextColour.OKGREEN if item.features[key.hub.px1][key.px.beltStatus] else TextColour.WARNING , be_c2 = TextColour.ENDC \
, co_c1 = TextColour.HEADER , co_c2 = TextColour.ENDC \
, rf_c1 = TextColour.OKGREEN if item.features[key.hub.dataCount] > 0 else TextColour.FAIL , rf_c2 = TextColour.ENDC \
, rs_c1 = TextColour.HEADER , rs_c2 = TextColour.ENDC \
, cc_c1 = TextColour.FAIL , cc_c2 = TextColour.ENDC \
, bt_c1 = TextColour.OKGREEN if item.features[key.hub.battery] > 20 else TextColour.FAIL , bt_c2 = TextColour.ENDC)
if item.features[key.hub.px2]:
debugLog += "\n\r{tc0}{0:2}-{tc1} ID:{id_c1}{1:3}{id_c2}, PxV:{re_c1}{2:5}{re_c2}, PxBV:{rv_c1}{3:5}{rv_c2}, Loop:{lo_c1}{4:3}{lo_c2}, Px Cable:{se_c1}{5}{se_c2}, Seatbelt:{be_c1}{6}{be_c2}, Count:{co_c1}{7:4}{co_c2} --> RF Count:{rf_c1}{8:6}{rf_c2}, RSSI:{rs_c1}{9:4}dBm{rs_c2}, CRCErr:{cc_c1}{10:6}{cc_c2}, Battery:{bt_c1}%{11:3}{bt_c2}" \
.format(item.features[key.hub.px2][key.px.number], binascii.hexlify(item.features[key.hub.idNumber], '-'), item.features[key.hub.px2][key.px.currentValue], item.features[key.hub.px2][key.px.baseLine], item.features[key.hub.loopCount], bool(item.features[key.hub.px2][key.px.cableStatus]), bool(item.features[key.hub.px2][key.px.beltStatus]), item.features[key.hub.px2][key.px.seatCount], item.features[key.hub.dataCount], item.features[key.hub.rssi], item.features[key.hub.crcErrorCount], item.features[key.hub.battery] \
, tc0 = TextColour.BOLD + TextColour.OKCYAN , tc1 = TextColour.ENDC \
, id_c1 = TextColour.OKBLUE , id_c2 = TextColour.ENDC \
, re_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.seatStatus] else TextColour.WARNING , re_c2 = TextColour.ENDC \
, rv_c1 = TextColour.BOLD + TextColour.OKCYAN , rv_c2 = TextColour.ENDC \
, lo_c1 = TextColour.BOLD + TextColour.OKGREEN , lo_c2 = TextColour.ENDC \
, se_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.cableStatus] else TextColour.FAIL , se_c2 = TextColour.ENDC \
, be_c1 = TextColour.OKGREEN if item.features[key.hub.px2][key.px.beltStatus] else TextColour.WARNING , be_c2 = TextColour.ENDC \
, co_c1 = TextColour.HEADER , co_c2 = TextColour.ENDC \
, rf_c1 = TextColour.OKGREEN if item.features[key.hub.dataCount] > 0 else TextColour.FAIL , rf_c2 = TextColour.ENDC \
, rs_c1 = TextColour.HEADER , rs_c2 = TextColour.ENDC \
, cc_c1 = TextColour.FAIL , cc_c2 = TextColour.ENDC \
, bt_c1 = TextColour.OKGREEN if item.features[key.hub.battery] > 20 else TextColour.FAIL , bt_c2 = TextColour.ENDC)
print(debugLog)
del debugLog
else:
print("{0}{1}{2}".format(TextColour.WARNING, "There is no data available !!", TextColour.ENDC))
print("\n\r{0}".format(self.free()))
else:
print("\f***RF Data:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.OKCYAN, [binascii.hexlify(d, ',') for d in fullData], TextColour.ENDC))
print("***Seat Count:\t{0}{1:2}{2}".format(TextColour.BOLD + TextColour.OKCYAN, self.InstructionJson['Counter'], TextColour.ENDC))
print("***CRC Err:\t{0}{1}{2}".format(TextColour.BOLD + TextColour.FAIL, sum([crc.features[key.hub.crcErrorCount] for crc in self.HubList]), TextColour.ENDC))
print("*Warning:\tDebug mode is OFF\n\r*Execute:\tUse [-d] command to toggle Debug mode.\n\r*MEMUsage:\t{0}\n\r*Time:\t\t{1}".format(self.free(), self.datetime()))
        return None
    def CreateAndCheckJsonFiles(self):
        # Probe every JSON file once; 'with' ensures the handles opened
        # here are actually closed again (the original leaked them).
        targets = (
            (HUBDB_JSON_FILE_NAME, 'HubJsonIsExist'),
            (COORDINATEDB_JSON_FILE_NAME, 'CoordinateJsonIsExist'),
            (INSTRUCTIONDB_JSON_FILE_NAME, 'InstructionJsonIsExist'),
            (LANGUAGEDB_JSON_FILE_NAME, 'LanguageJsonIsExist'),
            (MENUDB_JSON_FILE_NAME, 'MenuJsonIsExist'),
            (NOTIFICATIONDB_JSON_FILE_NAME, 'NotificationJsonIsExist'),
        )
        for file_name, flag in targets:
            try:
                with open(file_name, 'r'):
                    pass
                setattr(self, flag, True)
            except OSError as err:
                print("OS error: {0}".format(err))
            except ValueError:
                print("Could not open file. ---{}".format(file_name))
            except:
                print("Unexpected error!")
                raise
def FlushRawDataToJson(self):
self.HubJson = {}
hubCounter = 0
if self.HubList != []:
for hub in self.HubList:
self.HubJson[hubCounter] = hub.features
hubCounter +=1
        with open(HUBDB_JSON_FILE_NAME, 'w') as jf:
            js.dump(self.HubJson, jf, separators=(',', ':'))
self.ClearUnnecessaryFiles()
def ImportRawDataFromCoordinateJson(self):
if self.CoordinateJsonIsExist :
try:
                with open(COORDINATEDB_JSON_FILE_NAME, 'r') as jf:
                    self.CoordinateJson = js.load(jf)
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not read file. ---{}".format(COORDINATEDB_JSON_FILE_NAME))
except:
print("Unexpected error!")
raise
def ImportRawDataFromInstructionJson(self):
if self.InstructionJsonIsExist :
try:
                with open(INSTRUCTIONDB_JSON_FILE_NAME, 'r') as jf:
                    self.InstructionJson = js.load(jf)
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not read file. ---{}".format(INSTRUCTIONDB_JSON_FILE_NAME))
except:
print("Unexpected error!")
raise
def ImportRawDataFromHubJson(self):
if self.HubJsonIsExist:
try:
                with open(HUBDB_JSON_FILE_NAME, 'r') as jf:
                    self.HubJson = js.load(jf)
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not read file. ---{}".format(HUBDB_JSON_FILE_NAME))
except:
print("Unexpected error!")
raise
def GetCoordinateJsonAsString(self):
return js.dumps(self.CoordinateJson, separators=(',', ':'))
def GetInstructionJsonAsString(self):
return js.dumps(self.InstructionJson, separators=(',', ':'))
def UnzipRawData(self):
if self.HubJson != {}:
self.HubList = []
            # loop variable renamed so it does not shadow the imported 'key'
            for hub_key in self.HubJson:
                self.CreateHubObject(json=self.HubJson[hub_key])
self.InstructionJsonRefresh = True
self.ClearUnnecessaryFiles()
def DefineHubObject(self, fullData=None):
if fullData != None:
sNo = -1
for data in fullData:
if bool(data[20] & 0x01):
checkFlag = not (bool((data[2] >> 0) & 0x01) or bool((data[2] >> 1) & 0x01))
if self.HubList != []:
for item in self.HubList:
if item.features[key.hub.idNumber] == data[:2]:
item.Process(data=data)
checkFlag = True
if item.features[key.hub.px1]:
sNo = item.features[key.hub.px1][key.px.number] if sNo < item.features[key.hub.px1][key.px.number] else sNo
if item.features[key.hub.px2]:
sNo = item.features[key.hub.px2][key.px.number] if sNo < item.features[key.hub.px2][key.px.number] else sNo
if checkFlag is False:
self.InstructionJsonRefresh = True
if int((data[9] << 8) | data[10]) > T.D or int((data[17] << 8) | data[18]) > T.D:
self.CreateHubObject(data=data, sNo=sNo+1)
def CreateHubObject(self, json=dict(), data=None, sNo=0):
if json != dict():
self.HubList.append(PxHub(json=json, dateTime=self.datetime()))
elif data != None:
#for _ in range(18):
self.HubList.append(PxHub(data=data, sNo=sNo, dateTime=self.datetime()))
if self.InstructionJson != {}:
hubCounter = 0
for item in self.HubList:
self.InstructionJson['PxHubs'][hubCounter] = item.features
hubCounter += 1
peripheral.buzzerObject(replay=1, onTime=25)
def ClearUnnecessaryFiles(self):
self.HubJson = {}
gc.collect()
def ClearAllData(self):
for item in self.HubList:
del item
self.HubList = []
self.InstructionJson['PxHubs'] = {}
self.ClearUnnecessaryFiles()
self.RemoveFile(HUBDB_JSON_FILE_NAME)
gc.collect()
def RemoveFile(self, fileName=None):
if fileName:
try:
os.remove(fileName)
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not remove file. ---{}".format(fileName))
except:
print("Unexpected error!")
raise
    def free(self):
        # Report RAM usage; local names chosen so they do not shadow the
        # imported PXSensor name 'T'.
        gc.collect()
        free_mem = gc.mem_free()
        alloc_mem = gc.mem_alloc()
        total_mem = free_mem + alloc_mem
        percent = '{0:.2f}%'.format(free_mem / total_mem * 100)
        return '[RAM] -> Total:{0} Free:{1} ({2})'.format(total_mem, free_mem, percent)
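# --- Hypothetical driving loop ---
# Assumes a MicroPython board with the PXSensor and peripheral modules
# available; 'receive_frames' stands in for whatever RF receive helper the
# surrounding firmware provides and is not a real API.
#
#   db = DataBase()
#   while True:
#       frames = receive_frames()
#       db.DefineHubObject(fullData=frames)
#       db.Process(fullData=frames)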
| 55.849398 | 559 | 0.494876 |
4c760f5d710417b40e6a1fb33ff995e7604f586d
| 6,161 |
py
|
Python
|
lego/test.py
|
tipptop3d/Horst-oder-Klaus
|
2afda1a64b5ccace74ebc7b806a5e5501c0f12bb
|
[
"MIT"
] | null | null | null |
lego/test.py
|
tipptop3d/Horst-oder-Klaus
|
2afda1a64b5ccace74ebc7b806a5e5501c0f12bb
|
[
"MIT"
] | null | null | null |
lego/test.py
|
tipptop3d/Horst-oder-Klaus
|
2afda1a64b5ccace74ebc7b806a5e5501c0f12bb
|
[
"MIT"
] | null | null | null |
"""
Test File
"""
import asyncio
import math
import matplotlib.pyplot as plt
import time
from calculus.expression import Expression
# Dummies
class Port:
A = 'A'
B = 'B'
C = 'C'
class Motor:
def __init__(self, port, gears=None) -> None:
self.port = port
self.gears = gears
def run_angle(self, speed, angle, wait=True):
# print('{}: Running to angle {} at speed {}'.format(self.port, speed, angle))
pass
def run(self, speed):
# print('{}: Running at speed {}'.format(self.port, speed))
pass
def hold(self):
pass
def wait(ms):
    # mirror the pybricks wait(), which takes milliseconds
    time.sleep(ms / 1000)
DRAW_ASPECT_RATIO = 10/6 # x:y
# Motors
motor_x = Motor(Port.A, gears=[20, 16])
motor_y = Motor(Port.B, gears=[16])
motor_z = Motor(Port.C)
# in mm
X_LENGTH = 127
X_LEFT_BOUND = -X_LENGTH/2
X_RIGHT_BOUND = X_LENGTH/2
Y_LENGTH = 91
Y_UPPER_BOUND = Y_LENGTH/2
Y_LOWER_BOUND = -Y_LENGTH/2
# in °/s
X_MAX_ANGLE_SPEED = 360
Y_MAX_ANGLE_SPEED = 720
# in °
ANGLE_TO_LIFT = 90
# Distance driven equals the amount of degrees multiplied by the angle ratio
# distance = angle * ANGLE_RATIO
# Angle needed for given distance is distance divided by angle ratio
# angle = distance / ANGLE_RATIO
# Same for speeds (replace distance with distance/time or angle with angle/time)
SIXTEEN_TEETH_GEAR_DIAMETER = 17.5 # mm
CIRCUMFERENCE = SIXTEEN_TEETH_GEAR_DIAMETER * math.pi
ANGLE_RATIO = CIRCUMFERENCE * (1/360)
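# Worked example: with the 17.5 mm gear, CIRCUMFERENCE is about 54.98 mm,
# so ANGLE_RATIO is about 0.1527 mm/deg; a 90 deg rotation therefore moves
# the pen roughly 13.7 mm, and 10 mm of travel needs roughly
# 10 / 0.1527 = 65.5 deg of rotation.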
PRECISION = 1000 # 100 intervals
# somehow the precision alters the speed, possibly resulting in bugs;
# if these bugs influence the real drawings, a new algorithm is needed
def find_start(f):
x = X_LEFT_BOUND
while x < X_RIGHT_BOUND:
x += X_LENGTH / PRECISION
y = f.evaluate(x)
if Y_LOWER_BOUND < y < Y_UPPER_BOUND:
return x, y
class Plotter:
def __init__(self, current_x=X_LEFT_BOUND, current_y=Y_UPPER_BOUND, lifted=False):
self._current_x = current_x
self._current_y = current_y
self.lifted = lifted
@property
def current_x(self):
return self._current_x
@current_x.setter
def current_x(self, value):
# print('x', value)
self._current_x = value
@property
def current_y(self):
return self._current_y
@current_y.setter
def current_y(self, value):
# print('y', value)
self._current_y = value
@property
def coords(self):
return self.current_x, self.current_y
def lift(self, wait=True):
if self.lifted:
return
motor_z.run_angle(360, ANGLE_TO_LIFT, wait=wait)
self.lifted = True
def lower(self, wait=True):
if not self.lifted:
return
motor_z.run_angle(360, -ANGLE_TO_LIFT, wait=wait)
self.lifted = False
def move_to(self, coords, wait=True, x_wait=False):
x, y = coords
# same position, no movement needed
if x == self.current_x and y == self.current_y:
return
if not (X_LEFT_BOUND <= x <= X_RIGHT_BOUND or Y_LOWER_BOUND <= y <= Y_UPPER_BOUND):
raise ValueError('Values out of bounds')
angle_x = (x - self.current_x) / ANGLE_RATIO
angle_y = (y - self.current_y) / ANGLE_RATIO
# make sure to lift before moving, but retain old lift status
was_lifted = self.lifted
if not self.lifted:
self.lift()
motor_x.run_angle(X_MAX_ANGLE_SPEED, angle_x, wait=x_wait)
motor_y.run_angle(Y_MAX_ANGLE_SPEED, angle_y, wait=wait)
if not was_lifted:
self.lower()
self.current_x = x
self.current_y = y
def draw(self, f):
"""Draws the expression
Args:
f (Expression): Expression to draw
Yields:
float: percentage of progress
"""
# first derative
fp = f.diff()
x_points = []
y_points = []
self.lift()
# calculate timings
x_speed = (X_MAX_ANGLE_SPEED * ANGLE_RATIO)
total_time = X_LENGTH / x_speed # t = s / v
average_time = total_time / PRECISION
self.move_to(find_start(f))
# draw loop
while self.current_x < X_RIGHT_BOUND:
x_angle_speed = X_MAX_ANGLE_SPEED
motor_x.run(x_angle_speed)
if not (Y_LOWER_BOUND < f.evaluate(self.current_x) < Y_UPPER_BOUND):
y_speed = 0.0
print('not in bounds')
else:
y_speed = fp.evaluate(self.current_x)
y_angle_speed = y_speed / ANGLE_RATIO
# speed factor is the ratio of y to y_max
speed_factor = 1.0
# if y-speed exceeds, slow down x-motor to retain ratio
if abs(y_angle_speed) > Y_MAX_ANGLE_SPEED:
speed_factor = abs(y_angle_speed / Y_MAX_ANGLE_SPEED)
# respect orientation
y_angle_speed = math.copysign(Y_MAX_ANGLE_SPEED, y_speed)
x_angle_speed /= speed_factor
motor_x.run(x_angle_speed)
motor_y.run(y_angle_speed)
# average time multiplied with speed factor
# gives the actual time for the current speeds
time_spent = average_time * speed_factor
# needed for loop
# s = v · t
self.current_x += (x_angle_speed * ANGLE_RATIO) * time_spent
self.current_y += (y_angle_speed * ANGLE_RATIO) * time_spent
# scatter diagram
x_points.append(self.current_x)
y_points.append(self.current_y)
percentage = (self.current_x + X_LENGTH / 2) / X_LENGTH
yield percentage
            wait(time_spent * 1000)  # time_spent is in seconds; wait() takes ms
plt.scatter(x_points, y_points)
plt.show()
async def main():
    plotter = Plotter()
    # tokens = ['(VAL:0.1)', '(VAR:x)', '(TIMES:*)', '(SIN:sin)', '(VAL:30.0)', '(TIMES:*)']
    tokens = ['(VAL:0.1)', '(VAR:x)', '(VAL:2.0)', '(POW:^)', '(TIMES:*)']
    expr = Expression(tokens)
    print(expr)
    for progress in plotter.draw(expr):
        print(progress)
if __name__ == '__main__':
asyncio.run(main())
| 25.25 | 92 | 0.603311 |
d5da25defc8cb7cc51e2168e8bc41398ce9bc71c
| 1,986 |
py
|
Python
|
test/test_npu/test_threshold.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_threshold.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_threshold.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestThreshold(TestCase):
    def cpu_op_exec(self, input1, threshold, value):
output = torch.nn.functional.threshold(input1, threshold, value)
output = output.numpy()
return output
    def npu_op_exec(self, input1, threshold, value):
output = torch.nn.functional.threshold(input1, threshold, value)
output = output.to("cpu")
output = output.numpy()
return output
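    # For reference: torch.nn.functional.threshold(x, t, v) keeps elements
    # strictly greater than t and replaces the rest with v, e.g.
    # threshold([0., 2., 3.], 2., 20.) -> [20., 20., 3.].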
def test_threshold_common_shape_format(self, device):
shape_format = [
[[np.float32, 0, (1,5)], [1.0], [20.0]],
[[np.int32, 0, (1,5)], [2], [20]],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 3)
cpu_threshold = npu_threshold = item[1][0]
cpu_value = npu_value = item[2][0]
cpu_output = self.cpu_op_exec(cpu_input1, cpu_threshold, cpu_value)
npu_output = self.npu_op_exec(npu_input1, npu_threshold, npu_value)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestThreshold, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
| 38.192308 | 79 | 0.697382 |
e6da965a7373cc43a2028ac7ca1bab7e1dfec1fd
| 1,348 |
py
|
Python
|
ppyt/indicators/breakout_indicators.py
|
yusukemurayama/ppytrading
|
9804d0de870d77bf8a1c847736a636b1342d4600
|
[
"MIT"
] | 4 |
2016-08-16T07:47:15.000Z
|
2017-12-11T10:08:47.000Z
|
ppyt/indicators/breakout_indicators.py
|
yusukemurayama/ppytrading
|
9804d0de870d77bf8a1c847736a636b1342d4600
|
[
"MIT"
] | null | null | null |
ppyt/indicators/breakout_indicators.py
|
yusukemurayama/ppytrading
|
9804d0de870d77bf8a1c847736a636b1342d4600
|
[
"MIT"
] | 2 |
2018-06-15T04:43:15.000Z
|
2020-05-02T07:47:15.000Z
|
# coding: utf-8
import logging
import numpy as np
from ppyt.indicators import IndicatorBase
from ppyt.indicators.closerecenthighlow_indicators import (
CloseGtRecentHighIndicator, CloseLtRecentLowIndicator
)
logger = logging.getLogger(__name__)
class UpperBreakoutIndicator(IndicatorBase):
"""上にブレイクアウトしたかを示す指標です。"""
_findkey = 'UpperBreakout'
def _build_indicator(self, span, **kwds):
"""indicatorのデータを組み立てます。
Args:
span: 過去何日間の高値を上に抜いたか
"""
# 当日の高値が、前日までの直近高値を超えたかの指標を取得します。
indi = CloseGtRecentHighIndicator(stock=self.stock, span=span)
arr1 = indi.data
# 1日過去にずらした配列を取得します。
arr2 = indi.shifted(-1)
# 前日は直近高値以下で、当日に直近高値を超えているかを判定します。
return np.logical_and(arr1, np.logical_not(arr2))
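    # Worked example: if indi.data is [False, True, True] and the
    # one-day-shifted copy is [False, False, True], the result is
    # [False, True, False] -- only the first day the recent high is
    # exceeded counts as a breakout.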
class LowerBreakoutIndicator(IndicatorBase):
"""下にブレイクアウトしたかを示す指標です。"""
_findkey = 'LowerBreakout'
def _build_indicator(self, span, **kwds):
"""indicatorのデータを組み立てます。
Args:
span: 過去何日間の高値を上に抜いたか
"""
# 当日の安値が、前日までの直近安値を下回った指標を取得します。
indi = CloseLtRecentLowIndicator(stock=self.stock, span=span)
arr1 = indi.data
# 1日過去にずらした配列を取得します。
arr2 = indi.shifted(-1)
# 前日は直近安値以上で、当日に直近安値未満かを判定します。
return np.logical_and(arr1, np.logical_not(arr2))
| 25.923077 | 70 | 0.669881 |
e6ea66ab1cd7de4fcef1f4dd4f93d7356e431820
| 6,103 |
py
|
Python
|
python_experiments/paper_figures/vldbj/draw_varying_c.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 8 |
2020-04-14T23:17:00.000Z
|
2021-06-21T12:34:04.000Z
|
python_experiments/paper_figures/vldbj/draw_varying_c.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | null | null | null |
python_experiments/paper_figures/vldbj/draw_varying_c.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 1 |
2021-01-17T16:26:50.000Z
|
2021-01-17T16:26:50.000Z
|
import matplotlib.pyplot as plt
from data_analysis.vldbj_data_parsing.varying_c_statistics import *
from paper_figures.vldbj.draw_indexing_time_size import TICK_SIZE, LEGEND_SIZE, LABEL_SIZE, reads_d_tag, reads_rq_tag
import json
import itertools
us_to_ms_factor = 10 ** 3
large_size_plus = 4
def get_dict(file_path):
with open(file_path) as ifs:
return json.load(ifs)
relative_path = '../..'
c_index_dict = get_dict('{}/data_analysis/data-json/varying_parameters/varying_c_index.json'.format(relative_path))
c_reads_index_dict = get_dict(
'{}/data_analysis/data-json/varying_parameters/varying_c_index_reads.json'.format(relative_path))
c_index_dict = dict(itertools.chain(c_index_dict.items(), c_reads_index_dict.items()))
c_query_dict = get_dict('{}/data_analysis/data-json/varying_parameters/varying_c_query.json'.format(relative_path))
c_reads_query_dict = get_dict(
'{}/data_analysis/data-json/varying_parameters/varying_c_query_reads.json'.format(relative_path))
c_probesim_query_dict = get_dict(
'{}/data_analysis/data-json/varying_parameters/probesim_varying_c_query.json'.format(relative_path))
c_query_dict = dict(
itertools.chain(c_query_dict.items(), c_reads_query_dict.items(), c_probesim_query_dict.items()))
# print c_query_dict
def draw_query_index_time():
exp_figure, ax_tuple = plt.subplots(1, 2, sharex=True, figsize=(16, 7))
c_lst = [0.4, 0.5, 0.6, 0.7, 0.8]
# 1st: draw querying time
def draw_querying_time():
algorithm_tag_lst = [bflpmc_tag, flpmc_tag, bprw_tag, sling_tag,
reads_d_tag, reads_rq_tag, isp_tag, tsf_tag, probesim_tag]
legend_lst = ['FBLPMC', 'FLPMC', 'BLPMC', 'SLING',
'READS-D', 'READS-Rq',
'ISP', 'TSF', 'ProbeSim']
ax = ax_tuple[0]
lst_lst = []
for idx, algorithm in enumerate(algorithm_tag_lst):
time_lst = list(map(lambda c: c_query_dict[algorithm][format_str(c)], c_lst))
time_lst = list(map(lambda val: float(val) / us_to_ms_factor if val is not None else None, time_lst))
lst_lst.append(time_lst)
color_lst = ['blue', 'orange', 'green', 'red',
'#fe01b1', '#ceb301',
'm', 'brown', 'purple', 'k', 'gray']
shape_lst = ['D-.', 's--', 'o:', 'x-',
'P-', '*-',
'v-', '^-', '+-',
'<-', '>-', ]
def get_marker_size():
if idx == 0:
return 18
elif idx == 5:
return 26
elif idx == 8:
return 30
else:
return 22
# print idx, algorithm, time_lst
ax.plot(c_lst, time_lst, shape_lst[idx], color=color_lst[idx],
markersize=get_marker_size(),
markerfacecolor='none')
# print 'after plot', idx, algorithm
# setup ticks for x and y axis
ax.set_ylim(0.4 / us_to_ms_factor, 10 ** 10.5 * 0.3 / us_to_ms_factor)
ax.set_xticks(c_lst)
ax.set_yscale('log')
# setup font size for ticks and labels
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
ax.set_ylabel('Avg Query Time (ms)', fontsize=LABEL_SIZE + large_size_plus)
ax.set_xlabel('$c$', fontsize=LABEL_SIZE + large_size_plus)
ax.grid(True, alpha=0.4)
ax.legend(legend_lst, ncol=2, prop={'size': LEGEND_SIZE - 2, "weight": "bold"}, loc=1)
# 2nd: draw the index
def draw_idx():
algorithm_tag_lst = [flp_tag, sling_tag, reads_d_tag, reads_rq_tag, tsf_tag]
legend_lst = ['FLP', 'SLING', 'READS-D', 'READS-Rq', 'TSF']
ax = ax_tuple[1]
lst_lst = []
for idx, algorithm in enumerate(algorithm_tag_lst):
time_lst = list(map(lambda c: c_index_dict[algorithm][format_str(c)], c_lst))
if algorithm in [tsf_tag]:
time_lst = list(map(lambda time_val: 0.0042 if time_val > 0.005 else time_val, time_lst))
lst_lst.append(time_lst)
shape_lst = ['D-.', 'x-', 'P-', '*-', '^-']
color_lst = ['blue', 'red', '#fe01b1', '#ceb301', 'brown']
def get_marker_size():
if idx == 0:
return 18
elif idx == 3:
return 26
else:
return 22
ax.plot(c_lst, time_lst, shape_lst[idx], color=color_lst[idx],
markersize=get_marker_size(),
markerfacecolor='none')
ax.set_yscale('log')
# setup ticks for x and y axis
ax.set_ylim(10 ** -3, 10 ** 5 * 12)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
# setup labels and grid, legend
ax.set_ylabel('Indexing Time (s)', fontsize=LABEL_SIZE + large_size_plus)
ax.set_xlabel('$c$', fontsize=LABEL_SIZE + large_size_plus)
ax.grid(True, alpha=0.2)
ax.legend(legend_lst, ncol=2, prop={'size': LEGEND_SIZE, "weight": "bold"}, loc=1)
draw_querying_time()
draw_idx()
# 3rd: save the figure
exp_figure.subplots_adjust(wspace=0)
plt.tight_layout()
plt.savefig('figures/' + 'varying_c' + '.pdf', bbox_inches='tight', dpi=300)
plt.close()
if __name__ == '__main__':
# unit: us
algorithm_lst = [bflpmc_tag, flpmc_tag, bprw_tag, sling_tag, tsf_tag, probesim_tag]
# for algorithm in algorithm_lst:
# print algorithm, c_query_dict[algorithm]
index_lst = [flp_tag, sling_tag, tsf_tag]
# for algorithm in index_lst:
# print algorithm, c_index_dict[algorithm]
draw_query_index_time()
| 40.151316 | 117 | 0.600524 |
5dc79859d9e9acdadbcf74f50a802492149ea1d8
| 3,593 |
py
|
Python
|
fcos_core/layers/sigmoid_focal_loss_bce.py
|
HaoGood/MMMC
|
24dd4ce830efc7a8dc580735903c2a776b6a1a7b
|
[
"Apache-2.0"
] | 3 |
2022-01-12T11:22:38.000Z
|
2022-01-27T02:28:30.000Z
|
fcos_core/layers/sigmoid_focal_loss_bce.py
|
HaoGood/MMMC
|
24dd4ce830efc7a8dc580735903c2a776b6a1a7b
|
[
"Apache-2.0"
] | null | null | null |
fcos_core/layers/sigmoid_focal_loss_bce.py
|
HaoGood/MMMC
|
24dd4ce830efc7a8dc580735903c2a776b6a1a7b
|
[
"Apache-2.0"
] | 1 |
2022-01-23T02:45:03.000Z
|
2022-01-23T02:45:03.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
def sigmoid_focal_loss_bce(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2.0,
funcs: str = 'train',
reduction: str = "sum",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
            positive vs negative examples. Default = 0.25.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
Returns:
Loss tensor with the reduction option applied.
"""
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
if funcs == 'stats_bce':
return ce_loss.sum(dim=1)
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
if funcs == 'stats_focal':
# loss *= targets
loss = loss.sum(dim=1)
else:
loss = loss.sum()
return loss
sigmoid_focal_loss_jit = torch.jit.script(
sigmoid_focal_loss_bce
) # type: torch.jit.ScriptModule
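# Hypothetical usage (tensor shapes and values are illustrative only, but
# the call matches the signature above):
#
#   logits = torch.randn(8, 80)                     # raw per-class scores
#   labels = torch.randint(0, 2, (8, 80)).float()   # binary targets
#   loss = sigmoid_focal_loss_bce(logits, labels, reduction="sum")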
def sigmoid_focal_loss_star(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = -1,
gamma: float = 1,
reduction: str = "none",
) -> torch.Tensor:
"""
FL* described in RetinaNet paper Appendix: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = -1 (no weighting).
gamma: Gamma parameter described in FL*. Default = 1 (no weighting).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
Returns:
Loss tensor with the reduction option applied.
"""
shifted_inputs = gamma * (inputs * (2 * targets - 1))
loss = -(F.logsigmoid(shifted_inputs)) / gamma
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss *= alpha_t
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
sigmoid_focal_loss_star_jit = torch.jit.script(
sigmoid_focal_loss_star
) # type: torch.jit.ScriptModule
| 34.883495 | 83 | 0.601169 |
539e2f99665eec34798cacbc4a08edf40cebdaf5
| 4,245 |
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/stdnum/gb/vat.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/stdnum/gb/vat.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/stdnum/gb/vat.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# vat.py - functions for handling United Kingdom VAT numbers
#
# Copyright (C) 2012-2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""VAT (United Kingdom (and Isle of Man) VAT registration number).
The VAT number can either be a 9-digit standard number, a 12-digit standard
number followed by a 3-digit branch identifier, a 5-digit number for
government departments (first two digits are GD) or a 5-digit number for
health authorities (first two digits are HA). The 9-digit variants use a
weighted checksum.
>>> validate('GB 980 7806 84')
'980780684'
>>> validate('802311781') # invalid check digit
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('980780684')
'980 7806 84'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' -.').upper().strip()
if number.startswith('GB'):
number = number[2:]
return number
def checksum(number):
"""Calculate the checksum. The checksum is only used for the 9 digits
of the number and the result can either be 0 or 42."""
weights = (8, 7, 6, 5, 4, 3, 2, 10, 1)
return sum(w * int(n) for w, n in zip(weights, number)) % 97
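# Worked example: for '980780684' the weighted sum is
# 8*9 + 7*8 + 6*0 + 5*7 + 4*8 + 3*0 + 2*6 + 10*8 + 1*4 = 291, and
# 291 % 97 == 0, so the number passes the standard 9-digit check.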
def validate(number):
"""Check if the number is a valid VAT number. This checks the length,
formatting and check digit."""
number = compact(number)
if len(number) == 5:
if not number[2:].isdigit():
raise InvalidFormat()
if number.startswith('GD') and int(number[2:]) < 500:
# government department
pass
elif number.startswith('HA') and int(number[2:]) >= 500:
# health authority
pass
else:
raise InvalidComponent()
elif len(number) == 11 and number[0:6] in ('GD8888', 'HA8888'):
if not number[6:].isdigit():
raise InvalidFormat()
if number.startswith('GD') and int(number[6:9]) < 500:
# government department
pass
elif number.startswith('HA') and int(number[6:9]) >= 500:
# health authority
pass
else:
raise InvalidComponent()
if int(number[6:9]) % 97 != int(number[9:11]):
raise InvalidChecksum()
elif len(number) in (9, 12):
if not number.isdigit():
raise InvalidFormat()
# standard number: nnn nnnn nn
    # branch trader: nnn nnnn nn nnn (ignore the last three digits)
# restarting: 100 nnnn nn
if int(number[:3]) >= 100:
if checksum(number[:9]) not in (0, 42, 55):
raise InvalidChecksum()
else:
if checksum(number[:9]) != 0:
raise InvalidChecksum()
else:
raise InvalidLength()
return number
def is_valid(number):
"""Check if the number is a valid VAT number."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
number = compact(number)
if len(number) == 5:
# government department or health authority
return number
if len(number) == 12:
# includes branch number
return number[:3] + ' ' + number[3:7] + ' ' + number[7:9] + ' ' + number[9:]
# standard number: nnn nnnn nn
return number[:3] + ' ' + number[3:7] + ' ' + number[7:]
| 34.795082 | 84 | 0.636749 |
54eeee1ef2f2e2bf0363795cd01253c4393c049c
| 21,687 |
py
|
Python
|
tests/onegov/gazette/test_views_notice.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/gazette/test_views_notice.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/gazette/test_views_notice.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from freezegun import freeze_time
from io import BytesIO
from onegov.gazette.models import GazetteNotice
from onegov.pdf.utils import extract_pdf_info
from tests.onegov.gazette.common import accept_notice
from tests.onegov.gazette.common import edit_notice
from tests.onegov.gazette.common import edit_notice_unrestricted
from tests.onegov.gazette.common import login_users
from tests.onegov.gazette.common import publish_issue
from tests.onegov.gazette.common import reject_notice
from tests.onegov.gazette.common import submit_notice
def test_view_notice(gazette_app):
# Check if the details of the notice is displayed correctly in the
# display view (that is: organization, owner, group etc).
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 11:00"):
# create a notice for each editor
for count, user in enumerate((editor_1, editor_2, editor_3)):
manage = user.get('/notices/drafted/new-notice')
manage.form['title'] = 'Titel {}'.format(count + 1)
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['at_cost'].select('yes')
manage.form['billing_address'] = 'someone\nstreet\r\nplace'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
# check if the notices are displayed correctly
for number, owner, group in (
(1, 'First', True),
(2, 'Second', True),
(3, 'Third', False),
):
for user in (editor_1, editor_2, editor_3, publisher):
view = user.get(f'/notice/titel-{number}')
assert f"Titel {number}" in view
assert "1. Oktober 2017" in view
assert "Govikon, 1. Januar 2019" in view
assert "State Chancellerist" in view
assert "Civic Community" in view
assert "Education" in view
assert "<dd>Ja</dd>" in view
assert "someone<br>street<br>place" in view
assert f"{owner} Editor" in view
assert f"+4141511227{number}" in view
assert f"<br>editor{number}@example.org" in view
if group:
assert "TestGroup" in view
else:
assert "TestGroup" not in view
assert "Nr. 44, 03.11.2017" in view
assert "Nr. 45, 10.11.2017" in view
assert "in Arbeit" in view
assert "erstellt" in view
# Check if the publication numbers are displayed
submit_notice(editor_1, 'titel-1')
submit_notice(editor_2, 'titel-2')
submit_notice(editor_3, 'titel-3')
accept_notice(publisher, 'titel-1')
accept_notice(publisher, 'titel-2')
accept_notice(publisher, 'titel-3')
publish_issue(publisher, '2017-44')
publish_issue(publisher, '2017-45')
for number in range(1, 4):
for user in (editor_1, editor_2, editor_3, publisher):
view = user.get('/notice/titel-{}'.format(number))
assert "Nr. 44, 03.11.2017 / {}".format(number) in view
assert "Nr. 45, 10.11.2017 / {}".format(number + 3) in view
def test_view_notice_actions(gazette_app):
# Check if the actions are displayed correctly in the detail view
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 11:00"):
# create a notice for each editor
for count, user in enumerate(
(editor_1, editor_2, editor_3, publisher)
):
manage = user.get('/notices/drafted/new-notice')
manage.form['title'] = 'Titel {}'.format(count + 1)
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
# check the actions
actions = {
'p': 'action-preview',
't': 'action-attachments',
'c': 'action-copy',
'e': 'action-edit',
'd': 'action-delete',
's': 'action-submit',
'a': 'action-accept',
'r': 'action-reject'
}
def check(values):
for user, slug, can in values:
view = user.get('/notice/{}'.format(slug))
cannot = [x for x in actions.keys() if x not in can]
assert all((actions[action] in view for action in can))
assert all((actions[action] not in view for action in cannot))
# ... when drafted
check((
(admin, 'titel-1', 'pteds'),
(admin, 'titel-2', 'pteds'),
(admin, 'titel-3', 'pteds'),
(admin, 'titel-4', 'pteds'),
(publisher, 'titel-1', 'pteds'),
(publisher, 'titel-2', 'pteds'),
(publisher, 'titel-3', 'pteds'),
(publisher, 'titel-4', 'pteds'),
(editor_1, 'titel-1', 'peds'),
(editor_1, 'titel-2', 'peds'),
(editor_1, 'titel-3', 'p'),
(editor_1, 'titel-4', 'p'),
(editor_2, 'titel-1', 'peds'),
(editor_2, 'titel-2', 'peds'),
(editor_2, 'titel-3', 'p'),
(editor_2, 'titel-4', 'p'),
(editor_3, 'titel-1', 'p'),
(editor_3, 'titel-2', 'p'),
(editor_3, 'titel-3', 'peds'),
(editor_3, 'titel-4', 'p'),
))
# ... when submitted
submit_notice(editor_1, 'titel-1')
submit_notice(editor_2, 'titel-2')
submit_notice(editor_3, 'titel-3')
submit_notice(publisher, 'titel-4')
check((
(admin, 'titel-1', 'ptedar'),
(admin, 'titel-2', 'ptedar'),
(admin, 'titel-3', 'ptedar'),
(admin, 'titel-4', 'ptedar'),
(publisher, 'titel-1', 'ptear'),
(publisher, 'titel-2', 'ptear'),
(publisher, 'titel-3', 'ptear'),
(publisher, 'titel-4', 'ptear'),
(editor_1, 'titel-1', 'p'),
(editor_1, 'titel-2', 'p'),
(editor_1, 'titel-3', 'p'),
(editor_1, 'titel-4', 'p'),
(editor_2, 'titel-1', 'p'),
(editor_2, 'titel-2', 'p'),
(editor_2, 'titel-3', 'p'),
(editor_2, 'titel-4', 'p'),
(editor_3, 'titel-1', 'p'),
(editor_3, 'titel-2', 'p'),
(editor_3, 'titel-3', 'p'),
(editor_3, 'titel-4', 'p'),
))
# ... when rejected
reject_notice(publisher, 'titel-1')
reject_notice(publisher, 'titel-2')
reject_notice(publisher, 'titel-3')
reject_notice(publisher, 'titel-4')
check((
(admin, 'titel-1', 'pteds'),
(admin, 'titel-2', 'pteds'),
(admin, 'titel-3', 'pteds'),
(admin, 'titel-4', 'pteds'),
(publisher, 'titel-1', 'pteds'),
(publisher, 'titel-2', 'pteds'),
(publisher, 'titel-3', 'pteds'),
(publisher, 'titel-4', 'pteds'),
(editor_1, 'titel-1', 'peds'),
(editor_1, 'titel-2', 'peds'),
(editor_1, 'titel-3', 'p'),
(editor_1, 'titel-4', 'p'),
(editor_2, 'titel-1', 'peds'),
(editor_2, 'titel-2', 'peds'),
(editor_2, 'titel-3', 'p'),
(editor_2, 'titel-4', 'p'),
(editor_3, 'titel-1', 'p'),
(editor_3, 'titel-2', 'p'),
(editor_3, 'titel-3', 'peds'),
(editor_3, 'titel-4', 'p'),
))
# ... when accepted
submit_notice(editor_1, 'titel-1')
submit_notice(editor_2, 'titel-2')
submit_notice(editor_3, 'titel-3')
submit_notice(publisher, 'titel-4')
accept_notice(publisher, 'titel-1')
accept_notice(publisher, 'titel-2')
accept_notice(publisher, 'titel-3')
accept_notice(publisher, 'titel-4')
check((
(admin, 'titel-1', 'ptedc'),
(admin, 'titel-2', 'ptedc'),
(admin, 'titel-3', 'ptedc'),
(admin, 'titel-4', 'ptedc'),
(publisher, 'titel-1', 'pedc'),
(publisher, 'titel-2', 'pedc'),
(publisher, 'titel-3', 'pedc'),
(publisher, 'titel-4', 'pedc'),
(editor_1, 'titel-1', 'pc'),
(editor_1, 'titel-2', 'pc'),
(editor_1, 'titel-3', 'pc'),
(editor_1, 'titel-4', 'pc'),
(editor_2, 'titel-1', 'pc'),
(editor_2, 'titel-2', 'pc'),
(editor_2, 'titel-3', 'pc'),
(editor_2, 'titel-4', 'pc'),
(editor_3, 'titel-1', 'pc'),
(editor_3, 'titel-2', 'pc'),
(editor_3, 'titel-3', 'pc'),
(editor_3, 'titel-4', 'pc'),
))
# ... when published
publish_issue(publisher, '2017-44')
check((
(admin, 'titel-1', 'ptec'),
(admin, 'titel-2', 'ptec'),
(admin, 'titel-3', 'ptec'),
(admin, 'titel-4', 'ptec'),
(publisher, 'titel-1', 'pec'),
(publisher, 'titel-2', 'pec'),
(publisher, 'titel-3', 'pec'),
(publisher, 'titel-4', 'pec'),
(editor_1, 'titel-1', 'pc'),
(editor_1, 'titel-2', 'pc'),
(editor_1, 'titel-3', 'pc'),
(editor_1, 'titel-4', 'pc'),
(editor_2, 'titel-1', 'pc'),
(editor_2, 'titel-2', 'pc'),
(editor_2, 'titel-3', 'pc'),
(editor_2, 'titel-4', 'pc'),
(editor_3, 'titel-1', 'pc'),
(editor_3, 'titel-2', 'pc'),
(editor_3, 'titel-3', 'pc'),
(editor_3, 'titel-4', 'pc'),
))
# ... when imported
session = gazette_app.session()
notice = session.query(GazetteNotice).filter_by(name='titel-1').one()
notice.user = None
notice.group = None
notice.source = 'source'
notice.state = 'imported'
session.flush()
import transaction
transaction.commit()
check((
(admin, 'titel-1', 'pda'),
(publisher, 'titel-1', 'pad'),
(editor_1, 'titel-1', 'p'),
))
def test_view_notice_preview(gazette_app):
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 11:00"):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = 'Titel'
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
view = editor_1.get('/notice/titel/preview')
assert "Titel" in view
assert "1. Oktober 2017" in view
assert "Govikon, 1. Januar 2019" in view
assert "State Chancellerist" in view
assert "Civic Community" not in view
assert "Education" not in view
assert "TestGroup" not in view
assert "Nr. 44, 03.11.2017" not in view
assert "Nr. 45, 10.11.2017" not in view
assert "in Arbeit" not in view
assert "erstellt" not in view
def test_view_notice_pdf_preview(gazette_app):
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 11:00"):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = 'Titel'
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
with freeze_time("2018-01-01 12:00"):
response = editor_1.get('/notice/titel/preview-pdf')
assert response.headers['Content-Type'] == 'application/pdf'
assert response.headers['Content-Disposition'] == \
'inline; filename=amtsblatt-govikon-titel.pdf'
assert extract_pdf_info(BytesIO(response.body)) == (
1,
'xxx Titel\n'
' 1. Oktober 2017\n'
' Govikon, 1. Januar 2019\n'
' State Chancellerist\n'
'© 2018 Govikon 1'
)
def test_view_notice_delete(gazette_app):
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 11:00"):
# delete a drafted notice
for user in (editor_1, publisher):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
manage = user.get('/notice/erneuerungswahlen/delete')
manage = manage.form.submit().maybe_follow()
assert "Meldung gelöscht." in manage
# delete a submitted notice
for user in (editor_1, publisher):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
submit_notice(user, 'erneuerungswahlen')
manage = user.get('/notice/erneuerungswahlen/delete')
assert manage.forms == {}
manage = admin.get('/notice/erneuerungswahlen/delete')
manage.form.submit().maybe_follow()
# delete a rejected notice
for user in (editor_1, publisher):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
submit_notice(user, 'erneuerungswahlen')
reject_notice(publisher, 'erneuerungswahlen')
manage = user.get('/notice/erneuerungswahlen/delete')
manage = manage.form.submit().maybe_follow()
assert "Meldung gelöscht." in manage
# delete an accepted notice
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
submit_notice(editor_1, 'erneuerungswahlen')
accept_notice(publisher, 'erneuerungswahlen')
manage = editor_1.get('/notice/erneuerungswahlen/delete')
assert manage.forms == {}
manage = publisher.get('/notice/erneuerungswahlen/delete')
assert "Diese Meldung wurde bereits angenommen!" in manage
manage.form.submit().maybe_follow()
# delete a published notice
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
submit_notice(editor_1, 'erneuerungswahlen')
accept_notice(publisher, 'erneuerungswahlen')
publish_issue(publisher, '2017-44')
for user in (admin, editor_1, publisher):
manage = user.get('/notice/erneuerungswahlen/delete')
assert manage.forms == {}
def test_view_notice_changelog(gazette_app):
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-11-01 10:00"):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-44', '2017-45']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
with freeze_time("2017-11-01 11:02"):
submit_notice(editor_1, 'erneuerungswahlen')
with freeze_time("2017-11-01 11:30"):
reject_notice(publisher, 'erneuerungswahlen')
with freeze_time("2017-11-01 11:45"):
edit_notice(editor_2, 'erneuerungswahlen', organization='300')
with freeze_time("2017-11-01 11:48"):
submit_notice(editor_2, 'erneuerungswahlen')
with freeze_time("2017-11-01 15:00"):
accept_notice(publisher, 'erneuerungswahlen')
with freeze_time("2017-11-01 16:00"):
publish_issue(publisher, '2017-44')
view = editor_1.get('/notice/erneuerungswahlen')
changes = [
''.join(i.strip() for i in td.itertext())
for td in view.pyquery('table.changes td')
]
changes = sorted([
(
changes[4 * i + 0],
changes[4 * i + 1],
changes[4 * i + 2],
changes[4 * i + 3]
)
for i in range(len(changes) // 4)
])
assert changes == [
('01.11.2017 11:00', 'First Editor', 'TestGroup', 'erstellt'),
('01.11.2017 12:02', 'First Editor', 'TestGroup',
'eingereicht'),
        ('01.11.2017 12:30', 'Publisher', '', 'zurückgewiesen'),
('01.11.2017 12:45', 'Second Editor', 'TestGroup', 'bearbeitet'),
('01.11.2017 12:48', 'Second Editor', 'TestGroup',
'eingereicht'),
('01.11.2017 16:00', 'Publisher', '', 'Druck beauftragt'),
('01.11.2017 16:00', 'Publisher', '', 'angenommen'),
('01.11.2017 17:00', 'Publisher', '', 'veröffentlicht')
]
def test_view_notice_copy(gazette_app):
admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)
with freeze_time("2017-10-01 12:00"):
manage = editor_1.get('/notices/drafted/new-notice')
manage.form['title'] = "Erneuerungswahlen"
manage.form['organization'] = '200'
manage.form['category'] = '11'
manage.form['issues'] = ['2017-40']
manage.form['text'] = "1. Oktober 2017"
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage.form.submit()
submit_notice(editor_1, 'erneuerungswahlen')
accept_notice(publisher, 'erneuerungswahlen')
with freeze_time("2017-10-01 12:00"):
edit_notice_unrestricted(publisher, 'erneuerungswahlen', note='NOTE!')
with freeze_time("2018-01-01 12:00"):
for user in (editor_1, editor_2, editor_3, publisher):
manage = user.get('/notice/erneuerungswahlen').click("Kopieren")
assert manage.form['title'].value == "Erneuerungswahlen"
assert manage.form['organization'].value == '200'
assert manage.form['category'].value == '11'
assert manage.form['text'].value == "1. Oktober 2017"
assert manage.form['issues'].value is None
manage.form['issues'] = ['2018-1']
manage = manage.form.submit().maybe_follow()
assert "Erneuerungswahlen" in user.get('/dashboard')
assert "Erneuerungswahlen" in user.get('/notices/drafted')
"NOTE!" in publisher.get('/notice/erneuerungswahlen-1')
"NOTE!" in publisher.get('/notice/erneuerungswahlen-2')
"NOTE!" in publisher.get('/notice/erneuerungswahlen-3')
"NOTE!" in publisher.get('/notice/erneuerungswahlen-4')
| 39.647166 | 78 | 0.552451 |
07223f6646d28bbe8361254170c56512be80ca0b
| 401 |
py
|
Python
|
PMIa/2014/KUCHERYAVENKO_A_I/task_1_38.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/KUCHERYAVENKO_A_I/task_1_38.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/KUCHERYAVENKO_A_I/task_1_38.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Задача 1. Вариант 38.
# Напишите программу, которая будет сообщать род деятельности и псевдоним под которым скрывается Аврора Жюпен. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода.
# Kucheryavenko A. I.
# 17.03.2016
print("Жорж Санд более известна, как французская писательница Амандина Аврора Люсиль Дюпен.")
input("\n\nНажмите Enter для выхода.")
| 40.1 | 205 | 0.795511 |
db8ed35f7e5c7d36b8d26c3414a13e509c3b709d
| 13,031 |
py
|
Python
|
official/nlp/bert/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
official/nlp/bert/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/bert/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data operations, will be used in run_pretrain.py
"""
import os
import math
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
from mindspore import log as logger
class BucketDatasetGenerator:
"""
Provide data distribution of different gears for the bert network.
Args:
dataset (Dataset): The training dataset.
batch_size (Int): The training batchsize.
bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.
"""
def __init__(self, dataset, batch_size, bucket_list=None):
self.dataset = dataset
self.batch_size = batch_size
self.bucket_list = bucket_list
self.data_bucket = {bucket: [] for bucket in bucket_list}
bucket_size = len(bucket_list)
self.random_list = np.random.binomial(n=(bucket_size - 1), p=0.5, size=self.__len__())
self.random_list = (self.random_list + 2) % bucket_size
self.random_list = [bucket_list[i] for i in self.random_list]
self.iter = 0
def __next__(self):
for item in self.iterator:
for seq_length in self.bucket_list:
if np.sum(item[1]) <= seq_length:
self.data_bucket[seq_length].append(item)
break
for key in self.data_bucket.keys():
data = self.data_bucket[key]
if len(data) >= self.batch_size and self.random_list[self.iter] == key:
self.data_bucket[key] = self.data_bucket[key][self.batch_size:]
arr = data[0]
for i in range(1, self.batch_size):
current_data = data[i]
for j in range(len(current_data)):
arr[j] = np.concatenate((arr[j], current_data[j]))
res = ()
for label in arr:
newlabel = np.reshape(label, (self.batch_size, -1))
res += (newlabel,)
res += (np.array(key, np.int32),)
self.iter += 1
return res
raise StopIteration
def __iter__(self):
self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)
return self
def __len__(self):
return (self.dataset.get_dataset_size() // self.batch_size) - 1
def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None, batch_size=32,
bucket_list=None):
"""create train dataset"""
# apply repeat operations
files = os.listdir(data_dir)
data_files = []
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shuffle=ds.Shuffle.FILES if do_shuffle == "true" else False,
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
if bucket_list:
bucket_dataset = BucketDatasetGenerator(data_set, batch_size, bucket_list=bucket_list)
data_set = ds.GeneratorDataset(bucket_dataset,
column_names=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights",
"sentence_flag"],
shuffle=False)
else:
data_set = data_set.batch(batch_size, drop_remainder=True)
ori_dataset_size = data_set.get_dataset_size()
print('origin dataset size: ', ori_dataset_size)
type_cast_op = C.TypeCast(mstype.int32)
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
# apply batch operations
logger.info("data size: {}".format(data_set.get_dataset_size()))
logger.info("repeat count: {}".format(data_set.get_repeat_count()))
return data_set
def create_ner_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy", data_file_path=None,
dataset_format="mindrecord", schema_file_path=None, do_shuffle=True, drop_remainder=True):
"""create finetune or evaluation dataset"""
type_cast_op = C.TypeCast(mstype.int32)
if dataset_format == "mindrecord":
dataset = ds.MindDataset([data_file_path],
columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
shuffle=do_shuffle)
else:
dataset = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
shuffle=do_shuffle)
if assessment_method == "Spearman_correlation":
type_cast_op_float = C.TypeCast(mstype.float32)
dataset = dataset.map(operations=type_cast_op_float, input_columns="label_ids")
else:
dataset = dataset.map(operations=type_cast_op, input_columns="label_ids")
dataset = dataset.map(operations=type_cast_op, input_columns="segment_ids")
dataset = dataset.map(operations=type_cast_op, input_columns="input_mask")
dataset = dataset.map(operations=type_cast_op, input_columns="input_ids")
dataset = dataset.repeat(repeat_count)
# apply batch operations
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset
def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy",
data_file_path=None, schema_file_path=None, do_shuffle=True):
"""create finetune or evaluation dataset"""
type_cast_op = C.TypeCast(mstype.int32)
data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"],
shuffle=do_shuffle)
if assessment_method == "Spearman_correlation":
type_cast_op_float = C.TypeCast(mstype.float32)
data_set = data_set.map(operations=type_cast_op_float, input_columns="label_ids")
else:
data_set = data_set.map(operations=type_cast_op, input_columns="label_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
data_set = data_set.repeat(repeat_count)
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set
def generator_squad(data_features):
for feature in data_features:
yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id)
def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None,
is_training=True, do_shuffle=True):
"""create finetune or evaluation dataset"""
type_cast_op = C.TypeCast(mstype.int32)
if is_training:
data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "start_positions",
"end_positions", "unique_ids", "is_impossible"],
shuffle=do_shuffle)
data_set = data_set.map(operations=type_cast_op, input_columns="start_positions")
data_set = data_set.map(operations=type_cast_op, input_columns="end_positions")
else:
data_set = ds.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle,
column_names=["input_ids", "input_mask", "segment_ids", "unique_ids"])
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="unique_ids")
data_set = data_set.repeat(repeat_count)
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set
def create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None, schema_dir=None):
"""create evaluation dataset"""
data_files = []
if os.path.isdir(data_dir):
files = os.listdir(data_dir)
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
else:
data_files.append(data_dir)
data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shard_equal_rows=True)
ori_dataset_size = data_set.get_dataset_size()
print("origin eval size: ", ori_dataset_size)
dtypes = data_set.output_types()
shapes = data_set.output_shapes()
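    # Append zero-filled samples so the eval data divides evenly across device
    # shards and full batches; otherwise drop_remainder would discard samples.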
output_batches = math.ceil(ori_dataset_size / device_num / batchsize)
padded_num = output_batches * device_num * batchsize - ori_dataset_size
print("padded num: ", padded_num)
if padded_num > 0:
item = {"input_ids": np.zeros(shapes[0], dtypes[0]),
"input_mask": np.zeros(shapes[1], dtypes[1]),
"segment_ids": np.zeros(shapes[2], dtypes[2]),
"next_sentence_labels": np.zeros(shapes[3], dtypes[3]),
"masked_lm_positions": np.zeros(shapes[4], dtypes[4]),
"masked_lm_ids": np.zeros(shapes[5], dtypes[5]),
"masked_lm_weights": np.zeros(shapes[6], dtypes[6])}
padded_samples = [item for x in range(padded_num)]
padded_ds = ds.PaddedDataset(padded_samples)
eval_ds = data_set + padded_ds
sampler = ds.DistributedSampler(num_shards=device_num, shard_id=rank, shuffle=False)
eval_ds.use_sampler(sampler)
else:
eval_ds = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
type_cast_op = C.TypeCast(mstype.int32)
eval_ds = eval_ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="segment_ids", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="input_mask", operations=type_cast_op)
eval_ds = eval_ds.map(input_columns="input_ids", operations=type_cast_op)
eval_ds = eval_ds.batch(batchsize, drop_remainder=True)
print("eval data size: {}".format(eval_ds.get_dataset_size()))
print("eval repeat count: {}".format(eval_ds.get_repeat_count()))
return eval_ds
| 53.187755 | 118 | 0.646919 |
37e912ce87ee8c5913f42ac0adcd81c81f7ac03a
| 204 |
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.9-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.9-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.9-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# Sorted Matrix Search
# Given a M x N matrix in which each row and each column is sorted in
# ascending order, write a method to find an element.
# time complexity: O()
# space complexity: O()
| 22.666667 | 70 | 0.696078 |
53386f341aeb3359e2968df0094cb4508536cf56
| 11,559 |
py
|
Python
|
DatenbankMain.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 15 |
2017-11-09T12:49:52.000Z
|
2022-03-06T12:18:48.000Z
|
DatenbankMain.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 40 |
2018-02-01T21:32:01.000Z
|
2022-03-22T11:35:28.000Z
|
DatenbankMain.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 13 |
2018-03-12T17:50:42.000Z
|
2022-03-06T12:21:41.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DatenbankMain.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.setWindowModality(QtCore.Qt.ApplicationModal)
Form.resize(661, 555)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.labelParameter = QtWidgets.QLabel(Form)
self.labelParameter.setObjectName("labelParameter")
self.verticalLayout.addWidget(self.labelParameter)
self.nameFilterEdit = QtWidgets.QLineEdit(Form)
self.nameFilterEdit.setObjectName("nameFilterEdit")
self.verticalLayout.addWidget(self.nameFilterEdit)
self.checkFilterTyp = QtWidgets.QCheckBox(Form)
self.checkFilterTyp.setChecked(True)
self.checkFilterTyp.setTristate(True)
self.checkFilterTyp.setObjectName("checkFilterTyp")
self.verticalLayout.addWidget(self.checkFilterTyp)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setContentsMargins(10, -1, -1, -1)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.showVorteile = QtWidgets.QCheckBox(Form)
self.showVorteile.setChecked(True)
self.showVorteile.setTristate(False)
self.showVorteile.setObjectName("showVorteile")
self.verticalLayout_3.addWidget(self.showVorteile)
self.showFertigkeiten = QtWidgets.QCheckBox(Form)
self.showFertigkeiten.setChecked(True)
self.showFertigkeiten.setObjectName("showFertigkeiten")
self.verticalLayout_3.addWidget(self.showFertigkeiten)
self.showFreieFertigkeiten = QtWidgets.QCheckBox(Form)
self.showFreieFertigkeiten.setChecked(True)
self.showFreieFertigkeiten.setObjectName("showFreieFertigkeiten")
self.verticalLayout_3.addWidget(self.showFreieFertigkeiten)
self.showUebernatuerlicheFertigkeiten = QtWidgets.QCheckBox(Form)
self.showUebernatuerlicheFertigkeiten.setMinimumSize(QtCore.QSize(200, 0))
self.showUebernatuerlicheFertigkeiten.setChecked(True)
self.showUebernatuerlicheFertigkeiten.setObjectName("showUebernatuerlicheFertigkeiten")
self.verticalLayout_3.addWidget(self.showUebernatuerlicheFertigkeiten)
self.showTalente = QtWidgets.QCheckBox(Form)
self.showTalente.setChecked(True)
self.showTalente.setObjectName("showTalente")
self.verticalLayout_3.addWidget(self.showTalente)
self.showRuestungen = QtWidgets.QCheckBox(Form)
self.showRuestungen.setChecked(True)
self.showRuestungen.setObjectName("showRuestungen")
self.verticalLayout_3.addWidget(self.showRuestungen)
self.showWaffen = QtWidgets.QCheckBox(Form)
self.showWaffen.setChecked(True)
self.showWaffen.setObjectName("showWaffen")
self.verticalLayout_3.addWidget(self.showWaffen)
self.showWaffeneigenschaften = QtWidgets.QCheckBox(Form)
self.showWaffeneigenschaften.setChecked(True)
self.showWaffeneigenschaften.setObjectName("showWaffeneigenschaften")
self.verticalLayout_3.addWidget(self.showWaffeneigenschaften)
self.showManoever = QtWidgets.QCheckBox(Form)
self.showManoever.setChecked(True)
self.showManoever.setObjectName("showManoever")
self.verticalLayout_3.addWidget(self.showManoever)
self.verticalLayout.addLayout(self.verticalLayout_3)
self.labelParameter1 = QtWidgets.QLabel(Form)
self.labelParameter1.setObjectName("labelParameter1")
self.verticalLayout.addWidget(self.labelParameter1)
self.showDeleted = QtWidgets.QCheckBox(Form)
self.showDeleted.setChecked(True)
self.showDeleted.setObjectName("showDeleted")
self.verticalLayout.addWidget(self.showDeleted)
self.showUserAdded = QtWidgets.QCheckBox(Form)
self.showUserAdded.setChecked(False)
self.showUserAdded.setObjectName("showUserAdded")
self.verticalLayout.addWidget(self.showUserAdded)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.buttonCloseDB = QtWidgets.QPushButton(Form)
self.buttonCloseDB.setMinimumSize(QtCore.QSize(0, 25))
self.buttonCloseDB.setObjectName("buttonCloseDB")
self.verticalLayout.addWidget(self.buttonCloseDB)
self.buttonLoadDB = QtWidgets.QPushButton(Form)
self.buttonLoadDB.setMinimumSize(QtCore.QSize(0, 25))
self.buttonLoadDB.setObjectName("buttonLoadDB")
self.verticalLayout.addWidget(self.buttonLoadDB)
self.buttonSaveDB = QtWidgets.QPushButton(Form)
self.buttonSaveDB.setMinimumSize(QtCore.QSize(0, 25))
self.buttonSaveDB.setObjectName("buttonSaveDB")
self.verticalLayout.addWidget(self.buttonSaveDB)
self.buttonQuicksave = QtWidgets.QPushButton(Form)
self.buttonQuicksave.setMinimumSize(QtCore.QSize(0, 25))
self.buttonQuicksave.setObjectName("buttonQuicksave")
self.verticalLayout.addWidget(self.buttonQuicksave)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.listDatenbank = QtWidgets.QListView(Form)
self.listDatenbank.setObjectName("listDatenbank")
self.verticalLayout_2.addWidget(self.listDatenbank)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.buttonHinzufuegen = QtWidgets.QPushButton(Form)
self.buttonHinzufuegen.setMinimumSize(QtCore.QSize(0, 25))
self.buttonHinzufuegen.setObjectName("buttonHinzufuegen")
self.horizontalLayout.addWidget(self.buttonHinzufuegen)
self.buttonEditieren = QtWidgets.QPushButton(Form)
self.buttonEditieren.setMinimumSize(QtCore.QSize(0, 25))
self.buttonEditieren.setObjectName("buttonEditieren")
self.horizontalLayout.addWidget(self.buttonEditieren)
self.buttonDuplizieren = QtWidgets.QPushButton(Form)
self.buttonDuplizieren.setMinimumSize(QtCore.QSize(0, 25))
self.buttonDuplizieren.setObjectName("buttonDuplizieren")
self.horizontalLayout.addWidget(self.buttonDuplizieren)
self.buttonLoeschen = QtWidgets.QPushButton(Form)
self.buttonLoeschen.setMinimumSize(QtCore.QSize(0, 25))
self.buttonLoeschen.setObjectName("buttonLoeschen")
self.horizontalLayout.addWidget(self.buttonLoeschen)
self.buttonWiederherstellen = QtWidgets.QPushButton(Form)
self.buttonWiederherstellen.setMinimumSize(QtCore.QSize(0, 25))
self.buttonWiederherstellen.setVisible(False)
self.buttonWiederherstellen.setObjectName("buttonWiederherstellen")
self.horizontalLayout.addWidget(self.buttonWiederherstellen)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.gridLayout.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
Form.setTabOrder(self.nameFilterEdit, self.checkFilterTyp)
Form.setTabOrder(self.checkFilterTyp, self.showVorteile)
Form.setTabOrder(self.showVorteile, self.showFertigkeiten)
Form.setTabOrder(self.showFertigkeiten, self.showFreieFertigkeiten)
Form.setTabOrder(self.showFreieFertigkeiten, self.showUebernatuerlicheFertigkeiten)
Form.setTabOrder(self.showUebernatuerlicheFertigkeiten, self.showTalente)
Form.setTabOrder(self.showTalente, self.showRuestungen)
Form.setTabOrder(self.showRuestungen, self.showWaffen)
Form.setTabOrder(self.showWaffen, self.showWaffeneigenschaften)
Form.setTabOrder(self.showWaffeneigenschaften, self.showManoever)
Form.setTabOrder(self.showManoever, self.showDeleted)
Form.setTabOrder(self.showDeleted, self.showUserAdded)
Form.setTabOrder(self.showUserAdded, self.buttonCloseDB)
Form.setTabOrder(self.buttonCloseDB, self.buttonLoadDB)
Form.setTabOrder(self.buttonLoadDB, self.buttonSaveDB)
Form.setTabOrder(self.buttonSaveDB, self.buttonQuicksave)
Form.setTabOrder(self.buttonQuicksave, self.listDatenbank)
Form.setTabOrder(self.listDatenbank, self.buttonHinzufuegen)
Form.setTabOrder(self.buttonHinzufuegen, self.buttonEditieren)
Form.setTabOrder(self.buttonEditieren, self.buttonDuplizieren)
Form.setTabOrder(self.buttonDuplizieren, self.buttonLoeschen)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Sephrasto - Datenbank-Editor"))
self.labelParameter.setText(_translate("Form", "Filter nach Name:"))
self.checkFilterTyp.setText(_translate("Form", "Filter nach Typ:"))
self.showVorteile.setText(_translate("Form", "Vorteile"))
self.showFertigkeiten.setText(_translate("Form", "Profane Fertigkeiten"))
self.showFreieFertigkeiten.setText(_translate("Form", "Freie Fertigkeiten"))
self.showUebernatuerlicheFertigkeiten.setText(_translate("Form", "Übernatürliche Fertigkeiten"))
self.showTalente.setText(_translate("Form", "Talente"))
self.showRuestungen.setText(_translate("Form", "Rüstungen"))
self.showWaffen.setText(_translate("Form", "Waffen"))
self.showWaffeneigenschaften.setText(_translate("Form", "Waffeneigenschaften"))
self.showManoever.setText(_translate("Form", "Manöver / Modifikationen"))
self.labelParameter1.setText(_translate("Form", "Filter nach Status:"))
self.showDeleted.setText(_translate("Form", "Gelöschte Standardelemente"))
self.showUserAdded.setText(_translate("Form", "Nur eigene Änderungen"))
self.buttonCloseDB.setText(_translate("Form", "Regelbasis schließen"))
self.buttonLoadDB.setText(_translate("Form", "Regelbasis laden"))
self.buttonSaveDB.setText(_translate("Form", "Regelbasis speichern als..."))
self.buttonQuicksave.setText(_translate("Form", "Regelbasis speichern"))
self.buttonHinzufuegen.setText(_translate("Form", "Hinzufügen"))
self.buttonEditieren.setText(_translate("Form", "Editieren"))
self.buttonDuplizieren.setText(_translate("Form", "Duplizieren"))
self.buttonLoeschen.setText(_translate("Form", "Löschen"))
self.buttonWiederherstellen.setText(_translate("Form", "Wiederherstellen"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 57.222772 | 115 | 0.739597 |
fe76e00351d56b33b2ebb4f06640167dd513b4b7
| 2,727 |
py
|
Python
|
RDS/circle1_adapters_and_ports/port_openscienceframework/tests/constant.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10 |
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle1_adapters_and_ports/port_openscienceframework/tests/constant.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78 |
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle1_adapters_and_ports/port_openscienceframework/tests/constant.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1 |
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
req = {
"userId": "admin",
"metadata": {
"@context": [
"https://w3id.org/ro/crate/1.0/context",
{
"@vocab": "https://schema.org/",
"osfcategory": "https://www.research-data-services.org/jsonld/osfcategory",
"zenodocategory": "https://www.research-data-services.org/jsonld/zenodocategory",
},
],
"@graph": [
{
"@id": "ro-crate-metadata.json",
"@type": "CreativeWork",
"about": {"@id": "./"},
"identifier": "ro-crate-metadata.json",
"conformsTo": {"@id": "https://w3id.org/ro/crate/1.0"},
"license": {"@id": "https://creativecommons.org/licenses/by-sa/3.0"},
"description": "Made with Describo: https://uts-eresearch.github.io/describo/",
},
{
"@type": "Dataset",
"datePublished": "2020-09-29T22:00:00.000Z",
"name": ["testtitle"],
"description": ["Beispieltest. Ganz viel\n\nasd mit umbruch"],
"creator": [
{"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396"},
{"@id": "#ac356e5f-fb71-400e-904e-a473c4fc890d"},
],
"zenodocategory": "publication/thesis",
"osfcategory": "analysis",
"@id": "./",
},
{
"@type": "Person",
"@reverse": {"creator": [{"@id": "./"}]},
"name": "Peter Heiss",
"familyName": "Heiss",
"givenName": "Peter",
"affiliation": [{"@id": "#4bafacfd-e123-44dc-90b9-63f974f85694"}],
"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396",
},
{
"@type": "Organization",
"name": "WWU",
"@reverse": {
"affiliation": [{"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396"}]
},
"@id": "#4bafacfd-e123-44dc-90b9-63f974f85694",
},
{
"@type": "Person",
"name": "Jens Stegmann",
"familyName": "Stegmann",
"givenName": "Jens",
"email": "",
"@reverse": {"creator": [{"@id": "./"}]},
"@id": "#ac356e5f-fb71-400e-904e-a473c4fc890d",
},
],
},
}
result = {
"data": {
"type": "nodes",
"attributes": {
"description": "Beispieltest. Ganz viel asd mit umbruch",
"category": "analysis",
"title": "testtitle",
},
}
}
| 36.36 | 97 | 0.409974 |
feb872640410fc1f83718fda29c0235d78d75632
| 349 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/17.01-Custom-Iterator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/17.01-Custom-Iterator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/17.01-Custom-Iterator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
class TopTen:
def __init__(self):
self.num = 1
def __iter__(self):
return self
def __next__(self):
if self.num <= 10:
val = self.num
self.num += 1
return val
else:
raise StopIteration
values = TopTen()
print(next(values))
for i in values:
print(i)
| 15.863636 | 31 | 0.512894 |
22a659a330aa51abf44dc2d92f08f0f29ee6700f
| 513 |
py
|
Python
|
src/onegov/fsi/views/search.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/fsi/views/search.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/fsi/views/search.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.security import Personal
from onegov.fsi import FsiApp
from onegov.org.models import Search
from onegov.org.views.search import search as search_view
from onegov.org.views.search import suggestions as suggestions_view
@FsiApp.html(model=Search, template='search.pt', permission=Personal)
def search(self, request):
return search_view(self, request)
@FsiApp.json(model=Search, name='suggest', permission=Personal)
def suggestions(self, request):
return suggestions_view(self, request)
| 32.0625 | 69 | 0.80117 |
22df2e0e0527bd6381b3b4db13c33d2e8f7708b5
| 14,158 |
py
|
Python
|
workspace/cogrob/service_manager/model/docker_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 8 |
2019-05-07T02:30:58.000Z
|
2021-12-10T18:44:45.000Z
|
workspace/cogrob/service_manager/model/docker_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 1 |
2021-03-17T07:18:23.000Z
|
2021-03-17T07:18:23.000Z
|
workspace/cogrob/service_manager/model/docker_service.py
|
CogRob/Rorg
|
dbf9d849e150404c117f6f0062476d995cec7316
|
[
"BSD-3-Clause"
] | 2 |
2019-05-21T14:15:24.000Z
|
2022-02-09T12:50:24.000Z
|
# Copyright (c) 2019, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from absl import flags
from absl import logging
from cogrob.service_manager.model import base_service
from cogrob.service_manager.model import delayed_action
from cogrob.service_manager.model import docker_py
from cogrob.service_manager.model import fake_docker_py
from cogrob.service_manager.model import service_id
from cogrob.service_manager.model import service_request
from cogrob.service_manager.proto import service_options_pb2
from cogrob.service_manager.proto import service_state_pb2
from cogrob.service_manager.util import docker_options_pb_to_py
from cogrob.service_manager.util import errors
import datetime
import dateutil.parser
import docker
import math
import random
import threading
import time
flags.DEFINE_string(
"docker_container_name_prefix", "rorg__",
"Name prefix for docker containers.")
flags.DEFINE_float(
"docker_stats_valid_time", 5.0,
"Time (in seconds) that one docker stats read will be valid.")
FLAGS = flags.FLAGS
ServiceId = service_id.ServiceId
ServiceOptions = service_options_pb2.ServiceOptions
ServiceStatePb = service_state_pb2.ServiceState
ServiceRequest = service_request.ServiceRequest
ServiceRequestId = service_request.ServiceRequestId
class DockerService(base_service.BaseService):
def __init__(self, manager):
super(DockerService, self).__init__(manager=manager)
self._docker_py_client = None
self._docker_py_inst = None
self._docker_stats = None
@staticmethod
def RestoreFromProto(service_state_pb, manager):
result = DockerService(manager=manager)
new_pb = ServiceStatePb()
new_pb.CopyFrom(service_state_pb)
result.SetStateProto(new_pb)
if result.IsInSimulation():
result._docker_py_client = fake_docker_py.GetGlobalFakeDockerClient()
else:
result._docker_py_client = docker_py.GetGlobalDockerClient()
result._docker_py_inst = result._docker_py_client.GetContainer(
result._GetContainerName())
return result
def _GetContainerName(self):
return (FLAGS.docker_container_name_prefix +
"__".join(self.GetServiceId().namespace)
+ "_" + self.GetServiceId().name)
@staticmethod
def _CheckServiceOption(pb_service_option):
assert pb_service_option.type == service_options_pb2.SERVICE_TYPE_DOCKER
docker_options = pb_service_option.docker_service_options
if docker_options.HasField("auto_remove") and docker_options.auto_remove:
raise errors.ServiceUnsupportedOptionsError(
"DockerService: auto_remove cannot be true.")
if docker_options.HasField("remove") and docker_options.remove:
raise errors.ServiceUnsupportedOptionsError(
"DockerService: remove cannot be true.")
@staticmethod
def CreateFromServiceOptionsPb(pb_service_option, manager):
assert pb_service_option.type == service_options_pb2.SERVICE_TYPE_DOCKER
pb_service_state = ServiceStatePb()
pb_service_state.id.CopyFrom(pb_service_option.id)
pb_service_state.type = pb_service_option.type
pb_service_state.options.CopyFrom(pb_service_option)
pb_service_state.status = ServiceStatePb.STATUS_STOPPED
result = DockerService(manager=manager)
result.SetStateProto(pb_service_state)
# TODO(shengye): Check the parameters and fill in the default parameters.
docker_py_args = docker_options_pb_to_py.DockerOptionsPbToDict(
result.GetStateProto().options.docker_service_options.container_options)
docker_py_args["name"] = result._GetContainerName()
    # TODO(shengye): Some parameters need to be converted to docker-py structures
options_to_remove = ["stdout", "stderr", "remove"]
for opt_to_rm_name in options_to_remove:
if opt_to_rm_name in docker_py_args:
docker_py_args.pop(opt_to_rm_name)
if result.IsInSimulation():
result._docker_py_client = fake_docker_py.GetGlobalFakeDockerClient()
else:
result._docker_py_client = docker_py.GetGlobalDockerClient()
# FIXME(shengye): Pull image from registry.
result._docker_py_inst = result._docker_py_client.CreateContainer(
**docker_py_args)
return result
def Update(self, new_options):
# TODO(shengye): When we decide to implement this Update function,
# we should check and only update those parameters that can be updated
# online
# TODO(shengye): We can also have a flag that only recreates the underlying
    # docker-py object, without touching anything else.
previous_status = self.GetStateProto().status
self.DeactivateSelf(force=True)
self._RemoveContainer()
self.GetStateProto().options.CopyFrom(new_options)
docker_py_args = docker_options_pb_to_py.DockerOptionsPbToDict(
self.GetStateProto().options.docker_service_options.container_options)
docker_py_args["name"] = self._GetContainerName()
    # TODO(shengye): Some parameters need to be converted to docker-py structures
options_to_remove = ["stdout", "stderr", "remove"]
for opt_to_rm_name in options_to_remove:
if opt_to_rm_name in docker_py_args:
docker_py_args.pop(opt_to_rm_name)
if self.IsInSimulation():
self._docker_py_client = fake_docker_py.GetGlobalFakeDockerClient()
else:
self._docker_py_client = docker_py.GetGlobalDockerClient()
# FIXME(shengye): Pull image from registry.
self._docker_py_inst = self._docker_py_client.CreateContainer(
**docker_py_args)
if previous_status == ServiceStatePb.STATUS_ACTIVE:
self.ActivateSelf()
def Remove(self):
# Here we cancel all the requests this service sent and remove self from the
# manager.
self.DeactivateSelf(force=True)
self._RemoveContainer()
self._manager.RemoveService(self.GetServiceId())
def ActivateSelf(self):
# If current state is stopped, set it to active and generate a request, and
# send to all related services.
if self.IsActive():
logging.info("No need to activate service: %s, already active",
str(self.GetServiceId()))
return []
logging.info("Activating service: %s", str(self.GetServiceId()))
self._docker_py_inst.Start()
self.GetStateProto().status = ServiceStatePb.STATUS_ACTIVE
self.GetStateProto().docker_service_state.status = (
service_state_pb2.DockerServiceState.DOCKER_STATUS_ACTIVE)
all_delayed_actions = []
# Reactivate all implied requests.
all_delayed_actions += (
self.ActRequestService(self.GetImpliedServiceRequest()))
if self.GetStateProto().options.ready_detection_method.HasField(
"wait_fixed_time"):
all_delayed_actions.append(
delayed_action.WaitUntilTimestamp(time.time() +
self.GetStateProto().options.ready_detection_method.wait_fixed_time))
elif self.GetStateProto().options.ready_detection_method.HasField(
"wait_for_prober"):
raise errors.ServiceUnsupportedOptionsError(
"{} has an unsupported wait_for_prober ReadyDetectionMethod".format(
self.GetServiceId()))
return all_delayed_actions
def DeactivateSelf(self, force=False):
if not self.IsActive():
logging.info("No need to deactivate service: %s, not active",
str(self.GetServiceId()))
return []
self.GetStateProto().status = ServiceStatePb.STATUS_TO_BE_STOPPED
if not force and self.GetStateProto().options.disable_deactivate:
raise errors.InternalError(
"Cannot deactivate {}, disable_deactivate is true.".format(
self.GetServiceId()))
if not force and self.GetStateProto().requested_by_others:
raise errors.InternalError(
"Cannot deactivate {}, requested by services: {}.".format(
self.GetServiceId(),
", ".join(map(str, self.GetStateProto().requested_by_others))))
logging.info("Deactivating service (releasing requests): %s",
str(self.GetServiceId()))
requests_by_self = [ServiceRequest.FromProto(x) for x
in self.GetStateProto().requests_by_self]
all_delayed_actions = []
for request_by_self in requests_by_self:
      # TODO(shengye): If it is a pause, we can save these somewhere else.
all_delayed_actions += self.ActReleaseService(request_by_self.request_id)
logging.info("Deactivating service (stopping docker): %s",
str(self.GetServiceId()))
self._docker_py_inst.Stop()
self.GetStateProto().docker_service_state.status = (
service_state_pb2.DockerServiceState.DOCKER_STATUS_STOPPED)
logging.info("Deactivated service: %s", str(self.GetServiceId()))
self.GetStateProto().status = ServiceStatePb.STATUS_STOPPED
# all_delayed_actions will always be [], which is OK for now.
return all_delayed_actions
def _RemoveContainer(self, force=True):
self._docker_py_inst.Remove(force=force)
def HandleRequestService(self, request):
return self.HandleRequestServiceBasic(request)
def HandleReleaseService(self, service_request_id):
return self.HandleReleaseServiceBasic(service_request_id)
def ForceRestart(self):
self._docker_py_inst.Restart()
def ActRequestService(self, service_request):
return self.ActRequestServiceBasic(service_request)
def ActReleaseService(self, service_request_id):
return self.ActReleaseServiceBasic(service_request_id)
def RefreshDockerStats(self):
if self.IsInSimulation():
return None
self._docker_stats = self._docker_py_inst.Stats(stream=False, decode=True)
return self._docker_stats
def GetDockerStats(self):
stats_dict = self._docker_stats
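    # Re-use the cached stats sample unless it is older than
    # --docker_stats_valid_time seconds, in which case re-query Docker.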
need_requery = False
if stats_dict is None:
need_requery = True
else:
read_time = dateutil.parser.parse(stats_dict["read"])
read_timestamp = (read_time - datetime.datetime(
1970, 1, 1, tzinfo=read_time.tzinfo)).total_seconds()
if time.time() - read_timestamp > FLAGS.docker_stats_valid_time:
need_requery = True
if need_requery:
self.RefreshDockerStats()
stats_dict = self._docker_stats
return stats_dict
def GetCpuUsage(self):
if not self.IsInSimulation():
# Query CPU usage from Docker, CPU usage is counted as number of logical
# cores. (Number can be greater than 1.)
      # TODO(shengye): This read takes a lot of time; it should be made asynchronous.
try:
stats_dict = self.GetDockerStats()
if "percpu_usage" in stats_dict["cpu_stats"]["cpu_usage"]:
cpu_count = len(stats_dict["cpu_stats"]["cpu_usage"]["percpu_usage"])
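          # CPU usage = (container CPU time delta) / (system CPU time delta),
          # scaled by the number of logical cores visible to the container.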
cpu_usage = (float(
stats_dict["cpu_stats"]["cpu_usage"]["total_usage"] -
stats_dict["precpu_stats"]["cpu_usage"]["total_usage"]) /
(stats_dict["cpu_stats"]["system_cpu_usage"] -
stats_dict["precpu_stats"]["system_cpu_usage"])) * cpu_count
return cpu_usage
else:
return 0
      except docker.errors.DockerException as e:
        logging.error("Docker error: %s", str(e))
        raise errors.InternalError("Docker error: {}".format(e))
else:
if not self.IsActive():
return 0
cpu_usage_pb = (
self.GetStateProto().options.simulation_parameters.cpu_usage)
if cpu_usage_pb.HasField("guassian"):
return random.gauss(cpu_usage_pb.guassian.mean,
math.sqrt(cpu_usage_pb.guassian.variance))
elif cpu_usage_pb.HasField("fixed_value"):
return cpu_usage_pb.fixed_value
else:
return None
def GetMemoryUsage(self):
if not self.IsInSimulation():
# Query memory usage from Docker, memory usage is counted in bytes.
      # TODO(shengye): This read takes a lot of time; it should be made asynchronous.
try:
stats_dict = self.GetDockerStats()
if ("memory_stats" in stats_dict
and "usage" in stats_dict["memory_stats"]):
return stats_dict["memory_stats"]["usage"]
return 0
      except docker.errors.DockerException as e:
        logging.error("Docker error: %s", str(e))
        raise errors.InternalError("Docker error: {}".format(e))
else:
if not self.IsActive():
return 0
memory_usage_pb = (
self.GetStateProto().options.simulation_parameters.memory_usage)
if memory_usage_pb.HasField("guassian"):
return random.gauss(memory_usage_pb.guassian.mean,
math.sqrt(memory_usage_pb.guassian.variance))
elif memory_usage_pb.HasField("fixed_value"):
return memory_usage_pb.fixed_value
else:
return None
| 37.957105 | 80 | 0.727928 |
43000134c8e6e3af4141bab85388c271bc0ec983
| 972 |
py
|
Python
|
Server/admin/user.py
|
TateYdq/DietRegimen
|
66c88e84b7c4d9226db1d9567d300b1c6f7344b7
|
[
"MIT"
] | null | null | null |
Server/admin/user.py
|
TateYdq/DietRegimen
|
66c88e84b7c4d9226db1d9567d300b1c6f7344b7
|
[
"MIT"
] | null | null | null |
Server/admin/user.py
|
TateYdq/DietRegimen
|
66c88e84b7c4d9226db1d9567d300b1c6f7344b7
|
[
"MIT"
] | null | null | null |
import requests
import json
from utils.config import *
ss = requests.session()
def addUser():
data = json.dumps({
"token":"1wqe213",
"name":"123",
"age":60,
"gender":"female",
"user_image_path":"test",
"diseases_focus":"123"
})
url = URL.format(env=cur_url, term=admin_url, action="/addUser")
postReq(url,ss,data,admin_token)
def updateUser():
data = json.dumps({
"token":"1wqe213",
"user_id":8,
"name":"uuuu",
"age":63,
"gender":"male",
"user_image_path":"haha",
"diseases_focus":"高血脂"
})
url = URL.format(env=cur_url, term=admin_url, action="/updateUser")
postReq(url,ss,data,admin_token)
def createAllVoice():
url = URL.format(env=cur_url, term=admin_url, action="/createAllVoice")
getReq(url, ss, "", admin_token)
def main():
# addUser()
# updateUser()
createAllVoice()
if __name__ == '__main__':
main()
| 23.707317 | 75 | 0.585391 |
4318e5247f4b8d07d4dd70cfd77815483cc783dc
| 524 |
py
|
Python
|
misc/sympy_play/sympy_test.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
misc/sympy_play/sympy_test.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
misc/sympy_play/sympy_test.py
|
YoshimitsuMatsutaIe/hoge_flow_test
|
22e2e2ce043a3107bd06449f6f9958641293e414
|
[
"MIT"
] | null | null | null |
import sympy as sy
from sympy.printing.pycode import pycode
t = sy.Symbol("t")
q1 = sy.Function("q1")
q2 = sy.Function("q2")
q3 = sy.Function("q3")
q4 = sy.Function("q4")
q5 = sy.Function("q5")
q6 = sy.Function("q6")
qvec = sy.Matrix([[q1(t), q2(t), q3(t), q4(t), q5(t), q6(t)]]).T
l0, l1, l2, l3, l4, l5, l6, h45 = sy.symbols("l0, l1, l2, l3, l4, l5, l6, h45")
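# Joint positions are accumulated link by link along the z-axis
# (base -> link 0 -> link 1), mirroring a simple serial kinematic chain.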
posi_1 = sy.Matrix([[0, 0, 0]]).T
posi_2 = posi_1 + sy.Matrix([[0, 0, l0]]).T
posi_3 = posi_2 + sy.Matrix([[0, 0, l1]]).T
code = pycode(posi_3)
print(code)
| 22.782609 | 79 | 0.593511 |
4357c36b2b65963566cfb1c5dcedc1eb5e1a7ceb
| 5,640 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/aireos/test_aireos_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/aireos/test_aireos_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/aireos/test_aireos_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.aireos import aireos_config
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..aireos_module import TestCiscoWlcModule, load_fixture
class TestCiscoWlcConfigModule(TestCiscoWlcModule):
module = aireos_config
def setUp(self):
super(TestCiscoWlcConfigModule, self).setUp()
self.mock_get_config = patch('ansible_collections.community.general.plugins.modules.network.aireos.aireos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible_collections.community.general.plugins.modules.network.aireos.aireos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.network.aireos.aireos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_save_config = patch('ansible_collections.community.general.plugins.modules.network.aireos.aireos_config.save_config')
self.save_config = self.mock_save_config.start()
def tearDown(self):
super(TestCiscoWlcConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
config_file = 'aireos_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_aireos_config_unchanged(self):
src = load_fixture('aireos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_aireos_config_src(self):
src = load_fixture('aireos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['sysname foo', 'interface address dynamic-interface mtc-1 10.33.20.4 255.255.255.0 10.33.20.2']
self.execute_module(changed=True, commands=commands)
def test_aireos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_aireos_config_save(self):
set_module_args(dict(save=True))
self.execute_module()
self.assertEqual(self.save_config.call_count, 1)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
def test_aireos_config_before(self):
set_module_args(dict(lines=['sysname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'sysname foo']
self.execute_module(changed=True, commands=commands, sort=False)
def test_aireos_config_after(self):
set_module_args(dict(lines=['sysname foo'], after=['test1', 'test2']))
commands = ['sysname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False)
def test_aireos_config_before_after_no_change(self):
set_module_args(dict(lines=['sysname router'],
before=['test1', 'test2'],
after=['test3', 'test4']))
self.execute_module()
def test_aireos_config_config(self):
config = 'sysname localhost'
set_module_args(dict(lines=['sysname router'], config=config))
commands = ['sysname router']
self.execute_module(changed=True, commands=commands)
def test_aireos_config_match_none(self):
lines = ['sysname router', 'interface create mtc-1 1']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, sort=False)
def test_nxos_config_save_always(self):
args = dict(save_when='always')
set_module_args(args)
self.execute_module()
self.assertEqual(self.save_config.call_count, 1)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
def test_nxos_config_save_changed_true(self):
args = dict(save_when='changed', lines=['sysname foo', 'interface create mtc-3 3'])
set_module_args(args)
self.execute_module(changed=True)
self.assertEqual(self.save_config.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
def test_nxos_config_save_changed_false(self):
args = dict(save_when='changed')
set_module_args(args)
self.execute_module()
self.assertEqual(self.save_config.call_count, 0)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
| 42.727273 | 137 | 0.715071 |
605e5ba542465d236c99c1654f2d7138207ee55d
| 3,751 |
py
|
Python
|
research/cv/stgcn/src/utility.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/stgcn/src/utility.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/stgcn/src/utility.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Calculate the Laplacian matrix, used for the graph-convolution weights.
Evaluate the performance of the network.
"""
import numpy as np
import mindspore.ops as ops
from scipy.linalg import fractional_matrix_power
from scipy.sparse.linalg import eigs
def calculate_laplacian_matrix(adj_mat, mat_type):
"""
calculate laplacian matrix used for graph convolution layer.
"""
n_vertex = adj_mat.shape[0]
# row sum
deg_mat_row = np.asmatrix(np.diag(np.sum(adj_mat, axis=1)))
# column sum
#deg_mat_col = np.asmatrix(np.diag(np.sum(adj_mat, axis=0)))
deg_mat = deg_mat_row
adj_mat = np.asmatrix(adj_mat)
id_mat = np.asmatrix(np.identity(n_vertex))
# Combinatorial
com_lap_mat = deg_mat - adj_mat
# For SpectraConv
# To [0, 1]
sym_normd_lap_mat = np.matmul(np.matmul(fractional_matrix_power(deg_mat, -0.5), \
com_lap_mat), fractional_matrix_power(deg_mat, -0.5))
# For ChebConv
# From [0, 1] to [-1, 1]
lambda_max_sym = eigs(sym_normd_lap_mat, k=1, which='LR')[0][0].real
wid_sym_normd_lap_mat = 2 * sym_normd_lap_mat / lambda_max_sym - id_mat
# For GCNConv
wid_deg_mat = deg_mat + id_mat
wid_adj_mat = adj_mat + id_mat
hat_sym_normd_lap_mat = np.matmul(np.matmul(fractional_matrix_power(wid_deg_mat, -0.5), \
wid_adj_mat), fractional_matrix_power(wid_deg_mat, -0.5))
# Random Walk
rw_lap_mat = np.matmul(np.linalg.matrix_power(deg_mat, -1), adj_mat)
# For SpectraConv
# To [0, 1]
rw_normd_lap_mat = id_mat - rw_lap_mat
# For ChebConv
# From [0, 1] to [-1, 1]
lambda_max_rw = eigs(rw_lap_mat, k=1, which='LR')[0][0].real
wid_rw_normd_lap_mat = 2 * rw_normd_lap_mat / lambda_max_rw - id_mat
# For GCNConv
wid_deg_mat = deg_mat + id_mat
wid_adj_mat = adj_mat + id_mat
hat_rw_normd_lap_mat = np.matmul(np.linalg.matrix_power(wid_deg_mat, -1), wid_adj_mat)
if mat_type == 'wid_sym_normd_lap_mat':
return wid_sym_normd_lap_mat
if mat_type == 'hat_sym_normd_lap_mat':
return hat_sym_normd_lap_mat
if mat_type == 'wid_rw_normd_lap_mat':
return wid_rw_normd_lap_mat
if mat_type == 'hat_rw_normd_lap_mat':
return hat_rw_normd_lap_mat
raise ValueError(f'ERROR: "{mat_type}" is unknown.')
def evaluate_metric(model, dataset, scaler):
"""
    evaluate the performance of the network.
"""
mae, sum_y, mape, mse = [], [], [], []
for data in dataset.create_dict_iterator():
x = data['inputs']
y = data['labels']
y_pred = model(x)
y_pred = ops.Reshape()(y_pred, (len(y_pred), -1))
y_pred = scaler.inverse_transform(y_pred.asnumpy()).reshape(-1)
y = scaler.inverse_transform(y.asnumpy()).reshape(-1)
d = np.abs(y - y_pred)
mae += d.tolist()
sum_y += y.tolist()
mape += (d / y).tolist()
mse += (d ** 2).tolist()
MAE = np.array(mae).mean()
MAPE = np.array(mape).mean()
RMSE = np.sqrt(np.array(mse).mean())
#WMAPE = np.sum(np.array(mae)) / np.sum(np.array(sum_y))
return MAE, RMSE, MAPE
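# Minimal self-check (hypothetical; not part of the training pipeline): build a
# tiny fully connected 3-vertex graph and request the rescaled symmetric
# Laplacian used by ChebConv-style layers.
if __name__ == '__main__':
    toy_adj = np.array([[0., 1., 1.],
                        [1., 0., 1.],
                        [1., 1., 0.]])
    print(calculate_laplacian_matrix(toy_adj, 'wid_sym_normd_lap_mat'))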
| 34.1 | 93 | 0.661157 |
717fc92d06d8b606ab09ed90b150aa3633d8cd8a
| 971 |
py
|
Python
|
face_sdk/api_usage/face_crop.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 1,329 |
2021-01-13T07:06:30.000Z
|
2022-03-31T07:23:39.000Z
|
face_sdk/api_usage/face_crop.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 115 |
2021-01-13T10:42:57.000Z
|
2022-03-28T03:57:52.000Z
|
face_sdk/api_usage/face_crop.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 351 |
2021-01-13T07:21:00.000Z
|
2022-03-29T14:11:39.000Z
|
"""
@author: JiXuan Xu, Jun Wang
@date: 20201015
@contact: [email protected]
"""
import sys
sys.path.append('.')
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
import logging.config
logging.config.fileConfig("config/logging.conf")
logger = logging.getLogger('api')
import cv2
from core.image_cropper.arcface_cropper.FaceRecImageCropper import FaceRecImageCropper
if __name__ == '__main__':
image_path = 'api_usage/test_images/test1.jpg'
image_info_file = 'api_usage/test_images/test1_landmark_res0.txt'
line = open(image_info_file).readline().strip()
landmarks_str = line.split(' ')
landmarks = [float(num) for num in landmarks_str]
face_cropper = FaceRecImageCropper()
image = cv2.imread(image_path)
cropped_image = face_cropper.crop_image_by_mat(image, landmarks)
cv2.imwrite('api_usage/temp/test1_cropped.jpg', cropped_image)
logger.info('Crop image successful!')
| 32.366667 | 86 | 0.760041 |
e0804ce8566aa4a1f9127885216a5c7be2e727e9
| 180 |
py
|
Python
|
exercises/en/solution_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/en/solution_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/en/solution_01_02_01.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
# Import spaCy
import spacy
# Create the English nlp object
nlp = spacy.blank("en")
# Process a text
doc = nlp("This is a sentence.")
# Print the document text
print(doc.text)
| 13.846154 | 32 | 0.705556 |
e0a1c10ae6390769c98af7f96fec6a9849765502
| 396 |
py
|
Python
|
exercises/ja/test_03_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/ja/test_03_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/ja/test_03_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert nlp.meta["name"] == "core_news_sm", "正しいパイプラインをロードしましたか?"
assert nlp.meta["lang"] == "ja", "正しいパイプラインをロードしましたか?"
assert "print(nlp.pipe_names)" in __solution__, "パイプラインの名前をプリントしましたか?"
assert "print(nlp.pipeline)" in __solution__, "パイプラインをプリントしましたか?"
__msg__.good(
"Well done!今あるパイプラインについて調べたくなったときは、nlp.pipe_namesやnlp.pipelineを使ってプリントしましょう。"
)
| 39.6 | 85 | 0.707071 |
e809a6fba093e1a6e7525660b1fc58ee55f01ea9
| 7,631 |
py
|
Python
|
nayuki_qrcodegen/qrcodegen-demo.py
|
TG-Techie/HackUMass0111
|
603344064605979b85a2e142caf7a2a7439d60f5
|
[
"MIT"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/qrcodegen/python/qrcodegen-demo.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2019-10-19T09:24:56.000Z
|
2019-10-20T05:37:06.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/qrcodegen/python/qrcodegen-demo.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2019-10-18T14:18:28.000Z
|
2019-10-18T14:18:28.000Z
|
#
# QR Code generator demo (Python 2, 3)
#
# Run this command-line program with no arguments. The program computes a bunch of demonstration
# QR Codes and prints them to the console. Also, the SVG code for one QR Code is printed as a sample.
#
# Copyright (c) Project Nayuki. (MIT License)
# https://www.nayuki.io/page/qr-code-generator-library
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
#
from __future__ import print_function
from qrcodegen import QrCode, QrSegment
def main():
"""The main application program."""
do_basic_demo()
do_variety_demo()
do_segment_demo()
do_mask_demo()
# ---- Demo suite ----
def do_basic_demo():
"""Creates a single QR Code, then prints it to the console."""
text = u"Hello, world!" # User-supplied Unicode text
errcorlvl = QrCode.Ecc.LOW # Error correction level
# Make and print the QR Code symbol
qr = QrCode.encode_text(text, errcorlvl)
print_qr(qr)
print(qr.to_svg_str(4))
def do_variety_demo():
"""Creates a variety of QR Codes that exercise different features of the library, and prints each one to the console."""
# Numeric mode encoding (3.33 bits per digit)
qr = QrCode.encode_text("314159265358979323846264338327950288419716939937510", QrCode.Ecc.MEDIUM)
print_qr(qr)
# Alphanumeric mode encoding (5.5 bits per character)
qr = QrCode.encode_text("DOLLAR-AMOUNT:$39.87 PERCENTAGE:100.00% OPERATIONS:+-*/", QrCode.Ecc.HIGH)
print_qr(qr)
# Unicode text as UTF-8
qr = QrCode.encode_text(u"\u3053\u3093\u306B\u3061\u0077\u0061\u3001\u4E16\u754C\uFF01\u0020\u03B1\u03B2\u03B3\u03B4", QrCode.Ecc.QUARTILE)
print_qr(qr)
# Moderately large QR Code using longer text (from Lewis Carroll's Alice in Wonderland)
qr = QrCode.encode_text(
"Alice was beginning to get very tired of sitting by her sister on the bank, "
"and of having nothing to do: once or twice she had peeped into the book her sister was reading, "
"but it had no pictures or conversations in it, 'and what is the use of a book,' thought Alice "
"'without pictures or conversations?' So she was considering in her own mind (as well as she could, "
"for the hot day made her feel very sleepy and stupid), whether the pleasure of making a "
"daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly "
"a White Rabbit with pink eyes ran close by her.", QrCode.Ecc.HIGH)
print_qr(qr)
def do_segment_demo():
"""Creates QR Codes with manually specified segments for better compactness."""
# Illustration "silver"
silver0 = "THE SQUARE ROOT OF 2 IS 1."
silver1 = "41421356237309504880168872420969807856967187537694807317667973799"
qr = QrCode.encode_text(silver0 + silver1, QrCode.Ecc.LOW)
print_qr(qr)
segs = [
QrSegment.make_alphanumeric(silver0),
QrSegment.make_numeric(silver1)]
qr = QrCode.encode_segments(segs, QrCode.Ecc.LOW)
print_qr(qr)
# Illustration "golden"
golden0 = u"Golden ratio \u03C6 = 1."
golden1 = u"6180339887498948482045868343656381177203091798057628621354486227052604628189024497072072041893911374"
golden2 = u"......"
qr = QrCode.encode_text(golden0 + golden1 + golden2, QrCode.Ecc.LOW)
print_qr(qr)
segs = [
QrSegment.make_bytes(golden0.encode("UTF-8")),
QrSegment.make_numeric(golden1),
QrSegment.make_alphanumeric(golden2)]
qr = QrCode.encode_segments(segs, QrCode.Ecc.LOW)
print_qr(qr)
# Illustration "Madoka": kanji, kana, Cyrillic, full-width Latin, Greek characters
madoka = u"\u300C\u9B54\u6CD5\u5C11\u5973\u307E\u3069\u304B\u2606\u30DE\u30AE\u30AB\u300D\u3063\u3066\u3001\u3000\u0418\u0410\u0418\u3000\uFF44\uFF45\uFF53\uFF55\u3000\u03BA\u03B1\uFF1F"
qr = QrCode.encode_text(madoka, QrCode.Ecc.LOW)
print_qr(qr)
kanjicharbits = [ # Kanji mode encoding (13 bits per character)
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1,
0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1,
0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0,
0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1,
0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1,
0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,
0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1,
0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1,
0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
]
segs = [QrSegment(QrSegment.Mode.KANJI, len(kanjicharbits) // 13, kanjicharbits)]
qr = QrCode.encode_segments(segs, QrCode.Ecc.LOW)
print_qr(qr)
def do_mask_demo():
"""Creates QR Codes with the same size and contents but different mask patterns."""
# Project Nayuki URL
segs = QrSegment.make_segments("https://www.nayuki.io/")
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.HIGH, mask=-1)) # Automatic mask
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.HIGH, mask=3)) # Force mask 3
# Chinese text as UTF-8
segs = QrSegment.make_segments(
u"\u7DAD\u57FA\u767E\u79D1\uFF08\u0057\u0069\u006B\u0069\u0070\u0065\u0064\u0069\u0061\uFF0C"
"\u8046\u807D\u0069\u002F\u02CC\u0077\u026A\u006B\u1D7B\u02C8\u0070\u0069\u02D0\u0064\u0069"
"\u002E\u0259\u002F\uFF09\u662F\u4E00\u500B\u81EA\u7531\u5167\u5BB9\u3001\u516C\u958B\u7DE8"
"\u8F2F\u4E14\u591A\u8A9E\u8A00\u7684\u7DB2\u8DEF\u767E\u79D1\u5168\u66F8\u5354\u4F5C\u8A08"
"\u756B")
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.MEDIUM, mask=0)) # Force mask 0
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.MEDIUM, mask=1)) # Force mask 1
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.MEDIUM, mask=5)) # Force mask 5
print_qr(QrCode.encode_segments(segs, QrCode.Ecc.MEDIUM, mask=7)) # Force mask 7
# ---- Utilities ----
def print_qr(qrcode):
"""Prints the given QrCode object to the console."""
border = 4
for y in range(-border, qrcode.get_size() + border):
for x in range(-border, qrcode.get_size() + border):
print(u"\u2588 "[1 if qrcode.get_module(x,y) else 0] * 2, end="")
print()
print()
# Run the main program
if __name__ == "__main__":
main()
| 40.807487 | 187 | 0.679727 |
1c3ba2386427f1f11255eabaa7bd9f3ff079e2ff
| 604 |
py
|
Python
|
exercises/fr/solution_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/solution_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/fr/solution_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
nlp = spacy.blank("fr")
# Import the Doc and Span classes
from spacy.tokens import Doc, Span
words = ["Elle", "aime", "David", "Bowie"]
spaces = [True, True, True, False]
# Create a doc from the words and spaces
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)
# Create a span for "David Bowie" from the doc
# and assign it the label "PER"
span = Span(doc, 2, 4, label="PER")
print(span.text, span.label_)
# Add the span to the doc's entities
doc.ents = [span]
# Print the texts and labels of the entities
print([(ent.text, ent.label_) for ent in doc.ents])
| 24.16 | 51 | 0.698675 |
1c4b56892f8c2ec0da93fecc9edd8661dc47cbad
| 1,050 |
py
|
Python
|
examples/all_pattern.py
|
xinetzone/dash-tests
|
cd4526caa2f9d906915c31370b3487bdcef92aa4
|
[
"Apache-2.0"
] | 1 |
2022-03-01T07:38:32.000Z
|
2022-03-01T07:38:32.000Z
|
examples/all_pattern.py
|
xinetzone/dash-tests
|
cd4526caa2f9d906915c31370b3487bdcef92aa4
|
[
"Apache-2.0"
] | 12 |
2021-07-13T12:33:36.000Z
|
2021-07-14T05:25:19.000Z
|
examples/all_pattern.py
|
xinetzone/dash-book
|
1f624e87e2aa02c9931318918df969e44bdd2c07
|
[
"Apache-2.0"
] | null | null | null |
from dash import dcc, html
from dash.dependencies import Input, Output, State, ALL
from app import app
layout = html.Div([
html.Button("Add Filter", id="add-filter", n_clicks=0),
html.Div(id='dropdown-container', children=[]),
html.Div(id='dropdown-container-output')
])
@app.callback(
Output('dropdown-container', 'children'),
Input('add-filter', 'n_clicks'),
State('dropdown-container', 'children'))
def display_dropdowns(n_clicks, children):
new_dropdown = dcc.Dropdown(
id={
'type': 'filter-dropdown',
'index': n_clicks
},
options=[{'label': i, 'value': i}
for i in ['NYC', 'MTL', 'LA', 'TOKYO']]
)
children.append(new_dropdown)
return children
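# Added note: the ALL wildcard below matches every component whose id has
# type 'filter-dropdown', so the callback receives one value per dropdown
# created by the "Add Filter" button above.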
@app.callback(
Output('dropdown-container-output', 'children'),
Input({'type': 'filter-dropdown', 'index': ALL}, 'value')
)
def display_output(values):
return html.Div([
html.Div('Dropdown {} = {}'.format(i + 1, value))
for (i, value) in enumerate(values)
])
| 27.631579 | 61 | 0.607619 |
296caaca728dcc7dc00f0e5afcaeaeb1b1d89dfb
| 5,535 |
py
|
Python
|
rengine/exercises.py
|
noahsolomon0518/rengine
|
a544951c6b7dd707586cd3c4f84ddec554f1c96a
|
[
"MIT"
] | null | null | null |
rengine/exercises.py
|
noahsolomon0518/rengine
|
a544951c6b7dd707586cd3c4f84ddec554f1c96a
|
[
"MIT"
] | null | null | null |
rengine/exercises.py
|
noahsolomon0518/rengine
|
a544951c6b7dd707586cd3c4f84ddec554f1c96a
|
[
"MIT"
] | null | null | null |
"""Exercise object that can effectively be utilized in workout class"""
import random
from statistics import mean
from copy import deepcopy
from typing import List, Tuple
import numpy as np
from rengine.config import EXERCISE_CATEGORY_DATA, EquipmentAvailable, MuscleGroup
from rengine.config import ExerciseLoad, ExerciseType, EXERCISE_DF
from rengine.config import ExperienceLevel
def pick_random_exercise(
muscle_groups_targeted: List[str],
exercise_type: ExerciseType,
allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT],
experience_levels = [ExperienceLevel.BEGINNER, ExperienceLevel.INTERMEDIATE, ExperienceLevel.EXPERIENCED],
equipment_available = EquipmentAvailable.ALL,
excluded_exercise_names: List[str] = []
):
"""Picks random exercise based on many parameters"""
global EXERCISE_DF
df = EXERCISE_DF.copy()
if(equipment_available != EquipmentAvailable.ALL):
df = df[df["Equipment"].isin(equipment_available)]
df = df[
(~df["EXERCISE"].isin(excluded_exercise_names)) &
(df["Muscle Group"].isin(muscle_groups_targeted)) &
(df[exercise_type] == 1) &
(df.loc[:,experience_levels].sum(axis = 1) > 0)
]
df.index = range(len(df.iloc[:,0]))
if(len(df) == 0):
return None
exercise_ind = random.randint(0, len(df.iloc[:,0]) - 1)
exercise_chose = df.iloc[exercise_ind, :]
return ExerciseFromTypePreset(exercise_chose["EXERCISE"], exercise_type, allowed_loads)
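# Added usage sketch (hypothetical values, not from the original module): pick a
# random hypertrophy exercise for the chest, excluding one already programmed.
# exercise = pick_random_exercise(
#     muscle_groups_targeted=[MuscleGroup.CHEST],
#     exercise_type=ExerciseType.HYPERTROPHY,
#     excluded_exercise_names=["Bench Press"],
# )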
def listify_if_non_iterable(obj):
obj = deepcopy(obj)
if(type(obj) in [tuple, list]):
return obj
return [obj]
def get_variables_based_on_exercise_type_and_load(exercise_type: ExerciseType, exercise_load: ExerciseLoad):
variables = EXERCISE_CATEGORY_DATA[exercise_type][exercise_load]
return {
"sets": variables["sets"],
"rep_range": variables["rep_range"],
"rest_time_range": variables["rest_time_range"]
}
def get_muscle_group(exercise_name):
"""Finds muscle group based on exercise name. If does not exist returns 'UNKNOWN'"""
return EXERCISE_DF[EXERCISE_DF["EXERCISE"]==exercise_name]["Muscle Group"].values[0]
class Exercise:
"""Basic implementation of an exercise"""
def __init__(self, exercise_name: str, sets, rep_range: Tuple[int], rest_time_range: Tuple[float], muscle_group: MuscleGroup = None):
self.exercise_name = exercise_name
self.sets = sets
self.rep_range = rep_range
self.rest_time_range = rest_time_range
self.muscle_group = muscle_group
@property
def length(self):
"""Length in minutes. Currently with assumption that each set takes 1 minute"""
rest_time = listify_if_non_iterable(self.rest_time_range)
return self.sets * (1 + mean(rest_time))
def __str__(self) -> str:
return f"{{exercise_name: {self.exercise_name}, muscle_group: {self.muscle_group}, sets: {str(self.sets)}, rep_range: {str(self.rep_range)}, rest_time_range: {str(self.rest_time_range)}}}"
class ExerciseFromTypePreset(Exercise):
"""Similar to Exercise class but sets, rep_range and rest_time determined by ExerciseType"""
def __init__(self, exercise_name: str, exercise_type: ExerciseType, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
self.exercise_type = exercise_type
self.exercise_load = exercise_load or self.pick_random_load(allowed_loads)
super().__init__(exercise_name = exercise_name, muscle_group = get_muscle_group(exercise_name),**get_variables_based_on_exercise_type_and_load(self.exercise_type, self.exercise_load))
def pick_random_load(self, allowed_loads):
"""Picks randomly the load based on ExerciseType and valid ExerciseLoad"""
initial_probabilities = [EXERCISE_CATEGORY_DATA[self.exercise_type][load]["chance"] for load in allowed_loads]
normalized_probabilities = [prob/sum(initial_probabilities) for prob in initial_probabilities]
return np.random.choice(allowed_loads, p = normalized_probabilities)
def __str__(self):
return Exercise.__str__(self).rstrip("}") + f", exercise_type: {self.exercise_type}, exercise_load: {self.exercise_load}}}"
class StrengthExercise(ExerciseFromTypePreset):
def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.STRENGTH, allowed_loads=allowed_loads, exercise_load=exercise_load)
class EnduranceExercise(ExerciseFromTypePreset):
def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.ENDURANCE, allowed_loads=allowed_loads, exercise_load=exercise_load)
class HypertExercise(ExerciseFromTypePreset):
def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT], exercise_load: ExerciseLoad = None):
super().__init__(exercise_name = exercise_name, exercise_type = ExerciseType.HYPERTROPHY, allowed_loads=allowed_loads, exercise_load=exercise_load)
| 46.125 | 207 | 0.737308 |
d3c20ba30c799ad0c4ece97ff2f9f54b53c716dc
| 578 |
py
|
Python
|
pacman-termux/test/pacman/tests/fileconflict006.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict006.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict006.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "dir->symlink change during package upgrade (conflict)"
p1 = pmpkg("pkg1", "1.0-1")
p1.files = ["test/",
"test/file1",
"test/dir/file1",
"test/dir/file2"]
self.addpkg2db("local", p1)
p2 = pmpkg("pkg2")
p2.files = ["test/dir/file3"]
self.addpkg2db("local", p2)
p3 = pmpkg("pkg1", "2.0-1")
p3.files = ["test2/",
"test2/file3",
"test -> test2"]
self.addpkg2db("sync", p3)
self.args = "-S pkg1"
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_VERSION=pkg1|1.0-1")
| 23.12 | 74 | 0.593426 |
733484cdfca22c03540f2693d3d1f87f9af290b4
| 4,479 |
py
|
Python
|
shinrl/solvers/discrete_vi/_target_mixin.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 34 |
2021-12-09T07:12:57.000Z
|
2022-03-11T08:17:20.000Z
|
shinrl/solvers/discrete_vi/_target_mixin.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | null | null | null |
shinrl/solvers/discrete_vi/_target_mixin.py
|
omron-sinicx/ShinRL
|
09f4ae274a33d1fc1d9d542f816aef40014af6b5
|
[
"MIT"
] | 4 |
2021-12-11T07:48:01.000Z
|
2022-03-01T23:50:33.000Z
|
"""MixIns to compute the target value of VI-based algorithms.
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from chex import Array
import shinrl as srl
class TargetMixIn:
def target_tabular_dp(self, data: srl.DataDict) -> Array:
raise NotImplementedError
def target_tabular_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
raise NotImplementedError
def target_deep_dp(self, data: srl.DataDict) -> Array:
raise NotImplementedError
def target_deep_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
raise NotImplementedError
class QTargetMixIn(TargetMixIn):
"""MixIn to compute the vanilla Q target."""
def target_tabular_dp(self, data: srl.DataDict) -> Array:
return srl.optimal_backup_dp(
data["Q"],
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
)
def target_tabular_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
return srl.optimal_backup_rl(
data["Q"][samples.next_state.squeeze(axis=1)], # BxA
samples.rew,
samples.done,
self.config.discount,
)
def target_deep_dp(self, data: srl.DataDict) -> Array:
return srl.optimal_backup_dp(
self.q_net.apply(data["QNetTargParams"], self.env.mdp.obs_mat),
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
)
def target_deep_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
return srl.optimal_backup_rl(
self.q_net.apply(data["QNetTargParams"], samples.next_obs),
samples.rew,
samples.done,
self.config.discount,
)
class DoubleQTargetMixIn(TargetMixIn):
"""MixIn to compute the Double Q target.
Paper: https://arxiv.org/abs/1509.06461
"""
def target_deep_dp(self, data: srl.DataDict) -> Array:
return srl.double_backup_dp(
self.q_net.apply(data["QNetTargParams"], self.env.mdp.obs_mat),
self.q_net.apply(data["QNetParams"], self.env.mdp.obs_mat),
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
)
def target_deep_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
return srl.double_backup_rl(
self.q_net.apply(data["QNetTargParams"], samples.next_obs),
self.q_net.apply(data["QNetParams"], samples.next_obs),
samples.rew,
samples.done,
self.config.discount,
)
class MunchausenTargetMixIn(TargetMixIn):
"""MixIn to compute the Munchausen Q target.
Paper: https://arxiv.org/abs/2007.14430
"""
def target_tabular_dp(self, data: srl.DataDict) -> Array:
return srl.munchausen_backup_dp(
data["Q"],
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
self.config.kl_coef,
self.config.er_coef,
self.config.logp_clip,
)
def target_tabular_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
return srl.munchausen_backup_rl(
data["Q"][samples.next_state.squeeze(axis=1)], # BxA
data["Q"][samples.state.squeeze(axis=1)], # BxA
samples.rew,
samples.done,
samples.act,
self.config.discount,
self.config.kl_coef,
self.config.er_coef,
self.config.logp_clip,
)
def target_deep_dp(self, data: srl.DataDict) -> Array:
return srl.munchausen_backup_dp(
self.q_net.apply(data["QNetTargParams"], self.env.mdp.obs_mat),
self.env.mdp.rew_mat,
self.env.mdp.tran_mat,
self.config.discount,
self.config.kl_coef,
self.config.er_coef,
self.config.logp_clip,
)
def target_deep_rl(self, data: srl.DataDict, samples: srl.Sample) -> Array:
return srl.munchausen_backup_rl(
self.q_net.apply(data["QNetTargParams"], samples.next_obs), # BxA
self.q_net.apply(data["QNetTargParams"], samples.obs), # BxA
samples.rew,
samples.done,
samples.act,
self.config.discount,
self.config.kl_coef,
self.config.er_coef,
self.config.logp_clip,
)
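# Added note (hedged): these mixins assume a host solver class that provides
# `self.q_net`, `self.env`, and `self.config`; a concrete solver is expected to
# combine one of them with the solver base class via multiple inheritance.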
| 32.933824 | 82 | 0.602367 |
7df6f040321310b7773154fbaea2f2f2ac5edd00
| 2,038 |
py
|
Python
|
official/cv/xception/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/xception/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/xception/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval Xception."""
import os
import numpy as np
from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export
from src.Xception import xception
from src.model_utils.config import config as args, config_gpu, config_ascend
from src.model_utils.moxing_adapter import moxing_wrapper
def modelarts_pre_process():
'''modelarts pre process function.'''
args.ckpt_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.ckpt_file)
args.file_name = os.path.join(args.output_path, args.file_name)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
'''export function'''
if args.device_target == "Ascend":
config = config_ascend
elif args.device_target == "GPU":
config = config_gpu
else:
raise ValueError("Unsupported device_target.")
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(device_id=args.device_id)
net = xception(class_num=config.class_num)
# load checkpoint
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
net.set_train(False)
image = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
export(net, image, file_name=args.file_name, file_format=args.file_format)
if __name__ == "__main__":
run_export()
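# Added usage note (hedged): a typical invocation, assuming the argument names
# exposed by src.model_utils.config match those referenced above:
# python export.py --device_target Ascend --device_id 0 --ckpt_file xception.ckpt --file_format MINDIR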
| 35.754386 | 93 | 0.722767 |
b40e21f394f4b34daf2d5543856ad12c0a26c351
| 441 |
py
|
Python
|
examples/08 langDict/LangBot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | 1 |
2020-01-16T08:40:00.000Z
|
2020-01-16T08:40:00.000Z
|
examples/08 langDict/LangBot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | null | null | null |
examples/08 langDict/LangBot.py
|
Killerhaschen/marvin-telegram-bot
|
c65e890a00450ed6ed4312d93e259db0c080ef6d
|
[
"MIT"
] | 1 |
2019-10-16T08:11:51.000Z
|
2019-10-16T08:11:51.000Z
|
import asyncio
from samt import Bot, Answer, Context, Mode
marv = Bot()
@marv.default_answer
def default():
return 'unknown', Context.get('message').text
@marv.answer("/start")
async def start():
return Answer('greeting', Context.get('user'))
@marv.answer("Guten Tag")
def guten_tag():
a = Answer('greeting', Context.get('user'))
a.language_feature = False
return a
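# Added note (hedged): the first argument to Answer ('greeting', 'unknown') is
# presumably a key into the bot's language dictionary (the example is named
# "langDict"); setting language_feature = False bypasses that lookup so the raw
# string is sent as-is.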
if __name__ == "__main__":
marv.listen()
| 16.333333 | 50 | 0.666667 |
5ef3604f75bc55c135004958ca8e50533ed54c4d
| 5,428 |
py
|
Python
|
IdeaProjects/PandasProj/PandasCourse5.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | 1 |
2018-12-30T14:07:42.000Z
|
2018-12-30T14:07:42.000Z
|
IdeaProjects/PandasProj/PandasCourse5.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | null | null | null |
IdeaProjects/PandasProj/PandasCourse5.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import pandas as pd
import numpy as np
# 1.2 Key concepts
#
# Timestamps: Timestamp
# Time indexes: DatetimeIndex
# Time conversion: to_datetime
# Time series retrieval
# Time series arithmetic
# 2. Introduction to time series analysis
#
# 2.1 Overview
#
# A time series is a statistical tool of empirical economics: a set of random variables ordered by time. GDP, the consumer price index (CPI), stock indexes, interest rates, exchange rates and so on are all time series. The sampling interval can be minutes or seconds (as in high-frequency financial data), or days, weeks, months, quarters, years, or even longer. [Wikipedia]
#
# The process of mining time series data is called time series analysis.
#
# 2.2 Common tasks
#
# Pandas is often used for time-series-related data, especially financial data. Typical needs include, but are not limited to:
#
# Generating fixed-span periods that form a time series.
# Converting an existing time series into the required format.
# Computing relative times within a series, e.g. the first week of each quarter.
# 3. Handling time series with Pandas
#
# Below, we take some common time series needs, give examples, and handle them with the methods Pandas provides.
#
# 3.1 Timestamps: Timestamp
#
# Time series data cannot do without timestamps. Pandas gives us two ways to create them: to_datetime and Timestamp.
#
# to_datetime is covered in detail later. First look at Timestamp, which targets a single scalar, for example:
print(pd.Timestamp('2017-10-01'))
# To include hours:minutes:seconds:
print(pd.Timestamp('2017-10-01 13:30:59'))
# Other input formats are supported as well, for example:
print(pd.Timestamp('1/10/2017 13:30:59'))
# 3.2 Time indexes: DatetimeIndex
#
# In practice we rarely work with a single timestamp; most of the time we use a time index made of timestamps.
#
# First, let's see how to create a time index with Pandas. The method is date_range(), which is very similar to Python's built-in range(): it creates a series of evenly spaced times to serve as the index of a Series or DataFrame.
#
# The default signature of date_range() is:
'''
pandas.date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False,
 name=None, closed=None, **kwargs)
'''
# The common parameters mean:
#
# start=: the start time
# end=: the end time
# periods=: the number of periods; if None, both start and end must be given.
# freq=: the interval, defaulting to 'D' (days); hours, minutes, seconds etc. are possible.
# tz=: the time zone.
# For example:
rng1 = pd.date_range('1/10/2017', periods=24, freq='H')
print(rng1)
# Or like this:
rng2 = pd.date_range('1/10/2017', periods=10, freq='D')
print(rng2)
# Note the possible values of the freq= parameter:
#
# freq='s': seconds
# freq='min': minutes
# freq='H': hours
# freq='D': days
# freq='w': weeks
# freq='m': months
# Besides the values above, there are some special aliases:
#
# freq='BM': the last business day of each month
# freq='W': Sundays
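# Added illustration (not in the original tutorial): the special aliases in action.
print(pd.date_range('2017-01-01', periods=3, freq='BM'))  # last business day of each month
print(pd.date_range('2017-01-01', periods=3, freq='W'))   # weekly, anchored on Sundays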
# You can also step by a combination of units, e.g. hours plus minutes, by setting freq like this:
rng3 = pd.date_range('1/10/2017', periods=20, freq='1H20min')
print(rng3)
# So with the right combination you can generate any time series index you want.
#
# 3.3 Time conversion: to_datetime
#
# to_datetime is an important Pandas method for time series work: it converts its argument into timestamps. Its default signature is:
'''
pandas.to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix')
arg: accepts integers, floats, strings, datetimes, lists, tuples, 1-d arrays, Series, etc.
errors=: defaults to 'raise', which raises on unparsable data; 'coerce' turns unparsable values into NaT; 'ignore' ignores errors.
dayfirst=: parse the day first, e.g. 1/10/17 is parsed as 2017-10-01.
yearfirst=: parse the year first, e.g. 1/10/17 is parsed as 2001-10-17.
utc=: return a UTC-localized time index.
box=: True returns a DatetimeIndex, False returns an ndarray.
format=: the parsing format, e.g. %d/%m/%Y.
Return values of to_datetime:
A list input returns a DatetimeIndex by default.
A Series input returns a Series of datetime64 by default.
A scalar input returns a Timestamp by default.
Below we look at to_datetime for each kind of input.
'''
# 3.3.1 Scalar input
print(pd.to_datetime('1/10/2017 10:00', dayfirst=True))
# 3.3.2 List input
print(pd.to_datetime(['1/10/2017 10:00','2/10/2017 11:00','3/10/2017 12:00']))
# 3.3.3 Series input
print(pd.to_datetime(pd.Series(['Oct 11, 2017', '2017-10-2', '3/10/2017']), dayfirst=True))
# 3.3.4 DataFrame input
print(pd.to_datetime(pd.DataFrame({'year': [2017, 2018], 'month': [9, 10], 'day': [1, 2], 'hour': [11, 12]})))
# 3.3.5 errors=
#
# Next, look at what errors= returns when it meets unparsable data. This parameter is very useful when parsing large amounts of data.
# print(pd.to_datetime(['2017/10/1', 'abc'], errors='raise'))  # raises ValueError
print(pd.to_datetime(['2017/10/1', 'abc'], errors='ignore'))
print(pd.to_datetime(['2017/10/1', 'abc'], errors='coerce'))
# 3.4 Time series retrieval
#
# Above we saw how to generate a DatetimeIndex. So what is it mainly used for?
#
# Indexing Pandas objects, of course. Turning times into an index has many advantages, including but not limited to:
#
# Looking up and retrieving fields for specific dates is very fast.
# Data alignment is very fast for objects whose indexes share the same frequency.
# Objects can be moved quickly and conveniently with the shift and tshift methods.
# Below are a few examples of retrieval on a time series index. First, generate 100,000 rows:
ts = pd.DataFrame(np.random.randn(100000,1), columns=['Value'], index=pd.date_range('20170101', periods=100000, freq='T'))
print(ts)
# Fast retrieval works exactly as it does on any Series or DataFrame. For example:
#
# Retrieve the data for 2 March 2017:
print(ts['2017-3-2'])
# Retrieve the data between 14:00 and 17:23 on 2 March 2017:
print(ts['2017-3-2 14:00:00':'2017-3-2 17:23:00'])
# In short, every selection method available on Series and DataFrame, such as iloc() and loc(), also works on time series, so we will not repeat them here.
#
# 3.5 Time series arithmetic
#
# Pandas contains many classes that can take part in time series arithmetic; these are called Offsets objects.
#
# A few examples make this clear at a glance. For example:
from pandas.tseries import offsets  # load offsets
dt = pd.Timestamp('2017-10-1 10:59:59')
print(dt + offsets.DateOffset(months=1, days=2, hours=3))  # add time (note: plural hours= adds, singular hour= replaces)
# Or subtract three weeks:
print(dt - offsets.Week(3))
# 3.6 Other methods
#
# Finally, a few more methods related to time series handling.
#
# Shifting
#
# Shifting moves the data or the time index forwards or backwards along the time axis, for example:
# Generate a time series dataset
ts = pd.DataFrame(np.random.randn(7,2), columns=['Value1','Value2' ], index=pd.date_range('20170101', periods=7, freq='T'))
# Now start shifting.
print(ts.shift(3))
# By default the data moves backwards; here the values shifted back 3 rows.
print(ts.shift(-3))
# A minus sign moves them forwards.
# And to move the index instead? Use tshift().
print(ts.tshift(3))
# Moving the index forwards is not shown again; it works the same way with a minus sign. Besides this, shift() accepts parameters such as freq=, with the same meaning as described for DatetimeIndex above.
#
# For example:
print(ts.shift(3, freq='D'))  # move the dates back 3 days
# So shifting lets us manipulate time series datasets flexibly, e.g. for data alignment.
#
# Resampling: resample
#
# Resampling converts a time series from one frequency to another. Situations where resampling applies:
#
# Sometimes the time series dataset is very large, millions of rows or more. Using all of it in later computations is often unnecessary, so we can downsample the original series.
# Resampling is also used for data alignment: two datasets whose time indexes differ in frequency can be resampled to a common frequency, making merges and computations easy.
# Below, let's see resample() in use. First, generate a dataset again.
# Generate a time series dataset
ts = pd.DataFrame(np.random.randn(50,1), columns=['Value' ], index=pd.date_range('2017-01', periods=50, freq='D'))
print(ts)
# First, upsample to hourly intervals; because the interval shrinks, the newly created rows must be filled.
print(ts.resample('H').ffill())
# Next, downsample from 1 day to 5 days:
print(ts.resample('5D').sum())
| 24.232143 | 172 | 0.718312 |
6f568ef4980832456ce6c631d796eccf3dbb92df
| 1,365 |
py
|
Python
|
packages/watchmen-model/src/watchmen_model/system/data_source.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/system/data_source.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/system/data_source.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import List, Optional, Union
from pydantic import BaseModel
from watchmen_model.common import DataModel, DataSourceId, OptimisticLock, TenantBasedTuple
from watchmen_utilities import ArrayHelper
class DataSourceParam(DataModel, BaseModel):
name: str = None
value: str = None
class DataSourceType(str, Enum):
	MYSQL = 'mysql'
	ORACLE = 'oracle'
	MONGODB = 'mongodb'
	MSSQL = 'mssql'
	POSTGRESQL = 'postgresql'
def construct_param(param: Optional[Union[dict, DataSourceParam]]) -> Optional[DataSourceParam]:
if param is None:
return None
elif isinstance(param, DataSourceParam):
return param
else:
return DataSourceParam(**param)
def construct_params(params: Optional[list] = None) -> Optional[List[DataSourceParam]]:
if params is None:
return None
else:
return ArrayHelper(params).map(lambda x: construct_param(x)).to_list()
class DataSource(TenantBasedTuple, OptimisticLock, BaseModel):
dataSourceId: DataSourceId = None
dataSourceCode: str = None
dataSourceType: DataSourceType = None
host: str = None
port: str = None
username: str = None
password: str = None
name: str = None
url: str = None
params: List[DataSourceParam] = []
def __setattr__(self, name, value):
if name == 'params':
super().__setattr__(name, construct_params(value))
else:
super().__setattr__(name, value)
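# Added usage sketch (hedged, not part of the original module): `params` passed as
# raw dicts are coerced into DataSourceParam instances by the __setattr__ hook above.
# ds = DataSource(dataSourceCode='demo', dataSourceType=DataSourceType.MYSQL,
#                 params=[{'name': 'charset', 'value': 'utf8mb4'}])
# assert isinstance(ds.params[0], DataSourceParam)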
| 24.375 | 96 | 0.747985 |
6f6176605e1cf42838a610e1b32a84c5323efe5f
| 1,479 |
py
|
Python
|
covidash/data/preprocessing.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | 1 |
2020-05-11T17:54:58.000Z
|
2020-05-11T17:54:58.000Z
|
covidash/data/preprocessing.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | null | null | null |
covidash/data/preprocessing.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import json
import numpy as np
import pprint
from pathlib import Path
# from shapely.geometry import Polygon, Point
import geopandas as gpd
from tqdm import tqdm
def read_json_data(json_file):
with open(json_file) as json_file:
json_data = json.load(json_file)
return json_data
def merge_hopital_with_state_info():
file_name = "../../data/processed/osm_hospital_locations_germany_with_States.geojson"
if not Path(file_name).is_file():
gdf_states = gpd.read_file("../../data/raw/bundeslaender_grenzen_2019.geojson")
gdf_hospitals = gpd.read_file("../../data/raw/osm_hospital_locations_germany.json").drop([166, 284, 285, 291])
hospital_state_map = {}
for idx, coord in gdf_hospitals.iterrows():
for _idx, _row in gdf_states.iterrows():
if coord['geometry'].within(_row.geometry):
hospital_state_map[idx] = _row.LAN_GEN
gdf_hospitals['hospital_index'] = gdf_hospitals.index.tolist()
def hosp_index_to_state(state_map, item_idx):
try:
state_name = state_map[item_idx]
return state_name
except KeyError:
return np.nan
gdf_hospitals['state_name'] = gdf_hospitals.hospital_index.apply(
lambda row: hosp_index_to_state(hospital_state_map, row))
gdf_hospitals.to_file("../../data/processed/osm_hospital_locations_germany_with_States.geojson",driver='GeoJSON')
| 33.613636 | 121 | 0.678161 |
d28f383b3b641be6fe37ff39d7878d795e95a3d2
| 1,651 |
py
|
Python
|
rawio/grouping/timestamp.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
rawio/grouping/timestamp.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
rawio/grouping/timestamp.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
#
# RawIO
# Copyright (c) 2021 Yusuf Olokoba.
#
from dateutil.parser import parse as parse_datetime
from PIL import Image
from typing import Callable
def timestamp_similarity (max_delta_time: float=6.) -> Callable[[str, str], bool]:
"""
Create a similarity function which uses temporal proximity as a proxy measure.
Parameters:
max_delta_time (float): Maximum exposure time difference for images to be considered similar, in seconds.
Returns:
callable: Pairwise image similarity function returning a boolean.
"""
def similarity_fn (path_a: str, path_b: str) -> bool:
# Load images
image_a = Image.open(path_a)
image_b = Image.open(path_b)
# Check sizes
if image_a.size != image_b.size:
return False
# Check timestamps
timestamp_a = exposure_timestamp(image_a)
timestamp_b = exposure_timestamp(image_b)
delta_time = abs(timestamp_a - timestamp_b)
return timestamp_a > 0 and timestamp_b > 0 and delta_time <= max_delta_time
return similarity_fn
def exposure_timestamp (image: Image.Image) -> float:
"""
Get the exposure timestamp from its EXIF metadata.
If the required EXIF dictionary or tag is not present, `-1` will be returned.
Parameters:
image (PIL.Image): Exposure.
Returns:
float: Image timestamp.
"""
DATETIME_ORIGINAL = 36867
timestamp = image.getexif().get(DATETIME_ORIGINAL)
if timestamp:
timestamp = str(timestamp)
datetime = parse_datetime(timestamp)
return datetime.timestamp()
else:
return -1
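# Added usage sketch (hypothetical file names, not from the original module):
# similar = timestamp_similarity(max_delta_time=6.)
# if similar('burst_0001.jpg', 'burst_0002.jpg'):
#     print('same burst')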
| 31.150943 | 113 | 0.667474 |
815caa00acc103b5561bd58e974267e4156adda5
| 5,387 |
py
|
Python
|
MA/Robot_V002b.py
|
mirrorcoloured/slcypi
|
c47975b3523f770d12a521c82e2dfca181e3f35b
|
[
"MIT"
] | null | null | null |
MA/Robot_V002b.py
|
mirrorcoloured/slcypi
|
c47975b3523f770d12a521c82e2dfca181e3f35b
|
[
"MIT"
] | null | null | null |
MA/Robot_V002b.py
|
mirrorcoloured/slcypi
|
c47975b3523f770d12a521c82e2dfca181e3f35b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
sys.path.append("/home/pi/Documents/Robots/slcypi/MA") ### ADD PATH
sys.path.append("/home/pi/Documents/Robots/slcypi/HAT_Python3") ### ADD PATH
import time
from time import sleep
import atexit
import pygame
import pygame.camera
from PIL import Image
#from pylab import *
from Tank import Tank
from ImageAnalysis import ImageAnalysis
# Settings
WIDTH = 320
HEIGHT = 240
# Pygame and camera initialize
pygame.init()
pygame.display.set_caption('My Robot')
pygame.camera.init()
screen = pygame.display.set_mode((WIDTH,HEIGHT),0)
cam_list = pygame.camera.list_cameras()
cam = pygame.camera.Camera(cam_list[0],(WIDTH,HEIGHT))
cam.start()
robot = Tank()
robot.correctDirections(False,False,True)
IA = ImageAnalysis()
followLine = False
try:
print('starting loop')
done = False
while not done:
# Camera
#sleep(5) # Sleep such that camera will get current image
image1 = cam.get_image()
#image1 = pygame.transform.scale(image1,(640,480))
#image1 = pygame.transform.flip(image1,1,1)
screen.blit(image1,(0,0))
pygame.display.update()
# User events
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == (pygame.K_UP):
robot.driveSync(1)
if event.key == (pygame.K_DOWN):
robot.driveSync(-1)
if (event.key == pygame.K_ESCAPE):
done = True
if (event.key == pygame.K_LEFT):
robot.rotateSync(1,45)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(-1,45)
if (event.key == pygame.K_q):
followLine = True
#robot.driveSync(1,50)
#sleep(1)
#robot.driveSync(1,40)
if (event.key == pygame.K_w):
followLine = False
robot.driveSync(0)
robot.rotateSync(0)
if (event.key == pygame.K_s):
# Set target
rgb = IA.setTarget(image1,WIDTH,HEIGHT)
image1.fill(rgb)
screen.blit(image1,(0,0))
pygame.display.update()
sleep(5)
                if (event.key == pygame.K_r):
# Analyze
image1 = IA.convertRainbow(image1,WIDTH,HEIGHT)
screen.blit(image1,(0,0))
pygame.display.update()
sleep(5)
                if (event.key == pygame.K_a):
# Analyze
image1 = IA.convertTrueFalse(image1,WIDTH,HEIGHT)
screen.blit(image1,(0,0))
pygame.display.update()
sleep(5)
                if (event.key == pygame.K_c):
pos = IA.getLinePosition(image1,WIDTH,HEIGHT)
print(pos)
if event.type == pygame.KEYUP:
if event.key == (pygame.K_UP):
robot.driveSync(0)
if event.key == (pygame.K_DOWN):
robot.driveSync(0)
if (event.key == pygame.K_LEFT):
robot.rotateSync(0)
if (event.key == pygame.K_RIGHT):
robot.rotateSync(0)
if followLine == True:
pos = IA.getLinePosition(image1,WIDTH,HEIGHT)
print(pos)
if abs(pos) >0.5:
if pos > 0:
robot.rotateSync(-1)
sleep(0.01)
robot.rotateSync(0)
else:
robot.rotateSync(1)
sleep(0.01)
robot.rotateSync(0)
else:
robot.driveSync(1)
sleep(0.01)
robot.driveSync(0)
except KeyboardInterrupt:
pygame.quit()
robot.stop()
cam.stop()
pygame.quit()
| 41.75969 | 101 | 0.374234 |
6f9c5de3ca64f08169d537485009741bbe8b12c3
| 1,623 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_alternative/item_alternative.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_alternative/item_alternative.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_alternative/item_alternative.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class ItemAlternative(Document):
def validate(self):
self.has_alternative_item()
self.validate_alternative_item()
self.validate_duplicate()
def has_alternative_item(self):
if (self.item_code and
not frappe.db.get_value('Item', self.item_code, 'allow_alternative_item')):
frappe.throw(_("Not allow to set alternative item for the item {0}").format(self.item_code))
def validate_alternative_item(self):
if self.item_code == self.alternative_item_code:
frappe.throw(_("Alternative item must not be same as item code"))
def validate_duplicate(self):
if frappe.db.get_value("Item Alternative", {'item_code': self.item_code,
'alternative_item_code': self.alternative_item_code, 'name': ('!=', self.name)}):
frappe.throw(_("Already record exists for the item {0}".format(self.item_code)))
def get_alternative_items(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(""" (select alternative_item_code from `tabItem Alternative`
where item_code = %(item_code)s and alternative_item_code like %(txt)s)
union
(select item_code from `tabItem Alternative`
where alternative_item_code = %(item_code)s and item_code like %(txt)s
and two_way = 1) limit {0}, {1}
""".format(start, page_len), {
"item_code": frappe.db.escape(filters.get('item_code')),
"txt": "%%%s%%" % frappe.db.escape(txt)
})
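# Added note (hedged): query functions like get_alternative_items are normally
# wired to a Link field from client-side script, e.g. frm.set_query(...) pointing
# at "erpnext.stock.doctype.item_alternative.item_alternative.get_alternative_items".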
| 40.575 | 95 | 0.741836 |
6fad16cee289abdaf9a983e71d23f92f5203a4e3
| 772 |
py
|
Python
|
main.py
|
jakoubek/Technotes
|
7748d20100706e408e1d89c841098f987d0e1492
|
[
"MIT"
] | null | null | null |
main.py
|
jakoubek/Technotes
|
7748d20100706e408e1d89c841098f987d0e1492
|
[
"MIT"
] | 3 |
2022-03-27T08:37:43.000Z
|
2022-03-30T15:23:46.000Z
|
main.py
|
jakoubek/Technotes
|
7748d20100706e408e1d89c841098f987d0e1492
|
[
"MIT"
] | null | null | null |
def define_env(env):
"Definition of the module"
@env.macro
def feedback(title, section, slug):
email_address = f"{section}+{slug}@technotes.jakoubek.net"
md = "\n\n## Feedback / Kontakt\n\n"
md += f"Wenn Sie Fragen oder Anregungen zum Artikel *{title}* haben, senden Sie mir bitte eine E-Mail an: [{email_address}](mailto:{email_address}?subject=[Technotes] {title})"
return md
# def on_post_page_macros(env):
# env.raw_markdown += "{{ feedback(page.meta.title, page.meta.section, page.meta.slug) }}"
#     env.raw_markdown += '\n\n## Feedback / Contact\n\n'
#     env.raw_markdown += 'If you have questions or suggestions about the article *' + \
#         env.page.title + '* ...'
# env.raw_markdown += env.page.abs_url
| 42.888889 | 184 | 0.643782 |