repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (22 classes) | size (string, lengths 4-7) | content (string, lengths 626-1.05M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 5.21-99.9) | line_max (int64, 12-999) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
SouthForkResearch/CHaMP_Metrics
|
tools/topoauxmetrics/methods/substrate.py
|
1
|
2066
|
from lib.sitkaAPI import latestMetricInstances
from metricsbychannelunit import metricsByChannelUnit, emptiesByChannelUnit
from lib.exception import MissingException
from lib.metrics import CHaMPMetric
class SubstrateMetrics(CHaMPMetric):

    TEMPLATE = {}

    # Definitions of names keyed to the API measurements that comprise each metric.
    # Note that the code that does the calculation expects the API measurement names
    # in a list because some metric types incorporate multiple API measurements.
    dSubstrateClasses = {
        'Bldr': (['Boulders'], True),
        'Cbl': (['Cobbles'], True),
        'Grvl': (['CourseGravel', 'FineGravel'], True),
        'SandFines': (['Fines', 'Sand'], True)
    }

    def __init__(self, apiData):
        if SubstrateMetrics.TEMPLATE == {}:
            SubstrateMetrics.TEMPLATE = emptiesByChannelUnit(SubstrateMetrics.dSubstrateClasses)
        super(SubstrateMetrics, self).__init__(apiData)

    def calc(self, apiData):
        """
        Calculate substrate metrics
        :param apiData: dictionary of API data. Key is API call name. Value is API data
        :return: metrics dictionary
        """
        self.log.info("Running Substrate Metrics")

        if 'SubstrateCover' not in apiData:
            raise MissingException("SubstrateCover missing from apiData")

        # Retrieve the substrate cover API data
        substrateCoverVals = [val['value'] for val in apiData['SubstrateCover']['values']]

        if 'ChannelUnitMetrics' not in apiData:
            raise MissingException('Missing channel metric instances')

        # Retrieve the channel unit metrics
        channelInstances = latestMetricInstances(apiData['ChannelUnitMetrics'])
        channelUnitMeasurements = apiData['ChannelUnitMeasurements']
        if channelInstances is None:
            raise MissingException('Missing channel metric instances')

        # calculate metrics
        self.metrics = metricsByChannelUnit(SubstrateMetrics.dSubstrateClasses, channelInstances,
                                            substrateCoverVals, channelUnitMeasurements)
|
gpl-3.0
| -706,549,787,563,906,700 | 38.730769 | 142 | 0.695547 | false |
ways/rpisensor
|
ds18b20_mqtt.py
|
1
|
1349
|
#!/usr/bin/env python3
import os # os.sys.exit
import socket # hostname
import paho.mqtt.publish as publish # mosquitto
import RPi.GPIO as GPIO # gpio setup
GPIO.setmode(GPIO.BCM)
import w1thermsensor
# sudo pip3 install paho-mqtt w1thermsensor
mosquittoserver='192.168.1.11'
mosquittoport=1883
max_reading=100
min_reading=-70
verbose=True
pin=14
#Hostname is used in topic
hostname=socket.gethostname()
# Functions
def append_message(messages, topic, payload):
    messages.append({
        'topic': topic,
        'payload': payload})
    changed = True
# Initialize sensor setups
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
messages=[]
for count, w1 in enumerate(w1thermsensor.W1ThermSensor.get_available_sensors()):
    input = None
    try:
        input = float("%.1f" % w1.get_temperature())
    except ValueError:
        continue
    except w1thermsensor.core.SensorNotReadyError:
        continue
    if input and min_reading < input and max_reading > input:
        append_message(messages, hostname + '/ds18b20_' + str(w1.id), input)

if 0 < len(messages):
    if verbose:
        print(messages)
    try:
        publish.multiple(messages, hostname=mosquittoserver, port=mosquittoport, client_id="", keepalive=60)
    except Exception as err:
        print("*** Error sending message *** %s." % err)
|
gpl-3.0
| -2,199,302,055,027,417,000 | 23.981481 | 104 | 0.683469 | false |
binarybottle/mindboggle_sidelined
|
freesurfer.py
|
1
|
26053
|
#!/usr/bin/env python
"""
Functions that use FreeSurfer commands.
Authors:
- Arno Klein, 2012-2013 ([email protected]) http://binarybottle.com
Copyright 2013, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def label_with_classifier(subject, hemi, left_classifier='',
right_classifier='', annot_file='',
subjects_directory=''):
"""
Label a brain with the DKT atlas using FreeSurfer's mris_ca_label
FreeSurfer documentation ::
SYNOPSIS
mris_ca_label [options] <subject> <hemi> <surf> <classifier> <output>
DESCRIPTION
For a single subject, produces an annotation file, in which each
cortical surface vertex is assigned a neuroanatomical label.
This automatic procedure employs data from a previously-prepared atlas
file. An atlas file is created from a training set, capturing region
data manually drawn by neuroanatomists combined with statistics on
variability correlated to geometric information derived from the
cortical model (sulcus and curvature). Besides the atlases provided
with FreeSurfer, new ones can be prepared using mris_ca_train).
Notes regarding creation and use of FreeSurfer Gaussian classifier atlas:
Create the DKT classifier atlas (?h.DKTatlas40.gcs) --NEED TO VERIFY THIS:
$ mris_ca_train -t $FREESURFER_HOME/average/colortable_desikan_killiany.txt \
$hemi sphere.reg aparcNMMjt.annot $SCANS ./$hemi.DKTatlas40.gcs
Label a brain with the DKT atlas (surface annotation file ?h.DKTatlas40.annot):
$ mris_ca_label -l ./$x/label/$hemi.cortex.label $x/ $hemi sphere.reg \
./$hemi.DKTatlas40.gcs ./$x/label/$hemi.DKTatlas40.annot
Label the cortex of a subject's segmented volume according
to the edited surface labels (?h.aparcNMMjt.annot):
$ mri_aparc2aseg --s ./x --volmask --annot aparcNMMjt
Label a brain with the DKT atlas using FreeSurfer's mris_ca_label:
$ mris_ca_label MMRR-21-1 lh lh.sphere.reg ../lh.DKTatlas40.gcs ../out.annot
Parameters
----------
subject : string
subject corresponding to FreeSurfer subject directory
hemi : string
hemisphere ['lh' or 'rh']
left_classifier : string
name of left hemisphere FreeSurfer classifier atlas (full path)
right_classifier : string
name of right hemisphere FreeSurfer classifier atlas (full path)
annot_file : string
name of output .annot file
subjects_directory : string
FreeSurfer subjects directory (mris_ca_label -sdir option)
Returns
-------
annot_file : string
name of output .annot file
Examples
--------
>>> # This example requires a FreeSurfer subjects/<subject> subdirectory
>>> import os
>>> from mindboggle.utils.freesurfer import label_with_classifier
>>> subject = 'Twins-2-1'
>>> hemi = 'lh'
>>> left_classifier = '/homedir/mindboggle_cache/b28a600a713c269f4c561f66f64337b2/lh.DKTatlas40.gcs'
>>> right_classifier = ''
>>> annot_file = './lh.classifier.annot'
>>> subjects_directory = ''
>>> label_with_classifier(subject, hemi, left_classifier, right_classifier, annot_file, subjects_directory)
>>> #
>>> # View:
>>> from mindboggle.utils.freesurfer import annot_to_vtk
>>> from mindboggle.utils.plots import plot_surfaces
>>> path = os.environ['MINDBOGGLE_DATA']
>>> vtk_file = os.path.join(path, 'arno', 'freesurfer', 'lh.pial.vtk')
>>> output_vtk = './lh.classifier.vtk'
>>> #
>>> labels, output_vtk = annot_to_vtk(annot_file, vtk_file, output_vtk)
>>> plot_surfaces(output_vtk)
"""
    import os
    from mindboggle.utils.utils import execute

    if not annot_file:
        annot_file = os.path.join(os.getcwd(), hemi + '.classifier.annot')

    if hemi == 'lh':
        classifier = left_classifier
    elif hemi == 'rh':
        classifier = right_classifier
    else:
        print("label_with_classifier()'s hemi should be 'lh' or 'rh'")

    if subjects_directory:
        sdir = ' -sdir ' + subjects_directory
    else:
        sdir = ''

    cmd = ['mris_ca_label', subject, hemi, hemi + '.sphere.reg', classifier,
           annot_file, sdir]
    execute(cmd)
    if not os.path.exists(annot_file):
        raise(IOError("mris_ca_label did not create " + annot_file + "."))

    return annot_file
# def convert_mgh_to_native_nifti_mri_vol2vol(input_file, reference_file,
# output_file='', interp='nearest'):
# """
# Convert volume from FreeSurfer 'unconformed' to original space
# in nifti format using FreeSurfer's mri_vol2vol.
#
# Note: FreeSurfer's mri_convert example results in type: SHORT (4),
# while mri_vol2vol results in type: FLOAT (3), as does scipy.ndimage.
# The mri_vol2vol command is ::
#
# mri_vol2vol --mov <input_file> --targ <reference_file>
# --interp trilin --regheader --o <output_file>
#
# Parameters
# ----------
# input_file : string
# input file name
# reference_file : string
# file in original space
# output_file : string
# name of output file
# interp : string
# interpolation method {trilin, nearest}
#
# Returns
# -------
# output_file : string
# name of output file
#
# """
# import os
#
# from mindboggle.utils.utils import execute
#
# # Convert volume from FreeSurfer to original space:
# print("Convert volume from FreeSurfer 'unconformed' to original space...")
#
# if not os.path.exists(input_file):
# raise(IOError("Input file " + input_file + " not found"))
# if not os.path.exists(reference_file):
# raise(IOError("Reference file " + reference_file + " not found."))
# if not output_file:
# output_file = os.path.join(os.getcwd(),
# os.path.basename(input_file).split('mgz')[0] + 'nii.gz')
#
# cmd = ['mri_vol2vol',
# '--mov', input_file,
# '--targ', reference_file,
# '--interp', interp,
# '--regheader --o', output_file]
# execute(cmd)
# if not os.path.exists(output_file):
# raise(IOError("mri_vol2vol did not create " + output_file + "."))
#
# return output_file
#
#
# def annot_labels_to_volume(subject, annot_name, original_space, reference):
# """
# Propagate surface labels through hemisphere's gray matter volume
# using FreeSurfer's mri_aparc2aseg.
#
# Note ::
# From the mri_aparc2aseg documentation:
# The volumes of the cortical labels will be different than
# reported by mris_anatomical_stats because partial volume information
# is lost when mapping the surface to the volume. The values reported by
# mris_anatomical_stats will be more accurate than the volumes from the
# aparc+aseg volume.
#
# Parameters
# ----------
# subject : string
# subject name
# annot_name: string
# FreeSurfer annot filename without the hemisphere prepend or .annot append
# original_space: Boolean
# convert from FreeSurfer unconformed to original space?
# reference : string
# file in original space
#
# Returns
# -------
# output_file : string
# name of output file
#
# """
# import os
#
# from mindboggle.utils.freesurfer import convert_mgh_to_native_nifti
# from mindboggle.utils.utils import execute
#
# # Fill hemisphere gray matter volume with surface labels using FreeSurfer:
# print("Fill gray matter volume with surface labels using FreeSurfer...")
#
# output_file1 = os.path.join(os.getcwd(), annot_name + '.nii.gz')
#
# cmd = ['mri_aparc2aseg', '--s', subject, '--annot', annot_name,
# '--o', output_file1]
# execute(cmd)
# if not os.path.exists(output_file1):
# raise(IOError("mri_aparc2aseg did not create " + output_file1 + "."))
#
# # Convert label volume from FreeSurfer to original space:
# if original_space:
#
# output_file2 = os.path.join(os.getcwd(), annot_name + '.native.nii.gz')
# output_file = convert_mgh_to_native_nifti(output_file1, reference,
# output_file2, interp='nearest')
# else:
# output_file = output_file1
#
# if not os.path.exists(output_file):
# raise(IOError("Output file " + output_file + " not created."))
#
# return output_file
#
#
#def register_template(hemi, sphere_file, transform,
# templates_path, template):
# """
# Register surface to template with FreeSurfer's mris_register.
#
# Transform the labels from multiple atlases via a template
# (using FreeSurfer's mris_register).
#
# """
# import os
#
# from mindboggle.utils.utils import execute
#
# template_file = os.path.join(templates_path, hemi + '.' + template)
# output_file = hemi + '.' + transform
#
# cmd = ['mris_register', '-curv', sphere_file, template_file, output_file]
# execute(cmd)
# if not os.path.exists(output_file):
# raise(IOError(output_file + " not found"))
#
# return transform
#
#
#def transform_atlas_labels(hemi, subject, transform,
# subjects_path, atlas, atlas_string):
# """
# Transform atlas labels.
#
# Read in the FreeSurfer *.annot file for a subject's brain hemisphere,
# .
#
# Transform the labels from a surface atlas via a template
# using FreeSurfer's mri_surf2surf (wrapped in Nipype).
#
# nipype.workflows.smri.freesurfer.utils.fs.SurfaceTransform
# wraps command ``mri_surf2surf`` ::
#
# "Transform a surface file from one subject to another via a spherical
# registration. Both the source and target subject must reside in your
# Subjects Directory, and they must have been processed with recon-all,
# unless you are transforming to one of the icosahedron meshes."
#
# Parameters
# ----------
# hemi : string
# hemisphere ['lh' or 'rh']
# subject : string
# subject corresponding to FreeSurfer subject directory
# transform : string
# name of FreeSurfer spherical surface registration transform file
# subjects_path : string
# name of FreeSurfer subjects directory
# atlas : string
# name of atlas
# atlas_string : string
# name of atlas labeling protocol
#
# Returns
# -------
# output_file : string
# name of the output file
#
# """
# import os
# from nipype.interfaces.freesurfer import SurfaceTransform
#
# sxfm = SurfaceTransform()
# sxfm.inputs.hemi = hemi
# sxfm.inputs.target_subject = subject
# sxfm.inputs.source_subject = atlas
#
# # Source file
# sxfm.inputs.source_annot_file = os.path.join(subjects_path,
# atlas, 'label',
# hemi + '.' + atlas_string + '.annot')
# # Output annotation file
# output_file = os.path.join(os.getcwd(), hemi + '.' + atlas + '.' +
# atlas_string + '_to_' + subject + '.annot')
# sxfm.inputs.out_file = output_file
#
# # Arguments: strings within registered files
# args = ['--srcsurfreg', transform,
# '--trgsurfreg', transform]
# sxfm.inputs.args = ' '.join(args)
#
# sxfm.run()
#
# if not os.path.exists(output_file):
# raise(IOError(output_file + " not found"))
#
# return output_file
#
#
#def vote_labels(label_lists):
# """
# For each vertex, vote on the majority label.
#
# Parameters
# ----------
# label_lists : list of lists of integers
# vertex labels assigned by each atlas
#
# Returns
# -------
# labels_max : list of integers
# majority labels for vertices
# label_counts : list of integers
# number of different labels for vertices
# label_votes : list of integers
# number of votes for the majority labels
#
# Examples
# --------
# >>> from collections import Counter
# >>> X = [1,1,2,3,4,2,1,2,1,2,1,2]
# >>> Votes = Counter(X)
# >>> Votes
# Counter({1: 5, 2: 5, 3: 1, 4: 1})
# >>> Votes.most_common(1)
# [(1, 5)]
# >>> Votes.most_common(2)
# [(1, 5), (2, 5)]
# >>> len(Votes)
# 4
#
# """
# from collections import Counter
#
# print("Begin voting...")
# n_atlases = len(label_lists) # number of atlases used to label subject
# npoints = len(label_lists[0])
# labels_max = [-1 for i in range(npoints)]
# label_counts = [1 for i in range(npoints)]
# label_votes = [n_atlases for i in range(npoints)]
#
# consensus_vertices = []
# for vertex in range(npoints):
# votes = Counter([label_lists[i][vertex] for i in range(n_atlases)])
#
# labels_max[vertex] = votes.most_common(1)[0][0]
# label_votes[vertex] = votes.most_common(1)[0][1]
# label_counts[vertex] = len(votes)
# if len(votes) == n_atlases:
# consensus_vertices.append(vertex)
#
# print("Voting done.")
#
# return labels_max, label_votes, label_counts, consensus_vertices
#
#
#def majority_vote_label(surface_file, annot_files):
# """
# Load a VTK surface and corresponding FreeSurfer annot files.
# Write majority vote labels, and label counts and votes as VTK files.
#
# Parameters
# ----------
# surface_file : string
# name of VTK surface file
# annot_files : list of strings
# names of FreeSurfer annot files
#
# Returns
# -------
# labels_max : list of integers
# majority labels for vertices
# label_counts : list of integers
# number of different labels for vertices
# label_votes : list of integers
# number of votes for the majority labels
# consensus_vertices : list of integers
# indicating which are consensus labels
# maxlabel_file : string
# name of VTK file containing majority vote labels
# labelcounts_file : string
# name of VTK file containing number of different label counts
# labelvotes_file : string
# name of VTK file containing number of votes per majority label
#
# """
# from os import path, getcwd
# import nibabel as nb
# import pyvtk
# from mindboggle.utils.freesurfer import vote_labels
# from mindboggle.utils.io_table import string_vs_list_check
#
# # Load multiple label sets
# print("Load annotation files...")
# label_lists = []
# for annot_file in annot_files:
# labels, colortable, names = nb.freesurfer.read_annot(annot_file)
# label_lists.append(labels)
# print("Annotations loaded.")
#
# # Vote on labels for each vertex
# labels_max, label_votes, label_counts, \
# consensus_vertices = vote_labels(label_lists)
#
# # Check type to make sure the filename is a string
# # (if a list, return the first element)
# surface_file = string_vs_list_check(surface_file)
#
# # Save files
# VTKReader = pyvtk.VtkData(surface_file)
# Vertices = VTKReader.structure.points
# Faces = VTKReader.structure.polygons
#
# output_stem = path.join(getcwd(), path.basename(surface_file.strip('.vtk')))
# maxlabel_file = output_stem + '.labels.max.vtk'
# labelcounts_file = output_stem + '.labelcounts.vtk'
# labelvotes_file = output_stem + '.labelvotes.vtk'
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(labels_max,\
# name='Max (majority labels)'))).\
# tofile(maxlabel_file, 'ascii')
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(label_counts,\
# name='Counts (number of different labels)'))).\
# tofile(labelcounts_file, 'ascii')
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(label_votes,\
# name='Votes (number of votes for majority labels)'))).\
# tofile(labelvotes_file, 'ascii')
#
# if not os.path.exists(maxlabel_file):
# raise(IOError(maxlabel_file + " not found"))
# if not os.path.exists(labelcounts_file):
# raise(IOError(labelcounts_file + " not found"))
# if not os.path.exists(labelvotes_file):
# raise(IOError(labelvotes_file + " not found"))
#
# return labels_max, label_counts, label_votes, consensus_vertices, \
# maxlabel_file, labelcounts_file, labelvotes_file
#def relabel_annot_file(hemi, subject, annot_name, new_annot_name, relabel_file):
# """
# Combine surface labels in a .annot file.
#
# https://mail.nmr.mgh.harvard.edu/pipermail//freesurfer/2010-June/014620.html
#
# `mris_translate_annotation <subject> <hemi> <in annot> <translation file> <out annot>`
#
# ``translation file``: text file that lists the labels (one per line)
# you want to group, and the new label you want to create. You have to use
# the RGB codes; each line will provide the input and output RGB values::
#
# 221 220 60 223 220 60
# 221 220 160 223 220 60
# 221 220 100 223 220 60
#
# Parameters
# ----------
# hemi : string
# hemisphere ['lh' or 'rh']
# subject : string
# subject name
# annot_name : string
# name of .annot file (without pre- or post-pends)
# relabel_file : string
# text file with old and new RGB values
# new_annot_name : string
# new .annot name
#
# Returns
# -------
# new_annot_name : string
# new .annot name
#
# """
# from nipype.interfaces.base import CommandLine
#
# cli = CommandLine(command='mris_translate_annotation')
# cli.inputs.args = ' '.join([subject, hemi, annot_name, relabel_file,
# new_annot_name])
# cli.cmdline
# cli.run()
#
# return new_annot_name
#def thickness_to_ascii(hemi, subject, subjects_path):
# """
# Convert a FreeSurfer thickness (per-vertex) file
# to an ascii file.
#
# Note: Untested function
#
# Parameters
# ----------
# hemi : string indicating left or right hemisphere
# subject_path: string
# path to subject directory where the binary FreeSurfer
# thickness file is found ("lh.thickness")
#
# Returns
# -------
# thickness_file : string
# name of output file, where each element is the thickness
# value of a FreeSurfer mesh vertex. Elements are ordered
# by orders of vertices in FreeSurfer surface file.
#
# """
# import os
# from nipype.interfaces.base import CommandLine
#
# filename = hemi + 'thickness'
# filename_full = os.path.join(subjects_path, subject, filename)
# thickness_file = os.path.join(os.getcwd(), filename + '.dat')
#
# cli = CommandLine(command='mri_convert')
# cli.inputs.args = ' '.join([filename_full, '--ascii+crsf', thickness_file])
# cli.cmdline
# cli.run()
#
# return thickness_file
#def vtk_to_labels(hemi, surface_file, label_numbers, label_names,
# RGBs, scalar_name):
# """
# Write FreeSurfer .label files from a labeled VTK surface mesh.
#
# From https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles:
#
# "A label file is a text file capturing a list of vertices belonging to a region,
# including their spatial positions(using R,A,S coordinates). A label file
# corresponds only to a single label, thus contains only a single list of vertices"::
#
# 1806
# 7 -22.796 -66.405 -29.582 0.000000
# 89 -22.273 -43.118 -24.069 0.000000
# 138 -14.142 -81.495 -30.903 0.000000
# [...]
#
# Parameters
# ----------
# hemi : string
# hemisphere
# surface_file : string
# vtk surface mesh file with labels
# label_numbers : list of integers
# label numbers
# label_names : list of strings
# label names
# RGBs : list of lists of 3-tuples
# label RGB values for later conversion to a .annot file
# scalar_name : string
# name of scalar values in vtk file
#
# Returns
# -------
# label_files : list of strings
# label file names (order must match label list)
# colortable : string
# file with list of labels and RGB values
# NOTE: labels are identified by the colortable's RGB values
#
# """
# import os
# import numpy as np
# import vtk
#
# def string_vs_list_check(var):
# """
# Check type to make sure it is a string.
#
# (if a list, return the first element)
# """
#
# # Check type:
# NOTE: change to: type(var).__name__
# if type(var) == str:
# return var
# elif type(var) == list:
# return var[0]
# else:
# os.error("Check format of " + var)
#
# # Check type to make sure the filename is a string
# # (if a list, return the first element)
# surface_file = string_vs_list_check(surface_file)
#
# # Initialize list of label files and output colortable file
# label_files = []
# #relabel_file = os.path.join(os.getcwd(), 'relabel_annot.txt')
# #f_relabel = open(relabel_file, 'w')
# colortable = os.path.join(os.getcwd(), 'colortable.ctab')
# f_rgb = open(colortable, 'w')
#
# # Loop through labels
# irgb = 0
# for ilabel, label_number in enumerate(label_numbers):
#
# # Check type to make sure the number is an int
# label_number = int(label_number)
# label_name = label_names[ilabel]
#
# # Load surface
# reader = vtk.vtkDataSetReader()
# reader.SetFileName(surface_file)
# reader.ReadAllScalarsOn()
# reader.Update()
# data = reader.GetOutput()
# d = data.GetPointData()
# labels = d.GetArray(scalar_name)
#
# # Write vertex index, coordinates, and 0
# count = 0
# npoints = data.GetNumberOfPoints()
# L = np.zeros((npoints,5))
# for i in range(npoints):
# label = labels.GetValue(i)
# if label == label_number:
# L[count,0] = i
# L[count,1:4] = data.GetPoint(i)
# count += 1
#
# # Save the label file
# if count > 0:
# irgb += 1
#
# # Write to relabel_file
# #if irgb != label_number:
# # f_relabel.writelines('{0} {1}\n'.format(irgb, label_number))
#
# # Write to colortable
# f_rgb.writelines('{0} {1} {2}\n'.format(
# irgb, label_name, "0 0 0 0")) # ".join(RGBs[ilabel])))
#
# # Store in list of .label files
# label_file = hemi + '.' + label_name + '.label'
# label_file = os.path.join(os.getcwd(), label_file)
# label_files.append(label_file)
#
# # Write to .label file
# f = open(label_file, 'w')
# f.writelines('#!ascii label\n' + str(count) + '\n')
# for i in range(npoints):
# if any(L[i,:]):
# pr = '{0} {1} {2} {3} 0\n'.format(
# np.int(L[i,0]), L[i,1], L[i,2], L[i,3])
# f.writelines(pr)
# else:
# break
# f.close()
# f_rgb.close()
# #f_relabel.close()
#
# return label_files, colortable
#def labels_to_annot(hemi, subjects_path, subject, label_files,
# colortable, annot_name):
# """
# Convert FreeSurfer .label files to a FreeSurfer .annot file
# using FreeSurfer's mris_label2annot:
# https://surfer.nmr.mgh.harvard.edu/fswiki/mris_label2annot
#
# The order of the .label files must equal the order
# of the labels in the colortable:
# https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles
#
# NOTE: The resulting .annot file will have incorrect labels
# if the numbering of the labels is not sequential from 1,2,3...
# For programs like tksurfer, the labels are identified
# by the colortable's RGB values, so to some programs that display
# the label names, the labels could appear correct when not.
# NOTE: You cannot overwrite a .annot file of the same name,
# so in this script I delete it before running.
#
# Parameters
# ----------
# hemi : hemisphere [string]
# subjects_path : path to file
# subject : subject name
# label_files : .label file names [list of strings]
# colortable : file of label numbers & names (same order as label_files)
# annot_name : name of the output .annot file (without prepending hemi)
#
# Returns
# -------
# annot_name : name of .annot file (without prepend)
# annot_file : name of .annot file (with prepend)
#
# """
# import os
# from nipype.interfaces.base import CommandLine
#
# label_files = [f for f in label_files if f!=None]
# if label_files:
# annot_file = hemi + '.' + annot_name + '.annot'
# if os.path.exists(os.path.join(subjects_path, subject, 'label', annot_file)):
# cli = CommandLine(command='rm')
# cli.inputs.args = os.path.join(subjects_path, subject, \
# 'label', annot_file)
# cli.cmdline
# cli.run()
# cli = CommandLine(command='mris_label2annot')
# cli.inputs.args = ' '.join(['--h', hemi, '--s', subject, \
# '--l', ' --l '.join(label_files), \
# '--ctab', colortable, \
# '--a', annot_name])
# cli.cmdline
# cli.run()
#
# return annot_name, annot_file
|
apache-2.0
| -8,878,295,001,158,923,000 | 33.783712 | 111 | 0.597436 | false |
JrGoodle/clowder
|
clowder/cli/checkout.py
|
1
|
1689
|
"""Clowder command line checkout controller
.. codeauthor:: Joe DeCapo <[email protected]>
"""
import argparse
import clowder.util.formatting as fmt
from clowder.clowder_controller import CLOWDER_CONTROLLER, print_clowder_name, valid_clowder_yaml_required
from clowder.config import Config
from clowder.git.clowder_repo import print_clowder_repo_status
from clowder.util.console import CONSOLE
from .util import add_parser_arguments
def add_checkout_parser(subparsers: argparse._SubParsersAction) -> None:  # noqa
    """Add clowder checkout parser

    :param argparse._SubParsersAction subparsers: Subparsers action to add parser to
    """
    parser = subparsers.add_parser('checkout', help='Checkout local branch in projects')
    parser.formatter_class = argparse.RawTextHelpFormatter
    parser.set_defaults(func=checkout)
    add_parser_arguments(parser, [
        (['branch'], dict(nargs=1, action='store', help='branch to checkout', metavar='<branch>')),
        (['projects'], dict(metavar='<project|group>', default='default', nargs='*',
                            choices=CLOWDER_CONTROLLER.project_choices_with_default,
                            help=fmt.project_options_help_message('projects and groups to checkout branches for')))
    ])
@valid_clowder_yaml_required
@print_clowder_name
@print_clowder_repo_status
def checkout(args) -> None:
    """Clowder checkout command private implementation"""
    projects = Config().process_projects_arg(args.projects)
    projects = CLOWDER_CONTROLLER.filter_projects(CLOWDER_CONTROLLER.projects, projects)
    for project in projects:
        CONSOLE.stdout(project.status())
        project.checkout(args.branch[0])
|
mit
| 5,555,558,991,207,249,000 | 34.93617 | 115 | 0.718769 | false |
CommonsCloud/Core-API
|
CommonsCloudAPI/utilities/statuses.py
|
1
|
10872
|
"""
For CommonsCloud copyright information please see the LICENSE document
(the "License") included with this software package. This file may not
be used in any manner except in compliance with the License
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Import Flask Dependencies
"""
from flask import jsonify
"""
A Base Class for centralizing the information regarding HTTP messages to the
end user, to give them a better idea of what's going on
@see RFC 2616
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
@variable (string) return_type
@method status_401
@method status_415
"""
class CommonsStatus():
"""
Define our default variables
@param (object) self
The object we are acting on behalf of
@param (string) return_type
The type of content we'd like to return to the user
"""
def __init__(self):
self.return_type = 'json'
"""
200 OK
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_200(self, system_message=""):
message = {
'status': '200 OK',
'code': '200',
'message': system_message or 'Looking good McFly'
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE, OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
201 Created
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_201(self, resource_id=""):
message = {
'status': '201 Created',
'code': '201',
'resource_id': resource_id,
'message': 'The request has been fulfilled and resulted in a new resource being created.'
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE, OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
204 No Content
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.2
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_204(self):
message = {
'status': '204 No Content',
'code': '204',
'message': 'This content no longer exists'
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE, OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
303 See Other
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_303(self):
message = {
'status': '303 See Other',
'code': '303',
'message': 'Please check the API documentation, as there is a different way you need to request your desired data.'
}
return jsonify(message) if self.return_type == 'json' else message
"""
400 Bad Request
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.2
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_400(self, system_message=""):
message = {
'status': '400 Bad Request',
'code': '400',
'error': str(system_message)
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE, OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
401 Unauthorized
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.2
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_401(self, system_message=''):
message = {
'status': '401 Unauthorized',
'code': '401',
'error': str(system_message)
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE, OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
403 Not Authorized
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.5
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_403(self, system_message=''):
if not system_message:
system_message = 'You are forbidden from accessing the requested resource.'
message = {
'status': '403 Forbidden',
'code': '403',
'message': system_message
}
response = jsonify(message)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Authorization, Accept, Content-Type, X-Requested-With, Origin, Access-Control-Request-Method, Access-Control-Request-Headers, Cache-Control, Expires, Set-Cookie')
response.headers.add('Access-Control-Allow-Credentials', True)
response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, PATCH, DELETE OPTIONS')
response.headers.add('Pragma', 'no-cache')
response.headers.add('Cache-Control', 'no-cache')
return response
"""
404 Not Found
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.5
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_404(self, system_message=''):
if not system_message:
system_message = 'The server has not found anything matching the Request-URI.'
message = {
'status': '404 Not Found',
'code': '404',
'message': system_message,
'details': 'You probably entered the URL wrong or perhaps what you were looking for has been removed.'
}
return jsonify(message) if self.return_type == 'json' else message
"""
405 Method Not Allowed
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.5
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_405(self):
message = {
'status': '405 Method Not Allowed',
'code': '405',
'message': 'The method is not allowed for the requested URL.',
'details': 'Check the documentation to ensure the method you\'re attempting to use is one of GET, POST, PATCH, or DELETE'
}
return jsonify(message) if self.return_type == 'json' else message
"""
415 Unsupported Media Type
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.16
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_415(self, system_message=""):
message = {
'status': '415 Unsupported Media Type',
'code': '415',
'message': 'The server is refusing to service the request because the entity of the request is in a format not supported by the requested resource for the requested method.',
'error': str(system_message).replace('"', "")
}
return jsonify(message) if self.return_type == 'json' else message
"""
500 Internal Server Error
@see
http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5.1
@param (object) self
The object we are acting on behalf of
@return (method) jsonify || (dict) message
Either a jsonfied dictionary or just the dictionary
"""
def status_500(self, system_message=""):
message = {
'status': '500 Internal Server Error',
'code': '500',
'message': 'The server has not found anything matching the Request-URI.',
'details': 'You need to check the system, application, and proxy logs.',
'error': str(system_message).replace('"', "")
}
return jsonify(message) if self.return_type == 'json' else message
|
agpl-3.0
| 578,113,238,443,479,900 | 27.838196 | 220 | 0.678716 | false |
d33tah/fdwatch
|
fdwatch.py
|
1
|
3926
|
#!/usr/bin/python
#! -*- coding: utf-8
from sys import argv, exit
from time import sleep
from os import path
"""
This file is part of fdwatch.
fdwatch is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
fdwatch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Foobar; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
def humanize_bytes(bytes, precision=1):
    """Return a humanized string representation of a number of bytes.

    Assumes `from __future__ import division`.

    >>> humanize_bytes(1)
    '1 byte'
    >>> humanize_bytes(1024)
    '1.0 kB'
    >>> humanize_bytes(1024*123)
    '123.0 kB'
    >>> humanize_bytes(1024*12342)
    '12.1 MB'
    >>> humanize_bytes(1024*12342,2)
    '12.05 MB'
    >>> humanize_bytes(1024*1234,2)
    '1.21 MB'
    >>> humanize_bytes(1024*1234*1111,2)
    '1.31 GB'
    >>> humanize_bytes(1024*1234*1111,1)
    '1.3 GB'
    """
    abbrevs = (
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'bytes')
    )
    if bytes == 1:
        return '1 byte'
    for factor, suffix in abbrevs:
        if bytes >= factor:
            break
    return '%.*f %s' % (precision, bytes / factor, suffix)
def elapsed_time(seconds, suffixes=['y', 'w', 'd', 'h', 'm', 's'], add_s=False,
                 separator=' '):
    """
    Takes an amount of seconds and turns it into a human-readable amount
    of time.
    """
    # the formatted time string to be returned
    time = []

    # the pieces of time to iterate over (days, hours, minutes, etc)
    # - the first piece in each tuple is the suffix (d, h, w)
    # - the second piece is the length in seconds (a day is 60s*60m*24h)
    parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
             (suffixes[1], 60 * 60 * 24 * 7),
             (suffixes[2], 60 * 60 * 24),
             (suffixes[3], 60 * 60),
             (suffixes[4], 60),
             (suffixes[5], 1)]

    # for each time piece, grab the value and remaining seconds, and add it
    # to the time string
    for suffix, length in parts:
        value = int(seconds / length)
        if value > 0:
            seconds = seconds % length
            time.append('%d%s' % (value,
                                  (suffix, (suffix, suffix + 's')[value > 1])[add_s]))
        if seconds < 1:
            break

    return separator.join(time)
if __name__ == "__main__":
if len(argv) != 3:
exit("Usage: %s <PID> <FD_NUMBER>" % (argv[0]))
pid = argv[1]
fdno = argv[2]
while 1:
fd = open("/proc/%s/fdinfo/%s" % (pid, fdno))
old_stat = float(fd.readline()[5:].strip("\n"))
size = path.getsize("/proc/%s/fd/%s" % (pid, fdno))
sleep(1)
fd = open("/proc/%s/fdinfo/%s" % (pid, fdno))
new_stat = float(fd.readline()[5:].strip("\n"))
percent = "%0.2f%%" % (float(old_stat/size)*100)
delta = new_stat-old_stat
if delta == 0:
continue
#speed = "%0.2f MiB/s" % (delta/1024**2)
speed = "%s/s" % humanize_bytes(delta)
eta = "ETA=%s" % elapsed_time(separator=' ',
seconds=int((size - new_stat)/delta))
print("%s, %s, ETA: %s" % (percent, speed, eta))
|
gpl-3.0
| -7,925,187,953,884,488,000 | 31.716667 | 79 | 0.531584 | false |
mfulghum/pyZRD
|
tests/test_SQL.py
|
1
|
1656
|
import unittest
import sql.db
import sql.elements
class TestSQLConnection(unittest.TestCase):
    def test_initialization(self):
        """
        Start up the SQLAlchemy engine and initialize the database, then shut it all down.
        :return:
        """
        # Initialize the database.
        session, engine = sql.db.initialize_database(verbose=False)

        # Check that the session and engine were created.
        self.assertIsNotNone(engine)
        self.assertIsNotNone(session)

        # Shut the database down.
        sql.db.shutdown_database(session, engine)


class TestSQLCommit(unittest.TestCase):
    def setUp(self):
        """
        :return:
        """
        self.session, self.engine = sql.db.initialize_database(verbose=False)

    def test_commit(self):
        """
        Start up the SQLAlchemy engine and initialize the database
        :return:
        """
        # First make sure that we have a SQL session
        self.assertIsNotNone(self.engine)
        self.assertIsNotNone(self.session)

        # Create a ray element without assigning an ID
        ray = sql.elements.Ray()
        # The ID should be None
        self.assertIsNone(ray.id)

        # Add the ray to the session
        self.session.add(ray)
        # The ID should *still* be None
        self.assertIsNone(ray.id)

        # Commit the change
        self.session.commit()
        # Now that the ray has been added, the ray ID should now be 1
        self.assertEqual(ray.id, 1)

    def tearDown(self):
        """
        Shut down the SQL database
        :return:
        """
        sql.db.shutdown_database(self.session, self.engine)
|
mit
| -8,795,536,489,606,486,000 | 25.725806 | 90 | 0.610507 | false |
radjkarl/dataArtist
|
dataArtist/input/reader/FITSimageFormat.py
|
1
|
1513
|
# coding=utf-8
from __future__ import absolute_import
from imgProcessor.transformations import transpose
import cv2
# OWN
from .ImageWithOpenCV import ImageWithOpenCV
class FITSimageFormat(ImageWithOpenCV):
    '''
    Read one or multiple (stacked) FITS images
    '''
    axes = ['x', 'y', '']
    ftypes = ('fits', 'fit', 'fts')
    preferred = True

    def __init__(self, *args, **kwargs):
        ImageWithOpenCV.__init__(self, *args, **kwargs)
        p = self.preferences
        p.pGrey.setValue(True)
        p.pGrey.setOpts(readonly=True)
        # p.pResize.setOpts(readonly=True)

    @staticmethod
    def check(ftype, fname):
        if ftype in FITSimageFormat.ftypes:
            return True
        return False

    def open(self, filename):
        import pyfits  # save startup time
        f = pyfits.open(filename)
        img = f[0].data
        labels = None  # f[0].name
        prefs = self.preferences
        # OPEN
        # due to different conventions:
        #img = transpose(img)
        # crop
        if prefs.pCrop.value():
            r = (prefs.pCropX0.value(),
                 prefs.pCropX1.value(),
                 prefs.pCropY0.value(),
                 prefs.pCropY1.value())
            img = img[r[0]:r[1], r[2]:r[3]]
        # resize
        if prefs.pResize.value():
            img = cv2.resize(
                img, (prefs.pResizeX.value(), prefs.pResizeY.value()))
        img = self.toFloat(img)
        return img, labels
|
gpl-3.0
| -701,288,895,234,726,100 | 25.54386 | 70 | 0.558493 | false |
jhuapl-boss/intern
|
intern/remote/boss/tests/int_test_metadata_v1.py
|
1
|
8175
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from intern.service.boss.httperrorlist import HTTPErrorList
import random
import requests
from requests import Session, HTTPError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import unittest
API_VER = 'v1'
class MetadataServiceTest_v1(unittest.TestCase):
"""Integration tests of the Boss metadata API.
Because setup and teardown involves many REST calls, tests are only
divided into tests of the different types of data model resources. All
operations are performed within a single test of each resource.
"""
@classmethod
def setUpClass(cls):
"""Do an initial DB clean up in case something went wrong the last time.
If a test failed really badly, the DB might be in a bad state despite
attempts to clean up during tearDown().
"""
cls.initialize()
cls.cleanup_db()
cls.rmt.create_project(cls.coll)
coord_actual = cls.rmt.create_project(cls.coord)
cls.rmt.create_project(cls.exp)
chan_actual = cls.rmt.create_project(cls.chan)
@classmethod
def tearDownClass(cls):
"""Remove all data model objects created in the DB.
"""
cls.cleanup_db()
@classmethod
def initialize(cls):
"""Initialization for each test.
Called by both setUp() and setUpClass().
"""
cls.rmt = BossRemote('test.cfg', API_VER)
# Turn off SSL cert verification. This is necessary for interacting with
# developer instances of the Boss.
cls.rmt.project_service.session_send_opts = { 'verify': False }
cls.rmt.metadata_service.session_send_opts = { 'verify': False }
cls.rmt.volume_service.session_send_opts = { 'verify': False }
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
coll_name = 'coll2309_{}'.format(random.randint(0, 9999))
cls.coll = CollectionResource(coll_name, 'bar')
cf_name = 'MetaFrame{}'.format(random.randint(0, 9999))
cls.coord = CoordinateFrameResource(
cf_name, 'Test coordinate frame.', 0, 10, -5, 5, 3, 6,
1, 1, 1, 'nanometers', 1, 'nanoseconds')
cls.exp = ExperimentResource(
'myMetaExp2309', cls.coll.name, cls.coord.name, 'my experiment',
1, 'isotropic', 1)
cls.chan = ChannelResource(
'myTestMetaChan', cls.coll.name, cls.exp.name, 'image', 'test channel',
0, 'uint8', 0)
@classmethod
def cleanup_db(cls):
"""Clean up the data model objects used by this test case.
This method is used by both tearDownClass() and setUpClass().
"""
try:
cls.rmt.delete_project(cls.chan)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.exp)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.coord)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.coll)
except HTTPError:
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_collection(self):
actual_list = self.rmt.list_metadata(self.coll)
self.assertEqual([], actual_list)
keys_vals = {'red': 'green', 'two': 'four', 'inside': 'out'}
self.rmt.create_metadata(self.coll, keys_vals)
actual = self.rmt.get_metadata(self.coll, list(keys_vals.keys()))
six.assertCountEqual(self,keys_vals, actual)
with self.assertRaises(HTTPErrorList):
# Should fail when trying create keys that already exist.
self.rmt.create_metadata(self.coll, keys_vals)
update = {'two': 'six', 'inside': 'upside-down'}
self.rmt.update_metadata(self.coll, update)
actual_upd = self.rmt.get_metadata(self.coll, list(update.keys()))
six.assertCountEqual(self, update, actual_upd)
actual_list_upd = self.rmt.list_metadata(self.coll)
six.assertCountEqual(self, list(keys_vals.keys()), actual_list_upd)
with self.assertRaises(HTTPErrorList):
# Try updating a non-existent key.
self.rmt.update_metadata(self.coll, {'foo': 'bar'})
self.rmt.delete_metadata(self.coll, list(keys_vals.keys()))
with self.assertRaises(HTTPErrorList):
# Try getting keys that don't exist.
self.rmt.get_metadata(self.coll, ['foo', 'bar'])
actual_list_end = self.rmt.list_metadata(self.coll)
self.assertEqual([], actual_list_end)
def test_experiment(self):
actual_list = self.rmt.list_metadata(self.exp)
self.assertEqual([], actual_list)
keys_vals = {'red': 'green', 'two': 'four', 'inside': 'out'}
self.rmt.create_metadata(self.exp, keys_vals)
actual = self.rmt.get_metadata(self.exp, list(keys_vals.keys()))
six.assertCountEqual(self, keys_vals, actual)
with self.assertRaises(HTTPErrorList):
# Should fail when trying create keys that already exist.
self.rmt.create_metadata(self.exp, keys_vals)
update = { 'two': 'six', 'inside': 'upside-down' }
self.rmt.update_metadata(self.exp, update)
actual_upd = self.rmt.get_metadata(self.exp, list(update.keys()))
six.assertCountEqual(self, update, actual_upd)
actual_list_upd = self.rmt.list_metadata(self.exp)
six.assertCountEqual(self, list(keys_vals.keys()), actual_list_upd)
with self.assertRaises(HTTPErrorList):
# Try updating a non-existent key.
self.rmt.update_metadata(self.exp, {'foo': 'bar'})
self.rmt.delete_metadata(self.exp, list(keys_vals.keys()))
with self.assertRaises(HTTPErrorList):
# Try getting keys that don't exist.
self.rmt.get_metadata(self.exp, ['foo', 'bar'])
actual_list_end = self.rmt.list_metadata(self.exp)
self.assertEqual([], actual_list_end)
def test_channel(self):
actual_list = self.rmt.list_metadata(self.chan)
self.assertEqual([], actual_list)
keys_vals = { 'red': 'green', 'two': 'four', 'inside': 'out'}
self.rmt.create_metadata(self.chan, keys_vals)
actual = self.rmt.get_metadata(self.chan, list(keys_vals.keys()))
six.assertCountEqual(self, keys_vals, actual)
with self.assertRaises(HTTPErrorList):
# Should fail when trying create keys that already exist.
self.rmt.create_metadata(self.chan, keys_vals)
update = { 'two': 'six', 'inside': 'upside-down' }
self.rmt.update_metadata(self.chan, update)
actual_upd = self.rmt.get_metadata(self.chan, list(update.keys()))
six.assertCountEqual(self,update, actual_upd)
actual_list_upd = self.rmt.list_metadata(self.chan)
six.assertCountEqual(self,keys_vals, actual_list_upd)
with self.assertRaises(HTTPErrorList):
# Try updating a non-existent key.
self.rmt.update_metadata(self.chan, {'foo': 'bar'})
self.rmt.delete_metadata(self.chan, list(keys_vals.keys()))
with self.assertRaises(HTTPErrorList):
# Try getting keys that don't exist.
self.rmt.get_metadata(self.chan, ['foo', 'bar'])
actual_list_end = self.rmt.list_metadata(self.chan)
self.assertEqual([], actual_list_end)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 6,731,717,979,212,477,000 | 35.486607 | 83 | 0.635018 | false |
DedMemez/ODS-August-2017
|
contextlib.py
|
1
|
2267
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: contextlib
import sys
from functools import wraps
from warnings import warn
__all__ = ['contextmanager', 'nested', 'closing']
class GeneratorContextManager(object):

    def __init__(self, gen):
        self.gen = gen

    def __enter__(self):
        try:
            return self.gen.next()
        except StopIteration:
            raise RuntimeError("generator didn't yield")

    def __exit__(self, type, value, traceback):
        if type is None:
            try:
                self.gen.next()
            except StopIteration:
                return
            raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                return exc is not value
            except:
                if sys.exc_info()[1] is not value:
                    raise
        return


def contextmanager(func):

    @wraps(func)
    def helper(*args, **kwds):
        return GeneratorContextManager(func(*args, **kwds))

    return helper


@contextmanager
def nested(*managers):
    warn('With-statements now directly support multiple context managers', DeprecationWarning, 3)
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        for mgr in managers:
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        yield vars
    except:
        exc = sys.exc_info()
    finally:
        while exits:
            exit = exits.pop()
            try:
                if exit(*exc):
                    exc = (None, None, None)
            except:
                exc = sys.exc_info()
        if exc != (None, None, None):
            raise exc[0], exc[1], exc[2]
    return


class closing(object):

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        return self.thing

    def __exit__(self, *exc_info):
        self.thing.close()
|
apache-2.0
| -3,804,324,277,043,676,700 | 23.211111 | 97 | 0.504191 | false |
paulyang0125/QueryExpansionSystem
|
cgi-bin/segQuery.py
|
1
|
1742
|
#encoding=utf-8
import jieba, re, string
testDict = "dict/dict.txt.big"
jieba.load_userdict(testDict)
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import jieba.posseg as pseg
############# find file name to seg ####################
def segmentCTListPerQuerys(listOfCTPerQuery):
    segCTStringInAQuery = []
    allLinePerQuery = []
    for line in listOfCTPerQuery:
        out1 = re.sub('[a-zA-Z]+', '', line)
        out1 = re.sub('[%s]' % re.escape(string.punctuation), '', out1)
        segline = pseg.cut(out1.decode("utf-8"))
        allLinePerQuery.append(segline)
    for line in allLinePerQuery:
        for z in line:
            seglinePerQuery = []
            if z.flag == "n" or z.flag == "ns" or z.flag == "v" or z.flag == "t" or z.flag == "a" or z.flag == "nr" or z.flag == "nz" or z.flag == "i" or z.flag == "m":
                seglinePerQuery.append(z.word.encode("utf-8"))
            seglineString = ' '.join(str(e) for e in seglinePerQuery)
            segCTStringInAQuery.append(seglineString)
    return segCTStringInAQuery
def segmentToListPerQuery(queryString):
    listPerQuery = []
    segedList = []
    out1 = re.sub('[a-zA-Z]+', '', queryString)
    out1 = re.sub('[%s]' % re.escape(string.punctuation), '', out1)
    #segString = pseg.cut(queryString.decode("utf-8"))
    segString = pseg.cut(queryString.decode("utf-8"))
    #segString = jieba.cut(queryString,cut_all=False)
    #print ".. ".join(segString)
    #for i in segString:
    #    listPerQuery.append(i)
    for z in segString:
        #print z.word + "\n"
        #if z.flag == "n" or z.flag == "ns" or z.flag == "v" or z.flag == "t" or z.flag == "a" or z.flag == "nr" or z.flag == "nz" or z.flag == "i" or z.flag == "m":
        if z.flag != "x":
            segedList.append(z.word.encode("utf-8"))
    return segedList
|
mit
| 5,259,403,091,544,082,000 | 33.84 | 159 | 0.638347 | false |
ADEQUATeDQ/portalmonitor
|
odpw/quality/__init__.py
|
1
|
2870
|
from abc import abstractmethod
from collections import defaultdict
import structlog
log = structlog.get_logger()
from pybloom import ScalableBloomFilter
class Analyser(object):

    @classmethod
    def name(cls): return cls.__name__

    def analyse(self, node, *args, **kwargs):
        meth = None
        for cls in node.__class__.__mro__:
            meth_name = 'analyse_' + cls.__name__
            meth = getattr(self, meth_name, None)
            if meth:
                break
        if not meth:
            meth = self.analyse_generic
        return meth(node, *args, **kwargs)

    @abstractmethod
    def analyse_generic(self, element): pass

    def update(self, node, *args, **kwargs):
        meth = None
        for cls in node.__class__.__mro__:
            meth_name = 'update_' + cls.__name__
            meth = getattr(self, meth_name, None)
            if meth:
                break
        if not meth:
            meth = self.update_generic
        return meth(node, *args, **kwargs)

    @abstractmethod
    def update_generic(self, element): pass

    @abstractmethod
    def getResult(self): pass

    @abstractmethod
    def done(self): pass


class DistinctElementCount(Analyser):

    def __init__(self, withDistinct=None):
        super(DistinctElementCount, self).__init__()
        self.count = 0
        self.bloom = None
        self.set = None
        if withDistinct:
            self.bloom = ScalableBloomFilter(error_rate=0.00001)
            self.distinct = 0
            self.set = set([])

    def getResult(self):
        res = {'count': self.count}
        if self.bloom is not None:
            res['distinct'] = self.distinct
        return res


class ElementCountAnalyser(Analyser):
    """
    Provides a count per distinct element
    """

    def __init__(self, funct=None):
        self.dist = defaultdict(int)
        self.funct = funct

    def analyse_generic(self, element):
        if self.funct is not None:
            self.add(self.funct(element))
        else:
            self.add(element)

    def add(self, value, count=1):
        self.dist[value] += count

    def getDist(self):
        return dict(self.dist)

    def getResult(self):
        return self.getDist()
# class CKANDMD(Analyser):
# def __init__(self):
# super(CKANDMD, self).__init__()
# self.analysers = ckan_analyser()
#
# def analyse_Dataset(self,dataset):
# if hasattr(dataset,'dmd'):
# dataset.dmd['ckan'] = {}
# else:
# dataset.dmd={'ckan': {}}
# for id, a in self.analysers:
# try:
# res = a.analyse(dataset)
# if res:
# dataset.dmd['ckan'][id] = res
# except Exception as e:
# ErrorHandler.handleError(log, "CkanAnalyserException", analyser=id, exception=e, exc_info=True)
|
gpl-3.0
| -9,106,362,200,313,998,000 | 23.741379 | 113 | 0.557491 | false |
loiclefloch/MarkdownEditor
|
classes/Model.py
|
1
|
5472
|
import Constants
import os, json
from PyQt4 import QtGui, QtCore
from . import MarkdownHighlighter
from directory import directory
from mTheme import mTheme
class Model():
def __init__(self, view):
self.VIEW = view
self.TABS = [ { "path": "" } ]
self.THEMES = []
self.MTHEMES = []
self.themeContent = None
self.theme = self.get_from_config("theme")
if not self.theme or type(self.theme) != dict:
self.theme = {"path":Constants.DEFAULT_CSS_PATH, "name": Constants.DEFAULT_CSS_NAME}
self.mtheme = self.get_from_config("mtheme")
if not self.mtheme or type(self.mtheme) != dict:
self.mtheme = {"path":Constants.DEFAULT_MTHEME_PATH, "name": Constants.DEFAULT_MTHEME_NAME}
self.load_css()
self.load_themes()
self.load_mthemes()
def get_filename(self, filepath):
path = str(filepath)
t = path.split("/")
return t[ len(t) - 1 ]
def get_file_folder(self, filepath):
path = filepath
t = path.split("/")
str = ""
for i in range(len(t) - 1):
str = str + t[i] + "/"
return str
def is_document_present(self, filepath):
for i in range(len(self.TABS)):
if self.TABS[i]['path'] == filepath:
return i
return -1
def append_doc(self, filepath):
if len(filepath) != 0:
self.TABS.append({ "path":filepath })
def get_document_title(self):
return self.TABS[self.get_active_tab()]['path']
def get_active_tab(self):
return self.VIEW.tabs.currentIndex() + 1
def get_active_tab_name(self):
return self.TABS[self.get_active_tab()]['path']
def get_file_content(self, filepath):
try:
f = open(filepath, "r")
return f.read()
except Exception:
raise Exception()
def write_file_content(self, filepath, data):
f = open(filepath, "w")
f.write(str(data))
f.close()
def remove_tab(self, index):
self.TABS.pop(index)
def load_css(self):
extension = ".css"
themepath = self.theme["path"]
try:
self.css = self.get_file_content(themepath)
except Exception:
self.VIEW.runError("Error", "Unable to load theme: " + self.theme + " on " + themepath)
def set_css(self, themepath):
for theme in self.THEMES:
if theme["path"] == themepath:
self.theme = theme
break
self.load_css()
self.save_in_config("theme", self.theme)
self.VIEW.update_status("Load theme: " + self.theme["name"])
def set_mtheme(self, themepath):
for theme in self.MTHEMES:
if theme["path"] == themepath:
self.mtheme = theme
break
theme = mTheme(self, self.mtheme["path"])
self.themeContent = theme.load()
if not self.themeContent:
self.VIEW.runError("Error", "Unable to load Markdown theme " + self.mtheme["path"])
return
# we apply theme on each tab
tabs = self.VIEW.tabs
i = 0
while i < tabs.count():
tab = tabs.widget(i)
if not tab:
print ("No tab")
return
textEdit = tab.findChildren(QtGui.QTextEdit)[0]
if not textEdit:
print("No text edit")
return
i = i + 1
self.VIEW.highlighter = MarkdownHighlighter.MarkdownHighlighter(textEdit, self.themeContent)
self.VIEW.update_status("Load Markdown theme: " + self.mtheme["name"])
self.save_in_config("mtheme", self.mtheme)
def load_themes(self):
dir = directory(Constants.CSS_DIR)
files = dir.read()
for f in files:
style = { "name": f.filename(), "path": f.getPath() }
self.THEMES.append(style)
dir.close()
if not self.THEMES:
self.VIEW.runError("Error", "Can't load themes...")
return False
for theme in self.THEMES:
checked = False
if theme["path"] == self.theme["path"]:
checked = True
action = self.VIEW.add_theme_to_menu(theme["name"], theme["path"], checked)
action.triggered.connect(self.VIEW.themesMapper.map)
def load_mthemes(self):
dir = directory(Constants.MTHEMES_DIR)
files = dir.read()
for f in files:
theme = { "name": f.filename(), "path": f.getPath() }
self.MTHEMES.append(theme)
dir.close()
if not self.MTHEMES:
self.VIEW.runError("Error", "Can't load Markdown themes...")
return False
for theme in self.MTHEMES:
checked = False
if theme["path"] == self.mtheme["path"]:
checked = True
action = self.VIEW.add_mtheme_to_menu(theme["name"], theme["path"], checked)
action.triggered.connect(self.VIEW.mthemesMapper.map)
def get_from_config(self, key):
try:
result = self.get_file_content(Constants.CONFIG_FILE)
except Exception:
self.VIEW.runError("Error", "Unable to load configuration file: " + Constants.CONFIG_FILE)
return None
try:
data = json.loads(result)
except ValueError:
self.VIEW.runError("Error", "Config file broken. Json can't be decode")
if key in data and data[key] is not None:
return data[key]
else:
return None
def save_in_config(self, key, value):
try:
result = self.get_file_content(Constants.CONFIG_FILE)
except Exception:
self.VIEW.runError("Error", "Unable to load configuration file: " + Constants.CONFIG_FILE)
return None
try:
data = json.loads(result)
except ValueError:
self.VIEW.runError("Error", "Config file broken. Json can't be decode")
data[key] = value
self.write_file_content(Constants.CONFIG_FILE, json.dumps(data))
|
gpl-2.0
| -5,423,827,405,756,423,000 | 28.578378 | 98 | 0.625548 | false |
miketonks/governor
|
helpers/postgresql.py
|
1
|
11391
|
import os, psycopg2, re, time, shutil
import logging
from urlparse import urlparse
logger = logging.getLogger(__name__)
class Postgresql:
def __init__(self, config):
self.name = config["name"]
self.host, self.port = config["listen"].split(":")
self.read_only_port = config.get('read_only_port', self.port)
self.data_dir = config["data_dir"]
self.replication = config["replication"]
self.config = config
self.cursor_holder = None
# advertised connection for replication
self.advertised_connection_string = "postgres://%s:%s@%s:%s/postgres" % (self.replication["username"], self.replication["password"], self.host, self.port)
self.conn = None
self.master = None
def cursor(self):
if not self.cursor_holder:
local_connection_string = None
# local connection for admin control and local reads
if self.config.get('connect', None) == 'local':
local_connection_string = "user=postgres port=%s" % self.server_port()
else:
local_connection_string = "postgres://%s:%s/postgres" % (self.host, self.server_port())
logger.info("CONNECT: %s", local_connection_string)
self.conn = psycopg2.connect(local_connection_string)
self.conn.autocommit = True
self.cursor_holder = self.conn.cursor()
return self.cursor_holder
def disconnect(self):
try:
self.conn.close()
except Exception as e:
logger.error("Error disconnecting: %s" % e)
def query(self, sql):
max_attempts = 0
while True:
try:
self.cursor().execute(sql)
break
except psycopg2.OperationalError as e:
if self.conn:
self.disconnect()
self.cursor_holder = None
if max_attempts > 4:
raise e
max_attempts += 1
time.sleep(5)
return self.cursor()
def data_directory_empty(self):
return not os.path.exists(self.data_dir) or os.listdir(self.data_dir) == []
def initialize(self):
if os.system("initdb -D %s" % self.data_dir) == 0:
#self.write_pg_hba()
self.copy_pg_hba()
self.start()
self.create_replication_user()
self.run_init_sql()
self.stop()
return True
return False
def sync_from_leader(self, leader):
leader = urlparse(leader["address"])
f = open("./pgpass", "w")
f.write("%(hostname)s:%(port)s:*:%(username)s:%(password)s\n" %
{"hostname": leader.hostname, "port": leader.port, "username": leader.username, "password": leader.password})
f.close()
os.system("chmod 600 pgpass")
return os.system("PGPASSFILE=pgpass pg_basebackup -R -D %(data_dir)s --host=%(host)s --port=%(port)s -U %(username)s" %
{"data_dir": self.data_dir, "host": leader.hostname, "port": leader.port, "username": leader.username}) == 0
def is_leader(self):
return not self.query("SELECT pg_is_in_recovery();").fetchone()[0]
def is_running(self):
return os.system("pg_ctl status -D %s > /dev/null" % self.data_dir) == 0
def start(self, master=False):
if self.is_running():
logger.error("Cannot start PostgreSQL because one is already running.")
return False
pid_path = "%s/postmaster.pid" % self.data_dir
if os.path.exists(pid_path):
os.remove(pid_path)
logger.info("Removed %s" % pid_path)
self.master = master
if master:
logger.info("Starting PostgreSQL in Master mode")
else:
logger.info("Starting PostgreSQL in Slave mode")
command_code = os.system("postgres -D %s %s &" % (self.data_dir, self.server_options()))
while not self.is_running():
time.sleep(5)
return command_code != 0
def stop(self):
logger.info("Stopping PostgreSQL")
return os.system("pg_ctl stop -w -D %s -m fast -w" % self.data_dir) != 0
def reload(self):
return os.system("pg_ctl reload -w -D %s" % self.data_dir) == 0
def restart(self, master=False):
self.master = master
if master:
logger.info("Restarting PostgreSQL in Master mode")
else:
logger.info("Restarting PostgreSQL in Slave mode")
# return os.system("pg_ctl restart -m fast -w -D %s -o \"%s\"" % (self.data_dir, self.server_options())) == 0
# return os.system("pg_ctl restart -w -D %s -o \"%s\" -m fast" % (self.data_dir, self.server_options())) == 0
self.stop() # fast shutdown
self.start(master)
def server_options(self):
options = "-c listen_addresses=%s -c port=%s" % (self.host, self.server_port())
for setting, value in self.config["parameters"].iteritems():
options += " -c \"%s=%s\"" % (setting, value)
return options
def server_port(self):
logger.info("MASTER: %s", self.master)
return self.port if self.master else self.read_only_port
def is_healthy(self):
if not self.is_running():
logger.warning("Postgresql is not running.")
return False
if self.is_leader():
return True
return True
def is_healthiest_node(self, state_store):
# this should only happen on initialization
if state_store.last_leader_operation() is None:
return True
if (state_store.last_leader_operation() - self.xlog_position()) > self.config["maximum_lag_on_failover"]:
return False
for member in state_store.members():
if member["hostname"] == self.name:
continue
try:
member_conn = psycopg2.connect(member["address"])
member_conn.autocommit = True
member_cursor = member_conn.cursor()
member_cursor.execute("SELECT %s - (pg_last_xlog_replay_location() - '0/000000'::pg_lsn) AS bytes;" % self.xlog_position())
xlog_diff = member_cursor.fetchone()[0]
logger.info([self.name, member["hostname"], xlog_diff])
if xlog_diff < 0:
member_cursor.close()
return False
member_cursor.close()
except psycopg2.OperationalError:
continue
return True
def replication_slot_name(self):
member = os.environ.get("MEMBER")
(member, _) = re.subn(r'[^a-z0-9]+', r'_', member)
return member
def write_pg_hba(self):
f = open("%s/pg_hba.conf" % self.data_dir, "a")
f.write("host replication %(username)s %(network)s md5" %
{"username": self.replication["username"], "network": self.replication["network"]})
f.close()
def copy_pg_hba(self):
if os.path.exists('pg_hba.conf'):
logger.info("Copying pg_hba.conf file")
shutil.copy2('pg_hba.conf', self.data_dir)
else:
logger.info("No pg_hba.conf file found - skipping")
if os.path.exists('pg_extras.conf'):
logger.info("Copying pg_extras.conf file")
shutil.copy2('pg_extras.conf', self.data_dir)
else:
logger.info("No pg_extras.conf file found - writing empty file")
with open("%s/pg_extras.conf" % self.data_dir, "w"):
pass
pgconf = "%s/postgresql.conf" % self.data_dir
line = "include = 'pg_extras.conf'"
with open(pgconf, "r") as f:
if any(line == l.rstrip("\r\n") for l in f.readlines()):
logger.info("pg_extras.conf include found")
return
logger.info("Adding pg_extras.conf include")
with open(pgconf, "a") as f:
f.write(line + "\n")
def write_recovery_conf(self, leader_hash):
f = open("%s/recovery.conf" % self.data_dir, "w")
f.write("""
standby_mode = 'on'
primary_slot_name = '%(recovery_slot)s'
recovery_target_timeline = 'latest'
""" % {"recovery_slot": self.name})
if leader_hash is not None:
leader = urlparse(leader_hash["address"])
logger.info("Write Recovery Conf: %s:%s", leader.hostname, leader.port)
f.write("""
primary_conninfo = 'user=%(user)s password=%(password)s host=%(hostname)s port=%(port)s sslmode=require sslcompression=1'
""" % {"user": leader.username, "password": leader.password, "hostname": leader.hostname, "port": leader.port})
if "recovery_conf" in self.config:
for name, value in self.config["recovery_conf"].iteritems():
f.write("%s = '%s'\n" % (name, value))
f.close()
def follow_the_leader(self, leader_hash):
leader = urlparse(leader_hash["address"])
if os.system("grep 'host=%(hostname)s port=%(port)s' %(data_dir)s/recovery.conf > /dev/null" % {"hostname": leader.hostname, "port": leader.port, "data_dir": self.data_dir}) != 0:
self.write_recovery_conf(leader_hash)
self.restart()
return True
def follow_no_leader(self):
if not os.path.exists("%s/recovery.conf" % self.data_dir) or os.system("grep primary_conninfo %(data_dir)s/recovery.conf &> /dev/null" % {"data_dir": self.data_dir}) == 0:
self.write_recovery_conf(None)
if self.is_running():
self.restart()
return True
def promote(self):
self.stop()
self.start(master=True)
return os.system("pg_ctl promote -w -D %s" % self.data_dir) == 0
# self.restart(master=True)
def demote(self, leader):
self.write_recovery_conf(leader)
# self.restart()
self.stop()
self.start(master=False)
def run_init_sql(self):
if os.path.exists('init.sql'):
logger.info("Running init.sql")
sql_lines = open("init.sql", "r").read().split("\n")
for sql in sql_lines:
if not sql:
continue
self.query(sql)
def create_replication_user(self):
#logger.info("Governor Starting Up: Running postgres single user mode to create repliaction user")
#os.system("postgres --single -jE << CREATE USER '%s' WITH REPLICATION ENCRYPTED PASSWORD '%s';" % (self.replication["username"], self.replication["password"]))
self.query("CREATE USER \"%s\" WITH REPLICATION ENCRYPTED PASSWORD '%s';" % (self.replication["username"], self.replication["password"]))
def create_replication_slot(self, member):
check = self.query("SELECT count(*) FROM pg_replication_slots WHERE slot_name = '%s';" % member).fetchone()[0]
if not check:
logging.info("Governor Running: Create Replication Slot: %s" % member)
self.query("SELECT * FROM pg_create_physical_replication_slot('%s');" % member)
def xlog_position(self):
return self.query("SELECT pg_last_xlog_replay_location() - '0/0000000'::pg_lsn;").fetchone()[0]
def last_operation(self):
return self.query("SELECT pg_current_xlog_location() - '0/00000'::pg_lsn;").fetchone()[0]
|
mit
| 2,957,386,529,479,896,600 | 37.613559 | 187 | 0.570363 | false |
wikimedia/analytics-snuggle
|
snuggle/mediawiki/templates.py
|
1
|
3448
|
import re, heapq
#One liner to create a heap class
Heap = type("Heap", (list,), {item: (lambda item2: (lambda self, *args: getattr(heapq, "heap" + item2)(self, *args)))(item) for item in ("pop", "push", "pushpop", "replace")})
class Template:
expression = NotImplemented
groups = NotImplemented
def __init__(self, match, group): pass
class VandalWarning(Template):
expression = r'uw-vandalism([1-4])?(im)?'
groups = 2
priority = 1
def __init__(self, match, offset):
self.level = match.group(offset+1)
self.immediate = match.group(offset+2) != None
def classes(self):
return [
"warning",
"vandal"
] + (["level_" + self.level] if self.level else [])
class SpamWarning(Template):
expression = r'uw-spam([1-4])?(im)?'
groups = 2
priority = 1
def __init__(self, match, offset):
self.level = match.group(offset+1)
self.immediate = match.group(offset+2) != None
def classes(self):
return [
"warning",
"spam"
] + (["level_" + self.level] if self.level else [])
class CopyrightWarning(Template):
expression = r'uw-copyright(-([a-z]+))?([1-4])?'
groups = 3
priority = 1
def __init__(self, match, offset):
self.type = match.group(offset+1) != None
self.level = match.group(offset+2)
def classes(self):
return [
"warning",
"copyright"
] + (["level_" + self.level] if self.level else [])
class Block(Template):
expression = r'.*?block.*?|uw-[a-z]*block[a-z]*'
groups = 0
priority = 0
def classes(self): return ["block"]
class GeneralWarning(Template):
expression = r'uw-.+?([1-4])?(im)?'
groups = 2
priority = 2
def __init__(self, match, offset):
self.level = match.group(offset+1)
self.immediate = match.group(offset+2) != None
def classes(self):
return [
"warning",
] + (["level_" + self.level] if self.level else [])
class Welcome(Template):
expression = r'w-[a-z]+|welcome|First article'
groups = 0
priority = 3
def classes(self): return ["welcome"]
class CSD(Template):
expression = r'.*?csd|db-|speedy.*?'
groups = 0
priority = 0
def classes(self): return ["csd"]
class Deletion(Template):
expression = r'proposed del.*?|prod|afd|.*?delet.*?'
groups = 0
priority = 1
def classes(self): return ["deletion"]
class AFC(Template):
expression = r'afc.*?'
groups = 0
priority = 0
def classes(self): return ["afc"]
class Teahouse(Template):
expression = r'teahouse.*?'
groups = 0
priority = 0
def classes(self): return ["teahouse"]
TEMPLATES = [
VandalWarning,
SpamWarning,
CopyrightWarning,
Block,
GeneralWarning,
Welcome,
CSD,
Deletion,
AFC,
Teahouse
]
class Templates:
def __init__(self, templates):
self.re = re.compile(
"|".join(
"Template:(%s)" % t.expression
for t in templates
),
re.I
)
self.templateMap = {}
offset = 1
for template in templates:
self.templateMap[offset] = (template, offset)
offset += template.groups + 1
def classes(self, markup):
t = self.find(markup)
if t: return t.classes()
else: return []
def find(self, markup):
h = Heap()
for t in self.search(markup):
h.push((t.priority, t))
try:
priority, t = h.pop()
return t
except IndexError:
return None
def search(self, markup):
for match in self.re.finditer(markup):
template, offset = self.templateMap[match.lastindex]
yield template(match, offset)
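# Hypothetical usage sketch (editor's illustration; the markup string is made up):
#   templates = Templates(TEMPLATES)
#   templates.classes("{{Template:welcome}} and thanks for your first edit")
#   # -> ['welcome'], since only the Welcome template pattern matches this markup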
|
mit
| -8,899,284,807,679,431,000 | 19.52381 | 176 | 0.62036 | false |
PhilLidar-DAD/geonode
|
geonode/reports/models.py
|
1
|
1930
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import models
from django.utils.translation import ugettext_lazy as _
from geonode.people.models import Profile
from datetime import datetime
class DownloadCount(models.Model):
date = models.DateTimeField( default=datetime.now)
category = models.CharField(_('Category'), max_length=100)
chart_group = models.CharField(_('Chart Group'), max_length=100)
download_type = models.CharField(_('Type'), max_length=100)
count = models.IntegerField(_('Count'))
class SUCLuzViMin(models.Model):
province = models.CharField(_('Province'), max_length=100)
suc = models.CharField(_('Suc'), max_length=100)
luzvimin = models.CharField(_('LuzViMin'), max_length=100)
class DownloadTracker(models.Model):
timestamp = models.DateTimeField(default=datetime.now)
actor = models.ForeignKey(
Profile
)
title = models.CharField(_('Title'), max_length=100)
resource_type = models.CharField(_('Resource Type'), max_length=100)
keywords = models.CharField(_('Keywords'), max_length=255)
dl_type = models.CharField(_('Download Type'), max_length=100)
|
gpl-3.0
| -6,825,539,287,019,205,000 | 41.888889 | 73 | 0.673057 | false |
Elucidation/blemflark_bot
|
blemflark_bot.py
|
1
|
3787
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BlemflarkBot daemon
# Listens on rickandmorty subreddit for comments that
# have Blemflark conversion requests in them and respond with conversion
# Run with --dry to dry run without actual comments
from __future__ import print_function
import praw
import requests
import socket
import time
from datetime import datetime
import argparse
from bb_helpers import *
REPLY_WAIT_TIME = 300
FAIL_WAIT_TIME = 30
def startStream(args):
reddit = praw.Reddit('BB') # client credentials set up in local praw.ini file
bb = reddit.user.me() # BlemflarkBot object
subreddit = reddit.subreddit('rickandmorty')
# Start live stream on all comments in the subreddit
for comment in subreddit.stream.comments():
# Check if comment already has a reply
if not previouslyRepliedTo(comment, bb):
# check if comment has Blemflarks in it
if len(comment.body) > 9000:
# Ignore really long comments, worst case 9000 nines takes ~27 seconds
# to search through
search_result = None
else:
search_result = searchForBlemflarks(comment.body)
if search_result:
# Generate response
response = generateResponseMessage(search_result)
# Reply to submission with response
if not args.dry:
logMessage(comment,"[REPLIED]")
comment.reply(response)
else:
logMessage(comment,"[DRY-RUN-REPLIED]")
print('---')
print(response)
print('---')
# Wait after submitting to not overload
waitWithComments(REPLY_WAIT_TIME)
else:
# Not a Blemflark message
logMessage(comment)
time.sleep(1) # Wait a second between normal comments
else:
logMessage(comment,"[SKIP]") # Skip since replied to already
def processComment(args):
"""Process individual comment, dry run applies as needed"""
reddit = praw.Reddit('BB') # client credentials set up in local praw.ini file
comment = reddit.comment(args.id) # Use specific comment
try:
search_result = searchForBlemflarks(comment.body)
if search_result:
print('Found blemflarks, generating response')
# Generate response
response = generateResponseMessage(search_result)
if not args.dry:
logMessage(comment,"[REPLIED]")
comment.reply(response)
else:
logMessage(comment,"[DRY-RUN-REPLIED]")
print('---')
print(response)
print('---')
except Exception as e:
print("Unable to process comment, probably an incorrect ID:", e)
def main(args):
running = True
while running:
try:
startStream(args)
except (socket.error, requests.exceptions.ReadTimeout,
requests.packages.urllib3.exceptions.ReadTimeoutError,
requests.exceptions.ConnectionError) as e:
print(
"> %s - Connection error, retrying in %d seconds: %s" % (
datetime.now(), FAIL_WAIT_TIME, e))
time.sleep(FAIL_WAIT_TIME)
continue
except Exception as e:
print("Unknown Error, attempting restart in %d seconds:" % FAIL_WAIT_TIME
,e)
time.sleep(FAIL_WAIT_TIME)
continue
except KeyboardInterrupt:
print("Keyboard Interrupt: Exiting...")
running = False
print('Finished')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dry', help='dry run (don\'t actually submit replies)',
action="store_true", default=False)
parser.add_argument('--id', help='Process given comment id only')
args = parser.parse_args()
if args.id is not None:
# Given specific comment id, process this one only
processComment(args)
else:
# Process stream indefinitely
main(args)
|
mit
| -3,671,076,311,809,092,000 | 30.305785 | 79 | 0.657248 | false |
nividic/SolvationToolkit
|
solvationtoolkit/solvated_mixtures.py
|
1
|
30218
|
######################################################################
# SolvationToolkit: A toolkit for setting up molecular simulations of mixtures
# Copyright 2011-2016 UC Irvine and the Authors
#
# Authors: David Mobley and Gaetano Calabro
# With thanks to Kyle Beauchamp, whose liquid_tools.py provided an initial basis for this code
# (https://github.com/choderalab/LiquidBenchmark/blob/master/src/simulation/liquid_tools.py) in April 2015
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, see <http://www.gnu.org/licenses/>.
######################################################################
import numpy as np
import os,sys
import inspect
import itertools
import mdtraj as md
import parmed
import openmoltools
import solvationtoolkit.mol2tosdf as mol2tosdf
from openeye.oechem import *
from openeye.oeiupac import *
from simtk.unit import *
# We require at least ParmEd 2.5.1 because of issues with the .mol2 writer (issue #691 on ParmEd) prior to that, and 2.5.1.10 because of OpenEye reader formatting bugs requireing compressed spacing in .mol2 files (added in ParmEd 2.5.1.10)
# Previously 2.0.4 or later was required due to issues with FudgeLJ/FudgeQQ in resulting GROMACS topologies in
# earlier versions
try: #Try to get version tag
ver = parmed.version
except: #If too old for version tag, it is too old
oldParmEd = Exception('ERROR: ParmEd is too old, please upgrade to 2.5.1 or later')
raise oldParmEd
if ver < (2,5,1,10):
raise RuntimeError("ParmEd is too old, please upgrade to 2.5.1 or later")
def make_path(pathname):
try:
os.makedirs(pathname)
except:
pass
class MixtureSystem(object):
"""A pipeline for simulating liquid mixtures using amber and gromacs parameter files.
Limitations
-----------
Existing files with the same name present in the data directory tree may be overwritten.
This results in a limitation/failure in a small (and probably random) fraction of cases
if multiple systems involving the same monomers are written into the same data directory.
Specifically, openmoltools.amber.build_mixture_prmtop requires that each mol2 file for a
component have a unique residue name, which is handled automatically by openmoltools when
constructing monomers (each is assigned a unique random residue name). However, if these
are overwritten with other monomers (i.e. if we set up, say, 'octanol' in the same directory twice)
which by chance end up with non-unique residue names then amber.build_mixture_prmtop will
fail with a ValueError. This can be avoided by ensuring that if you are constructing multiple
MixtureSystems involving the same monomers, your data directories are different.
This issue also will likely be fixed when openmoltools switches to topology merging
via ParmEd rather than tleap, as unique residue names are built into ParmEd in a better way.
"""
def __init__(self, directory='data'):
"""
Initialization of the MixtureSystem class
Parameters
----------
directory : str
the directory name used to save the data
"""
# Set directory names
self.data_path = directory
self.data_path_monomers = os.path.join(self.data_path,'monomers')
self.data_path_packmol = os.path.join(self.data_path,'packmol_boxes')
self.data_path_amber = os.path.join(self.data_path,'amber')
self.data_path_gromacs = os.path.join(self.data_path,'gromacs')
# List container of all the added components to the solution
self.component_list = []
# List of all the smile strings
self.smile_strings = []
# List of all the number of monomers
self.n_monomers = []
# List of all the mole fractions
self.mole_fractions = []
# List of all the effective compound names. If the compound name is None
# then the compound label will be used in this list as compound name
self.labels = []
# The filling compound is a compound with None molecule number and None
# mole fraction. It is used to fill out the solution
self.filling_compound = None
# Lists of filenames related to gaff mol2 files, amber files and sdf file format
self.gaff_mol2_filenames = []
self.frcmod_filenames = []
self.inpcrd_filenames = []
self.prmtop_filenames = []
self.sdf_filenames = []
# Useful strings used to concatenate the previous lists of filenames
self.mix_fname = ''
self.pdb_filename = ''
self.prmtop_filename = ''
self.inpcrd_filename = ''
self.top_filename = ''
self.gro_filename = ''
# Index used to perform index selection by using __iter__ function
self.__ci = 0
return
def __str__(self):
"""
Printing object function
"""
string = ''
for i in self.component_list:
string = string + str(i)
return string
def __iter__(self):
"""
Index generator
"""
return self
def next(self): # Python 3: def __next__(self)
"""
Select the molecule during an iteration
"""
if self.__ci > len(self.component_list) - 1:
self.__ci = 0
raise StopIteration
else:
self.__ci = self.__ci + 1
return self.component_list[self.__ci - 1]
def __getitem__(self, index):
"""
Index selection function
"""
return self.component_list[index]
def __setitem__(self, index, component):
"""
Index setting function
Parameters
----------
index : int
the component index
component : Component obj
the component to assign to the component in the mixture
MixtureSystem[index] = component
"""
if not isinstance(component, Component):
raise ValueError('The passed component is not a Component class object')
self.component_list[index] = component
def addComponent(self, name=None, **args):
"""
Add a component to the solution
Parameters
----------
name : string
the name of the compound to add to the solution
**args : see class Component for a full description
"""
# Component object creation
component=Component(name, **args)
# Add object to the componet list
self.component_list.append(component)
def build(self, amber=False, gromacs=False, solute_index='auto'):
"""
Build all the monomers and the amber or gromacs mixture files
Parameters
----------
amber : bool
this flag is used to control if output or not the amber files
gromacs : bool
this flag is used to control if output or not the gromacs files
solute_index : int/str, optional. Default: "auto"
Optional parameter to specify which of the components (in the list of specified components)
will be treated as a solute in constructing GROMACS topology files
(which means that a single molecule of this component will be singled out as the 'solute'
in the resulting GROMACS topology file). Valid options are 'auto'
(pick the first component present with n_monomers = 1,
otherwise the first component), None (don't pick any), or an integer
(pick the component smiles_strings[solute_index].
"""
def build_monomers(self):
"""
Generate GAFF mol2 and frcmod files for each chemical
"""
# Filenames generation
for comp in self.component_list:
if comp.label:
mol2_filename = os.path.join(self.data_path_monomers, comp.label+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.label+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.label+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.label+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.label+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.label
else:
mol2_filename = os.path.join(self.data_path_monomers, comp.name+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.name+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.name+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.name+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.name+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.name
# Filling compound selection
if comp.numbers == None and comp.mole_fraction == None:
if self.filling_compound == None:
self.filling_compound = comp
self.mole_fractions.append(comp.mole_fraction)
else:
raise ValueError('Error: Two or more filling compounds have been specified')
# Number and mol fractions lists generation
if comp.numbers:
self.n_monomers.append(comp.numbers)
if comp.mole_fraction is not None:
self.mole_fractions.append(comp.mole_fraction)
# Lists of filenames generation
self.smile_strings.append(comp.smile)
self.gaff_mol2_filenames.append(mol2_filename)
self.frcmod_filenames.append(frcmod_filename)
self.inpcrd_filenames.append(inpcrd_filename)
self.prmtop_filenames.append(prmtop_filename)
self.sdf_filenames.append(sdf_filename)
if not (os.path.exists(mol2_filename) and os.path.exists(frcmod_filename)):
#Convert SMILES strings to mol2 and frcmod files for antechamber
openmoltools.openeye.smiles_to_antechamber(comp.smile, mol2_filename, frcmod_filename)
#Correct the mol2 file partial atom charges to have a total net integer molecule charge
mol2f = parmed.formats.Mol2File
mol2f.write(parmed.load_file(mol2_filename).fix_charges(),mol2_filename, compress_whitespace=True)
#Generate amber coordinate and topology files for the unsolvated molecules
mol_name = os.path.basename(mol2_filename).split('.')[0]
openmoltools.amber.run_tleap(mol_name, mol2_filename, frcmod_filename, prmtop_filename, inpcrd_filename)
#Read Mol2 File and write SDF file
mol2tosdf.writeSDF(mol2_filename, sdf_filename, mol_name)
#Generate unique residue names for molecules in mol2 files
openmoltools.utils.randomize_mol2_residue_names(self.gaff_mol2_filenames)
def build_boxes(self):
"""
Build an initial box with packmol and use it to generate AMBER files
"""
def mole_fractions_to_n_monomers(self, density= 1 * grams/milliliter, cutoff=12*angstrom):
"""
This function is used to generate the number of molecules for each compound
in the solution from the mole fractions of each molecule.
Parameters
----------
density : openmm units
the solution density
cutoff : openmm units
the cutoff distance of the largest compound in the solution
Returns
-------
self.n_monomers : integer list
the list of molecule numbers for each compound in the solution
size : float
the edge of the box volume
"""
# Calculate the maximum atomic distance in a molecule
def max_dist_mol(mol):
max_dist = 0.0
coords = mol.GetCoords() # Are the coords always in A in mol2 file?
for i in range(0, mol.NumAtoms()):
crdi = np.array([coords[i][0], coords[i][1], coords[i][2]])
for j in range(i+1, mol.NumAtoms()):
crdj = np.array([coords[j][0], coords[j][1], coords[j][2]])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist * angstrom
# The sum of all the mole fractions
sum_fractions = sum([i for i in self.mole_fractions if i != None])
if sum_fractions > 1.0:
raise ValueError('Error: The total molar fraction is greater than 1.0')
if sum_fractions == 1.0 and self.filling_compound:
raise ValueError('Error: The total molar fraction is 1.0 and it is not possible to add any filling compound to the solution')
if sum_fractions < 1.0 and not self.filling_compound:
raise ValueError('Error: The total molar fraction is less than 1.0 and the filling compound is missing')
if self.filling_compound:
self.filling_compound.mole_fraction = 1.0 - sum_fractions
self.mole_fractions = [i if i != None else (1.0 - sum_fractions) for i in self.mole_fractions]
max_dist_mols = 0.0 * angstrom
delta_volume = 0.0 * angstrom**3
sum_wgt_frac = 0.0 * grams/mole
for i in range(0, len(self.sdf_filenames)):
istream = oemolistream(self.sdf_filenames[i])#gaff_mol2_files give wrong wgt because not sybyl format!
mol = oechem.OEMol()
if not OEReadMolecule(istream, mol):
raise IOError('Error: It was not possible to create the OpenEye molecule object reading the file: %s' % self.gaff_mol2_filenames[i])
# Molecular weight
wgt = oechem.OECalculateMolecularWeight(mol) * grams/mole
if self.component_list[i].mole_fraction == 0.0:
delta_volume = oechem.OECalculateMolecularWeight(mol) * angstrom**3
sum_wgt_frac = sum_wgt_frac + wgt * self.component_list[i].mole_fraction
max_dist= max_dist_mol(mol)
if max_dist > max_dist_mols:
max_dist_mols = max_dist
cube_length = ((max_dist_mols + 2*cutoff)**3 + delta_volume)**(1.0/3.0)
n_monomers = []
# n_i = Volume * Density * mole_fraction_i/sum_j(wgt_j * mole_fraction_j)
self.n_monomers = [int(round(AVOGADRO_CONSTANT_NA * comp.mole_fraction * density * cube_length**3 / sum_wgt_frac)) \
if comp.mole_fraction !=0 else 1 for comp in self.component_list]
return self.n_monomers, cube_length
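# Worked illustration of the formula above (editor's note; the numbers are made up):
# for a 30 A cube at 1 g/mL with mole fractions 0.1 (phenol, ~94 g/mol) and
# 0.9 (water, ~18 g/mol), sum_j(wgt_j * x_j) ~= 25.6 g/mol, so
# n_phenol ~= N_A * 0.1 * 1 g/mL * (30 A)^3 / 25.6 g/mol ~= 64 molecules.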
if not self.gaff_mol2_filenames:
raise ValueError('The list of gaff mol2 molecules is empty')
if self.n_monomers and self.mole_fractions:
raise ValueError('Error: For different compounds it is not possible to mix mole_fractions and number of molecules')
# The solution has been specified by using number of molecules
if self.n_monomers:
if self.filling_compound:
raise ValueError('Error: The filling compound cannot be mixed with components specified by defining the number of molecules')
size = openmoltools.packmol.approximate_volume_by_density(self.smile_strings, self.n_monomers)
packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], self.n_monomers, box_size = size)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] + ''.join(['_'+str(i) for i in self.n_monomers])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
# The solutions has been specified by using mole fractions
elif self.mole_fractions:
n_monomers, size = mole_fractions_to_n_monomers(self)
# WARNING: The size estimated with the mole_fractions_to_n_monomers function is underestimating
# the volume calculated by using openmoltools and for now we are using this estimate.
# Apparently Packmol is struggling to find convergence and introduces extra molecules
# into the found best solution (bug?)
size = openmoltools.packmol.approximate_volume_by_density(self.smile_strings, self.n_monomers)
packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], n_monomers, box_size = size)
#packed_trj = openmoltools.packmol.pack_box([md.load(mol2) for mol2 in self.gaff_mol2_filenames], n_monomers, box_size = size/anstrom)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] +''.join(['_'+str(i) for i in self.mole_fractions if i is not None])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
return
def convert_to_gromacs(self, solute_index):
"""From AMBER-format prmtop and crd files, generate final solvated GROMACS topology and coordinate files. Ensure that the desired "solute" (as per solute_index) has a single monomer treated via a unique residue name to allow treatment as a solute separate from other residues of the same name (if desired). The solute will be given residue name "solute" Also, check to see if there are "WAT" residues present, in which case tleap will have re-ordered them to the end of the data file. If so, update data structures accordingly and handle conversion appropriately.
Notes
-----
Currently, this function ensures that - after AMBER conversion reorders water molecules with residue names 'WAT' to occur last in the resulting parameter/coordinate files - the internal data structures are updated to have the correct order in the relevant lists (labels, smiles_strings, n_monomers). If for some reason GROMACS conversion were removed, these would need to be updated elsewhere. (Probably this should be done anyway, as this is not really a GROMACS issue.)
"""
# Read in AMBER format parameter/coordinate file and convert in gromacs
gromacs_topology = parmed.load_file(self.prmtop_filename, self.inpcrd_filename )
# Split the topology into components and check that we have the right number of components
components = gromacs_topology.split()
assert len(components)==len(self.n_monomers), "Number of monomers and number of components in the combined topology do not match."
#### HANDLE ORDERING OF WATER ####
# Check if any of the residues is named "WAT". If it is, antechamber will potentially have re-ordered it from where it was (it places residues named "WAT" at the end) so it may no longer appear in the order in which we expect.
resnames = [ components[i][0].residues[0].name for i in range(len(components)) ]
wat_present = False
# Manage presence of WAT residues and possible re-ordering
if 'WAT' in resnames:
# If there is a water present, then we MIGHT have re-ordering. Check smiles to find out where it was originally.
wat_orig_index = self.smile_strings.index('O')
# Where is it now?
wat_new_index = resnames.index('WAT')
# Reordered? If so, we have to adjust the ordering of n_monomers, smiles_strings, labels,
# and potentially solute_index. Filenames will be preserved since these were already created
if wat_orig_index != wat_new_index:
# tleap moves water to the end so if they aren't equal, we know where water will be...
self.n_monomers = self.n_monomers[0:wat_orig_index] + self.n_monomers[wat_orig_index+1:] + [self.n_monomers[wat_orig_index]]
self.smile_strings = self.smile_strings[0:wat_orig_index] + self.smile_strings[wat_orig_index+1:] + [self.smile_strings[wat_orig_index]]
self.labels = self.labels[0:wat_orig_index] + self.labels[wat_orig_index+1:] + [self.labels[wat_orig_index] ]
# Check solute_index and alter if needed
if not solute_index=='auto' and not solute_index==None:
# Index unchanged if it's before the water
if solute_index < wat_orig_index:
pass
# If it is the water, now it is at the end
elif solute_index == wat_orig_index:
solute_index = len(self.n_monomers)-1
# If it was after the water, then it moved up one position
else:
solute_index -= 1
#### END HANDLING OF ORDERING OF WATER ####
# Figure out what we're treating as the solute (if anything)
if solute_index=='auto':
# Check which of the molecules is present in qty 1
try:
solute_index = self.n_monomers.index(1)
except ValueError:
# If none is present in qty 1, then use the first
solute_index = 0
# Check that the passed solute index is correct
check_solute_indices = range(0,len(self.n_monomers))
assert solute_index in check_solute_indices and isinstance(solute_index, int) or solute_index == None, "Solute index must be an element of the list: %s or None. The value passed is: %s" % (check_solute_indices, solute_index)
# Now all we have to do is to change the name of the solute molecule (residue, in ParmEd) and ParmEd will automatically make it a new molecule on write.
# To do this, first build a list of the residue names we want, by molecule
resnames = [ ]
for i in range(len(self.n_monomers)):
# If this is not the solute, just keep what we had
if i!=solute_index:
resnames += [ self.labels[i] ] * self.n_monomers[i]
# If it is the solute, make the first residue be named solute and the rest what they were already
else:
resnames += [ 'solute' ] + [ self.labels[i]] * (self.n_monomers[i]-1)
# Make sure we didn't botch this
assert len(resnames) == len( gromacs_topology.residues ), "Must have the same number of residues named as defined in the topology file."
# Now we just go through and rename all the residues and we're done
for i in range(len(resnames)):
gromacs_topology.residues[i].name = resnames[i]
# Write GROMACS topology/coordinate files
gromacs_topology.save(self.top_filename, format='gromacs')
gromacs_topology.save(self.gro_filename)
return
# Create monomers and packmol directories
make_path(os.path.join(self.data_path_monomers))
make_path(os.path.join(self.data_path_packmol))
# Call the monomers creation and packmol systems
build_monomers(self)
build_boxes(self)
# Create amber files
if amber:
make_path(os.path.join(self.data_path_amber))
self.prmtop_filename = os.path.join(self.data_path_amber, self.mix_fname+'.prmtop')
self.inpcrd_filename = os.path.join(self.data_path_amber, self.mix_fname+'.inpcrd')
tleap_cmd = openmoltools.amber.build_mixture_prmtop(self.gaff_mol2_filenames, self.frcmod_filenames, self.pdb_filename, self.prmtop_filename, self.inpcrd_filename)
# Create gromacs files
if gromacs:
make_path(os.path.join(self.data_path_gromacs))
self.top_filename = os.path.join(self.data_path_gromacs, self.mix_fname+'.top')
self.gro_filename = os.path.join(self.data_path_gromacs, self.mix_fname+'.gro')
convert_to_gromacs(self,solute_index)
#*************************
# Component Class
#*************************
class Component(object):
"""
This class is used to store the component parameters
"""
def __init__(self, name=None, label=None, smile=None, numbers=None, mole_fraction=None):
"""
Initialization class function
Parameters
----------
name : str
the molecule name
label : str
the molecule label used to generates files
smile : str
the molecule smile string
numbers : int
the number of molecule
mole_fraction : float
molecular mole fraction
"""
# Checking name and label
ref_str = ''
if not name and not label:
raise ValueError("Error: No component parameters name or label has been provided for the component")
if label:
if not isinstance(label, str):
raise ValueError("Error: The component label %s is not a string" % label)
ref_str = label
if name:
if not isinstance(name, str):
raise ValueError("Error: The component name %s is not a string" % name)
ref_str = name
if label and not name:
print('\nWARNING: component name not provided; label will be used as component name\n')
# Checking smile, molecule numbers and mole fraction
if smile:
if not isinstance(smile, str):
raise ValueError("Error: The smile % for the component %s is not a string" % (smile, ref_str))
#TO DO: Check if a string is a valid smile string
if numbers is not None:
if not isinstance(numbers, int):
raise ValueError("Error: The molecule numbers %s for the component %s is not an integer" % (numbers, ref_str))
if numbers < 1:
raise ValueError("Error: The molecule numbers %s for the component %s must be a positive integer" % (numbers, ref_str))
if mole_fraction:
if not isinstance(mole_fraction, float):
raise ValueError("Error: The mole fraction %s for the component %s is not a float number" % (mole_fraction, ref_str))
if mole_fraction < 0.0:
raise ValueError("Error: The mole fraction %s for the component %s must be positive" % (mole_fraction, ref_str))
if mole_fraction > 1.0:
raise ValueError("Error: The mole fraction %s for the component %s is greater than one" % (mole_fraction, ref_str))
if numbers and mole_fraction:
raise ValueError("Error: molecule numbers and mole fraction for the compound %s cannot be both specified" % ref_str)
if not smile:
mol = OEMol()
if name:
try:
OEParseIUPACName(mol, name)
smile = OECreateIsoSmiString(mol)
except:
raise ValueError("Error: The supplied name '%s' could not be parsed" % name)
elif label:
try:
OEParseIUPACName(mol, label)
smile = OECreateIsoSmiString(mol)
except:
raise ValueError("Error: The supplied label '%s' could not be parsed" % label)
self.name = name
self.label = label
self.smile = smile
self.numbers = numbers
self.mole_fraction = mole_fraction
return
def __str__(self):
"""
Printing object function
"""
return "\nname = %s\nlabel = %s\nsmile = %s\nnumbers = %s\nmole_frac = %s\n" \
%(self.name, self.label, self.smile, self.numbers, self.mole_fraction)
|
lgpl-2.1
| -3,433,859,057,155,332,600 | 44.304348 | 576 | 0.583162 | false |
slowkow/pytabix
|
test/test.py
|
1
|
3022
|
#!/usr/bin/env python
"""
test_tabix.py
Hyeshik Chang <[email protected]>
Kamil Slowikowski <[email protected]>
March 21, 2015
The MIT License
Copyright (c) 2011 Seoul National University.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import random
import gzip
import tabix
TEST_FILE = 'test/example.gtf.gz'
def read_gtf(filename):
"""Read a GTF file and return a list of [chrom, start, end] items."""
retval = []
for line in gzip.open(filename):
fields = line.rstrip().split('\t')
chrom = fields[0]
start = fields[3]
end = fields[4]
retval.append([chrom, start, end])
return retval
def overlap1(a0, a1, b0, b1):
"""Check if two 1-based intervals overlap."""
return int(a0) <= int(b1) and int(a1) >= int(b0)
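# Quick illustration (editor's note): closed 1-based intervals overlap when
# a0 <= b1 and a1 >= b0, e.g.
#   overlap1(10, 20, 20, 30) -> True   (they share position 20)
#   overlap1(10, 20, 21, 30) -> False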
def get_result(regions, chrom, start, end):
retval = []
for r in regions:
if r[0] == chrom and overlap1(r[1], r[2], start, end):
retval.append(r)
return retval
class TabixTest(unittest.TestCase):
regions = read_gtf(TEST_FILE)
chrom = 'chr1'
start = 25944
end = 27000
result = get_result(regions, chrom, start, end)
tb = tabix.open(TEST_FILE)
def test_query(self):
it = self.tb.query(self.chrom, self.start, self.end)
tb_result = [ [x[0], x[3], x[4]] for x in it ]
self.assertEqual(self.result, tb_result)
def test_querys(self):
query = '{}:{}-{}'.format(self.chrom, self.start, self.end)
it = self.tb.querys(query)
tb_result = [ [x[0], x[3], x[4]] for x in it ]
self.assertEqual(self.result, tb_result)
def test_remote_file(self):
file1 = "ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20100804/" \
"ALL.2of4intersection.20100804.genotypes.vcf.gz"
tabix.open(file1)
def test_remote_file_bad_url(self):
file1 = "ftp://badurl"
with self.assertRaises(tabix.TabixError):
tabix.open(file1)
if __name__ == '__main__':
unittest.main()
|
mit
| 4,635,247,878,536,380,000 | 29.836735 | 78 | 0.673395 | false |
Debian/dput-ng
|
dput/dsc.py
|
1
|
3396
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Copyright (c) 2012 dput authors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
This code deals with the reading and processing of Debian .dsc files.
"""
import os
import os.path
from debian import deb822
from dput.exceptions import DscFileException
class Dsc(object):
"""
Dsc object to help process and store information regarding Debian
.dsc files, used in the upload process.
"""
def __init__(self, filename=None, string=None):
"""
Object constructor. The object allows the user to specify **either**:
#. a path to a *dsc* file to parse
#. a string with the *changes* file contents.
::
a = Dsc(filename='/tmp/packagename_version.changes')
b = Dsc(string='Source: packagename\\nMaintainer: ...')
``filename``
Path to *dsc* file to parse.
``string``
*dsc* file in a string to parse.
"""
if (filename and string) or (not filename and not string):
raise TypeError
if filename:
self._absfile = os.path.abspath(filename)
self._data = deb822.Dsc(file(filename))
else:
self._data = deb822.Dsc(string)
if len(self._data) == 0:
raise DscFileException('Changes file could not be parsed.')
if filename:
self.basename = os.path.basename(filename)
else:
self.basename = None
self._directory = ""
def __getitem__(self, key):
"""
Returns the value of the rfc822 key specified.
``key``
Key of data to request.
"""
return self._data[key]
def __contains__(self, key):
"""
Returns whether the specified RFC822 key exists.
``key``
Key of data to check for existence.
"""
return key in self._data
def get(self, key, default=None):
"""
Returns the value of the rfc822 key specified, but defaults
to a specific value if not found in the rfc822 file.
``key``
Key of data to request.
``default``
Default return value if ``key`` does not exist.
"""
return self._data.get(key, default)
def parse_dsc_file(filename, directory=None):
"""
Parse a .dsc file and return a dput.changes.Dsc instance with
parsed changes file data. The optional directory argument refers to the
base directory where the referred files from the changes file are expected
to be located.
XXX: The directory argument is ignored
"""
_c = Dsc(filename=filename)
return(_c)
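# Minimal usage sketch (editor's illustration; the path is hypothetical):
#   dsc = parse_dsc_file('/tmp/mypackage_1.0-1.dsc')
#   if 'Source' in dsc:
#       print(dsc['Source'], dsc.get('Version', 'unknown'))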
|
gpl-2.0
| 254,489,527,281,749,600 | 28.530435 | 78 | 0.626325 | false |
anniejw6/numb3rs_randomizer
|
app/views.py
|
1
|
4158
|
from flask import (render_template, Flask, request,
flash, session, redirect, url_for, g)
from app import app, forms, models, db, lm, bcrypt
from random import randint
from sqlalchemy import func
import pandas as pd
from flask.ext.login import (LoginManager, login_required, login_user,
logout_user, current_user)
import json
from flask.ext.bcrypt import Bcrypt
import logging
import uuid
import sys
from numpy.random import RandomState
from datetime import datetime
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
lm.login_view = 'login'
@lm.user_loader
def user_loader(user_id):
"""Given *user_id*, return the associated User object.
:param unicode user_id: user_id (email) user to retrieve
"""
#return models.User.query.filter_by(email = user_id).first()
return models.User.query.get(user_id)
def randShots(seed):
prng = RandomState(seed)
treat = prng.randint(0, 2)
if treat == 1:
return('1 Shot')
else:
return(str(treat) + ' Shots')
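# Illustrative note (editor's sketch): seeding the PRNG makes the draw reproducible,
# e.g. randShots(123456) always returns the same arm ('1 Shot' or '0 Shots'),
# while different microsecond seeds vary the assignment roughly 50/50.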
# before request
@app.before_request
def before_request():
if 'round' not in session:
session['round'] = 0
if 'session_idd' not in session:
session['session_idd'] = uuid.uuid4().hex
if current_user.is_authenticated():
session['user_idd'] = session['user_id']
else:
session['user_idd'] = session['session_idd']
# Home Page
# Start New Round
@app.route('/', methods = ['GET', 'POST'])
@app.route('/index/', methods = ['GET', 'POST'])
@login_required
def index():
app.logger.info(session['round'])
#app.logger.info(session['session_idd'])
# seed = datetime.utcnow().microsecond
# app.logger.info(randShots(seed))
if request.method == 'GET':
# Just the button
return render_template('randomize_get.html')
else:
# Generate Treatment
time = datetime.utcnow()
seed = time.microsecond
assignment = randShots(seed)
# Add Round
session['round'] = (session['round'] + 1)
# Record Things
assign = models.Assignment(
session_id = session['session_idd'],
user_id = session['user_idd'],
time = str(time),
seed = seed,
outcome = assignment,
round_num = session['round'])
db.session.add(assign)
db.session.commit()
return render_template('randomize_post.html', treat = assignment)
# Record Errors
@app.route('/record/', methods = ['GET', 'POST'])
@login_required
def record():
return render_template('record.html')
# Log-In & Register
@app.route("/login/", methods=["GET", "POST"])
def login():
"""For GET requests, display the login form.
For POSTs, log in the current user by processing the form."""
form = forms.LoginForm()
#app.logger.info(session['user_id'])
if form.validate_on_submit():
# Try to find user
user = models.User.query.filter_by(name = form.name.data).first()
app.logger.info(user)
# If it exists, log in
if user:
user.authenticated = True
app.logger.info('logged')
# If it doesn't exist, register and log in
else:
app.logger.info('registered')
user = models.User(request.form['name'])
db.session.add(user)
db.session.commit()
login_user(user, remember=True)
session['user_idd'] = session['user_id']
flash('User successfully registered')
#app.logger.info(current_user)
#app.logger.info(session['user_id'])
return redirect(url_for("index"))
return render_template("reg_login.html", form=form)
# Put responses in database
@app.route('/submission', methods=['POST'])
def submission():
app.logger.info(request.form)
time = str(datetime.utcnow())
response = models.Response(
session_id = session['session_idd'],
time = time,
user_id = session['user_idd'],
num_err = int(request.form['num_err']),
err_desc = request.form['err_descrip'],
round_num = session['round'])
db.session.add(response)
db.session.commit()
return str(len(models.Response.query.all()))
# Logout User
@app.route("/logout/", methods=["GET"])
@login_required
def logout():
"""Logout the current user."""
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
session['user_idd'] = session['session_idd']
session['round'] = 0
return redirect(url_for("index"))
|
mit
| 4,077,815,832,180,595,700 | 23.464706 | 70 | 0.689274 | false |
Scille/parsec-cloud
|
tests/backend/test_organization_stats.py
|
1
|
2041
|
# Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import pytest
from uuid import uuid4
from unittest.mock import ANY
from pendulum import now as pendulum_now
from parsec.core.backend_connection import BackendConnectionError
from parsec.core.logged_core import OrganizationStats
@pytest.mark.trio
async def test_organization_stats(
running_backend, backend, realm, alice, alice_core, bob_core, otheralice_core
):
organization_stats = await alice_core.get_organization_stats()
assert organization_stats == OrganizationStats(users=3, data_size=0, metadata_size=ANY)
initial_metadata_size = organization_stats.metadata_size
# Create new metadata
await backend.vlob.create(
organization_id=alice.organization_id,
author=alice.device_id,
encryption_revision=1,
timestamp=pendulum_now(),
realm_id=realm,
vlob_id=uuid4(),
blob=b"1234",
)
organization_stats = await alice_core.get_organization_stats()
assert organization_stats == OrganizationStats(
users=3, data_size=0, metadata_size=initial_metadata_size + 4
)
# Create new data
await backend.block.create(
organization_id=alice.organization_id,
author=alice.device_id,
realm_id=realm,
block_id=uuid4(),
block=b"1234",
)
organization_stats = await alice_core.get_organization_stats()
assert organization_stats == OrganizationStats(
users=3, data_size=4, metadata_size=initial_metadata_size + 4
)
# Bob is not admin, it should fail
with pytest.raises(BackendConnectionError) as exc:
await bob_core.get_organization_stats()
assert (
str(exc.value)
== "Backend error: {'reason': 'User `bob` is not admin', 'status': 'not_allowed'}"
)
# Ensure organization isolation
other_organization_stats = await otheralice_core.get_organization_stats()
assert other_organization_stats == OrganizationStats(users=1, data_size=0, metadata_size=ANY)
|
agpl-3.0
| -1,567,281,872,073,851,100 | 34.189655 | 97 | 0.695737 | false |
RPGOne/Skynet
|
pytorch-master/torch/autograd/_functions/stochastic.py
|
1
|
3277
|
from ..stochastic_function import StochasticFunction
# Gradient formulas are based on Simple Statistical Gradient-Following
# Algorithms for Connectionist Reinforcement Learning, available at
# http://incompleteideas.net/sutton/williams-92.pdf
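# Note added for clarity: for a sampled index i drawn with probability p_i, the
# REINFORCE surrogate loss is -reward * log(p_i), so the gradient w.r.t. p_i is
# -reward / p_i. Multinomial.backward below accumulates exactly that term at
# each sampled index (with a small 1e-6 added for numerical stability).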
class Multinomial(StochasticFunction):
def __init__(self, num_samples, with_replacement):
super(Multinomial, self).__init__()
self.num_samples = num_samples
self.with_replacement = with_replacement
def forward(self, probs):
samples = probs.multinomial(self.num_samples, self.with_replacement)
self.save_for_backward(probs, samples)
self.mark_non_differentiable(samples)
return samples
def backward(self, reward):
probs, samples = self.saved_tensors
if probs.dim() == 1:
probs = probs.unsqueeze(0)
samples = samples.unsqueeze(0)
# normalize probs (multinomial accepts weights)
probs /= probs.sum(1).expand_as(probs)
grad_probs = probs.new().resize_as_(probs).zero_()
output_probs = probs.gather(1, samples)
output_probs.add_(1e-6).reciprocal_()
output_probs.neg_().mul_(reward)
# TODO: add batched index_add
for i in range(probs.size(0)):
grad_probs[i].index_add_(0, samples[i], output_probs[i])
return grad_probs
class Bernoulli(StochasticFunction):
def forward(self, probs):
samples = probs.new().resize_as_(probs).bernoulli_(probs)
self.save_for_backward(probs, samples)
self.mark_non_differentiable(samples)
return samples
def backward(self, reward):
probs, samples = self.saved_tensors
rev_probs = probs.neg().add_(1)
return (probs - samples) / (probs * rev_probs + 1e-6) * reward
class Normal(StochasticFunction):
def __init__(self, stddev=None):
super(Normal, self).__init__()
self.stddev = stddev
assert stddev is None or stddev > 0
def forward(self, means, stddevs=None):
output = means.new().resize_as_(means)
output.normal_()
if self.stddev is not None:
output.mul_(self.stddev)
elif stddevs is not None:
output.mul_(stddevs)
else:
raise RuntimeError("Normal function requires specifying a common "
"stddev, or per-sample stddev")
output.add_(means)
self.save_for_backward(output, means, stddevs)
self.mark_non_differentiable(output)
return output
def backward(self, reward):
output, means, stddevs = self.saved_tensors
grad_stddevs = None
grad_means = means - output # == -(output - means)
assert self.stddev is not None or stddevs is not None
if self.stddev is not None:
grad_means /= 1e-6 + self.stddev ** 2
else:
stddevs_sq = stddevs * stddevs
stddevs_cb = stddevs_sq * stddevs
stddevs_sq += 1e-6
stddevs_cb += 1e-6
grad_stddevs = (stddevs_sq - (grad_means * grad_means))
grad_stddevs /= stddevs_cb
grad_stddevs *= reward
grad_means /= stddevs_sq
grad_means *= reward
return grad_means, grad_stddevs
|
bsd-3-clause
| -2,520,832,152,124,860,400 | 35.010989 | 78 | 0.610009 | false |
JQIamo/artiq
|
artiq/protocols/packed_exceptions.py
|
1
|
1125
|
import inspect
import builtins
import traceback
import sys
__all__ = ["GenericRemoteException", "current_exc_packed", "raise_packed_exc"]
class GenericRemoteException(Exception):
pass
builtin_exceptions = {v: k for k, v in builtins.__dict__.items()
if inspect.isclass(v) and issubclass(v, BaseException)}
def current_exc_packed():
exc_class, exc, exc_tb = sys.exc_info()
if exc_class in builtin_exceptions:
return {
"class": builtin_exceptions[exc_class],
"message": str(exc),
"traceback": traceback.format_tb(exc_tb)
}
else:
message = traceback.format_exception_only(exc_class, exc)[0].rstrip()
return {
"class": "GenericRemoteException",
"message": message,
"traceback": traceback.format_tb(exc_tb)
}
def raise_packed_exc(pack):
if pack["class"] == "GenericRemoteException":
cls = GenericRemoteException
else:
cls = getattr(builtins, pack["class"])
exc = cls(pack["message"])
exc.parent_traceback = pack["traceback"]
raise exc
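# Minimal round-trip sketch (illustrative only, not part of the ARTIQ API): pack
# an exception on one side, then re-raise it the way a receiving peer would.
def _example_round_trip():  # hypothetical helper, only for demonstration
    try:
        {}["missing"]
    except KeyError:
        pack = current_exc_packed()
    try:
        raise_packed_exc(pack)
    except KeyError as exc:
        # the original traceback travels along as a list of strings
        assert exc.parent_traceback == pack["traceback"]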
|
lgpl-3.0
| 7,025,006,415,535,964,000 | 25.785714 | 78 | 0.611556 | false |
imtapps/django-dynamic-rules
|
dynamic_rules/ext.py
|
1
|
1218
|
"""
Helper Extensions for Dynamic Rules
"""
from django.db import models
from django.contrib.contenttypes.models import ContentType
class RuleExtensionManager(models.Manager):
"""
RuleExtensionManager can be used by any model that needs to plug into
    the dynamic rules in some way.
A model that uses this manager must include the following:
rule = models.ForeignKey('dynamic_rules.Rule')
trigger_content_type = models.ForeignKey('contenttypes.ContentType')
trigger_model_id = models.PositiveIntegerField(db_index=True)
trigger_model = generic.GenericForeignKey(fk_field='trigger_model_id',
ct_field='trigger_content_type')
The trigger_model represents the model that the Rule inspects to
determine what "dynamic action" to take.
"""
def get_by_trigger_model(self, trigger_model):
trigger_content_type = ContentType.objects.get_for_model(trigger_model)
return self.filter(trigger_content_type=trigger_content_type, trigger_model_id=trigger_model.pk)
def get_by_rule(self, rule, trigger_model):
base_query = self.get_by_trigger_model(trigger_model)
return base_query.filter(rule=rule)
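# Hedged usage sketch (not part of this package): a model wired to the manager,
# mirroring the fields listed in the docstring above; the model name and any
# extra fields are illustrative only.
#
#   from django.contrib.contenttypes import generic
#
#   class RuleResult(models.Model):
#       objects = RuleExtensionManager()
#       rule = models.ForeignKey('dynamic_rules.Rule')
#       trigger_content_type = models.ForeignKey('contenttypes.ContentType')
#       trigger_model_id = models.PositiveIntegerField(db_index=True)
#       trigger_model = generic.GenericForeignKey(fk_field='trigger_model_id',
#                                                 ct_field='trigger_content_type')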
|
bsd-2-clause
| -5,115,389,525,447,233,000 | 37.0625 | 104 | 0.710181 | false |
Abhi9k/wireshark-dissector
|
tools/rdps.py
|
1
|
4290
|
#!/usr/bin/env python
#
# rdps.py
#
# $Id: rdps.py 32287 2010-03-25 22:22:45Z wmeier $
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
'''\
takes the file listed as the first argument and creates the file listed
as the second argument. It takes a PostScript file and creates a C source
with 2 functions:
print_ps_preamble()
print_ps_finale()
Ported to Python from rdps.c.
'''
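# Rough sketch of the transformation (illustrative; the marker lines are the
# ones matched in main() below). An input PostScript file containing
#
#     % ---- wireshark preamble start ---- %
#     /WS { pop } def
#     % ---- wireshark preamble end ---- %
#
# yields a C file containing roughly (escaping handled by ps_clean_string):
#
#     void print_ps_preamble(FILE *fd) {
#         fprintf(fd, "/WS { pop } def\n");
#     }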
import sys
import os.path
def ps_clean_string(raw_str):
ps_str = ''
for c in raw_str:
if c == '\\':
ps_str += '\\\\'
elif c == '%':
ps_str += '%%'
elif c == '\n':
ps_str += '\\n'
else:
ps_str += c
return ps_str
def start_code(fd, func):
script_name = os.path.split(__file__)[-1]
fd.write("void print_ps_%s(FILE *fd) {\n" % func)
def write_code(fd, raw_str):
ps_str = ps_clean_string(raw_str)
fd.write("\tfprintf(fd, \"%s\");\n" % ps_str)
def end_code(fd):
fd.write("}\n\n\n")
def exit_err(msg=None, *param):
if msg is not None:
sys.stderr.write(msg % param)
sys.exit(1)
# Globals
STATE_NULL = 'null'
STATE_PREAMBLE = 'preamble'
STATE_FINALE = 'finale'
def main():
    state = STATE_NULL
if len(sys.argv) != 3:
exit_err("%s: input_file output_file\n", __file__)
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
script_name = os.path.split(__file__)[-1]
output.write('''\
/* DO NOT EDIT
*
* Created by %s.
*
* ps.c
* Definitions for generating PostScript(R) packet output.
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
#include "ps.h"
''' % script_name)
for line in input:
#line = line.rstrip()
if state is STATE_NULL:
if line.startswith("% ---- wireshark preamble start ---- %"):
state = STATE_PREAMBLE
start_code(output, "preamble")
continue
elif line.startswith("% ---- wireshark finale start ---- %"):
state = STATE_FINALE
start_code(output, "finale")
continue
elif state is STATE_PREAMBLE:
if line.startswith("% ---- wireshark preamble end ---- %"):
state = STATE_NULL
end_code(output)
continue
else:
write_code(output, line)
elif state is STATE_FINALE:
if line.startswith("% ---- wireshark finale end ---- %"):
state = STATE_NULL
end_code(output)
continue
else:
write_code(output, line)
else:
exit_err("NO MATCH:%s", line)
sys.exit(0)
if __name__ == "__main__":
main()
|
gpl-2.0
| -1,736,997,416,584,621,600 | 27.791946 | 78 | 0.610723 | false |
vmware/ansible-module-chaperone
|
os_heat_status.py
|
1
|
6902
|
#!/usr/bin/python
# coding=utf-8
#
# Copyright 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: vio_check_heat_stack
short_description: Checks if a heat stack is present and deletes it when the stack is in a given state
description:
    Module checks whether a heat stack is present for the specified project. If the heat stack is in
    one of the states DELETE_FAILED, CREATE_COMPLETE or CREATE_FAILED,
    the module will delete the heat stack. Module specifically developed for the ansible-role-vio
requirements:
- keystoneclient.v2_0
- requests
- urlparse
Tested on:
- vio 2.5
- ansible 2.1.2
version_added: 2.2
author: VMware
options:
auth_url:
description:
- keystone authentication for the openstack api endpoint
required: True
username:
description:
- user with rights to specified project
required: True
password:
description:
- password for specified user
required: True
    project_name:
        description:
            - project (tenant) name with authorization for the specified project
required: True
'''
EXAMPLES = '''
- name: Check Heat stack present
vio_check_heat_stack:
auth_url: "https://{{ vio_loadbalancer_vip }}:5000/v2.0"
username: "{{ projectuser }}"
password: "{{ projectpass }}"
tenant_name: "{{ vio_val_project_name }}"
heat_stack_name: "{{ vio_val_heat_name }}"
register: stack_present
tags:
- validate_openstack
'''
RETURN = '''
stack_data_info:
    description: Empty list if no stacks were present, otherwise the list of stacks handled (name and status)
    returned: always
    type: list
    sample: [{"name": "demo-stack", "status": "CREATE_COMPLETE"}]
'''
try:
from keystoneclient.v2_0 import client as ks_client
from urlparse import urlparse
import requests
import time
HAS_CLIENTS = True
except ImportError:
HAS_CLIENTS = False
def keystone_auth(module):
ksclient = None
try:
ksclient = ks_client.Client(username=module.params['username'],
password=module.params['password'],
tenant_name=module.params['project_name'],
auth_url=module.params['auth_url'],
insecure=True)
except Exception as e:
module.fail_json(msg="Failed to get keystone client authentication: {}".format(e))
return ksclient
def stack_get(module, heaturl, token, status_code):
rheaders = {'X-Auth-Token': "%s" % token}
resp = requests.get(heaturl, headers=rheaders, verify=False)
if resp.status_code != status_code:
module.fail_json(msg="Failed to get stack status: {}".format(resp.status_code))
content = resp.json()
return content
def stack_delete(module, heaturl, token, status_code):
rheaders = {'X-Auth-Token': "%s" % token}
resp = requests.delete(heaturl, headers=rheaders, verify=False)
if resp.status_code != status_code:
module.fail_json(msg="Failed to get stack status: {}".format(resp.status_code))
return resp.status_code
def project_stacks(module, token, endpoint, project_id):
url = 'https://{}:8004/v1/{}/stacks'.format(endpoint, project_id)
content = stack_get(module, url, token, 200)
return content['stacks']
def stack_status(module, token, endpoint, project_id, stack_data):
stack_name = stack_data['stack_name']
stack_id = stack_data['id']
url = 'https://{}:8004/v1/{}/stacks/{}/{}'.format(endpoint, project_id, stack_name, stack_id)
content = stack_get(module, url, token, 200)
return content['stack']['stack_status']
def wait_for_stack(module, token, endpoint, project_id):
stack_info = []
url = 'https://{}:8004/v1/{}/stacks'.format(endpoint, project_id)
del_url = '{}/{}/{}'
stacks = project_stacks(module, token, endpoint, project_id)
if not stacks:
return stack_info
for stack in stacks:
stack_delete_url = del_url.format(url, stack['stack_name'], stack['id'])
wait_count = 0
while wait_count < 21:
project_stack_status = project_stacks(module, token, endpoint, project_id)
if not project_stack_status:
break
status = stack_status(module, token, endpoint, project_id, stack)
stack_data = {'name': stack['name'], 'status': status}
if status == "CREATE_COMPLETE" or status == "CREATE_FAILED":
delete_status = stack_delete(module, stack_delete_url, token, 204)
stack_info.append(stack_data)
elif status == "DELETE_IN_PROGRESS":
stack_data.update({'status': status})
stack_info.append(stack_data)
wait_count += 1
time.sleep(45)
elif status == "DELETE_FAILED":
delete_status = stack_delete(module, stack_delete_url, token, 204)
if not (delete_status == 204):
msg = "Failed to Delete Stack: {} with STATUS - {}".format(stack['stack_name'], delete_status)
module.fail_json(msg=msg)
elif delete_status == 204:
break
else:
wait_count += 1
time.sleep(20)
if wait_count == 21:
break
return stack_info
def main():
argument_spec = dict(
auth_url=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(required=True, type='str', no_log=True),
project_name=dict(required=True, type='str'),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_CLIENTS:
module.fail_json(msg='python-requests is required for this module')
changed = False
ks = keystone_auth(module)
token = ks.auth_token
project_id = ks.tenant_id
vioendpoint = urlparse(module.params['auth_url']).netloc.split(':')[0]
project_stack_info = wait_for_stack(module, token, vioendpoint, project_id)
if project_stack_info:
        changed = True
module.exit_json(changed=changed, stack_data_info=project_stack_info)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
apache-2.0
| -1,103,609,170,580,953,200 | 30.515982 | 114 | 0.620545 | false |
moehle/investor_lifespan_model
|
examples/example.py
|
1
|
6290
|
import investor_lifespan_model as mdl
import numpy as np
import matplotlib.pyplot as plt
# Define problem parameters:
np.random.seed(0)
t0 = 30 # Current age.
W0 = 100 # Initial wealth.
δ = 10 # Relative risk aversion parameter.
K = 10 # Number of lifetime simulations to run.
Δt = 1/12 # Duration of each time step.
tf = 100 # Maximum age considered.
# Define after-tax income over time.
def Y(t):
t_vec = [ 29, 30, 40, 50, 60, 61, 70, 100 ] # age
Y_vec = [ 70, 70, 80, 90, 100, 0, 0, 0 ] # income
return np.interp(t, t_vec, Y_vec)
# Define time-weighting functions for utility.
def h(t):
t_vec = [ 30, 40, 50, 60, 70, 80, 90, 100 ] # age
h_vec = [ 1, 1, 1, 1, 1, 1, 1, 1 ] # no children
h_vec = [ 1, 2, 2, 1, 1, 1, 1, 1 ] # with children
return np.interp(t, t_vec, h_vec)**(δ-1)
# Define time-weighting functions for bequeathment.
def m(t):
t_vec = [ 30, 40, 50, 60, 70, 80, 90, 100 ] # age
m_vec = [ 0, 0, 0, 0, 0, 0, 0, 0 ] # no children
m_vec = [ 0, 20, 10, 8, 5, 3, 2, 1 ] # with children
return np.interp(t, t_vec, m_vec)**(-(1-δ))
# Set up problem:
inv = mdl.Investor(mdl.π, mdl.G, δ, h, m, Y)
ins = mdl.Insurer(inv)
mkt = mdl.Market()
mdl = mdl.LifespanModel(inv, ins, mkt, t0=t0, tf=tf, Δt=Δt)
# Simulate several lifetimes:
res = []
for k in range(K):
res += [mdl.simulate(W0)]
print('\nRECOMMENDATIONS:')
print('Consumption / year : $', int(1000*mdl.C(W0, 0)))
print('Stock/bond ratio : ', int(100*mdl.w(W0, 0)), '%')
print('Insurance premium : $', int(1000*np.max([mdl.P(W0, 0),0])))
print('Annuity income : $', int(1000*np.max([-mdl.P(W0, 0),0])))
print('NPV of future wages : $', int(1000*(mdl.b_vec[0])))
print('Rel. risk aversion : ', δ)
print('Abs. risk aversion : ', res[0]['ARA'][0])
print('Discount factor : ', int(res[0]['discount']*1000)/10, '%' )
#PLOTTING
if True:
plt.close('all')
# Plot mortality statistics:
if True:
plt.figure()
plt.title('MORTALITY STATISTICS')
plt.subplot(311)
plt.step(res[0]['t'], res[0]['π'] / res[0]['G'][0], color='k')
plt.ylim(ymin=0)
plt.xlim(xmin=t0)
plt.ylabel('Death PDF')
plt.subplot(312)
plt.step(res[0]['t'], res[0]['G'], color='k')
plt.ylim(ymin=0)
plt.xlim(xmin=t0)
plt.ylabel('Survival function')
plt.subplot(313)
plt.step(res[0]['t'], res[0]['λ'], color='k')
plt.ylim(ymin=0, ymax=1)
plt.xlim(xmin=t0)
plt.ylabel('Force of mortality')
# Plot risk aversion, marginal utility:
if True:
plt.figure()
plt.title('PERSONAL ECONOMIC DATA')
plt.subplot(211)
for k in range(K):
plt.step(res[k]['t'], res[k]['ARA'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['ARA'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.xlim(xmin=t0)
plt.ylim(ymin=0)
plt.ylabel('Absolute risk aversion')
plt.subplot(212)
for k in range(K):
plt.step(res[k]['t'], res[k]['dJdW'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['dJdW'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.step(res[k]['t'], res[k]['dJdW_bar'], color='k', zorder=1)
plt.xlim(xmin=t0)
plt.yscale('log')
plt.ylabel('Marginal utility of wealth')
# Plot investment fraction, life insurance:
if True:
plt.figure()
plt.title('DECISIONS')
plt.subplot(311)
for k in range(K):
plt.step(res[k]['t'], res[k]['w'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['w'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.xlim(xmin=t0)
plt.ylabel('Stock/bond ratio')
plt.subplot(312)
for k in range(K):
plt.step(res[k]['t'], res[k]['P'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['P'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.xlim(xmin=t0)
plt.ylabel('Insurance premium (k$)')
plt.subplot(313)
for k in range(K):
plt.step(res[k]['t'], res[k]['P']/res[k]['μ'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['P'][res[k]['k_death']] / res[k]['μ'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.xlim(xmin=t0)
plt.ylabel('Insurance Payout (k$)')
plt.xlabel('Age')
# Plot wealth, consumption, and bequeathment:
if True:
plt.figure()
plt.title('WEALTH USAGE')
plt.subplot(311)
for k in range(K):
plt.step(res[k]['t'], res[k]['W'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['W'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.step(res[k]['t'], res[k]['Wbar'], color='k', zorder=1)
plt.ylim(ymin=0)
plt.xlim(xmin=t0)
plt.ylabel('Wealth (k$)')
plt.subplot(312)
for k in range(K):
plt.step(res[k]['t'], res[k]['C'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['C'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.ylim(ymin=0)
plt.xlim(xmin=t0)
plt.ylabel('Consumption (k$)')
plt.subplot(313)
for k in range(K):
plt.step(res[k]['t'], res[k]['Z'], color='.75', zorder=1)
plt.scatter(res[k]['t'][res[k]['k_death']],
res[k]['Z'][res[k]['k_death']],
marker='x', color='r', zorder=2)
plt.xlim(xmin=t0)
plt.ylim(ymin=0)
plt.ylabel('Bequeathment (k$)')
plt.xlabel('Age')
plt.show()
|
mit
| 4,605,922,961,293,976,000 | 33.679558 | 88 | 0.490362 | false |
sergiotocalini/pyaejokuaa
|
trunk/lib/DBAdmin.py
|
1
|
3904
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from DBModel import *
setup_all()
create_all()
class Administrator():
def identify_table(self, wtable):
if wtable == 'servers':
table = Servers()
elif wtable == 'profiles':
table = Profiles()
else:
            print ('The table you want to insert into does not exist.')
table = None
return table
def insert_dict(self, wtable, listdict):
table = self.identify_table(wtable)
if table:
result = [table.from_dict(i) for i in listdict]
session.commit()
return result
else:
return False
def delete(self, listquery):
result = [i.delete() for i in listquery]
session.commit()
return result
def update(self, listquery, dic):
result = [i.from_dict(dic) for i in listquery]
session.commit()
return result
class Querys():
def identify_table(self, wtable):
if wtable == 'servers':
return Servers.query.all()
elif wtable == 'profiles':
return Profiles.query.all()
else:
            print ('The table to query does not exist.')
return False
def all_table(self, wtable, flag=False):
table = self.identify_table(wtable)
if flag:
dic = {}
counter = 0
for i in table:
dic[counter] = i.to_dict()
counter += 1
return dic
else:
return table
def like_table(self, wtable, value):
value = "%" + value + "%"
if wtable == "host":
query_filter = Servers.host.like(value.decode())
return Servers.query.filter(query_filter).all()
elif wtable == "addr":
query_filter = Servers.addr.like(value.decode())
return Servers.query.filter(query_filter).all()
elif wtable == "system":
query_filter = Servers.system.like(value.decode())
return Servers.query.filter(query_filter).all()
elif wtable == "comment":
query_filter = Servers.comment.like(value.decode())
return Servers.query.filter(query_filter).all()
else:
return None
def search_table(self, wtable, value):
if wtable == "server_id":
return Servers.get_by(id=value)
elif wtable == "server_addr":
            return Servers.get_by(addr=value.decode())
elif wtable == "server_who":
return Servers.query.filter_by(profile=value).all()
elif wtable == "profile":
return Profiles.get_by(profile=value.decode())
else:
return None
def like_servers(self, user_id, table_filter=None, value=None, asdict=False):
profile = self.search_table("profile", user_id)
if not profile:
            print ('You must specify a valid user_id.')
return False
else:
if table_filter and value:
servers = self.like_table(table_filter, value)
if asdict:
return [i.to_dict() for i in servers]
else:
return servers
else:
return False
def get_profile_servers(self, user_id, asdict=True):
profile = self.search_table("profile", user_id)
if not profile:
            print ('You must specify a valid user_id.')
return False
else:
servers = self.search_table("server_who", profile)
if asdict:
return dict((i.id,i.to_dict()) for i in servers)
else:
return servers
def get_server_info(self, server, asdict=True):
res = self.search_table('server_id', server)
if asdict:
return res.to_dict()
return res
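# Hedged usage sketch (illustrative; assumes the Servers/Profiles models from
# DBModel expose the host/addr/profile columns referenced above):
#
#   admin = Administrator()
#   admin.insert_dict('servers', [{'host': 'web01', 'addr': '10.0.0.1'}])
#
#   q = Querys()
#   servers = q.get_profile_servers('someprofile')            # dict keyed by id
#   matches = q.like_servers('someprofile', 'host', 'web', asdict=True)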
|
gpl-2.0
| -4,011,784,613,249,496,000 | 31.533333 | 81 | 0.538166 | false |
hayj/WorkspaceManager
|
workspacemanager/test/utils.py
|
1
|
1472
|
# coding: utf-8
import os
from shutil import *
from workspacemanager.utils import *
import sh
def fileToStr(path):
with open(path, 'r') as myfile:
data = myfile.read()
return data
def createFakeDir(projectName="ProjectTest", resetWorkspaceTest=True, parentFolder=None):
""" Create a fake project """
if parentFolder is None:
parentFolder = ""
else:
parentFolder = parentFolder + "/"
projectPackage = projectName.lower()
thisLibPackageDirectory = os.path.dirname(os.path.realpath(__file__))
thisLibPackageDirectory = os.path.abspath(os.path.join(thisLibPackageDirectory, os.pardir))
workspaceTestPath = thisLibPackageDirectory + '/test/workspacetest/'
if resetWorkspaceTest and os.path.isdir(workspaceTestPath):
rmtree(workspaceTestPath)
if resetWorkspaceTest:
os.mkdir(workspaceTestPath)
if parentFolder != "" and not os.path.isdir(workspaceTestPath + "/" + parentFolder):
os.mkdir(workspaceTestPath + "/" + parentFolder) # Warning don't work if parentFolder's depth > 1
sh.touch(workspaceTestPath + "/wm-conf.json")
theProjectDirectory = workspaceTestPath + '/' + parentFolder + projectName
theProjectPackageDirectory = theProjectDirectory + '/' + projectPackage
os.mkdir(theProjectDirectory)
os.mkdir(theProjectPackageDirectory)
touch(theProjectPackageDirectory + "/__init__.py")
return theProjectDirectory
if __name__ == '__main__':
pass
|
mit
| -8,198,744,592,615,761,000 | 37.763158 | 105 | 0.70856 | false |
kayhayen/Nuitka
|
nuitka/nodes/LoopNodes.py
|
1
|
16137
|
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop nodes.
There are for and loop nodes, but both are reduced to loops with break/continue
statements for it. These re-formulations require that optimization of loops has
to be very general, yet the node type for loop, becomes very simple.
"""
from nuitka.optimizations.TraceCollections import TraceCollectionBranch
from nuitka.tree.Extractions import getVariablesWrittenOrRead
from .Checkers import checkStatementsSequenceOrNone
from .NodeBases import StatementBase, StatementChildHavingBase
from .shapes.StandardShapes import tshape_unknown, tshape_unknown_loop
tshape_unknown_set = frozenset([tshape_unknown])
def minimizeShapes(shapes):
# Merge some shapes automatically, no need to give a set.
if tshape_unknown in shapes:
return tshape_unknown_set
return shapes
class StatementLoop(StatementChildHavingBase):
kind = "STATEMENT_LOOP"
named_child = "loop_body"
checker = checkStatementsSequenceOrNone
__slots__ = (
"loop_variables",
"loop_start",
"loop_resume",
"loop_previous_resume",
"incomplete_count",
)
def __init__(self, loop_body, source_ref):
StatementChildHavingBase.__init__(self, value=loop_body, source_ref=source_ref)
self.loop_variables = None
# Traces of the variable at the start of loop, to detect changes and make
# those restart optimization.
self.loop_start = {}
# Shapes currently known to be present when the loop is started or resumed
# with continue statements.
self.loop_resume = {}
# Shapes from last time around, to detect the when it becomes complete, i.e.
# we have seen it all.
self.loop_previous_resume = {}
# To allow an upper limit in case it doesn't terminate.
self.incomplete_count = 0
def mayReturn(self):
loop_body = self.subnode_loop_body
if loop_body is not None and loop_body.mayReturn():
return True
return False
@staticmethod
def mayBreak():
# The loop itself may never break another loop.
return False
@staticmethod
def mayContinue():
# The loop itself may never continue another loop.
return False
def isStatementAborting(self):
loop_body = self.subnode_loop_body
if loop_body is None:
return True
else:
return not loop_body.mayBreak()
@staticmethod
def mayRaiseException(exception_type):
# Loops can only raise, if their body does, but they also issue the
# async exceptions, so we must make them do it all the time.
return True
# loop_body = self.subnode_loop_body
# return loop_body is not None and \
# self.subnode_loop_body.mayRaiseException(exception_type)
def _computeLoopBody(self, trace_collection):
# Rather complex stuff, pylint: disable=too-many-branches,too-many-locals,too-many-statements
# print("Enter loop body", self.source_ref)
loop_body = self.subnode_loop_body
if loop_body is None:
return None, None, None
# Look ahead. what will be written and degrade to initial loop traces
# about that if we are in the first iteration, later we # will have more
# precise knowledge.
if self.loop_variables is None:
self.loop_variables = getVariablesWrittenOrRead(loop_body)
all_first_pass = True
else:
all_first_pass = False
        # Track if we got incomplete knowledge due to the loop. If so, we are not
        # done, even if no optimization was done; once we are complete, they can come.
incomplete_variables = None
# Mark all variables as loop wrap around that are written in the loop and
# hit a 'continue' and make them become loop merges. We will strive to
# reduce self.loop_variables if we find ones that have no change in all
# 'continue' exits.
loop_entry_traces = set()
for loop_variable in self.loop_variables:
current = trace_collection.getVariableCurrentTrace(loop_variable)
if all_first_pass:
first_pass = True
# Remember what we started with, so we can detect changes from outside the
# loop and make them restart the collection process, if the pre-conditions
# got better.
self.loop_start[loop_variable] = current
else:
if not self.loop_start[loop_variable].compareValueTrace(current):
first_pass = True
self.loop_start[loop_variable] = current
else:
first_pass = False
if first_pass:
incomplete = True
self.loop_previous_resume[loop_variable] = None
# Don't forget to initialize the loop resume traces with the starting point. We use
# a special trace class that will not take the list too serious though.
self.loop_resume[loop_variable] = set()
current.getTypeShape().emitAlternatives(
self.loop_resume[loop_variable].add
)
else:
if (
self.loop_resume[loop_variable]
!= self.loop_previous_resume[loop_variable]
):
# print("incomplete", self.source_ref, loop_variable, ":",
# self.loop_previous_resume[loop_variable], "<->", self.loop_resume[loop_variable])
incomplete = True
if incomplete_variables is None:
incomplete_variables = set()
incomplete_variables.add(loop_variable)
else:
# print("complete", self.source_ref, loop_variable, ":",
# self.loop_previous_resume[loop_variable], "<->", self.loop_resume[loop_variable])
incomplete = False
# Mark the variable as loop usage before executing it.
loop_entry_traces.add(
(
loop_variable,
trace_collection.markActiveVariableAsLoopMerge(
loop_node=self,
current=current,
variable=loop_variable,
shapes=self.loop_resume[loop_variable],
incomplete=incomplete,
),
)
)
abort_context = trace_collection.makeAbortStackContext(
catch_breaks=True,
catch_continues=True,
catch_returns=False,
catch_exceptions=False,
)
with abort_context:
# Forget all iterator and other value status. TODO: These should be using
# more proper tracing to benefit.
trace_collection.resetValueStates()
result = loop_body.computeStatementsSequence(
trace_collection=trace_collection
)
# Might be changed.
if result is not loop_body:
self.setChild("loop_body", result)
loop_body = result
if loop_body is not None:
# Emulate terminal continue if not aborting.
if not loop_body.isStatementAborting():
trace_collection.onLoopContinue()
continue_collections = trace_collection.getLoopContinueCollections()
# Rebuild this with only the ones that actually changed in the loop.
self.loop_variables = []
for loop_variable, loop_entry_trace in loop_entry_traces:
# Giving up
if self.incomplete_count >= 20:
self.loop_previous_resume[loop_variable] = self.loop_resume[
loop_variable
] = set((tshape_unknown_loop,))
continue
# Remember what it was at the start, to be able to tell if it changed.
self.loop_previous_resume[loop_variable] = self.loop_resume[
loop_variable
]
self.loop_resume[loop_variable] = set()
loop_resume_traces = set(
continue_collection.getVariableCurrentTrace(loop_variable)
for continue_collection in continue_collections
)
# Only if the variable is re-entering the loop, annotate that.
if not loop_resume_traces:
# Remove the variable, need not consider it
# ever again.
del self.loop_resume[loop_variable]
del self.loop_previous_resume[loop_variable]
del self.loop_start[loop_variable]
continue
# Keep this as a loop variable
self.loop_variables.append(loop_variable)
# Tell the loop trace about the continue traces.
loop_entry_trace.addLoopContinueTraces(loop_resume_traces)
# Also consider the entry trace before loop from here on.
loop_resume_traces.add(self.loop_start[loop_variable])
shapes = set()
for loop_resume_trace in loop_resume_traces:
loop_resume_trace.getTypeShape().emitAlternatives(shapes.add)
self.loop_resume[loop_variable] = minimizeShapes(shapes)
# If we break, the outer collections becomes a merge of all those breaks
# or just the one, if there is only one.
break_collections = trace_collection.getLoopBreakCollections()
if incomplete_variables:
self.incomplete_count += 1
trace_collection.signalChange(
"loop_analysis",
self.source_ref,
lambda: "Loop has incomplete variable types after %d attempts for '%s'."
% (
self.incomplete_count,
",".join(variable.getName() for variable in incomplete_variables),
),
)
else:
if self.incomplete_count:
trace_collection.signalChange(
"loop_analysis",
self.source_ref,
lambda: "Loop has complete variable types after %d attempts."
% self.incomplete_count,
)
self.incomplete_count = 0
return loop_body, break_collections, continue_collections
def computeStatement(self, trace_collection):
outer_trace_collection = trace_collection
trace_collection = TraceCollectionBranch(parent=trace_collection, name="loop")
loop_body, break_collections, continue_collections = self._computeLoopBody(
trace_collection
)
if break_collections:
outer_trace_collection.mergeMultipleBranches(break_collections)
# Consider trailing "continue" statements, these have no effect, so we
# can remove them.
if loop_body is not None:
assert loop_body.isStatementsSequence()
statements = loop_body.subnode_statements
assert statements # Cannot be empty
# If the last statement is a "continue" statement, it can simply
# be discarded.
last_statement = statements[-1]
if last_statement.isStatementLoopContinue():
if len(statements) == 1:
self.subnode_body.finalize()
self.clearChild("loop_body")
loop_body = None
else:
last_statement.parent.replaceChild(last_statement, None)
last_statement.finalize()
trace_collection.signalChange(
"new_statements",
last_statement.getSourceReference(),
"""\
Removed useless terminal 'continue' as last statement of loop.""",
)
elif last_statement.isStatementLoopBreak():
if not continue_collections and len(break_collections) == 1:
loop_body = loop_body.removeStatement(last_statement)
return (
loop_body,
"new_statements",
"Removed useless loop with only a break at the end.",
)
# Consider leading "break" statements, they should be the only, and
# should lead to removing the whole loop statement. Trailing "break"
# statements could also be handled, but that would need to consider if
# there are other "break" statements too. Numbering loop exits is
# nothing we have yet.
if loop_body is not None:
assert loop_body.isStatementsSequence()
statements = loop_body.subnode_statements
assert statements # Cannot be empty
if len(statements) == 1 and statements[-1].isStatementLoopBreak():
# TODO: Should be dead code now, due to the optimization above.
assert False
return (
None,
"new_statements",
"""\
Removed useless loop with immediate 'break' statement.""",
)
# Also consider the threading intermission. TODO: We ought to make it
# explicit, so we can see it potentially disrupting and changing the
# global variables. It may also raise.
outer_trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
@staticmethod
def getStatementNiceName():
return "loop statement"
class StatementLoopContinue(StatementBase):
kind = "STATEMENT_LOOP_CONTINUE"
def __init__(self, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
@staticmethod
def isStatementAborting():
return True
@staticmethod
def mayRaiseException(exception_type):
return False
@staticmethod
def mayContinue():
return True
def computeStatement(self, trace_collection):
# This statement being aborting, will already tell everything.
trace_collection.onLoopContinue()
return self, None, None
@staticmethod
def getStatementNiceName():
return "loop continue statement"
class StatementLoopBreak(StatementBase):
kind = "STATEMENT_LOOP_BREAK"
def __init__(self, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
@staticmethod
def isStatementAborting():
return True
@staticmethod
def mayRaiseException(exception_type):
return False
@staticmethod
def mayBreak():
return True
def computeStatement(self, trace_collection):
# This statement being aborting, will already tell everything.
trace_collection.onLoopBreak()
return self, None, None
@staticmethod
def getStatementNiceName():
return "loop break statement"
|
apache-2.0
| 5,799,127,045,616,669,000 | 34.86 | 103 | 0.590011 | false |
metomi/isodatetime
|
metomi/isodatetime/tests/test_01.py
|
1
|
30974
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=pointless-statement
# ----------------------------------------------------------------------------
# Copyright (C) British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""This tests the ISO 8601 data model functionality."""
import pytest
import unittest
from metomi.isodatetime import data
from metomi.isodatetime.exceptions import BadInputError
def get_timeduration_tests():
"""Yield tests for the duration class."""
tests = {
"get_days_and_seconds": [
([], {"hours": 25}, (1, 3600)),
([], {"seconds": 59}, (0, 59)),
([], {"minutes": 10}, (0, 600)),
([], {"days": 5, "minutes": 2}, (5, 120)),
([], {"hours": 2, "minutes": 5, "seconds": 11.5}, (0, 7511.5)),
([], {"hours": 23, "minutes": 1446}, (1, 83160))
],
"get_seconds": [
([], {"hours": 25}, 90000),
([], {"seconds": 59}, 59),
([], {"minutes": 10}, 600),
([], {"days": 5, "minutes": 2}, 432120),
([], {"hours": 2, "minutes": 5, "seconds": 11.5}, 7511.5),
([], {"hours": 23, "minutes": 1446}, 169560)
]
}
for method, method_tests in tests.items():
for method_args, test_props, ctrl_results in method_tests:
yield test_props, method, method_args, ctrl_results
def get_duration_subtract_tests():
"""Yield tests for subtracting a duration from a timepoint."""
return [
{
"start": {
"year": 2010, "day_of_year": 65,
# "month_of_year": 3, "day_of_month": 6,
"hour_of_day": 12, "minute_of_hour": 0, "second_of_minute": 0,
"time_zone_hour": 0, "time_zone_minute": 0
},
"duration": {
"years": 6
},
"result": {
"year": 2004, # "day_of_year": 65,
"month_of_year": 3, "day_of_month": 5,
"hour_of_day": 12, "minute_of_hour": 0, "second_of_minute": 0,
"time_zone_hour": 0, "time_zone_minute": 0
}
},
{
"start": {
"year": 2010, "week_of_year": 10, "day_of_week": 3,
# "month_of_year": 3, "day_of_month": 10,
"hour_of_day": 12, "minute_of_hour": 0, "second_of_minute": 0,
"time_zone_hour": 0, "time_zone_minute": 0
},
"duration": {
"years": 6
},
"result": {
"year": 2004, # "week_of_year": 10, "day_of_week": 3,
"month_of_year": 3, "day_of_month": 3,
"hour_of_day": 12, "minute_of_hour": 0, "second_of_minute": 0,
"time_zone_hour": 0, "time_zone_minute": 0
}
},
]
def get_duration_comparison_tests():
"""Yield tests for executing comparison operators on Durations.
All True "==" tests will be carried out for "<=" & ">= too. Likewise
all True "<" & "> tests will be carried out for "<=" & ">=" respectively.
Tuple format --> test:
(args1, args2, bool1 [, bool2]) -->
Duration(**args1) <operator> Duration(**args2) is bool1
& the reverse:
Duration(**args2) <operator> Duration(**args1) is bool2 if
bool2 supplied else bool1
"""
nominal_units = ["years", "months"]
# TODO: test in different calendars
return {
"==": [
# Durations of same type:
*[({prop: 1}, {prop: 1}, True) for prop in nominal_units],
*[(dur, dur, True) for dur in [
{"years": 1, "months": 1, "days": 1}]],
*[({prop: 1}, {prop: 2}, False) for prop in nominal_units],
# Nominal durations of different type unequal:
({"years": 1}, {"months": 12}, False),
*[({"years": 1}, {"days": i}, False) for i in [365, 366]],
*[({"months": 1}, {"days": i}, False) for i in [28, 29, 30, 31]],
({"months": 1, "days": 7}, {"weeks": 1}, False),
# Non-nominal/exact durations of different types equal:
({"weeks": 1}, {"days": 7}, True),
({"weeks": 1}, {"hours": 7 * 24}, True),
({"days": 1}, {"hours": 24}, True),
({"days": 1}, {"seconds": 24 * 60 * 60}, True),
({"hours": 1}, {"minutes": 60}, True),
({"hours": 1}, {"minutes": 30, "seconds": 30 * 60}, True),
({"hours": 1.5}, {"minutes": 90}, True)
],
"<": [
# Durations of same type:
*[({prop: 1}, {prop: 1}, False) for prop in nominal_units],
*[(dur, dur, False) for dur in [
{"years": 1, "months": 1, "days": 1}]],
*[({prop: 1}, {prop: 2}, True, False) for prop in nominal_units],
# Durations of different type:
({"years": 1}, {"months": 12}, False, True),
({"years": 1}, {"months": 12, "days": 10}, True, False),
({"years": 1}, {"days": 364}, False, True),
({"years": 1}, {"days": 365}, False),
({"years": 1}, {"days": 366}, True, False),
({"months": 1}, {"days": 29}, False, True),
({"months": 1}, {"days": 30}, False),
({"months": 1}, {"days": 31}, True, False),
({"weeks": 1}, {"days": 6}, False, True),
({"weeks": 1}, {"days": 7}, False),
({"weeks": 1}, {"days": 8}, True, False),
({"days": 1}, {"seconds": 24 * 60 * 60 - 1}, False, True),
({"days": 1}, {"seconds": 24 * 60 * 60}, False),
({"days": 1}, {"seconds": 24 * 60 * 60 + 1}, True, False),
],
"<=": [
({"years": 1}, {"days": 365}, True),
({"months": 1}, {"days": 30}, True),
],
">": [
# Durations of same type:
*[({prop: 1}, {prop: 1}, False) for prop in nominal_units],
*[({prop: 2}, {prop: 1}, True, False) for prop in nominal_units],
            # Durations of different type:
({"years": 1}, {"months": 12}, True, False),
({"years": 1}, {"days": 364}, True, False),
({"years": 1}, {"days": 365}, False),
({"years": 1}, {"days": 366}, False, True),
({"months": 1}, {"days": 29}, True, False),
({"months": 1}, {"days": 30}, False),
({"months": 1}, {"days": 31}, False, True),
({"weeks": 1}, {"days": 6}, True, False),
({"weeks": 1}, {"days": 7}, False),
({"weeks": 1}, {"days": 8}, False, True),
({"days": 1}, {"seconds": 24 * 60 * 60 - 1}, True, False),
({"days": 1}, {"seconds": 24 * 60 * 60}, False),
({"days": 1}, {"seconds": 24 * 60 * 60 + 1}, False, True),
],
">=": [
({"years": 1}, {"days": 365}, True),
({"months": 1}, {"days": 30}, True),
]
}
def get_timepoint_subtract_tests():
"""Yield tests for subtracting one timepoint from another."""
return [
(
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 4, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
"P763DT3H58M1S"
),
(
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 4, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
"-P763DT3H58M1S"
),
(
{"year": 1991, "month_of_year": 6, "day_of_month": 3,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1991, "month_of_year": 5, "day_of_month": 4,
"hour_of_day": 5, "time_zone_hour": 0, "time_zone_minute": 0},
"P29DT19H"
),
(
{"year": 1969, "month_of_year": 7, "day_of_month": 20,
"hour_of_day": 20, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1969, "month_of_year": 7, "day_of_month": 20,
"hour_of_day": 19, "time_zone_hour": 0, "time_zone_minute": 0},
"PT1H"
),
(
{"year": 1969, "month_of_year": 7, "day_of_month": 20,
"hour_of_day": 19, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1969, "month_of_year": 7, "day_of_month": 20,
"hour_of_day": 20, "time_zone_hour": 0, "time_zone_minute": 0},
"-PT1H"
),
(
{"year": 1991, "month_of_year": 5, "day_of_month": 4,
"hour_of_day": 5, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1991, "month_of_year": 6, "day_of_month": 3,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-P29DT19H"
),
(
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2013, "month_of_year": 12, "day_of_month": 31,
"hour_of_day": 23, "time_zone_hour": 0, "time_zone_minute": 0},
"PT1H"
),
(
{"year": 2013, "month_of_year": 12, "day_of_month": 31,
"hour_of_day": 23, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-PT1H"
),
(
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2013, "month_of_year": 12, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"P31D"
),
(
{"year": 2013, "month_of_year": 12, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-P31D"
),
(
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 13, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
"P762DT18H58M1S"
),
(
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 13, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
"-P762DT18H58M1S"
),
]
def get_timepoint_comparison_tests():
"""Yield tests for executing comparison operators on TimePoints.
All True "==" tests will be carried out for "<=" & ">= too. Likewise
all True "<" & "> tests will be carried out for "<=" & ">=" respectively.
Tuple format --> test:
(args1, args2, bool1 [, bool2]) -->
TimePoint(**args1) <operator> TimePoint(**args2) is bool1
& the reverse:
TimePoint(**args2) <operator> TimePoint(**args1) is bool2 if
bool2 supplied else bool1
"""
base_YMD = {"year": 2020, "month_of_year": 3, "day_of_month": 14}
trunc = {"truncated": True}
return {
"==": [
(base_YMD, base_YMD, True),
({"year": 2020, "month_of_year": 2, "day_of_month": 5},
{"year": 2020, "day_of_year": 36},
True),
({"year": 2019, "month_of_year": 12, "day_of_month": 30},
{"year": 2020, "week_of_year": 1, "day_of_week": 1},
True),
({"year": 2019, "day_of_year": 364},
{"year": 2020, "week_of_year": 1, "day_of_week": 1},
True),
({**base_YMD, "hour_of_day": 9, "time_zone_hour": 0},
{**base_YMD, "hour_of_day": 11, "minute_of_hour": 30,
"time_zone_hour": 2, "time_zone_minute": 30},
True),
({"month_of_year": 3, "day_of_month": 14, **trunc},
{"month_of_year": 3, "day_of_month": 14, **trunc},
True),
# Truncated datetimes of different modes can't be equal:
({"month_of_year": 2, "day_of_month": 5, **trunc},
{"day_of_year": 36, **trunc},
False),
({"month_of_year": 12, "day_of_month": 30, **trunc},
{"week_of_year": 1, "day_of_week": 1, **trunc},
False),
({"day_of_year": 364, **trunc},
{"week_of_year": 1, "day_of_week": 1, **trunc},
False)
# TODO: test equal truncated datetimes with different timezones
# when not buggy
],
"<": [
(base_YMD, base_YMD, False),
({"year": 2019}, {"year": 2020}, True, False),
({"year": -1}, {"year": 1}, True, False),
({"year": 2020, "month_of_year": 2},
{"year": 2020, "month_of_year": 3},
True, False),
({"year": 2020, "month_of_year": 2, "day_of_month": 5},
{"year": 2020, "month_of_year": 2, "day_of_month": 6},
True, False),
({**base_YMD, "hour_of_day": 9}, {**base_YMD, "hour_of_day": 10},
True, False),
({**base_YMD, "hour_of_day": 9, "time_zone_hour": 0},
{**base_YMD, "hour_of_day": 7, "time_zone_hour": -3},
True, False),
({"day_of_month": 3, **trunc}, {"day_of_month": 4, **trunc},
True, False),
({"month_of_year": 1, "day_of_month": 3, **trunc},
{"month_of_year": 1, "day_of_month": 4, **trunc},
True, False)
],
">": [
(base_YMD, base_YMD, False),
({"year": 2019}, {"year": 2020}, False, True),
({"year": -1}, {"year": 1}, False, True),
({"year": 2020, "month_of_year": 2},
{"year": 2020, "month_of_year": 3},
False, True),
({"year": 2020, "month_of_year": 2, "day_of_month": 5},
{"year": 2020, "month_of_year": 2, "day_of_month": 6},
False, True),
({**base_YMD, "hour_of_day": 9}, {**base_YMD, "hour_of_day": 10},
False, True),
({**base_YMD, "hour_of_day": 9, "time_zone_hour": 0},
{**base_YMD, "hour_of_day": 7, "time_zone_hour": -3},
False, True),
({"day_of_month": 3, **trunc}, {"day_of_month": 4, **trunc},
False, True),
({"month_of_year": 1, "day_of_month": 3, **trunc},
{"month_of_year": 1, "day_of_month": 4, **trunc},
False, True)
]
}
def get_timepoint_bounds_tests():
"""Yield tests for checking out of bounds TimePoints."""
return {
"in_bounds": [
{"year": 2020, "month_of_year": 2, "day_of_month": 29},
{"truncated": True, "month_of_year": 2, "day_of_month": 29},
{"year": 2020, "week_of_year": 53},
{"truncated": True, "week_of_year": 53},
{"year": 2020, "day_of_year": 366},
{"truncated": True, "day_of_year": 366},
{"year": 2019, "hour_of_day": 24},
{"year": 2019, "time_zone_hour": 99},
{"year": 2019, "time_zone_hour": 0, "time_zone_minute": -1},
{"year": 2019, "time_zone_hour": 0, "time_zone_minute": 1},
{"year": 2019, "time_zone_hour": -1, "time_zone_minute": -1},
],
"out_of_bounds": [
{"year": 2019, "month_of_year": 0},
{"year": 2019, "month_of_year": 13},
{"year": 2019, "month_of_year": 1, "day_of_month": 0},
{"year": 2019, "month_of_year": 1, "day_of_month": 32},
{"year": 2019, "month_of_year": 2, "day_of_month": 29},
{"truncated": True, "month_of_year": 1, "day_of_month": 32},
{"year": 2019, "week_of_year": 0},
{"year": 2019, "week_of_year": 53},
{"year": 2019, "week_of_year": 1, "day_of_week": 0},
{"year": 2019, "week_of_year": 1, "day_of_week": 8},
{"year": 2019, "day_of_year": 0},
{"year": 2019, "day_of_year": 366},
{"year": 2019, "hour_of_day": -1},
{"year": 2019, "hour_of_day": 25},
{"year": 2019, "hour_of_day": 10, "hour_of_day_decimal": -0.1},
{"year": 2019, "hour_of_day": 10, "hour_of_day_decimal": 1},
{"year": 2019, "hour_of_day": 24, "hour_of_day_decimal": 0.1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": -1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 60},
{"year": 2019, "hour_of_day": 24, "minute_of_hour": 1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"minute_of_hour_decimal": -0.1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"minute_of_hour_decimal": 1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"second_of_minute": -1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"second_of_minute": 60},
{"year": 2019, "hour_of_day": 24, "minute_of_hour": 1,
"second_of_minute": 1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"second_of_minute": 1, "second_of_minute_decimal": -0.1},
{"year": 2019, "hour_of_day": 10, "minute_of_hour": 1,
"second_of_minute": 1, "second_of_minute_decimal": 1},
{"year": 2019, "time_zone_hour": -100},
{"year": 2019, "time_zone_hour": 100},
{"year": 2019, "time_zone_hour": 0, "time_zone_minute": -60},
{"year": 2019, "time_zone_hour": 1, "time_zone_minute": -1},
{"year": 2019, "time_zone_hour": 1, "time_zone_minute": 60},
{"year": 2019, "time_zone_hour": -1, "time_zone_minute": 1}
]
}
def get_timepoint_conflicting_input_tests():
"""Yield tests for checking TimePoints initialized with incompatible
inputs."""
return [
{"year": 2020, "day_of_year": 1, "month_of_year": 1},
{"year": 2020, "day_of_year": 1, "day_of_month": 1},
{"year": 2020, "day_of_year": 6, "week_of_year": 2},
{"year": 2020, "day_of_year": 1, "day_of_week": 3},
{"year": 2020, "month_of_year": 2, "week_of_year": 5},
{"year": 2020, "month_of_year": 2, "day_of_week": 6},
{"year": 2020, "day_of_month": 6, "week_of_year": 2},
{"year": 2020, "day_of_month": 1, "day_of_week": 3}
]
def run_comparison_tests(data_class, test_cases):
"""
Args:
data_class: E.g. Duration or TimePoint
test_cases (dict): Of the form {"==": [...], "<": [...], ...}
"""
for op in test_cases:
for case in test_cases[op]:
lhs = data_class(**case[0])
rhs = data_class(**case[1])
expected = {"forward": case[2],
"reverse": case[3] if len(case) == 4 else case[2]}
if op == "==":
tests = [
{"op": "==", "forward": lhs == rhs, "reverse": rhs == lhs}]
if True in expected.values():
tests.append({"op": "<=", "forward": lhs <= rhs,
"reverse": rhs <= lhs})
tests.append({"op": ">=", "forward": lhs >= rhs,
"reverse": rhs >= lhs})
if op == "<":
tests = [
{"op": "<", "forward": lhs < rhs, "reverse": rhs < lhs}]
if True in expected.values():
tests.append({"op": "<=", "forward": lhs <= rhs,
"reverse": rhs <= lhs})
if op == "<=":
tests = [
{"op": "<=", "forward": lhs <= rhs, "reverse": rhs <= lhs}]
if op == ">":
tests = [
{"op": ">", "forward": lhs > rhs, "reverse": rhs > lhs}]
if True in expected.values():
tests.append({"op": ">=", "forward": lhs >= rhs,
"reverse": rhs >= lhs})
if op == ">=":
tests = [
{"op": ">=", "forward": lhs >= rhs, "reverse": rhs >= lhs}]
for test in tests:
assert test["forward"] is expected["forward"], (
"{0} {1} {2}".format(lhs, test["op"], rhs))
assert test["reverse"] is expected["reverse"], (
"{0} {1} {2}".format(rhs, test["op"], lhs))
if op == "==":
test = lhs != rhs
assert test is not expected["forward"], (
"{0} != {1}".format(lhs, rhs))
test = hash(lhs) == hash(rhs)
assert test is expected["forward"], (
"hash of {0} == hash of {1}".format(rhs, lhs))
class TestDataModel(unittest.TestCase):
"""Test the functionality of data model manipulation."""
def test_days_in_year_range(self):
"""Test the summing-over-days-in-year-range shortcut code."""
for start_year in range(-401, 2):
for end_year in range(start_year, 2):
test_days = data.get_days_in_year_range(
start_year, end_year)
control_days = 0
for year in range(start_year, end_year + 1):
control_days += data.get_days_in_year(year)
self.assertEqual(
control_days, test_days, "days in %s to %s" % (
start_year, end_year)
)
def test_timeduration(self):
"""Test the Duration class methods."""
for test_props, method, method_args, ctrl_results in (
get_timeduration_tests()):
duration = data.Duration(**test_props)
duration_method = getattr(duration, method)
test_results = duration_method(*method_args)
self.assertEqual(
test_results, ctrl_results,
"%s -> %s(%s)" % (test_props, method, method_args)
)
def test_duration_float_args(self):
"""Test that floats passed to Duration() init are handled correctly."""
for kwarg in ["years", "months", "weeks", "days"]:
with self.assertRaises(BadInputError):
data.Duration(**{kwarg: 1.5})
for kwarg, expected_secs in [("hours", 5400), ("minutes", 90),
("seconds", 1.5)]:
self.assertEqual(data.Duration(**{kwarg: 1.5}).get_seconds(),
expected_secs)
def test_duration_in_weeks(self):
"""Test the Duration class when the week arg is supplied."""
dur = data.Duration(weeks=4)
self.assertEqual(dur.get_is_in_weeks(), True)
for kwarg, expected_days in [ # 1 unit of each property + 4 weeks
("years", 365 + 28), ("months", 30 + 28), ("days", 1 + 28),
("hours", 28), ("minutes", 28), ("seconds", 28)]:
dur = data.Duration(weeks=4, **{kwarg: 1})
self.assertFalse(dur.get_is_in_weeks())
self.assertIsNone(dur.weeks)
self.assertEqual(dur.get_days_and_seconds()[0], expected_days)
def test_duration_to_weeks(self):
"""Test converting Duration in days to Duration in weeks"""
duration_in_days = data.Duration(days=365).to_weeks()
duration_in_weeks = data.Duration(weeks=52) # 364 days (!)
self.assertEqual(duration_in_days.weeks, duration_in_weeks.weeks)
def test_duration_to_days(self):
"""Test converting Duration in weeks to Duration in days"""
dur = data.Duration(weeks=4)
self.assertEqual(dur.to_days().days, 28)
def test_duration_comparison(self):
"""Test the Duration rich comparison methods and hashing."""
run_comparison_tests(data.Duration, get_duration_comparison_tests())
dur = data.Duration(days=1)
for var in [7, 'foo', (1, 2), data.TimePoint(year=2000)]:
self.assertFalse(dur == var)
with self.assertRaises(TypeError):
dur < var
def test_timeduration_add_week(self):
"""Test the Duration not in weeks add Duration in weeks."""
self.assertEqual(
str(data.Duration(days=7) + data.Duration(weeks=1)),
"P14D")
def test_duration_floordiv(self):
"""Test the existing dunder floordir, which will be removed when we
move to Python 3"""
duration = data.Duration(years=4, months=4, days=4, hours=4,
minutes=4, seconds=4)
expected = data.Duration(years=2, months=2, days=2, hours=2,
minutes=2, seconds=2)
duration //= 2
self.assertEqual(duration, expected)
def test_duration_in_weeks_floordiv(self):
"""Test the existing dunder floordir, which will be removed when we
move to Python 3"""
duration = data.Duration(weeks=4)
duration //= 2
self.assertEqual(2, duration.weeks)
def test_duration_subtract(self):
"""Test subtracting a duration from a timepoint."""
for test in get_duration_subtract_tests():
start_point = data.TimePoint(**test["start"])
test_duration = data.Duration(**test["duration"])
end_point = data.TimePoint(**test["result"])
test_subtract = (start_point - test_duration).to_calendar_date()
self.assertEqual(test_subtract, end_point,
"%s - %s" % (start_point, test_duration))
def test_timepoint_comparison(self):
"""Test the TimePoint rich comparison methods and hashing."""
run_comparison_tests(data.TimePoint, get_timepoint_comparison_tests())
point = data.TimePoint(year=2000)
for var in [7, 'foo', (1, 2), data.Duration(days=1)]:
self.assertFalse(point == var)
with self.assertRaises(TypeError):
point < var
# Cannot use "<", ">=" etc truncated TimePoints of different modes:
day_month_point = data.TimePoint(month_of_year=2, day_of_month=5,
truncated=True)
ordinal_point = data.TimePoint(day_of_year=36, truncated=True)
with self.assertRaises(TypeError): # TODO: should be ValueError?
day_month_point < ordinal_point
def test_timepoint_plus_float_time_duration_day_of_month_type(self):
"""Test (TimePoint + Duration).day_of_month is an int."""
time_point = data.TimePoint(year=2000) + data.Duration(seconds=1.0)
self.assertEqual(type(time_point.day_of_month), int)
def test_timepoint_subtract(self):
"""Test subtracting one time point from another."""
for test_props1, test_props2, ctrl_string in (
get_timepoint_subtract_tests()):
point1 = data.TimePoint(**test_props1)
point2 = data.TimePoint(**test_props2)
test_string = str(point1 - point2)
self.assertEqual(test_string, ctrl_string,
"%s - %s" % (point1, point2))
def test_timepoint_add_duration(self):
"""Test adding a duration to a timepoint"""
seconds_added = 5
timepoint = data.TimePoint(year=1900, month_of_year=1, day_of_month=1,
hour_of_day=1, minute_of_hour=1)
duration = data.Duration(seconds=seconds_added)
t = timepoint + duration
self.assertEqual(seconds_added, t.second_of_minute)
def test_timepoint_add_duration_without_minute(self):
"""Test adding a duration to a timepoint"""
seconds_added = 5
timepoint = data.TimePoint(year=1900, month_of_year=1, day_of_month=1,
hour_of_day=1)
duration = data.Duration(seconds=seconds_added)
t = timepoint + duration
self.assertEqual(seconds_added, t.second_of_minute)
def test_timepoint_bounds(self):
"""Test out of bounds TimePoints"""
tests = get_timepoint_bounds_tests()
for kwargs in tests["in_bounds"]:
data.TimePoint(**kwargs)
for kwargs in tests["out_of_bounds"]:
with self.assertRaises(BadInputError) as cm:
data.TimePoint(**kwargs)
assert "out of bounds" in str(cm.exception)
def test_timepoint_conflicting_inputs(self):
"""Test TimePoints initialized with incompatible inputs"""
tests = get_timepoint_conflicting_input_tests()
for kwargs in tests:
with self.assertRaises(BadInputError) as cm:
data.TimePoint(**kwargs)
assert "Conflicting input" in str(cm.exception)
def test_timepoint_without_year():
"""Test that TimePoints cannot be init'd without a year unless
truncated"""
for kwargs in [{}, {"month_of_year": 2}, {"hour_of_day": 9}]:
with pytest.raises(BadInputError) as exc:
data.TimePoint(**kwargs)
assert "Missing input: year" in str(exc.value)
# If truncated, it's fine:
data.TimePoint(truncated=True, month_of_year=2)
# TODO: what about just TimePoint(truncated=True) ?
|
lgpl-3.0
| 3,343,498,518,141,713,400 | 43.824891 | 79 | 0.485213 | false |
jim-easterbrook/pyctools
|
src/pyctools/components/interp/resize.py
|
1
|
6984
|
# Pyctools - a picture processing algorithm development kit.
# http://github.com/jim-easterbrook/pyctools
# Copyright (C) 2014-20 Pyctools contributors
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
__all__ = ['Resize', 'FilterResponse']
__docformat__ = 'restructuredtext en'
import sys
if 'sphinx' in sys.modules:
__all__.append('resize_frame')
import numpy
from pyctools.core.config import ConfigInt
from pyctools.core.base import Transformer
from .resizecore import resize_frame
class Resize(Transformer):
"""Filter an image and/or resize with interpolation.
Resize (or just filter) an image using user supplied filter(s). The
filters are supplied in a :py:class:`~pyctools.core.frame.Frame`
object sent to the :py:meth:`filter` input. If the frame data's 3rd
dimension is unity then the same filter is applied to each component
of the input. Alternatively the frame data's 3rd dimension should
match the input's, allowing a different filter to be applied to each
colour.
Images can be resized by almost any amount. The resizing is
controlled by integer "up" and "down" factors and is not constrained
to simple ratios such as 2:1 or 5:4.
To filter images without resizing leave the "up" and "down" factors
at their default value of 1.
The core method :py:meth:`resize_frame` is written in Cython,
allowing real-time image resizing on a typical computer.
The ``filter`` output forwards the filter frame whenever it changes.
It can be connected to a :py:class:`FilterResponse` component to
compute the (new) frequency response.
Config:
========= === ====
``xup`` int Horizontal up-conversion factor.
``xdown`` int Horizontal down-conversion factor.
``yup`` int Vertical up-conversion factor.
``ydown`` int Vertical down-conversion factor.
========= === ====
"""
inputs = ['input', 'filter'] #:
outputs = ['output', 'filter'] #:
def initialise(self):
self.config['xup'] = ConfigInt(min_value=1)
self.config['xdown'] = ConfigInt(min_value=1)
self.config['yup'] = ConfigInt(min_value=1)
self.config['ydown'] = ConfigInt(min_value=1)
self.filter_frame = None
def get_filter(self):
new_filter = self.input_buffer['filter'].peek()
if not new_filter:
return False
if new_filter == self.filter_frame:
return True
self.send('filter', new_filter)
filter_coefs = new_filter.as_numpy(dtype=numpy.float32)
if filter_coefs.ndim != 3:
self.logger.warning('Filter input must be 3 dimensional')
return False
ylen, xlen = filter_coefs.shape[:2]
if (xlen % 2) != 1 or (ylen % 2) != 1:
self.logger.warning('Filter input must have odd width & height')
return False
self.filter_frame = new_filter
self.filter_coefs = filter_coefs
self.fil_count = None
return True
def transform(self, in_frame, out_frame):
if not self.get_filter():
return False
self.update_config()
x_up = self.config['xup']
x_down = self.config['xdown']
y_up = self.config['yup']
y_down = self.config['ydown']
in_data = in_frame.as_numpy(dtype=numpy.float32)
if self.fil_count != self.filter_coefs.shape[2]:
self.fil_count = self.filter_coefs.shape[2]
if self.fil_count != 1 and self.fil_count != in_data.shape[2]:
self.logger.warning('Mismatch between %d filters and %d images',
self.fil_count, in_data.shape[2])
norm_filter = self.filter_coefs * numpy.float32(x_up * y_up)
out_frame.data = resize_frame(
in_data, norm_filter, x_up, x_down, y_up, y_down)
audit = 'data = filter(data)\n'
if x_up != 1 or x_down != 1:
audit = 'data = resize(data)\n'
audit += ' x_up: %d, x_down: %d\n' % (x_up, x_down)
if y_up != 1 or y_down != 1:
audit = 'data = resize(data)\n'
audit += ' y_up: %d, y_down: %d\n' % (y_up, y_down)
audit += ' filter = {\n '
audit += '\n '.join(
self.filter_frame.metadata.get('audit').splitlines())
audit += '\n }\n'
out_frame.set_audit(self, audit)
return True
class FilterResponse(Transformer):
"""Compute frequency response of a 1-D filter.
The filter is padded to a power of 2 (e.g. 1024) before computing
the Fourier transform. The magnitude of the positive frequency half
is output in a form suitable for the
:py:class:`~pyctools.components.io.plotdata.PlotData` component.
"""
inputs = ['filter'] #:
outputs = ['response'] #:
def transform(self, in_frame, out_frame):
filter_coefs = in_frame.as_numpy(dtype=numpy.float32)
if filter_coefs.ndim != 3:
self.logger.warning('Filter frame must be 3 dimensional')
return False
ylen, xlen, comps = filter_coefs.shape
if xlen > 1 and ylen > 1:
return False
responses = []
pad_len = 1024
if xlen > 1:
while pad_len < xlen:
pad_len *= 2
padded = numpy.zeros(pad_len)
for c in range(comps):
padded[0:xlen] = filter_coefs[0, :, c]
responses.append(numpy.absolute(numpy.fft.rfft(padded)))
elif ylen > 1:
while pad_len < ylen:
pad_len *= 2
padded = numpy.zeros(pad_len)
for c in range(comps):
padded[0:ylen] = filter_coefs[:, 0, c]
responses.append(numpy.absolute(numpy.fft.rfft(padded)))
responses.insert(0, numpy.linspace(0.0, 0.5, responses[0].shape[0]))
# generate output frame
out_frame.data = numpy.stack(responses)
out_frame.type = 'resp'
labels = ['normalised frequency']
if comps > 1:
for c in range(comps):
labels.append('component {}'.format(c))
out_frame.metadata.set('labels', repr(labels))
audit = out_frame.metadata.get('audit')
audit += 'data = FilterResponse(data)\n'
out_frame.metadata.set('audit', audit)
return True
|
gpl-3.0
| 4,787,031,185,198,858,000 | 38.235955 | 80 | 0.604954 | false |
JazzeYoung/VeryDeepAutoEncoder
|
doc/tutorial/using_gpu_solution_1.py
|
1
|
17208
|
#!/usr/bin/env python
# Theano tutorial
# Solution to Exercise in section 'Using the GPU'
# 1. Raw results
from __future__ import absolute_import, print_function, division
import numpy
import theano
import theano.tensor as tt
theano.config.floatX = 'float32'
rng = numpy.random
N = 400
feats = 784
D = (rng.randn(N, feats).astype(theano.config.floatX),
rng.randint(size=N, low=0, high=2).astype(theano.config.floatX))
training_steps = 10000
# Declare Theano symbolic variables
x = theano.shared(D[0], name="x")
y = theano.shared(D[1], name="y")
w = theano.shared(rng.randn(feats).astype(theano.config.floatX), name="w")
b = theano.shared(numpy.asarray(0., dtype=theano.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy
cost = tt.cast(xent.mean(), 'float32') + \
0.01 * (w ** 2).sum() # The cost to optimize
gw, gb = tt.grad(cost, [w, b])
# Compile expressions to functions
train = theano.function(
inputs=[],
outputs=[prediction, xent],
updates=[(w, w - 0.01 * gw), (b, b - 0.01 * gb)],
name="train")
predict = theano.function(inputs=[], outputs=prediction,
name="predict")
if any([n.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for n in
train.maker.fgraph.toposort()]):
print('Used the cpu')
elif any([n.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for n in
train.maker.fgraph.toposort()]):
print('Used the gpu')
else:
print('ERROR, not able to tell if theano used the cpu or the gpu')
print(train.maker.fgraph.toposort())
for i in range(training_steps):
pred, err = train()
#print "Final model:"
#print w.get_value(), b.get_value()
print("target values for D")
print(D[1])
print("prediction on D")
print(predict())
"""
# 2. Profiling
# 2.1 Profiling for CPU computations
# In your terminal, type:
$ THEANO_FLAGS=profile=True,device=cpu python using_gpu_solution_1.py
# You'll see first the output of the script:
Used the cpu
target values for D
prediction on D
# Followed by the output of profiling. You'll see profiling results for each function
# in the script, followed by a summary for all functions.
# We'll show here only the summary:
Results were produced using an Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz
Function profiling
==================
Message: Sum of all(2) printed profiles at exit excluding Scan op profile.
Time in 10001 calls to Function.__call__: 1.300452e+00s
Time in Function.fn.__call__: 1.215823e+00s (93.492%)
Time in thunks: 1.157602e+00s (89.015%)
Total compile time: 8.922548e-01s
Number of Apply nodes: 17
Theano Optimizer time: 6.270301e-01s
Theano validate time: 5.993605e-03s
Theano Linker time (includes C, CUDA code generation/compiling): 2.949309e-02s
Import time 3.543139e-03s
Time in all call to theano.grad() 1.848292e-02s
Time since theano import 2.864s
Class
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>
64.5% 64.5% 0.747s 3.73e-05s C 20001 3 theano.tensor.blas_c.CGemv
33.1% 97.7% 0.384s 4.79e-06s C 80001 9 theano.tensor.elemwise.Elemwise
1.0% 98.6% 0.011s 1.14e-06s C 10000 1 theano.tensor.elemwise.Sum
0.7% 99.4% 0.009s 2.85e-07s C 30001 4 theano.tensor.elemwise.DimShuffle
0.3% 99.7% 0.004s 3.64e-07s C 10001 2 theano.tensor.basic.AllocEmpty
0.3% 100.0% 0.004s 1.78e-07s C 20001 3 theano.compile.ops.Shape_i
... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)
Ops
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>
64.5% 64.5% 0.747s 3.73e-05s C 20001 3 CGemv{inplace}
18.7% 83.2% 0.217s 2.17e-05s C 10000 1 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)]
8.9% 92.1% 0.103s 1.03e-05s C 10000 1 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)]
4.3% 96.4% 0.050s 4.98e-06s C 10000 1 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}
1.0% 97.4% 0.011s 1.14e-06s C 10000 1 Sum{acc_dtype=float64}
0.5% 97.9% 0.006s 2.83e-07s C 20001 3 InplaceDimShuffle{x}
0.4% 98.3% 0.004s 4.22e-07s C 10000 1 Elemwise{sub,no_inplace}
0.3% 98.6% 0.004s 3.70e-07s C 10000 1 Elemwise{neg,no_inplace}
0.3% 98.9% 0.004s 3.64e-07s C 10001 2 AllocEmpty{dtype='float32'}
0.3% 99.2% 0.004s 1.78e-07s C 20001 3 Shape_i{0}
0.2% 99.5% 0.003s 2.88e-07s C 10000 1 InplaceDimShuffle{1,0}
0.2% 99.7% 0.003s 2.65e-07s C 10000 1 Elemwise{Composite{((-i0) - i1)}}[(0, 0)]
0.2% 99.9% 0.002s 1.98e-07s C 10000 1 Elemwise{Cast{float32}}
0.1% 100.0% 0.002s 1.54e-07s C 10000 1 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]
0.0% 100.0% 0.000s 4.77e-06s C 1 1 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}
... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)
Apply
------
<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>
34.0% 34.0% 0.394s 3.94e-05s 10000 7 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
30.5% 64.5% 0.353s 3.53e-05s 10000 15 CGemv{inplace}(w, TensorConstant{-0.00999999977648}, x.T, Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0, TensorConstant{0.999800026417})
18.7% 83.2% 0.217s 2.17e-05s 10000 12 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)](y, Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, Elemwise{sub,no_inplace}.0, Elemwise{neg,no_inplace}.0)
8.9% 92.1% 0.103s 1.03e-05s 10000 13 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)](Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, y, Elemwise{Cast{float32}}.0, Elemwise{sub,no_inplace}.0)
4.3% 96.4% 0.050s 4.98e-06s 10000 11 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}(Elemwise{neg,no_inplace}.0, TensorConstant{(1,) of 0.5})
1.0% 97.4% 0.011s 1.14e-06s 10000 14 Sum{acc_dtype=float64}(Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0)
0.4% 97.8% 0.004s 4.22e-07s 10000 4 Elemwise{sub,no_inplace}(TensorConstant{(1,) of 1.0}, y)
0.3% 98.1% 0.004s 3.76e-07s 10000 0 InplaceDimShuffle{x}(b)
0.3% 98.4% 0.004s 3.70e-07s 10000 10 Elemwise{neg,no_inplace}(Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0)
0.3% 98.7% 0.004s 3.64e-07s 10000 5 AllocEmpty{dtype='float32'}(Shape_i{0}.0)
0.2% 99.0% 0.003s 2.88e-07s 10000 2 InplaceDimShuffle{1,0}(x)
0.2% 99.2% 0.003s 2.65e-07s 10000 9 Elemwise{Composite{((-i0) - i1)}}[(0, 0)](CGemv{inplace}.0, InplaceDimShuffle{x}.0)
0.2% 99.4% 0.002s 2.21e-07s 10000 1 Shape_i{0}(x)
0.2% 99.6% 0.002s 1.98e-07s 10000 8 Elemwise{Cast{float32}}(InplaceDimShuffle{x}.0)
0.2% 99.7% 0.002s 1.90e-07s 10000 6 InplaceDimShuffle{x}(Shape_i{0}.0)
0.1% 99.9% 0.002s 1.54e-07s 10000 16 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)](b, TensorConstant{0.00999999977648}, Sum{acc_dtype=float64}.0)
0.1% 100.0% 0.001s 1.34e-07s 10000 3 Shape_i{0}(y)
0.0% 100.0% 0.000s 3.89e-05s 1 3 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
0.0% 100.0% 0.000s 4.77e-06s 1 4 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}(CGemv{inplace}.0, InplaceDimShuffle{x}.0, TensorConstant{(1,) of 0.5})
0.0% 100.0% 0.000s 1.19e-06s 1 0 InplaceDimShuffle{x}(b)
... (remaining 2 Apply instances account for 0.00%(0.00s) of the runtime)
# 2.2 Profiling for GPU computations
# In your terminal, type:
$ CUDA_LAUNCH_BLOCKING=1 THEANO_FLAGS=profile=True,device=cuda python using_gpu_solution_1.py
# You'll see first the output of the script:
Used the gpu
target values for D
prediction on D
Results were produced using a GeForce GTX TITAN X
# Profiling summary for all functions:
Function profiling
==================
Message: Sum of all(2) printed profiles at exit excluding Scan op profile.
Time in 10001 calls to Function.__call__: 4.181247e+00s
Time in Function.fn.__call__: 4.081113e+00s (97.605%)
Time in thunks: 3.915566e+00s (93.646%)
Total compile time: 9.256095e+00s
Number of Apply nodes: 21
Theano Optimizer time: 9.996419e-01s
Theano validate time: 6.523132e-03s
Theano Linker time (includes C, CUDA code generation/compiling): 8.239602e+00s
Import time 4.228115e-03s
Time in all call to theano.grad() 3.286195e-02s
Time since theano import 15.415s
Class
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>
59.5% 59.5% 2.329s 1.16e-04s C 20001 3 theano.sandbox.gpuarray.blas.GpuGemv
29.8% 89.3% 1.166s 1.30e-05s C 90001 10 theano.sandbox.gpuarray.elemwise.GpuElemwise
4.1% 93.4% 0.162s 8.10e-06s C 20001 3 theano.sandbox.gpuarray.basic_ops.HostFromGpu
3.3% 96.7% 0.131s 1.31e-05s C 10000 1 theano.sandbox.gpuarray.elemwise.GpuCAReduceCuda
1.6% 98.3% 0.061s 6.10e-06s C 10000 1 theano.sandbox.gpuarray.basic_ops.GpuFromHost
0.8% 99.1% 0.033s 1.09e-06s C 30001 4 theano.sandbox.gpuarray.elemwise.GpuDimShuffle
0.7% 99.8% 0.026s 2.59e-06s C 10001 2 theano.sandbox.gpuarray.basic_ops.GpuAllocEmpty
0.2% 100.0% 0.008s 3.95e-07s C 20001 3 theano.compile.ops.Shape_i
... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)
Ops
---
<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>
59.5% 59.5% 2.329s 1.16e-04s C 20001 3 GpuGemv{inplace=True}
4.1% 63.6% 0.162s 8.10e-06s C 20001 3 HostFromGpu(gpuarray)
4.0% 67.6% 0.157s 1.57e-05s C 10000 1 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>
3.8% 71.4% 0.149s 1.49e-05s C 10000 1 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>
3.7% 75.1% 0.144s 1.44e-05s C 10000 1 GpuElemwise{sub,no_inplace}
3.6% 78.7% 0.141s 1.41e-05s C 10000 1 GpuElemwise{gt,no_inplace}
3.4% 82.1% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Cast{float32}}[]<gpuarray>
3.4% 85.5% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>
3.3% 88.8% 0.131s 1.31e-05s C 10000 1 GpuCAReduceCuda{add}
2.9% 91.7% 0.112s 1.12e-05s C 10000 1 GpuElemwise{neg,no_inplace}
2.6% 94.3% 0.102s 1.02e-05s C 10000 1 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>
2.5% 96.7% 0.096s 9.63e-06s C 10000 1 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>
1.6% 98.3% 0.061s 6.10e-06s C 10000 1 GpuFromHost<None>
0.7% 99.0% 0.026s 2.59e-06s C 10001 2 GpuAllocEmpty{dtype='float32', context_name=None}
0.5% 99.5% 0.021s 1.06e-06s C 20001 3 InplaceGpuDimShuffle{x}
0.3% 99.8% 0.011s 1.14e-06s C 10000 1 InplaceGpuDimShuffle{1,0}
0.2% 100.0% 0.008s 3.95e-07s C 20001 3 Shape_i{0}
0.0% 100.0% 0.000s 2.00e-05s C 1 1 GpuElemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}[]<gpuarray>
... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)
Apply
------
<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>
55.0% 55.0% 2.154s 2.15e-04s 10000 7 GpuGemv{inplace=True}(GpuAllocEmpty{dtype='float32', context_name=None}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0})
4.5% 59.5% 0.176s 1.76e-05s 10000 18 GpuGemv{inplace=True}(w, TensorConstant{-0.00999999977648}, InplaceGpuDimShuffle{1,0}.0, GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0, TensorConstant{0.999800026417})
4.0% 63.5% 0.157s 1.57e-05s 10000 12 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>(y, GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, GpuElemwise{sub,no_inplace}.0, GpuElemwise{neg,no_inplace}.0)
3.8% 67.3% 0.149s 1.49e-05s 10000 15 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, y, GpuElemwise{Cast{float32}}[]<gpuarray>.0, GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuElemwise{sub,no_inplace}.0)
3.7% 71.0% 0.144s 1.44e-05s 10000 4 GpuElemwise{sub,no_inplace}(GpuArrayConstant{[ 1.]}, y)
3.6% 74.6% 0.141s 1.41e-05s 10000 16 GpuElemwise{gt,no_inplace}(GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[ 0.5]})
3.4% 78.0% 0.133s 1.33e-05s 10000 10 GpuElemwise{Cast{float32}}[]<gpuarray>(InplaceGpuDimShuffle{x}.0)
3.4% 81.4% 0.133s 1.33e-05s 10000 9 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>(GpuGemv{inplace=True}.0, InplaceGpuDimShuffle{x}.0)
3.3% 84.7% 0.131s 1.31e-05s 10000 17 GpuCAReduceCuda{add}(GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0)
2.9% 87.5% 0.112s 1.12e-05s 10000 11 GpuElemwise{neg,no_inplace}(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0)
2.6% 90.1% 0.102s 1.02e-05s 10000 20 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>(b, GpuArrayConstant{0.00999999977648}, GpuCAReduceCuda{add}.0)
2.5% 92.6% 0.096s 9.63e-06s 10000 13 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>(GpuElemwise{neg,no_inplace}.0)
2.3% 94.9% 0.090s 9.04e-06s 10000 19 HostFromGpu(gpuarray)(GpuElemwise{gt,no_inplace}.0)
1.8% 96.7% 0.072s 7.16e-06s 10000 14 HostFromGpu(gpuarray)(GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>.0)
1.6% 98.3% 0.061s 6.10e-06s 10000 6 GpuFromHost<None>(Shape_i{0}.0)
0.7% 99.0% 0.026s 2.59e-06s 10000 5 GpuAllocEmpty{dtype='float32', context_name=None}(Shape_i{0}.0)
0.3% 99.3% 0.013s 1.33e-06s 10000 0 InplaceGpuDimShuffle{x}(b)
0.3% 99.6% 0.011s 1.14e-06s 10000 2 InplaceGpuDimShuffle{1,0}(x)
0.2% 99.8% 0.008s 7.94e-07s 10000 8 InplaceGpuDimShuffle{x}(GpuFromHost<None>.0)
0.1% 99.9% 0.005s 5.27e-07s 10000 1 Shape_i{0}(x)
... (remaining 7 Apply instances account for 0.07%(0.00s) of the runtime)
# 3. Conclusions
Examine and compare the 'Ops' summaries for CPU and GPU. The transfer ops 'GpuFromHost' and 'HostFromGpu' usually
consume a large amount of extra time by themselves, but you can minimize their overhead by making as few data transfers between GPU and CPU as possible.
Notice that each of the GPU ops consumes more time than its CPU counterpart. This is because the ops operate on small inputs;
if you increase the input data size (e.g. set N = 4000), you will see a gain from using the GPU.
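One way to count how many transfer ops remain in a compiled function (a sketch, reusing the same
toposort() inspection as the device check near the top of this script):
    n_transfers = sum(node.op.__class__.__name__ in ('GpuFromHost', 'HostFromGpu')
                      for node in train.maker.fgraph.toposort())
Keeping x, y, w and b in shared variables, as done above, is what keeps this number small.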
"""
|
bsd-3-clause
| 5,040,977,168,494,259,000 | 63.935849 | 374 | 0.573977 | false |
sinnwerkstatt/django-bettertemplates
|
example/example/settings.py
|
1
|
2013
|
"""
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tkyw7bl$)5p*77#0$54ahp7lurux^c2=#j7h1l(o@gm9l@ts&6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bettertemplates',
'example',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
bsd-3-clause
| -5,482,895,633,802,655,000 | 22.964286 | 71 | 0.723299 | false |
gitcoinco/web
|
app/townsquare/migrations/0019_pinnedpost.py
|
1
|
1253
|
# Generated by Django 2.2.4 on 2020-05-03 21:50
from django.db import migrations, models
import django.db.models.deletion
import economy.models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0105_auto_20200430_1352'),
('townsquare', '0018_comment_is_edited'),
]
operations = [
migrations.CreateModel(
name='PinnedPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(db_index=True, default=economy.models.get_time)),
('modified_on', models.DateTimeField(default=economy.models.get_time)),
('what', models.CharField(default='', max_length=100, unique=True)),
('created', models.DateTimeField(auto_now=True)),
('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pins', to='dashboard.Activity')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pins', to='dashboard.Profile')),
],
options={
'abstract': False,
},
),
]
|
agpl-3.0
| -5,791,778,937,997,083,000 | 39.419355 | 139 | 0.601756 | false |
huangxiaohen2738/tornado-rpc
|
gateway/main.py
|
1
|
1221
|
#!/usr/bin/env python
#-*-coding: utf-8-*-
# Version: 0.1
# Author: Song Huang <[email protected]>
# License: Copyright(c) 2015 Song.Huang
# Summary:
import sys
from os.path import abspath, dirname, join, normpath
PREFIX = normpath(dirname(abspath(__file__)))
for path in (PREFIX, normpath(join(PREFIX, '../lib'))):
if path not in sys.path:
sys.path = [path] + sys.path
from server import RPCServer
from tornado import ioloop
from logging.handlers import RotatingFileHandler
import logging
def log_initialize():
logger = logging.getLogger()
Rthandler = RotatingFileHandler('logs/gateway.log', maxBytes=10*1024*1024,backupCount=10)
logger.setLevel(logging.DEBUG)
#sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(filename)s-%(lineno)d: %(message)s')
Rthandler.setFormatter(formatter)
#logger.addHandler(sh)
logger.addHandler(Rthandler)
logger = logging.getLogger(__name__)
def start():
log_initialize()
server = RPCServer(('localhost', 5500))
server.start()
if __name__ == '__main__':
start()
io_loop = ioloop.IOLoop.instance()
try:
io_loop.start()
except KeyboardInterrupt:
io_loop.stop()
|
apache-2.0
| 6,461,799,097,083,478,000 | 26.133333 | 93 | 0.682228 | false |
Ecpy/ecpy
|
tests/instruments/widgets/test_profile_edition.py
|
1
|
10091
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the instrument model selection widget.
"""
import enaml
import pytest
from exopy.testing.util import handle_dialog, wait_for_window_displayed
with enaml.imports():
from exopy.instruments.widgets.profile_edition\
import (SetValidator, ConnectionCreationDialog,
ConnectionValidationWindow, SettingsCreationDialog,
RenameSettingsPopup, ProfileEditionDialog,
clean_name, trim_description)
# HINT the QtListStrWidget has some issues of display in test mode
@pytest.fixture
def profile_infos(prof_plugin):
"""A profile model to edit.
"""
return prof_plugin._profiles['fp1']
@pytest.fixture
def model_infos(profile_infos):
"""A model infos to use for testing.
"""
return profile_infos.model
def test_set_validator():
"""Test the SetValidator used to restrict allowed names.
"""
v = SetValidator(existing=['a', 'b', 'c'])
assert not v.validate('a') and not v.valid
assert v.validate('bc') and v.valid
def test_clean_name():
"""Test cleaning a name.
"""
assert clean_name('a_b') == 'a b'
def test_trim_description():
"""Test triming the description (connection or settings).
"""
desc = """test\n\nDefaults\n-------\n\n details"""
assert trim_description(desc) == 'test'
def test_connection_creation_dialog(prof_plugin, model_infos, exopy_qtbot,
dialog_sleep):
"""Test the dialog dedicated to create new connections.
"""
d = ConnectionCreationDialog(plugin=prof_plugin, model_infos=model_infos,
existing=['false_connection2'])
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
assert d.connection
assert len(d._connections) == 2
ws = d.central_widget().widgets()
ws[0].selected_item = ws[0].items[1]
def assert_id():
assert d.connection.declaration.id == 'false_connection3'
exopy_qtbot.wait_until(assert_id)
ws[-1].clicked = True # Ok button
def assert_result():
assert d.result
exopy_qtbot.wait_until(assert_result)
exopy_qtbot.wait(dialog_sleep)
d = ConnectionCreationDialog(plugin=prof_plugin, model_infos=model_infos,
existing=['false_connection2'])
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
d.central_widget().widgets()[-2].clicked = True # Cancel button
def assert_result():
assert not d.result
exopy_qtbot.wait_until(assert_result)
exopy_qtbot.wait(dialog_sleep)
def test_connection_validation_window(prof_plugin, exopy_qtbot, dialog_sleep,
profile_infos):
"""Test the window used to check that connection infos allows to open a
connection.
"""
ed = ProfileEditionDialog(plugin=prof_plugin, profile_infos=profile_infos)
ed.show()
wait_for_window_displayed(exopy_qtbot, ed)
exopy_qtbot.wait(dialog_sleep)
w = ConnectionValidationWindow(editor=ed.central_widget().widgets()[0])
w.show()
wait_for_window_displayed(exopy_qtbot, w)
exopy_qtbot.wait(dialog_sleep)
widgets = w.central_widget().widgets()
form_widgets = widgets[0].widgets()
combo_driver = form_widgets[1]
combo_connection = form_widgets[3]
combo_settings = form_widgets[5]
combo_driver.selected = 'test <instruments.test.FalseDriver2>'
combo_connection.selected = 'false_connection2'
combo_settings.selected = 'false_settings2'
p = widgets[-3]
p.clicked = True
assert 'The connection was successfully established' in widgets[-2].text
# XXX add a test for failed connection test
widgets[-1].clicked = True
exopy_qtbot.wait(10)
def test_settings_creation_dialog(prof_plugin, model_infos, exopy_qtbot,
dialog_sleep):
"""Test the dialog dedicated to create new settings.
"""
d = SettingsCreationDialog(plugin=prof_plugin, model_infos=model_infos,
existing=['false_settings2'])
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
assert d.settings
assert len(d._settings) == 3
ws = d.central_widget().widgets()
ws[0].selected_item = ws[0].items[1]
ok = ws[-1]
def assert_enabled():
assert not ok.enabled
exopy_qtbot.wait_until(assert_enabled)
exopy_qtbot.wait(dialog_sleep)
n = ws[-3]
n.text = 'dummy'
assert ok.enabled
n.validator.validate('false_settings2')
assert not ok.enabled
n = ws[-3]
n.text = 'dummy'
ok.clicked = True
assert d.settings.user_id == n.text
assert d.result
d2 = SettingsCreationDialog(plugin=prof_plugin, model_infos=model_infos,
existing=['false_settings2'])
d2.show()
wait_for_window_displayed(exopy_qtbot, d2)
d2.central_widget().widgets()[-2].clicked = False # Cancel button
def assert_result():
assert not d2.result
exopy_qtbot.wait_until(assert_result)
def test_rename_settings_popup(prof_plugin, profile_infos, exopy_qtbot,
dialog_sleep):
"""Test the popup used to rename a settings.
"""
ed = ProfileEditionDialog(plugin=prof_plugin, profile_infos=profile_infos)
ed.show()
wait_for_window_displayed(exopy_qtbot, ed)
exopy_qtbot.wait(dialog_sleep)
ed_widgets = ed.central_widget().widgets()
ed_widget = ed_widgets[0]
nb = ed_widget.widgets()[5]
nb.selected_tab = 'settings'
exopy_qtbot.wait(10 + dialog_sleep)
c_page, s_page = nb.pages()
# Open the renaming popup.
s_page.page_widget().widgets()[3].clicked = True
# Get the popup.
assert len(RenameSettingsPopup.popup_views) == 1
p = RenameSettingsPopup.popup_views[0]
settings = p.settings
ws = p.central_widget().widgets()
ws[1].text = ''
def assert_enabled():
assert not ws[-1].enabled
exopy_qtbot.wait_until(assert_enabled)
exopy_qtbot.wait(dialog_sleep)
ws[1].text = ed_widget.settings[1].name
ws[1].validator.validate(ed_widget.settings[1].name)
assert not ws[-1].enabled
ws[1].text = 'dummy'
ws[1].validator.validate('dummy')
def assert_enabled():
assert ws[-1].enabled
exopy_qtbot.wait_until(assert_enabled)
exopy_qtbot.wait(dialog_sleep)
ws[-1].clicked = True
def assert_user_id():
assert settings.user_id == 'dummy'
exopy_qtbot.wait_until(assert_user_id)
exopy_qtbot.wait(dialog_sleep)
exopy_qtbot.wait_until(lambda: len(RenameSettingsPopup.popup_views) == 0)
# Open a new popup and cancel the name change
s_page.page_widget().widgets()[3].clicked = True
assert len(RenameSettingsPopup.popup_views) == 1
p = RenameSettingsPopup.popup_views[0]
ws = p.central_widget().widgets()
ws[1].text = 'dummy2'
def assert_enabled():
assert ws[-1].enabled
exopy_qtbot.wait_until(assert_enabled)
exopy_qtbot.wait(dialog_sleep)
ws[-2].clicked = True
def assert_user_id():
assert settings.user_id == 'dummy'
exopy_qtbot.wait_until(assert_user_id)
exopy_qtbot.wait(dialog_sleep)
def test_profile_edition_dialog_ok(prof_plugin, dialog_sleep, exopy_qtbot,
profile_infos):
"""Test the dialog used to edit a profile.
"""
# XXX need to test model selection
profile_infos.connections.clear()
profile_infos.settings.clear()
ed = ProfileEditionDialog(plugin=prof_plugin, profile_infos=profile_infos)
ed.show()
wait_for_window_displayed(exopy_qtbot, ed)
exopy_qtbot.wait(dialog_sleep)
ed_widgets = ed.central_widget().widgets()
ed_widget = ed_widgets[0]
nb = ed_widget.widgets()[5]
c_page, s_page = nb.pages()
# Add a connection
with handle_dialog(exopy_qtbot, cls=ConnectionCreationDialog):
c_page.page_widget().widgets()[2].clicked = True
exopy_qtbot.wait(10 + dialog_sleep)
# Add a settings
with handle_dialog(exopy_qtbot, cls=SettingsCreationDialog):
s_page.page_widget().widgets()[2].clicked = True
exopy_qtbot.wait(10 + dialog_sleep)
assert len(ed_widget.connections) == 1
assert len(ed_widget.settings) == 1
ed_widgets[-1].clicked = True
def assert_cn_st():
assert len(profile_infos.connections) == 1
assert len(profile_infos.settings) == 1
exopy_qtbot.wait_until(assert_cn_st)
def test_profile_edition_dialog_cancel(prof_plugin, exopy_qtbot, dialog_sleep,
profile_infos):
"""Test the dialog used to edit a profile.
"""
ed = ProfileEditionDialog(plugin=prof_plugin, profile_infos=profile_infos)
ed.show()
wait_for_window_displayed(exopy_qtbot, ed)
exopy_qtbot.wait(dialog_sleep)
ed_widgets = ed.central_widget().widgets()
ed_widget = ed_widgets[0]
nb = ed_widget.widgets()[5]
c_page, s_page = nb.pages()
    # Delete a connection and open the validation window
c_page.page_widget().widgets()[3].clicked = True
c_page.page_widget().widgets()[4].clicked = True
# Delete a settings
s_page.page_widget().widgets()[4].clicked = True
exopy_qtbot.wait(10 + dialog_sleep)
w = ed_widget._validator
assert len(ed_widget.connections) == 2
assert len(ed_widget.settings) == 2
ed_widgets[-2].clicked = True
def assert_visible():
assert not ed.visible and not w.visible
exopy_qtbot.wait_until(assert_visible)
assert len(profile_infos.connections) == 3
assert len(profile_infos.settings) == 3
|
bsd-3-clause
| -5,623,552,698,780,249,000 | 28.080692 | 79 | 0.636508 | false |
h2oloopan/easymerge
|
EasyMerge/tests/reddit/r2/r2/models/printable.py
|
1
|
3469
|
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
from pylons import c, request
from r2.lib.strings import Score
from r2.lib import hooks
class Printable(object):
show_spam = False
show_reports = False
is_special = False
can_ban = False
deleted = False
rowstyle = ''
collapsed = False
author = None
margin = 0
is_focal = False
childlisting = None
cache_ignore = set(['c', 'author', 'score_fmt', 'child',
# displayed score is cachable, so remove score
# related fields.
'voting_score', 'display_score',
'render_score', 'score', '_score',
'upvotes', '_ups',
'downvotes', '_downs',
'subreddit_slow', '_deleted', '_spam',
'cachable', 'make_permalink', 'permalink',
'timesince',
])
@classmethod
def add_props(cls, user, wrapped):
from r2.lib.wrapped import CachedVariable
for item in wrapped:
# insert replacement variable for timesince to allow for
# caching of thing templates
item.display = CachedVariable("display")
item.timesince = CachedVariable("timesince")
item.childlisting = CachedVariable("childlisting")
score_fmt = getattr(item, "score_fmt", Score.number_only)
item.display_score = map(score_fmt, item.voting_score)
if item.cachable:
item.render_score = item.display_score
item.display_score = map(CachedVariable,
["scoredislikes", "scoreunvoted",
"scorelikes"])
hooks.get_hook("add_props").call(items=wrapped)
@property
def permalink(self, *a, **kw):
raise NotImplementedError
def keep_item(self, wrapped):
return True
@staticmethod
def wrapped_cache_key(wrapped, style):
s = [wrapped._fullname, wrapped._spam, wrapped.reported]
if style == 'htmllite':
s.extend([c.bgcolor, c.bordercolor,
request.GET.has_key('style'),
request.GET.get("expanded"),
getattr(wrapped, 'embed_voting_style', None)])
return s
|
mit
| 6,576,516,820,168,086,000 | 37.977528 | 79 | 0.587201 | false |
odahoda/noisicaa
|
noisicaa/ui/track_list/time_view_mixin.py
|
1
|
7990
|
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import fractions
import logging
import typing
from typing import Any
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from noisicaa import audioproc
from noisicaa.ui import slots
from noisicaa.ui import ui_base
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
QObjectMixin = QtCore.QObject
QWidgetMixin = QtWidgets.QWidget
else:
QObjectMixin = object
QWidgetMixin = object
class ScaledTimeMixin(ui_base.ProjectMixin, QObjectMixin):
scaleXChanged = QtCore.pyqtSignal(fractions.Fraction)
contentWidthChanged = QtCore.pyqtSignal(int)
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
# pixels per beat
self.__scale_x = fractions.Fraction(500, 1)
self.__content_width = 100
self.project.duration_changed.add(lambda _: self.__updateContentWidth())
self.__updateContentWidth()
def __updateContentWidth(self) -> None:
width = int(self.project.duration.fraction * self.__scale_x) + 120
self.setContentWidth(width)
def leftMargin(self) -> int:
return 100
def projectEndTime(self) -> audioproc.MusicalTime:
return audioproc.MusicalTime() + self.project.duration
def contentWidth(self) -> int:
return self.__content_width
def setContentWidth(self, width: int) -> None:
if width == self.__content_width:
return
self.__content_width = width
self.contentWidthChanged.emit(self.__content_width)
def scaleX(self) -> fractions.Fraction:
return self.__scale_x
def setScaleX(self, scale_x: fractions.Fraction) -> None:
if scale_x == self.__scale_x:
return
self.__scale_x = scale_x
self.__updateContentWidth()
self.scaleXChanged.emit(self.__scale_x)
class ContinuousTimeMixin(ScaledTimeMixin, slots.SlotContainer):
additionalXOffset, setAdditionalXOffset, additionalXOffsetChanged = slots.slot(
int, 'additionalXOffset', default=0)
snapToGrid, setSnapToGrid, snapToGridChanged = slots.slot(bool, 'snapToGrid', default=True)
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.__grid_step = audioproc.MusicalDuration(1, 1)
self.scaleXChanged.connect(self.__scaleXChanged)
self.__scaleXChanged(self.scaleX())
def __scaleXChanged(self, scale_x: fractions.Fraction) -> None:
self.__grid_step = audioproc.MusicalDuration(1, 64)
min_dist = 96
while int(self.__grid_step * scale_x) <= min_dist:
self.__grid_step *= 2
if int(self.__grid_step) > 1:
min_dist = 36
def durationPerPixel(self) -> audioproc.MusicalDuration:
return audioproc.MusicalDuration(1 / self.scaleX())
def timeToX(self, time: audioproc.MusicalTime) -> int:
return self.leftMargin() + self.additionalXOffset() + int(self.scaleX() * time.fraction)
def xToTime(self, x: int) -> audioproc.MusicalTime:
x -= self.leftMargin() + self.additionalXOffset()
if x <= 0:
return audioproc.MusicalTime(0, 1)
return audioproc.MusicalTime(x / self.scaleX())
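    # Worked example (illustrative numbers): with the default scaleX() of
    # 500 pixels per beat, leftMargin() of 100 and no additional offset,
    # timeToX(MusicalTime(1, 2)) == 100 + int(500 * 1/2) == 350, and
    # xToTime(350) maps back to MusicalTime(1, 2).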
def gridStep(self) -> audioproc.MusicalDuration:
return self.__grid_step
def shouldSnap(self, evt: QtGui.QMouseEvent) -> bool:
return self.snapToGrid() and not evt.modifiers() & Qt.ShiftModifier
def snapTime(self, time: audioproc.MusicalTime) -> audioproc.MusicalTime:
grid_time = (
audioproc.MusicalTime(0, 1)
+ self.gridStep() * int(round(float(time / self.gridStep()))))
time_x = int(time * self.scaleX())
grid_x = int(grid_time * self.scaleX())
if abs(time_x - grid_x) <= 10:
return grid_time
return time
def renderTimeGrid(
self, painter: QtGui.QPainter, rect: QtCore.QRect, *, show_numbers: bool = False
) -> None:
grid_step = self.gridStep()
tick_num = int(self.xToTime(rect.x()) / grid_step)
tick_time = (grid_step * tick_num).as_time()
while tick_time < self.projectEndTime():
x = self.timeToX(tick_time)
if x > rect.right():
break
if tick_num == 0:
painter.fillRect(x, rect.y(), 2, rect.height(), Qt.black)
else:
if tick_time % audioproc.MusicalTime(1, 1) == audioproc.MusicalTime(0, 1):
c = QtGui.QColor(0, 0, 0)
elif tick_time % audioproc.MusicalTime(1, 4) == audioproc.MusicalTime(0, 1):
c = QtGui.QColor(160, 160, 160)
elif tick_time % audioproc.MusicalTime(1, 8) == audioproc.MusicalTime(0, 1):
c = QtGui.QColor(185, 185, 185)
elif tick_time % audioproc.MusicalTime(1, 16) == audioproc.MusicalTime(0, 1):
c = QtGui.QColor(210, 210, 210)
elif tick_time % audioproc.MusicalTime(1, 32) == audioproc.MusicalTime(0, 1):
c = QtGui.QColor(225, 225, 225)
else:
c = QtGui.QColor(240, 240, 240)
painter.fillRect(x, rect.y(), 1, rect.height(), c)
if (show_numbers
and tick_time % audioproc.MusicalTime(1, 1) == audioproc.MusicalTime(0, 1)):
beat_num = int(tick_time / audioproc.MusicalTime(1, 4))
painter.setPen(Qt.black)
painter.drawText(x + 5, 12, '%d' % (beat_num + 1))
tick_time += grid_step
tick_num += 1
x = self.timeToX(self.projectEndTime())
painter.fillRect(x, rect.y(), 2, rect.height(), Qt.black)
class TimeViewMixin(ScaledTimeMixin, QWidgetMixin):
maximumXOffsetChanged = QtCore.pyqtSignal(int)
xOffsetChanged = QtCore.pyqtSignal(int)
pageWidthChanged = QtCore.pyqtSignal(int)
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
        # current horizontal scroll offset in pixels
self.__x_offset = 0
self.setMinimumWidth(100)
self.contentWidthChanged.connect(self.__contentWidthChanged)
self.__contentWidthChanged(self.contentWidth())
def __contentWidthChanged(self, width: int) -> None:
self.maximumXOffsetChanged.emit(self.maximumXOffset())
self.setXOffset(min(self.xOffset(), self.maximumXOffset()))
def maximumXOffset(self) -> int:
return max(0, self.contentWidth() - self.width())
def pageWidth(self) -> int:
return self.width()
def xOffset(self) -> int:
return self.__x_offset
def setXOffset(self, offset: int) -> int:
offset = max(0, min(offset, self.maximumXOffset()))
if offset == self.__x_offset:
return 0
dx = self.__x_offset - offset
self.__x_offset = offset
self.xOffsetChanged.emit(self.__x_offset)
return dx
def resizeEvent(self, evt: QtGui.QResizeEvent) -> None:
super().resizeEvent(evt)
self.maximumXOffsetChanged.emit(self.maximumXOffset())
self.pageWidthChanged.emit(self.width())
|
gpl-2.0
| 7,932,034,679,043,490,000 | 33.73913 | 96 | 0.624531 | false |
jrwdunham/old-webapp
|
onlinelinguisticdatabase/lib/mysql2sqlite.py
|
1
|
2267
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2010 Joel Dunham
#
# This file is part of OnlineLinguisticDatabase.
#
# OnlineLinguisticDatabase is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OnlineLinguisticDatabase is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OnlineLinguisticDatabase. If not, see
# <http://www.gnu.org/licenses/>.
import re
from pylons import request, response, session, app_globals, tmpl_context as c
from pylons.controllers.util import abort, redirect
from onlinelinguisticdatabase.lib.base import BaseController, render
import onlinelinguisticdatabase.model as model
import onlinelinguisticdatabase.model.meta as meta
import onlinelinguisticdatabase.lib.helpers as h
def y():
form_q = meta.Session.query(model.Form)
return len(form_q.all())
"""
def x()
# Get the SQLAlchemy Session
Session = sessionmaker(bind=engine)
session = Session()
# Get a dict of the pages of the UBCWPL Web Site
ubcwpl = UBCWPLWebSite(session)
ubcwpl.pages = ubcwpl.getAllUBCWPLWebSitePages()
# Create the ubcwplHTML/YYYY-MM-DD_ubcwpl_web_site directory,
# overwriting an identically-named, preexisting directory.
today = datetime.date.today()
dirName = '%s_ubcwpl_web_site' % today
dirName = os.path.join('ubcwplHTML', dirName)
if os.path.isdir(dirName):
shutil.rmtree(dirName)
os.mkdir(dirName)
# Write the .txt files for each page
for key in ubcwpl.pages:
filename = '%s.txt' % key.replace('/', '_')
filename = os.path.join(dirName, filename)
f = codecs.open(filename, encoding='utf-8', mode='w')
f.write(ubcwpl.pages[key])
f.close()
# Reassure the user
print '\nUBCWPL Web Site successfully written.\n\n'
print 'See ubcwplHTML/%s directory\n\n' % dirName
"""
|
gpl-3.0
| 7,866,052,606,457,570,000 | 33.212121 | 77 | 0.715995 | false |
PeqNP/ObjC-Stub
|
src/objcstub.py
|
1
|
10827
|
""" Reads in an Obj-C header file and recursively generates stub
implementation files.
Notes:
- This script does not duplicate source files.
- This script does not over-write implementation files if the interface of the
implementation doesn't change. If you wish to over-write the interface
entirely you must pass the --overwrite-implementation flag.
- The script will look for all other header files within the same directory as
the source header file that was given.
- Currently this puts all generated files in an "export" directory created next to this script (see exportDir in __main__).
@todo Recursively search for headers that are not already in the repository AFTER the
main header file has been parsed.
@todo Better typedef parsing using the Typedef class.
@todo return type of 'id' may be 'return nil' in all cases.
@todo Everything after a category MUST be removed. Consider: UIViewController (UIStateRestoration) <ProtocolName>
'<ProtocolName>' must be removed!
@todo Interfaces can sometimes be on one line.
@since 2015.04.21
@copyright
"""
import os
import re
import shutil
import sys
class HeaderFile(object):
def __init__(self, path, interfaces):
self.path = path
self.interfaces = interfaces
def getHeaderImportPath(self):
return os.path.basename(self.path)
    def getImplementationPath(self):
        basedir = os.path.dirname(self.path)
        basename = os.path.basename(self.path)
        # rstrip(".h") strips *characters* ('.' and 'h'), so a name such as
        # "Path.h" would be mangled to "Pat"; remove the suffix explicitly.
        if basename.endswith(".h"):
            basename = basename[:-2]
        return os.path.join(basedir, basename + ".m")
class Interface(object):
def __init__(self, name):
self.name = name
self.methods = []
""" Remove all whitespace and macros from method. """
def cleanMethod(self, method):
# Remove macros.
method = getCleanLine(method)
# Remove the semi-colon and everything after it (comments, etc.)
method = method.split(";")[0]
# Strip all whitespace again.
method = " ".join(method.split())
method = method.replace(" NS_DESIGNATED_INITIALIZER", "")
return method
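    # Illustrative example (hypothetical declaration): cleanMethod() turns
    #   "- (instancetype)initWithName:(NSString *)name NS_DESIGNATED_INITIALIZER;  // comment"
    # into
    #   "- (instancetype)initWithName:(NSString *)name"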
def addMethod(self, method):
self.methods.append(self.cleanMethod(method))
def __str__(self):
return "{}: {} method(s)".format(self.name, len(self.methods))
class Typedef(object):
def __init__(self, _type, name):
self.type = _type
self.name = name
def getCleanLine(line):
# Remove macros
while True:
# Only search for cases where all chars are uppercase. Some
# interfaces are directly next to their corresponding category
# brackets.
m = re.search(r"[A-Z0-9_]+\((.+?(?=\)))\)", line)
if not m: break
line = line.replace(m.group(0), "")
# Strip all excess white space.
return " ".join(line.split())
def getEnumTypedef(line):
m = re.search(r"[A-Z0-9_]+\((.+?(?=\)))\)", line)
print "typedef:", line
line = m.groups(0)[0].split(",")[1]
#_type, name = m.groups(0)[0].split(",")
#return Typedef(_type.replace(" ", ""), name.replace(" ", ""))
return " ".join(line.split())
""" Parses a simple typedef such as: 'typedef NSInteger NSTimeInterval;' """
def getTypedef(line):
_, _type, name = line.split(" ")
return Typedef(_type.replace(" ", ""), name.replace(" ", "").rstrip(";"))
class FrameworkReader(object):
def __init__(self, path, exportdir, overwrite):
self.path = path
self.exportdir = exportdir
self.overwrite = overwrite
self.headers = []
self.typedefs = []
def addHeader(self, header):
# when parsing interfaces, always strip the method names of extra whitespace. This will make it
# much easier to compare methods that already exist.
#print "HEADER", header
interface = False
methodName = False
interfaces = []
lines = []
print "addHeader", header
with open(header, "r") as f:
num = 0
for line in f:
num = num + 1
if methodName:
# Continue appending to the method name until the entire defintion
# has been added.
methodName = methodName + " " + line
if ";" in line:
interface.addMethod(methodName)
methodName = False
elif "#import" in line:
# @hack Replace UIKit with FakeUIKit until this becomes a CLI option.
line = line.replace("<UIKit/", "<FakeUIKit/")
elif "NS_ENUM(" in line or "NS_OPTIONS(" in line:
typedef = getEnumTypedef(line)
self.typedefs.append(typedef)
"""
elif "typedef" in line:
typedef = getTypedef(line)
self.typedefs.append(typedef)
"""
elif "@interface" in line:
#print "LINE", line
iface = getCleanLine(line)
# Category.
if "(" in iface:
iface = iface.strip("@interface ")
else: # Normal interface
iface = iface.split(":")[0].split(" ")[1]
# @interface ClassName : NSObject { -- this extracts 'ClassName'
interface = Interface(iface)
elif interface and (line.startswith("-") or line.startswith("+")):
if ";" in line:
#print "no methodName", interface.name, line
interface.addMethod(line)
else: # Continue to concatenate method until complete.
#print "methodName:", line
methodName = line
elif interface and "@end" in line:
interfaces.append(interface)
interface = False
lines.append(line)
# Write the new header file.
with open(header, "w") as f:
f.writelines(lines)
self.headers.append(HeaderFile(header, interfaces))
def getImplementation(self, interface):
s = "\n@implementation " + interface.name + "\n\n"
for method in interface.methods:
isStatic = "+" in method
# Get the first brackets.
m = re.search(r"\((.+?(?=\)))\)", method)
val = m.groups(0)[0]
if "*" in val or val.lower() in ("cgpathref", "class"):
ret = " return nil;\n"
elif val.lower() in ("instancetype", "id"):
ret = isStatic and " return [[self alloc] init];\n" or " return [super init];\n"
elif "void" in val:
ret = " \n"
elif val.lower() in ("int", "bool", "nsinteger", "nsuinteger", "double", "float", "cgfloat", "cgglyph") or val in self.typedefs: # primitive numeric values.
ret = " return 0;\n"
else: # Cast type
ret = " return (" + val + "){};\n"
s += method + "\n"
s += "{\n"
s += ret
s += "}\n\n"
s += "@end\n"
return s
def writeImplementations(self, exportdir):
for header in self.headers:
imppath = os.path.join(exportdir, header.getImplementationPath())
with open(imppath, "w") as f:
f.write("\n#import \"{}\"\n".format(header.getHeaderImportPath()))
for interface in header.interfaces:
f.write(self.getImplementation(interface))
# @todo Delete implementation file if there is no code.
def copyHeaderFile(headerPath, name, exportDir):
headerDir = os.path.dirname(headerPath)
dstPath = os.path.join(exportDir, name)
srcPath = os.path.join(headerDir, name)
if not os.path.exists(srcPath): # Source file does not exist.
return False
shutil.copyfile(srcPath, dstPath)
return dstPath
def readHeaderFile(reader, headerPath, exportDir):
name = os.path.basename(headerPath)
dstPath = copyHeaderFile(headerPath, name, exportDir)
if not dstPath:
print "Failed to copy header file:", dstPath
return False
reader.addHeader(dstPath)
def readAllHeaderFiles(reader, headerPath, exportDir):
with open(headerPath, "r") as f:
num = 0
for line in f:
num = num + 1
line = line.strip()
if line.startswith("#import"):
m = re.search(r"\<(.*)\>", line)
if not m:
                    print headerPath, "Line #", num, "- Failed to match #import declaration", line
continue
                name = os.path.basename(m.group(1))  # e.g. '<UIKit/UIFont.h>' -> 'UIFont.h'
dstPath = copyHeaderFile(headerPath, name, exportDir)
if not dstPath:
continue
reader.addHeader(dstPath)
                # @todo Copy this header file to the directory.
def main(headerPath, exportDir, overwrite, headerFile):
# Any import that is part of a framework is automatically searched within the respective folder.
# For example, if we find <UIKit/UIFont.h>, 'UIFont.h' will attempt to be found within the base
# directory.
reader = FrameworkReader(headerPath, exportDir, overwrite)
if headerFile:
readHeaderFile(reader, headerPath, exportDir)
else:
readAllHeaderFiles(reader, headerPath, exportDir)
#reader.replaceInHeader("<UIKit/", "<FakeUIKit/")
# Create implementation file.
reader.writeImplementations(exportDir)
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description="Recursively creates stub Obj-C implementation files from an Obj-C header file")
parser.add_argument("header"
, metavar="H"
, type=str
, help="The path to the header file to parse"
)
parser.add_argument("--overwrite-implementation"
, dest="overwrite_implementation"
, action="store_true"
, help="Overwrite implementation, regardless if there is an existing implementation"
)
parser.add_argument("--header-file"
, dest="header_file"
, action="store_true"
, help="Indicates that a header and implementation file should be created from the file provided"
)
args = parser.parse_args()
if not os.path.exists(args.header):
print "Header file does not exist at:", args.header
sys.exit(1)
exportDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "export")
if os.path.isfile(exportDir):
print "Export path must be a directory. Given a file at:", exportDir
sys.exit(1)
if not os.path.isdir(exportDir):
os.makedirs(exportDir)
main(args.header, exportDir, args.overwrite_implementation, args.header_file)
sys.exit(0)
|
mit
| 963,265,743,668,182,800 | 38.805147 | 168 | 0.576891 | false |
waipu/bakawipe
|
lib/beon/rsp.py
|
1
|
3370
|
# -*- coding: utf-8 -*-
'''Server responses'''
redir = 'REDIR'
antispam = 'ERRORСработала защита от спама. Пожалуйста, подождите некоторое время и попробуйте добавить комментарий снова.'
shortcom = 'ERRORОчень короткий текст комментария.' # Very short comment.
wrongauth = 'ERRORНеверный пароль' # Wrong login/pass
cantadd = '' # Yep, ''.
bumplimit = 'HIDEERRORПревышено максимальное количество комментариев в одном топике. Создайте, пожалуйста, другой топик и продолжите обсуждение в нём.'
opblacklisted = 'ERRORКомментировать тему нельзя. Автор темы находится в Вашем игнор-листе. Или не пишите комментарий, или удалите автора записи из своего игнор-листа.'
onlyregistred = 'ERRORКомментировать тему могут только зарегистрированные пользователи.'
onlyfriend = 'ERRORКомментировать тему могут только друзья её автора.'
onlysome = 'ERRORКомментировать тему могуг только пользователи, заданные автором темы.'
onlyvip = 'ERRORКомментировать тему могут только VIP-пользователи.'
closed = 'ERRORТема закрыта. Комментировать её нельзя.'
userbumplimit = 'HIDEERRORПревышено максимальное количество комментариев для одной записи. Продолжите, пожалуйста, обсуждение в другой записи.'
useropblacklisted = 'ERRORКомментировать запись нельзя. Автор записи находится в Вашем игнор-листе. Или не пишите комментарий, или удалите автора записи из своего игнор-листа.'
useronlyregistred = 'ERRORКомментировать запись могут только зарегистрированные пользователи.'
useronlyfriend = 'ERRORКомментировать запись могут только друзья её автора.' # Only fiends can post.
useronlymember = 'ERRORКомментировать запись могут только участники сообщества.' # only members can post.
useronlysome = 'ERRORКомментировать запись могуг только пользователи, заданные автором записи.' # Only some users can post.
useronlyvip = 'ERRORКомментировать запись могут только VIP-пользователи.' # Only vip-users can post.
userclosed = 'ERRORЗапись закрыта. Комментировать её нельзя.'
succes = '1'
othersucces = 'HIDE1' # From 1k comments.
chatredir = 'REDIR'
chatsucces = 'CLEAR%s' # chatname
mail_success = 'Письмо успешно отправлено'
mail_server_connection_error = 'Ошибка соединения с сервером, попробуйте позднее'
|
gpl-3.0
| 4,756,947,490,747,020,000 | 65.212121 | 176 | 0.8 | false |
IT-SeanWANG/CodeJam
|
2017_4th/Q1.py
|
1
|
2192
|
#! /usr/bin/env python
# coding: utf-8
# python version: 2.7.9
__author__ = 'Sean'
# @param sudoku 9x9 array
# @return a boolean
def is_valid_sudoku(sudoku):
rows = [list(lst[::]) for lst in sudoku]
columns = [[lst[idx] for lst in sudoku] for idx in range(9)]
blocks_origin = [sudoku[row][column] for x in [[0, 1, 2], [3, 4, 5], [6, 7, 8]] for y in [[0, 1, 2], [3, 4, 5], [6, 7, 8]] for row in x for column in y]
# convert to list
blocks = [[blocks_origin[row * 9 + column] for column in range(9)] for row in range(9)]
    # if only some cells are filled, '.' marks a position where no number is set
#check = lambda lst: all([lst.count(x) == 1 for x in lst if x != '.'])
    # make sure each element appears only once in every row, column and block
check = lambda lst: all([lst.count(x) == 1 for x in lst])
return all([check(x) for style in (rows, columns, blocks) for x in style])
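# Added illustration (not part of the original solution): for the top-left 3x3
# block, x = [0, 1, 2] and y = [0, 1, 2], so blocks_origin collects
# sudoku[0][0..2], sudoku[1][0..2] and sudoku[2][0..2] -- the nine cells of that
# block -- and the `blocks` comprehension then re-splits the flat list into nine
# rows of nine so the same uniqueness check can be reused for rows, columns and blocks.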
# main function
# initial 9x9 array
sudoku_list = [[0 for col in range(9)]for row in range(9)]
for i in range(9):
    sudoku_list[i] = [x.strip() for x in raw_input().split(",")]  # strip spaces so ' 5' and '5' compare equal
if(is_valid_sudoku(sudoku_list)):
print 1
else:
print -1
'''
Appendix test case
#test 1: True
5, 3, 4, 6, 7, 8, 9, 1, 2
6, 7, 2, 1, 9, 5, 3, 4, 8
1, 9, 8, 3, 4, 2, 5, 6, 7
8, 5, 9, 7, 6, 1, 4, 2, 3
4, 2, 6, 8, 5, 3, 7, 9, 1
7, 1, 3, 9, 2, 4, 8, 5, 6
9, 6, 1, 5, 3, 7, 2, 8, 4
2, 8, 7, 4, 1, 9, 6, 3, 5
3, 4, 5, 2, 8, 6, 1, 7, 9
#test 2: True
4, 8, 3, 2, 7, 1, 6, 9, 5
9, 7, 6, 4, 8, 5, 3, 2, 1
5, 2, 1, 3, 9, 6, 4, 7, 8
2, 9, 4, 6, 5, 8, 1, 3, 7
1, 3, 8, 9, 2, 7, 5, 6, 4
6, 5, 7, 1, 3, 4, 9, 8, 2
8, 4, 2, 5, 6, 3, 7, 1, 9
3, 1, 9, 7, 4, 2, 8, 5, 6
7, 6, 5, 8, 1, 9, 2, 4, 3
#test 3: False
1, 3, 2, 5, 7, 9, 4, 6, 8
4, 9, 8, 2, 6, 1, 3, 7, 5
7, 5, 6, 3, 8, 4, 2, 1, 9
6, 4, 3, 1, 5, 8, 7, 9, 2
5, 2, 1, 7, 9, 3, 8, 4, 6
9, 8, 7, 4, 2, 6, 5, 3, 1
2, 1, 4, 9, 3, 5, 6, 8, 7
3, 6, 5, 8, 1, 7, 9, 2, 4
8, 7, 9, 6, 4, 2, 1, 3, 5
#test 4: True
8, 1, 3, 9, 6, 5, 2, 7, 4
4, 5, 6, 8, 7, 2, 1, 9, 3
9, 7, 2, 1, 3, 4, 6, 8, 5
3, 6, 5, 4, 9, 1, 7, 2, 8
2, 9, 4, 7, 5, 8, 3, 1, 6
7, 8, 1, 3, 2, 6, 4, 5, 9
1, 4, 7, 5, 8, 3, 9, 6, 2
5, 2, 9, 6, 4, 7, 8, 3, 1
6, 3, 8, 2, 1, 9, 5, 4, 7
'''
|
apache-2.0
| -3,541,122,106,672,194,600 | 27.648649 | 153 | 0.493157 | false |
Dnomyar/Biper
|
main.py
|
1
|
1388
|
# -*- coding: utf8 -*-
# AUTHOR : Damien
# CREATED : 02/04/2014
# VERSION : 0.2
# Technical reference: http://www.utc.fr/~mecagom4/MECAWEB/EXEMPLE/EX13/WEB/piano.htm
import winsound
from songs import *
# Frequency of A3 (La3):
fLa3 = 440
# Constant: the 12th root of 2. It keeps a ratio of 2 between two octaves.
a = 1.059463
# ROLE: get the frequency of a note
# ARG 1 (note) = the note (for example: "do")
# ARG 2 (num) = the octave of the note (for example: 3)
def getFrequence(note, num):
numLa3 = 3
    # (note name, distance from La in semitones)
listNote = [("do", -9),("do#", -8),("reb", -8),("re", -7),("re#", -6),("mib", -6),("mi", -5),("fa", -4),("fa#", -3),("solb", -3),("sol", -2),("sol#", -1),("lab", -1),("la", 0),("la#", 1),("sib", 1),("si", 2)]
    # If we want do4 => 12 - 9 = 3
    # Get a list containing the tuple matching the note passed as a parameter. Note not found -> []
res = [item for item in listNote if note in item]
n = 0
if res:
        # n: number of semitones away from La3
n = ((num - numLa3) * 12) + res[0][1]
else:
print("La note n'est pas dans la liste")
return (a**n) * fLa3
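# Added worked example (not part of the original script): getFrequence("do", 4)
# gives n = ((4 - 3) * 12) + (-9) = 3, so the function returns roughly
# 440 * 1.059463**3 ~ 523.25 Hz.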
def player(song):
for x in song[1]:
f = int(getFrequence(x[0],x[1]))
tempo = int(song[0] * x[2])
winsound.Beep(f, tempo)
player(lettreAElise())
|
mit
| 4,545,261,846,817,819,600 | 26 | 212 | 0.566449 | false |
mytliulei/boundless
|
docker/dockerfile/ScanSmb/discoverFile/DiscoverFile.py
|
1
|
10972
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
# Copyright 2008-2016 Hangzhou DPtech Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Discover the latest version file by scanning given paths on smb server
"""
import sys
import os,os.path
import re
import shutil
import socket
from argparse import ArgumentParser
import time
import logging,logging.handlers
from nmb.NetBIOS import NetBIOS
from smb.SMBConnection import SMBConnection
import redis
class ScanSmbPath(object):
"""
"""
def __init__(self, smbcon, paths, file_pattern, *args, **kwargs):
"""
"""
self.smb_con = smbcon
self.paths = paths
self.file_re = re.compile(file_pattern)
self.new_file = {}
self.new_ctime = {}
self.new_fsize = {}
self.logger = logging.getLogger("scansmb")
# self.write_uid = pwd.getpwnam("ftpuser").pw_uid
# self.write_gid = pwd.getpwnam("ftpuser").pw_gid
def walk_path(self,server_name,path,*args):
"""
"""
rel_path = "/"+path
dirs , files, c_times,f_sizes = [], [], [], []
try:
names = self.smb_con.listPath(server_name,rel_path)
except Exception,ex:
sys.stdout.write('[%s] listPath error %s' % (time.ctime(),ex))
sys.stdout.write('\n')
self.logger.critical("listPath error %s" % ex)
sys.exit(1)
for name in names:
if name.isDirectory:
if name.filename not in [u'.', u'..']:
dirs.append(name.filename)
else:
if self.file_re.search(name.filename):
files.append(name.filename)
c_times.append(name.create_time)
f_sizes.append(name.file_size)
ret_path = os.path.join(u"/"+server_name,path)
yield ret_path,files,c_times,f_sizes
for name in dirs:
new_path = os.path.join(path, name)
for x in self.walk_path(server_name, new_path):
yield x
def find_file(self,file_filter=None):
"""
"""
if file_filter:
self.file_re = re.compile(file_filter)
for xpath in self.paths:
ipath = xpath.strip("/")
ipath_list = ipath.split("/",1)
iserver_name = ipath_list[0]
iserver_path = ipath_list[1]
for (w_path,w_files,w_ctimes,w_fsizes) in self.walk_path(iserver_name,iserver_path):
self.set_new_file(xpath,w_path,w_files,w_ctimes,w_fsizes)
def set_new_file(self,path,r_path,files,ctimes,fsizes):
"""
"""
if path not in self.new_ctime.keys():
base_ctime = 0
else:
base_ctime = self.new_ctime[path]
base_file = None
base_fsize = None
for (ifile,ictime,fsize) in zip(files,ctimes,fsizes):
if ictime > base_ctime:
base_ctime = ictime
base_file = ifile
base_fsize = fsize
if base_file:
self.new_file[path] = os.path.join(r_path,base_file)
self.new_ctime[path] = base_ctime
self.new_fsize[path] = base_fsize
def get_new_file(self,path):
"""
"""
if path not in self.new_file.keys():
return None
return (self.new_file[path],self.new_fsize[path])
def retrieve_file(self,src_path,to_path,n_fsize):
"""
"""
if src_path in self.new_file.keys():
new_file_path = self.new_file[src_path]
filename = os.path.split(new_file_path)[1]
r_to_path = os.path.realpath(to_path)
to_file = os.path.join(r_to_path,filename)
if os.path.exists(to_file):
tftp_fsize = os.path.getsize(to_file)
if n_fsize == tftp_fsize:
                    sys.stdout.write('file %s already exists, skipping download' % filename)
                    sys.stdout.write('\n')
                    self.logger.info("file %s already exists, skipping download" % filename)
return 0
else:
pass
ipath = new_file_path.strip("/")
ipath_list = ipath.split("/",1)
iserver_name = ipath_list[0]
iserver_path = "/"+ipath_list[1]
try:
with open(to_file,"wb") as fobj:
sys.stdout.write('[%s] download file %s ...' % (time.ctime(),filename))
sys.stdout.write('\n')
self.logger.info("download file %s ..." % filename)
self.smb_con.retrieveFile(iserver_name,iserver_path,fobj,timeout=180)
# os.chown(to_file, self.write_uid, self.write_gid)
except Exception,ex:
if os.path.exists(to_file):
os.remove(to_file)
self.logger.error("download file error,remove it ...")
self.logger.error(ex)
return 2
else:
return 1
return 0
def get_sub_path(redis_con,scankey):
"""
"""
redispipe = redis_con.pipeline()
redispipe.smembers(scankey)
ret = redispipe.execute()
if not ret[0]:
logger = logging.getLogger("scansmb")
sys.stdout.write('key %s has no member' % scankey)
logger.error("key %s has no member" % scankey)
return None
for pkey in ret[0]:
redispipe.smembers(pkey)
pret = redispipe.execute()
pdict = {}
for (k,p) in zip(ret[0],pret):
pdict[k] = [idec.decode("utf-8") for idec in p]
# pdict[k] = list(p)
return pdict
def update_key(redis_con,scankey):
"""
"""
redispipe = redis_con.pipeline()
redispipe.smembers(scankey)
ret = redispipe.execute()
if not ret[0]:
logger = logging.getLogger("scansmb")
sys.stdout.write('key %s has no member' % scankey)
logger.error("key %s has no member" % scankey)
return -1
# update smb server in scankey
smbkey_list = list(ret[0])
for ikey in smbkey_list:
redispipe.exists(ikey)
ret = redispipe.execute()
r_smbkey_list = []
for (ikey,val) in zip(smbkey_list,ret):
if not val:
redispipe.srem(scankey,ikey)
else:
r_smbkey_list.append(ikey)
ret = redispipe.execute()
# update path in smb+path key
for pkey in r_smbkey_list:
redispipe.smembers(pkey)
ret = redispipe.execute()
for (sname,key_set) in zip(r_smbkey_list,ret):
for ikey in key_set:
redispipe.exists(sname+ikey)
rfret = redispipe.execute()
for (jkey,val) in zip(key_set,rfret):
if not val:
redispipe.srem(sname,jkey)
ret = redispipe.execute()
return 0
def pulish_update_msg(redis_con,pub_key,smb_name,path,tftp_server_ip,filename):
"""
"""
redispipe = redis_con.pipeline()
broadcast_key = "%s_%s_%s" % (pub_key, smb_name, path)
broadcast_str = "%s||||%s" % (filename,tftp_server_ip)
redispipe.set(broadcast_key,filename)
redispipe.expire(broadcast_key,60*60*24*5)
redispipe.publish(broadcast_key,broadcast_str)
ret = redispipe.execute()
print('[%s] publish message on channel:' % time.ctime())
print(broadcast_str, broadcast_key)
logger = logging.getLogger("scansmb")
logger.info("publish message on channel:")
logger.info(broadcast_str)
logger.info(broadcast_key)
# sys.stdout.write('publish message %s on channel %s' % (broadcast_str, broadcast_key))
# sys.stdout.write('\n')
def main():
"""
"""
p = ArgumentParser(description='Discover file on smb server')
p.add_argument('--scankey', "-s", default="DP_VERSIONUPDATE_SCANPAHT", help='the key of smb server paths on redis server')
p.add_argument('--pubkey', "-k", default="DP_VERSIONUPDATE_PUB", help='the key of publish new version file on redis server')
p.add_argument('--redisip', '-i', default="10.18.142.48", help='redis server ip address')
p.add_argument('--filepattren', '-f', default="\\.bin$", help='file filter regexp express')
p.add_argument('--redisport', '-p', type=int, default=6379, help='redis server port')
p.add_argument('--tftppath', '-t', default="/home/ftpusers/tftp", help='tftp server root path')
p.add_argument('--tftpip', '-a', default="10.18.142.48", help='tftp server ip address')
args = p.parse_args()
logger = logging.getLogger()
log_Handle = logging.handlers.RotatingFileHandler("/home/ftpusers/scansmb.log",maxBytes=1024*1024,backupCount=5)
log_format=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(logging.INFO)
log_Handle.setFormatter(log_format)
logger.addHandler(log_Handle)
try:
RedisPool = redis.ConnectionPool(host=args.redisip,port=args.redisport,db=0)
redis_con = redis.Redis(connection_pool=RedisPool)
except Exception,ex:
sys.stdout.write('%s' % ex)
sys.stdout.write('\n')
return 101
# del expire keys from redis server
update_code = update_key(redis_con,args.scankey)
if update_code < 0:
return 201
# get scan path from redis server
smb_path = get_sub_path(redis_con,args.scankey)
if smb_path is None:
return 201
# smb_path = {"192.168.2.30@dp:dpdp":[u"/产品版本/BSW/BSWV100R003/神州二号"],}
print("[%s] scanning smb server path %s" % (time.ctime(),smb_path))
logger.info("scanning smb server path %s" % smb_path)
client_name = socket.gethostname()
for ismb in smb_path.keys():
ismb_ip = ismb.split("@")[0]
userpasw = ismb.split("@")[1].split(":")
bios = NetBIOS()
srv_name = bios.queryIPForName(ismb_ip)
bios.close()
smb_con = SMBConnection(userpasw[0],userpasw[1],client_name,srv_name[0])
smb_con.connect(ismb_ip)
scansmb = ScanSmbPath(smb_con,smb_path[ismb],args.filepattren)
scansmb.find_file()
for ipath in smb_path[ismb]:
            result = scansmb.get_new_file(ipath)
            if result is None:
                # no matching file was found for this path
                continue
            (n_file, n_fsize) = result
filename = os.path.split(n_file)[1]
ret = scansmb.retrieve_file(ipath,args.tftppath,n_fsize)
if ret == 0:
pulish_update_msg(redis_con,args.pubkey,ismb,ipath,args.tftpip,filename)
smb_con.close()
if __name__ == '__main__':
main()
|
apache-2.0
| -8,177,271,517,234,883,000 | 36.910035 | 128 | 0.58306 | false |
d4rkforce/clewareADC
|
setup.py
|
1
|
1077
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup( name='clewareADC',
version='0.1',
author='Johannes Koch',
author_email='[email protected]',
description='Python module to read values from Cleware USB-ADC 2',
long_description=read('README.rst'),
license='MIT',
keywords=['cleware', 'adc', 'hidapi'],
classifiers=[ 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Home Automation',
'Topic :: Scientific/Engineering' ],
url='https://github.com/d4rkforce/clewareADC',
download_url='https://github.com/d4rkforce/clewareADC/tarball/0.1',
packages=['clewareADC'],
install_requires=['hidapi'],
)
|
mit
| -867,214,814,206,403,600 | 36.137931 | 74 | 0.579387 | false |
heromod/migrid
|
mig/cgi-sid/ps3live.py
|
1
|
8641
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# ps3live - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
# Martin Rehr 27/03/2007
import cgi
import cgitb
cgitb.enable()
import os
import tempfile
from shared.cgishared import init_cgiscript_possibly_with_cert, \
cgiscript_header
from shared.defaults import default_vgrid
from shared.fileio import make_symlink
from shared.resource import create_resource_home
from shared.sandbox import get_resource_name
from shared.resadm import get_frontend_script, get_master_node_script
from shared.resadm import fill_frontend_script, \
fill_master_node_script, get_resource_exe
import shared.confparser as confparser
def create_ps3_resource(sandboxkey):
resource_name = 'ps3live'
mig_user = 'mig'
hosturl = 'ps3live'
resource_home = '/opt/mig/data/MiG/mig_frontend/'
script_language = 'sh'
ssh_port = -1
memory = 128
# disk = 0.064
disk = 0
cpucount = 1
sandbox = True
arch = 'PS3'
nodecount = 1
hostkey = 'N/A'
frontend_node = 'localhost'
frontend_log = '/dev/null'
if debug:
frontend_log = '/opt/mig/data/MiG/mig_frontend/frontendlog'
exe_name = 'localhost'
exe_nodecount = 1
exe_cputime = 100000
exe_execution_precondition = '""'
exe_prepend_execute = '""'
exe_exehostlog = '/dev/null'
if debug:
exe_exehostlog = '/opt/mig/data/MiG/mig_exe/exechostlog'
exe_joblog = '/dev/null'
if debug:
exe_joblog = '/opt/mig/data/MiG/mig_exe/joblog'
exe_execution_user = 'mig'
exe_execution_node = 'localhost'
exe_execution_dir = '/opt/mig/data/MiG/mig_exe/'
exe_start_command = \
'cd /opt/mig/data/MiG/mig_exe/; chmod 700 master_node_script_ps3.sh; ./master_node_script_ps3.sh'
exe_status_command = 'N/A'
exe_stop_command = 'kill -9 -$mig_exe_pgid'
exe_clean_command = 'N/A'
exe_continuous = False
exe_shared_fs = True
exe_vgrid = default_vgrid
result = create_resource_home(configuration, sandboxkey, resource_name)
if not result[0]:
o.out(result[1])
cgiscript_header()
o.reply_and_exit(o.ERROR)
resource_identifier = result[1]
unique_resource_name = resource_name + '.'\
+ str(resource_identifier)
# create a resource configuration string that we can write to a file
res_conf_string = \
""" \
::MIGUSER::
%s
::HOSTURL::
%s
::HOSTIDENTIFIER::
%s
::RESOURCEHOME::
%s
::SCRIPTLANGUAGE::
%s
::SSHPORT::
%s
::MEMORY::
%s
::DISK::
%s
::CPUCOUNT::
%s
::SANDBOX::
%s
::SANDBOXKEY::
%s
::ARCHITECTURE::
%s
::NODECOUNT::
%s
::RUNTIMEENVIRONMENT::
::HOSTKEY::
%s
::FRONTENDNODE::
%s
::FRONTENDLOG::
%s
::EXECONFIG::
name=%s
nodecount=%s
cputime=%s
execution_precondition=%s
prepend_execute=%s
exehostlog=%s
joblog=%s
execution_user=%s
execution_node=%s
execution_dir=%s
start_command=%s
status_command=%s
stop_command=%s
clean_command=%s
continuous=%s
shared_fs=%s
vgrid=%s"""\
% (
mig_user,
hosturl,
resource_identifier,
resource_home,
script_language,
str(ssh_port),
str(memory),
str(disk),
str(cpucount),
str(sandbox),
sandboxkey,
arch,
str(nodecount),
hostkey,
frontend_node,
frontend_log,
exe_name,
str(exe_nodecount),
str(exe_cputime),
exe_execution_precondition,
exe_prepend_execute,
exe_exehostlog,
exe_joblog,
exe_execution_user,
exe_execution_node,
exe_execution_dir,
exe_start_command,
exe_status_command,
exe_stop_command,
exe_clean_command,
str(exe_continuous),
str(exe_shared_fs),
exe_vgrid,
)
# write the conf string to a conf file
conf_file_src = os.path.join(configuration.resource_home,
unique_resource_name, 'config.MiG')
try:
fd = open(conf_file_src, 'w')
fd.write(res_conf_string)
fd.close()
except Exception, e:
o.out(e)
o.reply_and_exit(o.ERROR)
# parse and pickle the conf file
(status, msg) = confparser.run(conf_file_src, resource_name + '.'
+ str(resource_identifier))
if not status:
o.out(msg, conf_file_src)
o.reply_and_exit(o.ERROR)
# Create PGID file in resource_home, this is needed for timeout/kill of jobs
exe_pgid_file = os.path.join(configuration.resource_home,
unique_resource_name,
'EXE_%s.PGID' % exe_name)
try:
fd = open(exe_pgid_file, 'w')
fd.write('stopped')
fd.close()
except Exception, e:
o.out(e)
o.reply_and_exit(o.ERROR)
return resource_name + '.' + str(resource_identifier)
def get_ps3_resource():
log_msg = 'ps3live'
# Identify sandboxkey
sandboxkey = fieldstorage.getfirst('sandboxkey', None)
if not sandboxkey:
# No sandboxkey provided,
log_msg = log_msg + ', Remote IP: %s, provided no sandboxkey.'\
% os.getenv('REMOTE_ADDR')
return (False, log_msg)
if not os.path.exists(configuration.sandbox_home + sandboxkey):
# Create resource
unique_resource_name = create_ps3_resource(sandboxkey)
log_msg = log_msg + ' Created resource: %s'\
% unique_resource_name
# Make symbolic link from
# sandbox_home/sandboxkey to resource_home/resource_name
sandbox_link = configuration.sandbox_home + sandboxkey
resource_path = os.path.abspath(os.path.join(configuration.resource_home,
unique_resource_name))
make_symlink(resource_path, sandbox_link, logger)
else:
(status, unique_resource_name) = get_resource_name(sandboxkey,
logger)
if not status:
return (False, unique_resource_name)
# If resource has a jobrequest pending, remove it.
job_pending_file = os.path.join(configuration.resource_home,
unique_resource_name,
'jobrequest_pending.ps3')
if os.path.exists(job_pending_file):
os.remove(job_pending_file)
log_msg = log_msg + ', Remote IP: %s, Key: %s'\
% (os.getenv('REMOTE_ADDR'), sandboxkey)
o.internal('''
%s
''' % log_msg)
return (True, unique_resource_name)
# ## Main ###
# Get QueryString object
fieldstorage = cgi.FieldStorage()
(logger, configuration, client_id, o) = \
init_cgiscript_possibly_with_cert()
# Check we are using GET method
if os.getenv('REQUEST_METHOD') != 'GET':
# Request method is not GET
cgiscript_header()
o.out('You must use HTTP GET!')
o.reply_and_exit(o.ERROR)
# Make sure that we're called with HTTPS.
if str(os.getenv('HTTPS')) != 'on':
o.out('Please use HTTPS with session id for authenticating job requests!'
)
cgiscript_header()
o.reply_and_exit(o.ERROR)
action = fieldstorage.getfirst('action', None)
debug = fieldstorage.getfirst('debug', None)
if action == 'get_frontend_script':
(status, msg) = get_ps3_resource()
if status:
(status, msg) = get_frontend_script(msg, logger)
elif action == 'get_master_node_script':
(status, msg) = get_ps3_resource()
if status:
(status, msg) = get_master_node_script(msg, 'localhost', logger)
elif action == 'get_resourcename':
(status, msg) = get_ps3_resource()
else:
status = False
msg = 'Unknown action: %s' % action
# Get a resource for the connection client.
o.out(msg)
if status:
o.reply_and_exit(o.OK)
else:
o.reply_and_exit(o.ERROR)
|
gpl-2.0
| 6,965,660,446,324,680,000 | 23.340845 | 105 | 0.619026 | false |
chrislit/abydos
|
abydos/distance/_size.py
|
1
|
4498
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._size_difference.
Penrose's size difference
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Size']
class Size(_TokenDistance):
r"""Penrose's size difference.
    For two sets X and Y and a population N, Penrose's size difference
:cite:`Penrose:1952` is
.. math::
sim_{Size}(X, Y) =
\frac{(|X \triangle Y|)^2}{|N|^2}
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{Size} =
\frac{(b+c)^2}{n^2}
In :cite:`IBM:2017`, the formula is instead :math:`\frac{(b-c)^2}{n^2}`,
but it is clear from :cite:`Penrose:1952` that this should not be an
    asymmetric value with respect to the ordering of the two sets. Meanwhile,
:cite:`Deza:2016` gives a formula that is equivalent to
:math:`\sqrt{n}\cdot(b+c)`.
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize Size instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(Size, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def dist(self, src: str, tar: str) -> float:
"""Return the Penrose's size difference of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Size difference
Examples
--------
>>> cmp = Size()
>>> cmp.sim('cat', 'hat')
0.9999739691795085
>>> cmp.sim('Niall', 'Neil')
0.9999202806122449
>>> cmp.sim('aluminum', 'Catalan')
0.9996348736257049
>>> cmp.sim('ATCG', 'TAGC')
0.9998373073719283
.. versionadded:: 0.4.0
"""
if src == tar:
return 0.0
self._tokenize(src, tar)
return (
self._symmetric_difference_card()
) ** 2 / self._population_unique_card() ** 2
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-3.0
| 514,981,144,175,944,400 | 28.592105 | 79 | 0.586261 | false |
msfstef/TicTacToe
|
tictactoe.py
|
1
|
12093
|
import sys, copy, random
import pygame
class Board():
def __init__(self, side, UI = True, level = "undetermined"):
self._end_game = False
#pygame.mixer.pre_init(44100, -16, 1, 512)
#pygame.mixer.init(44100, -16, 1, 512)
pygame.init()
self._font = pygame.font.SysFont("arial", 40, bold=True)
self._move_sound = pygame.mixer.Sound("./sounds/move.wav")
self._win_sound = pygame.mixer.Sound("./sounds/win.wav")
self._win_sound.set_volume(0.3)
self._lose_sound = pygame.mixer.Sound("./sounds/lose.wav")
self._lose_sound.set_volume(0.4)
self._tie_sound = pygame.mixer.Sound("./sounds/tie.wav")
self._side = side
self._size = self._side, self._side
self._screen = pygame.display.set_mode(self._size)
self._win_screen = pygame.Surface(self._size)
self._win_screen.fill((128, 128, 128))
self._win_screen.set_alpha(170)
self._play_again = self._font.render(" Play Again ", 1, (0,0,0), (192,192,192))
self._play_again_rect = self._play_again.get_rect()
self._play_again_rect.centerx = 300
self._play_again_rect.centery = 200
self._diff = self._font.render(" Choose Difficulty ", 1, (0,0,0), (192,192,192))
self._diff_rect = self._diff.get_rect()
self._diff_rect.centerx = 300
self._diff_rect.centery = 300
self._quit = self._font.render(" Quit ", 1, (0,0,0), (192,192,192))
self._quit_rect = self._quit.get_rect()
self._quit_rect.centerx = 300
self._quit_rect.centery = 400
self._level = level
if self._level == "undetermined" and UI:
self._choose_diff = pygame.Surface(self._size)
self._choose_diff.fill((128, 128, 128))
self._choose_diff.set_alpha(170)
self._easy = self._font.render(" Easy ", 1, (0,0,0), (192,192,192))
self._easy_rect = self._easy.get_rect()
self._easy_rect.centerx = 300
self._easy_rect.centery = 120
self._intermediate = self._font.render(" Intermediate ", 1, (0,0,0), (192,192,192))
self._intermediate_rect = self._intermediate.get_rect()
self._intermediate_rect.centerx = 300
self._intermediate_rect.centery = 240
self._advanced = self._font.render(" Advanced ", 1, (0,0,0), (192,192,192))
self._advanced_rect = self._advanced.get_rect()
self._advanced_rect.centerx = 300
self._advanced_rect.centery = 360
self._expert = self._font.render(" Expert ", 1, (0,0,0), (192,192,192))
self._expert_rect = self._expert.get_rect()
self._expert_rect.centerx = 300
self._expert_rect.centery = 480
self._scale = side/3
pos1, pos2 = self._scale, 2*self._scale
self._positions = [(0,0), (pos1,0), (pos2,0),
(0,pos1), (pos1,pos1), (pos2,pos1),
(0,pos2), (pos1,pos2), (pos2,pos2)]
self._win_cons=[[(0,0),(0,pos1),(0,pos2)],
[(pos1,0),(pos1,pos1),(pos1,pos2)],
[(pos2,0),(pos2,pos1),(pos2,pos2)],
[(0,0),(pos1,0),(pos2,0)],
[(0,pos1),(pos1,pos1),(pos2,pos1)],
[(0,pos2),(pos1,pos2),(pos2,pos2)],
[(0,0),(pos1,pos1),(pos2,pos2)],
[(pos2,0),(pos1,pos1),(0,pos2)]]
self._win_type = "tie"
self._end_decision = "undecided"
self._cell = pygame.image.load("./images/cell.bmp").convert()
self._cross = pygame.image.load("./images/cross.bmp").convert()
self._circle = pygame.image.load("./images/circle.bmp").convert()
self._red_cross = pygame.image.load("./images/red_cross.bmp").convert()
self._red_circle = pygame.image.load("./images/red_circle.bmp").convert()
for pos in self._positions:
self._screen.blit(self._cell, pos)
if level == "undetermined" and UI:
self._screen.blit(self._choose_diff, (0,0))
self._screen.blit(self._easy, self._easy_rect)
self._screen.blit(self._intermediate, self._intermediate_rect)
self._screen.blit(self._advanced, self._advanced_rect)
self._screen.blit(self._expert, self._expert_rect)
if UI:
pygame.display.flip()
self._UI = UI
self._occupied = []
self._circles = []
self._crosses = []
def draw_empty(self):
for pos in self._positions:
self._screen.blit(self._cell, pos)
pygame.display.flip()
def get_pos(self, pos_no):
if pos_no < 1 or pos_no > 9:
raise Exception("Must be an integer between 1 and 9.")
return self._positions[pos_no - 1]
def get_pos_no(self, pos):
return self._positions.index(pos) + 1
def add_move(self, pos_no, player_type):
pos = self.get_pos(pos_no)
self._occupied.append(pos)
if player_type == "x":
if self._UI: self._move_sound.play()
self._crosses.append(pos)
elif player_type == "o":
self._circles.append(pos)
def check_win_con(self):
for win_con in self._win_cons:
if set(win_con).issubset(set(self._circles)):
self._win_type = "o"
self._win_pos = win_con
self._end_game = True
self._win_screen.fill((204, 0, 0))
if set(win_con).issubset(set(self._crosses)):
self._win_type = "x"
self._win_pos = win_con
self._end_game = True
self._win_screen.fill((0, 204, 0))
if len(self._occupied) == 9:
self._end_game = True
def draw_board_state(self):
for circ_pos in self._circles:
self._screen.blit(self._circle, circ_pos)
for cross_pos in self._crosses:
self._screen.blit(self._cross, cross_pos)
self.check_win_con()
if self._end_game:
if self._win_type == "x":
for pos in self._win_pos:
self._screen.blit(self._red_cross, pos)
self._win_sound.play()
elif self._win_type == "o":
for pos in self._win_pos:
self._screen.blit(self._red_circle, pos)
self._lose_sound.play()
elif self._win_type == "tie":
self._tie_sound.play()
self._screen.blit(self._win_screen, (0,0))
self._screen.blit(self._play_again, self._play_again_rect)
self._screen.blit(self._diff, self._diff_rect)
self._screen.blit(self._quit, self._quit_rect)
pygame.display.flip()
def is_occupied(self, pos_no):
for oc_pos in self._occupied:
if self.get_pos_no(oc_pos) == pos_no:
return True
return False
def get_mouse_pos(self, mouse_pos):
for pos in self._positions:
if (pos[0] <= mouse_pos[0] <= pos[0] + self._scale and
pos[1] <= mouse_pos[1] <= pos[1] + self._scale):
pos_clicked = pos
pos_clicked_no = self.get_pos_no(pos_clicked)
if not self.is_occupied(pos_clicked_no):
return pos_clicked_no
else:
return False
def get_rand_play(self):
pos = (random.uniform(0, self._side), random.uniform(0, self._side))
pos_no = self.get_mouse_pos(pos)
loop = True
while loop:
randomize = False
if self.is_occupied(pos_no) or not pos_no:
randomize = True
if randomize:
                pos = (random.uniform(0, self._side), random.uniform(0, self._side))
pos_no = self.get_mouse_pos(pos)
else:
loop = False
return pos_no
def get_good_play(self):
occ_pos = copy.deepcopy(self._occupied)
circles = copy.deepcopy(self._circles)
crosses = copy.deepcopy(self._crosses)
if self._level == "advanced" or self._level == "expert":
if len(self._crosses) == 1:
if self._crosses[0] == (200, 200):
return (0,0)
else:
return (200,200)
possible_pos = list(set(self._positions) - set(occ_pos))
for pos in possible_pos:
occ_pos = copy.deepcopy(self._occupied)
circles = copy.deepcopy(self._circles)
crosses = copy.deepcopy(self._crosses)
occ_pos.append(pos)
circles.append(pos)
test_board = Board(600, False)
test_board._occupied = occ_pos
test_board._crosses = crosses
test_board._circles = circles
test_board.check_win_con()
if (test_board._end_game and
test_board._win_type == "o"):
return circles[-1]
for pos in possible_pos:
occ_pos = copy.deepcopy(self._occupied)
circles = copy.deepcopy(self._circles)
crosses = copy.deepcopy(self._crosses)
occ_pos.append(pos)
circles.append(pos)
possible_pos_x = list(set(self._positions) - set(occ_pos))
for pos_x in possible_pos_x:
occ_pos_x = copy.deepcopy(occ_pos)
crosses_x = copy.deepcopy(crosses)
occ_pos_x.append(pos_x)
crosses_x.append(pos_x)
test_board = Board(600, False)
test_board._occupied = occ_pos_x
test_board._crosses = crosses_x
test_board._circles = circles
test_board.check_win_con()
if (test_board._end_game and
test_board._win_type == "x"):
return crosses_x[-1]
if (test_board._end_game and
test_board._win_type == "tie"):
return circles[-1]
if self._level == "expert":
occ_edges = []
corners = [1,3,7,9]
edges = [2,4,6,8]
for edge in edges:
if self.is_occupied(edge):
occ_edges.append(edge)
if len(self._crosses) == 2:
if (self._crosses[0] != (200,200) and
len(occ_edges) != 0):
if len(occ_edges) == 0:
return self.get_pos(edge[0])
if len(occ_edges) == 1:
if (edges[0] in occ_edges
or edges[3] in occ_edges):
return self.get_pos(edges[1])
else:
return self.get_pos(edges[0])
if len(occ_edges) == 2:
if ((edges[0] in occ_edges and
edges[3] in occ_edges) or
(edges[1] in occ_edges and
edges[2] in occ_edges)):
return self.get_pos(1)
else:
if edges[0] in occ_edges:
return self.get_pos(1)
elif edges[3] in occ_edges:
return self.get_pos(9)
else:
free_edges = list(set(edges) - set(occ_edges))
return self.get_pos(free_edges[0])
if (len(self._crosses) == 3 and
len(occ_edges) == 3 and
self.get_pos_no(self._crosses[0]) in corners):
for corner in corners:
if self.is_occupied(corner):
occ_corner = corner
if occ_corner == corners[0]:
return self.get_pos(corners[3])
if occ_corner == corners[3]:
return self.get_pos(corners[0])
if occ_corner == corners[1]:
return self.get_pos(corners[2])
if occ_corner == corners[2]:
return self.get_pos(corners[1])
return self.get_pos(self.get_rand_play())
def get_play(self):
if self._level == "easy":
return self.get_pos(self.get_rand_play())
if (self._level == "intermediate" or
self._level == "advanced" or
self._level == "expert"):
return self.get_good_play()
def tictactoe_game(level):
level = level
choose_diff = False
tictac = Board(600, True, level)
while level == "undetermined":
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
mouse_click = pygame.mouse.get_pos()
if tictac._easy_rect.collidepoint(mouse_click):
tictac._level = "easy"
level = "easy"
tictac.draw_empty()
if tictac._intermediate_rect.collidepoint(mouse_click):
tictac._level = "intermediate"
level = "intermediate"
tictac.draw_empty()
if tictac._advanced_rect.collidepoint(mouse_click):
tictac._level = "advanced"
level = "advanced"
tictac.draw_empty()
if tictac._expert_rect.collidepoint(mouse_click):
tictac._level = "expert"
level = "expert"
tictac.draw_empty()
while not tictac._end_game:
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
if event.type == pygame.MOUSEBUTTONUP:
mouse_click = pygame.mouse.get_pos()
mouse_pos = tictac.get_mouse_pos(mouse_click)
if mouse_pos:
tictac.add_move(mouse_pos, "x")
tictac.draw_board_state()
if not tictac._end_game:
next_pos = tictac.get_play()
next_pos_no = tictac.get_pos_no(next_pos)
tictac.add_move(next_pos_no, "o")
tictac.draw_board_state()
while tictac._end_decision == "undecided":
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
mouse_click = pygame.mouse.get_pos()
if tictac._play_again_rect.collidepoint(mouse_click):
tictac._end_decision = False
if tictac._quit_rect.collidepoint(mouse_click):
tictac._end_decision = True
if tictac._diff_rect.collidepoint(mouse_click):
tictac._end_decision = False
choose_diff = True
return tictac._end_decision, tictac._level, choose_diff
def tictactoe():
level = "undetermined"
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init(44100, -16, 1, 512)
bgmusic = pygame.mixer.Sound("./sounds/bgmusic.wav")
bgmusic.play(-1)
while 1:
exit, lvl, diff = tictactoe_game(level)
level = lvl
if diff:
level = "undetermined"
if exit:
sys.exit()
tictactoe()
|
gpl-2.0
| -3,910,030,076,047,299,000 | 30.574413 | 86 | 0.634334 | false |
raptiq/BHA
|
api/views.py
|
1
|
4731
|
import django_filters
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.models import User
from rest_framework.decorators import detail_route, list_route
from rest_framework import viewsets, views, filters #, status
from .models import Volunteer, Assignment
from .email import process_notification
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser
from .serializers import VolunteerSerializer, UserSerializer, AdminVolunteerSerializer, AdminAssignmentSerializer, AssignmentSerializer
class VolunteerFilter(filters.FilterSet):
language = django_filters.CharFilter(name="languages__language_name")
can_write = django_filters.CharFilter(name="languages__can_written_translate")
first_name = django_filters.CharFilter(name="first_name", lookup_type="icontains")
last_name = django_filters.CharFilter(name="last_name", lookup_type="icontains")
class Meta:
model = Volunteer
fields = ('first_name', 'last_name', 'language', 'can_write', 'volunteer_level')
class AssignmentFilter(filters.FilterSet):
unassigned = django_filters.MethodFilter()
name = django_filters.CharFilter(name='name', lookup_type='icontains')
class Meta:
model = Assignment
fields = ('name', 'type', 'status', 'language_name', 'unassigned')
def filter_unassigned(self, queryset, value):
if value:
return queryset.filter(volunteers=None).distinct()
return queryset.distinct()
class NotificationView(views.APIView):
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
subject = request.data.get("subject", "No subject")
message = request.data.get("message", "No message")
emailList = request.data.get("emails", [{"id":1,"email":"[email protected]"},])
textList = request.data.get("texts", [])
process_notification(subject, message, emailList, textList)
return Response({"success": True})
class VolunteerViewSet(viewsets.ModelViewSet):
queryset = Volunteer.objects.all().distinct()
#filter_backends = (filters.DjangoFilterBackend,)
#filter_class = VolunteerFilter
@list_route(permission_classes=[IsAuthenticated])
def me(self, request, *args, **kwargs):
volunteer = get_object_or_404(Volunteer, user_id=request.user.id)
serializer = self.get_serializer(volunteer, context={'request': request})
return Response(serializer.data)
@detail_route(methods=['get'])
def assignments(self, request, *args, **kwargs):
volunteer = get_object_or_404(Volunteer, id=int(kwargs['pk']))
assignments = Assignment.objects.filter(volunteers=volunteer)
serializer = AssignmentSerializer(assignments, context={'request': request}, many=True)
return Response(serializer.data)
def get_permissions(self):
# allow non-authenticated user to create via POST
return (AllowAny() if self.request.method == 'POST'
else IsAuthenticated()),
def get_serializer_class(self):
if (self.request.user.is_superuser):
return AdminVolunteerSerializer
return VolunteerSerializer
class AssignmentViewSet(viewsets.ModelViewSet):
filter_backends = (filters.DjangoFilterBackend,)
filter_class = AssignmentFilter
def get_queryset(self):
me = get_object_or_404(Volunteer, user_id=self.request.user.id)
# If volunteers are verified but not trained, only return training assignments
if not self.request.user.is_superuser and me.volunteer_level < 2:
return Assignment.objects.filter(type=2)
else:
return Assignment.objects.all()
def get_permissions(self):
return (IsAuthenticated()),
def get_serializer_class(self):
if (self.request.user.is_superuser):
return AdminAssignmentSerializer
return AssignmentSerializer
@detail_route(methods=['post'])
def add_volunteer(self, request, pk=None):
assignment = get_object_or_404(Assignment, id=pk)
volunteer = get_object_or_404(Volunteer, id=request.data['volunteer_id'])
assignment.volunteers.add(volunteer)
assignment.save()
return Response({'success': 'volunteer added to assignment'})
@detail_route(methods=['post'])
def remove_volunteer(self, request, pk=None):
assignment = get_object_or_404(Assignment, id=pk)
volunteer = get_object_or_404(Volunteer, id=request.data['volunteer_id'])
assignment.volunteers.remove(volunteer)
assignment.save()
return Response({'success': 'volunteer removed from assignment'})
|
mit
| 1,803,269,212,378,535,200 | 41.621622 | 135 | 0.69795 | false |
dacb/viscount
|
viscount/api/__init__.py
|
1
|
1855
|
"""
viscount.api
provides the framework for the REST api
"""
from functools import wraps
from flask import jsonify
from flask.ext.security.decorators import login_required, roles_required
from ..core import ViscountException, ViscountFormException
from .datatables import DataTablesException, handle_DataTablesException
from .cytoscape import CytoscapeException, handle_CytoscapeException
from ..utils import JSONEncoder
from .. import factory
def create_app(config_override=None, register_security_blueprint=False):
"""Returns the Viscount REST API application instance"""
app = factory.create_app(__name__, __path__, config_override, register_security_blueprint=register_security_blueprint)
# Set the default JSON encoder
app.json_encoder = JSONEncoder
# Register custom error handlers
app.errorhandler(ViscountException)(handle_ViscountException)
app.errorhandler(ViscountFormException)(handle_ViscountFormException)
app.errorhandler(DataTablesException)(handle_DataTablesException)
app.errorhandler(CytoscapeException)(handle_CytoscapeException)
app.errorhandler(404)(handle_404)
app.errorhandler(500)(handle_500)
return app
# define all routes, enforce valid login
def route(bp, *args, **kwargs):
kwargs.setdefault('strict_slashes', False)
def decorator(f):
@bp.route(*args, **kwargs)
@login_required
@wraps(f)
def wrapper(*args, **kwargs):
sc = 200
rv = f(*args, **kwargs)
if isinstance(rv, tuple):
sc = rv[1]
rv = rv[0]
return jsonify(rv), sc
return f
return decorator
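# Added usage sketch (assumed blueprint and endpoint names, not part of viscount):
#
#   from flask import Blueprint
#   tasks = Blueprint('tasks', __name__, url_prefix='/tasks')
#
#   @route(tasks, '/<int:task_id>', methods=['GET'])
#   def get_task(task_id):
#       # returning (dict, status) lets the registered wrapper jsonify it and set the code
#       return dict(id=task_id), 200
#
# Note that bp.route() registers the jsonifying wrapper, while the decorated
# function itself is returned unchanged.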
# error handlers
def handle_ViscountException(e):
return jsonify(dict(error=e.message)), 400
def handle_ViscountFormException(e):
return jsonify(dict(errors=e.errors)), 400
def handle_404(e):
return jsonify(dict(error='Not found')), 404
def handle_500(e):
return jsonify(dict(error='Internal server error')), 500
|
bsd-2-clause
| 54,382,150,313,527,970 | 26.686567 | 119 | 0.766038 | false |
amolsharma99/stockfighter
|
level3.py
|
1
|
2784
|
'''
Strategy -
observed that the price range increases with time for both buys and sells;
buy everything at the start, hold for 5-10 mins and sell it later... up to 5-10$ profit possible per share.
enable selling later only when the scale increases... doing this twice in a game is enough to build $10,000 profit.
it is not really possible to know how many shares you actually buy, since a buy order can stay open for a really long time, so we have to work with heuristics.
money is not really a factor
1000 stock on either side is more important.
'''
import requests
import json
import math
venue = "FLWEX"
stock = "EAVI"
account = 'TAY60073625'
buy_target = 7400 # heuristically decided, based on the value around which the stock price fluctuates.
units_in_hand = 0
max_units = 1000 #not allowed to go more than 1000 on either side.
batch_size = 200
#qty, direction, price for order will be set later.
order = {
"account": account,
"venue": venue,
"stock": stock,
"orderType": "limit"
}
hdr = {'X-Starfighter-Authorization' : '4a92bf2f7714296cad41c09b6de8235fc21e9529'}
def get_orderbook():
url = 'https://api.stockfighter.io/ob/api/venues/'+venue+'/stocks/'+stock
return requests.get(url).json()
def get_info_for_key(r, key):
#return avg asks/bids price and sum_qty
keyInfo = r.get(key)
sum_price = 0
sum_qty = 0
if keyInfo != None:
for info in keyInfo:
sum_price += info['price'] * info['qty']
sum_qty += info['qty']
return (float(sum_price)/float(sum_qty), sum_qty)
else:
return (-1, 0)
def stock_order(order, qty, direction, price):
order['qty'] = qty
order['direction'] = direction
order['price'] = price
return requests.post('https://api.stockfighter.io/ob/api/venues/'+venue+'/stocks/'+stock+'/orders',
data = json.dumps(order), headers = hdr)
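# Added illustration (hypothetical numbers): stock_order(order, 200, 'buy', 7350)
# POSTs a JSON body like
#   {"account": "TAY60073625", "venue": "FLWEX", "stock": "EAVI",
#    "orderType": "limit", "qty": 200, "direction": "buy", "price": 7350}
# to the venue's /orders endpoint, authenticated via the X-Starfighter-Authorization header.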
# The task ends by itself when profit exceeds $10,000
while True:
orderbook = get_orderbook()
#for buying
ask_price, ask_qty = get_info_for_key(orderbook, 'asks')
ask_price = int(math.ceil(ask_price))
print "asks_price: ", ask_price, "money: ", money, "units_in_hand: ", units_in_hand
if ask_price <= buy_target and ask_price != -1:
units = min(batch_size, max_units - (batch_size+units_in_hand) )
#since we are not allowed to go 1000 shares on either side.
if units > 0:
r = stock_order(order, units, 'buy', ask_price)
print "buying ", r.text
units_in_hand += units
#for selling
bids_price, bids_qty = get_info_for_key(orderbook, 'bids')
bids_price = int(math.floor(bids_price))
print "bids_price: ", bids_price
if bids_price > buy_target and bids_price != -1:
if(units_in_hand > 0):
units = units_in_hand
if(units_in_hand < batch_size):
units = units_in_hand
else:
units = batch_size
r = stock_order(order, units, 'sell', bids_price)
units_in_hand -= units
print "selling ", r.text
|
mit
| 2,661,061,566,017,036,300 | 30.647727 | 127 | 0.691451 | false |
simsong/grr-insider
|
lib/rdfvalues/volatility_types.py
|
2
|
2141
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""RDFValues used to communicate with the memory analysis framework."""
from grr.lib import rdfvalue
from grr.proto import jobs_pb2
class VolatilityRequest(rdfvalue.RDFProtoStruct):
"""A request to the volatility subsystem on the client."""
protobuf = jobs_pb2.VolatilityRequest
class MemoryInformation(rdfvalue.RDFProtoStruct):
"""Information about the client's memory geometry."""
protobuf = jobs_pb2.MemoryInformation
# The following define the data returned by Volatility plugins in a structured
# way. Volatility plugins typically produce tables; these are modeled using the
# following types.
# A Volatility plugin will produce a list of sections; each section refers to a
# different entity (e.g. information about a different PID). Each section
# is then split into a list of tables. Tables in turn consist of a header (which
# represents the list of column names) and rows. Each row consists of a list of
# values.
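# Added illustration (the shape below is inferred from the comments above, not from
# the jobs_pb2 protobuf definitions, so treat it as an assumption):
#   VolatilityResult  -> list of VolatilitySection
#   VolatilitySection -> list of VolatilityTable
#   VolatilityTable   -> VolatilityHeader (column names) + list of VolatilityValues (rows)
#   VolatilityValues  -> list of VolatilityValue (one value per column)
# Iterating a plugin response therefore means walking sections, then tables, then rows.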
class VolatilityFormattedValue(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityFormattedValue
class VolatilityFormattedValues(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityFormattedValues
class VolatilityValue(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityValue
def GetValue(self):
if self.HasField("svalue"):
return self.svalue
elif self.HasField("value"):
return self.value
else:
raise RuntimeError("VolatilityValue without data.")
class VolatilityValues(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityValues
class VolatilityTable(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityTable
class VolatilityHeader(rdfvalue.RDFProtoStruct):
protobuf = jobs_pb2.VolatilityHeader
class VolatilitySection(rdfvalue.RDFProtoStruct):
"""A Volatility response returns multiple sections.
Each section typically covers a single object (e.g. a PID).
"""
protobuf = jobs_pb2.VolatilitySection
class VolatilityResult(rdfvalue.RDFProtoStruct):
"""The result of running a plugin."""
protobuf = jobs_pb2.VolatilityResponse
|
apache-2.0
| -7,733,199,661,564,917,000 | 28.328767 | 80 | 0.776273 | false |
prisconapoli/mercury
|
app/api_1_0/utils.py
|
1
|
2706
|
import time
import json
import requests
from flask import url_for, abort,current_app
from .mail_service.exceptions import ValidationError
def timestamp():
"""Get the current system time from epoch in nanosecond
Returns:
long:current system time
"""
return long(time.time()*1e9)
def post_event(url, id, data):
"""Post an event for a mail.
The event will be sent if TRACK_EVENTS is True, otherwise
the function returns silently
Args:
url(str): the endpoint defined as route, e.g. 'api.new_event'
id(int): mail identifier
data(dict): additional data to attach for this event
"""
if current_app.config['TRACK_EVENTS']:
return post_event_url(url_for(url, id=id, _external=True), data)
def post_event_url(url=None, data=None):
"""Post an event to a specific url.
    The event will be sent if the parameter 'url' is not None.
Args:
url(str): complete url, e.g. http://<server>/api/v1.0/mails/<id>/events
data(dict): additional data to attach for the event
"""
if url is None:
return
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(data) if data else '', headers=headers)
if not r.ok:
print "r.status_code", r.status_code
abort(500)
return r
def build_event(created_by, event, mail_id, created_at=None, blob=''):
""" Utility function to create a generic event
Args:
created_by(str): creator of the event
event(str): event name
mail_id(int): reference to mail id
created_at(str): the creation time of this event. Default is None
        blob(str): data to attach. Default is an empty string
Returns:
dict: a dictionary as below
value = {
'created_at': created_at,
'created_by': created_by,
'event': event,
'mail_id' : mail_id,
'blob':blob
}
"""
return {
'created_at': created_at if created_at else timestamp(),
'created_by': created_by,
'event': event,
'mail_id': mail_id,
'blob': blob
}
def get_or_error(dict_event, key, nullable=False):
"""Check if value is present in the dictionary
Args:
dict_event(dict): input dictionary
key(str): key to search
        nullable(bool): true if the value is not compulsory. Default is false
    Raises:
        ValidationError: if a compulsory value is missing
"""
value = dict_event.get(key)
if (value is None or value == '') and nullable is False:
raise ValidationError('missing %s' % key)
return value if value is not None else ''
|
mit
| 3,083,810,116,711,008,000 | 27.1875 | 82 | 0.613821 | false |
quantwizard-com/pythonbacktest
|
testcases/indicator_tests/datadelaytests.py
|
1
|
1468
|
import unittest
from pythonbacktest.indicator import DataDelay
class DataDelayTests(unittest.TestCase):
def test_delay_individual_values(self):
test_data = [None, None, 1, 2, 3, 4, 5]
delay = range(0, 5)
for single_delay in delay:
expected_result = (single_delay * [None] + test_data[0:-single_delay]) if single_delay != 0 else test_data
delay_indicator = DataDelay(delay_size=single_delay)
# insert individual values
for test_number in test_data:
delay_indicator.on_new_upstream_value(test_number)
self.assertEqual(expected_result, delay_indicator.all_result)
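    # Added worked example (illustrative): with single_delay = 2 and
    # test_data = [None, None, 1, 2, 3, 4, 5], the expected series is
    # [None, None] + test_data[:-2] = [None, None, None, None, 1, 2, 3].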
def test_delay_individual_list_on_input(self):
test_data = [None, None, 1, 2, 3, 4, 5]
delay = range(1, 5)
for single_delay in delay:
delay_indicator = DataDelay(delay_size=single_delay)
            # this is a real-life scenario: add the list to the same object multiple times with a growing number of elements
for test_data_length in range(4, len(test_data) + 1):
test_subset = test_data[0:test_data_length]
expected_result = (single_delay * [None] + test_subset[0:-single_delay]) if single_delay != 0 else test_subset
delay_indicator.on_new_upstream_value(test_subset)
actual_result = delay_indicator.all_result
self.assertEqual(expected_result, actual_result)
|
apache-2.0
| 6,316,997,177,643,729,000 | 35.7 | 126 | 0.626022 | false |
google-research/google-research
|
protein_lm/fine_tune.py
|
1
|
4422
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for fine-tuning FlaxLM models."""
import functools
from flax import jax_utils as flax_jax_utils
from flax.training import common_utils
import jax
import tensorflow.compat.v1 as tf
import tqdm
from protein_lm import models
from protein_lm import utils
_SHUFFLE_BUFFER_SIZE = 5000
def _get_dataset(sequences, example_weights, batch_size, shuffle):
data_dict = dict(sequence=sequences)
if example_weights is not None:
data_dict['example_weight'] = example_weights
dataset = tf.data.Dataset.from_tensor_slices(data_dict)
if shuffle:
dataset = dataset.shuffle(_SHUFFLE_BUFFER_SIZE)
dataset = dataset.repeat().batch(batch_size)
return dataset
class _OptimizationRunner(object):
"""Helper class for running optimization steps."""
def __init__(self, model, learning_rate, **optimizer_kwargs):
self._bos_token = model.bos_token
self._pad_token = model.pad_token
unreplicated_optimizer = model.get_weights()
self._replicated_optimizer = utils.create_adam_optimizer(
model=unreplicated_optimizer.target,
learning_rate=learning_rate,
**optimizer_kwargs)
self._dropout_rngs = model._dropout_rngs
self._p_train_step = jax.pmap(
functools.partial(
models.train_step,
preprocess_fn=model.preprocess,
learning_rate_fn=lambda t: learning_rate),
axis_name='batch')
def fit_batch(self, batch, example_weights=None):
"""Runs one optimization step on batch."""
batch = common_utils.shard(batch)
if example_weights is not None:
example_weights = common_utils.shard(example_weights)
(self._replicated_optimizer, metrics,
self._dropout_rngs) = self._p_train_step(
self._replicated_optimizer,
inputs=batch,
example_weights=example_weights,
dropout_rng=self._dropout_rngs)
return metrics
def get_weights(self):
return flax_jax_utils.unreplicate(self._replicated_optimizer)
def fine_tune(model,
initial_weights,
sequences,
batch_size,
num_epochs,
learning_rate,
example_weights=None,
shuffle=True,
progress_bar=True,
**optimizer_kwargs):
"""Fine tunes model on sequences.
Args:
model: A models.FlaxLM.
initial_weights: The model is initialized with these weights.
sequences: A list of int-encoded sequences to train on.
batch_size: The batch size used when optimizing the model.
num_epochs: Number of passes to take through the input sequences.
learning_rate: Learning rate for optimization.
example_weights: Optional per-sequence weights for performing weighted MLE
training.
shuffle: Whether the input sequences should be shuffled.
progress_bar: Whether to display a progress bar.
**optimizer_kwargs: Additional kwargs to pass to
utils.create_adam_optimizer().
Returns:
A set of fine tuned weights. The model can be set to use these using
model.set_weights(fine_tuned_weights).
"""
model.set_weights(initial_weights)
runner = _OptimizationRunner(
model, learning_rate=learning_rate, **optimizer_kwargs)
dataset = _get_dataset(sequences, example_weights, batch_size, shuffle)
dataset_iter = iter(dataset.repeat())
num_iter = int(num_epochs * len(sequences) / batch_size)
iterations = list(range(num_iter))
if progress_bar:
iterations = tqdm.tqdm(iterations, position=0)
for _ in iterations:
batch = next(dataset_iter)
batch_example_weights = batch['example_weight'].numpy(
) if example_weights is not None else None
batch_sequences = batch['sequence'].numpy()
runner.fit_batch(batch_sequences, batch_example_weights)
return runner.get_weights()
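# --- Hedged usage sketch (not part of the original module) ---
# Illustrative only: shows how fine_tune() above might be called. The batch
# size, epoch count and learning rate are made-up hyperparameters; `model` is
# assumed to be an already-built models.FlaxLM and `encoded_sequences` a list
# of int-encoded sequences, as the fine_tune() docstring requires.
def _example_fine_tune(model, encoded_sequences):
  initial_weights = model.get_weights()
  tuned_weights = fine_tune(
      model,
      initial_weights,
      encoded_sequences,
      batch_size=8,
      num_epochs=1,
      learning_rate=1e-4)
  # Apply the returned weights back onto the model.
  model.set_weights(tuned_weights)
  return model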
|
apache-2.0
| 2,070,980,063,228,542,700 | 32.755725 | 78 | 0.696517 | false |
jucacrispim/toxicbuild
|
toxicbuild/master/repository.py
|
1
|
29784
|
# -*- coding: utf-8 -*-
# Copyright 2015-2020 Juca Crispim <[email protected]>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
from asyncio import ensure_future
import re
from bson.objectid import ObjectId
from mongoengine import PULL
from mongomotor import Document, EmbeddedDocument
from mongomotor.fields import (
StringField,
IntField, ReferenceField,
DateTimeField,
ListField,
BooleanField,
EmbeddedDocumentField,
DictField,
DynamicField
)
from toxicbuild.common.exchanges import (
notifications,
ui_notifications,
scheduler_action,
)
from toxicbuild.core import utils, build_config
from toxicbuild.core.utils import string2datetime
from toxicbuild.master.build import (BuildSet, Builder, BuildManager)
from toxicbuild.master.client import get_poller_client
from toxicbuild.master.document import OwnedDocument, ExternalRevisionIinfo
from toxicbuild.master.exceptions import RepoBranchDoesNotExist
from toxicbuild.master.utils import (get_build_config_type,
get_build_config_filename)
from toxicbuild.master.signals import buildset_added
from toxicbuild.master.slave import Slave
# The thing here is: When a repository poller is scheduled, I need to
# keep track of the hashes so I can remove it from the scheduler
# when needed.
# {repourl-update-code: hash}
_update_code_hashes = {}
# The is {repourl-start-pending: hash} for starting pending builds
_scheduler_hashes = {}
class RepositoryBranch(EmbeddedDocument):
"""The configuration for a branch of a repository."""
name = StringField(required=True)
"""The name of the branch."""
notify_only_latest = BooleanField(default=False)
"""If True, only the latest revision will be notified and only
the last revision will generate a buildset."""
def to_dict(self):
"""Returns a dict representation of the obj."""
return {'name': self.name,
'notify_only_latest': self.notify_only_latest}
class LatestBuildSet(EmbeddedDocument):
"""The most recent buildset in the repository."""
status = StringField()
"""The buildset status"""
commit = StringField()
"""The commit sha"""
title = StringField()
"""The commit title"""
started = DateTimeField()
"""When the buildset started"""
total_time = IntField()
"""Total time in seconds spent by the buildset"""
commit_date = DateTimeField()
"""Date of the commit"""
class Repository(OwnedDocument, utils.LoggerMixin):
"""Repository is where you store your code and where toxicbuild
    looks for incoming changes."""
url = StringField(required=True, unique=True)
"""The url of the repository."""
fetch_url = StringField()
"""A url used to actually fetch the code. If using some
kind of authentication based in a url, this may change often."""
update_seconds = IntField(default=300, required=True)
"""If the repository added manually (not imported), indicates the inteval
for polling for new revisions."""
vcs_type = StringField(required=True, default='git')
"""The type of vcs used in this repo."""
branches = ListField(EmbeddedDocumentField(RepositoryBranch))
"""A list of :class:`~toxicbuild.master.repository.RepositoryBranch`.
These branches are the ones that trigger builds. If no branches,
all branches will trigger builds."""
slaves = ListField(ReferenceField(Slave, reverse_delete_rule=PULL))
"""A list of :class:`~toxicbuild.master.slave.Slave`. The slaves here
are the slaves allowed to run builds for this repo."""
clone_status = StringField(choices=('cloning', 'ready', 'clone-exception'),
default='cloning')
"""The status of the clone."""
schedule_poller = BooleanField(default=True)
"""Indicates if we should schedule periodical polls for changes in code. If
the repo was imported from an external service that sends webhooks
(or something else) this should be False."""
parallel_builds = IntField()
"""Max number of builds in parallel that this repo exeutes
If None, there's no limit for parallel builds.
"""
enabled = BooleanField(default=True)
"""Indicates if this repository is enabled to run builds."""
external_id = DynamicField()
"""The repository id in an external service."""
external_full_name = StringField()
"""The full name of the repository in an external service"""
envvars = DictField()
"""Environment variables that are used in every build in this
repository. It is a dictionary {'VAR': 'VAL', ...}
"""
latest_buildset = EmbeddedDocumentField(LatestBuildSet)
"""The most recent buildset for a repository."""
meta = {
'ordering': ['name'],
}
_running_builds = 0
_stop_consuming_messages = False
def __init__(self, *args, **kwargs):
from toxicbuild.master import scheduler
super(Repository, self).__init__(*args, **kwargs)
self.scheduler = scheduler
self.build_manager = BuildManager(self)
self.config_type = get_build_config_type()
self.config_filename = get_build_config_filename()
self._old_status = None
self._vcs_instance = None
@classmethod
def add_running_build(cls):
"""Add a running build to the count of running builds among all
repositories."""
cls._running_builds += 1
@classmethod
def remove_running_build(cls):
"""Removes a running build from the count of running builds among all
repositories."""
cls._running_builds -= 1
@classmethod
def get_running_builds(cls):
"""Returns the number of running builds among all the repos."""
return cls._running_builds
@classmethod
async def create(cls, **kwargs):
"""Creates a new repository and schedule it if needed.
:param kwargs: kwargs used to create the repository."""
slaves = kwargs.pop('slaves', [])
branches = kwargs.pop('branches', [])
repo = cls(**kwargs, slaves=slaves, branches=branches)
await repo.save()
await cls._notify_repo_creation(repo)
if repo.schedule_poller:
repo.schedule()
return repo
def get_url(self):
return self.fetch_url or self.url
@classmethod
async def get(cls, **kwargs):
"""Returns a repository instance and create locks if needed
:param kwargs: kwargs to match the repository."""
repo = await cls.objects.get(**kwargs)
return repo
@classmethod
async def get_for_user(cls, user, **kwargs):
"""Returns a repository if ``user`` has permission for it.
If not raises an error.
:param user: User who is requesting the repository.
:param kwargs: kwargs to match the repository.
"""
repo = await super().get_for_user(user, **kwargs)
return repo
async def save(self, *args, **kwargs):
set_full_name = (hasattr(self, '_changed_fields') and
('name' in self._changed_fields or
'owner' in self._changed_fields))
if set_full_name or not self.full_name:
owner = await self.owner
self.full_name = '{}/{}'.format(owner.name, self.name)
r = await super().save(*args, **kwargs)
return r
async def to_dict(self, short=False):
"""Returns a dict representation of the object.
:param short: Indicates if the returned dict has only basic information
"""
my_dict = {'id': str(self.id), 'name': self.name, 'url': self.url,
'full_name': self.full_name,
'vcs_type': self.vcs_type,
'enabled': self.enabled,
'clone_status': self.clone_status}
if not short:
slaves = await self.slaves
my_dict.update(
{'external_full_name': self.external_full_name,
'update_seconds': self.update_seconds,
'fetch_url': self.fetch_url,
'branches': [b.to_dict() for b in self.branches],
'slaves': [s.to_dict(id_as_str=True) for s in slaves],
'parallel_builds': self.parallel_builds,
'envvars': self.envvars}
)
return my_dict
async def get_status(self):
"""Returns the status for the repository. The status is the
status of the last buildset created for this repository that is
not pending."""
last_buildset = await self.get_lastest_buildset()
clone_statuses = ['cloning', 'clone-exception']
if not last_buildset and self.clone_status in clone_statuses:
status = self.clone_status
elif not last_buildset:
status = 'ready'
else:
status = last_buildset.status
return status
async def bootstrap(self):
"""Initialise the needed stuff. Schedules updates for code,
start of pending builds, connect to signals.
"""
self.schedule()
@classmethod
async def bootstrap_all(cls):
async for repo in cls.objects.all():
await repo.bootstrap()
def schedule(self):
"""Schedules all needed actions for a repository. The actions are:
        * Sends an ``add-update-code`` to the scheduler server.
* Starts builds that are pending using
``self.build_manager.start_pending``.
"""
self.log('Scheduling {url}'.format(url=self.url))
if self.schedule_poller:
# add update_code
update_code_hash = self.scheduler.add(self.update_code,
self.update_seconds)
_update_code_hashes['{}-update-code'.format(
self.url)] = update_code_hash
# adding start_pending
start_pending_hash = self.scheduler.add(
self.build_manager.start_pending, 120)
_scheduler_hashes['{}-start-pending'.format(
self.url)] = start_pending_hash
@classmethod
async def schedule_all(cls):
""" Schedule all repositories. """
repos = await cls.objects.all().to_list()
for repo in repos:
repo.schedule()
async def remove(self):
""" Removes all builds and builders and revisions related to the
repository, removes the poller from the scheduler, removes the
source code from the file system and then removes the repository.
"""
builds = BuildSet.objects.filter(repository=self)
await builds.delete()
builders = Builder.objects.filter(repository=self)
await builders.delete()
revisions = RepositoryRevision.objects.filter(repository=self)
await revisions.delete()
sched_msg = {'type': 'rm-update-code', 'repository_id': str(self.id)}
await scheduler_action.publish(sched_msg)
try:
update_hash = _update_code_hashes['{}-update-code'.format(
self.url)]
self.scheduler.remove_by_hash(update_hash)
del _update_code_hashes['{}-update-code'.format(
self.url)]
pending_hash = _scheduler_hashes['{}-start-pending'.format(
self.url)]
self.scheduler.remove_by_hash(pending_hash)
del _scheduler_hashes['{}-start-pending'.format(self.url)]
except KeyError: # pragma no cover
# means the repository was not scheduled
pass
await self.delete()
async def request_removal(self):
"""Request the removal of a repository by publishing a message in the
``notifications`` queue with the routing key
`repo-removal-requested`."""
msg = {'repository_id': str(self.id)}
await notifications.publish(
msg, routing_key='repo-removal-requested')
async def request_code_update(self, repo_branches=None, external=None):
"""Request the code update of a repository by publishing a message in
the ``notifications`` queue with the routing key
        `update-code-requested`.
:param repo_branches: A dictionary with information about the branches
to be updated. If no ``repo_branches`` all branches in the repo
config will be updated.
The dictionary has the following format.
.. code-block:: python
{'branch-name': {'notify_only_latest': True}}
:param external: If we should update code from an external
(not the origin) repository, `external` is the information about
this remote repo.
"""
msg = {'repository_id': str(self.id),
'repo_branches': repo_branches,
'external': external}
await notifications.publish(
msg, routing_key='update-code-requested')
async def update_code(self, repo_branches=None, external=None):
"""Requests a code update to a poller and adds builds to the
new revisions returned.
:param repo_branches: A dictionary with information about the branches
to be updated. If no ``repo_branches`` all branches in the repo
config will be updated.
The dictionary has the following format.
.. code-block:: python
{'branch-name': notify_only_latest}
:param external: If we should update code from an external
(not the origin) repository, `external` is the information about
this remote repo.
"""
async with get_poller_client(self) as client:
ret = await client.poll_repo(branches_conf=repo_branches,
external=external)
if ret['revisions']:
revs = []
for rinfo in ret['revisions']:
rev = RepositoryRevision(repository=self, **rinfo)
rev.commit_date = string2datetime(rinfo['commit_date'])
rev.config_type = self.config_type
revs.append(rev)
revs = await RepositoryRevision.objects.insert(revs)
await self.build_manager.add_builds(revs)
if ret['with_clone']:
self.clone_status = ret['clone_status']
await self.save()
status_msg = {'repository_id': str(self.id),
'old_status': 'cloning',
'new_status': self.clone_status}
await self._notify_status_changed(status_msg)
async def add_slave(self, slave):
"""Adds a new slave to a repository.
:param slave: A slave instance."""
slaves = await self.slaves
slaves.append(slave)
self.slaves = slaves
await self.save()
return slave
async def remove_slave(self, slave):
"""Removes a slave from a repository.
:param slave: A slave instance."""
slaves = await self.slaves
slaves.pop(slaves.index(slave))
await self.update(set__slaves=slaves)
return slave
async def add_or_update_branch(self, branch_name,
notify_only_latest=False):
"""Adds a new branch to this repository. If the branch
already exists updates it with a new value.
:param branch_name: The name of a branch
:param notify_only_latest: If we should build only the most
            recent revision of this branch"""
# this is a shitty way of doing this. What is the
# better way?
def get_branch(branch_name):
for b in self.branches:
if b.name == branch_name:
return b
branch = get_branch(branch_name)
if branch:
branch.notify_only_latest = notify_only_latest
else:
branch = RepositoryBranch(name=branch_name,
notify_only_latest=notify_only_latest)
self.branches.append(branch)
await self.save()
async def remove_branch(self, branch_name):
"""Removes a branch from this repository.
:param branch_name: The branch name."""
await self.update(pull__branches__name=branch_name)
async def get_lastest_buildset(self):
return self.latest_buildset
async def set_latest_buildset(self, buildset):
lb = LatestBuildSet(status=buildset.status, commit=buildset.commit,
title=buildset.title, started=buildset.started,
total_time=buildset.total_time,
commit_date=buildset.commit_date)
self.latest_buildset = lb
await self.save()
async def get_latest_revision_for_branch(self, branch):
""" Returns the latest revision for a given branch
:param branch: branch name
"""
latest = RepositoryRevision.objects.filter(
repository=self, branch=branch).order_by('-commit_date')
latest = await latest.first()
return latest
async def get_latest_revisions(self):
""" Returns the latest revision for all known branches
"""
branches = await self.get_known_branches()
revs = {}
for branch in branches:
rev = await self.get_latest_revision_for_branch(branch)
revs[branch] = rev
return revs
async def get_known_branches(self):
""" Returns the names for the branches that already have some
revision here.
"""
branches = await RepositoryRevision.objects.filter(
repository=self).distinct('branch')
return branches
async def add_revision(self, branch, commit, commit_date, author, title,
body=None, external=None, builders_fallback=''):
""" Adds a revision to the repository.
:param commit: Commit's sha
:param branch: branch name
:param commit_date: commit's date (on authors time)
:param author: The author of the commit
:param title: The commit title.
:param body: The commit body.
:param external: Information about an external remote if the revision
came from an external.
:param builders_fallback: If not None, builders from this branch will
            be used in case the revision branch has no builders configured
for it
"""
kw = dict(repository=self, commit=commit,
branch=branch, commit_date=commit_date,
author=author, title=title, body=body)
if external:
external_rev = ExternalRevisionIinfo(**external)
kw['external'] = external_rev
revision = RepositoryRevision(**kw)
await revision.save()
return revision
async def add_builds_for_buildset(self, buildset, conf, builders=None,
builders_origin=None):
"""Adds a buildset to the build queue of a given slave
for this repository.
:param buildset: An instance of
:class:`toxicbuild.master.build.BuildSet`.
        :param conf: The build configuration for the buildset.
        :param builders: The builders to use in the builds. If no builders,
all builders for the revision will be used.
:param builders_origin: Indicates from which branch config the builds
        came. Useful for merge requests to test against the tests on the main
branch.
"""
builders = builders or []
await self.build_manager.add_builds_for_buildset(
buildset, conf, builders=builders,
builders_origin=builders_origin)
def _get_builder_kw(self, name_or_id):
kw = {'repository': self}
if ObjectId.is_valid(name_or_id):
kw['id'] = name_or_id
else:
kw['name'] = name_or_id
return kw
async def start_build(self, branch, builder_name_or_id=None,
named_tree=None, builders_origin=None):
""" Starts a (some) build(s) in the repository. """
if not named_tree:
rev = await self.get_latest_revision_for_branch(branch)
named_tree = rev.commit
else:
rev = await RepositoryRevision.objects(branch=branch,
commit=named_tree).first()
buildset = await BuildSet.create(repository=self, revision=rev)
if not rev.config:
self.log('No config found', level='debug')
buildset.status = type(buildset).NO_CONFIG
await buildset.save()
buildset_added.send(str(self.id), buildset=buildset)
return
conf = self.get_config_for(rev)
if not builder_name_or_id:
builders, builders_origin = await self._get_builders(rev, conf)
else:
builders_origin = None
kw = self._get_builder_kw(builder_name_or_id)
builders = [(await Builder.get(**kw))]
await self.add_builds_for_buildset(buildset, conf,
builders=builders,
builders_origin=builders_origin)
async def request_build(self, branch, builder_name=None, named_tree=None,
slaves=None):
"""Publishes a message in the `notifications` exchange requesting
a build. Uses the routing_key `build-requested`"""
slaves = slaves or []
msg = {'repository_id': str(self.id),
'branch': branch, 'builder_name': builder_name,
'named_tree': named_tree,
'slaves_ids': [str(s.id) for s in slaves]}
await notifications.publish(msg, routing_key='build-requested')
async def cancel_build(self, build_uuid):
"""Cancels a build.
:param build_uuid: The uuid of the build."""
await self.build_manager.cancel_build(build_uuid)
async def enable(self):
self.enabled = True
await self.save()
async def disable(self):
self.enabled = False
await self.save()
def get_branch(self, branch_name):
"""Returns an instance of
:class:`~toxicbuild.master.repository.RepositoryBranch`"""
for branch in self.branches:
if branch.name == branch_name:
return branch
raise RepoBranchDoesNotExist(branch_name)
def notify_only_latest(self, branch_name):
"""Indicates if a branch notifies only the latest revision.
:param branch_name: The name of the branch."""
try:
branch = self.get_branch(branch_name)
only_latest = branch.notify_only_latest
except RepoBranchDoesNotExist:
only_latest = True
return only_latest
def get_config_for(self, revision):
"""Returns the build configuration for a given revision.
:param revision: A
:class`~toxicbuild.master.repository.RepositoryRevision` instance.
"""
conf = build_config.load_config(
self.config_type, revision.config)
return conf
async def add_envvars(self, **envvars):
"""Adds new environment variables to this repository.
:param envvars: A dictionary {var: val, ...}.
"""
self.envvars.update(envvars)
await self.save()
async def rm_envvars(self, **envvars):
"""Removes environment variables from this repository.
:param envvars: A dictionary {var: val, ...}.
"""
for k in envvars:
try:
self.envvars.pop(k)
except KeyError:
pass
await self.save()
async def replace_envvars(self, **envvars):
"""Replaces the current environment variables of the repository.
:param envvars: The environment variables that will replace the
            current ones.
"""
self.envvars = envvars
await self.save()
async def _get_builders(self, revision, conf):
builders, origin = await self.build_manager.get_builders(
revision, conf)
return builders, origin
@classmethod
async def _notify_repo_creation(cls, repo):
repo_added_msg = await repo.to_dict()
await ui_notifications.publish(repo_added_msg)
repo_added_msg['msg_type'] = 'repo_added'
async for user in await repo.get_allowed_users():
ensure_future(ui_notifications.publish(
repo_added_msg, routing_key=str(user.id)))
async def _notify_status_changed(self, status_msg):
self.log('Notify status changed {}'.format(status_msg),
level='debug')
await ui_notifications.publish(status_msg,
routing_key=str(self.id))
status_msg['msg_type'] = 'repo_status_changed'
async for user in await self.get_allowed_users():
ensure_future(ui_notifications.publish(
status_msg, routing_key=str(user.id)))
class RepositoryRevision(Document):
"""A commit in the code tree."""
repository = ReferenceField(Repository, required=True)
"""A referece to :class:`~toxicbuild.master.repository.Repository`."""
commit = StringField(required=True)
"""The identifier of the revision, a sha, a tag name, etc..."""
branch = StringField(required=True)
"""The name of the revison branch."""
author = StringField(required=True)
"""The author of the commit."""
title = StringField(required=True)
"""The title of the commit."""
body = StringField()
"""The commit body."""
commit_date = DateTimeField(required=True)
"""Commit's date."""
external = EmbeddedDocumentField(ExternalRevisionIinfo)
"""A list of :class:`~toxicbuild.master.bulid.RepositoryRevisionExternal`.
"""
builders_fallback = StringField()
"""A name of a branch. If not None, builders from this branch will be used
if there are no builders for the branch of the revision."""
config = StringField()
"""The build configuration for this revision"""
config_type = StringField()
"""The type of congif used"""
@classmethod
async def get(cls, **kwargs):
"""Returs a RepositoryRevision object."""
ret = await cls.objects.get(**kwargs)
return ret
async def to_dict(self):
"""Returns a dict representation of the object."""
repo = await self.repository
rev_dict = {'repository_id': str(repo.id),
'commit': self.commit,
'branch': self.branch,
'author': self.author,
'title': self.title,
'commit_date': utils.datetime2string(self.commit_date)}
if self.external:
rev_dict.update({'external': self.external.to_dict()})
return rev_dict
def create_builds(self):
r"""Checks for instructions in the commit body to know if a
revision should create builds.
Known instructions:
- ``ci: skip`` - If in the commit body there's this instruction,
          no builds will be created for this revision. The regex used to
          match this instruction is ``(^|.*\s+)ci:\s*skip(\s+|$)``
"""
if not self.body:
# No body, no instructions, we create builds normally
return True
return not self._check_skip()
def _get_match(self, pattern):
if not self.body:
return None
for l in self.body.splitlines():
m = re.match(pattern, l)
if m:
return m
def _check_skip(self):
skip_pattern = re.compile(r'(^|.*\s+)ci:\s*skip(\s+|$)')
return bool(self._get_match(skip_pattern))
def _get_builders_match(self, pattern):
builders_match = self._get_match(pattern)
if builders_match:
builders = builders_match.groups()[1].split(',')
builders = [b.strip() for b in builders]
else:
builders = []
return builders
def get_builders_conf(self):
"""Returns the builder configuration - includes and excludes -
        for a given revision in its commit body.
Known instructions:
- ``ci: include-builders builder-name,other-builder``: Include only
the builders listed in the configuration. The names are separated
by comma.
- ``ci: exclude-builders builder-name,other-builder``: Exclude
the builders listed in the configuration. The names are separated
by comma.
"""
confs = {}
include_pattern = re.compile(
r'(^|.*\s+)ci:\s*include-builders\s+(.*)$')
exclude_pattern = re.compile(
r'(^|.*\s+)ci:\s*exclude-builders\s+(.*)$')
confs['include'] = self._get_builders_match(include_pattern)
confs['exclude'] = self._get_builders_match(exclude_pattern)
return confs
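# --- Hedged usage sketch (not part of the original module) ---
# Illustrative only: demonstrates the commit-body instructions documented in
# RepositoryRevision.create_builds() and get_builders_conf(). The commit
# bodies are made up, and it is assumed (mongoengine-style) that documents can
# be instantiated in memory without their required fields as long as they are
# never saved.
def _example_commit_body_instructions():
    skipped = RepositoryRevision(body='Fix typo\n\nci: skip')
    # "ci: skip" in the body means no builds are created for this revision.
    assert not skipped.create_builds()
    selective = RepositoryRevision(
        body='Add feature\n\nci: include-builders unit, integration')
    # Only the listed builders should run for this revision.
    assert selective.get_builders_conf() == {
        'include': ['unit', 'integration'], 'exclude': []}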
|
agpl-3.0
| -3,229,045,883,458,122,000 | 33.592334 | 79 | 0.608145 | false |
PedalPi/PluginsManager
|
test/example.py
|
1
|
2150
|
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pluginsmanager.banks_manager import BanksManager
from pluginsmanager.observer.mod_host.mod_host import ModHost
from pluginsmanager.model.bank import Bank
from pluginsmanager.model.pedalboard import Pedalboard
from pluginsmanager.model.connection import Connection
from pluginsmanager.model.lv2.lv2_effect_builder import Lv2EffectBuilder
from pluginsmanager.model.system.system_effect import SystemEffect
if __name__ == '__main__':
manager = BanksManager()
bank = Bank('Bank 1')
manager.append(bank)
mod_host = ModHost('raspberrypi.local')
mod_host.connect()
manager.register(mod_host)
pedalboard = Pedalboard('Rocksmith')
bank.append(pedalboard)
mod_host.pedalboard = pedalboard
builder = Lv2EffectBuilder()
reverb = builder.build('http://calf.sourceforge.net/plugins/Reverb')
fuzz = builder.build('http://guitarix.sourceforge.net/plugins/gx_fuzz_#fuzz_')
reverb2 = builder.build('http://calf.sourceforge.net/plugins/Reverb')
pedalboard.append(reverb)
pedalboard.append(fuzz)
pedalboard.append(reverb2)
sys_effect = SystemEffect('system', ['capture_1'], ['playback_1', 'playback_2'])
pedalboard.connections.append(Connection(sys_effect.outputs[0], reverb.inputs[0]))
reverb.outputs[0].connect(fuzz.inputs[0])
reverb.outputs[1].connect(fuzz.inputs[0])
fuzz.outputs[0].connect(reverb2.inputs[0])
reverb.outputs[0].connect(reverb2.inputs[0])
# Causes error
reverb2.outputs[0].connect(sys_effect.inputs[0])
reverb2.outputs[0].connect(sys_effect.inputs[1])
|
apache-2.0
| -6,037,759,852,432,589,000 | 33.126984 | 86 | 0.741395 | false |
richardxx/mongoctl-service
|
mongoctl/mongoctl_command_config.py
|
1
|
60619
|
#
# The MIT License
#
# Copyright (c) 2012 ObjectLabs Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__author__ = 'abdul'
MONGOCTL_PARSER_DEF = {
"prog": "mongoctl",
"usage": "Usage: mongoctl [<options>] <command> [<command-args>]",
"description" : "A utility that simplifies the management of MongoDB servers and replica set clusters.",
"args": [
{
"name": "mongoctlVerbose",
"type" : "optional",
"help": "make mongoctl more verbose",
"cmd_arg": [
"-v",
"--verbose"
],
"nargs": 0,
"action": "store_true",
"default": False
},
{
"name": "noninteractive",
"type" : "optional",
"help": "bypass prompting for user interaction",
"cmd_arg": [
"-n",
"--noninteractive"
],
"nargs": 0,
"action": "store_true",
"default": False
},
{
"name": "yesToEverything",
"type" : "optional",
"help": "auto yes to all yes/no prompts",
"cmd_arg": [
"--yes"
],
"nargs": 0,
"action": "store_true",
"default": False
},
{
"name": "noToEverything",
"type" : "optional",
"help": "auto no to all yes/no prompts",
"cmd_arg": [
"--no"
],
"nargs": 0,
"action": "store_true",
"default": False
},
{
"name": "configRoot",
"type" : "optional",
"help": "path to mongoctl config root; defaults to ~/.mongoctl",
"cmd_arg": [
"--config-root"
],
"nargs": 1
}
],
"child_groups": [
{
"name" :"adminCommands",
"display": "Admin Commands"
},
{
"name" :"clientCommands",
"display": "Client Commands"
},
{
"name" :"serverCommands",
"display": "Server Commands"
},
{
"name" :"clusterCommands",
"display": "Cluster Commands"
},
{
"name" :"miscCommands",
"display": "Miscellaneous"
},
{
"name" :"shardCommands",
"display": "Sharding"
}
],
"children":[
#### start ####
{
"prog": "start",
"group": "serverCommands",
#"usage" : generate default usage
"shortDescription" : "start a server",
"description" : "Starts a specific server.",
"function": "mongoctl.commands.server.start.start_command",
"args":[
{
"name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID",
"help": "a valid server id"
},
{
"name": "dryRun",
"type" : "optional",
"cmd_arg": ["-n" , "--dry-run"],
"nargs": 0,
"help": "prints the mongod command to execute without "
"executing it",
"default": False
},
{
"name": "assumeLocal",
"type" : "optional",
"cmd_arg": "--assume-local",
"nargs": 0,
"help": "Assumes that the server will be started on local"
" host. This will skip local address/dns check",
"default": False
},
{
"name": "rsAdd",
"type" : "optional",
"cmd_arg": "--rs-add",
"nargs": 0,
"help": "Automatically add server to replicaset conf if "
"its not added yet",
"default": False
},
{
"name": "rsAddNoInit",
"type" : "optional",
"cmd_arg": "--rs-add-noinit",
"nargs": 0,
"help": "Automatically add server to an "
"initialized replicaset conf if "
"its not added yet",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
},
# mongod supported options
# confusing
# {
# "name": "config",
# "type" : "optional",
# "cmd_arg": ["-f", "--config"],
# "nargs": 1,
# "help": "configuration file specifying additional options"
# },
{
"name": "verbose",
"type" : "optional",
"cmd_arg": ["-v", "--verbose"],
"nargs": 0,
"help": "be more verbose (include multiple times for more"
" verbosity e.g. -vvvvv)"
},
{
"name": "quiet",
"type" : "optional",
"cmd_arg": "--quiet",
"nargs": 0,
"help": "quieter output"
},
{
"name": "address",
"type" : "optional",
"cmd_arg": "--address",
"nargs": 1,
"help": "specify host address for this server, default is localhost"
},
{
"name": "port",
"type" : "optional",
"cmd_arg": "--port",
"nargs": 1,
"help": "specify port number"
},
{
"name": "bind_ip",
"type" : "optional",
"cmd_arg": "--bind_ip",
"nargs": 1,
"help": "comma separated list of ip addresses to listen "
"on- all local ips by default"
},
{
"name": "maxConns",
"type" : "optional",
"cmd_arg": "--maxConns",
"nargs": 1,
"help": "max number of simultaneous connections"
},
{
"name": "objcheck",
"type" : "optional",
"cmd_arg": "--objcheck",
"nargs": 0,
"help": "inspect client data for validity on receipt"
},
{
"name": "logpath",
"type" : "optional",
"cmd_arg": "--logpath",
"nargs": 1,
"help": "log file to send write to instead of stdout -"
" has to be a file, not directory. "
"mongoctl defaults that to dbpath/mongodb.log"
},
{
"name": "logappend",
"type" : "optional",
"cmd_arg": "--logappend",
"nargs": 1,
"help": "append to logpath instead of over-writing"
},
{
"name": "pidfilepath",
"type" : "optional",
"cmd_arg": "--pidfilepath",
"nargs": 1,
"help": "full path to pidfile (if not set,"
" no pidfile is created). "
"mongoctl defaults that to dbpath/pid.txt"
},
{
"name": "keyFile",
"type" : "optional",
"cmd_arg": "--keyFile",
"nargs": 1,
"help": "private key for cluster authentication "
"(only for replica sets)"
},
{
"name": "nounixsocket",
"type" : "optional",
"cmd_arg": "--nounixsocket",
"nargs": 0,
"help": "disable listening on unix sockets"
},
{
"name": "unixSocketPrefix",
"type" : "optional",
"cmd_arg": "--unixSocketPrefix",
"nargs": 1,
"help": "alternative directory for UNIX domain sockets "
"(defaults to /tmp)"
},
{
"name": "auth",
"type" : "optional",
"cmd_arg": "--auth",
"nargs": 0,
"help": "run with security"
},
{
"name": "cpu",
"type" : "optional",
"cmd_arg": "--cpu",
"nargs": 0,
"help": "periodically show cpu and iowait utilization"
},
{
"name": "dbpath",
"type" : "optional",
"cmd_arg": "--dbpath",
"nargs": 1,
"help": "directory for datafiles"
},
{
"name": "diaglog",
"type" : "optional",
"cmd_arg": "--diaglog",
"nargs": 1,
"help": "0=off 1=W 2=R 3=both 7=W+some reads"
},
{
"name": "directoryperdb",
"type" : "optional",
"cmd_arg": "--directoryperdb",
"nargs": 0,
"help": "each database will be stored in a"
" separate directory"
},
{
"name": "journal",
"type" : "optional",
"cmd_arg": "--journal",
"nargs": 0,
"help": "enable journaling"
},
{
"name": "journalOptions",
"type" : "optional",
"cmd_arg": "--journalOptions",
"nargs": 1,
"help": "journal diagnostic options"
},
{
"name": "journalCommitInterval",
"type" : "optional",
"cmd_arg": "--journalCommitInterval",
"nargs": 1,
"help": "how often to group/batch commit (ms)"
},
{
"name": "ipv6",
"type" : "optional",
"cmd_arg": "--ipv6",
"nargs": 0,
"help": "enable IPv6 support (disabled by default)"
},
{
"name": "jsonp",
"type" : "optional",
"cmd_arg": "--jsonp",
"nargs": 0,
"help": "allow JSONP access via http "
"(has security implications)"
},
{
"name": "noauth",
"type" : "optional",
"cmd_arg": "--noauth",
"nargs": 0,
"help": "run without security"
},
{
"name": "nohttpinterface",
"type" : "optional",
"cmd_arg": "--nohttpinterface",
"nargs": 0,
"help": "disable http interface"
},
{
"name": "nojournal",
"type" : "optional",
"cmd_arg": "--nojournal",
"nargs": 0,
"help": "disable journaling (journaling is on by default "
"for 64 bit)"
},
{
"name": "noprealloc",
"type" : "optional",
"cmd_arg": "--noprealloc",
"nargs": 0,
"help": "disable data file preallocation - "
"will often hurt performance"
},
{
"name": "notablescan",
"type" : "optional",
"cmd_arg": "--notablescan",
"nargs": 0,
"help": "do not allow table scans"
},
{
"name": "nssize",
"type" : "optional",
"cmd_arg": "--nssize",
"nargs": 1,
"help": ".ns file size (in MB) for new databases"
},
{
"name": "profile",
"type" : "optional",
"cmd_arg": "--profile",
"nargs": 1,
"help": "0=off 1=slow, 2=all"
},
{
"name": "quota",
"type" : "optional",
"cmd_arg": "--quota",
"nargs": 0,
"help": "limits each database to a certain number"
" of files (8 default)"
},
{
"name": "quotaFiles",
"type" : "optional",
"cmd_arg": "--quotaFiles",
"nargs": 1,
"help": "number of files allower per db, requires --quota"
},
{
"name": "rest",
"type" : "optional",
"cmd_arg": "--rest",
"nargs": 1,
"help": "turn on simple rest api"
},
{
"name": "repair",
"type" : "optional",
"cmd_arg": "--repair",
"nargs": 0,
"help": "run repair on all dbs"
},
{
"name": "repairpath",
"type" : "optional",
"cmd_arg": "--repairpath",
"nargs": 1,
"help": "root directory for repair files - defaults "
"to dbpath"
},
{
"name": "slowms",
"type" : "optional",
"cmd_arg": "--slowms",
"nargs": 1,
"help": "value of slow for profile and console log"
},
{
"name": "smallfiles",
"type" : "optional",
"cmd_arg": "--smallfiles",
"nargs": 0,
"help": "use a smaller default file size"
},
{
"name": "syncdelay",
"type" : "optional",
"cmd_arg": "--syncdelay",
"nargs": 1,
"help": "seconds between disk syncs "
"(0=never, but not recommended)"
},
{
"name": "sysinfo",
"type" : "optional",
"cmd_arg": "--sysinfo",
"nargs": 0,
"help": "print some diagnostic system information"
},
{
"name": "upgrade",
"type" : "optional",
"cmd_arg": "--upgrade",
"nargs": 0,
"help": "upgrade db if needed"
},
{
"name": "fastsync",
"type" : "optional",
"cmd_arg": "--fastsync",
"nargs": 0,
"help": "indicate that this instance is starting from "
"a dbpath snapshot of the repl peer"
},
{
"name": "oplogSize",
"type" : "optional",
"cmd_arg": "--oplogSize",
"nargs": 1,
"help": "size limit (in MB) for op log"
},
{
"name": "master",
"type" : "optional",
"cmd_arg": "--master",
"nargs": 0,
"help": "master mode"
},
{
"name": "slave",
"type" : "optional",
"cmd_arg": "--slave",
"nargs": 0,
"help": "slave mode"
},
{
"name": "source",
"type" : "optional",
"cmd_arg": "--source",
"nargs": 1,
"help": "when slave: specify master as <server:port>"
},
{
"name": "only",
"type" : "optional",
"cmd_arg": "--only",
"nargs": 1,
"help": "when slave: specify a single database"
" to replicate"
},
{
"name": "slavedelay",
"type" : "optional",
"cmd_arg": "--slavedelay",
"nargs": 1,
"help": "specify delay (in seconds) to be used when "
"applying master ops to slave"
},
{
"name": "autoresync",
"type" : "optional",
"cmd_arg": "--autoresync",
"nargs": 0,
"help": "automatically resync if slave data is stale"
},
{
"name": "replSet",
"type" : "optional",
"cmd_arg": "--replSet",
"nargs": 1,
"help": "arg is <setname>[/<optionalseedhostlist>]"
},
{
"name": "configsvr",
"type" : "optional",
"cmd_arg": "--configsvr",
"nargs": 0,
"help": "declare this is a config db of a cluster;"
" default port 27019; default dir /data/configdb"
},
{
"name": "shardsvr",
"type" : "optional",
"cmd_arg": "--shardsvr",
"nargs": 0,
"help": "declare this is a shard db of a cluster;"
" default port 27018"
},
{
"name": "noMoveParanoia",
"type" : "optional",
"cmd_arg": "--noMoveParanoia",
"nargs": 0,
"help": "turn off paranoid saving of data for moveChunk."
" this is on by default for now,"
" but default will switch"
},
{
"name": "setParameter",
"type" : "optional",
"cmd_arg": "--setParameter",
"nargs": 1,
"help": "Set a configurable parameter"
}
]
},
#### stop ####
{
"prog": "stop",
"group": "serverCommands",
"shortDescription" : "stop a server",
"description" : "Stops a specific server.",
"function": "mongoctl.commands.server.stop.stop_command",
"args":[
{ "name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID or PROCESS_ID",
"help": "A valid server id or process id"
},
{ "name": "forceStop",
"type": "optional",
"cmd_arg": ["-f", "--force"],
"nargs": 0,
"help": "force stop if needed via kill",
"default": False
},
{
"name": "assumeLocal",
"type" : "optional",
"cmd_arg": "--assume-local",
"nargs": 0,
"help": "Assumes that the server will be stopped on local"
" host. This will skip local address/dns check",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### restart ####
{
"prog": "restart",
"group": "serverCommands",
"shortDescription" : "restart a server",
"description" : "Restarts a specific server.",
"function": "mongoctl.commands.server.restart.restart_command",
"args":[
{ "name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID",
"help": "A valid server id"
},
{
"name": "assumeLocal",
"type" : "optional",
"cmd_arg": "--assume-local",
"nargs": 0,
"help": "Assumes that the server will be stopped on local"
" host. This will skip local address/dns check",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### status ####
{
"prog": "status",
"group": "serverCommands",
"shortDescription" : "retrieve status of server or a cluster",
"description" : "Retrieves the status of a server or a cluster",
"function": "mongoctl.commands.common.status.status_command",
"args":[
{ "name": "id",
"type" : "positional",
"nargs": 1,
"displayName": "[SERVER OR CLUSTER ID]",
"help": "A valid server or cluster id"
},
{ "name": "statusVerbose",
"type" : "optional",
"cmd_arg": ["-v", "--verbose"],
"nargs": 0,
"help": "include more information in status"
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### list-servers ####
{
"prog": "list-servers",
"group": "serverCommands",
"shortDescription" : "show list of configured servers",
"description" : "Show list of configured servers.",
"function": "mongoctl.commands.server.list_servers.list_servers_command"
},
#### show-server ####
{
"prog": "show-server",
"group": "serverCommands",
"shortDescription" : "show server's configuration",
"description" : "Shows the configuration for a specific server.",
"function": "mongoctl.commands.server.show.show_server_command" ,
"args":[
{ "name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID",
"help": "A valid server id"
}
]
},
#### connect ####
{
"prog": "connect",
"group": "clientCommands",
"shortDescription" : "open a mongo shell connection to a server",
"description" : "Opens a mongo shell connection to the specified database. If a\n"
"cluster is specified command will connect to the primary server.\n\n"
"<db-address> can be one of:\n"
" (a) a mongodb URI (e.g. mongodb://localhost:27017[/mydb])\n"
" (b) <server-id>[/<db>]\n"
" (c) <cluster-id>[/<db>]\n",
"function": "mongoctl.commands.common.connect.connect_command",
"args": [
{
"name": "dbAddress",
"type" : "positional",
"nargs": 1,
"displayName": "<db-address>",
"help": "database addresses supported by mongoctl."
" Check docs for more details."
},
{
"name": "jsFiles",
"type" : "positional",
"nargs": "*",
"displayName": "[file names (ending in .js)]",
"help": "file names: a list of files to run. files have to"
" end in .js and will exit after unless --shell"
" is specified"
},
{
"name": "username",
"type" : "optional",
"help": "username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "password",
"cmd_arg": [
"-p"
],
"nargs": "?"
},
{
"name": "shell",
"type" : "optional",
"help": "run the shell after executing files",
"cmd_arg": [
"--shell"
],
"nargs": 0
},
{
"name": "norc",
"type" : "optional",
"help": 'will not run the ".mongorc.js" file on start up',
"cmd_arg": [
"--norc"
],
"nargs": 0
},
{
"name": "quiet",
"type" : "optional",
"help": 'be less chatty',
"cmd_arg": [
"--quiet"
],
"nargs": 0
},
{
"name": "eval",
"type" : "optional",
"help": 'evaluate javascript',
"cmd_arg": [
"--eval"
],
"nargs": 1
},
{
"name": "verbose",
"type" : "optional",
"help": 'increase verbosity',
"cmd_arg": [
"--verbose"
],
"nargs": 0
},
{
"name": "ipv6",
"type" : "optional",
"help": 'enable IPv6 support (disabled by default)',
"cmd_arg": [
"--ipv6"
],
"nargs": 0
},
]
},
#### tail-log ####
{
"prog": "tail-log",
"group": "serverCommands",
"shortDescription" : "tails a server's log file",
"description" : "Tails server's log file. Works only on local host",
"function": "mongoctl.commands.server.tail_log.tail_log_command",
"args": [
{
"name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID",
"help": "a valid server id"
},
{
"name": "assumeLocal",
"type" : "optional",
"cmd_arg": "--assume-local",
"nargs": 0,
"help": "Assumes that the server is running on local"
" host. This will skip local address/dns check",
"default": False
}
]
},
#### dump ####
{
"prog": "dump",
"group": "clientCommands",
"shortDescription" : "Export MongoDB data to BSON files (using mongodump)",
"description" : "Runs a mongodump to the specified database address or dbpath. If a\n"
"cluster is specified command will run the dump against "
"the primary server.\n\n"
"<db-address> can be one of:\n"
" (a) a mongodb URI (e.g. mongodb://localhost:27017[/mydb])\n"
" (b) <server-id>[/<db>]\n"
" (c) <cluster-id>[/<db>]\n",
"function": "mongoctl.commands.common.dump.dump_command",
"args": [
{
"name": "target",
"displayName": "TARGET",
"type" : "positional",
"nargs": 1,
"help": "database address or dbpath. Check docs for"
" more details."
},
{
"name": "useBestSecondary",
"type" : "optional",
"help": "Only for clusters. Dump from the best secondary "
"(passive / least repl lag)",
"cmd_arg": [
"--use-best-secondary"
],
"nargs": 0
},
# {
# "name": "maxReplLag",
# "type" : "optional",
# "help": "Used only with --use-best-secondary. Select "
# "members whose repl lag is less than than "
# "specified max ",
# "cmd_arg": [
# "--max-repl-lag"
# ],
# "nargs": 1
#},
{
"name": "username",
"type" : "optional",
"help": "username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "password",
"cmd_arg": [
"-p"
],
"nargs": "?"
},
{
"name": "verbose",
"type" : "optional",
"help": 'increase verbosity',
"cmd_arg": [
"-v",
"--verbose"
],
"nargs": 0
},
{
"name": "directoryperdb",
"type" : "optional",
"help": "if dbpath specified, each db is in a separate directory",
"cmd_arg": [
"--directoryperdb"
],
"nargs": 0
},
{
"name": "journal",
"type" : "optional",
"help": "enable journaling",
"cmd_arg": [
"--journal"
],
"nargs": 0
},
{
"name": "collection",
"type" : "optional",
"displayName": "COLLECTION",
"help": "collection to use (some commands)",
"cmd_arg": [
"-c",
"--collection"
],
"nargs": 1
},
{
"name": "out",
"type" : "optional",
"displayName": "DIR",
"help": "output directory or '-' for stdout",
"cmd_arg": [
"-o",
"--out"
],
"nargs": 1
},
{
"name": "query",
"type" : "optional",
"displayName": "QUERY",
"help": "json query",
"cmd_arg": [
"-q",
"--query"
],
"nargs": 1
},
{
"name": "oplog",
"type" : "optional",
"help": " Use oplog for point-in-time snapshotting",
"cmd_arg": [
"--oplog"
],
"nargs": 0
},
{
"name": "repair",
"type" : "optional",
"help": " try to recover a crashed database",
"cmd_arg": [
"--repair"
],
"nargs": 0
},
{
"name": "forceTableScan",
"type" : "optional",
"help": " force a table scan (do not use $snapshot)",
"cmd_arg": [
"--forceTableScan"
],
"nargs": 0
},
{
"name": "ipv6",
"type" : "optional",
"cmd_arg": "--ipv6",
"nargs": 0,
"help": "enable IPv6 support (disabled by default)"
},
{
"name": "authenticationDatabase",
"type" : "optional",
"cmd_arg": "--authenticationDatabase",
"nargs": 1,
"help": "user source (defaults to dbname). 2.4.x or greater only."
}
]
},
#### restore ####
{
"prog": "restore",
"group": "clientCommands",
"shortDescription" : "Restore MongoDB (using mongorestore)",
"description" : "Runs a mongorestore from specified file or directory"
" to database address or dbpath. If a\n"
"cluster is specified command will restore against "
"the primary server.\n\n"
"<db-address> can be one of:\n"
" (a) a mongodb URI (e.g. mongodb://localhost:27017[/mydb])\n"
" (b) <server-id>[/<db>]\n"
" (c) <cluster-id>[/<db>]\n",
"function": "mongoctl.commands.common.restore.restore_command",
"args": [
{
"name": "destination",
"displayName": "DESTINATION",
"type" : "positional",
"nargs": 1,
"help": "database address or dbpath. Check docs for"
" more details."
},
{
"name": "source",
"displayName": "SOURCE",
"type" : "positional",
"nargs": 1,
"help": "directory or filename to restore from"
},
{
"name": "username",
"type" : "optional",
"help": "username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "password",
"cmd_arg": [
"-p"
],
"nargs": "?"
},
{
"name": "verbose",
"type" : "optional",
"help": 'increase verbosity',
"cmd_arg": [
"-v",
"--verbose"
],
"nargs": 0
},
{
"name": "directoryperdb",
"type" : "optional",
"help": "if dbpath specified, each db is in a separate directory",
"cmd_arg": [
"--directoryperdb"
],
"nargs": 0
},
{
"name": "journal",
"type" : "optional",
"help": "enable journaling",
"cmd_arg": [
"--journal"
],
"nargs": 0
},
{
"name": "collection",
"type" : "optional",
"displayName": "COLLECTION",
"help": " collection to use (some commands)",
"cmd_arg": [
"-c",
"--collection"
],
"nargs": 1
},
{
"name": "objcheck",
"type" : "optional",
"help": "validate object before inserting",
"cmd_arg": [
"--objectcheck"
],
"nargs": 0
},
{
"name": "filter",
"type" : "optional",
"displayName": "FILTER",
"help": "filter to apply before inserting",
"cmd_arg": [
"--filter"
],
"nargs": 1
},
{
"name": "drop",
"type" : "optional",
"help": " drop each collection before import",
"cmd_arg": [
"--drop"
],
"nargs": 0
},
{
"name": "oplogReplay",
"type" : "optional",
"help": "replay oplog for point-in-time restore",
"cmd_arg": [
"--oplogReplay"
],
"nargs": 0
},
{
"name": "keepIndexVersion",
"type" : "optional",
"help": " don't upgrade indexes to newest version",
"cmd_arg": [
"--keepIndexVersion"
],
"nargs": 0
},
{
"name": "ipv6",
"type" : "optional",
"cmd_arg": "--ipv6",
"nargs": 0,
"help": "enable IPv6 support (disabled by default)"
},
{
"name": "authenticationDatabase",
"type" : "optional",
"cmd_arg": "--authenticationDatabase",
"nargs": 1,
"help": "user source (defaults to dbname). 2.4.x or greater only."
}
]
},
#### resync-secondary ####
{
"prog": "resync-secondary",
"group": "serverCommands",
"shortDescription" : "Resyncs a secondary member",
"description" : "Resyncs a secondary member",
"function": "mongoctl.commands.server.resync_secondary.resync_secondary_command",
"args": [
{
"name": "server",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER_ID",
"help": "a valid server id"
},
{
"name": "assumeLocal",
"type" : "optional",
"cmd_arg": "--assume-local",
"nargs": 0,
"help": "Assumes that the server is running on local"
" host. This will skip local address/dns check",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
{
"prog": "start-cluster",
"group": "clusterCommands",
"shortDescription": "Start specified cluster",
"description": "Start specified cluster",
"function": "mongoctl.commands.cluster.control.start_cluster_command",
"args": [
{
"name": "ClusterId",
"type" : "positional",
"nargs": 1,
"displayName": "CLUSTER_ID",
"help": "A valid cluster id"
}
]
},
{
"prog": "stop-cluster",
"group": "clusterCommands",
"shortDescription": "Stop specified cluster",
"description": "Stop specified cluster",
"function": "mongoctl.commands.cluster.control.stop_cluster_command",
"args": [
{
"name": "ClusterId",
"type" : "positional",
"nargs": 1,
"displayName": "CLUSTER_ID",
"help": "A valid cluster id"
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### configure-cluster ####
{
"prog": "configure-cluster",
"group": "clusterCommands",
"shortDescription" : "initiate or reconfigure a cluster",
"description" : "Initiaties or reconfigures a specific cluster. "
"This command is \nused both to initiate the "
"cluster for the first time \nand to reconfigure "
"the cluster.",
"function": "mongoctl.commands.cluster.configure.configure_cluster_command",
"args": [
{
"name": "cluster",
"type" : "positional",
"nargs": 1,
"displayName": "CLUSTER_ID",
"help": "A valid cluster id"
},
{
"name": "dryRun",
"type" : "optional",
"cmd_arg": ["-n" , "--dry-run"],
"nargs": 0,
"help": "prints configure cluster db command to execute "
"without executing it",
"default": False
},
{
"name": "forcePrimaryServer",
"type" : "optional",
"displayName": "SERVER",
"cmd_arg": [ "-f", "--force"],
"nargs": 1,
"help": "force member to become primary",
"default": None
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### list-clusters ####
{
"prog": "list-clusters",
"group": "clusterCommands",
"shortDescription" : "show list of configured clusters",
"description" : "Show list of configured servers",
"function": "mongoctl.commands.cluster.list_clusters.list_clusters_command"
},
#### show-cluster ####
{
"prog": "show-cluster",
"group": "clusterCommands",
"shortDescription" : "show cluster's configuration",
"description" : "Shows specific cluster's configuration",
"function": "mongoctl.commands.cluster.show.show_cluster_command",
"args": [
{
"name": "cluster",
"type" : "positional",
"nargs": 1,
"displayName": "CLUSTER_ID",
"help": "A valid cluster id"
}
]
},
#### install ####
# TODO: Remove and replace by install-mongodb
{
"prog": "install",
"hidden": True,
"group": "adminCommands",
"shortDescription" : "install MongoDB",
"description" : "install MongoDB",
"function": "mongoctl.commands.misc.install.install_command",
"args": [
{
"name": "version",
"type" : "positional",
"nargs": "?",
"displayName": "VERSION",
"help": "MongoDB version to install"
}
]
},
#### uninstall ####
# TODO: Remove and replace by uninstall-mongodb
{
"prog": "uninstall",
"hidden": True,
"group": "adminCommands",
"shortDescription" : "uninstall MongoDB",
"description" : "uninstall MongoDB",
"function": "mongoctl.commands.misc.install.uninstall_command",
"args": [
{
"name": "version",
"type" : "positional",
"nargs": 1,
"displayName": "VERSION",
"help": "MongoDB version to uninstall"
}
]
},
#### install-mongodb ####
{
"prog": "install-mongodb",
"group": "adminCommands",
"shortDescription" : "install MongoDB",
"description" : "install MongoDB",
"function": "mongoctl.commands.misc.install.install_command",
"args": [
{
"name": "version",
"type" : "positional",
"nargs": "?",
"displayName": "VERSION",
"help": "MongoDB version to install"
}
]
},
#### uninstall-mongodb ####
{
"prog": "uninstall-mongodb",
"group": "adminCommands",
"shortDescription" : "uninstall MongoDB",
"description" : "uninstall MongoDB",
"function": "mongoctl.commands.misc.install.uninstall_command",
"args": [
{
"name": "version",
"type" : "positional",
"nargs": 1,
"displayName": "VERSION",
"help": "MongoDB version to uninstall"
}
]
},
#### list-versions ####
{
"prog": "list-versions",
"group": "adminCommands",
"shortDescription" : "list all available MongoDB installations on"
" this machine",
"description" : "list all available MongoDB installations on"
" this machine",
"function": "mongoctl.commands.misc.install.list_versions_command",
},
#### print-uri ####
{
"prog": "print-uri",
"group": "miscCommands",
"shortDescription" : "prints connection URI for a"
" server or cluster",
"description" : "Prints MongoDB connection URI of the specified"
" server or clurter",
"function": "mongoctl.commands.misc.print_uri.print_uri_command",
"args": [
{
"name": "id",
"type" : "positional",
"nargs": 1,
"displayName": "SERVER or CLUSTER ID",
"help": "Server or cluster id"
},
{
"name": "db",
"type" : "optional",
"help": "database name",
"cmd_arg": [
"-d",
"--db"
],
"nargs": 1
}
]
},
{
"prog": "add-shard",
"group": "shardCommands",
"shortDescription" : "Adds specified shard to sharded cluster",
"description" : "Adds specified shard to sharded cluster",
"function": "mongoctl.commands.sharding.sharding.add_shard_command",
"args": [
{
"name": "shardId",
"type" : "positional",
"nargs": 1,
"displayName": "SHARD_ID",
"help": "A valid shard cluster id or shard server id"
},
{
"name": "dryRun",
"type" : "optional",
"cmd_arg": ["-n" , "--dry-run"],
"nargs": 0,
"help": "prints configure cluster db command to execute "
"without executing it",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
{
"prog": "remove-shard",
"group": "shardCommands",
"shortDescription": "Removes shard from sharded cluster",
"description": "Removes shard from sharded cluster",
"function": "mongoctl.commands.sharding.sharding.remove_shard_command",
"args": [
{
"name": "shardId",
"type" : "positional",
"nargs": 1,
"displayName": "SHARD_ID",
"help": "A valid shard cluster id or shard server id"
},
{
"name": "dryRun",
"type" : "optional",
"cmd_arg": ["-n" , "--dry-run"],
"nargs": 0,
"help": "prints db command to execute "
"without executing it",
"default": False
},
{
"name": "unshardedDataDestination",
"displayName": "SHARD_ID",
"type" : "optional",
"cmd_arg": ["--move-unsharded-data-to"],
"nargs": 1,
"help": "Moves unsharded to data to specified shard id",
"default": None
},
{
"name": "synchronized",
"type" : "optional",
"cmd_arg": ["--synchronized"],
"nargs": 0,
"help": "synchronized",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
},
#### configure-cluster ####
{
"prog": "configure-shard-cluster",
"group": "shardCommands",
"shortDescription" : "configures a sharded cluster",
"description" : "configures a sharded cluster",
"function": "mongoctl.commands.sharding.sharding.configure_shard_cluster_command",
"args": [
{
"name": "cluster",
"type" : "positional",
"nargs": 1,
"displayName": "CLUSTER_ID",
"help": "A valid cluster id"
},
{
"name": "dryRun",
"type" : "optional",
"cmd_arg": ["-n" , "--dry-run"],
"nargs": 0,
"help": "prints configure cluster db command to execute "
"without executing it",
"default": False
},
{
"name": "username",
"type" : "optional",
"help": "admin username",
"cmd_arg": [
"-u"
],
"nargs": 1
},
{
"name": "password",
"type" : "optional",
"help": "admin password",
"cmd_arg": [
"-p"
],
"nargs": "?"
}
]
}
]
}
|
mit
| -2,101,069,160,173,738,200 | 33.639429 | 108 | 0.322226 | false |
ajyoon/brown
|
brown/core/ped_and_star.py
|
1
|
1748
|
from brown.core.music_text import MusicText
from brown.core.object_group import ObjectGroup
from brown.core.spanner import Spanner
from brown.core.staff_object import StaffObject
from brown.utils.units import GraphicUnit
class PedAndStar(ObjectGroup, Spanner, StaffObject):
"""Pedal notation in the ornate 'Ped' and release star style."""
def __init__(self,
start, start_parent,
end, end_parent=None):
"""
Args:
start (Point or tuple init args): The position of the start-pedal
mark relative to start_parent.
start_parent (GraphicObject): An object either in a Staff or
a staff itself. This object will become the line's parent.
end (Point): The position of the release-pedal mark relative
to end_parent (if provided).
end_parent (GraphicObject): An object either in a Staff or
a staff itself. The root staff of this *must* be the same
as the root staff of `start_parent`. If omitted, the
stop point is relative to the start point.
"""
ObjectGroup.__init__(self, start, start_parent)
Spanner.__init__(self, end, end_parent)
StaffObject.__init__(self, self.parent)
# Add opening pedal mark
# (GraphicObject init handles registration with ObjectGroup)
self.depress_mark = MusicText((GraphicUnit(0), GraphicUnit(0)),
'keyboardPedalPed',
parent=self)
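        # Add closing release mark (the star) at the spanner's end position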
self.lift_mark = MusicText(self.end_pos,
'keyboardPedalUp',
parent=self.end_parent)
|
gpl-3.0
| -2,398,806,068,909,348,400 | 43.820513 | 77 | 0.586957 | false |
knowmetools/km-api
|
km_api/functional_tests/know_me/profile/profile_items/test_update_profile_item.py
|
1
|
2440
|
from rest_framework import status
def test_update_attach_media_resource(
api_client,
enable_premium_requirement,
media_resource_factory,
profile_item_factory,
user_factory,
):
"""
If a premium user has an existing profile item, they should be able
to attach a media resource to it.
Regression test for #317.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
item = profile_item_factory(topic__profile__km_user__user=user)
resource = media_resource_factory(km_user=user.km_user)
url = f"/know-me/profile/profile-items/{item.pk}/"
data = {"media_resource_id": resource.id}
response = api_client.patch(url, data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["media_resource"]["id"] == resource.pk
def test_update_detach_media_resource(
api_client,
enable_premium_requirement,
media_resource_factory,
profile_item_factory,
user_factory,
):
"""
Premium users should be able to detach media resources from their
profile items.
Regression test for #321
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
resource = media_resource_factory(km_user__user=user)
item = profile_item_factory(
media_resource=resource, topic__profile__km_user=resource.km_user
)
url = f"/know-me/profile/profile-items/{item.pk}/"
data = {"media_resource_id": ""}
response = api_client.patch(url, data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["media_resource"] is None
def test_update_profile_item(
api_client, enable_premium_requirement, profile_item_factory, user_factory
):
"""
Premium users should be able to update profile items that they own.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
item = profile_item_factory(
topic__profile__km_user__user=user, name="Old Name"
)
url = f"/know-me/profile/profile-items/{item.pk}/"
data = {"name": "New Name"}
response = api_client.patch(url, data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["name"] == data["name"]
|
apache-2.0
| 6,564,945,373,334,430,000 | 29.123457 | 78 | 0.67541 | false |
konrado0/vosqa
|
forum/models/tag.py
|
1
|
2474
|
import datetime
from base import *
from django.conf import settings as django_settings
from django.core.cache.backends.base import BaseCache
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.utils.http import urlquote
from forum import modules
class ActiveTagManager(CachedManager):
use_for_related_fields = True
def get_queryset(self):
return super(ActiveTagManager, self).get_queryset().exclude(used_count__lt=1)
class Tag(BaseModel):
name = models.CharField(max_length=255, unique=True)
created_by = models.ForeignKey(User, related_name='created_tags')
created_at = models.DateTimeField(default=datetime.datetime.now, blank=True, null=True)
marked_by = models.ManyToManyField(User, related_name="marked_tags", through="MarkedTag")
# Denormalised data
used_count = models.PositiveIntegerField(default=0)
active = ActiveTagManager()
class Meta:
ordering = ('-used_count', 'name')
app_label = 'forum'
def __unicode__(self):
return force_unicode(self.name)
def add_to_usage_count(self, value):
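        # Clamp the denormalised counter at zero; otherwise defer the increment
        # to the database via an F() expression so concurrent updates don't clobber it.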
if self.used_count + value < 0:
self.used_count = 0
else:
self.used_count = models.F('used_count') + value
def cache_key(self):
return self._generate_cache_key(Tag.safe_cache_name(self.name))
@classmethod
def safe_cache_name(cls, name):
return "".join([str(ord(c)) for c in name])
@classmethod
def infer_cache_key(cls, querydict):
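        # Only direct name lookups are cacheable; derive the key from the tag
        # name and truncate it to the backend's maximum key length.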
if 'name' in querydict:
cache_key = cls._generate_cache_key(cls.safe_cache_name(querydict['name']))
if len(cache_key) > django_settings.CACHE_MAX_KEY_LENGTH:
cache_key = cache_key[:django_settings.CACHE_MAX_KEY_LENGTH]
return cache_key
return None
@classmethod
def value_to_list_on_cache_query(cls):
return 'name'
@models.permalink
def get_absolute_url(self):
return ('tag_questions', (), {'tag': urlquote(self.name)})
class MarkedTag(models.Model):
TAG_MARK_REASONS = (('good', _('interesting')), ('bad', _('ignored')))
tag = models.ForeignKey(Tag, related_name='user_selections')
user = models.ForeignKey(User, related_name='tag_selections')
reason = models.CharField(max_length=16, choices=TAG_MARK_REASONS)
class Meta:
app_label = 'forum'
|
gpl-3.0
| -5,613,400,704,126,808,000 | 31.552632 | 99 | 0.658044 | false |
TheAlgorithms/Python
|
backtracking/coloring.py
|
1
|
3219
|
"""
Graph Coloring also called "m coloring problem"
consists of coloring given graph with at most m colors
such that no adjacent vertices are assigned same color
Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring
"""
from typing import List
def valid_coloring(
neighbours: List[int], colored_vertices: List[int], color: int
) -> bool:
"""
For each neighbour check if coloring constraint is satisfied
If any of the neighbours fail the constraint return False
If all neighbours validate constraint return True
>>> neighbours = [0,1,0,1,0]
>>> colored_vertices = [0, 2, 1, 2, 0]
>>> color = 1
>>> valid_coloring(neighbours, colored_vertices, color)
True
>>> color = 2
>>> valid_coloring(neighbours, colored_vertices, color)
False
"""
# Does any neighbour not satisfy the constraints
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(neighbours)
)
def util_color(
graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int
) -> bool:
"""
Pseudo-Code
Base Case:
1. Check if coloring is complete
1.1 If complete return True (meaning that we successfully colored graph)
Recursive Step:
    2. Iterates over each color:
        Check if current coloring is valid:
            2.1. Color given vertex
            2.2. Do recursive call to check if this coloring leads to solving the problem
            2.3. If current coloring leads to a solution, return
            2.4. Uncolor given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
# Base Case
if index == len(graph):
return True
# Recursive Step
for i in range(max_colors):
if valid_coloring(graph[index], colored_vertices, i):
# Color current vertex
colored_vertices[index] = i
# Validate coloring
if util_color(graph, max_colors, colored_vertices, index + 1):
return True
# Backtrack
colored_vertices[index] = -1
return False
def color(graph: List[List[int]], max_colors: int) -> List[int]:
"""
    Wrapper function that calls the subroutine util_color,
    which will either return True or False.
    If True is returned, the colored_vertices list is filled with correct colorings
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> color(graph, max_colors)
[0, 1, 0, 2, 0]
>>> max_colors = 2
>>> color(graph, max_colors)
[]
"""
colored_vertices = [-1] * len(graph)
if util_color(graph, max_colors, colored_vertices, 0):
return colored_vertices
return []
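# Minimal usage sketch (not part of the original module; the 4-node cycle
# below is an assumed example input). An even cycle is bipartite, so two
# colors suffice.
if __name__ == "__main__":
    four_cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(four_cycle, 2))  # expected: [0, 1, 0, 1]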
|
mit
| -3,990,282,754,374,691,000 | 27.236842 | 84 | 0.572538 | false |
vincent-noel/libSigNetSim
|
libsignetsim/model/sbml/HasUnits.py
|
1
|
2773
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file ...
"""
from libsignetsim.model.sbml.UnitDefinition import UnitDefinition
from libsignetsim.settings.Settings import Settings
class HasUnits(object):
def __init__ (self, model):
self.__model = model
self.__unit = None
self.__builtinUnit = False
def readSbml(self, sbml_object, sbml_level=Settings.defaultSbmlLevel, sbml_version=Settings.defaultSbmlVersion):
""" Reads a parameter from a sbml file """
if sbml_object.isSetUnits():
if self.__model.listOfUnitDefinitions.containsSbmlId(sbml_object.getUnits()):
self.__unit = sbml_object.getUnits()
elif sbml_level < 3:
self.__builtinUnit = True
self.__unit = sbml_object.getUnits()
def writeSbml(self, sbml_object, sbml_level=Settings.defaultSbmlLevel, sbml_version=Settings.defaultSbmlVersion):
""" Writes a parameter to a sbml file """
if self.__unit is not None:
if self.__builtinUnit is False:
sbml_object.setUnits(self.__unit)
if self.__builtinUnit is True and sbml_level < 3:
sbml_object.setUnits(self.__unit)
def new(self, unit=None):
		if unit is not None:
self.setUnits(unit)
elif self.__model.getSubstanceUnits() == self.__model.getDefaultSubstanceUnits():
self.setDefaultSubstanceUnits()
def setUnits(self, unit, prefix=""):
if unit is not None:
self.__unit = prefix + unit.getSbmlId()
def getUnits(self):
if self.__unit is not None:
if self.__builtinUnit:
return self.__unit
else:
return self.__model.listOfUnitDefinitions.getBySbmlId(self.__unit)
else:
return None
def setUnitId(self, unit_id, prefix=""):
if unit_id is not None:
self.__unit = prefix + unit_id
def getUnitId(self):
return self.__unit
def hasUnits(self):
return self.__unit is not None
def setDefaultVolumeUnit(self):
self.__unit = "volume"
def copy(self, obj, usids_subs={}):
if obj.getUnitId() in list(usids_subs.keys()):
self.__unit = usids_subs[obj.getUnitId()]
else:
self.__unit = obj.getUnitId()
|
gpl-3.0
| -7,774,924,472,220,570,000 | 25.92233 | 114 | 0.707176 | false |
ColeFrench/python-analysis
|
CORGIS/analysis.py
|
1
|
2928
|
#!/usr/bin/env python3
import sys
from pprint import PrettyPrinter
from weather.data import weather
from weather.analysis import storage
from weather.analysis import visualization as vis
from geopy.geocoders import GoogleV3
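# Module-level working state: the full CORGIS weather records, the current
# (possibly restricted) subset, and the Weather object wrappers built from it.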
raw_data = []
data = []
weather_objs = []
pprinter = PrettyPrinter()
geolocator = GoogleV3()
raw_data = weather.get_weather()
data = raw_data
weather_objs = [storage.Weather(datum) for datum in data]
def restrict_to_range(start=None, stop=None, step=None):
data[:] = raw_data[start:stop:step]
weather_objs[:] = [storage.Weather(datum) for datum in data]
return len(data)
def restrict_by_latitude(latitude, tolerance=0):
for weather_obj in weather_objs:
        if weather_obj.get_location() is None:
weather_obj.set_location(geolocator)
weather_objs[:] = storage.get_by_coords(weather_objs, latitude, tolerance)
def restrict_by_longitude(longitude, tolerance=0):
for weather_obj in weather_objs:
if weather_obj.get_location() == None:
weather_obj.set_location(geolocator)
    weather_objs[:] = storage.get_by_coords(weather_objs, longitude=longitude,
                                            longitude_tolerance=tolerance)
def restrict_by_location(latitude, longitude, latitude_tolerance=0, longitude_tolerance=0):
restrict_by_latitude(latitude, latitude_tolerance)
restrict_by_longitude(longitude, longitude_tolerance)
# for weather_obj in weather_objs:
# weather_obj.set_location(geolocator)
#
# storage.get_by_coords(weather_objs, latitude,
# latitude_tolerance, longitude, longitude_tolerance)
def print_data():
pprinter.pprint(data)
def plot_data(key, fit_line=True):
vis.plot_over_time(weather_objs, key, fit_line)
def main():
if len(sys.argv) < 2:
raise SyntaxError('too few program arguments')
# A dictionary of commands mapped to the additional arguments they require.
commands = {
'plot': 2,
'loc': 6,
'lat': 4,
'long': 4,
'struct': 1
}
command = sys.argv[1]
if command not in commands:
raise SyntaxError('invalid command')
if len(sys.argv) < commands[command] + 2:
raise SyntaxError('too few command arguments')
restrict_to_range(stop=int(sys.argv[2]))
if command == 'struct':
print_data()
elif command == 'plot':
plot_data(' '.join(sys.argv[3:]))
elif command == 'loc':
restrict_by_location(float(
sys.argv[-4]), float(sys.argv[-2]), float(sys.argv[-3]), float(sys.argv[-1]))
plot_data(' '.join(sys.argv[3:-4]))
else:
if command == 'lat':
restrict_by_latitude(float(sys.argv[-2]), float(sys.argv[-1]))
else: # if command == 'long'
restrict_by_longitude(float(sys.argv[-2]), float(sys.argv[-1]))
plot_data(' '.join(sys.argv[3:-2]))
if __name__ == '__main__':
main()
|
mit
| -7,566,473,320,993,610,000 | 26.111111 | 91 | 0.635929 | false |
state-hiu/ittc-server-django
|
ittc/logs.py
|
1
|
6788
|
import os
import sys
import httplib2
import base64
import math
import copy
import string
import datetime
import email.utils as eut
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.core.cache import cache, caches, get_cache
from django.http import Http404
from geojson import Polygon, Feature, FeatureCollection, GeometryCollection
from .stats import buildStats, incStats
import iso8601
import time
import glob
#from ittc.source.models import TileSource
from ittc.cache.tasks import taskIncStats
http_client = httplib2.Http()
def clearLogs():
# Import Gevent and monkey patch
from gevent import monkey
monkey.patch_all()
# Init Mongo Client
from pymongo import MongoClient
#client = MongoClient('localhost', 27017)
client = MongoClient('/tmp/mongodb-27017.sock')
db = client.ittc
# Clear Logs
db.drop_collection(settings.LOG_REQUEST_COLLECTION)
def reloadLogs():
# Import Gevent and monkey patch
from gevent import monkey
monkey.patch_all()
# Init Mongo Client
from pymongo import MongoClient
#client = MongoClient('localhost', 27017)
client = MongoClient('/tmp/mongodb-27017.sock')
db = client.ittc
# Clear Logs
db.drop_collection(settings.LOG_REQUEST_COLLECTION)
# Reload Logs
log_root = settings.LOG_REQUEST_ROOT
if log_root:
log_files = glob.glob(log_root+os.sep+"requests_tiles_*.tsv")
if log_files:
collection = db[settings.LOG_REQUEST_COLLECTION]
for log_file in log_files:
reloadLog(log_file,collection)
def reloadLog(path_file, collection):
if path_file:
if os.path.exists(path_file):
lines = None
with open(path_file,'r') as f:
lines = f.readlines()
if lines:
documents = []
for line in lines:
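                    # Each TSV row: status, origin, source, z, x, y, ip, ISO-8601 timestamp.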
values = line.rstrip('\n').split("\t")
status = values[0]
tileorigin = values[1]
tilesource = values[2]
z = values[3]
x = values[4]
y = values[5]
ip = values[6]
#dt = datetime.datetime.strptime(values[6],'YYYY-MM-DDTHH:MM:SS.mmmmmm')
dt = iso8601.parse_date(values[7])
location = z+"/"+x+"/"+y
r = buildTileRequestDocument(tileorigin, tilesource, x, y, z, status, dt, ip)
documents.append(r)
#collection.insert_one(r)
#insert_many available in 3.0, which is still in Beta
#collection.insert_many(documents, ordered=False)
collection.insert(documents, continue_on_error=True)
def buildTileRequestDocument(tileorigin, tilesource, x, y, z, status, datetime, ip):
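    # Shape of a single tile-request log document as stored in MongoDB; the
    # year/month/date fields are pre-formatted strings alongside the ISO timestamp.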
r = {
'ip': ip,
'origin': tileorigin if tileorigin else "",
'source': tilesource,
'location': z+'/'+x+'/'+y,
'z': z,
'status': status,
'year': datetime.strftime('%Y'),
'month': datetime.strftime('%Y-%m'),
'date': datetime.strftime('%Y-%m-%d'),
'date_iso': datetime.isoformat()
}
return r
def logTileRequest(tileorigin,tilesource, x, y, z, status, datetime, ip):
#starttime = time.clock()
#==#
log_root = settings.LOG_REQUEST_ROOT
#log_format = settings.LOG_REQUEST_FORMAT['tile_request']
log_format = settings.LOG_REQUEST_FORMAT
if log_root and log_format:
#if not os.path.exists(log_root):
# os.makedirs(log_root)
log_file = log_root+os.sep+"requests_tiles_"+datetime.strftime('%Y-%m-%d')+".tsv"
with open(log_file,'a') as f:
line = log_format.format(status=status,tileorigin=tileorigin.name,tilesource=tilesource.name,z=z,x=x,y=y,ip=ip,datetime=datetime.isoformat())
f.write(line+"\n")
# Import Gevent and monkey patch
from gevent import monkey
monkey.patch_all()
# Update MongoDB
from pymongo import MongoClient
client = None
db = None
r = None
try:
#client = MongoClient('localhost', 27017)
client = MongoClient('/tmp/mongodb-27017.sock')
db = client.ittc
r = buildTileRequestDocument(tileorigin.name,tilesource.name, x, y, z, status, datetime, ip)
except:
client = None
db = None
errorline = "Error: Could not connet to log database. Most likely issue with connection pool"
error_file = settings.LOG_ERRORS_ROOT+os.sep+"requests_tiles_"+datetime.strftime('%Y-%m-%d')+"_errors.txt"
with open(error_file,'a') as f:
f.write(errorline+"\n")
# Update Mongo Logs
if client and db and r:
try:
db[settings.LOG_REQUEST_COLLECTION].insert(r, w=0)
except:
errorline = "Error: Could not write log entry into database. Most likely socket issue. For the following: "+line
error_file = settings.LOG_ERRORS_ROOT+os.sep+"requests_tiles_"+datetime.strftime('%Y-%m-%d')+"_errors.txt"
with open(error_file,'a') as f:
f.write(errorline+"\n")
# Update Mongo Aggregate Stats
stats = buildStats(r)
# Sync stats
if settings.ASYNC_STATS:
try:
taskIncStats.apply_async(
args=[stats],
kwargs=None,
queue="statistics")
except:
errorline = "Error: Could not queue taskIncStats. Most likely issue with rabbitmq."
error_file = settings.LOG_ERRORS_ROOT+os.sep+"requests_tiles_"+datetime.strftime('%Y-%m-%d')+"_errors.txt"
with open(error_file,'a') as f:
f.write(errorline+"\n")
else:
incStats(db, stats)
#print "Time Elapsed: "+str(time.clock()-starttime)
def logTileRequestError(line, datetime):
log_root = settings.LOG_ERRORS_ROOT
if log_root:
#if not os.path.exists(log_root):
# os.makedirs(log_root)
error_file = log_root+os.sep+"requests_tiles_"+datetime.strftime('%Y-%m-%d')+"_errors.txt"
with open(error_file,'a') as f:
f.write(line+"\n")
|
mit
| -7,324,074,851,903,065,000 | 34.726316 | 153 | 0.568503 | false |
fedora-infra/the-new-hotness
|
hotness/hotness_consumer.py
|
1
|
23833
|
# -*- coding: utf-8 -*-
#
# Copyright © 2021 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
import logging
import requests
from requests.packages.urllib3.util import retry
from anitya_schema.project_messages import ProjectVersionUpdated
from hotness.config import config
from hotness.domain import Package
from hotness.builders import Koji
from hotness.databases import Cache
from hotness.notifiers import Bugzilla as bz_notifier, FedoraMessaging
from hotness.patchers import Bugzilla as bz_patcher
from hotness.validators import MDApi, Pagure, PDC
from hotness.requests import (
BuildRequest,
InsertDataRequest,
NotifyRequest,
PackageRequest,
RetrieveDataRequest,
SubmitPatchRequest,
)
from hotness.use_cases import (
InsertDataUseCase,
NotifyUserUseCase,
PackageScratchBuildUseCase,
PackageCheckUseCase,
RetrieveDataUseCase,
SubmitPatchUseCase,
)
_logger = logging.getLogger(__name__)
# Prefix used for the topic of published messages
PREFIX = "hotness"
class HotnessConsumer(object):
"""
A fedora-messaging consumer that is the heart of the-new-hotness.
This consumer subscribes to the following topics:
* 'org.fedoraproject.prod.buildsys.task.state.change'
handled by :method:`BugzillaTicketFiler.handle_buildsys_scratch`
* 'org.release-monitoring.prod.anitya.project.version.update'
handled by :method:`BugzillaTicketFiler.handle_anitya_version_update`
* 'org.release-monitoring.prod.anitya.project.map.new'
handled by :method:`BugzillaTicketFiler.handle_anitya_map_new`
Attributes:
short_desc_template (str): Short description template for notifier
description_template (str): Template for the message content
distro (str): Distro to watch the updates for
builder_koji (`Koji`): Koji builder to use for scratch builds
database_cache (`Cache`): Database that will be used for holding key/value
for build id/bug id
notifier_bugzilla (`bz_notifier`): Bugzilla notifier for creating and updating
tickets in Bugzilla
notifier_fedora_messaging (`FedoraMessaging`): Fedora messaging notifier to send
fedora messages to broker
        patcher_bugzilla (`bz_patcher`): Bugzilla patcher for attaching patches to tickets
in Bugzilla
validator_mdapi (`MDApi`): MDApi validator to retrieve the metadata for package
validator_pagure (`Pagure`): Pagure dist git for retrieval of notification settings
validator_pdc (`PDC`): PDC validator to check if package is retired
"""
def __init__(self):
"""
Consumer initialization.
It loads the configuration and then initializes all external systems
        that the use cases will call.
"""
# Prepare requests session
requests_session = requests.Session()
timeout = (
config["connect_timeout"],
config["read_timeout"],
)
retries = config["requests_retries"]
retry_conf = retry.Retry(
total=retries, connect=retries, read=retries, backoff_factor=1
)
retry_conf.BACKOFF_MAX = 5
requests_session.mount(
"http://", requests.adapters.HTTPAdapter(max_retries=retry_conf)
)
requests_session.mount(
"https://", requests.adapters.HTTPAdapter(max_retries=retry_conf)
)
# Initialize attributes
self.short_desc_template = config["bugzilla"]["short_desc_template"]
self.description_template = config["bugzilla"]["description_template"]
self.explanation_url = config["bugzilla"]["explanation_url"]
self.distro = config["distro"]
self.repoid = config["repoid"]
self.builder_koji = Koji(
server_url=config["koji"]["server"],
web_url=config["koji"]["weburl"],
kerberos_args={
"krb_principal": config["koji"]["krb_principal"],
"krb_keytab": config["koji"]["krb_keytab"],
"krb_ccache": config["koji"]["krb_ccache"],
"krb_proxyuser": config["koji"]["krb_proxyuser"],
"krb_sessionopts": config["koji"]["krb_sessionopts"],
},
git_url=config["koji"]["git_url"],
user_email=tuple(config["koji"]["user_email"]),
opts=config["koji"]["opts"],
priority=config["koji"]["priority"],
target_tag=config["koji"]["target_tag"],
)
self.database_cache = Cache()
self.notifier_bugzilla = bz_notifier(
server_url=config["bugzilla"]["url"],
reporter=config["bugzilla"]["reporter"],
username=config["bugzilla"]["user"],
password=config["bugzilla"]["password"],
api_key=config["bugzilla"]["api_key"],
product=config["bugzilla"]["product"],
keywords=config["bugzilla"]["keywords"],
version=config["bugzilla"]["version"],
status=config["bugzilla"]["bug_status"],
)
self.notifier_fedora_messaging = FedoraMessaging(prefix=PREFIX)
self.patcher_bugzilla = bz_patcher(
server_url=config["bugzilla"]["url"],
username=config["bugzilla"]["user"],
password=config["bugzilla"]["password"],
api_key=config["bugzilla"]["api_key"],
)
self.validator_mdapi = MDApi(
url=config["mdapi_url"], requests_session=requests_session, timeout=timeout
)
self.validator_pagure = Pagure(
url=config["dist_git_url"],
requests_session=requests_session,
timeout=timeout,
)
self.validator_pdc = PDC(
url=config["pdc_url"],
requests_session=requests_session,
timeout=timeout,
branch=config["repoid"],
package_type="rpm",
)
def __call__(self, msg: "fedora_messaging.message.Message") -> None: # noqa: F821
"""
Called when a message is received from RabbitMQ queue.
Params:
msg: The message we received from the queue.
"""
topic, body, msg_id = msg.topic, msg.body, msg.id
_logger.debug("Received %r" % msg_id)
if topic.endswith("anitya.project.version.update"):
message = ProjectVersionUpdated(topic=topic, body=body)
self._handle_anitya_version_update(message)
elif topic.endswith("buildsys.task.state.change"):
self._handle_buildsys_scratch(msg)
def _handle_buildsys_scratch(
self, message: "fedora_messaging.message.Message" # noqa: F821
) -> None:
"""
Message handler for build messages.
        This handler checks if the build is in the database, checks if the build
        is in a completed state, and adds a follow-up comment to the Bugzilla issue.
Topic: `org.fedoraproject.prod.buildsys.task.state.change`
Params:
message: Message to process
"""
msg_id, body = message.id, message.body
instance = body["instance"]
if instance != "primary":
_logger.debug("Ignoring secondary arch task...")
return
method = body["method"]
if method != "build":
_logger.debug("Ignoring non-build task...")
return
task_id = body["info"]["id"]
# Retrieve the build_id with bz_id from cache
retrieve_data_request = RetrieveDataRequest(key=str(task_id))
retrieve_data_cache_use_case = RetrieveDataUseCase(self.database_cache)
response = retrieve_data_cache_use_case.retrieve(retrieve_data_request)
if not response:
_logger.error(
"Couldn't retrieve value for build %s from cache." % str(task_id)
)
return
if not response.value["value"]:
_logger.debug(
"ignoring [%s] as it's not one of our outstanding "
"builds" % str(task_id)
)
return
bz_id = response.value["value"]
_logger.info("Handling koji scratch msg %r" % msg_id)
# see koji.TASK_STATES for all values
done_states = {
"CLOSED": "completed",
"FAILED": "failed",
"CANCELED": "canceled",
}
state = body["new"]
if state not in done_states:
_logger.info("The build is not in done state. Dropping message.")
return
link = f"http://koji.fedoraproject.org/koji/taskinfo?taskID={task_id}"
# One last little switch-a-roo for stg
if ".stg." in message.topic:
link = f"http://koji.stg.fedoraproject.org/koji/taskinfo?taskID={task_id}"
owner = body["owner"]
srpm = body["srpm"]
target = ""
if body.get("info", {}).get("request"):
targets = set()
for item in body["info"]["request"]:
if not isinstance(item, (dict, list)) and not item.endswith(".rpm"):
targets.add(item)
if targets:
target = " for %s" % (self._list_to_series(targets))
texts_for_state = {
"FAILED": f"{owner}'s scratch build of {srpm}{target} failed",
"CLOSED": f"{owner}'s scratch build of {srpm}{target} completed",
"CANCELED": f"{owner}'s scratch build of {srpm}{target} was canceled",
}
text = texts_for_state[state]
description = text + " " + link
package_name = "-".join(srpm.split("-")[:-2])
package_version = srpm.split("-")[-2]
package = Package(
name=package_name, version=package_version, distro=self.distro
)
notify_request = NotifyRequest(
package=package, message=description, opts={"bz_id": int(bz_id)}
)
notifier_bugzilla_use_case = NotifyUserUseCase(self.notifier_bugzilla)
notifier_bugzilla_use_case.notify(notify_request)
def _list_to_series(
self, items: list, N: int = 3, oxford_comma: bool = True
) -> str:
"""Convert a list of things into a comma-separated string.
>>> list_to_series(['a', 'b', 'c', 'd'])
'a, b, and 2 others'
>>> list_to_series(['a', 'b', 'c', 'd'], N=4, oxford_comma=False)
'a, b, c and d'
Params:
items: List of strings to concatenate
N: Number of items to show in concatenated list
oxford_comma: Flag for setting if comma should be added before 'and'
Returns:
Concatenated string of items separated by comma
"""
# uniqify items + sort them to have predictable (==testable) ordering
items = list(sorted(set(items)))
if len(items) == 1:
return items[0]
if len(items) > N:
items[N - 1 :] = ["%i others" % (len(items) - N + 1)]
first = ", ".join(items[:-1])
conjunction = " and "
if oxford_comma and len(items) > 2:
conjunction = "," + conjunction
return first + conjunction + items[-1]
def _handle_anitya_version_update(self, message: ProjectVersionUpdated) -> None:
"""
Message handler for new versions found by Anitya.
This handler deals with new versions found by Anitya. A new upstream
release can map to several downstream packages, so each package in
        Rawhide (if any) is checked against the newly released version. If
they are older than the new version, a bug is filed.
Topic: `org.release-monitoring.prod.anitya.project.version.update`
Publishes to `update.drop` if:
* there is no mapping to Fedora
* any validation fails
* bug is already in Bugzilla
Publishes to `update.bug.file` if the bug is filled.
Params:
message: Message to process.
"""
_logger.info("Handling anitya msg %r" % message.id)
package = None
fedora_messaging_use_case = NotifyUserUseCase(self.notifier_fedora_messaging)
        # No mapping for the distribution we want to watch, just send the message and
# be done with it
if self.distro not in message.distros:
_logger.info(
"No %r mapping for %r. Dropping." % (self.distro, message.project_name)
)
package = Package(
name=message.project_name, version=message.version, distro=""
)
opts = {
"body": {
"trigger": {"msg": message.body, "topic": message.topic},
"reason": "anitya",
}
}
notify_request = NotifyRequest(
package=package, message="update.drop", opts=opts
)
fedora_messaging_use_case.notify(notify_request)
for mapping in message.mappings:
if mapping["distro"] == self.distro:
package = Package(
name=mapping["package_name"],
version=message.version,
distro=self.distro,
)
validation_output = self._validate_package(package)
# Check if validation failed
if validation_output["reason"]:
opts = {
"body": {
"trigger": {"msg": message.body, "topic": message.topic},
"reason": validation_output["reason"],
}
}
notify_request = NotifyRequest(
package=package, message="update.drop", opts=opts
)
fedora_messaging_use_case.notify(notify_request)
return
scratch_build = validation_output["scratch_build"]
current_version = validation_output["version"]
current_release = validation_output["release"]
# Comment on bugzilla
bz_id = self._comment_on_bugzilla_with_template(
package=package,
current_version=current_version,
current_release=current_release,
project_homepage=message.project_homepage,
project_id=message.project_id,
)
# Failure happened when communicating with bugzilla
if bz_id == -1:
opts = {
"body": {
"trigger": {"msg": message.body, "topic": message.topic},
"reason": "bugzilla",
}
}
notify_request = NotifyRequest(
package=package, message="update.drop", opts=opts
)
fedora_messaging_use_case.notify(notify_request)
return
# Send Fedora messaging notification
opts = {
"body": {
"trigger": {"msg": message.body, "topic": message.topic},
"bug": {"bug_id": bz_id},
"package": package.name,
}
}
notify_request = NotifyRequest(
package=package, message="update.bug.file", opts=opts
)
fedora_messaging_use_case.notify(notify_request)
# Do a scratch build
if scratch_build:
self._handle_scratch_build(package, bz_id)
def _validate_package(self, package: Package) -> dict:
"""
Validates the package with every external validator.
Used validators:
* Pagure (dist-git): To retrieve monitoring settings
* PDC: To check if package is retired or not
* MDAPI: To check if the package is newer
Params:
package: Package to validate
Returns:
Dictionary containing output from the validators.
Example:
{
# Monitoring setting for scratch build
"scratch_build": True,
# Current version in MDAPI
"version": "1.0.0",
# Current release in MDAPI
"release": 1,
# Reason for validation failure, empty if no validation successful
"reason": ""
}
"""
output = {"scratch_build": False, "version": "", "release": 0, "reason": ""}
# Check if we are monitoring the package
validate_request = PackageRequest(package)
validate_pagure_use_case = PackageCheckUseCase(self.validator_pagure)
response = validate_pagure_use_case.validate(validate_request)
# We encountered an issue during retrieving of monitoring settings
if not response:
_logger.error(
"Couldn't retrieve monitoring settings for %r. Dropping." % package.name
)
output["reason"] = "dist-git"
return output
# Maintainer doesn't want to monitor the package
if not response.value["monitoring"]:
_logger.info("Repo says not to monitor %r. Dropping." % package.name)
output["reason"] = "monitoring settings"
return output
output["scratch_build"] = response.value["scratch_build"]
# Check if the package is retired in PDC
validate_pdc_use_case = PackageCheckUseCase(self.validator_pdc)
response = validate_pdc_use_case.validate(validate_request)
# We encountered an issue with PDC
if not response:
_logger.error(
"Couldn't retrieve retired information for %r. Dropping." % package.name
)
output["reason"] = "pdc"
return output
# Package is retired
if response.value["retired"]:
_logger.info("Package %r is retired. Dropping." % package.name)
output["reason"] = "retired"
return output
# Check if the version is newer
validate_mdapi_use_case = PackageCheckUseCase(self.validator_mdapi)
response = validate_mdapi_use_case.validate(validate_request)
# We encountered an issue with MDAPI
if not response:
_logger.error("Couldn't retrieve metadata for %r. Dropping." % package.name)
output["reason"] = "mdapi"
return output
# Version in upstream is not newer
if not response.value["newer"]:
_logger.info(
"Message doesn't contain newer version of %r. Dropping." % package.name
)
output["reason"] = "not newer"
return output
output["version"] = response.value["version"]
output["release"] = response.value["release"]
return output
def _comment_on_bugzilla_with_template(
self,
package: Package,
current_version: str,
current_release: int,
project_homepage: str,
project_id: int,
) -> int:
"""
Comment on bugzilla bug using the configured template.
Params:
package: Package to comment on.
current_version: Current version of package in distro
current_release: Current release of package in distro
project_homepage: Upstream homepage
project_id: Project id in Anitya
Returns:
Bugzilla ticket id. -1 if failure was encountered.
"""
bz_id = -1
# Prepare message for bugzilla
description = self.description_template % dict(
latest_upstream=package.version,
repo_name=self.repoid,
repo_version=current_version,
repo_release=current_release,
url=project_homepage,
explanation_url=self.explanation_url,
projectid=project_id,
)
notify_request = NotifyRequest(
package=package,
message=description,
opts={
"bz_short_desc": self.short_desc_template
% dict(name=package.name, latest_upstream=package.version)
},
)
notifier_bugzilla_use_case = NotifyUserUseCase(self.notifier_bugzilla)
response = notifier_bugzilla_use_case.notify(notify_request)
if not response:
return bz_id
bz_id = response.value["bz_id"]
return bz_id
def _handle_scratch_build(self, package: Package, bz_id: int) -> None:
"""
Start scratch build in builder, insert build_id to database
and attach patch to bugzilla bug.
Params:
package: Package to start scratch build for
bz_id: Bugzilla bug id to reference in build
"""
build_request = BuildRequest(package=package, opts={"bz_id": bz_id})
build_koji_use_case = PackageScratchBuildUseCase(self.builder_koji)
response = build_koji_use_case.build(build_request)
if not response:
notify_request = NotifyRequest(
package=package,
message="Build failed:\n{}".format(response.value["message"]),
opts={"bz_id": bz_id},
)
notifier_bugzilla_use_case = NotifyUserUseCase(self.notifier_bugzilla)
notifier_bugzilla_use_case.notify(notify_request)
return
build_id = response.value["build_id"]
patch = response.value["patch"]
patch_filename = response.value["patch_filename"]
message = response.value["message"]
if message:
notify_request = NotifyRequest(
package=package,
message=message,
opts={"bz_id": bz_id},
)
notifier_bugzilla_use_case = NotifyUserUseCase(self.notifier_bugzilla)
notifier_bugzilla_use_case.notify(notify_request)
# Save the build_id with bz_id to cache
insert_data_request = InsertDataRequest(key=str(build_id), value=str(bz_id))
insert_data_cache_use_case = InsertDataUseCase(self.database_cache)
response = insert_data_cache_use_case.insert(insert_data_request)
# Attach patch to Bugzilla
submit_patch_request = SubmitPatchRequest(
package=package,
patch=patch,
opts={"bz_id": bz_id, "patch_filename": patch_filename},
)
submit_patch_bugzilla_use_case = SubmitPatchUseCase(self.patcher_bugzilla)
response = submit_patch_bugzilla_use_case.submit_patch(submit_patch_request)
|
lgpl-2.1
| 2,312,930,478,106,928,000 | 37.1312 | 91 | 0.574899 | false |
stuartsale/marg_iso
|
marg_iso/cluster.py
|
1
|
1589
|
import numpy as np
class posterior_cluster:
""" A class to store clusters in posterior space
"""
def __init__(self, data, probs):
""" __init__(data, probs)
Initialise a cluster in posterior space.
Parameters
----------
data : ndarray(float)
The coordinates of the data points associated
with the cluster
probs : ndarray(float)
The probabilities of each of the data points
"""
self.data = data
self.probs = probs
self.set_weight()
def __len__(self):
""" __len__()
Gives the number of points in the cluster
Returns
-------
The number of points in the cluster
"""
return self.data.shape[0]
def set_weight(self, weight=None):
""" set_weight(weight=None)
Sets the probability weight of the cluster. If no
weight is provided, the weight is set to the mean
of the probabilities of each point in the cluster
multiplied by the standard deviation of the cluster
member positions (with a floor).
Parameters
----------
weight : float
            The probability weight of the cluster
"""
if weight:
self.weight = weight
else:
self.weight = (np.mean(np.exp(self.probs))
* max(np.std(self.data[:, 1]), 0.01)
* max(np.std(self.data[:, 2]), 0.01))
|
bsd-3-clause
| 2,003,826,637,186,097,700 | 26.396552 | 64 | 0.500315 | false |
osantana/pactum
|
tests/test_exporters/test_openapi.py
|
1
|
10422
|
import pytest
from copy import copy
from pactum import Action, API, fields
from pactum import ListResource, Querystring
from pactum import Resource, Response, Request, Route
from pactum import Version, verbs
from pactum.exporters.openapi import NotSpecified, OpenAPIV3Exporter
def test_openapi_exporter_initialization():
exporter = OpenAPIV3Exporter()
assert exporter.result['openapi'] == OpenAPIV3Exporter.OPENAPI_VERSION
assert exporter.result['servers'] == []
assert exporter.result['info'] == {}
assert exporter.result['paths'] == {}
assert exporter.result['components'] == {'schemas': {}}
assert exporter.result['security'] == {}
assert exporter.result['tags'] == {}
def test_visit_api_for_api_with_versions_overrides_versions():
exporter = OpenAPIV3Exporter()
v1 = Version(name='v1', routes=[])
v2 = Version(name='v2', routes=[])
api = API(
name='Test API', versions=[v1, v2],
description='API for tests.'
)
exporter.visit_api(api)
assert exporter.result['info']['title'] == 'Test API'
assert exporter.result['info']['description'] == 'API for tests.'
assert exporter.result['info']['termsOfService'] == ''
assert exporter.result['info']['contact'] == {}
assert exporter.result['info']['license'] == {}
assert exporter.result['info']['version'] == 'v2'
assert api.versions == [v2]
def test_visit_version_does_nothing_to_openapi_spec():
exporter = OpenAPIV3Exporter()
result = copy(exporter.result)
version = Version(name='v1', routes=[])
exporter.visit_version(version)
assert exporter.result == result
def test_visit_route_sets_specs_paths():
exporter = OpenAPIV3Exporter()
route = Route(path='/test-path/', description='Route for tests.')
exporter.visit_route(route)
paths = exporter.result['paths']
assert '/test-path/' in paths
assert paths['/test-path/']['summary'] == ''
assert paths['/test-path/']['description'] == 'Route for tests.'
assert paths['/test-path/']['servers'] == []
def test_visit_action_populates_paths_verbs():
exporter = OpenAPIV3Exporter()
route = Route(path='/test-path/', description='Route for tests.')
request = Request(verb=verbs.GET)
action = Action(request=request, responses=[], description='Testing action')
action.parent = route
exporter.result['paths'] = {'/test-path/': {}}
exporter.visit_action(action)
assert 'get' in exporter.result['paths']['/test-path/']
parsed_action = exporter.result['paths']['/test-path/']['get']
assert parsed_action['description'] == 'Testing action'
assert parsed_action['summary'] == 'Testing action'
assert parsed_action['operationId'] == 'TestingAction'
assert parsed_action['deprecated'] is False
assert parsed_action['tags'] == []
assert parsed_action['externalDocs'] == []
assert parsed_action['parameters'] == []
assert parsed_action['responses'] == {}
assert parsed_action['callbacks'] == []
assert parsed_action['security'] == {}
assert parsed_action['servers'] == {}
def test_visit_action_populates_paths_verbs_with_parameters():
exporter = OpenAPIV3Exporter()
route = Route(path='/test-path/{code}', description='Route for tests.')
request = Request(verb=verbs.GET)
action = Action(request=request, responses=[], description='Testing action')
action.parent = route
exporter.result['paths'] = {'/test-path/{code}': {}}
exporter.visit_action(action)
assert 'get' in exporter.result['paths']['/test-path/{code}']
parsed_action = exporter.result['paths']['/test-path/{code}']['get']
assert parsed_action['description'] == 'Testing action'
assert parsed_action['summary'] == 'Testing action'
assert parsed_action['operationId'] == 'TestingAction'
assert parsed_action['deprecated'] is False
assert parsed_action['tags'] == []
assert parsed_action['externalDocs'] == []
assert len(parsed_action['parameters']) == 1
assert parsed_action['parameters'][0] == {
'name': 'code',
'in': 'path',
'required': True
}
assert parsed_action['responses'] == {}
assert parsed_action['callbacks'] == []
assert parsed_action['security'] == {}
assert parsed_action['servers'] == {}
def test_visit_action_populates_queries_with_route_qs():
exporter = OpenAPIV3Exporter()
querystring = Querystring(name='limit', type=fields.IntegerField)
route = Route(path='/test-path', querystrings=[querystring])
request = Request(verb=verbs.GET)
action = Action(request=request, responses=[], description='Testing action')
action.parent = route
exporter.result['paths'] = {'/test-path': {}}
exporter.visit_action(action)
assert 'get' in exporter.result['paths']['/test-path']
parsed_action = exporter.result['paths']['/test-path']['get']
assert len(parsed_action['parameters']) == 1
assert parsed_action['parameters'][0] == {
'name': 'limit',
'in': 'query',
'required': False,
'schema': {'type': 'integer'},
'description': '',
}
def test_visit_request_populates_requestBody_with_payload_reference(resource):
exporter = OpenAPIV3Exporter()
route = Route(path='/test-path/')
request = Request(verb=verbs.GET, payload=resource)
action = Action(request=request, responses=[])
action.parent = route
exporter.result['paths'] = {
'/test-path/': {'get': {}}
}
exporter.visit_request(request)
parsed_action = exporter.result['paths']['/test-path/']['get']
assert 'schema' in parsed_action['requestBody']
assert parsed_action['requestBody']['schema']['$ref'] == '#/components/schemas/Resource'
def test_visit_response_appends_response_objects_to_path(resource):
exporter = OpenAPIV3Exporter()
route = Route(path='/test-path/')
request = Request(verb=verbs.GET)
response = Response(
status=200, description='Response for testing',
headers=[('content-type', 'application/json')],
body=resource
)
action = Action(
request=request, responses=[response]
)
action.parent = route
exporter.result['paths'] = {
'/test-path/': {'get': {'responses': {}}}
}
exporter.visit_response(response)
parsed_responses = exporter.result['paths']['/test-path/']['get']['responses']
assert '200' in parsed_responses
expected_schema = {'schema': {'$ref': '#/components/schemas/Resource'}}
assert parsed_responses['200']['content']['application/json'] == expected_schema
assert parsed_responses['200']['headers'] == {}
def test_visit_resource_populates_schemas_component(resource):
exporter = OpenAPIV3Exporter()
exporter.visit_resource(resource)
assert 'Resource' in exporter.result['components']['schemas']
resource = exporter.result['components']['schemas']['Resource']
assert resource['type'] == 'object'
assert resource['required'] == []
assert resource['properties'] == {}
def test_visit_resource_populates_required_fields():
class TestResource(Resource):
fields = [fields.IntegerField(name='code', required=True)]
exporter = OpenAPIV3Exporter()
exporter.visit_resource(TestResource())
assert 'TestResource' in exporter.result['components']['schemas']
schema = exporter.result['components']['schemas']['TestResource']
assert schema['type'] == 'object'
assert schema['required'] == ['code']
assert schema['properties'] == {}
def test_visit_list_resource_populates_schemas_with_array_and_ref():
class TestResource(Resource):
pass
class TestListResource(ListResource):
resource = TestResource()
exporter = OpenAPIV3Exporter()
exporter.visit_list_resource(TestListResource())
assert 'TestListResource' in exporter.result['components']['schemas']
schema = exporter.result['components']['schemas']['TestListResource']
assert schema['type'] == 'array'
assert schema['items'] == {'$ref': '#/components/schemas/TestResource'}
def test_visit_field_populates_component_schema_with_field_type():
code_field = fields.IntegerField(name='code', required=True)
class TestResource(Resource):
fields = [code_field]
TestResource()
exporter = OpenAPIV3Exporter()
exporter.result['components']['schemas'] = {
'TestResource': {'properties': {}}
}
exporter.visit_field(code_field)
properties = exporter.result['components']['schemas']['TestResource']['properties']
assert 'code' in properties
assert properties['code']['type'] == 'integer'
def test_resource_field_visit_populated_with_resource_reference():
class OtherResource(Resource):
pass
resource_field = fields.ResourceField(name='other_resource', resource=OtherResource())
class TestResource(Resource):
fields = [resource_field]
TestResource()
exporter = OpenAPIV3Exporter()
exporter.result['components']['schemas'] = {
'TestResource': {'properties': {}}
}
exporter.visit_field(resource_field)
properties = exporter.result['components']['schemas']['TestResource']['properties']
assert 'other_resource' in properties
assert properties['other_resource'] == {'$ref': '#/components/schemas/OtherResource'}
def test_custom_field_with_extension():
class CustomField(fields.Field):
extensions = {'openapi.type': 'custom'}
custom_field = CustomField(name='test_name')
class TestResource(Resource):
fields = [custom_field]
TestResource()
exporter = OpenAPIV3Exporter()
exporter.result['components']['schemas'] = {
'TestResource': {'properties': {}}
}
exporter.visit_field(custom_field)
properties = exporter.result['components']['schemas']['TestResource']['properties']
assert 'test_name' in properties
assert properties['test_name']['type'] == 'custom'
def test_visit_for_custom_field_without_extension_raises_error():
class CustomField(fields.Field):
pass
custom_field = CustomField()
class TestResource(Resource):
fields = [custom_field]
TestResource()
exporter = OpenAPIV3Exporter()
exporter.result['components']['schemas'] = {
'TestResource': {'properties': {}}
}
with pytest.raises(NotSpecified):
exporter.visit_field(custom_field)
|
gpl-3.0
| 3,091,503,506,802,026,000 | 32.191083 | 92 | 0.659182 | false |
heromod/migrid
|
mig/shared/functionality/delres.py
|
1
|
6690
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# delres - Deletes a resource
# Copyright (C) 2003-2010 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Deletion of a resource"""
import os
import fcntl
import shared.returnvalues as returnvalues
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables, find_entry
from shared.resource import resource_owners
from shared.vgridaccess import unmap_resource
def signature():
"""Signature of the main function"""
defaults = {'unique_resource_name': REJECT_UNSET}
return ['resource_info', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
resource_list = accepted['unique_resource_name']
resource_id = resource_list.pop()
res_dir = os.path.join(configuration.resource_home, resource_id)
# Prevent unauthorized access
(owner_status, owner_list) = resource_owners(configuration, resource_id)
if not owner_status:
output_objects.append(
{'object_type': 'error_text', 'text'
: "Could not look up '%s' owners - no such resource?" % resource_id
})
return (output_objects, returnvalues.CLIENT_ERROR)
elif client_id not in owner_list:
logger.warning('user %s tried to delete resource "%s" not owned' % \
(client_id, resource_id))
output_objects.append({'object_type': 'error_text', 'text'
: "You can't delete '%s' - you don't own it!"
% resource_id})
output_objects.append({'object_type': 'link', 'destination':
'resman.py', 'class': 'infolink', 'title':
'Show resources', 'text': 'Show resources'})
return (output_objects, returnvalues.CLIENT_ERROR)
# Locking the access to resources and vgrids.
lock_path_vgrid = os.path.join(configuration.resource_home, "vgrid.lock")
lock_handle_vgrid = open(lock_path_vgrid, 'a')
fcntl.flock(lock_handle_vgrid.fileno(), fcntl.LOCK_EX)
lock_path_res = os.path.join(configuration.resource_home, "resource.lock")
lock_handle_res = open(lock_path_res, 'a')
fcntl.flock(lock_handle_res.fileno(), fcntl.LOCK_EX)
# Only resources that are down may be deleted.
# A "FE.PGID" file with a PGID in the resource's home directory means that
# the FE is running.
pgid_path = os.path.join(res_dir, 'FE.PGID')
fe_running = True
try:
# determine if fe runs by finding out if pgid is numerical
pgid_file = open(pgid_path, 'r')
fcntl.flock(pgid_file, fcntl.LOCK_EX)
pgid = pgid_file.readline().strip()
fcntl.flock(pgid_file, fcntl.LOCK_UN)
pgid_file.close()
if not pgid.isdigit():
raise Exception('FE already stopped')
except:
fe_running = False
if fe_running:
output_objects.append({'object_type': 'error_text', 'text'
: "Can't delete the running resource %s!"
% resource_id})
output_objects.append({'object_type': 'link', 'destination':
'resman.py', 'class': 'infolink', 'title':
'Show resources', 'text': 'Show resources'})
lock_handle_vgrid.close()
lock_handle_res.close()
return (output_objects, returnvalues.CLIENT_ERROR)
# Deleting the resource files, but not the resource directory itself.
# The resource directory is kept, to prevent hijacking of resource id's
try:
for name in os.listdir(res_dir):
file_path = os.path.join(res_dir, name)
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, err:
output_objects.append({'object_type': 'error_text', 'text'
: 'Deletion exception: ' + str(err)})
output_objects.append({'object_type': 'link', 'destination':
'resman.py', 'class': 'infolink', 'title':
'Show resources', 'text': 'Show resources'})
lock_handle_vgrid.close()
lock_handle_res.close()
return (output_objects, returnvalues.CLIENT_ERROR)
# The resource has been deleted, and OK is returned.
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = 'Resource Deletion'
output_objects.append({'object_type': 'header', 'text'
: 'Deleting resource'})
output_objects.append({'object_type': 'text', 'text'
                           : 'Successfully deleted resource: ' + resource_id})
output_objects.append({'object_type': 'link', 'destination': 'resman.py',
'class': 'infolink', 'title': 'Show resources',
'text': 'Show resources'})
# Releasing locks
lock_handle_vgrid.close()
lock_handle_res.close()
    # Remove resource from resource and vgrid caches (after releasing locks)
unmap_resource(configuration, resource_id)
return (output_objects, returnvalues.OK)
|
gpl-2.0
| -5,916,421,399,750,690,000 | 37.228571 | 81 | 0.618236 | false |
donlee888/JsObjects
|
Python/Prog282SimpleDb/scripts/simpledb.py
|
2
|
1593
|
#!/usr/bin/python
'''
Created on May 14, 2012
@author: Charlie
'''
import ConfigParser
import boto
import cgitb
cgitb.enable()
class MyClass(object):
def __init__(self, domain):
config = ConfigParser.RawConfigParser()
config.read('.boto')
key = config.get('Credentials', 'aws_access_key_id')
secretKey = config.get('Credentials', 'aws_secret_access_key')
self.conn = boto.connect_sdb(key, secretKey)
self.domain = domain
def showDomains(self):
domains = self.conn.get_all_domains()
print domains
def createDomain(self):
self.conn.create_domain(self.domain)
def addData(self, itemName, itemAttrs):
dom = self.conn.get_domain(self.domain)
item_name = itemName
dom.put_attributes(item_name, itemAttrs)
def startXml(self):
xml = "Content-Type: text/xml\n\n"
xml += "<?xml version='1.0'?>\n"
xml += '<test01 count="5">\n'
return xml
def showQuery(self, query):
dom = self.conn.get_domain(self.domain)
result = dom.select(query)
xml = self.startXml()
for item in result:
xml += "\t<line>\n"
keys = item.keys()
keys.sort()
for x in keys:
xml += '\t\t<' + x + '>' + item[x] + '</' + x + '>\n'
xml += "\t</line>\n"
xml += '</test01>'
return xml
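# Rough sketch of the .boto file the constructor reads (placeholder values,
# not real credentials):
#
#   [Credentials]
#   aws_access_key_id = YOUR_KEY_ID
#   aws_secret_access_key = YOUR_SECRET_KEY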
my_class = MyClass("Test01")
# my_class.addData('Line01', {'Field01': 'one', 'Field02': 'two'})
# my_class.showDomains()
print my_class.showQuery('select * from Test01')
|
mit
| -136,384,148,186,261,460 | 26 | 70 | 0.564972 | false |
educloudalliance/eca-auth-data
|
authdata/datasources/dreamschool.py
|
1
|
12221
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Example response from Dreamschool::
{
"meta": {
"limit": 0,
"offset": 0,
"total_count": 1
},
"objects": [
{
"email": "[email protected]",
"first_name": "Foo",
"id": "123",
"last_name": "Bar",
"organisations": [
{
"created": "2014-03-12T19:21:47.403524",
"id": "3",
"modified": "2015-08-10T12:37:54.719312",
"name": "Organisation",
"override_username_cleanup": false,
"registration_allowed": false,
"resource_uri": "/api/2/organisation/3/",
"source": "zap",
"title": "Organisation Name"
}
],
"phone_number": "+3581234567",
"picture_url": "https://id.dreamschool.fi/media/avatar/foo.png",
"resource_uri": "/api/2/user/123/",
"roles": [
{
"created": "2014-03-12T19:21:47.403524",
"id": "1",
"modified": "2015-10-13T14:10:54.732225",
"name": "teacher",
"official": true,
"organisation": {
"created": "2014-03-12T19:21:47.403524",
"id": "3",
"modified": "2015-08-10T12:37:54.719312",
"name": "foo",
"override_username_cleanup": false,
"registration_allowed": false,
"resource_uri": "/api/2/organisation/3/",
"source": "zap",
"title": "Organisation Name"
},
"permissions": [
{
"code": "dreamdiary.diary.supervisor",
"id": "12",
"name": "dreamdiary",
"resource_uri": ""
},
],
"resource_uri": "/api/2/role/1/",
"source": "zap",
"title": "teacher"
}
],
"theme_color": "ffffff",
"user_groups": [
{
"created": "2014-03-12T19:21:47.403524",
"filter_type": null,
"id": "2",
"level": 0,
"lft": 1,
"modified": "2014-03-12T19:21:47.403524",
"name": "1a",
"official": false,
"organisation": {
"created": "2014-03-12T19:21:47.403524",
"id": "3",
"modified": "2015-08-10T12:37:54.719312",
"name": "foo",
"override_username_cleanup": false,
"registration_allowed": false,
"resource_uri": "/api/2/organisation/3/",
"source": "zap",
"title": "Organisation Name"
},
"resource_uri": "/api/2/group/2/",
"rght": 2,
"source": "",
"title": "1a",
"tree_id": 150
},
],
"username": "foo.bar"
},
]
}
"""
import logging
import hashlib
import requests
from django.conf import settings
from authdata.datasources.base import ExternalDataSource
LOG = logging.getLogger(__name__)
TEACHER_PERM = 'dreamdiary.diary.supervisor'
class DreamschoolDataSource(ExternalDataSource):
"""
Required configuration parameters:
* api_url
* username
* password
"""
external_source = 'dreamschool'
def __init__(self, api_url, username, password, *args, **kwargs):
self.request = None
self.api_url = api_url
self.username = username
self.password = password
# PRIVATE METHODS
def _get_municipality_by_org_id(self, org_id):
org_id = int(org_id)
LOG.debug('Fetching municipality for org_id',
extra={'data': {'org_id': org_id}})
for municipality in settings.AUTHDATA_DREAMSCHOOL_ORG_MAP.keys():
for org_title in settings.AUTHDATA_DREAMSCHOOL_ORG_MAP[municipality]:
if int(settings.AUTHDATA_DREAMSCHOOL_ORG_MAP[municipality][org_title]) == org_id:
return municipality.capitalize()
return u''
def _get_roles(self, user_data):
"""Create roles structure
Example of output::
[
{
"school": "17392",
"role": "teacher",
"group": "7A",
"municipality": "City"
},
{
"school": "17392",
"role": "teacher",
"group": "7B",
"municipality": "City"
}
]
"""
roles_data = user_data['roles']
groups_data = user_data['user_groups']
# First we get list of schools where user is a teacher
schools_as_teacher = []
for r in roles_data:
org_id = r['organisation']['id']
if TEACHER_PERM in [i['code'] for i in r['permissions']]:
schools_as_teacher.append(org_id)
# iterate through groups
for g in groups_data:
out = {}
out['school'] = g['organisation']['title']
if g['organisation']['id'] in schools_as_teacher:
out['role'] = 'teacher'
else:
out['role'] = 'student'
out['group'] = g['title']
out['municipality'] = self._get_municipality_by_org_id(g['organisation']['id'])
yield out
def _get_org_id(self, municipality, school):
if not municipality or not school:
return None
LOG.debug('Fetching org id for given municipality and school',
extra={'data': {'municipality': repr(municipality),
'school': repr(school)}})
try:
muni = settings.AUTHDATA_DREAMSCHOOL_ORG_MAP[municipality.lower()]
except KeyError:
LOG.error('Unknown municipality')
return None
try:
org_id = muni[school.lower()]
except KeyError:
LOG.error('Unknown school', extra={'data':
{'school': repr(school),
'municipality': repr(municipality),
'muni_data': repr(muni),
}})
return None
LOG.debug('Mapped municipality and school to org id', extra={'data': {
'municipality': repr(municipality),
'school': repr(school),
'org_id': org_id,
}})
return org_id
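  # Illustrative only -- the real map lives in Django settings. The two lookup
  # helpers above assume a nested dict of lower-cased municipality name ->
  # lower-cased school title -> organisation id, e.g. (hypothetical values):
  #
  #   AUTHDATA_DREAMSCHOOL_ORG_MAP = {
  #       'exampletown': {'example school': '3'},
  #   }
  #
  # With that map, _get_org_id(u'Exampletown', u'Example School') returns '3'
  # and _get_municipality_by_org_id(3) returns u'Exampletown'.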
# INTERFACE METHODS
def get_oid(self, username):
"""
There is no OID information in this external source. Generate fake OID
from username.
"""
# TODO: OID is cut to 30 chars due to django username limitation
return 'MPASSOID.{user_hash}'.format(user_hash=hashlib.sha1('dreamschool' + username).hexdigest())[:30]
def get_user_data(self, request):
"""
Requested by mpass-connector
Returns a list of users based on request.GET filtering values
"""
self.request = request
school = u''
group = u''
municipality = request.GET['municipality'].lower()
if 'school' in request.GET:
school = unicode(request.GET['school'])
if 'group' in request.GET:
group = unicode(request.GET['group'])
url = self.api_url
username = self.username
password = self.password
org_id = self._get_org_id(municipality, school)
params = {}
if org_id:
params = {
'organisations__id': org_id,
}
if group:
params['user_groups__title__icontains'] = group
r = requests.get(url, auth=(username, password), params=params)
else:
# This may fail to proxy timeout error
# TODO: Catch status code 502 Proxy error
r = requests.get(url, auth=(username, password))
LOG.debug('Fetched from dreamschool', extra={'data':
{'api_url': self.api_url,
'params': params,
'status_code': r.status_code,
}})
if r.status_code != requests.codes.ok:
LOG.warning('Dreamschool API response not OK', extra={'data':
{'status_code': r.status_code,
'municipality': repr(municipality),
'api_url': self.api_url,
'username': self.username,
'params': params,
}})
return {
'count': 0,
'next': None,
'previous': None,
'results': [],
}
response = []
user_data = {}
try:
user_data = r.json()
except ValueError:
LOG.exception('Could not parse user data from dreamschool API')
return {
'count': 0,
'next': None,
'previous': None,
'results': [],
}
for d in user_data['objects']:
user_id = d['id']
username = d['username']
first_name = d['first_name']
last_name = d['last_name']
oid = self.get_oid(username)
external_id = str(user_id)
attributes = [
]
roles = self._get_roles(d)
response.append({
'username': oid,
'first_name': first_name,
'last_name': last_name,
'roles': roles,
'attributes': attributes
})
# On Demand provisioning of the users
self.provision_user(oid, external_id)
# TODO: support actual paging via SimplePagedResultsControl
return {
'count': len(response),
'next': None,
'previous': None,
'results': response,
}
def get_data(self, external_id):
"""Requested by idP
external_id: user id in dreamschool
"""
url = self.api_url + external_id + '/' # TODO: use join
username = self.username
password = self.password
r = requests.get(url, auth=(username, password))
LOG.debug('Fetched from dreamschool', extra={'data':
{'url': url,
'status_code': r.status_code,
}})
if r.status_code != requests.codes.ok:
LOG.warning('Dreamschool API response not OK', extra={'data':
{'status_code': r.status_code,
'url': url,
'username': self.username,
}})
return None
user_data = {}
try:
user_data = r.json()
except ValueError:
LOG.exception('Could not parse user data from dreamschool API')
return None
d = user_data
username = d['username']
first_name = d['first_name']
last_name = d['last_name']
attributes = [
]
roles = self._get_roles(d)
# On Demand provisioning of the user
external_id = str(d['id'])
oid = self.get_oid(username)
self.provision_user(oid, external_id)
return {
'username': self.get_oid(username),
'first_name': first_name,
'last_name': last_name,
'roles': roles,
'attributes': attributes
}
# TODO: support actual paging via SimplePagedResultsControl
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
mit
| -1,082,429,924,152,609,700 | 29.5525 | 107 | 0.524834 | false |
lebauce/artub
|
depplatform/darwin/__init__.py
|
1
|
1134
|
# Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
startup_path = "startup/index.html"
startup_path_mozilla = "startup/index.html"
test_path = "/home/bob/glumol/foo2.xml"
directory_sep = "/"
index_templates_path = "startup/index_templates.html"
def set_sys_path():
import sys; sys.path.append('/Applications/Artub.app/Contents/Frameworks/Python.framework/Versions/2.5/lib/python2.5/site-packages/OpenGL-3.0.0a4-py2.5.egg/')
|
gpl-2.0
| 4,421,569,277,991,204,400 | 38.103448 | 162 | 0.748677 | false |
AlayaCare/pentaho-rest-api
|
tests/test_roles_api.py
|
1
|
6857
|
import pytest
import requests_mock
import os
from penapi.pentaho import Pentaho
from tests import FIXTURES_DIR
def list_roles():
with open(os.path.join(FIXTURES_DIR, 'roles_api.xml'), 'r') as fixture_file:
data = fixture_file.read().replace('\n', '')
return data
def list_one_role():
with open(os.path.join(FIXTURES_DIR, 'one_role_api.xml'), 'r') as fixture_file:
data = fixture_file.read().replace('\n', '')
return data
def list_users():
with open(os.path.join(FIXTURES_DIR, 'users_api.xml'), 'r') as fixture_file:
data = fixture_file.read().replace('\n', '')
return data
def list_perm_role_map():
with open(os.path.join(FIXTURES_DIR, 'perm_role_map.xml'), 'r') as fixture_file:
data = fixture_file.read().replace('\n', '')
return data
@pytest.fixture
def pentaho():
return Pentaho(pentaho_base_url='http://test.com')
def test_role_api_list_user_role_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/userRoles?userName=test', text=list_roles())
roles_list = pentaho.roles.list_for_user('test')
assert len(roles_list) == 3
def test_role_api_list_user_one_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/userRoles?userName=test', text=list_one_role())
roles_list = pentaho.roles.list_for_user('test')
assert len(roles_list) == 1
def test_role_api_list_user_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/userRoles?userName=test', text='', status_code=500)
roles_list = pentaho.roles.list_for_user('test')
assert len(roles_list) == 0
def test_role_api_assign_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/assignRoleToUser?userName=test&roleNames=power', text='')
success = pentaho.roles.assign_to_user('test', ['power'])
assert success
def test_role_api_assign_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/assignRoleToUser?userName=test&roleNames=power',
text='', status_code=403)
success = pentaho.roles.assign_to_user('test', ['power'])
assert not success
def test_role_api_assign_fail_2(pentaho):
with pytest.raises(ValueError):
pentaho.roles.assign_to_user('test', 'power')
def test_role_api_remove_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/removeRoleFromUser?userName=test&roleNames=Business', text='')
success = pentaho.roles.remove_from_user('test', ['Business'])
assert success
def test_role_api_remove_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/removeRoleFromUser?userName=test&roleNames=Business',
text='', status_code=403)
success = pentaho.roles.remove_from_user('test', ['Business'])
assert not success
def test_role_api_remove_fail_2(pentaho):
with pytest.raises(ValueError):
pentaho.roles.remove_from_user('test', 'Business')
def test_role_api_create_role_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/createRole?roleName=test', text='')
success = pentaho.roles.create('test')
assert success
def test_role_api_create_role_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/createRole?roleName=test', text='', status_code=403)
success = pentaho.roles.create('test')
assert not success
def test_role_api_delete_roles_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/deleteRoles?roleNames=test', text='')
success = pentaho.roles.delete('test')
assert success
def test_role_api_delete_roles_list_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/deleteRoles?roleNames=test1%09test2', text='')
success = pentaho.roles.delete(['test1', 'test2'])
assert success
def test_role_api_delete_roles_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/deleteRoles?roleNames=test', text='', status_code=403)
success = pentaho.roles.delete('test')
assert not success
def test_role_api_list_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/roles', text=list_roles())
roles_list = pentaho.roles.list()
assert len(roles_list) == 3
def test_role_api_list_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/roles', text='', status_code=500)
roles_list = pentaho.roles.list()
assert len(roles_list) == 0
def test_role_api_list_user_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/roleMembers?roleName=test', text=list_users())
user_list = pentaho.roles.list_members('test')
assert len(user_list) == 6
def test_role_api_list_user_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/roleMembers?roleName=test', text='', status_code=500)
user_list = pentaho.roles.list_members('test')
assert len(user_list) == 0
def test_role_api_assign_perm_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.put('http://test.com/pentaho/api/userroledao/roleAssignments', text='')
success = pentaho.roles.assign_permissions_to_role('test1', read=True, create=True)
assert success
def test_role_api_list_perm_role_map_success(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/logicalRoleMap?locale=en', text=list_perm_role_map())
perm_role_map = pentaho.roles.get_permissions_role_map()
assert len(perm_role_map['assignments']) == 2
assert len(perm_role_map['localizedRoleNames']) == 7
def test_role_api_list_perm_role_map_fail(pentaho):
with requests_mock.Mocker() as mock_api:
mock_api.get('http://test.com/pentaho/api/userroledao/logicalRoleMap?locale=en', text='', status_code=403)
perm_role_map = pentaho.roles.get_permissions_role_map()
assert not perm_role_map
|
gpl-3.0
| -3,616,110,431,990,989,300 | 36.883978 | 124 | 0.673327 | false |
chb/indivo_admin
|
admin/management/commands/reset_admin.py
|
1
|
2076
|
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management import call_command
from django.contrib.auth.models import User
from optparse import make_option
from django.conf import settings
class Command(NoArgsCommand):
args = ''
help = 'Resets the admin app to its initial state, recreating the default users specified in settings.py'
option_list=NoArgsCommand.option_list + (
make_option('--no-users',
action='store_false',
dest='add_users',
default=True,
help="Don't create the default users when performing the reset"),
)
def handle_noargs(self, **options):
verbosity = int(options['verbosity'])
# sync the database
if verbosity:
print "Syncing the Database..."
call_command('syncdb', interactive=False)
if verbosity:
print "Done."
# flush the database
if verbosity:
print "Flushing the Database..."
call_command('flush', interactive=False, verbosity=verbosity)
if verbosity:
print "Done."
# create the default users
if options['add_users'] and settings.CREATE_USERS:
if verbosity:
print "Creating Default Users..."
for user_data in settings.DEFAULT_USERS:
fullname, email, username, password = user_data
if verbosity:
print "\tCreating user %s"%username
try:
user = User.objects.create_user(username, email, password)
user.is_superuser = True
name_bits = fullname.strip().split(' ')
user.first_name = name_bits[0]
user.last_name = name_bits[-1]
user.save()
except Exception as e:
if verbosity:
print "Error creating user: %s"%str(e)
if verbosity:
print "Done."
|
lgpl-3.0
| -925,390,528,987,213,300 | 34.186441 | 109 | 0.552987 | false |
jpburstrom/sampleman
|
settings.py
|
1
|
8195
|
# -*- coding: utf-8 -*-
import os
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import SIGNAL, SLOT
from models import *
from settingsui import Ui_SettingsDialog
class SettingsDialog(QtGui.QDialog):
"""Settings editor."""
newRepo = QtCore.pyqtSignal(unicode)
newFolder = QtCore.pyqtSignal(unicode)
newFormat = QtCore.pyqtSignal(unicode)
newApp = QtCore.pyqtSignal(unicode)
def __init__(self, *args):
QtGui.QDialog.__init__(self, *args)
#NB: Repo settings are stored in db. This is for all other settings.
self._settings = QtCore.QSettings("ljud.org", "Sampleman")
self.ui = Ui_SettingsDialog()
self.ui.setupUi(self)
self._readSettingsArray("apps", self.ui.listWidget)
self._readSettingsArray("formats", self.ui.listWidget_2)
self._readSettingsArray("folders", self.ui.listWidget_3)
self.repomodel = RepoModel(self)
self.ui.treeView.setModel(self.repomodel)
self.ui.treeView.resizeColumnToContents(0)
#Repo-related signals
self.connect(self.ui.buttonAdd_4, SIGNAL("clicked()"), self.addRepo)
self.connect(self.ui.buttonDelete_4, SIGNAL("clicked()"), self.deleteRepo)
self.connect(self.ui.buttonRescan, SIGNAL("clicked()"), self.rescanRepo)
self.connect(self.ui.buttonEdit, SIGNAL("clicked()"), self.editRepo)
#TODO: deactivate delete/rescan when item is not selected
#Application
self.connect(self.ui.buttonAdd, SIGNAL("clicked()"), self.addApp)
self.connect(self.ui.buttonDelete, SIGNAL("clicked()"), self.deleteApp)
self.connect(self.ui.listWidget, SIGNAL("itemChanged(QListWidgetItem *)"), self.appEdited)
#Formats
self.connect(self.ui.buttonAdd_2, SIGNAL("clicked()"), self.addFormat)
self.connect(self.ui.buttonDelete_2, SIGNAL("clicked()"), self.deleteFormat)
self.connect(self.ui.listWidget_2, SIGNAL("itemChanged(QListWidgetItem *)"), self.formatEdited)
#Folders (For export)
self.connect(self.ui.buttonAdd_3, SIGNAL("clicked()"), self.addFolder)
self.connect(self.ui.buttonDelete_3, SIGNAL("clicked()"), self.deleteFolder)
#self.connect(self.ui.listWidget_3, SIGNAL("itemChanged(QListWidgetItem *)"), self.folderEdited)
def setTab(self, tab):
"""Set current tab by label, sort of."""
tabs = ["repositories", "applications", "formats", "folders"]
self.ui.tabWidget.setCurrentIndex(tabs.index(tab))
def addRepo(self):
"""Add new repository.
"""
path = QtGui.QFileDialog.getExistingDirectory(self, "Add a repository", os.path.expanduser("~"))
if not path:
return False
repo = self.repomodel.add_repo(unicode(path))
if QtGui.QMessageBox.question(
self, "Question", "Would you like to scan the new repository?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes ) == QtGui.QMessageBox.Yes:
self.repomodel.scan_repo(repo)
session.commit()
self.newRepo.emit(path)
def deleteRepo(self):
"""Delete selected repository.
"""
#TODO : dialog box choosing btwn deleting single path and deleting entire repo
if QtGui.QMessageBox.question(
self, "Are you sure?", "Delete repository and soundfile data? (This will not remove them from your filesystem)",
QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel ) == QtGui.QMessageBox.Ok:
for mi in self.ui.treeView.selectedIndexes():
#FIXME: set column to 0 and find path
if mi.column() == 0:
path = mi.data().toString()
break
self.repomodel.delete_repo(unicode(path))
self.parent().rebuild()
session.commit()
def rescanRepo(self):
"""Rescan selected repository.
"""
for mi in self.ui.treeView.selectedIndexes():
#FIXME: set column to 0 and find path
if mi.column() == 0:
path = mi.data().toString()
repo = Repo.get_by(path=unicode(path))
self.repomodel.scan_repo(repo)
break
def editRepo(self):
"""Edit repository paths.
"""
print "Edit repo - to implement"
mi = self.ui.treeView.selectedIndexes()[0]
#FIXME
#Get repo
#find item to edit
#... (model stuff really)
def addApp(self):
"""Add "open with..."-application.
"""
#listWidget
self._newListItem(self.ui.listWidget)
def deleteApp(self):
"""Delete "Open with..."-application.
"""
items = self._deleteSelected(self.ui.listWidget, "apps")
def appEdited(self, item):
text = item.text()
if not text:
return
row = self.ui.listWidget.row(item)
self._writeSettingsArray("apps", row, text)
self.newApp.emit(text)
def addFormat(self):
"""Add file export format.
"""
self._newListItem(self.ui.listWidget_2)
def deleteFormat(self):
"""Delete file export format.
"""
items = self._deleteSelected(self.ui.listWidget_2, "formats")
def formatEdited(self, item):
text = item.text()
if not text:
return
row = self.ui.listWidget_2.row(item)
self._writeSettingsArray("formats", row, text)
self.newFormat.emit(text)
def addFolder(self):
"""Add file export folder.
"""
path = QtGui.QFileDialog.getExistingDirectory(self, "Add an export folder", os.path.expanduser("~"))
if not path:
return False
w = self.ui.listWidget_3
w.addItem(path)
i = w.count() - 1
self._writeSettingsArray("folders", i, path)
def deleteFolder(self):
"""Delete file export folder.
"""
items = self._deleteSelected(self.ui.listWidget_3, "folders")
def folderEdited(self, item):
text = item.text()
if not text:
return
row = self.ui.listWidget_3.row(item)
self._writeSettingsArray("folders", row, text)
self.newFolder.emit(text)
def _deleteSelected(self, w, key):
"""Delete and return selected items from widget w.
"""
if w.selectedIndexes() and not self._confirm():
return None
li = []
self._settings.beginGroup(key)
for item in w.selectedItems():
row = w.row(item)
self._settings.remove("{0}".format(row + 1))
li.append(w.takeItem(row))
self._settings.setValue("size", self._settings.value("size").toInt()[0] - 1)
self._settings.endGroup()
return li
def _confirm(self):
"""Simple annoying confirmation dialog.
"""
return QtGui.QMessageBox.question(
self, "Are you sure?", "Are you sure?",
QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel ) == QtGui.QMessageBox.Ok
def _readSettingsArray(self, key, w):
"""Get settings array *key* into QListWidget *w*.
"""
w.clear()
size = self._settings.beginReadArray(key)
for i in range(size):
self._settings.setArrayIndex(i)
w.addItem(self._settings.value("key").toString())
self._settings.endArray()
def _writeSettingsArray(self, key, row, v):
"""Write setting value *v* into array *key*, row *row*.
"""
#no empty strings, please.
if not v:
return
self._settings.beginWriteArray(key)
self._settings.setArrayIndex(row)
self._settings.setValue("key", v)
self._settings.endArray()
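    # Rough sketch of the QSettings layout these array helpers rely on (the
    # exact key names are Qt's, so treat this as an assumption): writing row 0
    # of the "apps" array stores roughly apps/1/key = <value> together with
    # apps/size = <count>, which is why _deleteSelected removes "{row + 1}"
    # inside beginGroup(key).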
def _newListItem(self, w):
"""Add new item to w and start editing.
"""
w.addItem("")
i = w.count()
item = w.item(i-1)
item.setFlags( item.flags() | QtCore.Qt.ItemIsEditable )
w.editItem(item)
|
gpl-3.0
| 3,741,517,568,021,530,000 | 32.863636 | 128 | 0.592068 | false |
jailuthra/asr
|
ctm2tg.py
|
1
|
2122
|
#!/usr/bin/env python
'''
Convert Kaldi's CTM alignment output to Praat's TextGrid format.
'''
import csv, sys, os
from praatio import tgio
def readCSV(filename):
'''Read a CSV (CTM) file.'''
with open(filename, 'rb') as fileobj:
out = list(csv.reader(fileobj, delimiter=' '))
return out
def csv2tgdict(ctmlist):
'''Convert a list of tuples read from the CTM file to a TextGrid dictionary.'''
out = {}
for row in ctmlist:
if row[0] not in out:
out[row[0]] = []
segment = (row[2], str(float(row[2]) + float(row[3])), row[4].split('_')[0])
out[row[0]].append(segment)
return out
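# Worked example with a hypothetical CTM row: ['utt01', '1', '0.33', '0.12', 'AH_B']
# becomes {'utt01': [('0.33', '0.45', 'AH')]}, i.e. each segment is
# (start, start + duration, label with its position suffix stripped).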
def wavscp2dict(wavscp):
'''Convert a list of tuples read from the wavscp file to a dictionary.'''
out = {}
for row in wavscp:
out[row[0]] = row[1]
return out
def ctm2tg(wavdir, outdir):
'''Convert CTM alignment files to Praat's TextGrid format.
Args:
wavdir -- path to the directory containing speech wav files
outdir -- path to output the textgrid files in
'''
print "Converting ctm files to Praat Textgrids...",
words = readCSV(os.path.join(outdir, 'wordlvl.ctm'))
phones = readCSV(os.path.join(outdir, 'phonelvl.ctm'))
word_dict = csv2tgdict(words)
phone_dict = csv2tgdict(phones)
wavscp = wavscp2dict(readCSV(os.path.join(outdir, 'wav.scp')))
if not os.path.exists(os.path.join(outdir, 'tg')):
os.makedirs(os.path.join(outdir, 'tg'))
for utt in wavscp.keys():
tg = tgio.Textgrid()
wordTier = tgio.IntervalTier('words', word_dict[utt], 0, pairedWav=wavscp[utt])
phoneTier = tgio.IntervalTier('phones', phone_dict[utt], 0, pairedWav=wavscp[utt])
tg.addTier(wordTier)
tg.addTier(phoneTier)
tg.save(os.path.join(outdir, 'tg', utt + '.TextGrid'))
print "stored in " + os.path.join(outdir, 'tg')
def main():
if (len(sys.argv) < 3):
print "Usage: %s <wavdir> <datadir>" % (sys.argv[0])
exit(1)
wavdir = sys.argv[1]
outdir = sys.argv[2]
ctm2tg(wavdir, outdir)
if __name__ == '__main__':
main()
|
mit
| 3,275,712,974,011,107,300 | 31.151515 | 90 | 0.614515 | false |
mission-liao/pyopenapi
|
pyopenapi/migration/versions/v2_0/objects.py
|
1
|
10147
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from ...spec import Base2, rename, child, list_, map_
def is_str(spec, path, override):
if override:
raise Exception('attemp to override "str" in {}'.format(path))
if isinstance(spec, six.string_types):
return spec
raise Exception('should be a string, not {}, {}'.format(
str(type(spec)), path))
def if_not_ref_else(class_builder):
def _f(spec, path, override):
if '$ref' in spec:
return Reference(spec, path=path, override=override)
return class_builder(spec, path=path, override=override)
_f.__name__ = 'if_not_ref_else_' + class_builder.__name__
return _f
def if_not_bool_else(class_builder):
def _f(spec, path, override):
if isinstance(spec, bool):
return spec
return class_builder(spec, path=path, override=override)
_f.__name__ = 'if_not_bool_else_' + class_builder.__name__
return _f
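# Short illustration of the two dispatch helpers above (spec values are made
# up): ParameterOrReference, defined below via if_not_ref_else(Parameter),
# builds {'$ref': '#/parameters/limitParam'} as a Reference but a plain dict
# such as {'name': 'limit', 'in': 'query'} as a Parameter; BoolOrSchema, via
# if_not_bool_else(Schema), passes a bare True/False (e.g. additionalProperties)
# through unchanged.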
# pylint: disable=invalid-name
class BaseObj_v2_0(Base2):
__swagger_version__ = '2.0'
class Reference(BaseObj_v2_0):
""" $ref
"""
__fields__ = {
'$ref': dict(readonly=False),
}
__internal__ = {
'ref': dict(key='$ref', builder=rename),
}
class XMLObject(BaseObj_v2_0):
""" XML Object
"""
__fields__ = {
'name': dict(),
'namespace': dict(),
'prefix': dict(),
'attribute': dict(),
'wrapped': dict(),
}
class ExternalDocumentation(BaseObj_v2_0):
""" External Documentation Object
"""
__fields__ = {
'description': dict(),
'url': dict(),
}
class Tag(BaseObj_v2_0):
""" Tag Object
"""
__fields__ = {
'name': dict(),
'description': dict(),
}
__children__ = {
'externalDocs': dict(child_builder=ExternalDocumentation),
}
__renamed__ = {
'external_docs': dict(key='externalDocs'),
}
class BaseSchema(BaseObj_v2_0):
""" Base type for Items, Schema, Parameter, Header
"""
__fields__ = {
'type': dict(),
'format': dict(),
'default': dict(),
'maximum': dict(),
'exclusiveMaximum': dict(),
'minimum': dict(),
'exclusiveMinimum': dict(),
'maxLength': dict(),
'minLength': dict(),
'maxItems': dict(),
'minItems': dict(),
'multipleOf': dict(),
'enum': dict(),
'pattern': dict(),
'uniqueItems': dict(),
}
__renamed__ = {
'type_': dict(key='type'),
'format_': dict(key='format'),
'exclusive_maximum': dict(key='exclusiveMaximum'),
        'exclusive_minimum': dict(key='exclusiveMinimum'),
        'max_length': dict(key='maxLength'),
        'min_length': dict(key='minLength'),
        'max_items': dict(key='maxItems'),
        'min_items': dict(key='minItems'),
'multiple_of': dict(key='multipleOf'),
'unique_items': dict(key='uniqueItems'),
}
class Items(BaseSchema):
""" Items Object
"""
__fields__ = {
'collectionFormat': dict(default='csv'),
}
__renamed__ = {
'collection_format': dict(key='collectionFormat'),
}
Items.attach_field('items', builder=child, child_builder=Items)
class Schema(BaseSchema):
""" Schema Object
"""
__fields__ = {
'$ref': dict(readonly=False),
'maxProperties': dict(),
'minProperties': dict(),
'title': dict(),
'description': dict(),
'discriminator': dict(),
'readOnly': dict(),
'example': dict(),
'required': dict(default=[]),
}
__children__ = {
'xml': dict(child_builder=XMLObject),
'externalDocs': dict(child_builder=ExternalDocumentation),
}
__internal__ = {
'ref': dict(key='$ref', builder=rename),
'max_properties': dict(key='maxProperties', builder=rename),
'min_properties': dict(key='minProperties', builder=rename),
'read_only': dict(key='readOnly', builder=rename),
'external_docs': dict(key='externalDocs', builder=rename),
'all_of': dict(key='allOf', builder=rename),
'additional_properties': dict(
key='additionalProperties', builder=rename),
}
BoolOrSchema = if_not_bool_else(Schema)
Schema.attach_field('items', builder=child, child_builder=Schema)
Schema.attach_field('allOf', builder=child, child_builder=list_(Schema))
Schema.attach_field('properties', builder=child, child_builder=map_(Schema))
Schema.attach_field(
'additionalProperties', builder=child, child_builder=BoolOrSchema)
class Contact(BaseObj_v2_0):
""" Contact Object
"""
__fields__ = {
'name': dict(),
'url': dict(),
'email': dict(),
}
class License(BaseObj_v2_0):
""" License Object
"""
__fields__ = {
'name': dict(),
'url': dict(),
}
class Info(BaseObj_v2_0):
""" Info Object
"""
__fields__ = {
'version': dict(),
'title': dict(),
'description': dict(),
'termsOfService': dict(),
}
__children__ = {
'contact': dict(child_builder=Contact),
'license': dict(child_builder=License),
}
__internal__ = {
'terms_of_service': dict(key='termsOfService', builder=rename),
}
class Parameter(BaseSchema):
""" Parameter Object
"""
__fields__ = {
'name': dict(),
'in': dict(),
'required': dict(),
'collectionFormat': dict(default='csv'),
'description': dict(),
'allowEmptyValue': dict(),
}
__children__ = {
'schema': dict(child_builder=Schema),
'items': dict(child_builder=Items),
}
__internal__ = {
'in_': dict(key='in', builder=rename),
'collection_format': dict(key='collectionFormat', builder=rename),
'allow_empty_value': dict(key='allowEmptyValue', builder=rename),
}
ParameterOrReference = if_not_ref_else(Parameter)
class Header(BaseSchema):
""" Header Object
"""
__fields__ = {
'collectionFormat': dict(default='csv'),
'description': dict(),
}
__children__ = {
'items': dict(child_builder=Items),
}
__internal__ = {
'collection_format': dict(key='collectionFormat', builder=rename),
}
class Response(BaseObj_v2_0):
""" Response Object
"""
__fields__ = {
'description': dict(),
'examples': dict(),
}
__children__ = {
'schema': dict(child_builder=Schema),
'headers': dict(child_builder=map_(Header)),
}
ResponseOrReference = if_not_ref_else(Response)
MapOfResponseOrReference = map_(ResponseOrReference)
class Operation(BaseObj_v2_0):
""" Operation Object
"""
__fields__ = {
'operationId': dict(),
'deprecated': dict(),
'description': dict(),
'summary': dict(),
}
__children__ = {
'tags': dict(child_builder=list_(is_str)),
'consumes': dict(child_builder=list_(is_str)),
'produces': dict(child_builder=list_(is_str)),
'schemes': dict(child_builder=list_(is_str)),
'parameters': dict(child_builder=list_(ParameterOrReference)),
'responses': dict(child_builder=MapOfResponseOrReference),
'security': dict(child_builder=list_(map_(list_(is_str)))),
'externalDocs': dict(child_builder=ExternalDocumentation),
}
__internal__ = {
'method': dict(),
'url': dict(),
'path': dict(),
'base_path': dict(),
'cached_schemes': dict(default=[]),
'cached_consumes': dict(default=[]),
'cached_produces': dict(default=[]),
'cached_security': dict(),
'operation_id': dict(key='operationId', builder=rename),
'external_docs': dict(key='externalDocs', builder=rename),
}
class PathItem(BaseObj_v2_0):
""" Path Item Object
"""
__fields__ = {
'$ref': dict(readonly=False),
}
__children__ = {
'get': dict(child_builder=Operation),
'put': dict(child_builder=Operation),
'post': dict(child_builder=Operation),
'delete': dict(child_builder=Operation),
'options': dict(child_builder=Operation),
'head': dict(child_builder=Operation),
'patch': dict(child_builder=Operation),
'parameters': dict(child_builder=list_(ParameterOrReference)),
}
__internal__ = {
'ref': dict(key='$ref', builder=rename),
}
class SecurityScheme(BaseObj_v2_0):
""" Security Scheme Object
"""
__fields__ = {
'type': dict(),
'description': dict(),
'name': dict(),
'in': dict(),
'flow': dict(),
'authorizationUrl': dict(),
'tokenUrl': dict(),
}
__children__ = {'scopes': dict(child_builder=map_(is_str))}
__internal__ = {
'type_': dict(key='type', builder=rename),
'in_': dict(key='in', builder=rename),
}
class Swagger(BaseObj_v2_0):
""" Swagger Object
"""
__fields__ = {
'swagger': dict(),
'host': dict(),
'basePath': dict(),
}
__children__ = {
'info': dict(child_builder=Info),
'schemes': dict(child_builder=list_(is_str)),
'consumes': dict(child_builder=list_(is_str)),
'produces': dict(child_builder=list_(is_str)),
'paths': dict(child_builder=map_(PathItem)),
'definitions': dict(child_builder=map_(Schema)),
'parameters': dict(child_builder=map_(Parameter)),
'responses': dict(child_builder=map_(Response)),
'securityDefinitions': dict(child_builder=map_(SecurityScheme)),
'security': dict(child_builder=list_(list_(is_str))),
'tags': dict(child_builder=list_(Tag)),
'externalDocs': dict(child_builder=ExternalDocumentation),
}
__internal__ = {
'base_path': dict(key='basePath', builder=rename),
'security_definitions': dict(key='securityDefinitions', builder=rename),
'external_docs': dict(key='externalDocs', builder=rename),
}
|
mit
| 7,771,566,013,706,399,000 | 24.623737 | 80 | 0.55307 | false |
ljbade/libswiftnav
|
python/tests/test_sats_management.py
|
1
|
3530
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Bhaskar Mookerji <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import swiftnav.observation as o
import swiftnav.sats_management as sat
import swiftnav.signal as s
def test_creation():
new_ref = s.GNSSSignal(sat=8, band=0, constellation=0)
sids = [s.GNSSSignal(sat=2, band=0, constellation=0),
s.GNSSSignal(sat=1, band=0, constellation=0),
s.GNSSSignal(sat=3, band=0, constellation=0),
s.GNSSSignal(sat=4, band=0, constellation=0)]
sm = sat.SatsManagement(sids=sids)
assert sm.num_sats
assert sm.sids[0]['sat'] == 2
assert sm.sids[1]['sat'] == 1
assert sm.sids[2]['sat'] == 3
assert sm.sids[3]['sat'] == 4
assert not sm._print()
assert not sm._print_short()
sdiffs = [o.SingleDiff(sid={'sat':1, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=1),
o.SingleDiff(sid={'sat':2, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=5),
o.SingleDiff(sid={'sat':3, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=10)]
# TODO (Buro): Check outputs!
assert sm.rebase(sdiffs)
assert not sm.update(sdiffs)
def test_choose_ref_sat():
sdiffs = [o.SingleDiff(sid={'sat':1, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=0),
o.SingleDiff(sid={'sat':2, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=0),
o.SingleDiff(sid={'sat':3, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=0)]
assert isinstance(sat.choose_reference_sat_(sdiffs), s.GNSSSignal)
assert sat.choose_reference_sat_(sdiffs).sat == 1
sdiffs = [o.SingleDiff(sid={'sat':1, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=1),
o.SingleDiff(sid={'sat':2, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=5),
o.SingleDiff(sid={'sat':3, 'band': 0, 'constellation': 0},
pseudorange=0, sat_pos=(0, 0, 0), sat_vel=(0, 0, 0),
carrier_phase=0, raw_doppler=0, doppler=0, snr=10)]
assert isinstance(sat.choose_reference_sat_(sdiffs), s.GNSSSignal)
assert sat.choose_reference_sat_(sdiffs).sat == 3
|
lgpl-3.0
| 4,821,503,331,596,980,000 | 53.307692 | 78 | 0.573088 | false |
mkelcb/knet
|
knet/com/application/logic/scanner/scanner.py
|
1
|
7024
|
# -*- coding: utf-8 -*-
#MIT License
#Copyright (c) 2017 Marton Kelemen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import collections
import numpy as np
from ..reml import reml
from ..reml import kinship
#from ..utils import geno_qc
import gc
from ....application.utils import geno_qc
# from com.applicatoin.utils import geno_qc
MAXDELTA = np.exp(10) # the theoretical max for delta, IE when there is no h2 in region
# goes through a genome, tests for if there is any h2, in each block, which might be overlapping
# at the end they are merged
def findRegions(y, M, irsIds= None, blockSize = 10, stride = 5, X = None) : # X is the fixed effects here, M is the GWAS design matrix
#n = M.shape[0]
# do some QC
    if X is None: # This algo will NOT run if there are no Fixed effects, so if there were none passed in, then we add an intercept ( a column of 1s)
print("no fixed effects specified, adding an intercept")
X = np.ones( (y.shape[0], 1) ) # make sure that this is a 2D array, so that we can refer to shape[1]
print("Genotype matrix size in MBs is: ",geno_qc.getSizeInMBs(M)," and blockSize is: " , blockSize )
regions = list()
deltas = list()
endPos = 0
startPos = 0
qc_data = geno_qc.genoQC_all(M, rsIds = irsIds)
M = qc_data["X"]
MAFs = qc_data["MAFs"] # need to store the original MAFs as, after standardising the genotype matrix it will be impossible to calculate MAFs anymore...
irsIds = qc_data["rsIds"]
p = M.shape[1] # only set this after we have potentially removed all the bad SNPs
Standardised_SNPs = geno_qc.standardise_Genotypes(M) # Z-score them ( it is good enough to standardise these just once outside of the loop)
print("After standardising, matrix size in MBs is: ",geno_qc.getSizeInMBs(Standardised_SNPs) )
#eigSum = None
# delta = None
i = 0
while(True) : # go through each block of the genome ( later I will have to treat different Chroms differently)
# 1 pick start/end of current block
endPos = startPos + blockSize # -1
# check if we have reached the end IE if endpos > last SNP
if(endPos > p) :
endPos = p
print("reached the end of the genome, last endposition is: " + str(endPos) )
print("numSNPS in matrix: " + str(Standardised_SNPs.shape[1]) + " // testing block "+ str(i) + " / startpos: " + str(startPos) + " / endPos: ", str(endPos), " / "+ str(p), "(", str("{0:.0f}%".format(endPos / p * 100) ) ,")" ) ## , end='\r'
M_region = Standardised_SNPs[:,startPos:endPos] # 2 subset genome to this block
K = kinship.calc_Kinship( M_region ) # 3. create kinship matrix from block
try:
results = reml.REML_GWAS(y, K) # 4. check if there is any h2 in this block via EMMA
delta = results["delta"]
except Exception as e :
print("Eigenvalues won't converge, but we try once more with feelin' ( MAF filter: 0.03) ")
print("precise error message: " , str(e))
try:
M_region_filtered = geno_qc.removeRareVariants_quick(M_region, MAFs[startPos:endPos], rsIds = irsIds[startPos:endPos],minMAF = 0.03)
K = kinship.calc_Kinship( M_region_filtered ) # 3. create kinship matrix from block
results = reml.REML_GWAS(y, K)
delta = results["delta"]
except Exception as e:
delta = MAXDELTA
print("Still won't converge, so we use default values")
print("precise error message: " , str(e))
print("delta for block ", str(i) , " is: " , str(delta))
regions.append( np.array([startPos,endPos]) ) # add delta to the ones collected so far
        deltas.append( delta ) # maybe significance test this??
# update start position for next round
startPos = startPos+stride
if(endPos >= p ) : # if the new start position would be beyond the end then we stop
break
        i = i + 1
gc.collect()
#del K
# del results
    # return the scanned regions (blocks), the estimated delta for each block, and the QC-filtered rsIds
return ( {"REGIONS":regions, "DELTAS":deltas, "rsIds":irsIds } )
def concatRegions(allRegions) :
deltaConcat = list()
regionsConcat = list()
for i in range( len(allRegions) ): # go through all regions
deltaConcat = deltaConcat + allRegions[i]["DELTAS"] # the deltas are just simply concated
# for regions we need to offset all of the next region's blocks by the last added block's endpos:
lastPos = 0
        if i != 0 : lastPos = regionsConcat[-1][1]
        for j in range( len(allRegions[i]["REGIONS"]) ): # go through all regions' blocks
allRegions[i]["REGIONS"][j] = allRegions[i]["REGIONS"][j] + lastPos # offset each block
regionsConcat = regionsConcat + allRegions[i]["REGIONS"]
return ( {"REGIONS":regionsConcat, "DELTAS":deltaConcat } )
def getDummyEigSum() :
eigenSummary = collections.namedtuple('values', 'vectors')
eigenSummary.values = np.array([[0,0],[0,0]])
eigenSummary.vectors =np.array([0,0,0]) # make it deliberately mismatched so that using this will fail too
# local testing
#allDeltas = list()
#delta1 = [1,2]
#delta2 = [3,4]
#allDeltas.append(delta1)
#allDeltas.append(delta2)
#allRegions = list()
#regions1 = list()
#regions1.append( np.array([0,50]))
#regions1.append( np.array([25,75]))
#regions2 = list()
#regions2.append( np.array([0,100]))
#regions2.append( np.array([50,200]))
#allRegions.append(regions1)
#allRegions.append(regions2)
#results = concatRegions(allRegions,allDeltas)
|
mit
| -4,663,438,944,914,809,000 | 42.364198 | 252 | 0.636674 | false |
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/procedure_call/test_rule_302.py
|
1
|
1284
|
import os
import unittest
from vsg.rules import procedure_call
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_302_test_input.vhd'))
dIndentMap = utils.read_indent_file()
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_302_test_input.fixed.vhd'), lExpected, False)
class test_procedure_call_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
self.oFile.set_indent_map(dIndentMap)
def test_rule_302(self):
oRule = procedure_call.rule_302()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'procedure_call')
self.assertEqual(oRule.identifier, '302')
lExpected = [31, 33, 42, 44]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_302(self):
oRule = procedure_call.rule_302()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
gpl-3.0
| -2,041,835,647,543,222,500 | 25.75 | 106 | 0.683022 | false |
letuananh/intsem.fx
|
isftk/ttl.py
|
1
|
39015
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Script for processing TTL profiles
Latest version can be found at https://github.com/letuananh/intsem.fx
@author: Le Tuan Anh <[email protected]>
@license: MIT
'''
# Copyright (c) 2018, Le Tuan Anh <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
########################################################################
import os
import logging
import json
from collections import defaultdict as dd
from lxml import etree
from lelesk.util import ptpos_to_wn
from chirptext import TextReport, Counter, piter
from chirptext.cli import CLIApp, setup_logging
from chirptext.leutile import StringTool
from chirptext import chio
from chirptext.chio import CSV
from chirptext import texttaglib as ttl
from yawlib import SynsetID
from yawlib.helpers import get_omw, get_wn
from coolisf.common import read_file, overlap
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
setup_logging('logging.json', 'logs')
def getLogger():
return logging.getLogger(__name__)
NONSENSES = ['02604760-v', # : 85 | have the quality of being - ['be']
'00024073-r', #: 22 | negation of a word or group of words - ['not', 'non', "n't"]
'02749904-v', #: 15 | happen, occur, take place - ['be']
'01552885-a', #: 9 | a quantifier that can be used with count nouns and is often preceded by `a'; a small but indefinite number - ['few']
'02655135-v', #: 8 | occupy a certain position or area; be somewhere - ['be']
'01712704-v', #: 5 | carry out or perform an action - ['do', 'execute', 'perform']
'02603699-v', #: 5 | have an existence, be extant - ['be', 'exist']
'00031899-r', #: 5 | used to give emphasis - ['very', 'really', 'real', 'rattling']
'02560585-v', #: 4 | engage in - ['do', 'make']
]
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
def read_ttl(ttl_path, ttl_format=ttl.MODE_TSV):
return ttl.read(ttl_path, ttl_format)
def get_ttl_writer(path, ttl_format=ttl.MODE_TSV, id_seed=1):
if ttl_format == ttl.MODE_JSON:
return ttl.JSONWriter.from_path(path, id_seed=id_seed)
else:
return ttl.TxtWriter.from_path(path, id_seed=id_seed)
def prepare_tags(doc, args=None, nonsense=True):
''' Return a map of sentID -> map of {synsetid:(cfrom:cto)}, number of used tags (int), and ignored tags (int)'''
tags = dd(lambda: dd(set))
tagcount = 0
ignored = 0
for s in doc:
for tag in s.tags:
cfrom = int(tag.cfrom)
cto = int(tag.cto)
if tag.tagtype in ('WN', 'EXTRA', 'OMW'):
# ignore nonsense
if not nonsense and tag.label in NONSENSES:
ignored += 1
continue
tags[s.ID][tag.label].add((cfrom, cto))
tagcount += 1
else:
ignored += 1
if args and not args.quiet:
getLogger().warning("Unknown label: {} in sentence #{}".format(tag.tagtype, s.ID))
return tags, tagcount, ignored
def score(gold_tags, profile_tags, args=None):
matched = set()
notmatched = set() # false negative
for sid in gold_tags.keys():
gstags = gold_tags[sid] # gold tags of this sent
pstags = profile_tags[sid] # profile tags of this sent
for gstag in gstags.keys(): # gstag = synsetID
glocs = gstags[gstag]
plocs = set(pstags[gstag]) if gstag in pstags else []
for gfrom, gto in glocs:
tag = (sid, gfrom, gto, gstag) # sentID, gfrom, gto, synsetID
for pfrom, pto in plocs:
getLogger().debug("Checking {}:{} againts {}:{}".format(gfrom, gto, pfrom, pto))
if overlap(pfrom, pto, gfrom, gto):
matched.add(tag)
plocs.remove((pfrom, pto))
break
if tag not in matched:
if args and not args.quiet:
getLogger().warning("Not found: {}".format(tag))
notmatched.add(tag)
return matched, notmatched
# return gold_tags.intersection(profile_tags)
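# Minimal illustration of what score() consumes (sentence ID and spans are
# made up): prepare_tags() yields maps like {'s1': {'02084071-n': {(0, 3)}}},
# so the gold tag ('s1', 0, 3, '02084071-n') counts as a true positive when
# the profile tags the same synset on any overlapping span such as (1, 4).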
def compare_ttls(cli, args):
''' Compare TTL to gold '''
rp = TextReport()
omw = get_omw()
ctx = omw.ctx()
gold = None
profile = None
ignored_ids = []
if args.ignore:
ignored_ids = [x.strip() for x in read_file(args.ignore).splitlines() if x.strip()]
getLogger().debug("Ignored sentence IDs: {}".format(', '.join(ignored_ids)))
if args.gold_profile:
gold = read_ttl(args.gold_profile, ttl_format=args.ttl_format)
# remove ignored sentences
if ignored_ids:
for sid in ignored_ids:
gold.pop(sid, default=None)
if not args.batch:
rp.header("Gold sentences: {} | Loc: {}".format(len(gold), args.gold_profile))
if args.verbose and not args.batch:
for s in gold:
rp.print("Sent #{}: {} tags".format(s.ID, len(s.tags)))
elif not args.batch:
print("Oops, no gold!")
# read profile
if args.profile:
profile = read_ttl(args.profile, ttl_format=args.ttl_format)
if not args.batch:
rp.header("Profile sentences: {} | Loc: {}".format(len(profile), args.profile))
# remove ignored sentences
if ignored_ids:
for sid in ignored_ids:
profile.pop(sid, default=None)
if not args.batch:
rp.header("Profile sentences: {} (ignored: {}) | Loc: {}".format(len(profile), len(ignored_ids), args.profile))
if args.verbose and not args.batch:
for s in profile:
getLogger().debug("Profile/Sent #{}: {} tags".format(s.ID, len(s.tags)))
elif not args.batch:
print("Oops, no profile to evaluate")
# calculate precision and recall
if gold and profile:
gold_tags, gold_tags_len, gold_ignored = prepare_tags(gold, args=args, nonsense=args.nonsense)
profile_tags, profile_tags_len, profile_ignored = prepare_tags(profile, args=args, nonsense=args.nonsense)
if gold_tags_len == 0:
rp.print("WARNING: There was no tag found in the gold profile. Please make sure that the tags for comparison are *sentence level* tags")
if profile_tags_len == 0:
rp.print("WARNING: There was no tag found in the evaluating profile. Please make sure that the tags for comparison are *sentence level* tags")
getLogger().debug("Gold tags: {}".format(gold_tags_len))
getLogger().debug(list(gold_tags.items())[:5])
getLogger().debug("Profile tags: {}".format(profile_tags_len))
getLogger().debug(list(profile_tags.items())[:5])
true_positive, false_negative = score(gold_tags, profile_tags, args=args)
precision = len(true_positive) / profile_tags_len
recall = len(true_positive) / gold_tags_len
f1 = 2 * precision * recall / (precision + recall)
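        # Worked example with made-up counts: 40 true positives out of 50
        # predicted tags and 80 gold tags give precision 0.8, recall 0.5 and
        # F1 = 2 * 0.8 * 0.5 / (0.8 + 0.5), i.e. about 0.615.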
getLogger().debug("TP: {}".format(len(true_positive)))
getLogger().debug("FN: {}".format(len(false_negative)))
getLogger().debug("Recall (TP/Gtags): {}".format(recall))
getLogger().debug("Precision (TP/Ptags): {}".format(precision))
getLogger().debug("F1 (2*p*r/(p+r)): {}".format(f1))
rc_text = "{:.2f}%".format(recall * 100)
pr_text = "{:.2f}%".format(precision * 100)
f1_text = "{:.2f}%".format(f1 * 100)
if not args.batch:
rp.print("True positive: {}".format(len(true_positive)))
rp.print("False Negative: {}".format(len(false_negative)))
rp.print("Gold # senses: {} | Ignored: {} | Total: {}".format(gold_tags_len, gold_ignored, gold_tags_len + gold_ignored))
rp.print("Predicted # senses: {} | Ignored: {} | Total: {}".format(profile_tags_len, profile_ignored, profile_tags_len + profile_ignored))
rp.print("Recall: {}".format(rc_text))
rp.print("Precision: {}".format(pr_text))
rp.print("F1 : {}".format(f1_text))
if args.org:
# output org-mode
columns = [rc_text, pr_text, f1_text]
if args.cols:
columns = args.cols + columns
rp.print('| {} |'.format(' | '.join(columns)))
if args.debug:
if not args.batch:
print("Debug file: {}".format(args.debug))
debugfile = TextReport(args.debug)
debugfile.print(".:: Table of content ::.")
debugfile.print("")
debugfile.print("[Misisng senses]")
debugfile.print("[By classes]")
debugfile.print("[Summary]")
debugfile.print("")
ss_map = {}
debugfile.header("[Missing senses]")
for sid, cfrom, cto, label in sorted(false_negative):
if label not in ss_map:
ss = omw.get_synset(label, ctx=ctx)
ss_map[label] = ss
else:
ss = ss_map[label]
# get the surface form
surface = gold.get(sid).text[int(cfrom):int(cto)]
debugfile.print("{}\t{}\t{}\t{}\t{}\t{}\t{}".format(sid, cfrom, cto, surface, label, ss.definition, ss.lemmas))
# by classes
c = Counter()
c.update(synsetID for sentID, cfrom, cto, synsetID in false_negative)
debugfile.header("[By classes]")
for synsetID, freq in c.most_common():
ss = ss_map[synsetID]
debugfile.print("{}: {} | ({}) - {}".format(synsetID, freq, ', '.join(ss.lemmas), ss.definition))
# summary
debugfile.header("[Summary]")
debugfile.print("True positive: {}".format(len(true_positive)))
debugfile.print("False positive: {}".format(len(false_negative)))
debugfile.print("Gold # senses: {} | Ignored: {} | Total: {}".format(gold_tags_len, gold_ignored, gold_tags_len + gold_ignored))
debugfile.print("Predicted # senses: {} | Ignored: {} | Total: {}".format(profile_tags_len, profile_ignored, profile_tags_len + profile_ignored))
debugfile.print("Recall (TP/Gtags) : {}".format(rc_text))
debugfile.print("Precision (TP/Ptags): {}".format(pr_text))
debugfile.print("F1 (2*p*r/(p+r)) : {}".format(f1_text))
ctx.close()
def pop_concept(sent, c):
if c not in sent.concepts:
return
getLogger().debug("Popping concept {} from sent #{}".format(c, sent.ID))
cfrom = min(t.cfrom for t in c.tokens)
    cto = max(t.cto for t in c.tokens)  # end of the last token, so the span covers the whole concept
synset = c.tag
sent.pop_concept(c.cidx)
remove_tags = set()
# remove tags in sentence as well
for tag in sent.tags:
if (int(tag.cfrom), int(tag.cto), tag.label) == (cfrom, cto, synset):
remove_tags.add(tag)
if remove_tags:
for tag in remove_tags:
getLogger().debug("Removing tag: {}".format(tag))
sent.tags.remove(tag)
return remove_tags
def remove_msw_ttl(cli, args):
doc = read_ttl(args.path)
rp = TextReport(args.debug)
rp.print("Doc size: {}".format(len(doc)))
orig_tag_count = 0
orig_concept_count = 0
for s in doc:
orig_concept_count += len(s.concepts)
orig_tag_count += len(s.tags)
print("# tags: {}".format(orig_tag_count))
print("# concepts: {}".format(orig_concept_count))
manual = dd(lambda: dd(dict))
nonsenses = set() # just ignore any tag with these sense IDs
if args.manual:
entries = CSV.read_tsv(args.manual)
for sid, wid, tag, keep, lemma in entries:
sid, wid, keep = int(sid), int(wid), int(keep)
if (sid, wid, keep, lemma) == (-1, -1, -1, 'U'):
nonsenses.add(tag)
if not lemma:
manual[sid][wid][tag] = keep
else:
manual[sid][wid][(tag, lemma)] = keep
wn = get_wn()
ctx = wn.ctx()
nope_synsets = set()
ok_synsets = set()
if args.wn30:
rp.print("WN30 filter is activated")
for sidx, sent in enumerate(doc):
if args.topk and sidx > int(args.topk):
break
getLogger().debug("Processing sentence {}/{}".format(sidx + 1, len(doc)))
getLogger().debug("Before concepts: {}".format(sent.concepts))
getLogger().debug("Before tags: {}".format(sent.tags))
# remove concepts that are not in PWN 3.0
if args.wn30:
remove_tags = set()
for tag in sent.tags:
if tag.tagtype == 'OMW' or tag.label in nonsenses:
remove_tags.add(tag)
for tag in remove_tags:
sent.tags.remove(tag)
remove_concepts = set()
for c in sent.concepts:
if c.tag in ok_synsets:
pass
elif c.tag in nope_synsets:
remove_concepts.add(c)
# pop_concept(sent, c)
elif wn.get_synset(c.tag, ctx=ctx) is None:
# remove it
nope_synsets.add(c.tag)
remove_concepts.add(c)
# pop_concept(sent, c)
else:
ok_synsets.add(c.tag)
for c in remove_concepts:
pop_concept(sent, c)
msw = list(sent.msw())
tcmap = sent.tcmap()
# remove_tags = set()
if msw:
keep_remove = []
for w in msw:
max_len = 0
keep = []
remove = set()
wid = sent.tokens.index(w)
for c in tcmap[w]:
if c.tag in manual[sent.ID][wid]:
if manual[sent.ID][wid][c.tag]:
keep.append(c)
else:
remove.add(c)
elif (c.tag, c.clemma) in manual[sent.ID][wid]:
if manual[sent.ID][wid][(c.tag, c.clemma)]:
keep.append(c)
else:
remove.add(c)
elif len(c.tokens) == 1 or len(c.tokens) < max_len:
remove.add(c)
elif c.tag in nonsenses:
remove.add(c)
else:
max_len = len(c.tokens)
keep.append(c)
if len(keep) != 1:
keep_remove.append((w, keep, remove))
else:
# everything is OK, remove them now
for c in remove:
if args.debug:
rp.print("Removing concept {} from {}".format(c, sent.ID))
getLogger().debug("Removing concept {} from {}".format(c, sent.ID))
pop_concept(sent, c)
if keep_remove:
rp.header(sent)
for w, keep, remove in keep_remove:
rp.write(w)
rp.writeline(" - Keep: {} | Remove: {}".format(keep, remove))
# remove sent's tags
# for tag in remove_tags:
# getLogger().debug("removing tag: {}".format(tag))
# sent.tags.remove(tag)
getLogger().debug("After concepts: {}".format(sent.concepts))
getLogger().debug("After tags: {}".format(sent.tags))
if nope_synsets:
rp.print("Noped synsets: {}".format(nope_synsets))
if args.output:
doc_path = os.path.dirname(args.output)
doc_name = os.path.basename(args.output)
new_doc = ttl.Document(doc_name, doc_path)
sents = doc if not args.topk else list(doc)[:int(args.topk)]
for s in sents:
new_doc.add_sent(s)
tag_count = 0
concept_count = 0
for s in sents:
concept_count += len(s.concepts)
tag_count += len(s.tags)
# baking ...
if args.bake:
print("Baking doc ...")
bake_doc(new_doc)
print("[New] # tags: {}".format(tag_count))
print("[New] # concepts: {}".format(concept_count))
rp.print("Writing fixed TTL to {}".format(new_doc.sent_path))
new_doc.write_ttl()
def strip_ttl(cli, args):
doc = read_ttl(args.path, ttl_format=args.ttl_format)
print("In doc: {} | Sentences: {}".format(args.path, len(doc)))
if args.noconcept:
for sent in doc:
cids = [c.cidx for c in sent.concepts]
for cid in cids:
sent.pop_concept(cid)
if args.notag:
for sent in doc:
sent.tags.clear()
if args.output:
print("Writing output to: {}".format(args.output))
# doc_path = os.path.dirname(args.output)
# doc_name = os.path.basename(args.output)
# new_doc = ttl.Document(doc_name, doc_path)
# for s in doc:
# new_doc.add_sent(s)
# new_doc.write_ttl()
ttl.write(args.output, doc, mode=args.ttl_format)
print("Done")
def bake_doc(doc):
''' Convert concepts to tags '''
for sent in doc:
for concept in sent.concepts:
cfrom = min(t.cfrom for t in concept.tokens)
            cto = max(t.cto for t in concept.tokens)  # end of the last token of the concept
sid = SynsetID.from_string(concept.tag, default=None) # must be a valid synsetID
if cfrom >= 0 and cto >= 0 and sid is not None:
sent.new_tag(concept.tag, cfrom, cto, tagtype='WN')
return doc
def concept_to_tags(cli, args):
doc = read_ttl(args.path, ttl_format=args.ttl_format)
print("In doc: {} | Sentences: {}".format(args.path, len(doc)))
bake_doc(doc)
if args.output:
ttl.write(args.output, doc, mode=args.ttl_format)
print("Done")
def ttl_to_txt(cli, args):
doc = read_ttl(args.path, ttl_format=args.ttl_format)
print("In doc: {} | Sentences: {}".format(args.path, len(doc)))
lines = [s.text for s in doc]
if args.output:
chio.write_file(args.output, '\n'.join(lines))
print("Written {} lines to {}".format(len(lines), args.output))
print("Done")
def ttl_to_tokens(cli, args):
''' Convert TTL file to tokenized text '''
doc = read_ttl(args.input, ttl_format=args.ttl_format)
print("In doc: {} | Sentences: {}".format(args.input, len(doc)))
if args.output:
with chio.open(args.output, mode='wt') as outfile:
processed_sents = 0
for idx, sent in enumerate(doc):
if args.topk and idx >= args.topk:
break
if args.ident and str(sent.ID) not in args.ident:
continue
# outfile.write('{}\n'.format(sent.ID))
outfile.write(" ".join(w.text for w in sent))
outfile.write("\n")
processed_sents += 1
print("{} sentences have been processed.".format(processed_sents))
print("[TTL => tokens] Written {} lines to {}".format(len(doc), args.output))
else:
print("output file cannot be empty")
print("Done")
def ttl_to_ukb(cli, args):
''' Convert TTL file to UKB file '''
doc = read_ttl(args.input, ttl_format=args.ttl_format)
print("In doc: {} | Sentences: {}".format(args.input, len(doc)))
if args.output:
with chio.open(args.output, mode='wt') as outfile, chio.open(args.output + '.tokens.txt', mode='wt') as tokenfile:
processed_sents = 0
for idx, sent in enumerate(doc):
if args.topk and idx >= args.topk:
break
if args.ident and str(sent.ID) not in args.ident:
continue
outfile.write('{}\n'.format(sent.ID))
for idx, w in enumerate(sent):
if w.pos and w.pos.lower() in 'xnvar':
token_pos = w.pos.lower()
else:
token_pos = ptpos_to_wn(w.pos)
if not args.strict:
mode = 1
elif token_pos in 'nvar':
mode = 1
else:
mode = 0
word_text = w.lemma.lower() if w.lemma else w.text.lower()
word_text = word_text.replace(' ', '_')
outfile.write("{text}#{p}#w{wid}#{mode} ".format(text=word_text, p=token_pos, wid=idx, mode=mode))
tokenfile.write('\t'.join((str(sent.ID), str(idx), str(w.cfrom), str(w.cto))))
tokenfile.write('\n')
outfile.write('\n\n')
processed_sents += 1
print("{} sentences have been processed.".format(processed_sents))
print("[TTL => UKB] Written {} lines to {}".format(len(doc), args.output))
else:
print("output file cannot be empty")
print("Done")
def ukb_to_ttl(cli, args):
''' Convert UKB output to TTL '''
doc = read_ttl(args.ttl, ttl_format=args.ttl_format)
print("Source TTL file: {} | Sentences: {}".format(args.ttl, len(doc)))
token_map = {}
if args.tokens:
# token file is provided
tokens = [list(int(x) for x in line) for line in chio.read_tsv(args.tokens)]
for sid, wid, cfrom, cto in tokens:
token_map[(sid, wid)] = (cfrom, cto)
print("Found tokens: {}".format(len(token_map)))
c = Counter()
sids = set()
input_sids = {int(s.ID) for s in doc}
for line_idx, line in enumerate(chio.read_file(args.input).splitlines()):
if line.startswith('!! '):
continue
parts = line.split()
if len(parts) != 5:
print("WARNING: Invalid line -> {}: {}".format(line_idx, line))
continue
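        # A UKB result line is assumed to hold five whitespace-separated fields:
        # context id, word id (e.g. "w3"), synset id, a score/weight field (unused here), and the lemma.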
sid_text, wid_text, synsetid, unknown, lemma = line.split()
sid = int(sid_text)
wid = int(wid_text[1:])
sent_obj = doc.get(sid, default=None)
if sent_obj is None:
print("SID #{} could not be found".format(sid))
elif not token_map and wid >= len(sent_obj):
print("Invalid wid: line#{} - sent#{} - wid#{}".format(line_idx, sid, wid))
else:
# now can tag ...
# remove current concepts if needed
# if args.removetags:
# cids = list(c.cidx for c in sent_obj.concepts)
# for cid in cids:
# sent_obj.pop_concept(cid)
if not token_map:
token = sent_obj[wid]
# double check token text
if lemma != token.text.lower() and lemma != token.lemma.lower():
print("Invalid token text: {} <> {}/{}".format(lemma, token.text.lower(), token.lemma.lower()))
sent_obj.new_concept(synsetid, lemma, tokens=[wid])
else:
# create sentence-level tag instead
cfrom, cto = token_map[(sid, wid)]
sent_obj.new_tag(synsetid, cfrom, cto, tagtype='WN')
c.count("Tokens")
sids.add(sid)
print("UKB sentences: {}".format(len(sids)))
print("Not found: {}".format(input_sids.difference(sids)))
c.summarise()
# removetags if needed
if args.removetags:
for sent_obj in doc:
sent_obj.tags.clear()
print("Sent #1 tags: {}".format(len(doc[0].tags)))
# baking
if not args.tokens:
print("Now baking to tags ...")
bake_doc(doc)
else:
print("WARNING: Because token file was provided, no auto-baking will be done")
print("Sent #1 tags after baking: {}".format(len(doc[0].tags)))
# Now output ...
if args.output:
print("Output to file ...")
_writer = get_ttl_writer(args.output, ttl_format=args.ttl_format, id_seed=args.seed)
for sent in doc:
_writer.write_sent(sent)
print("Written {} sentences to {}".format(len(doc), args.output))
print("Done")
def tsv_to_json(cli, args):
doc_tsv = read_ttl(args.input, ttl_format=args.ttl_format)
_json_writer = ttl.JSONWriter.from_path(args.output)
for idx, sent in enumerate(doc_tsv):
sent.ID = args.seed + idx
if args.textonly:
_json_writer.write_sent(ttl.Sentence(sent.text, ID=sent.ID))
else:
_json_writer.write_sent(sent)
print("Create JSON file: {}".format(args.output))
def fix_ttl_id(cli, args):
''' Fix sentence ID '''
in_doc = read_ttl(args.input, ttl_format=args.ttl_format)
out_doc = get_ttl_writer(args.output, ttl_format=args.ttl_format)
for idx, sent in enumerate(in_doc):
sent.ID = args.seed + idx
out_doc.write_sent(sent)
print("Fixed file: {}".format(args.output))
def txt_to_ttl(cli, args):
print("Input file: {}".format(args.input))
print("TTL/{} output: {}".format(args.ttl_format, args.output))
print("With ID column: {}".format(args.with_idcolumn))
raw_sents = chio.read_file(args.input).splitlines()
_writer = get_ttl_writer(args.output, ttl_format=args.ttl_format, id_seed=args.seed)
for sent in raw_sents:
if args.with_idcolumn:
sid, text = sent.split('\t', maxsplit=1)
_writer.write_sent(ttl.Sentence(text=text, ID=sid))
else:
_writer.write_sent(ttl.Sentence(text=text))
print("Written {} sentences to {}".format(len(raw_sents), args.output))
def import_ttl_data(cli, args):
doc = read_ttl(args.path)
print("In doc: {} | Sentences: {}".format(args.path, len(doc)))
# import tokens
if args.tokens:
p = piter(chio.read_tsv_iter(args.tokens))
groups = []
current = []
for row in p:
if row:
current.append(row)
if not p.peep() or not p.peep().value:
if current:
groups.append(current)
current = []
print("Found {} sentences.".format(len(groups)))
if len(groups) != len(doc):
print("Wrong token files")
exit()
for sent, row in zip(doc, groups):
sent.tokens = [tk[0] for tk in row]
for token, (text, pos) in zip(sent, row):
token.text = text
token.pos = pos
# output stuff
if args.output:
ttl.TxtWriter.from_path(args.output).write_doc(doc)
print("Written {} sentences to {}".format(len(doc), args.output))
print("Done")
def corenlp_to_ttl(cli, args):
print("Core NLP output file: {}".format(args.input))
print("TTL file: {}".format(args.output))
print("Source (raw) file: {}".format(args.raw))
cn_sents = json.loads(chio.read_file(args.input))['sentences']
print("Found {} core-nlp sents".format(len(cn_sents)))
raw_sents = chio.read_file(args.raw).splitlines()
_writer = get_ttl_writer(args.output, ttl_format=args.ttl_format, id_seed=args.seed)
for sent_text, cn_sent in zip(raw_sents, cn_sents):
ttl_sent = ttl.Sentence(sent_text)
ttl_sent.tokens = (cn_tk['originalText'] for cn_tk in cn_sent['tokens'])
for ttl_tk, cn_tk in zip(ttl_sent, cn_sent['tokens']):
if 'lemma' in cn_tk:
ttl_tk.lemma = cn_tk['lemma']
if 'pos' in cn_tk:
ttl_tk.pos = cn_tk['pos']
_writer.write_sent(ttl_sent)
print("{} sentences was written to {}".format(len(raw_sents), args.output))
def semeval_to_ttl(cli, args):
print("Semeval file: {}".format(args.input))
print("Semeval key file: {}".format(args.keys))
print("TTL file: {}".format(args.output))
print("TTL format: {}".format(args.ttl_format))
# Read document data
tree = etree.iterparse(args.input)
doc = ttl.Document()
sent_id_map = {}
for event, element in tree:
if event == 'end' and element.tag == 'sentence':
# do some processing here
sent_ident = element.get('id')
tokens = []
tids = []
# docID & sentID
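            # Sentence idents are assumed to follow the SemEval pattern "d001.s003"
            # (chars 1-3 = document id, 6-8 = sentence id); token idents extend it, e.g. "d001.s003.t005".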
docID = sent_ident[1:4]
sent_id = sent_ident[6:9]
wfs = []
for wf in element:
wident, lemma, pos, text = wf.get('id'), wf.get('lemma'), wf.get('pos'), wf.text
wfs.append((wident, lemma, pos, text))
wid = wident[11:]
tokens.append(text)
tids.append('{}/{}'.format(wid, lemma))
sent_text = StringTool.detokenize(tokens)
print("Doc: {} - Sent: {} - {}".format(docID, sent_id, sent_text))
sent_obj = doc.new_sent(text=sent_text)
sent_obj.new_tag(label=sent_ident, tagtype='origid')
sent_id_map[sent_ident] = sent_obj
sent_obj.tokens = tokens # add original token in
for (sent_token, (wident, lemma, pos, text)) in zip(sent_obj, wfs):
sent_token.new_tag(label=wident, tagtype='origid')
if pos:
sent_token.pos = pos
if lemma:
sent_token.lemma = lemma
element.clear()
# Read tag data
if args.keys:
keys = chio.read_tsv(args.keys)
wn = get_wn()
not_found = 0
mwe_count = 0
# TODO Add option to split a semeval file into several documents
for line in keys:
from_token = line[0]
from_token_idx = int(from_token[-3:]) - 1
sent_id = from_token[:9]
to_token = line[1]
to_token_idx = int(to_token[-3:]) - 1
if from_token != to_token:
mwe_count += 1
print("MWE: {}".format(line))
bbss = line[2]
wn_keys = [x[3:] for x in line[3:] if x.startswith('wn:')]
found_ss = None
for wn_key in wn_keys:
ss = wn.get_by_key(wn_key)
if ss is not None:
# print("{} => {}".format(" ".join(wn_keys), ss))
sent_id_map[sent_id].new_concept(tag=str(ss.ID), tokens=range(from_token_idx, to_token_idx + 1))
found_ss = ss
break
if found_ss is None:
getLogger().warning("Not found: {}".format(line))
not_found += 1
print("Total: {} - Not found: {} - MWE: {}".format(len(keys), not_found, mwe_count))
ttl.write(args.output, doc, mode=args.ttl_format)
print("Output file: {}".format(args.output))
def check_ttl_stats(cli, args):
gold = read_ttl(args.path, ttl_format=args.ttl_format)
tag_count = 0
for sent in gold:
tag_count += len([t for t in sent.tags if t.tagtype in ('WN', 'EXTRA', 'OMW')])
print("Sense count: {}".format(tag_count))
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
def main():
''' TTL processors '''
app = CLIApp(desc='ISF TTL Toolkit', logger=__name__)
# add tasks
task = app.add_task('cmp', func=compare_ttls)
task.add_argument('-g', '--gold_profile', help='Gold Profile', default=None)
task.add_argument('-p', '--profile', help='Profile for evaluation', default=None)
task.add_argument('--debug', help='Debug file')
task.add_argument('--ignore', help='Sentence IDs to ignore')
task.add_argument('--nonsense', help='Count nonsense tags too', action="store_true")
task.add_argument('--batch', help='Output in batch mode only (no detailed summary)', action="store_true")
task.add_argument('--org', help='Output ORG-mode format', action="store_true")
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task.add_argument('--cols', help='Extra columns', nargs='*')
task.add_argument('--bake', action='store_true')
task = app.add_task('msw', func=remove_msw_ttl)
task.add_argument('path', help='Path to TTL document')
task.add_argument('--manual', help='Manual entries to be removed')
task.add_argument('--wn30', help='Only keep PWN3.0 synsets', action='store_true')
task.add_argument('-n', '--topk', help='Only process top k items')
task.add_argument('-o', '--output', help='New TTL path')
task.add_argument('--bake', action='store_true')
task.add_argument('--debug', help='Debug file')
task = app.add_task('strip', func=strip_ttl)
task.add_argument('path', help='Path to TTL document')
task.add_argument('--noconcept', help='Remove concepts', action='store_true')
task.add_argument('--notag', help='Remove tags', action='store_true')
task.add_argument('-o', '--output', help='New TTL path')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('bake', func=concept_to_tags)
task.add_argument('path', help='Path to TTL document')
task.add_argument('-o', '--output', help='New TTL path')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('stats', func=check_ttl_stats)
task.add_argument('path', help='Path to TTL document')
task.add_argument('-o', '--output', help='Output log file')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('totxt', func=ttl_to_txt)
task.add_argument('path', help='Path to TTL document')
task.add_argument('-o', '--output', help='Output text file')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('tojson', func=tsv_to_json)
task.add_argument('input', help='Path to TTL/TSV document')
task.add_argument('output', help='Path to TTL/JSON output document')
task.add_argument('--seed', default=1, type=int)
task.add_argument('--textonly', action='store_true')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('fixid', func=fix_ttl_id)
task.add_argument('input', help='Path to TTL/TSV document')
task.add_argument('output', help='Path to TTL/JSON output document')
task.add_argument('--seed', default=10000, type=int)
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('fromtxt', func=txt_to_ttl)
task.add_argument('input', help='Path to TXT file')
task.add_argument('output', help='Path to TTL output document')
task.add_argument('--seed', default=10000, type=int)
task.add_argument('--with_idcolumn', action='store_true')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('import', func=import_ttl_data)
task.add_argument('path', help='Path to TTL document')
task.add_argument('-o', '--output', help='Output text file')
task.add_argument('--tokens', help='Path to token file')
task = app.add_task('corenlp', func=corenlp_to_ttl)
task.add_argument('input', help='Path to core NLP file')
task.add_argument('output', help='Path to TTL/JSON output document')
task.add_argument('--raw', help='Raw file')
task.add_argument('--seed', default=1, type=int)
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_TSV, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('semeval', func=semeval_to_ttl)
task.add_argument('input', help='Path to semeval file')
task.add_argument('output', help='Path to TTL/JSON output document')
task.add_argument('--raw', help='Raw file')
task.add_argument('--keys', help='Key file')
task.add_argument('--seed', default=1, type=int)
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('to_ukb', func=ttl_to_ukb)
task.add_argument('input', help='Path to TTL file')
task.add_argument('output', help='Path to UKB output file')
task.add_argument('-n', '--topk', help='Only process top k items', type=int)
task.add_argument('--ident', nargs='*')
task.add_argument('--strict', action='store_true')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('to_tokens', func=ttl_to_tokens)
task.add_argument('input', help='Path to TTL file')
task.add_argument('output', help='Path to token output file')
task.add_argument('-n', '--topk', help='Only process top k items', type=int)
task.add_argument('--ident', nargs='*')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
task = app.add_task('ukb', func=ukb_to_ttl)
task.add_argument('input', help='Path to UKB output file')
task.add_argument('ttl', help='Input TTL file')
task.add_argument('--tokens', help='Path to token file')
task.add_argument('-o', '--output', help='Path to TTL output file')
task.add_argument('--seed', default=1, type=int)
task.add_argument('--removetags', help='Remove all sentence-tags from input TTL file', action='store_true')
task.add_argument('--ttl_format', help='TTL format', default=ttl.MODE_JSON, choices=[ttl.MODE_JSON, ttl.MODE_TSV])
# run app
app.run()
if __name__ == "__main__":
main()
|
mit
| -2,547,709,474,229,150,700 | 43.134615 | 157 | 0.558196 | false |
Joshuaalbert/IonoTomo
|
src/ionotomo/notebooks/ionosphere_characteristics.py
|
1
|
9107
|
# coding: utf-8
# In[1]:
#%matplotlib
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
from ionotomo import *
from ionotomo.tomography.simulate import SimulateTec
import tensorflow as tf
import numpy as np
import gpflow as gpf
import pymc3 as pm
import os
import pylab as plt
import seaborn as sns
###
# Create radio array
load_preexisting = True
datapack_to_load = "../data/rvw_datapack_full_phase_dec27.hdf5"
if load_preexisting:
datapack_facets = DataPack(filename=datapack_to_load)
_,timestamps_flag = datapack_facets.get_times(-1)
timestamps_flag = timestamps_flag[1:]
freqs_flag = datapack_facets.get_freqs(-1)
keep_freqs = freqs_flag[200:220]
freqs_flag = freqs_flag[np.bitwise_not(np.isin(freqs_flag,keep_freqs))]
datapack_facets.flag_times(timestamps_flag)
#datapack_facets.flag_freqs(freqs_flag)
#Flagged all but first time, channels 200-219, etc
else:
ra = 126
dec = 64
timestamp = "2016-12-08T23:25:01.384"
radio_array = generate_example_radio_array(config='lofar')
p0 = ac.SkyCoord(ra=ra*au.deg,dec=dec*au.deg, frame='icrs')
obstime = at.Time(timestamp,format='isot')
location = radio_array.get_center()
altaz = ac.AltAz(location = location, obstime = obstime)
p = p0.transform_to(altaz)
print(p)
datapack_facets = generate_example_datapack(alt=p.alt.deg,az=p.az.deg,Ndir=42,Nfreqs=20,Ntime=1,radio_array=radio_array)
datapack_screen = phase_screen_datapack(15,datapack=datapack_facets)
times, timestamps = datapack_facets.get_times(-1)
antennas,antenna_labels = datapack_facets.get_antennas(-1)
freqs = datapack_facets.get_freqs(-1)
phase_track = datapack_facets.get_center_direction()
obstime = times[0]
location = datapack_facets.radio_array.get_center()
directions_facets,_ = datapack_facets.get_directions(-1)
Nd1 = directions_facets.shape[0]
directions_screen,_ = datapack_screen.get_directions(-1)
Nd2 = directions_screen.shape[0]
X_facets = np.array([directions_facets.ra.deg,directions_facets.dec.deg]).T
X_screen = np.array([directions_screen.ra.deg,directions_screen.dec.deg]).T
# uvw = UVW(location = location,obstime=obstime,phase = phase_track)
# X0 = directions_facets.transform_to(uvw)
# X0 = np.array([np.arctan2(X0.u.value,X0.w.value),np.arctan2(X0.v.value,X0.w.value)]).T
# X1 = directions_screen.transform_to(uvw)
# X1 = np.array([np.arctan2(X1.u.value,X1.w.value),np.arctan2(X1.v.value,X1.w.value)]).T
# x_scale = np.mean(np.std(X1,axis=0))
# X1 /= x_scale
# X0 /= x_scale
###
# Generate ionospheres following I(sigma, l)
def sample_ionosphere(sim,sigma,l):
"""Generate an ionosphere, I(sigma,l).
    sim : SimulateTec object (non reentrant)
    sigma : float log_electron variance
    l : float length scale
    Returns the model as an ndarray
"""
sim.generate_model(sigma, l)
model = sim.model
return model
###
# simulate and place in datapack_screen
def simulate_screen(sim,datapack,aj=0,s=1.01,ls=10.,draw_new=False):
if draw_new:
sim.generate_model(s,ls)
tec = sim.simulate_tec()
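    # Dispersive delay (editor note): phase [rad] ~= -8.4479e9 * TEC [TECU] / freq [Hz],
    # the standard ionospheric phase relation; the sign follows the delay convention.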
phase = tec[...,None]*-8.4479e9/freqs
datapack.set_phase(phase,ant_idx=-1,time_idx=[aj],dir_idx=-1,freq_idx=-1)
return tec
def log_posterior_true(tec,X1, tec_obs, X0,samples=1000):
"""
    Calculate the logp of the true underlying tec field given the observed values.
tec : array (Nd1,)
X1 : array (Nd1,2)
tec_obs : array (Nd2,)
X0 : array (Nd2, 2)
"""
with pm.Model() as model:
l = pm.Exponential('l',1.)
sigma = pm.Exponential('sigma',1.)
#c = pm.Normal('c',mu=0,sd=1)
cov_func = pm.math.sqr(sigma)*pm.gp.cov.ExpQuad(1, ls=l)
#mean_func = pm.gp.mean.Constant(c=c)
gp = pm.gp.Marginal(cov_func=cov_func)
eps = pm.HalfNormal('eps',sd=0.1)
y0_ = gp.marginal_likelihood('y0',X0,tec_obs,eps)
mp = pm.find_MAP()
print(mp)
trace = pm.sample(samples,start={'sigma':0.25,'l':0.25},chains=4)
pm.traceplot(trace,combined=True)
plt.show()
print(pm.summary(trace))
df = pm.trace_to_dataframe(trace, varnames=['sigma','l','eps'])
sns.pairplot(df)
plt.show()
with model:
y1_ = gp.conditional('y1',X1)#,given={'X':X0,'y':y0,'noise':0.1})
logp = y1_.logp
logp_val = np.zeros(len(trace))
for i,point in enumerate(trace):
point['y1'] = tec
logp_val[i] = logp(point)
return logp_val
# tec = simulate_screen(sim,datapack_screen,draw_new=True)
# d_mask = np.random.choice(Nd2,size=Nd1,replace=False)
# logp = log_posterior_true(tec[51,0,:],X1,tec[51,0,d_mask],X1[d_mask,:])
# logp = {}
# d_mask = np.random.choice(Nd2,size=Nd1,replace=False)
# for i in range(10):
# tec = simulate_screen(sim,datapack_screen,draw_new=True)
# logp[i] = []
# for ai in range(1,62):
# print(antenna_labels[ai])
# tec_mean = np.mean(tec[ai,0,:])
# tec_std = np.std(tec[ai,0,:])
# tec_ = (tec[ai,0,:] - tec_mean) / tec_std
# logp[i].append(np.mean(log_posterior_true(tec_,X1,tec_[d_mask],X1[d_mask,:])))
# In[2]:
import theano as th
def solve_vi(X,Y,initial=None,batch_size=100):
X_t = th.shared(X)#pm.Minibatch(X,batch_size=batch_size,)
Y_t = th.shared(Y)#pm.Minibatch(Y,batch_size=batch_size)
# sigma_Y_t = th.shared(sigma_Y)#pm.Minibatch(sigma_Y,batch_size=batch_size)
#initial=(0.3,0.5,2.)
dx = np.max(X) - np.min(X)
dy = np.max(Y) - np.min(Y)
with pm.Model() as model:
sigma_K = pm.HalfNormal('sigma_K',sd=dy/3.)
l_space = pm.HalfNormal('l_space',sd=dx/3.,testval=1.)
cov_func = sigma_K**2 * pm.gp.cov.ExpQuad(2,active_dims=[0,1], ls=l_space)
gp = pm.gp.Marginal(cov_func=cov_func)
eps = pm.Uniform('eps',0.0,np.std(Y))
y1 = gp.marginal_likelihood('y1',X_t,Y_t,eps)
#y2 = gp.marginal_likelihood('y2',X[:100,:],Y[:100],eps*sigma_Y[:100])
initial = initial or pm.find_MAP()
approx = pm.fit(1000, start=initial,method='advi',callbacks=[pm.callbacks.CheckParametersConvergence(tolerance=1e-4)])
# plt.plot(approx.hist)
# plt.show()
means = approx.bij.rmap(approx.mean.eval())
# print(means)
# sds = approx.bij.rmap(approx.std.eval())
# print(sds)
df = approx.sample(10000)
p={k:pm.summary(df)['mean'][k] for k in pm.summary(df)['mean'].keys()}
# pm.traceplot(df,lines=p)
# plt.show()
return p
from ionotomo.bayes.gpflow_contrib import GPR_v2
def solve_gpf(X,Y,initial=None,batch_size=100):
dx = np.max(X[:,0]) - np.min(X[:,0])
dy = np.max(Y) - np.min(Y)
with gpf.defer_build():
k_space = gpf.kernels.RBF(2,active_dims = [0,1],lengthscales=[0.1])
kern = k_space
mean = gpf.mean_functions.Constant()
m = GPR_v2(X, Y[:,None], kern, mean_function=mean,var=1.,trainable_var=True)
m.kern.lengthscales.prior = gpf.priors.Uniform(0,dx)
m.kern.variance.prior = gpf.priors.Uniform(0,dy)
m.compile()
o = gpf.train.ScipyOptimizer(method='BFGS')
o.minimize(m,maxiter=100)
ls= m.kern.lengthscales.value[0]
v = m.kern.variance.value
#print(m)
return {"l_space":ls,"var":v, 'eps': m.likelihood.variance.value}
def _solve_gpf(arg):
X,Y,initial = arg
with tf.Session(graph=tf.Graph()):
return solve_gpf(X,Y,initial)
from concurrent import futures
def parallel_solve_gpf(X,Y,initial=None,num_threads=1):
"""Assume batch dimension 0"""
batch = Y.shape[0]
with futures.ThreadPoolExecutor(max_workers=num_threads) as exe:
args = []
for i in range(batch):
args.append((X,Y[i,...],initial))
jobs = exe.map(_solve_gpf,args)
results = list(jobs)
return results
# In[3]:
def determine_simulated_characteristics(X_screen, freqs, s, l, num_threads):
sim = SimulateTec(datapack_screen,spacing=1.,res_n=501)
print("Generating {} km scale".format(l))
sim.generate_model(s,l)
print("Simulating {} km scale".format(l))
tec = sim.simulate_tec()
phase = tec[...,None]*-8.4479e9/freqs
results = parallel_solve_gpf(X_screen,phase[1:,0,:,0],num_threads=num_threads)
stats = {
'l_space':[r['l_space'] for r in results],
'var':[r['var'] for r in results],
'eps':[r['eps'] for r in results]
}
return stats
def _determine_simulated_characteristics(arg):
return determine_simulated_characteristics(*arg)
with futures.ProcessPoolExecutor(max_workers=4) as pexe:
args = []
for l in np.linspace(5.,50.,1):
args.append(( X_screen, freqs, 1.008, l, 16))
jobs = pexe.map(_determine_simulated_characteristics, args)
results = {l: r for l,r in zip(np.linspace(5.,50.,1),list(jobs))}
from ionotomo import DatapackPlotter
# simulate_screen(sim,datapack_screen,s=1.01,ls=10.,draw_new=True)
# dp = DatapackPlotter(datapack = datapack_screen)
# dp.plot(observable='phase',show=True,labels_in_radec=True,plot_crosses=False)
|
apache-2.0
| 6,884,605,889,395,511,000 | 31.996377 | 126 | 0.635665 | false |
analysiscenter/dataset
|
batchflow/notifier.py
|
1
|
14105
|
""" Progress notifier. """
import math
from time import time, gmtime, strftime
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from tqdm.auto import tqdm as tqdm_auto
import numpy as np
from IPython import display
import matplotlib.pyplot as plt
from .monitor import ResourceMonitor, MONITOR_ALIASES
from .named_expr import NamedExpression, eval_expr
class DummyBar:
""" Progress tracker without visual representation. """
#pylint: disable=invalid-name
def __init__(self, total, *args, **kwargs):
self.total = total
self.args, self.kwargs = args, kwargs
self.n = 0
self.start_t = time()
def update(self, n):
self.n += n
def format_meter(self, n, total, t, **kwargs):
_ = kwargs
return f'{n}/{total} iterations done; elapsed time is {t:3.3} seconds'
def sp(self, *args, **kwargs):
_ = args, kwargs
def set_description(self, *args, **kwargs):
_ = args, kwargs
def close(self):
pass
class Notifier:
""" Progress tracker and a resource monitor tool in one.
Parameters
----------
    bar : {'n', 'a', 'j', 't', True} or callable
        Sets the type of used progress bar:
        - `callable` must provide a tqdm-like interface.
        - `n` or `j` stands for the notebook (jupyter) version of the tqdm bar.
        - `a` stands for automatic choice of an appropriate tqdm bar.
        - `t` or True stands for the standard text tqdm bar.
- otherwise, no progress bar will be displayed. Note that iterations,
as well as everything else (monitors, variables, logs) are still tracked.
total, batch_size, n_iters, n_epochs, length : int
        Parameters used to calculate the total number of iterations.
drop_last : bool
Whether the last batch of data is dropped from iterations.
    monitors : str, :class:`.ResourceMonitor` or sequence of them
        Allows to monitor resources. Strings should be registered aliases for monitors like `cpu`, `gpu`, etc.
    graphs : str, :class:`.NamedExpression` or sequence of them
        Allows to set trackable entities from the pipeline the Notifier is used in:
        If str, then stands for the name of the pipeline variable (or a monitor alias) to fetch.
        If any of the named expressions, then it is evaluated with the pipeline.
        Tracked containers are also drawn on graphs at each update. Note that rendering takes a lot of time.
    frequency : int
        Period, in iterations, of updating tracked data, description, plots and the log file.
    file : str
        If provided, path to a file to log iteration-wise info (timestamp, iteration, description) into.
    savepath : str
        If provided, path prefix for saving the drawn figures.
window : int
Allows to plot only the last `window` values from every tracked container.
layout : str
If `h`, then subplots are drawn horizontally; vertically otherwise.
figsize : tuple of numbers
Total size of drawn figure.
*args, **kwargs
Positional and keyword arguments that are used to create underlying progress bar.
"""
def __init__(self, bar=None, *args,
total=None, batch_size=None, n_iters=None, n_epochs=None, drop_last=False, length=None,
frequency=1, monitors=None, graphs=None, file=None,
window=None, layout='h', figsize=None, savepath=None, **kwargs):
# Prepare data containers like monitors and pipeline variables
if monitors:
monitors = monitors if isinstance(monitors, (tuple, list)) else [monitors]
else:
monitors = []
if graphs:
graphs = graphs if isinstance(graphs, (tuple, list)) else [graphs]
else:
graphs = []
self.has_monitors = False
self.has_graphs = len(graphs) > 0
self.n_monitors = len(monitors)
self.data_containers = []
for container in monitors + graphs:
if not isinstance(container, dict):
container = {'source': container}
if isinstance(container['source'], str) and container['source'].lower() in MONITOR_ALIASES:
container['source'] = MONITOR_ALIASES[container['source'].lower()]()
source = container.get('source')
if source is None:
raise ValueError('Passed dictionaries as `monitors` or `graphs` should contain `source` key!')
if isinstance(source, ResourceMonitor):
self.has_monitors = True
if 'name' not in container:
if isinstance(source, ResourceMonitor):
container['name'] = source.__class__.__name__
elif isinstance(source, NamedExpression):
container['name'] = source.name
elif isinstance(source, str):
container['name'] = source
self.data_containers.append(container)
self.frequency = frequency
self.timestamps = []
self.start_monitors()
# Prepare file log
self.file = file
if self.file:
with open(self.file, 'w') as _:
pass
# Create bar; set the number of total iterations, if possible
self.bar = None
if callable(bar):
bar_func = bar
elif bar in ['n', 'nb', 'notebook', 'j', 'jpn', 'jupyter']:
bar_func = tqdm_notebook
elif bar in ['a', 'auto']:
bar_func = tqdm_auto
elif bar in [True, 't', 'tqdm']:
bar_func = tqdm
else:
bar_func = DummyBar
# Set default values for bars
if 'ncols' not in kwargs:
if bar_func == tqdm_notebook:
kwargs['ncols'] = min(700 + 100 * len(monitors or []), 1000)
elif bar_func == tqdm:
kwargs['ncols'] = min(80 + 10 * len(monitors or []), 120)
self.bar_func = lambda total: bar_func(total=total, *args, **kwargs)
self.update_total(total=total, batch_size=batch_size, n_iters=n_iters, n_epochs=n_epochs,
drop_last=drop_last, length=length)
# Prepare plot params
#pylint: disable=invalid-unary-operand-type
self.slice = slice(-window, None, None) if isinstance(window, int) else slice(None)
self.layout, self.figsize, self.savepath = layout, figsize, savepath
def update_total(self, batch_size, n_iters, n_epochs, drop_last, length, total=None):
""" Re-calculate total number of iterations. """
if total is None:
if n_iters is not None:
total = n_iters
if n_epochs is not None:
if drop_last:
total = length // batch_size * n_epochs
else:
total = math.ceil(length * n_epochs / batch_size)
# Force close previous bar, create new
if self.bar is not None:
try:
# jupyter bar must be closed and reopened
self.bar.sp(close=True)
self.bar = self.bar_func(total=total)
except TypeError:
# text bar can work with a simple reassigning of `total`
self.bar.total = total
else:
self.bar = self.bar_func(total=total)
def update(self, n=1, pipeline=None, batch=None):
""" Update Notifier with new info:
- fetch up-to-date data from batch, pipeline and monitors
- set bar description
- draw plots anew
- update log file
- increment underlying progress bar tracker
"""
if (self.bar.n + 1) % self.frequency == 0 or (self.bar.n == self.bar.total - 1):
self.timestamps.append(gmtime())
if self.data_containers:
self.update_data(pipeline=pipeline, batch=batch)
self.update_description()
if self.has_graphs:
self.update_plots(self.n_monitors, True)
if self.file:
self.update_file()
self.bar.update(n)
def update_data(self, pipeline=None, batch=None):
""" Get data from monitor or pipeline. """
for container in self.data_containers:
source = container['source']
if isinstance(source, ResourceMonitor):
source.fetch()
container['data'] = source.data
elif isinstance(source, NamedExpression):
value = eval_expr(source, pipeline=pipeline, batch=batch)
container['data'] = value
elif isinstance(source, str):
value = pipeline.v(source)
container['data'] = value
def update_description(self):
""" Set new bar description. """
description = self.create_description(iteration=-1)
self.bar.set_description(description)
def update_plots(self, index=0, add_suptitle=False, savepath=None, clear_display=True):
""" Draw plots anew. """
num_graphs = len(self.data_containers) - index
layout = (1, num_graphs) if self.layout.startswith('h') else (num_graphs, 1)
figsize = self.figsize or ((20, 5) if self.layout.startswith('h') else (20, 5*num_graphs))
if clear_display:
display.clear_output(wait=True)
fig, ax = plt.subplots(*layout, figsize=figsize)
ax = ax if isinstance(ax, np.ndarray) else [ax]
for i, container in enumerate(self.data_containers):
if i >= index:
source = container['source']
name = container['name']
plot_function = container.get('plot_function')
if isinstance(source, ResourceMonitor):
data_x = np.array(source.ticks)[self.slice] - source.ticks[0]
data_y = source.data[self.slice]
x_label, y_label = 'Time, s', source.UNIT
else:
data_y = container['data']
data_x = list(range(len(data_y)))[self.slice]
data_y = data_y[self.slice]
x_label, y_label = 'Iteration', ''
if plot_function is not None:
plot_function(fig=fig, ax=ax[i - index], i=i,
data_x=data_x, data_y=data_y, container=container)
# Default plotting functionality
elif isinstance(data_y, (tuple, list)) or (isinstance(data_y, np.ndarray) and data_y.ndim == 1):
ax[i - index].plot(data_x, data_y)
ax[i - index].set_title(name, fontsize=12)
ax[i - index].set_xlabel(x_label, fontsize=12)
ax[i - index].set_ylabel(y_label, fontsize=12, rotation='horizontal', labelpad=15)
ax[i - index].grid(True)
elif isinstance(data_y, np.ndarray) and data_y.ndim == 2:
ax[i - index].imshow(data_y)
ax[i - index].set_title(name, fontsize=12)
if add_suptitle:
title = self.format_meter(self.n+1, self.total, time()-self.start_t, ncols=80)
plt.suptitle(title, y=0.99, fontsize=14)
savepath = savepath or (f'{self.savepath}_{self.bar.n}' if self.savepath is not None else None)
if savepath:
plt.savefig(savepath, bbox_inches='tight', pad_inches=0)
plt.show()
def update_file(self):
""" Update file on the fly. """
with open(self.file, 'a+') as f:
print(self.create_message(self.bar.n, self.bar.desc[:-2]), file=f)
def visualize(self):
""" Convenient alias for working with an instance. """
self.update_plots(clear_display=False)
def to_file(self, file):
""" Log all the iteration-wise info (timestamps, descriptions) into file."""
with open(file, 'w') as f:
for i in range(self.bar.n):
description = self.create_description(iteration=i)
print(self.create_message(i, description), file=f)
def __call__(self, iterable):
self.update_total(0, 0, 0, 0, 0, total=len(iterable))
for item in iterable:
yield item
self.update()
self.close()
def close(self):
""" Close the underlying progress bar. """
self.bar.close()
self.stop_monitors()
# Utility functions
def start_monitors(self):
""" Start collection of data for every resource monitor. """
for container in self.data_containers:
source = container['source']
if isinstance(source, ResourceMonitor):
source.start()
def stop_monitors(self):
""" Stop collection of data for every resource monitor. """
for container in self.data_containers:
source = container['source']
if isinstance(source, ResourceMonitor):
source.stop()
def create_description(self, iteration):
""" Create string description of a given iteration. """
description = []
for container in self.data_containers:
source = container['source']
name = container['name']
if isinstance(source, (str, NamedExpression)):
value = container['data'][iteration]
if isinstance(value, (int, float, np.signedinteger, np.floating)):
desc = f'{name}={value:<6.6}' if isinstance(value, float) else f'{name}={value:<6}'
description.append(desc)
return '; '.join(description)
def create_message(self, iteration, description):
""" Combine timestamp, iteration and description into one string message. """
timestamp = strftime("%Y-%m-%d %H:%M:%S", self.timestamps[iteration])
return f'{timestamp} Iteration {iteration:5}; {description}'
def __getattr__(self, key):
""" Redirect everything to the underlying bar. """
        if key not in self.__dict__ and hasattr(self.bar, key):
return getattr(self.bar, key)
raise AttributeError(key)
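# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of driving a Notifier over a plain iterable with a text bar;
# the `items` list and the doubling "work" are hypothetical. Within pipelines this object
# is normally constructed by the pipeline itself rather than by hand.
def _notifier_usage_example():
    items = list(range(10))
    notifier = Notifier(bar='t')
    collected = []
    for item in notifier(items):    # __call__ sets the total and updates once per item
        collected.append(item * 2)  # the actual per-iteration work goes here
    return collected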
|
apache-2.0
| 1,683,971,285,586,305,300 | 38.844633 | 112 | 0.575257 | false |
bgruening/docker-ipython-notebook
|
galaxy.py
|
1
|
5828
|
#!/usr/bin/env python
from bioblend.galaxy import objects
import subprocess
import argparse
import os
from string import Template
import logging
DEBUG = os.environ.get('DEBUG', "False").lower() == 'true'
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("bioblend").setLevel(logging.CRITICAL)
log = logging.getLogger()
def _get_ip():
"""Get IP address for the docker host
"""
cmd_netstat = ['netstat','-nr']
p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)
    cmd_grep = ['grep', r'^0\.0\.0\.0']
p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)
cmd_awk = ['awk', '{ print $2 }']
p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)
galaxy_ip = p3.stdout.read()
log.debug('Host IP determined to be %s', galaxy_ip)
return galaxy_ip
def _test_url(url, key, history_id):
"""Test the functionality of a given galaxy URL, to ensure we can connect
on that address."""
try:
gi = objects.GalaxyInstance(url, key)
gi.histories.get(history_id)
log.debug('Galaxy URL %s is functional', url)
return gi
except Exception:
return None
def get_galaxy_connection(history_id=None):
"""
    Given access to the configuration dict that galaxy passed us, we try to connect to galaxy's API.
First we try connecting to galaxy directly, using an IP address given
us by docker (since the galaxy host is the default gateway for docker).
Using additional information collected by galaxy like the port it is
running on and the application path, we build a galaxy URL and test our
connection by attempting to get a history listing. This is done to
avoid any nasty network configuration that a SysAdmin has placed
between galaxy and us inside docker, like disabling API queries.
If that fails, we failover to using the URL the user is accessing
through. This will succeed where the previous connection fails under
the conditions of REMOTE_USER and galaxy running under uWSGI.
"""
history_id = history_id or os.environ['HISTORY_ID']
key = os.environ['API_KEY']
### Customised/Raw galaxy_url ###
galaxy_ip = _get_ip()
# Substitute $DOCKER_HOST with real IP
url = Template(os.environ['GALAXY_URL']).safe_substitute({'DOCKER_HOST': galaxy_ip})
gi = _test_url(url, key, history_id)
if gi is not None:
return gi
### Failover, fully auto-detected URL ###
# Remove trailing slashes
app_path = os.environ['GALAXY_URL'].rstrip('/')
# Remove protocol+host:port if included
app_path = ''.join(app_path.split('/')[3:])
if 'GALAXY_WEB_PORT' not in os.environ:
# We've failed to detect a port in the config we were given by
# galaxy, so we won't be able to construct a valid URL
raise Exception("No port")
else:
# We should be able to find a port to connect to galaxy on via this
# conf var: galaxy_paster_port
galaxy_port = os.environ['GALAXY_WEB_PORT']
built_galaxy_url = 'http://%s:%s/%s' % (galaxy_ip.strip(), galaxy_port, app_path.strip())
url = built_galaxy_url.rstrip('/')
gi = _test_url(url, key, history_id)
if gi is not None:
return gi
### Fail ###
msg = "Could not connect to a galaxy instance. Please contact your SysAdmin for help with this error"
raise Exception(msg)
def put(filename, file_type='auto', history_id=None):
"""
Given a filename of any file accessible to the docker instance, this
function will upload that file to galaxy using the current history.
Does not return anything.
"""
gi = get_galaxy_connection(history_id=history_id)
history_id = history_id or os.environ['HISTORY_ID']
history = gi.histories.get( history_id )
history.upload_dataset(filename, file_type=file_type)
def get(dataset_id, history_id=None):
"""
Given the history_id that is displayed to the user, this function will
download the file from the history and stores it under /import/
Return value is the path to the dataset stored under /import/
"""
history_id = history_id or os.environ['HISTORY_ID']
gi = get_galaxy_connection(history_id=history_id)
file_path = '/import/%s' % dataset_id
# Cache the file requests. E.g. in the example of someone doing something
# silly like a get() for a Galaxy file in a for-loop, wouldn't want to
# re-download every time and add that overhead.
if not os.path.exists(file_path):
history = gi.histories.get(history_id)
datasets = dict([( d.wrapped["hid"], d.id ) for d in history.get_datasets()])
dataset = history.get_dataset( datasets[dataset_id] )
dataset.download( open(file_path, 'wb') )
return file_path
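# --- Illustrative usage sketch (editor addition) ---
# A hedged example of the notebook-side workflow; the dataset number 1 is hypothetical,
# and HISTORY_ID/API_KEY must already be set in the environment by the container.
def _example_usage():
    local_path = get(1)                 # download history item #1 into /import/
    put(local_path, file_type='auto')   # re-upload it into the current history
    return local_path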
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Connect to Galaxy through the API')
parser.add_argument('--action', help='Action to execute', choices=['get', 'put'])
parser.add_argument('--argument', help='File/ID number to Upload/Download, respectively')
parser.add_argument('--history-id', dest="history_id", default=None,
help='History ID. The history ID and the dataset ID uniquly identify a dataset. Per default this is set to the current Galaxy history.')
parser.add_argument('-t', '--filetype', help='Galaxy file format. If not specified Galaxy will try to guess the filetype automatically.', default='auto')
args = parser.parse_args()
if args.action == 'get':
# Ensure it's a numerical value
get(int(args.argument), history_id=args.history_id)
elif args.action == 'put':
put(args.argument, file_type=args.filetype, history_id=args.history_id)
|
mit
| 752,225,443,310,770,800 | 39.193103 | 157 | 0.666095 | false |
VolVoz/keysafe.software
|
read_card/read_card_model.py
|
1
|
1979
|
# -*- coding: utf8 -*-
from PyQt4 import QtGui, QtCore
import zmq
from design import read_card_design
from info_window.info_model import InfoWindow
from welcome_window.welcome_model import WelcomeWindow
from get_key.get_key_model import GetKeyWindow
from database.models import User, Key
class ReadCardWindow(QtGui.QMainWindow, read_card_design.Ui_ReadKeyWindow):
def __init__(self, parent=None):
super(self.__class__, self).__init__()
self.setupUi(self)
self.parent = parent
self.info_error = InfoWindow(label_text=u'Вибачте, сталася помилка, зверніться будь ласка до адміністратора')
def read_card_result(self):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5555')
socket.send("getTeacherId")
msg = socket.recv()
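        # The reader service is assumed to reply with the card's RFID string,
        # or the literal '0' when no card could be read.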
if msg != '0':
check_user = User.get_by_rfid(msg)
if check_user['warnings']:
self.close()
else:
self.login(check_user['data'])
self.close()
else:
self.close()
def welcome_window(self, label_text):
self.welcome = WelcomeWindow(label_text)
self.welcome.showFullScreen()
QtCore.QTimer.singleShot(5000, self.welcome.close)
def get_key_window(self, keys, user):
self.get_keys_window = GetKeyWindow(keys, user)
QtCore.QTimer.singleShot(5000, self.get_keys_window.showFullScreen)
def login(self, user):
username = user.firstname + u' ' + user.lastname
self.welcome_window(username)
keys = Key.get_all()
if keys['errors']:
self.info_error.showFullScreen()
QtCore.QTimer.singleShot(5000, self.info_error.close)
elif keys['warnings']:
self.get_key_window(keys=None, user=None)
else:
self.get_key_window(keys['data'], user=user)
|
gpl-3.0
| -7,997,574,167,434,149,000 | 34.611111 | 117 | 0.621425 | false |
juanka1331/VAN-applied-to-Nifti-images
|
final_scripts/region3d_plotter_mask.py
|
1
|
2187
|
import os
import sys
sys.path.append(os.path.dirname(os.getcwd()))
from lib.data_loader.mri_loader import load_mri_regions_segmented3d
from lib.data_loader.pet_loader import load_pet_regions_segmented
from lib.session_helper import select_regions_to_evaluate
from lib.utils import output_utils
from lib.utils.os_aux import create_directories
import settings
images = "PET"
#images = "MRI"
list_regions = select_regions_to_evaluate("all")
list_regions = [40]
sample_selected = 40
path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_region3d = os.path.join(path_folder3D, "region3D")
path_folder_images = os.path.join(path_folder_region3d,
"brain3D_img:{0}_sample:{1}".format(images, sample_selected))
create_directories([path_folder3D, path_folder_region3d, path_folder_images])
pet_regions_segmented = None
mri_gm_regions_segmented = None
mri_wm_regions_segmented = None
if images == "PET":
pet_regions_segmented = load_pet_regions_segmented(
list_regions=list_regions,
folder_to_store_3d_images=None,
bool_logs=True,
out_csv_region_dimensions=None)
for region in list_regions:
region_img_path = os.path.join(
path_folder_images,"region:{}".format(region))
output_utils.from_3d_image_to_nifti_file(
path_to_save=region_img_path,
image3d=pet_regions_segmented[region][sample_selected,:,:,:])
if images == "MRI":
tuple_regions_segmented = load_mri_regions_segmented3d(
list_regions=list_regions,
folder_to_store_3d_images=None,
bool_logs=True)
[mri_gm_regions_segmented, mri_wm_regions_segmented] = \
tuple_regions_segmented
for region in list_regions:
region_img_path = os.path.join(
path_folder_images, "region:{}".format(region))
output_utils.from_3d_image_to_nifti_file(
path_to_save=region_img_path + "_wm",
image3d=mri_wm_regions_segmented[region][sample_selected,:,:,:])
output_utils.from_3d_image_to_nifti_file(
path_to_save=region_img_path + "_gm",
image3d=mri_gm_regions_segmented[region][sample_selected,:,:,:])
|
gpl-2.0
| -430,380,061,067,501,500 | 33.171875 | 77 | 0.681756 | false |
raphv/cardmapper
|
src/cardapp/utils.py
|
1
|
2171
|
# -*- coding: utf-8 -*-
import re
from html import unescape
from bleach.sanitizer import Cleaner
from html5lib.filters.base import Filter
PARAGRAPH_TAGS = ['p', 'h1', 'h2', 'h3', 'h4', 'li']
STYLE_TAGS = ['strong', 'em']
class ProcessDescription(Filter):
def __iter__(self):
for token in Filter.__iter__(self):
if token['type'] == 'StartTag':
continue
if token['type'] in ['EndTag','EmptyTag']:
token = {'type': 'Characters', 'data': '\n'}
yield token
description_cleaner = Cleaner(
tags = PARAGRAPH_TAGS + ['br'],
filters = [ProcessDescription],
strip = True
)
newline_re = re.compile('\n{2,}')
def process_description(txt):
return unescape(
newline_re.sub(
'\n',
description_cleaner.clean(txt)
).strip()
)
class ProcessShortDescription(Filter):
max_length = 200
def __iter__(self):
current_length = 0
reached_max_length = False
nesting_level = 0
for token in Filter.__iter__(self):
if reached_max_length and nesting_level == 0:
return
if token['type'] in ['StartTag','EndTag'] and token['name'] in PARAGRAPH_TAGS:
token['name'] = 'p'
if token['type'] == 'EndTag':
nesting_level -= 1
if token['type'] == 'StartTag':
nesting_level += 1
if token['type'] in ['Characters', 'SpaceCharacters']:
if reached_max_length:
continue
total_length = current_length + len(token['data'])
if total_length > self.max_length:
reached_max_length = True
token['data'] = token['data'][:self.max_length-current_length] + '...'
token['type'] = 'Characters'
current_length = total_length
yield token
short_description_cleaner = Cleaner(
tags = PARAGRAPH_TAGS + STYLE_TAGS,
filters = [ProcessShortDescription],
strip = True
)
def process_short_description(txt):
return short_description_cleaner.clean(txt)
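# --- Illustrative usage sketch (editor addition) ---
# A hedged example with a hypothetical HTML fragment: the long cleaner flattens block
# tags to newline-separated plain text, while the short cleaner keeps basic markup but
# truncates the text content at roughly 200 characters.
def _example_usage():
    html = '<h2>Title</h2><p>Some <strong>bold</strong> text</p>'
    return process_description(html), process_short_description(html)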
|
mit
| -1,381,150,501,085,682,000 | 28.739726 | 90 | 0.543068 | false |
nico-ralf-ii-fpuna/paper
|
waf/data_sets/torpeda/__init__.py
|
1
|
5258
|
# -*- coding: utf-8 -*-
"""
Torpeda 2012 HTTP data sets.
http://www.tic.itefi.csic.es/torpeda/datasets.html
"""
# Copyright (C) 2017 Nico Epp and Ralf Funk
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from xml.etree import cElementTree as ElementTree
from xml.etree.ElementTree import ParseError
from typing import Iterable, List
from ...base import BASE_PATH
from ..base import Request, group_requests
_ORIGINAL_FILES_PATH = os.path.join(BASE_PATH, 'data_sets', 'torpeda', 'original_files')
NORMAL_FILE_NAMES = (
'allNormals1.xml',
)
ANOMALOUS_FILE_NAMES = (
'allAnomalies1.xml',
'allAnomalies2.xml',
'allAttacks1.xml',
'allAttacks2.xml',
'allAttacks3.xml',
'allAttacks4.xml',
'allAttacks5.xml',
)
SELECTED_ENDPOINT_LIST = ( # see comment in function 'print_info'
'POST /tienda1/miembros/editar.jsp',
'POST /tienda1/publico/registro.jsp',
)
DS_URL_LIST = tuple(
't{:02d}'.format(i)
for i in range(len(SELECTED_ENDPOINT_LIST))
)
def read_requests(file_name_list: Iterable[str]) -> List[Request]:
r_list = []
for filename in file_name_list:
try:
tree = ElementTree.parse(os.path.join(_ORIGINAL_FILES_PATH, filename))
for sample in tree.getroot():
# request data
r_elem = sample.find('request')
new_r = Request(
method=r_elem.find('method').text,
url=r_elem.find('path').text,
encoding='Windows-1252',
params_to_exclude=('ntc', )) # param 'ntc' is the same in all normal samples
new_r.original_str = '\n'.join(s.strip() for s in r_elem.itertext())
e = r_elem.find('headers')
if e is not None:
new_r.headers = e.text
e = r_elem.find('query')
if e is not None:
new_r.query_params = e.text
e = r_elem.find('body')
if e is not None:
new_r.body_params = e.text
# request classification
l_elem = sample.find('label')
new_r.label_type = l_elem.find('type').text
if new_r.label_type == 'attack':
new_r.label_attack = l_elem.find('attack').text
r_list.append(new_r)
except (FileNotFoundError, ParseError):
pass
return r_list
def print_info():
"""
This function gives the following output:
OBS: only printing urls which have normal and anomalous samples
-------------------------------------------------------------------------------
# | url and method | normal | anomalous | attack
-------------------------------------------------------------------------------
35 | POST /tienda1/miembros/editar.jsp | 5,608 | 8,090 | 2,031
68 | POST /tienda1/publico/registro.jsp | 2,522 | 8,145 | 5,018
-------------------------------------------------------------------------------
| SELECTED SAMPLES | 8,130 | 16,235 | 7,049
| TOTAL SAMPLES | 8,363 | 16,459 | 49,311
-------------------------------------------------------------------------------
"""
r_list = read_requests(NORMAL_FILE_NAMES + ANOMALOUS_FILE_NAMES)
d1 = group_requests(r_list, lambda r: '{} {}'.format(r.url, r.method))
print()
print('OBS: only printing urls which have normal and anomalous samples')
print('-' * 79)
print('{:4s} | {:41s} | {:6s} | {:9s} | {:6s}'.format(
'#', 'url and method', 'normal', 'anomalous', 'attack'))
print('-' * 79)
qty_total_normal = 0
qty_total_anomalous = 0
qty_total_attack = 0
qty_selected_normal = 0
qty_selected_anomalous = 0
qty_selected_attack = 0
for i, (k, v_list) in enumerate(sorted(d1.items())):
d2 = group_requests(v_list, lambda r: r.label_type)
qty_normal = len(d2.get('normal', []))
qty_anomalous = len(d2.get('anomalous', []))
qty_attack = len(d2.get('attack', []))
if qty_normal > 100 and (qty_anomalous > 100 or qty_attack > 100):
qty_selected_normal += qty_normal
qty_selected_anomalous += qty_anomalous
qty_selected_attack += qty_attack
url, method = k.split()
print('{:4d} | {:4s} {:36s} | {:6,d} | {:9,d} | {:6,d}'.format(
i+1, method, url, qty_normal, qty_anomalous, qty_attack))
qty_total_normal += qty_normal
qty_total_anomalous += qty_anomalous
qty_total_attack += qty_attack
print('-' * 79)
print('{:4s} | {:41s} | {:6,d} | {:9,d} | {:6,d}'.format(
'', 'SELECTED SAMPLES', qty_selected_normal, qty_selected_anomalous, qty_selected_attack))
print('{:4s} | {:41s} | {:6,d} | {:9,d} | {:6,d}'.format(
'', 'TOTAL SAMPLES', qty_total_normal, qty_total_anomalous, qty_total_attack))
print('-' * 79)
print()
|
mpl-2.0
| -2,407,511,306,200,125,000 | 36.827338 | 99 | 0.516356 | false |
pinballwizard/service-partner
|
opensky/migrations/0017_auto_20150402_0740.py
|
1
|
1111
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('opensky', '0016_auto_20150401_1007'),
]
operations = [
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.ForeignKey(to='opensky.Worker')),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='worker',
name='phone',
),
migrations.AlterField(
model_name='socialwidget',
name='name',
field=models.CharField(max_length=2, choices=[('vk', 'Вконтакте'), ('ok', 'Одноклассники'), ('fb', 'Facebook'), ('tw', 'Twitter'), ('li', 'LinkedIn'), ('yt', 'YouTube'), ('in', 'Instagram')], verbose_name='Название'),
preserve_default=True,
),
]
|
gpl-2.0
| -6,771,096,823,296,523,000 | 30.794118 | 229 | 0.526364 | false |
getwarped/powershift-image
|
setup.py
|
1
|
1387
|
import sys
import os
from setuptools import setup
long_description = open('README.rst').read()
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
setup_kwargs = dict(
name='powershift-image',
version='1.4.2',
description='PowerShift command plugin for working in S2I images.',
long_description=long_description,
url='https://github.com/getwarped/powershift-image',
author='Graham Dumpleton',
author_email='[email protected]',
license='BSD',
classifiers=classifiers,
keywords='openshift kubernetes',
packages=['powershift', 'powershift.image', 'powershift.image.scripts'],
package_dir={'powershift': 'src/powershift'},
extras_require={'cli': ['powershift-cli>=1.2.0']},
entry_points = {'powershift_cli_plugins': ['image = powershift.image']},
package_data = {'powershift.image.scripts': ['alive.sh', 'assemble.sh',
'exec.sh', 'jobs.sh', 'migrate.sh', 'ready.sh', 'run.sh', 'setup.sh',
'shell.sh', 'verify.sh']},
)
setup(**setup_kwargs)
|
bsd-2-clause
| 5,057,765,491,666,269,000 | 33.675 | 77 | 0.647441 | false |
smehan/py-scratch
|
Euler/E12.py
|
1
|
1103
|
__author__ = 'shawnmehan'
"""Class to calculate the triangle numbers and then determine how many divisors they have.
Objective is to find first triangle number with > 500 divisors"""
import math, time
ti = time.time()
def nofactors2(num):
count = 0
x = math.sqrt(num)
y = int(math.ceil(x))
if x - y == 0:
count = 1
count += 2*len(filter(lambda a:num % a == 0, range(1, y)))
return count
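# e.g. nofactors2(28) == 6, since 28 = T(7) has the divisors 1, 2, 4, 7, 14 and 28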
n = [0 for a in range(3)]
n[2] = nofactors2(22/2)
m = 1
for a in range(22, (10**5)-1, 2):
n = [n[2], nofactors2(a+1), nofactors2((a+2)/2)]
m = max(m, n[0]*n[1]-1, n[1]*n[2]-1)
if m > 500:
break
print(m, a)
print("Time taken(secs):", time.time() - ti)
from math import sqrt
ti = time.time()
def triangle():
a, b = 2, 1
while 1:
yield b
b += a
a += 1
for n in triangle():
c = 0
for i in range(1, int(sqrt(n)) + 1):
if n % i == 0:
c += 1
if i * i != n:
c += 1
if c > 500:
print("Answer: ", n)
break
print("Time taken(secs):", time.time() - ti)
|
apache-2.0
| -2,251,755,324,120,026,600 | 18.350877 | 90 | 0.513146 | false |
skysports-digitalmedia/php-buildpack
|
tests/common/components.py
|
1
|
13406
|
from common.integration import FileAssertHelper
from common.integration import TextFileAssertHelper
class DownloadAssertHelper(object):
"""Helper to assert download counts"""
def __init__(self, download, install):
self.download = download
self.install = install
def assert_downloads_from_output(self, output):
assert output is not None, "Output is None"
tfah = TextFileAssertHelper()
(tfah.expect()
.on_string(output)
.line_count_equals(self.download,
lambda l: l.startswith('Downloaded'))
.line_count_equals(self.install,
lambda l: l.startswith('Installing'))
.line(-1).startswith('Finished:'))
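# Hypothetical usage of the helper above:
#   DownloadAssertHelper(download=2, install=2).assert_downloads_from_output(output)
# asserts that the build output reports two 'Downloaded' lines, two 'Installing' lines
# and ends with a 'Finished:' line.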
class BuildPackAssertHelper(object):
"""Helper to assert build pack is working"""
def assert_start_script_is_correct(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, 'start.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'start.sh')
.line(0)
.equals('export PYTHONPATH=$HOME/.bp/lib\n') # noqa
.line(-1)
.equals('$HOME/.bp/bin/start'))
def assert_scripts_are_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, '.bp', 'bin', 'rewrite')
.root(build_dir, '.bp', 'lib', 'build_pack_utils')
.directory_count_equals(22) # noqa
.path('utils.py')
.path('process.py')
.exists())
def assert_config_options(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, '.bp-config', 'options.json')
.exists())
class PhpAssertHelper(object):
"""Helper to assert PHP is installed & configured correctly"""
def assert_start_script_is_correct(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, 'start.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'start.sh')
.any_line()
.equals('$HOME/.bp/bin/rewrite "$HOME/php/etc"\n'))
def assert_contents_of_procs_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.procs').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.procs')
.any_line()
.equals('php-fpm: $HOME/php/sbin/php-fpm -p ' # noqa
'"$HOME/php/etc" -y "$HOME/php/etc/php-fpm.conf"'
' -c "$HOME/php/etc"\n'))
def assert_contents_of_env_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.profile.d', 'bp_env_vars.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.profile.d', 'bp_env_vars.sh')
.any_line()
.equals('export '
'PATH=$PATH:$HOME/php/bin:$HOME/php/sbin\n') # noqa
.equals('export '
'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/php/lib\n')
.equals('export PHPRC=$HOME/php/etc\n'))
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'php')
.path('etc', 'php-fpm.conf') # noqa
.path('etc', 'php.ini')
.path('sbin', 'php-fpm')
.path('bin')
.root(build_dir, 'php', 'lib', 'php', 'extensions',
'no-debug-non-zts-20100525')
.path('bz2.so')
.path('zlib.so')
.path('curl.so')
.path('mcrypt.so')
.exists())
class HttpdAssertHelper(object):
"""Helper to assert HTTPD is installed and configured correctly"""
def assert_start_script_is_correct(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, 'start.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'start.sh')
.any_line()
.equals('$HOME/.bp/bin/rewrite "$HOME/httpd/conf"\n'))
def assert_contents_of_procs_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.procs').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.procs')
.any_line()
.equals('httpd: $HOME/httpd/bin/apachectl -f ' # noqa
'"$HOME/httpd/conf/httpd.conf" -k start '
'-DFOREGROUND\n'))
def assert_contents_of_env_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.profile.d', 'bp_env_vars.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.profile.d', 'bp_env_vars.sh')
.any_line()
.equals('export [email protected]\n'))
def assert_web_dir_exists(self, build_dir, web_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, web_dir)
.exists())
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'httpd', 'conf')
.path('httpd.conf') # noqa
.root('extra')
.path('httpd-modules.conf') # noqa
.path('httpd-remoteip.conf')
.root(build_dir, 'httpd', 'modules', reset=True)
.path('mod_authz_core.so')
.path('mod_authz_host.so')
.path('mod_dir.so')
.path('mod_env.so')
.path('mod_log_config.so')
.path('mod_mime.so')
.path('mod_mpm_event.so')
.path('mod_proxy.so')
.path('mod_proxy_fcgi.so')
.path('mod_reqtimeout.so')
.path('mod_unixd.so')
.path('mod_remoteip.so')
.path('mod_rewrite.so')
.exists())
class NginxAssertHelper(object):
"""Helper to assert Nginx is installed and configured correctly"""
def assert_start_script_is_correct(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, 'start.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'start.sh')
.any_line()
.equals('$HOME/.bp/bin/rewrite "$HOME/nginx/conf"\n'))
def assert_contents_of_procs_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.procs').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.procs')
.any_line()
.equals('nginx: $HOME/nginx/sbin/nginx -c ' # noqa
'"$HOME/nginx/conf/nginx.conf"\n'))
def assert_web_dir_exists(self, build_dir, web_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, web_dir)
.exists())
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'nginx')
.path('logs') # noqa
.path('sbin', 'nginx')
.root(build_dir, 'nginx', 'conf')
.directory_count_equals(10)
.path('fastcgi_params')
.path('http-logging.conf')
.path('http-defaults.conf')
.path('http-php.conf')
.exists())
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'nginx', 'conf', 'http-php.conf')
.any_line()
.does_not_contain('#{PHP_FPM_LISTEN}') # noqa
.does_not_contain('{TMPDIR}'))
class NoWebServerAssertHelper(object):
"""Helper to assert when we're not using a web server"""
def assert_no_web_server_is_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, 'httpd')
.path(build_dir, 'nginx')
.does_not_exist())
def assert_downloads_from_output(self, output):
tfah = TextFileAssertHelper()
(tfah.expect()
.on_string(output)
.line_count_equals(6, lambda l: l.startswith('Downloaded'))
.line_count_equals(1, lambda l: l.startswith('No Web'))
.line_count_equals(1, lambda l: l.startswith('Installing PHP'))
.line_count_equals(1, lambda l: l.find('php-cli') >= 0)
.line(-1).startswith('Finished:'))
def assert_contents_of_procs_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.procs').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.procs')
.line(0)
.equals('php-app: $HOME/php/bin/php -c "$HOME/php/etc" app.php\n'))
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'php')
.path('etc', 'php.ini') # noqa
.path('bin', 'php')
.path('bin', 'phar.phar')
.root(build_dir, 'php', 'lib', 'php', 'extensions',
'no-debug-non-zts-20100525')
.path('bz2.so')
.path('zlib.so')
.path('curl.so')
.path('mcrypt.so')
.exists())
def assert_no_web_dir(self, build_dir, webdir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, webdir)
.does_not_exist())
class NewRelicAssertHelper(object):
"""Helper to assert NewRelic is installed and configured correctly"""
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'newrelic') # noqa
.path('daemon', 'newrelic-daemon.x64')
.path('agent', 'x64', 'newrelic-20100525.so')
.exists())
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'php', 'etc', 'php.ini')
.any_line()
.equals(
'extension=@{HOME}/newrelic/agent/x64/newrelic-20100525.so\n')
.equals('[newrelic]\n')
.equals('newrelic.license=JUNK_LICENSE\n')
.equals('newrelic.appname=app-name-1\n'))
def is_not_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.path(build_dir, 'newrelic')
.does_not_exist())
class HhvmAssertHelper(object):
"""Helper to assert HHVM is installed and configured correctly."""
def assert_start_script_is_correct(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, 'start.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'start.sh')
.any_line()
.equals('$HOME/.bp/bin/rewrite "$HOME/hhvm/etc"\n')
.equals('hhvm() { $HOME/hhvm/usr/bin/hhvm '
'-c "$HOME/hhvm/etc/php.ini" "$@"; }\n'))
def assert_contents_of_procs_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.procs').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.procs')
.any_line()
.equals('hhvm: $HOME/hhvm/usr/bin/hhvm --mode server ' # noqa
'-c $HOME/hhvm/etc/server.ini '
'-c $HOME/hhvm/etc/php.ini\n'))
def assert_contents_of_env_file(self, build_dir):
fah = FileAssertHelper()
fah.expect().path(build_dir, '.profile.d', 'bp_env_vars.sh').exists()
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, '.profile.d', 'bp_env_vars.sh')
.any_line()
.equals('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:'
'$HOME/hhvm/usr/lib/hhvm\n')
.equals('export PATH=$PATH:$HOME/hhvm/usr/bin\n'))
def assert_files_installed(self, build_dir):
fah = FileAssertHelper()
(fah.expect()
.root(build_dir, 'hhvm')
.path('usr', 'bin', 'hhvm') # noqa
.root(build_dir, 'hhvm', 'usr', 'lib', 'hhvm', reset=True)
.path('libboost_program_options.so.1.55.0')
.path('libevent-2.0.so.5')
.path('libicuuc.so.48')
.path('libjemalloc.so.1')
.path('libcurl.so.4')
.path('libicudata.so.48')
.path('libMagickWand-6.Q16.so.2')
.path('libonig.so.2')
.path('libmcrypt.so.4')
.path('libstdc++.so.6')
.exists())
def assert_server_ini_contains(self, build_dir, expected_listener):
tfah = TextFileAssertHelper()
(tfah.expect()
.on_file(build_dir, 'hhvm', 'etc', 'server.ini')
.any_line()
.contains(expected_listener))
|
apache-2.0
| 2,778,926,090,950,244,000 | 36.551821 | 79 | 0.518648 | false |
pheelee/tinynfogen
|
tng/core/nfo.py
|
1
|
2700
|
"""
Created on 30.03.2013
@author: ritterph
"""
import cgi
import re, os
import xml.etree.ElementTree as et
import logging
class NFO(object):
header = [
u'<?xml version="1.0" encoding="utf-8"?>',
u'<movie xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">'
]
footer = [
u'<generated>422344cf76177667d7d3fded1e7538df</generated>',
u'<fileinfo />',
u'</movie>'
]
name = u''
def __init__(self,path,content):
self.path = path
self.content = content
self.logger = logging.getLogger('TinyNFOGen')
def GetIMDBID(self):
self.logger.debug('Searching for IMDB in: %s' % self.path)
with open(self.path,'r') as nfo:
content = nfo.readlines()
            IMDBid = re.findall(r'tt\d{7}', str(content).encode('utf-8'))
if len(IMDBid) > 0:
self.logger.debug('Found IMDB in: %s' % self.path)
return IMDBid[0]
else:
return False
def CheckElement(self,key,value):
if os.path.isfile(self.path):
try:
val = et.parse(self.path)
val2 = val.find(key)
if val2 is not None:
return val2.text == value
else:
return False
except:
self.logger.log(logging.ERROR,'Invalid XML Format: %s' % self.path)
return False
else:
return False
def Write(self,NamingDict):
self.__NormalizeData()
with open(self.path,'w') as NFOfile:
#Write the Header
for line in self.header:
NFOfile.write(str(line + '\n'))
#Write the main Content
for item in NamingDict.keys():
content = self.content[NamingDict[item]]
if isinstance(content, unicode):
content = cgi.escape(content)
s = ' <%s>%s</%s>' % (item,content,item)
NFOfile.write(s.encode('utf-8') + '\n')
#Write the Footer
for line in self.footer:
NFOfile.write(str(line + '\n'))
@staticmethod
def __multiValueToString(value):
s=''
for item in value:
s += item['name'] + ', '
return s.rstrip(', ')
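    # e.g. __multiValueToString([{'name': 'Action'}, {'name': 'Drama'}]) -> 'Action, Drama'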
def __NormalizeData(self):
for sub in self.content:
if isinstance(self.content[sub], list):
self.content[sub] = self.__multiValueToString(self.content[sub])
|
gpl-2.0
| -959,799,774,449,654,000 | 29.681818 | 127 | 0.496667 | false |
protojas/simple-learnpy
|
simplestats.py
|
1
|
9774
|
import itertools
from math import pi as PI_CONST, e as E_CONST, sqrt
import cmath
# returns the mean of a list
def mean(arr):
return sum(arr)/float(len(arr))
# returns the euclidean distance between two lists
def euclidean(a,b):
z = zip(a,b)
z = map(lambda r: pow(r[0]-r[1],2),z)
return sqrt(sum(z))
# flattens a list of lists
def flatten(a):
return [i for s in a for i in s]
# returns the city block distance between two lists
def cityblock(a,b):
z = zip(a,b)
    z = map(lambda r: abs(r[0]-r[1]),z)
return sum(z)
# returns the variance of a list
def var(arr):
m = mean(arr);
if len(arr) == 1:
return 0
return sum(map(lambda x: ((x - m) * (x - m)),arr))/float(len(arr)-1)
# returns the covariance matrix over the columns of a matrix
def cov(M):
width = len(M[0])
MT = transpose(M)
covm = []
for i in range(width):
tmp = []
for j in range(width):
tmp += [covariance(MT[i], MT[j])]
covm += [tmp]
return covm
# returns the covariance between two random variables
def covariance(X, Y):
assert(len(X) == len(Y)), "vectors not of equal size"
xmean = mean(X)
ymean = mean(Y)
if len(X) == 1:
return 0
return sum(map(lambda i: ((X[i] - xmean) * (Y[i] - ymean)), range(len(X))))/float(len(X)-1)
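# e.g. covariance([1, 2, 3], [2, 4, 6]) == 2.0 (sample covariance, n-1 denominator)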
# returns the transpose of a matrix
def transpose(M):
nwidth = len(M)
nheight = len(M[0])
MT = []
for m in range(nheight):
tmp = []
for n in range(nwidth):
tmp += [M[n][m]]
MT += [tmp]
return MT
# returns the dot product of two matrices if they can be multiplied
def dot(P, Q):
pheight = len(P)
pwidth = len(P[0])
qheight = len(Q)
qwidth = len(Q[0])
assert (pwidth == qheight), "matrices cannot be multiplied due to mismatched dimension"
prod = []
for i in range(pheight):
tmp = []
for j in range(qwidth):
tmp += [sum(map(lambda k: P[i][k] * Q[k][j],range(pwidth)))]
prod += [tmp]
return prod
# returns the difference X - Y
def vecsub(X, Y):
assert (len(X) == len(Y)), "vectors have different dimensions"
return map(lambda i: X[i] - Y[i], range(len(X)))
# returns the determinant of a matrix if it is square
def det(M):
assert(len(M) == len(M[0])), "matrix is not square"
n = len(M)
perms = permute(n)
det = 0
for p in perms:
prod = 1
for i in range(n):
prod *= M[i][p[i]]
prod *= sgn(p)
det += prod
return det
# returns the parity of the permutation
def sgn(P):
n = len(P)
v = [False] * n
ret = 1
for k in range(n):
if not v[k]:
r = k
L = 0
while not v[r]:
L += 1
v[r] = True
r = P[r]
if L % 2 == 0:
ret = -1 * ret
return ret
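# e.g. sgn([1, 0, 2]) == -1, since swapping two elements of the identity is an odd permutation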
# returns the length of a vector
def veclen(v): #should be [[x x x x x]] or [[x] [x] [x] [x]]
assert len(v) == 1 or len(v[0]) == 1, "not a vector"
if len(v[0]) == 1:
v = transpose(v)
return euclidean(v[0], [0]*len(v[0]))
# returns Hn as a result of the Arnoldi iteration
# currently unused
def arnoldi(A):
b = [5] * len(A)
qs = [[]] * (len(A[0])+1)
qs[0] = map(lambda x: float(x) / veclen([b]), b)
h = [ [ 0 for i in range(len(A)) ] for j in range(len(A)+1) ]
for n in range(0, len(A)):
v = dot(A, transpose([qs[n]]))
for j in range(0, n+1):
h[j][n] = dot([qs[j]], v)[0][0]
v = transpose([vecsub(transpose(v)[0],map(lambda x: x * h[j][n], qs[j]))])
h[n+1][n] = veclen(v)
qs[n+1] = map(lambda x: float(x) / h[n+1][n],transpose(v)[0])
return h[:-1]
# note: does not support complex eigenvalues!! don't try to use it for that!
# this is meant only for symmetric matrices (namely, covariance matrices, for PCA)
def eig(A):
if len(A) == 2 and len(A[0]) == 2:
return eig22(A)
T,V = qr(A, 4*len(A))
return [T[i][i] for i in range(len(T))], V
# QR factorization
def householder(A):
n = len(A)
R = A
Q = [[float(0)] * n for i in range(n)]
for k in range(n-1):
I = identity(n)
x = column(R[k:],k)
e = column(I[k:],k)
a = -cmp(x[0][0], 0) * veclen(x)
u = matsub(x, scale(a,e))
lenu = veclen(u)
v = [map(lambda p: p/lenu, u[0])]
Qm = matsub(identity(len(u[0])), scale(float(2), dot(transpose(v),v)))
Qt = [[ Qi(Qm,i,j,k) for i in range(n)] for j in range(n)]
if k == 0:
Q = Qt
R = dot(Qt,A)
else:
Q = dot(Qt,Q)
R = dot(Qt,R)
return transpose(Q), R
def Qi(Qm, i, j, k):
if i < k or j < k:
return float(i == j)
else:
return Qm[i-k][j-k]
# iterates and does QR factorizations up to itermax iterations
def qr(A, itermax):
T = A
iter = 0
Qprod = identity(len(T))
while iter < itermax:
Q,R = householder(T)
T = dot(R,Q)
Qprod = dot(Qprod, Q)
iter += 1
return T, Qprod
#returns A scaled by constant k
def scale(k,A):
ret = A
for j in range(len(ret)):
ret = rowscale(ret, j, k)
return ret
# returns A-B if possible
def matsub(A,B):
assert len(A) == len(B) and len(A[0]) == len(B[0]), "matrices not same size"
ret = []
for i in range(len(A)):
tmp = []
for j in range(len(A[0])):
tmp += [A[i][j] - B[i][j]]
ret += [tmp]
return ret
# adds two matrices together
def matadd(A,B):
return matsub(A, scale(-1,B))
# eigenvalues for a 2x2 matrix
def eig22(A):
assert len(A) == 2 and len(A[0]) == 2, "not a 2x2 matrix"
T = float(A[0][0] + A[1][1]) #T = a + d
D = float(det(A))
eig1 = T/2 + cmath.sqrt(pow(T,2)/4 - D)
eig2 = T/2 - cmath.sqrt(pow(T,2)/4 - D)
return eig1,eig2
# gets a column of a matrix as a 1xn vector
def column(A,i):
return [transpose(A)[i]]
# gets a row of a matrix as a 1xn vector
def row(A,j):
return [A[j]]
# returns a list of all permutations of the set {0, 1, 2, ... n}
def permute(n):
return map(list,list(itertools.permutations(range(n))))
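# e.g. permute(2) == [[0, 1], [1, 0]]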
# recursive implementation that fails at len=5 because of recursive limits in python
def permute_rec(arr, start, collect):
if len(arr) == 1:
collect += [start + arr]
return collect
else:
return reduce(lambda x,y: x+y, map(lambda m: permute_rec(arr[:m] + arr[m+1:], start + [arr[m]], collect), range(len(arr))))
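# returns the multivariate normal density of X for the given covariance matrix and mean vector,
# i.e. exp(-(X-mean)' * inv(cov) * (X-mean) / 2) / ((2*pi)^(n/2) * sqrt(det(cov)));
# a precomputed covinv may be passed in to avoid inverting cov again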
def normalX(X, cov, mean, covinv=[]):
if (covinv == []):
covinv = inv(cov)
standardized = vecsub(X, mean)
eexpo = map(lambda x: rowscale([x],0,-0.5)[0], dot([standardized], dot(covinv, transpose([standardized]))))
ecoeff = (1 / (((2 * PI_CONST) ** (float(len(cov))/2)) * sqrt(det(cov))))
return ecoeff * (E_CONST ** det(eexpo))
# returns the inverse of a matrix if it exists
def inv(M):
assert (hasinv(M)), "matrix is not square or is not invertible"
n = len(M)
i = 0
j = 0
R = []
I = identity(n)
#append the identity matrix
for k in range(n):
R += [M[k] + I[k]]
RT = transpose(R)
while j != n and i != n:
curr = RT[j] # get the current column
#check if the current column is all zeroes
if reduce(lambda x,y: x and y, map(lambda m: m == 0, curr[i:])):
j += 1
i += 1
continue
# make the leading value equal to 1
R = rowscale(R, i, 1/float(R[i][j]))
# make the other rows have zero in this column
for x in range(n):
if x != i:
R = rowadd(R, x, i, -1 * R[x][j])
i += 1
j += 1
RT = transpose(R)
# return the transformed identity
return transpose(RT[n:])
# multiply row i by factor n
def rowscale(M, i, n):
return M[:i] + [map(lambda x: n * x, M[i])] + M[i+1:]
# add n * row j to row i and store in row i
def rowadd(M, i, j, n):
jscale = map(lambda x: n * x, M[j])
return M[:i] + [map(lambda y: jscale[y] + M[i][y],range(len(jscale)))] + M[i+1:]
#swap rows i and j in M
def rowswap(M, i, j):
if i < j:
p, q = i, j
elif j < i:
p, q = j, i
else:
return M
return M[:p] + [M[q]] + M[p+1:q] + [M[p]] + M[q+1:]
# returns true if the matrix has an inverse and is square
def hasinv(M):
return len(M) == len(M[0]) and det(M) != 0
# returns an N x N identity matrix multiplied by a constant k
def kidentity(N,k):
I = []
for i in range(N):
I += [[float(0)]*i +[float(k)] + [float(0)]*(N-i-1)]
return I
# returns an N x N identity matrix
def identity(N):
return kidentity(N,1)
# creates a diagonal matrix from the given array
def diag(arr):
N = len(arr)
diag = 0
tmp = []
for i in range(N):
Ei = [[0] * N] * i + [[0] * i + [1] + [0] * (N-i-1)] + [[0] * N] * (N-i-1)
ei = transpose([Ei[i]])
tmp += [dot(dot(Ei, transpose([arr])), transpose(ei))]
return reduce(lambda p,q: addM(p,q), tmp)
# adds two same size matrices together
def addM(P, Q):
assert len(P) == len(Q), "matrices have different row counts"
assert len(P[0]) == len(Q[0]), "matrices have different column counts"
return reduce(lambda z,w: z + w, [map(lambda x: map(lambda y: Q[x][y] + P[x][y], range(len(P[x]))),range(len(P)))])
# prints a matrix in a readable way
def prettyM(M):
print "["
for i in M:
print(map(lambda x: round(x, 8),i))
print "]"
# rounds all the things in a matrix to two decimal places
def roundM(M):
return map(lambda x: map(lambda y: round(y, 2), x), M)
# returns the mean of all the datapoints in the matrix
def matmean(M):
return map(mean, transpose(M))
|
gpl-2.0
| -4,208,824,742,078,333,000 | 26.688385 | 131 | 0.538879 | false |
zesik/zkb
|
test/test_readers.py
|
1
|
14870
|
# -*- coding: utf-8 -*-
"""
test.test_readers
~~~~~~~~~~~~~~~~~
This is the unit test file for readers.
:Copyright: Copyright 2014 Yang LIU <[email protected]>
:License: BSD, see LICENSE for details.
"""
import unittest
import io
from zkb.readers import *
from zkb.utils import *
class TestYamlReader(unittest.TestCase):
def _create_stream_h_utf8(self):
output = io.BytesIO()
data = (u'encoding: utf-8\n'
u'title: english 中文日本語言葉叶子\n'
u'\n')
output.write(data.encode('utf-8', 'replace'))
return output
def _create_stream_h_gb18030(self):
output = io.BytesIO()
data = (u'encoding: gb18030\n'
u'title: 中文标题\n')
output.write(data.encode('gb18030', 'replace'))
return output
def _create_stream_h_euc_jp(self):
output = io.BytesIO()
data = (u'encoding: euc-jp\n'
u'title: 日本語言葉\n'
u'\n')
output.write(data.encode('euc-jp', 'replace'))
return output
def _create_stream_f_utf8(self):
output = io.BytesIO()
data = (u'title: 中文日本語言葉叶子\n'
u'\n'
u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。\n'
u'--MORE--\n'
u'詳しい内容。\n'
u'--MORE--\n'
u'詳しい内容。\n')
output.write(data.encode('utf-8', 'replace'))
return output
def _create_stream_f_gb18030(self):
output = io.BytesIO()
data = (u'encoding: gb18030\n'
u'title: 中文标题\n'
u'\n'
u'中文正文。\n'
u'测试。文章。\n'
u'\n'
u'文章。\n'
u'--MORE--\n'
u'详细正文。\n')
output.write(data.encode('gb18030', 'replace'))
return output
def _create_stream_f_euc_jp(self):
output = io.BytesIO()
data = (u'encoding: euc-jp\n'
u'title: 日本語言葉\n'
u'\n'
u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。\n'
u'--MORE--\n'
u'詳しい内容。\n')
output.write(data.encode('euc-jp', 'replace'))
return output
def _create_stream_h_unk(self):
output = io.BytesIO()
data = (u'encoding: fake-encoding\n'
u'title: fake-encoding\n')
output.write(data.encode('utf-8', 'replace'))
return output
def _create_stream_fm_utf8(self):
output = io.BytesIO()
data = (u'title: Title\n'
u'more_separator: --CUSTOMIZED-MORE-SEPARATOR--\n'
u'\n'
u'Contents.\n'
u'Article\n'
u'\n'
u'Article\n'
u'--MORE--\n'
u'Here should belong to abstract.\n'
u'--CUSTOMIZED-MORE-SEPARATOR--\n'
u'Here should belong to full article.\n')
output.write(data.encode('utf-8', 'replace'))
return output
def _create_stream_fnm_utf8(self):
output = io.BytesIO()
data = (u'title: Title\n'
u'more_separator: --CUSTOMIZED-MORE-SEPARATOR--\n'
u'\n'
u'Contents.\n'
u'Article\n'
u'\n'
u'Article\n'
u'--MORE--\n'
u'Here should belong to full article.\n'
u'--MORE--\n'
u'Here should belong to full article.\n')
output.write(data.encode('utf-8', 'replace'))
return output
def _destroy_buffers(self, streams):
for stream in streams:
stream.close()
def _verify_headers(self, result):
self.assertEqual(result[0][0]['encoding'], 'utf-8',
'encoding should be correctly read as utf-8')
self.assertEqual(result[1][0]['encoding'], 'gb18030',
'encoding should be correctly read as gb18030')
self.assertEqual(result[2][0]['encoding'], 'euc-jp',
'encoding should be correctly read as euc-jp')
self.assertFalse('encoding' in result[3][0],
'encoding should not be included in the result if '
'not specified')
self.assertEqual(result[4][0]['encoding'], 'gb18030',
'encoding should be correctly read as gb-18030')
self.assertEqual(result[5][0]['encoding'], 'euc-jp',
'encoding should be correctly read as euc-jp')
self.assertEqual(result[0][0]['title'], u'english 中文日本語言葉叶子',
'title should be corrected decoded with utf-8')
self.assertEqual(result[1][0]['title'], u'中文标题',
'title should be corrected decoded with gb18030')
self.assertEqual(result[2][0]['title'], u'日本語言葉',
'title should be corrected decoded with euc-jp')
self.assertEqual(result[3][0]['title'], u'中文日本語言葉叶子',
'title should be corrected decoded with default '
'encoding')
self.assertEqual(result[4][0]['title'], u'中文标题',
'title should be corrected decoded with gb-18030')
self.assertEqual(result[5][0]['title'], u'日本語言葉',
'title should be corrected decoded with euc-jp')
def test_read_unsupported_encoding(self):
reader = YamlHeaderedContentReader()
s = self._create_stream_h_unk()
with self.assertRaises(UnknownEncodingError) as e:
reader.read(s)
self._destroy_buffers([s])
self.assertEqual(e.exception.encoding, 'fake-encoding',
'unknown encoding should throw exception with '
'encoding name')
def test_read_header_only(self):
reader = YamlHeaderedContentReader()
s = [self._create_stream_h_utf8(),
self._create_stream_h_gb18030(),
self._create_stream_h_euc_jp(),
self._create_stream_f_utf8(),
self._create_stream_f_gb18030(),
self._create_stream_f_euc_jp()]
result = [reader.read(s[0], False),
reader.read(s[1], False),
reader.read(s[2], False),
reader.read(s[3], False),
reader.read(s[4], False),
reader.read(s[5], False)]
self._destroy_buffers(s)
self._verify_headers(result)
self.assertIsNone(result[0][1],
'content should be none if requesting to read '
'header only')
self.assertIsNone(result[1][1],
'content should be none if requesting to read '
'header only')
self.assertIsNone(result[2][1],
'content should be none if requesting to read '
'header only')
self.assertIsNone(result[3][1],
'content should be none if requesting to read '
'header only')
self.assertIsNone(result[4][1],
'content should be none if requesting to read '
'header only')
self.assertIsNone(result[5][1],
'content should be none if requesting to read '
'header only')
def test_read_entire_file(self):
reader = YamlHeaderedContentReader()
s = [self._create_stream_h_utf8(),
self._create_stream_h_gb18030(),
self._create_stream_h_euc_jp(),
self._create_stream_f_utf8(),
self._create_stream_f_gb18030(),
self._create_stream_f_euc_jp(),
self._create_stream_fm_utf8(),
self._create_stream_fnm_utf8()]
result = [reader.read(s[0]),
reader.read(s[1]),
reader.read(s[2]),
reader.read(s[3]),
reader.read(s[4]),
reader.read(s[5]),
reader.read(s[6]),
reader.read(s[7])]
self._destroy_buffers(s)
self._verify_headers(result)
self.assertIsNotNone(result[0][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[1][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[2][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[3][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[4][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[5][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[6][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNotNone(result[7][2],
'content should be none if requesting to read '
'entire file')
self.assertIsNone(result[0][1],
'abstract should be none if no separator detected')
self.assertIsNone(result[1][1],
'abstract should be none if no separator detected')
self.assertIsNone(result[2][1],
'abstract should be none if no separator detected')
self.assertEqual(result[3][1], u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。',
'abstract should be correctly parsed when separator '
'detected')
self.assertEqual(result[4][1], u'中文正文。\n'
u'测试。文章。\n'
u'\n'
u'文章。',
'abstract should be correctly parsed when separator '
'detected')
self.assertEqual(result[5][1], u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。',
'abstract should be correctly parsed when separator '
'detected')
self.assertEqual(result[6][1], u'Contents.\n'
u'Article\n'
u'\n'
u'Article\n'
u'--MORE--\n'
u'Here should belong to abstract.',
'abstract should be correctly parsed when separator '
'detected')
self.assertIsNone(result[7][1],
'abstract should be none if no separator detected')
self.assertEqual(result[0][2], '', 'content should be empty')
self.assertEqual(result[1][2], '', 'content should be empty')
self.assertEqual(result[2][2], '', 'content should be empty')
self.assertEqual(result[3][2], u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。\n'
u'詳しい内容。\n'
u'--MORE--\n'
u'詳しい内容。',
'content should be correctly parsed and if more than '
'one separator is detected, the latter one should '
'not be deleted')
self.assertEqual(result[4][2], u'中文正文。\n'
u'测试。文章。\n'
u'\n'
u'文章。\n'
u'详细正文。',
'content should be correctly parsed and first '
'separator should be removed')
self.assertEqual(result[5][2], u'コンテンツ。\n'
u'テスト。文章。\n'
u'\n'
u'アーティカル。\n'
u'詳しい内容。',
'content should be correctly parsed and first '
'separator should be removed')
self.assertEqual(result[6][2], u'Contents.\n'
u'Article\n'
u'\n'
u'Article\n'
u'--MORE--\n'
u'Here should belong to abstract.\n'
u'Here should belong to full article.',
'content should be correctly parsed and first '
'customized separator should be removed while '
'default separator should remain in the content')
self.assertEqual(result[7][2], u'Contents.\n'
u'Article\n'
u'\n'
u'Article\n'
u'--MORE--\n'
u'Here should belong to full article.\n'
u'--MORE--\n'
u'Here should belong to full article.',
'content should be correctly parsed and default '
'separator should remain in the content when '
'customize separator is found')
|
bsd-3-clause
| -2,744,906,734,917,284,400 | 42.232628 | 79 | 0.442558 | false |
vuphan314/LED
|
src/led_tangler.py
|
1
|
28204
|
#!/usr/bin/python3
"""Convert an LED parsetree into a SL program."""
################################################################################
import sys
sys.path.append('..')
from debugtools.debug_tool import *
from os import path
from typing import Tuple
from led_tree import *
################################################################################
"""Python global variables."""
defedFuncs = () # defined functions (including constants): ('f1', 'f2',...)
defedConsts = () # defined constants (to write tests)
auxFuncNum = 0
auxFuncDefs = '' # 'auxFunc1 := 1; auxFunc2 := [2];...'
# Easel:
isGame = False
funcsAddParams = {} # set by: setFuncsAddParams
################################################################################
class LedDatum:
indepSymbs = () # ('i1',...)
depSymbs = () # ('d1',...)
def getNumIndepSymbs(self) -> int:
return len(self.indepSymbs)
def getNumDepSymbs(self) -> int:
return len(self.depSymbs)
def getSymbs(self) -> tuple:
return self.indepSymbs + self.depSymbs
def getAnotherInst(self, isNext=False):
if isNext:
symbs = self.getNextIndepSymbs()
else:
symbs = self.indepSymbs
dat = LedDatum()
dat.indepSymbs = symbs
return dat
def getNextIndepSymbs(self) -> tuple:
return self.getSymbs()
# current dependent symbols will be next independent symbols
def appendToAux(self, postfix: str, isNext=False) -> str:
num = auxFuncNum
if isNext:
num += 1
st = 'AUX_' + str(num) + '_' + postfix + '_'
return st
"""Fields specific to aggregation."""
# must assign immediately when instantiating (see `AGGR_CATEGS`):
aggrCateg = None # str
# must assign later by calling `aDefFunc`:
aFormFunExpr = None # 'AUX_3_(x, y)'
aVal = '' # of `{-x | x = 1}` is `1`
# term:
aTerm = None # 'x + y'
condInst = None # condition instance (type: LedDatum)
# for disjunction/conjunction:
subInst1 = None # LedDatum
subInst2 = None # LedDatum
def aCheckCateg(self):
if self.aggrCateg not in AGGR_CATEGS:
raiseError('INVALID AGGREGATE CATEGORY')
def aDefFunc(self) -> str:
global auxFuncNum
auxFuncNum += 1
func = self.appendToAux('AGGR')
args = self.indepSymbs
self.aFormFunExpr = applyRecur(self, func, args)
self.aCheckCateg()
if self.aggrCateg == READY_LIST:
st = self.aDefFuncReadyList()
elif self.aggrCateg in LIB_SOLS:
st = self.aDefFuncLib()
else: # CONJ_SOL
st = self.aDefFuncConj()
global auxFuncDefs
auxFuncDefs += st
return self.aFormFunExpr
def aDefFuncReadyList(self) -> str:
ind = 'i_'
fun_name = self.aFormFunExpr
letCls = self.aGetAggrLetClauses(ind)
inCl = self.aTerm
st = defRecur(
self, fun_name, (), inCl, letCls=letCls, inds=(ind,), moreSpace=True
)
return st
def aGetAggrLetClauses(self, ind: str) -> str:
binding = 'b_'
expr = applyRecur(self, self.condInst.aFormFunExpr, (), inds=(ind,))
letCls = defRecur(self, binding, (), expr),
for i in range(self.getNumDepSymbs()):
num = str(i + 1)
expr = applyRecur(self, binding, (), inds=(num,))
letCls += defRecur(self, self.depSymbs[i], (), expr),
return letCls
def aDefFuncLib(self) -> str:
expr = applyRecur(self, self.aggrCateg, self.aGetArgsLib())
st = defRecur(self, self.aFormFunExpr, (), expr, moreSpace=True)
return st
def aGetArgsLib(self) -> tuple:
if self.aggrCateg == DISJ_SOL:
return self.subInst1.aFormFunExpr, self.subInst2.aFormFunExpr
elif self.aggrCateg in LIB_SOLS:
return self.aVal,
else:
raiseError('NOT IN LIBRARY')
def aDefFuncConj(self) -> str:
func = 'join'
args = self.aGetFuncConjDeep(),
expr = applyRecur(self, func, args)
st = defRecur(self, self.aFormFunExpr, (), expr, moreSpace=True)
st = self.aDefFuncConjDeep() + st
return st
def aDefFuncConjDeep(self) -> str:
bindings = 'b1_', 'b2_'
inds = 'i1_', 'i2_'
func = self.aGetFuncConjDeep()
expr = applyRecur(self, 'unnBindings', bindings)
letCls = self.aGetConjLetClauses(bindings, inds)
st = defRecur(
self, func, (), expr, letCls=letCls, inds=inds, moreSpace=True
)
return st
def aGetConjLetClauses(self, bindings: tuple, inds: tuple) -> tuple:
workarounds = 'workaround1_', 'workaround2_'
# Bryant's solution to avoid SL bug
funcs = self.subInst1.aFormFunExpr, self.subInst2.aFormFunExpr
letCls = ()
for i in range(2):
workaround = workarounds[i]
func = funcs[i]
letCls += defRecur(self, workaround, (), func),
ind = inds[i]
expr = applyRecur(self, workaround, (), inds=(ind,))
binding = bindings[i]
letCls += defRecur(self, binding, (), expr),
n = int(len(letCls) / 2)
sts = ()
for i in range(self.subInst1.getNumDepSymbs()):
symb = self.subInst1.depSymbs[i]
func = bindings[0]
num = str(i + 1)
expr = applyRecur(self, func, (), inds=(num,))
sts += defRecur(self, symb, (), expr),
return letCls[:n] + sts + letCls[n:]
def aGetFuncConjDeep(self) -> str:
func = self.appendToAux('DEEP')
args = self.indepSymbs
return applyRecur(self, func, args)
"""Fields specific to quantification."""
isUniv = None # bool
qSet = '' # '{1, 2,...}'
qPred = '' # 'all y in S. y > x'
def qDefFuncs(self) -> str:
st = self.qDefFuncMain() + self.qDefFuncPred() + self.qDefFuncSet()
return st
def qDefFuncMain(self) -> str:
global auxFuncNum
auxFuncNum += 1
S = self.indepSymbs
funcPred = self.qGetFuncPred()
argsQuant = applyRecur(self, funcPred, S),
funcQuant = self.qGetFuncQuant()
expr = applyRecur(self, funcQuant, argsQuant)
funcMain = self.qGetFuncMain()
st = defRecur(self, funcMain, S, expr, moreSpace=True)
return st
def qDefFuncPred(self) -> str:
ind = 'i_'
letCls = self.qGetPredLetClause(ind),
func = self.qPred
if funcIsAux(func):
args = self.getNextIndepSymbs()
func = applyRecur(self, func, args)
expr = func
func2 = self.qGetFuncPred()
args2 = self.indepSymbs
st = defRecur(self, func2, args2, expr, inds=(ind,), letCls=letCls)
return st
def qGetPredLetClause(self, ind: str) -> str:
"""Return 'y := S(x)[i_];'."""
expr = applyRecur(
self, self.qGetFuncSet(), self.indepSymbs, inds=(ind,)
)
st = defRecur(self, self.depSymbs[0], (), expr)
return st
def qDefFuncSet(self) -> str:
func = self.qGetFuncSet()
args = self.indepSymbs
expr = applyRecur(self, 'valToSet', (self.qSet,))
st = defRecur(self, func, args, expr, moreSpace=True)
return st
def qGetFuncQuant(self) -> str:
if self.isUniv:
func = 'allSet'
else: # universal
func = 'someSet'
return func
def qGetFuncMain(self) -> str:
st = self.appendToAux('A')
return st
def qGetFuncPred(self) -> str:
st = self.appendToAux('B')
return st
def qGetFuncSet(self) -> str:
st = self.appendToAux('C')
return st
################################################################################
"""Top-level function.
Convert an LED parsetree into a string which represents a SL program.
Python pseudotype `Tree` is either type `tuple` or `str`.
"""
def tangleTop(T: tuple) -> str:
T = setIsGame(T)
T = addOtherwiseClauses(T)
setDefedFuncsConsts(T)
if isGame:
setFuncsAddParams(T)
T = addEaselParams(T)
T = appendUnderscore(T)
setDefedFuncsConsts(T)
# parameters were added to some constants,
# making them non-constants
imports = ''
# Easel doesn't work well with imports,
# so I will append a copy of the LED library to the output SL file
else:
imports = importLib()
T = expandSymsInS(T)
st = tangleRecur(LedDatum(), T)
if auxFuncDefs != '':
st += blockComment('AUXILIARY FUNCTIONS') + '\n\n' + auxFuncDefs
st = writeTest() + imports + st
if isGame:
st += EASEL_FRAGMENT + getLibsStr()
return st + '\n'
################################################################################
EASEL_FRAGMENT = '''
/*
Easel fragment
*/
/* easel required functions */
initialState: State;
initialState :=
valToState(initialState_);
newState: Input * State -> State;
newState(I, S) :=
let
v := newState_(I, S);
in
valToState(v);
images: State -> Image(1);
images(S) :=
let
v := images_(S);
in
valToImages(v);
/* easel default sound */
sounds: Input * State -> char(2);
sounds(I, S) := ["ding"] when I.iClick.clicked else [];
'''
################################################################################
"""Recursion iterators."""
def tangleRecur(dat: LedDatum, T) -> str:
if isinstance(T, str):
return T
elif T[0] in LEXEMES:
return tangleLexemes(dat, T)
elif T[0] == ACT_FUN_EXPR:
args = T[2][1:] if not isConstFunExpr(T) else ()
return applyRecur(dat, T[1], args)
elif T[0] == 'tpl':
return tangleTuple(dat, T)
elif T[0] in SET_LABELS:
return tangleSet(dat, T)
elif T[0] in AGGR_OPS:
return tangleAggr(dat, T)
elif T[0] in QUANT_OPS:
return tangleQuant(dat, T)
elif T[0] in NONSTRICT_OPS:
return tangleNonstrictOps(dat, T)
elif T[0] in LIB_OPS:
return tangleLibOps(dat, T)
elif T[0] in IF_LABELS:
return tangleIfClauses(dat, T)
elif T[0] in DEF_LABELS:
return tangleDef(dat, T)
elif T[0] == CMNT_LABEL:
return ''
else:
return recurStr(tangleRecur, dat, T)
def defRecur(
dat: LedDatum, func, args: tuple, expr, inds=(), letCls=(), moreSpace=False
) -> str:
head = applyRecur(dat, func, args, inds=inds)
expr = tangleRecur(dat, expr)
if letCls != ():
letCls = writeLetClauses(letCls)
inCl = writeInClause(expr)
expr = letCls + inCl
moreSpace = True
body = expr + ';\n'
if moreSpace:
indent = '\n'
if letCls == ():
indent += '\t\t'
body = indent + body + '\n'
st = head + ' := ' + body
return st
def applyRecur(
dat: LedDatum, func, args: tuple, isInLib=False, argsAreBracketed=False,
inds=()
) -> str:
func = tangleRecur(dat, func)
if isInLib:
func = prependLib(func)
st = func
if args != ():
st2 = tangleRecur(dat, args[0])
for arg in args[1:]:
st2 += ', ' + tangleRecur(dat, arg)
if argsAreBracketed:
st2 = addBrackets(st2)
st += addParentheses(st2)
st = appendInds(st, inds)
return st
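# For illustration: applyRecur(dat, 'f', ('x', 'y'), inds=('i_',)) builds the
# SequenceL expression string 'f(x, y)[i_]'.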
################################################################################
"""Recursion helpers."""
def recurStr(F, dat: LedDatum, T) -> str:
"""F: LedDatum * tree -> str."""
st = ''
for t in T[1:]:
st += F(dat, t)
return st
def recurTuple(F, dat: LedDatum, T) -> tuple:
"""F: LedDatum * tree -> tuple."""
tu = ()
for t in T[1:]:
tu += F(dat, t)
return tu
def recurVoid(F, dat: LedDatum, T):
"""F: LedDatum * tree."""
for t in T[1:]:
F(dat, t)
def recurTree(F, T):
"""F: tree -> tree."""
T2 = T[:1]
for t in T[1:]:
T2 += F(t),
return T2
################################################################################
"""Set defined functions/constants."""
def setDefedFuncsConsts(prog):
global defedFuncs
global defedConsts
# clear before possible 2nd call:
defedFuncs = ()
defedConsts = ()
for prog_el in prog[1:]:
if is_led_def(prog_el):
fun_name = prog_el[1][1] # no: ('syms',...)
st = tangleRecur(LedDatum(), fun_name)
defedFuncs += st,
if isConstDef(prog_el):
defedConsts += st,
################################################################################
"""Easel."""
def addEaselParams(T):
if isinstance(T, str):
return T
elif T[0] == ACT_FUN_EXPR:
fun_name = T[1]
params = getEaselParamsFromLexeme(fun_name)
params = getLabeledTuple(ID, params)
params = getLabeledTuple(ACT_FUN_EXPR, params)
if isConstFunExpr(T):
if params != ():
terms = getLabeledTree(TERMS, params)
T = ACT_FUN_EXPR, fun_name, terms
return T
else:
terms = T[2]
terms += params
T = T[:2] + (terms,)
return recurTree(addEaselParams, T)
elif T[0] == FORM_FUN_EXPR:
fun_name = T[1]
params = getEaselParamsFromLexeme(fun_name)
params = getLabeledTuple(ID, params)
if params != ():
if isConstFunExpr(T):
syms = getLabeledTree(SYMS, params)
T += syms,
else:
syms = T[2] + params
T = T[:2] + (syms,)
return T
else:
return recurTree(addEaselParams, T)
def getLabeledTree(label: str, tup: tuple):
return (label,) + tup
def getLabeledTuple(label: str, tup: tuple) -> tuple:
T = ()
for t in tup:
t = label, t
T += t,
return T
EASEL_INPUT = 'I'
EASEL_STATE = 'S'
EASEL_PARAMS_INPUT = EASEL_INPUT,
EASEL_PARAMS_STATE = EASEL_STATE,
EASEL_PARAMS = EASEL_PARAMS_INPUT + EASEL_PARAMS_STATE
def getEaselParamsFromLexeme(id) -> tuple:
st = tangleRecur(LedDatum(), id)
if not isinstance(st, str):
raiseError('MUST BE STRING')
if not (st in defedFuncs or st in EASEL_FUNCS): # symbol
return ()
elif st in funcsAddParams['addNeither']:
return ()
elif st in funcsAddParams['addInput']:
return EASEL_PARAMS_INPUT
elif st in funcsAddParams['addState']:
return EASEL_PARAMS_STATE
else:
return EASEL_PARAMS
def appendUnderscore(T):
if isinstance(T, str):
if T in EASEL_FUNCS - EASEL_FUNCS_GLOBAL:
T += '_'
return T
else:
return recurTree(appendUnderscore, T)
EASEL_FUNCS_CLICK = {'mouseClicked', 'mouseX', 'mouseY'}
EASEL_FUNCS_CURRENT_STATE = {'currentState'}
EASEL_FUNCS_GLOBAL = EASEL_FUNCS_CLICK | EASEL_FUNCS_CURRENT_STATE
EASEL_FUNCS_CONSTRUCTOR = {
'point', 'color', 'click', 'input', 'segment', 'circle', 'text', 'disc',
'fTri', 'graphic'
}
EASEL_FUNCS_ADD_NEITHER = EASEL_FUNCS_CONSTRUCTOR | {'initialState'}
EASEL_FUNCS_ADD_INPUT = EASEL_FUNCS_CLICK
EASEL_FUNCS_ADD_STATE = EASEL_FUNCS_CURRENT_STATE | {'images'}
EASEL_FUNCS_ADD_BOTH = {'newState'}
EASEL_FUNCS = (
EASEL_FUNCS_ADD_NEITHER | EASEL_FUNCS_ADD_INPUT | EASEL_FUNCS_ADD_STATE |
EASEL_FUNCS_ADD_BOTH
)
def setFuncsAddParams(prog):
global funcsAddParams
funcsAddParams = {
'addNeither': EASEL_FUNCS_ADD_NEITHER,
'addInput': EASEL_FUNCS_ADD_INPUT,
'addState': EASEL_FUNCS_ADD_STATE,
'addBoth': EASEL_FUNCS_ADD_BOTH
}
for prog_el in prog[1:]:
if is_led_def(prog_el):
fun_name = tangleRecur(LedDatum(), prog_el[1][1]) # no: ('syms',...)
if fun_name not in EASEL_FUNCS:
body = prog_el[2]
if needBoth(body):
key = 'addBoth'
elif needInput(body):
key = 'addInput'
elif needState(body):
key = 'addState'
else:
key = 'addNeither'
funcsAddParams[key] |= {fun_name}
def needBoth(body) -> bool:
return (
someStrFound(body, funcsAddParams['addBoth']) or
eachStrFound(body, EASEL_PARAMS) or
needInput(body) and needState(body)
)
def needInput(body) -> bool:
"""Assumption:
not someStrFound(body, funcsAddParams['addBoth'])
"""
return (
someStrFound(body, funcsAddParams['addInput']) or
someStrFound(body, EASEL_PARAMS_INPUT)
)
def needState(body) -> bool:
"""Assumption:
not someStrFound(body, funcsAddParams['addBoth'])
"""
return (
someStrFound(body, funcsAddParams['addState']) or
someStrFound(body, EASEL_PARAMS_STATE)
)
def eachStrFound(T, sts) -> bool:
for st in sts:
sts2 = {st}
if not someStrFound(T, sts2):
return False
return True
def someStrFound(T, sts) -> bool:
if isinstance(T, str):
return T in sts
else:
for t in T[1:]:
if someStrFound(t, sts):
return True
return False
################################################################################
"""Tangle function definition."""
def tangleDef(dat: LedDatum, T) -> str:
formFunExpr = T[1]
fun_name = tangleRecur(dat, formFunExpr[1])
dat2 = dat.getAnotherInst()
if len(formFunExpr) > 2: # non-constant function
dat2.indepSymbs = getSymbsFromSyms(formFunExpr[2])
letCls = ()
if T[0] in DEF_WHERE_LABELS:
letCls = tangleWhereClauses(dat2, T[3])
st = defRecur(
dat2, fun_name, dat2.indepSymbs, T[2], moreSpace=True, letCls=letCls
)
return st
IF_LABELS = {'condTerms', 'termIfBoolTerm', 'termOw'}
def tangleIfClauses(dat: LedDatum, T) -> str:
if T[0] == 'termOw':
st = tangleRecur(dat, T[1])
return st
elif T[0] == 'termIfBoolTerm':
st1 = tangleRecur(dat, T[1])
st2 = tangleRecur(dat, T[2])
st2 = applyRecur(dat, 'valToTrth', (st2,))
st = st1 + ' when ' + st2
return st
elif T[0] == 'condTerms':
st = tangleIfClauses(dat, T[1])
for t in T[2:]:
st2 = tangleIfClauses(dat, t)
st += writeElseClause(st2)
return st
else:
raiseError('INVALID IF-CLAUSES')
def tangleWhereClauses(dat: LedDatum, T) -> Tuple[str]:
if T[0] == 'eq':
st = defRecur(dat, T[1], (), T[2])
return st,
elif T[0] == 'conj':
return recurTuple(tangleWhereClauses, dat, T)
else:
raiseError('INVALID WHERE-CLAUSES')
################################################################################
"""Tangle collection."""
def tangleTuple(dat: LedDatum, T) -> str:
func = 'tu'
terms = T[1]
st = applyRecur(dat, func, terms[1:], isInLib=True, argsAreBracketed=True)
return st
def tangleSet(dat: LedDatum, T) -> str:
func = 'se'
if T[0] == 'setEmpty':
args = '',
else:
terms = T[1]
args = terms[1:]
st = applyRecur(dat, func, args, isInLib=True, argsAreBracketed=True)
return st
################################################################################
"""Nonstrict operations."""
NONSTRICT_OPS = {'impl', 'conj'}
def tangleNonstrictOps(dat: LedDatum, T):
st1 = tangleRecur(dat, T[1])
st2 = tangleRecur(dat, T[2])
if T[0] == 'conj':
mainSt = 'valFalse'
whenSt = 'not ' + applyRecur(dat, 'valToTrth', (st1,))
elif T[0] == 'impl':
mainSt = 'valTrue'
whenSt = 'not ' + applyRecur(dat, 'valToTrth', (st1,))
else:
raiseError('MUST BE NON-STRICT OPERATION')
elseSt = st2
st = writeWhenElseClause(mainSt, whenSt, elseSt)
return st
def writeWhenElseClause(
mainSt: str, whenSt: str, elseSt: str
) -> str:
st = mainSt + ' when ' + whenSt + ' else ' + elseSt
st = addParentheses(st)
return st
################################################################################
"""Tangle library operation."""
def tangleLibOps(dat: LedDatum, T) -> str:
st = applyRecur(dat, T[0], T[1:], isInLib=True)
return st
################################################################################
"""SequenceL helpers."""
def writeLetClauses(tup: tuple) -> str:
st = '\tlet\n'
for t in tup:
st += '\t\t' + t
return st
def writeInClause(st: str) -> str:
st = '\tin\n\t\t' + st
return st
def writeElseClause(st: str) -> str:
st = ' else\n\t\t' + st
return st
def appendInds(st: str, tup: tuple) -> str:
if tup != ():
st2 = tup[0]
for t in tup[1:]:
st2 += ', ' + t
st2 = addBrackets(st2)
st += st2
return st
def addBrackets(st: str) -> str:
st = '[' + st + ']'
return st
def addDoubleQuotes(st: str) -> str:
st = '"' + st + '"'
return st
def addParentheses(st: str) -> str:
st = '(' + st + ')'
return st
def funcIsAux(st: str) -> bool:
b = st[-1] == '_'
return b
################################################################################
"""Add otherwise-clauses."""
def addOtherwiseClauses(T):
if isinstance(T, str):
return T
elif T[0] == 'condTerms':
if T[-1][0] == 'termIfBoolTerm': # != 'termOw'
t = 'valNull'
t = ID, t
t = ACT_FUN_EXPR, t
t = 'termOw', t
T += t,
return T
else:
return recurTree(addOtherwiseClauses, T)
################################################################################
"""Expand quantifying symbols."""
QUANT_OPS = {'exist', 'univ'}
def expandSymsInS(T):
if isinstance(T, str):
return T
elif T[0] in QUANT_OPS:
T2 = symsInSetToSymbInSet(T)
return T2
else:
return recurTree(expandSymsInS, T)
def symsInSetToSymbInSet(T):
quantifier = T[0]
pred = T[2]
symsInSet = T[1]
syms = symsInSet[1][1:][::-1]
theSet = symsInSet[2]
symb = syms[0]
symb = 'symb', symb
symbInS = 'symbInS', symb, theSet
T2 = quantifier, symbInS, pred
for sym in syms[1:]:
symb = sym
symb = 'symb', symb
symbInS = 'symbInSet', symb, theSet
T2 = quantifier, symbInS, T2
T2 = recurTree(expandSymsInS, T2)
return T2
################################################################################
"""Tangle aggregation."""
def tangleAggr(dat: LedDatum, T) -> str:
if T[0] in AGGR_OPS:
dat.aggrCateg = READY_LIST
if T[0] == SET_COMPR:
termTree = T[1]
condTree = T[2]
else:
termTree = T[2]
condTree = T[1]
updateDepSymbsRecur(dat, condTree)
uTerm = dat.getAnotherInst(isNext=True)
dat.aTerm = tangleRecur(uTerm, termTree)
uCond = dat.getAnotherInst()
tangleAggr(uCond, condTree)
dat.condInst = uCond
args = dat.aDefFunc(),
st = applyRecur(dat, T[0], args)
return st
elif isGround(dat, T):
dat.aggrCateg = GROUND_SOL
dat.aVal = tangleRecur(dat, T)
st = dat.aDefFunc()
return st
elif T[0] in {'eq', 'setMem'}:
if T[0] == 'eq':
if T[1][0] == ACT_FUN_EXPR:
dat.aggrCateg = 'eqSol'
else: # 'tupT'
dat.aggrCateg = EQS_SOL
else: # 'setMem'
dat.aggrCateg = SET_MEM_SOL
updateDepSymbsRecur(dat, T[1])
dat.aVal = tangleRecur(dat, T[2])
st = dat.aDefFunc()
return st
elif T[0] == 'disj':
dat.aggrCateg = DISJ_SOL
dat1 = dat.getAnotherInst()
tangleAggr(dat1, T[1])
dat.subInst1 = dat1
dat2 = dat.getAnotherInst()
tangleAggr(dat2, T[2])
dat.subInst2 = dat2
st = dat.aDefFunc()
return st
elif T[0] == 'conj':
dat.aggrCateg = CONJ_SOL
dat1 = dat.getAnotherInst()
tangleAggr(dat1, T[1])
dat.subInst1 = dat1
dat2 = dat1.getAnotherInst(isNext=True)
tangleAggr(dat2, T[2])
dat.subInst2 = dat2
st = dat.aDefFunc()
return st
else:
return recurStr(tangleAggr, dat, T)
def updateDepSymbsRecur(dat: LedDatum, T):
if isinstance(T, tuple):
if T[0] == ACT_FUN_EXPR and isConstFunExpr(T):
st = T[1][1]
if isNewDepSymb(dat, st):
dat.depSymbs += st,
else:
recurVoid(updateDepSymbsRecur, dat, T)
def isGround(dat: LedDatum, T) -> bool:
return not newDepSymbFound(dat, T)
def newDepSymbFound(dat: LedDatum, T) -> bool:
if isinstance(T, str):
return False
elif T[0] == ACT_FUN_EXPR and isConstFunExpr(T):
st = T[1][1]
return isNewDepSymb(dat, st)
else:
for t in T[1:]:
if newDepSymbFound(dat, t):
return True
return False
def isNewDepSymb(dat: LedDatum, st: str) -> bool:
return st not in dat.getSymbs() + defedConsts
################################################################################
"""Quantification."""
def tangleQuant(dat: LedDatum, T) -> str:
dat.isUniv = T[0] == 'univ'
symsInSet = T[1]
dat.depSymbs = getSymbsFromSyms(symsInSet[1])
dat2 = dat.getAnotherInst()
dat.qSet = tangleRecur(dat2, symsInSet[2])
dat3 = dat.getAnotherInst(isNext=True)
dat.qPred = tangleRecur(dat3, T[2])
global auxFuncDefs
qFuncs = dat.qDefFuncs()
auxFuncDefs += qFuncs
func = dat.qGetFuncMain()
args = dat.indepSymbs
st = applyRecur(dat, func, args)
return st
def getSymbsFromSyms(T) -> tuple:
syms = T[1:]
symbs = ()
for sym in syms:
symb = sym[1]
symbs += symb,
return symbs
################################################################################
"""Tangle lexeme."""
def tangleLexemes(dat: LedDatum, T) -> str:
lex = T[0]
func = LEXEMES[lex]
arg = T[1]
if lex in LEXEMES_DOUBLY_QUOTED:
arg = addDoubleQuotes(arg)
args = arg,
st = applyRecur(dat, func, args, isInLib=True)
return st
################################################################################
"""Import and use LED library."""
LIB_NAME = 'lib.sl'
LIB_AS = ''
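# Build the SequenceL import statement for the LED library, qualified with the
# LIB_AS namespace alias when one is set.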
def importLib() -> str:
st = 'import * from {} as '.format(addDoubleQuotes('../' + LIB_NAME))
if LIB_AS != '':
st += LIB_AS + '::'
st += '*;\n\n'
return st
def prependLib(st: str) -> str:
st = LIB_AS + st
return st
################################################################################
"""Appened the LED library to the output SL file."""
def getLibsStr() -> str:
st = ''
with open(LIB_NAME) as libFile:
stLib = libFile.read()
msg = '\n\n{}\n\n'.format(blockComment('COPY OF ' + LIB_NAME))
stLib = msg + stLib + '\n'
stLib = markStartEnd(stLib) + '\n\n'
st += stLib
return st
################################################################################
"""Test SL constants."""
def writeTest() -> str:
st = ''
for const in defedConsts:
if const == 'initialState' or const not in EASEL_FUNCS:
func = applyRecur(None, 'pp', (const,))
st += func + '\n'
if st != '':
head = 'Test with SequenceL interpreter:\n\n'
tail = '\n(pp: pretty-print)'
st = head + st + tail
st = blockComment(st)
st += '\n\n'
return st
################################################################################
"""Check whether the LED program is an Easel game.
For each keyword `ledGame` found in the LED program:
- set the Python global variable isGame to True
- delete that keyword from the parsetree
"""
def setIsGame(prog):
prog2 = prog[:1]
for prog_el in prog[1:]:
if is_game_flag(prog_el):
global isGame
isGame = True
else:
prog2 += prog_el,
return prog2
|
mit
| 2,770,051,919,933,209,600 | 26.732547 | 80 | 0.523188 | false |
kerryhatcher/voc
|
setup.py
|
1
|
2059
|
__author__ = 'khatcher'
import os
from setuptools import setup, find_packages
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='voc',
version='0.0.3',
install_requires=[
"beautifulsoup4",
"blinker",
"boto",
"Cerberus",
"dominate",
"elasticsearch",
"Eve",
"Eve-Mongoengine",
"Events",
"Flask",
"Flask-Admin",
"flask-admin-s3-upload",
"Flask-Bootstrap",
"Flask-Classy",
"Flask-Elasticsearch",
"Flask-Menu",
"flask-mongoengine",
"Flask-PyMongo",
"Flask-QRcode",
"Flask-Script",
"Flask-WTF",
"iso8601",
"itsdangerous",
"Jinja2",
"MarkupSafe",
"mongoengine",
"noaaweather",
"nose",
"nose-mongoengine",
"Pillow",
"pymongo",
"qrcode",
"simplejson",
"six",
"url-for-s3",
"urllib3",
"Werkzeug",
"WTForms",
],
    description='Web-based virtual operations center for tactical operations or emergencies',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
url='https://github.com/kerryhatcher/voc',
license='GNU AFFERO GPL v3',
author='Kerry Hatcher',
author_email='[email protected]',
packages=find_packages(exclude=['tests*']),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Other Audience',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
|
agpl-3.0
| 8,967,467,391,179,025,000 | 25.74026 | 82 | 0.524041 | false |
allure-framework/allure-python
|
allure-pytest/test/acceptance/status/skip_setup_status_test.py
|
1
|
1699
|
import allure
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
from allure_commons_test.container import has_container
from allure_commons_test.container import has_before
@allure.feature("Fixture")
def test_skip_fixture(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.fixture
... def skip_fixture():
... pytest.skip()
>>> @pytest.mark.xfail()
... def test_skip_fixture_example(skip_fixture):
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_skip_fixture_example",
with_status("skipped"),
has_status_details(with_message_contains("Skipped")),
has_container(executed_docstring_source.allure_report,
has_before("skip_fixture",
with_status("skipped"),
has_status_details(
with_message_contains("Skipped"),
with_trace_contains("skip_fixture")
),
),
)
)
)
|
apache-2.0
| 2,231,998,169,503,835,000 | 41.475 | 94 | 0.486757 | false |