repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 22 values) | size (stringlengths 4 to 7) | content (stringlengths 626 to 1.05M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 5.21 to 99.9) | line_max (int64, 12 to 999) | alpha_frac (float64, 0.25 to 0.96) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
softelnet/sponge | sponge-jython/examples/script/py/knowledge_base_callbacks.py | 1 | 1948 |
"""
Sponge Knowledge Base
Using knowledge base callbacks.
"""
from java.util.concurrent.atomic import AtomicBoolean, AtomicInteger
from org.openksavi.sponge.examples.util import TestStatus
class ReloadTrigger(Trigger):
def onConfigure(self):
self.withEvent("reload")
def onRun(self, event):
self.logger.debug("Received event: {}", event.name)
sponge.reload()
def onInit():
# Variables for assertions only
sponge.setVariable("onInitCalled", AtomicBoolean(False))
sponge.setVariable("onBeforeLoadCalled", AtomicInteger(0))
sponge.setVariable("onLoadCalled", AtomicInteger(0))
sponge.setVariable("onAfterLoadCalled", AtomicInteger(0))
sponge.setVariable("onStartupCalled", AtomicBoolean(False))
sponge.setVariable("onBeforeReloadCalled", AtomicBoolean(False))
sponge.setVariable("onAfterReloadCalled", AtomicBoolean(False))
sponge.logger.debug("onInit")
sponge.getVariable("onInitCalled").set(True)
def onBeforeLoad():
sponge.logger.debug("onBeforeLoad")
sponge.getVariable("onBeforeLoadCalled").incrementAndGet()
def onLoad():
sponge.logger.debug("onLoad")
sponge.getVariable("onLoadCalled").incrementAndGet()
def onAfterLoad():
sponge.logger.debug("onAfterLoad")
sponge.getVariable("onAfterLoadCalled").incrementAndGet()
def onStartup():
sponge.logger.debug("onStartup")
sponge.getVariable("onStartupCalled").set(True)
sponge.event("reload").sendAfter(1000)
def onShutdown():
sponge.logger.debug("onShutdown")
# Using Java static field because all variables will be lost after shutdown.
TestStatus.onShutdownCalled = True
def onBeforeReload():
sponge.logger.debug("onBeforeReload")
sponge.getVariable("onBeforeReloadCalled").set(True)
def onAfterReload():
sponge.logger.debug("onAfterReload")
sponge.getVariable("onAfterReloadCalled").set(True)
| apache-2.0 | 7,126,901,262,843,742,000 | 32.175439 | 80 | 0.721253 | false |
DarioGT/docker-carra | src/protoLib/models/protomodel.py | 1 | 4345 |
# -*- coding: utf-8 -*-
from django.db import models
from jsonfield2 import JSONField
from .usermodel import AUTH_USER_MODEL
from protoLib.models import TeamHierarchy
from protoLib.middleware import CurrentUserMiddleware
from protoLib.getStuff import getUserTeam
from .protomanager import TeamPermissionManager, ProtoJSONManager
import uuid
from protoExt.utils.utilsFile import joinPath
from protoExt.utils.utilsConvert import slugify2
smControlFields = [
'smOwningUser', 'smOwningTeam', 'smOwningUser_id', 'smOwningTeam_id', \
'smCreatedBy', 'smModifiedBy', 'smCreatedBy_id', 'smModifiedBy_id', \
'smCreatedOn', 'smModifiedOn', \
'smWflowStatus', 'smRegStatus', \
'smNaturalCode', 'smUUID', 'smVersion', 'smVersion_id' ]
class ProtoModelBase(models.Model):
"""
model for user entities creation ( sm security mark )
related_name="%(app_label)s_%(class)s"
"""
smNaturalCode = models.CharField(max_length=50, null=True, blank=True, editable=False)
smRegStatus = models.CharField(max_length=50, null=True, blank=True, editable=False)
smWflowStatus = models.CharField(max_length=50, null=True, blank=True, editable=False)
smOwningUser = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True, related_name='+', editable=False)
smOwningTeam = models.ForeignKey(TeamHierarchy, null=True, blank=True, related_name='+', editable=False)
smCreatedBy = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True, related_name='+', editable=False)
smModifiedBy = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True, related_name='+', editable=False)
smCreatedOn = models.DateTimeField( auto_now_add =True, editable=False, null=True, blank=True )
smModifiedOn = models.DateTimeField( auto_now =True, editable=False, null=True, blank=True)
smUUID = models.UUIDField( default=uuid.uuid4, editable=False)
# If the table is not managed by teams, the manager must be changed
objects = TeamPermissionManager()
smObjects = models.Manager()
# Security indicator used by control permissions
_protoObj = True
# In models where this is False, NaturalCode must be handled directly
_setNaturalCode = True
class Meta:
abstract = True
# https://docs.djangoproject.com/en/1.8/ref/models/options/#permissions
permissions = (
("list_%(class)", "Can list available %(class)s"),
)
def save(self, *args, **kwargs):
# Disabled for loaddata
isRaw = kwargs.get('raw', False)
if not isRaw :
cuser = CurrentUserMiddleware.get_user( False )
# Set fix version 180131
self.smVersion_id = 1
if self._setNaturalCode:
self.smNaturalCode = self.__str__()
if cuser:
setattr(self, 'smModifiedBy', cuser)
# Insert
if not self.pk:
setattr(self, 'smCreatedBy', cuser)
setattr(self, 'smOwningUser', cuser)
setattr(self, 'smOwningTeam', getUserTeam( cuser))
super(ProtoModelBase, self).save(*args, **kwargs)
@property
def wkFilePath(self):
# app/model
return joinPath( self._meta.app_label , self._meta.verbose_name.title() ).lower()
@property
def wkPage(self):
# str-id ( code-0001 )
sAux = self.__str__()
if len( sAux ) > 16: sAux = sAux[:16]
return slugify2( self.__str__() + '-{:08d}'.format( self.id ) )
@property
def wkFullPageName(self):
# :app:model:page | __str__
sAux = joinPath( self.wkFilePath , self.wkPage )
sAux = '[[:' + sAux.replace('/', ':').replace('\\', ':') + '|' + self.__str__() + ']]'
return sAux
class ProtoModelExt(ProtoModelBase):
"""
Model table for the creation of user entities (sm security mark)
with JSON fields (UDPs) and filtering allowed
"""
smInfo = JSONField(default={})
objects = ProtoJSONManager(json_fields = ['smInfo'])
smObjects = models.Manager()
_protoJson = True
class Meta:
abstract = True
def setSecurityInfo(dEntity, data, user_profile, ins):
pass
| mit | 2,237,682,901,933,436,200 | 30.955882 | 110 | 0.627158 | false |
amal029/DataStructuresAndAlgorithmsInPython | skiena/clique_monte_carlo.py | 1 | 1266 |
#!/usr/bin/env python
def mc_clique(graph):
# This is the dictionary to look up the vertex from an index
ret = []
vd = {i: v for i, v in enumerate(graph.vertices())}
for i in range(2**len(list(graph.vertices()))-1, 2, -1):
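# Illustrative note: each integer i encodes a candidate vertex subset through its
# binary digits (bits set to '1' select vertices), and only subsets with at least
# two vertices are checked. The fixed '{0:04b}' width below assumes a 4-vertex
# graph such as the example built in main().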
bnum = list('{0:04b}'.format(i))
if len([x for x in bnum if x > '0']) >= 2:
# Now collect the vertices that you need
to_check = [vd[i] for i, v in enumerate(bnum) if v == '1']
# Now check
rr = []
for i, v in enumerate(to_check):
if v != to_check[-1]:
vn = [graph.opposite(v, e)
for e in graph.incidentEdges(v)]
rr += [all([x in vn for x in to_check[i+1:]])]
if all(rr):
ret += [x. name for x in to_check]
break
return ret
def main():
# Example of an Undirected Graph
vs = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
es = [('a', 'b', 1), ('a', 'c', 1), ('a', 'd', 1),
('c', 'd', 2), ('d', 'b', 1)]
# es += [('c', 'b', 100)]
g = G(vs, es)
return(mc_clique(g))
if __name__ == '__main__':
import sys
sys.path.append('../Graph')
from Graph import Graph as G
print(main())
| mit | 2,207,320,461,886,644,500 | 29.878049 | 70 | 0.446288 | false |
hanzz/spectrum | tools/stats/stats.py | 1 | 3232 |
# -*- coding: utf-8 -*-
from twisted.internet import reactor, task
import sys, os
from twisted.internet import reactor
from twisted.names.srvconnect import SRVConnector
from twisted.words.xish import domish
from twisted.words.protocols.jabber import xmlstream, client, jid
from twisted.words.protocols.jabber.client import IQ
class XMPPClientConnector(SRVConnector):
def __init__(self, reactor, domain, factory):
SRVConnector.__init__(self, reactor, 'xmpp-client', domain, factory)
def pickServer(self):
#host, port = SRVConnector.pickServer(self)
if not self.servers and not self.orderedServers:
# no SRV record, fall back..
port = 5222
return self.servers[0], self.servers[1]
class Client(object):
def __init__(self, client_jid, secret):
self.jid = client_jid
self.password = secret
self.f = client.basicClientFactory(client_jid, secret)
self.f.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
self.f.addBootstrap(xmlstream.STREAM_END_EVENT, self.disconnected)
self.f.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
connector = XMPPClientConnector(reactor, client_jid.host, self.f)
connector.servers = [self.jid.host, 5222]
connector.orderedServers = [self.jid.host, 5222]
connector.connect()
self.t = reactor.callLater(10, self.failed)
def failed(self):
reactor.stop()
def rawDataIn(self, buf):
#self.logs += "RECV: %s\n" % unicode(buf, 'utf-8').encode('ascii', 'replace')
#print "RECV: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
pass
def rawDataOut(self, buf):
#self.logs += "SEND: %s\n" % unicode(buf, 'utf-8').encode('ascii', 'replace')
#print "SEND: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
pass
def connected(self, xs):
self.xmlstream = xs
# Log all traffic
xs.rawDataInFn = self.rawDataIn
xs.rawDataOutFn = self.rawDataOut
def disconnected(self, xs):
pass
def authenticated(self, xs):
self.getStats(sys.argv[3])
def getStats(self, jid = "icq.netlab.cz"):
iq = IQ(self.xmlstream, "get")
iq['to'] = jid
iq.addElement(("http://jabber.org/protocol/stats", "query"))
iq.addCallback(self._statsReceived)
iq.send()
def _statsReceived(self, el):
iq = IQ(self.xmlstream, "get")
iq['to'] = el['from']
q = iq.addElement(("http://jabber.org/protocol/stats", "query"))
query = el.firstChildElement()
for child in query.children:
s = q.addElement('stat')
s['name'] = child['name']
iq.addCallback(self._statsDataReceived)
iq.send()
def _statsDataReceived(self, el):
query = el.firstChildElement()
for stat in query.elements():
print stat['name'].replace('/', '_').replace('-', '_'),stat['value'],stat["units"]
reactor.stop()
if len(sys.argv) != 4:
print "Usage: " + sys.argv[0] + " <bare JID> <password> <transport JID>"
exit(0)
Client(jid.JID(sys.argv[1] + "/stats"), sys.argv[2])
reactor.run()
| gpl-2.0 | 1,954,666,900,846,923,500 | 32.319588 | 95 | 0.616337 | false |
palankai/xadrpy | src/xadrpy/contrib/permanent_session/middleware.py | 1 | 1342 |
'''
Created on 2011.05.05.
@author: pcsaba
'''
from models import PermanentSession
import conf
from django.http import HttpResponse
from xadrpy.contrib.permanent_session.exceptions import ReturnResponseFromTriggerException
class PermanentSessionMiddleware( object ):
def process_request( self, request ):
self._init(request)
self._set_permanent_session_on_request()
try:
self._run_triggers()
except ReturnResponseFromTriggerException, e:
return e.get_response()
def _init(self, request):
self.permanent_session = PermanentSession.objects.get_current(request)
self.request = request
def _set_permanent_session_on_request(self):
setattr(self.request,conf.REQUEST_KEY, self.permanent_session)
def _run_triggers(self):
for trigger in self._get_triggers():
self._run_one_trigger(trigger)
def _get_triggers(self):
return self.permanent_session.triggers.order_by("priority")
def _run_one_trigger(self, trigger):
try:
response = trigger.run(self.request)
finally:
if trigger.need_delete(self.request):
trigger.delete()
if isinstance(response, HttpResponse):
raise ReturnResponseFromTriggerException(response)
| lgpl-3.0 | 180,047,912,903,742,300 | 30.209302 | 90 | 0.657228 | false |
cloudify-cosmo/tosca-vcloud-plugin | vcloud_storage_plugin/volume.py | 1 | 7585 |
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import (wait_for_task, with_vca_client,
get_vcloud_config, get_mandatory,
combine_properties, delete_properties,
error_response)
from vcloud_network_plugin import get_vapp_name, SSH_PUBLIC_IP, SSH_PORT
@operation(resumable=True)
@with_vca_client
def create_volume(ctx, vca_client, **kwargs):
"""
create new volume, e.g.:
{
'use_external_resource': False,
'volume': {
'name': 'some-other',
'size': 11
}
}
"""
# combine properties
obj = combine_properties(ctx, kwargs=kwargs, names=['volume'],
properties=['device_name'])
# get external
if obj.get('use_external_resource'):
ctx.logger.info("External resource has been used")
return
vdc_name = get_vcloud_config()['vdc']
name = obj['volume']['name']
size = obj['volume']['size']
size_in_bytes = size * 1024 * 1024
ctx.logger.info("Create volume '{0}' to '{1}' with size {2}Mb."
.format(name, vdc_name, size))
success, disk = vca_client.add_disk(vdc_name, name, size_in_bytes)
if success:
wait_for_task(vca_client, disk.get_Tasks()[0])
ctx.logger.info("Volume node '{0}' has been created".format(name))
else:
raise cfy_exc.NonRecoverableError(
"Disk creation error: {0}".format(disk))
@operation(resumable=True)
@with_vca_client
def delete_volume(ctx, vca_client, **kwargs):
"""
drop volume
"""
# combine properties
obj = combine_properties(ctx, kwargs=kwargs, names=['volume'],
properties=['device_name'])
# get external
if obj.get('use_external_resource'):
ctx.logger.info("External resource has been used")
return
vdc_name = get_vcloud_config()['vdc']
name = obj['volume']['name']
ctx.logger.info("Delete volume '{0}' from '{1}'."
.format(name, vdc_name))
success, task = vca_client.delete_disk(vdc_name, name)
if success:
wait_for_task(vca_client, task)
ctx.logger.info("Volume node '{0}' has been deleted".format(name))
else:
raise cfy_exc.NonRecoverableError(
"Disk deletion error: {0}".format(task))
delete_properties(ctx)
@operation(resumable=True)
@with_vca_client
def creation_validation(ctx, vca_client, **kwargs):
"""
check volume description
"""
vdc_name = get_vcloud_config()['vdc']
disks_names = [
disk.name for [disk, _vms] in vca_client.get_disks(vdc_name)
]
# combine properties
obj = combine_properties(ctx, kwargs=kwargs, names=['volume'],
properties=['device_name'])
# get external resource flag
if obj.get('use_external_resource'):
# get resource_id
resource_id = get_mandatory(obj, 'resource_id')
if resource_id not in disks_names:
raise cfy_exc.NonRecoverableError(
"Disk {} doesn't exist".format(resource_id))
else:
# get volume
volume = get_mandatory(obj, 'volume')
name = get_mandatory(volume, 'name')
if name in disks_names:
raise cfy_exc.NonRecoverableError(
"Disk {} already exists".format(name))
get_mandatory(volume, 'size')
@operation(resumable=True)
@with_vca_client
def attach_volume(ctx, vca_client, **kwargs):
"""attach volume"""
_wait_for_boot(ctx)
_volume_operation(ctx, vca_client, "ATTACH")
@operation(resumable=True)
@with_vca_client
def detach_volume(ctx, vca_client, **kwargs):
"""
detach volume
"""
_volume_operation(ctx, vca_client, "DETACH")
def _volume_operation(ctx, vca_client, operation):
"""
attach/detach volume
"""
vdc_name = get_vcloud_config()['vdc']
vdc = vca_client.get_vdc(vdc_name)
vmName = get_vapp_name(ctx.target.instance.runtime_properties)
if ctx.source.node.properties.get('use_external_resource'):
volumeName = ctx.source.node.properties['resource_id']
else:
volumeName = ctx.source.node.properties['volume']['name']
vapp = vca_client.get_vapp(vdc, vmName)
for ref in vca_client.get_diskRefs(vdc):
if ref.name == volumeName:
if operation == 'ATTACH':
ctx.logger.info("Attach volume node '{0}'."
.format(volumeName))
task = vapp.attach_disk_to_vm(vmName, ref)
if task:
wait_for_task(vca_client, task)
ctx.logger.info(
"Volume node '{0}' has been attached"
.format(volumeName))
else:
raise cfy_exc.NonRecoverableError(
"Can't attach disk: '{0}' with error: {1}".
format(volumeName, error_response(vapp)))
elif operation == 'DETACH':
ctx.logger.info("Detach volume node '{0}'.".format(volumeName))
task = vapp.detach_disk_from_vm(vmName, ref)
if task:
wait_for_task(vca_client, task)
ctx.logger.info(
"Volume node '{0}' has been detached.".
format(volumeName))
else:
raise cfy_exc.NonRecoverableError(
"Can't detach disk: '{0}'. With error: {1}".
format(volumeName, error_response(vapp)))
else:
raise cfy_exc.NonRecoverableError(
"Unknown operation '{0}'".format(operation))
def _wait_for_boot(ctx):
"""
Wait for the OS to boot.
This function just checks whether sshd is available.
After attaching a disk the system may be unbootable,
so the user can do some manipulation to set up the boot sequence.
"""
from fabric import api as fabric_api
ip = ctx.target.instance.runtime_properties.get(SSH_PUBLIC_IP)
if not ip:
# the private ip will be used in case
# we do not have a public ip
ip = ctx.target.instance.runtime_properties['ip']
port = ctx.target.instance.runtime_properties.get(SSH_PORT, 22)
ctx.logger.info("Using ip '{0}'.".format(ip))
for i in range(30):
ctx.logger.info("Wait for boot '{0}'.".format(i))
try:
with fabric_api.settings(
host_string=ip, port=port, warn_only=True,
abort_on_prompts=True
):
fabric_api.run('id')
time.sleep(5)
except SystemExit:
return
except Exception:
pass
raise cfy_exc.NonRecoverableError("Can't wait for boot")
| apache-2.0 | -9,016,477,754,827,965,000 | 35.642512 | 79 | 0.580092 | false |
jamiebull1/geomeppy | release.py | 1 | 3985 |
from os import fdopen, remove
from shutil import move
import subprocess
import sys
from tempfile import mkstemp
from geomeppy import __version__
def replace(file_path, pattern, subst):
# Create temp file
fh, abs_path = mkstemp()
with fdopen(fh, "w") as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
# Remove original file
remove(file_path)
# Move new file
move(abs_path, file_path)
def main(increment):
# check we're on develop
assert b"* develop" in subprocess.check_output(
["git", "branch"]
), "Not on develop branch"
# check we're up-to-date
status = subprocess.check_output(["git", "status"])
assert b"modified" not in status, "Repository contains modified files"
assert b"Untracked" not in status, "Repository contains untracked files"
# increment version
version = __version__
new_version = [int(i) for i in version.split(".")]
for i in range(len(new_version)):
if i == increment:
new_version[i] += 1
if i > increment:
new_version[i] = 1
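# Illustrative note: with increment == 1 (a 'minor' bump), "1.2.3" becomes
# "1.3.1"; the levels below the bumped one are reset to 1 (not 0) by this loop.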
new_version = ".".join(str(v) for v in new_version)
replace("geomeppy/__init__.py", version, new_version)
replace("setup.py", 'version="%s"' % version, 'version="%s"' % new_version)
replace("setup.py", "tarball/v%s" % version, "tarball/v%s" % new_version)
replace(
"docs/source/conf.py",
'version = "%s"' % ".".join(version.split(".")[:-1]),
'version = "%s"' % ".".join(new_version.split(".")[:-1]),
)
replace(
"docs/source/conf.py",
'release = "%s"' % version,
'release = "%s"' % new_version,
)
try:
# add and commit changes
print(subprocess.check_output(["git", "add", "geomeppy/__init__.py"]))
print(subprocess.check_output(["git", "add", "setup.py"]))
print(subprocess.check_output(["git", "add", "docs/source/conf.py"]))
print(subprocess.check_output(["git", "add", "README.md"]))
print(
subprocess.check_output(["git", "commit", "-m", "release/%s" % new_version])
)
except Exception as e:
# rollback
print("rolling back")
print(e)
replace("geomeppy/__init__.py", new_version, version)
replace("setup.py", new_version, version)
exit()
try:
# push the changes
print(subprocess.check_output(["git", "push", "origin", "develop", "-f"]))
# create a tagged release
print(
subprocess.check_output(
["git", "tag", "release/%s" % new_version, "-m", "v%s" % new_version]
)
)
# push to github
print(
subprocess.check_output(
["git", "push", "origin", "release/%s" % new_version, "-f"]
)
)
except Exception as e:
# rollback
print("rolling back tag")
print(e)
# delete the tagged release
print(
subprocess.check_output(
["git", "tag", "-d", "release/%s" % new_version, "v%s" % new_version]
)
)
# push to github
print(
subprocess.check_output(
[
"git",
"push",
"origin",
":refs/tags/release/%s" % new_version,
"v%s" % new_version,
]
)
)
# from here, the Travis CI magic begins
if __name__ == "__main__":
args = sys.argv[1:]
VERSION = ["major", "minor", "patch"]
try:
increment = VERSION.index(sys.argv[1])
except ValueError:
print(
"%s is not a valid semantic version level (use major, minor, or patch)"
% sys.argv[1]
)
except IndexError:
# default
increment = VERSION.index("patch")
main(increment)
| mit | -6,051,716,917,579,736,000 | 30.377953 | 88 | 0.527227 | false |
dgoffredo/ruler.py | ruler.py | 1 | 7381 |
'''Rule over formal systems using lisp syntax
'''
import traceback
COMMENT_PREFIX = ';'
PLACEHOLDER_PREFIX = ':'
DEFRULE_FORM = 'defrule'
def padParens(s):
return s.replace('(' , ' ( ').replace(')', ' ) ')
# TODO: Should I make s-expressions tuples so they're immutable?
def pyifyToken(token):
if token == '(':
return '['
elif token == ')':
return '],'
else:
return '"' + token.replace('"', r'\"') + '",'
class EmptyForm(Exception):
pass
def read(s):
"""This is not quite a full parser, since it returns only compositions of
str and list, but it's close to a parser. It doesn't know, for example,
the difference between foo and :foo. It treats them as "foo" and ":foo",
respectively. Identification of placeholders (like :foo) is done when
a form is converted into a rule.
"""
tokens = padParens(s).split()
# Omit any comment, e.g. (this is (a form)) ; but not this part
try:
commentBegin = tokens.index(COMMENT_PREFIX)
tokens = tokens[:commentBegin]
except ValueError:
pass
if len(tokens) == 0:
raise EmptyForm('There are no forms in this string: ' + s)
pyExpression = ''.join(pyifyToken(token) for token in tokens)
# Often there will be a tailing comma. Remove it.
if pyExpression.endswith(','):
pyExpression = pyExpression[:-1]
form = eval(pyExpression)
if type(form) not in (str, list):
raise Exception('This string is not a form: ' + repr(s))
return form
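# Illustrative example: read() returns nested lists of strings, e.g.
#   read("(a (b c) d)")  ->  ["a", ["b", "c"], "d"]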
def form2str(form):
t = type(form)
if t is list:
return '(' + ' '.join(form2str(f) for f in form) + ')'
else:
assert t is str, \
str(form) + " is neither a list nor a str, but a " + str(t)
return form
class Placeholder:
"""Represents a placeholder in a rule pattern. Will compare equal to
anything at first, but then will compare equal to other things only
if they're equal to the first thing, until this `Placeholder` is
`reset()`.
"""
def __init__(self):
self.reset()
def __eq__(self, other):
if self.mostRecentlyCompared is None:
self.mostRecentlyCompared = other
return True
else:
return self.mostRecentlyCompared == other
def reset(self):
# This will be trouble if we compare with `None`, but meh.
self.mostRecentlyCompared = None
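# Illustrative usage sketch: the first comparison binds the placeholder,
# later comparisons must match what it bound.
#   p = Placeholder()
#   p == "a"   # True, and "a" is now remembered
#   p == "a"   # True
#   p == "b"   # False, until p.reset() is called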
class Rule:
"""A form transformation rule: pattern -> replacement. The constructor
takes a form (already pythonized) like:
(defrule (... pattern ...) (... replacement ...)
The constructor will parse `pattern` and `replacement`, creating instances
of `Placeholder` where necessary. Then you can call the `apply()` method
to see whether a given form matches this `Rule`, and if so what the form
looks like after the substitution.
"""
def __init__(self, form):
if type(form) != list:
raise Exception('Scalar form cannot define a rule: ' + str(form))
if len(form) != 3:
raise Exception('Rule must have 3 elements, not ' + str(len(form)))
_, pattern, replacement = form
self._shorthand = form2str(pattern) + ' -> ' + form2str(replacement)
def buildPatternForm(fm):
t = type(fm)
if t is list:
return [buildPatternForm(f) for f in fm]
assert t is str
if fm.startswith(PLACEHOLDER_PREFIX):
name = fm[1:]
if name not in self._placeholders:
placeholder = Placeholder()
self._placeholders[name] = placeholder
else:
placeholder = self._placeholders[name]
return placeholder
else:
return fm
self._placeholders = dict()
self._patternForm = buildPatternForm(pattern)
def buildReplacement(fm):
t = type(fm)
if t is list:
return [buildReplacement(f) for f in fm]
assert t is str
if fm.startswith(PLACEHOLDER_PREFIX):
name = fm[1:]
return self._placeholders[name]
else:
return fm
self._replacement = buildReplacement(replacement)
def apply(self, form):
"""If the specified `form` matches this `Rule`, then return a
transformed copy of `form`. Otherwise return `None`.
"""
# Reset our placeholders (if we have any) so that in the comparison
# that follows, a placeholder can enforce that multiple occurrences
# of it must be the same to be considered a match.
for placeholder in self._placeholders.values():
placeholder.reset()
# See if the form matches the pattern. If not, we're done.
if form != self._patternForm:
return None
# The form matches, so now apply the transformation it describes.
if type(self._replacement) is Placeholder:
return self._replacement.mostRecentlyCompared
elif type(self._replacement) is not list:
return self._replacement
# The replacement is a list. Make a copy of it, but replace the
# placeholder(s) with whatever they matched on.
# TODO Do I need to deep copy the stuff that the placeholders matched?
def replace(fm):
t = type(fm)
if t is Placeholder:
return fm.mostRecentlyCompared
elif t is list:
return [replace(f) for f in fm]
else:
assert t is str
return fm
return replace(self._replacement)
def __repr__(self):
return self._shorthand
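# Illustrative usage sketch of the two classes above:
#   rule = Rule(read("(defrule (twice :x) (:x :x))"))
#   rule.apply(["twice", "a"])       # -> ["a", "a"]
#   rule.apply(["twice", "a", "b"])  # -> None (no match)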
class Matches:
count = 0
def applyRules(form, rules):
for rule in rules:
applied = rule.apply(form)
if applied is not None: # It matched
Matches.count += 1
return applyRules(applied, rules)
# Now that we no longer match any rules, check the children
# (if we have any). If any of the children changed, then we
# have to re-roll since we ourselves might now match a rule.
if type(form) is list:
appliedChildren = [applyRules(f, rules) for f in form]
if appliedChildren != form:
return applyRules(appliedChildren, rules)
return form
def evaluate(form, env):
"""Interpret the specified `form` in the specified `env` (environment).
Return the resulting form. `env` might be modified.
"""
if type(form) is list and form[0] == DEFRULE_FORM:
rule = Rule(form)
env['rules'].append(rule)
print('There are', len(env['rules']), 'rules after adding', rule)
return form
return applyRules(form, env['rules'])
def repl():
env = dict()
env['rules'] = []
while True:
try:
print(form2str(evaluate(read(input('ruler> ')), env)))
print('That took', Matches.count, 'matches.')
Matches.count = 0
except EOFError:
break
except EmptyForm:
pass
except Exception:
traceback.print_exc()
print() # Leave the shell on a new line when we're done.
if __name__ == '__main__':
repl()
| mit | -8,064,547,875,096,963,000 | 30.542735 | 79 | 0.581358 | false |
sborgeson/prop39schools | p39toCSV.py | 1 | 12800 |
import sys
import os
from collections import OrderedDict
import lxml
from lxml import etree
schemaFile = os.path.join('schema_files','UtilDataProp39Report.xsd')
def getTextValues(node, valueConf, ns, defaultValue='' ):
out = []
for (xmlName, exportName) in valueConf:
#print xmlName, exportName
if node is None:
out.append( (exportName, defaultValue) )
else:
targetNode = node.find('.%s' % (xmlName), ns)
if targetNode is not None:
out.append( (exportName, targetNode.text) )
else:
#print 'No target node found: %s' % (xmlName)
out.append( (exportName, defaultValue) )
return out
def getUsage(observation, ns=None):
'''Return a tuple of energy received by the customer and energy produced by the customer
If there is no solar, there is just EnergyUsage;
If there is solar, EnergyDelivered is from the grid and EnergyReceived is from the customer
'''
try:
usage = observation[1].find('./uti:IntervalReading_EnergyUsage', ns)
if usage is not None: return (usage.text, '0.0')
else:
delivered = observation[1].find('./uti:IntervalReading_EnergyDelivered', ns)
received = observation[1].find('./uti:IntervalReading_EnergyReceived', ns)
return ( delivered.text,
received.text )
except AttributeError as ae:
print observation[1].getchildren()
raise ae
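# Illustrative: getUsage() returns (energy_used, '0.0') for a site without solar,
# and (energy_delivered, energy_received) otherwise; all values are text taken
# from the XML elements above.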
def getSchoolMeta(node, ns):
schoolMetaConf = [('//uti:UtilitySender', 'utility'),
('//uti:Customer_CustomerName', 'customer_name'),
('//uti:Customer_City', 'customer_city'),
('//uti:Customer_ZipPlus4', 'customer_zip'),
('//uti:Customer_AccountNumber', 'customer_account'),
('//uti:LEA_CustomerName', 'lea_customer'),
('//uti:SchoolSite_CDSCode', 'cds_code'),
('//uti:SchoolSite_SchoolSiteName', 'school_site_name'),
('//uti:SchoolSite_City', 'school_city'),
('//uti:SchoolSite_ZipPlus4', 'school_site_zip'), ]
schoolMeta = getTextValues(node, schoolMetaConf, ns)
return schoolMeta
def loadDoc(dataFile):
#print 'Loading %s' % dataFile
with open(schemaFile) as f:
schema = etree.XMLSchema(etree.parse(f))
#print "Schema OK; loading document"
with open(dataFile) as f:
dataDoc = etree.parse(f)
#print "Validating document ..."
try:
schema.assertValid(dataDoc)
except lxml.etree.DocumentInvalid as validationError:
print 'WARNING: XML document did not pass schema validation: %s' % (str(validationError))
#print "Document OK"
return dataDoc
def getNSMap(dataDoc):
# see http://stackoverflow.com/questions/14853243/parsing-xml-with-namespace-in-python-via-elementtree
# ns = {'uti' : 'http://www.lmonte.com/prop39/UtilDataProp39Report'}
ns = dataDoc.getroot().nsmap
# None and '' are not valid namespace names, but they are sometimes returned anyway!
try: del ns[None]
except: pass
try: del ns['']
except: pass
ns['uti'] = 'http://www.lmonte.com/prop39/UtilDataProp39Report' # hack because this is sometimes missing
return ns
def billsToCSV(dataDoc, csvFile):
root = dataDoc.getroot()
ns = getNSMap(dataDoc)
print 'Writing data to %s' % csvFile
tmpFile = csvFile + '.tmp'
billDataConf = [('/uti:ElectricityMonthly_RateScheduleID', 'rate_schedule_id'),
('/uti:ElectricityMonthly_BillingPeriodNumberOfDays', 'n_days'),
('/uti:ElectricityMonthly_EnergyGenerationOnSite', 'generation'),
('/uti:ElectricityMonthly_StartTime', 'start_time'),
('/uti:ElectricityMonthly_BillLastPeriod', 'last_period'),
('/uti:ElectricityMonthly_ConsumptionBillingPeriodTotal', 'consumption_total'),
('//uti:ElectricityMonthly_ConsumptionOnPeak', 'on_peak'),
('//uti:ElectricityMonthly_ConsumptionSemiPeak', 'semi_peak'),
('//uti:ElectricityMonthly_ConsumptionOffPeak', 'off_peak'),
('//uti:ElectricityMonthly_DemandMaximumOnPeak', 'on_peak_demand'),
('//uti:ElectricityMonthly_DemandMaximumSemiPeak', 'semi_peak_demand'),
('//uti:ElectricityMonthly_DemandMaximumOffPeak', 'off_peak_demand'), ]
with open(tmpFile, 'wb') as csvOut:
# write header line
schoolMeta = getSchoolMeta(root, ns)
header = [x[0] for x in schoolMeta]
header.extend( ['agreement','units','demandUnits'] )
header.extend( [x[1] for x in billDataConf] )
csvOut.write( ','.join( header ) + '\n' )
for account in root.findall('.//uti:ElectricityMonthlyBillDataPerAgreement', ns):
#print account
bills = account.findall('.//uti:ElectricityMonthlyBillData', ns)
if len(bills) > 0:
agreement = account.find('./uti:ElectricityMonthly_AgreementIdentifier', ns).text
units = account.find('./uti:ElectricityMonthly_ConsumptionUnitOfMeasure', ns).text
demandUnits = account.find('./uti:ElectricityMonthly_MaximumDemandUnitOfMeasure', ns).text
meta = [agreement, units, demandUnits]
for bill in bills:
textValues = getTextValues(bill, billDataConf, ns)
row = [x[1] for x in schoolMeta]
row.extend(meta)
row.extend( [x[1] for x in textValues] ) # add bill readings
csvOut.write(','.join([str(x) for x in row]) + '\n' )
#print textValues.values()
try:
if os.path.exists(csvFile): os.remove(csvFile)
os.rename(tmpFile, csvFile)
except:
print 'Error renaming file %s; we will continue with the others.' % csvFile
def intervalsToCSV(dataDoc, csvFile):
root = dataDoc.getroot()
ns = getNSMap(dataDoc)
print 'Writing data to %s' % csvFile
tmpFile = csvFile + '.tmp'
with open(tmpFile, 'wb') as csvOut:
csvOut.write('%s,%s,%s,%s\n' % ('agreement',
'start',
','.join(['d' + str(i+1) for i in range(100)]),
','.join(['r' + str(i+1) for i in range(100)])))
for account in root.findall('.//uti:ElectricityIntervalData', ns):
days = account.findall('.//uti:IntervalBlockDay', ns)
if len(days) > 0:
agreements = account.findall('./uti:ElectricityInterval_AgreementIdentifier', ns)
if len(agreements) > 0:
agreement = account.find('./uti:ElectricityInterval_AgreementIdentifier', ns).text
print 'Agreement: %s' % agreement
print '%s days of data' % len(days)
for day in days:
nobs = day.find('./uti:IntervalReadingLength_IntervalLength', ns).text
start = day.find('./uti:IntervalBlockDay_StartTime', ns).text
obs = day.findall('.//uti:IntervalReadings', ns)
readings = [getUsage(ob, ns) for ob in obs]
if len(readings) == 96:
readings.extend([('', '')] * 4)
if len(readings) == 92:
readings.extend([('', '')] * 8)
if len(readings) == 24:
readings.extend([('', '')])
if len(readings) == 93:
readings.extend([('', '')] * 2)
csvOut.write('%s,%s,%s,%s\n' % (agreement,
start,
','.join([x[0] for x in readings]),
','.join([x[1] for x in readings])))
#print '%s,%s,%s,%s' % (agreement,
# start,
# ','.join([x[0] for x in readings]),
# ','.join([x[1] for x in readings]))
try:
if os.path.exists(csvFile): os.remove(csvFile)
os.rename(tmpFile,csvFile)
except:
print 'Error renaming file %s; we will continue with the others.' % csvFile
if __name__ == '__main__':
skipExisting = True
path = 'sample_data' # relative path to data directory
output_path = 'csv' # relative path to output directory
if len(sys.argv) > 1: # allow for command line override of path
path = sys.argv[1]
if len(sys.argv) > 2: # allow for command line override of output path
output_path = sys.argv[2]
print 'Converting all sample data from %s' % path
for root, dirs, files in os.walk(path):
# replicate directory structure under output_path directory
for d in dirs:
out_d = os.path.join(output_path, os.path.relpath(root, path), d)
if not os.path.exists(out_d):
os.makedirs(out_d)
potentialFiles = [os.path.join(root, f) for f in os.listdir(root)]
# data files are considered all xml files.
dataFiles = [f for f in potentialFiles if os.path.isfile(f) and f.lower().endswith(".xml")]
n = len(dataFiles)
for (i,dataFile) in enumerate(dataFiles):
print '%s (%d/%d)' % (dataFile,i+1,n)
outputFile = os.path.join(output_path, os.path.relpath(dataFile, path))
csvIntervalFile = outputFile + '_INTERVAL.csv'
csvBillFile = outputFile + '_BILL.csv'
dataDoc = None
# dump metadata
# dump billing data
if os.path.exists(csvBillFile) and skipExisting:
print '%s already exists. Skipping conversion.' % csvBillFile
else:
if dataDoc is None: dataDoc = loadDoc(dataFile)
billsToCSV(dataDoc, csvBillFile)
# dump intervals
if os.path.exists(csvIntervalFile) and skipExisting:
print '%s already exists. Skipping conversion.' % csvIntervalFile
else:
dataDoc = loadDoc(dataFile)
intervalsToCSV(dataDoc, csvIntervalFile)
#dataFile = '45700116097703_20142015_PacificGasElectric_ELECTRIC_20151223.xml'
# obs = root \
# .getchildren()[3] \ #-> [0] UtilitySender 'PacificGasElectric'
# #-> [1] ReportingPeriod '2014-07-01Through2015-06-30'
# #-> [2] Prop39SchemaVersion 'P39 XML 1.004'
# #-> [3] DocumentData
# .getchildren()[0] \ #-> [0] utilDataProp39Report
# .getchildren()[3] \ #-> [0] LEA_Customer_CDSCode
# #-> [1] LEA_CustomerData
# #-> [2] SchoolSiteData
# #-> [3] UsageData
# .getchildren()[0] \ #-> [0] ElectricityUsageData
# .getchildren()[5] \ #-> [0] ElectricityMonthlyBillDataPerAgreement
# #-> [1] ElectricityMonthlyBillDataPerAgreement
# #-> [2] ElectricityMonthlyBillDataPerAgreement
# #-> [3] ElectricityMonthlyBillDataPerAgreement
# #-> [4] ElectricityIntervalData
# #-> [5] ElectricityIntervalData
# #-> [6] ElectricityIntervalData
# #-> [7] ElectricityIntervalData
# .getchildren()[2] \ #-> [0] ElectricityInterval_AgreementIdentifier '0428871646'
# #-> [1] ElectricityInterval_ConsumptionUnitOfMeasure 'kWh'
# #-> [2] ElectricityIntervalDataForAgreement
# .getchildren()[0] \ #-> [0:364] IntervalBlockDay
# .getchildren()[3] #-> [0] IntervalReadingLength_IntervalLength (900) i.e. 15 minute
# #-> [1] IntervalBlockDay_DurationInSeconds (86400) i.e. 24 hour
# #-> [2] IntervalBlockDay_StartTime
# #-> [3:98] IntervalReadings
# .getchildren()[0] #-> [0] IntervalReading_StartTime
# #-> [1] IntervalReadingEnergy
# .text #-> 'kWh' for 0 and '0.231' for [1].find('./uti:IntervalReading_EnergyReceived').text
#obs.getchildren()[0].text # IntervalReading_StartTime
#obs.getchildren()[1].text # IntervalReadingEnergy
| mit | 6,427,179,460,344,663,000 | 48.234615 | 114 | 0.546328 | false |
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/python/nic_common.py | 1 | 2721 |
from cli import *
# Common functionality for network devices
def get_nic_info(obj):
info = [("Recorder", obj.recorder),
("MAC address", obj.mac_address),
("Link", obj.link)]
try:
bw = obj.tx_bandwidth
if bw == 0:
bw = "unlimited"
elif bw % 1000:
bw = "%d bit/s" % bw
else:
bw = bw // 1000
if bw % 1000:
bw = "%d kbit/s" % bw
else:
bw = "%d Mbit/s" % (bw // 1000)
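# Illustrative: tx_bandwidth 0 -> "unlimited", 500 -> "500 bit/s",
# 1536000 -> "1536 kbit/s", 100000000 -> "100 Mbit/s"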
info.append(("Transmit limit", bw))
except:
pass
return [(None, info)]
def get_nic_status(obj):
return []
# -------------------- connect --------------------
def connect_cmd(obj, auto, network_poly):
if auto:
print "The flag '-auto' is deprecated, and shouldn't be used."
# for now quiet if network doesn't exist when using auto
if network_poly[0] == str_t:
return
if network_poly[0] == str_t:
print "Argument is not an Ethernet link object."
SIM_command_has_problem()
return
try:
obj.link = network_poly[1]
except Exception, msg:
print "[%s] Connection failed" % obj.name
print msg
# ------------------- disconnect ------------------
def disconnect_cmd(obj):
try:
obj.link = None
except Exception, msg:
print "[%s] Disconnection failed (%s)" % (obj.name, msg)
# ------------- command registration --------------
def new_nic_commands(device_name):
new_command("connect", connect_cmd,
[arg(flag_t, "-auto"),
arg((obj_t("link", "ethernet-link"), str_t), ("link", "link-name"))],
alias = "",
type = ["Ethernet", "Networking"],
short = "connect to a simulated Ethernet link",
see_also = ['<' + device_name + '>.disconnect'],
namespace = device_name,
doc = """
Connect the device to a simulated Ethernet link.
The flag '-auto' is deprecated and shouldn't be used.
""", filename="/mp/simics-3.0/src/extensions/apps-python/nic_common.py", linenumber="59")
new_command("disconnect", disconnect_cmd,
[],
alias = "",
type = ["Ethernet", "Networking"],
short = "disconnect from simulated link",
see_also = ['<' + device_name + '>.connect'],
namespace = device_name,
doc = """
Disconnect the device from a simulated Ethernet link.
""", filename="/mp/simics-3.0/src/extensions/apps-python/nic_common.py", linenumber="72")
| gpl-2.0 | 1,635,320,602,624,765,000 | 32.592593 | 105 | 0.491363 | false |
sunrenjie/3rd-deadly-technotes | technologies/python/pastedeploylab/testroutes.py | 1 | 2996 |
import logging
import os
import webob
import webob.dec
import webob.exc
from paste.deploy import loadapp
from wsgiref.simple_server import make_server
import routes
import routes.middleware
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
LOG = logging.getLogger(__name__)
class Controller(object):
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
context = req.environ.get(CONTEXT_ENV, {})
context['query_string'] = dict(req.params.iteritems())
context['headers'] = dict(req.headers.iteritems())
context['path'] = req.environ['PATH_INFO']
params = req.environ.get(PARAMS_ENV, {})
for name in ['REMOTE_USER', 'AUTH_TYPE']:
try:
context[name] = req.environ[name]
except KeyError:
try:
del context[name]
except KeyError:
pass
params.update(arg_dict)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
result = method(context, **params)
return webob.Response(result)
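# Illustrative: a GET to /users/1 is matched by '/users/{user_id}' in the Router
# below, so __call__ above ends up invoking get_user_by_id(context, user_id='1').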
@staticmethod
def get_user_by_id(context, user_id):
return 'the user %s is on leave' % user_id
@staticmethod
def get_users(context):
return 'the user list is in db'
class Router(object):
def __init__(self):
self._mapper = routes.Mapper()
self._mapper.connect('/users/{user_id}',
controller=Controller(),
action='get_user_by_id',
conditions={'method': ['GET']})
self._mapper.connect('/users/',
controller=Controller(),
action='get_users',
conditions={'method': ['GET']})
self._router = routes.middleware.RoutesMiddleware(self._dispatch, self._mapper)
@webob.dec.wsgify
def __call__(self, req):
return self._router
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
@classmethod
def app_factory(cls, global_config, **local_config):
return cls()
if __name__ == '__main__':
configfile = 'testroutes.ini'
appname = "main"
wsgi_app = loadapp("config:%s" % os.path.abspath(configfile), appname)
usages = """
Usages: access these URLs using curl or httpie:
http://127.0.0.1:8082/users/
http://127.0.0.1:8082/users/1
"""
print(usages)
httpd = make_server('localhost', 8282, wsgi_app)
httpd.serve_forever()
| apache-2.0 | -8,855,347,984,600,511,000 | 27.264151 | 87 | 0.577437 | false |
beav/ceilometer_katello_dispatcher | src/katello_notification/consumer_map.py | 1 | 2517 |
#!/usr/bin/python
import simplejson
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class ConsumerMap():
"""
this is a wrapper for looking up consumer UUIDs for systems
"""
def _load_consumer_map(self, fname):
"""
broken out just to make mocking easier
"""
read_data = {}
try:
with open(fname, 'r') as f:
try:
read_data = simplejson.load(f)
except ValueError:
log.warning("json file %s is corrupt or empty, data will be reset on next write" % fname)
except IOError:
log.warning("unable to open %s, no hypervisor map will be loaded. This is expected during the daemon's initial run." % fname)
return read_data
def _save_consumer_map(self, fname, data, debug=True):
"""
broken out to make mocking easier
"""
log.debug("attempting to write data: %s" % data)
try:
with open(fname, "w+") as f:
simplejson.dump(data, f)
if debug:
log.debug("Wrote mapping to %s" % fname)
except IOError, e:
if debug:
log.error("Unable to write mapping to %s" % fname)
log.exception(e)
def __init__(self, hyp_json_filename):
"""
load map, etc
"""
self.hypervisor_consumer_json_fname = hyp_json_filename
self.hypervisor_consumer_map = self._load_consumer_map(hyp_json_filename)
def find_hypervisor_consumer_uuid(self, local_identifier):
"""
given a hypervisor's local identifier (ex: hostname), find its consumer uuid
raises a KeyError when a value is not found
"""
return self.hypervisor_consumer_map[local_identifier]
def add_hypervisor_consumer_uuid(self, local_identifier, hyp_uuid):
"""
save hypervisor uuid to map
"""
self.hypervisor_consumer_map[local_identifier] = hyp_uuid
# save immediately
self._save_consumer_map(fname=self.hypervisor_consumer_json_fname, data=self.hypervisor_consumer_map)
def remove_hypervisor_consumer_uuid(self, local_identifier):
"""
remove hypervisor uuid from map
"""
del self.hypervisor_consumer_map[local_identifier]
# save immediately
self._save_consumer_map(fname=self.hypervisor_consumer_json_fname, data=self.hypervisor_consumer_map)
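# Illustrative usage sketch (the path, hostname and uuid are placeholders):
#   cmap = ConsumerMap('/var/lib/hypervisor-consumer-map.json')
#   cmap.add_hypervisor_consumer_uuid('host01.example.com', 'abc-123')
#   cmap.find_hypervisor_consumer_uuid('host01.example.com')  # -> 'abc-123'
#   cmap.remove_hypervisor_consumer_uuid('host01.example.com')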
| gpl-2.0 | -1,697,457,594,009,624,800 | 33.013514 | 137 | 0.595948 | false |
calzoneman/ld27 | entity.py | 1 | 9033 |
from collections import namedtuple
from util import *
import math
import random
AABB = namedtuple('AABB', ['x', 'y', 'width', 'height'])
class Entity(object):
def __init__(self, pos, img):
self.x, self.y = pos
self.w, self.h = img.get_width(), img.get_height()
self.image = img
def die(self):
if self.world:
self.world.remove_entity(self)
def get_AABB(self):
return AABB(self.x - self.w/2, self.y - self.h/2, self.w, self.h)
def collides(self, other):
a = self.get_AABB()
b = other.get_AABB()
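# Illustrative note: the four early returns below implement the standard
# axis-aligned bounding-box separation test; the boxes overlap only if
# none of the separating cases hold.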
if a.x + a.width < b.x:
return False
if a.y + a.height < b.y:
return False
if a.x > b.x + b.width:
return False
if a.y > b.y + b.height:
return False
return True
def on_collision(self, ent):
pass
def tick(self):
pass
def move(self, pos):
tx, ty = self.world.screen_to_world(pos)
oldtx, oldty = self.world.screen_to_world((self.x, self.y))
if tx < 0 or ty < 0:
return False
if tx > self.world.width or ty > self.world.height:
return False
if self.world.get_tile(tx, oldty).solid:
pos = (self.x, pos[1])
if self.world.get_tile(oldtx, ty).solid:
pos = (pos[0], self.y)
self.oldx, self.oldy = self.x, self.y
self.x, self.y = pos
if self.world:
self.world.check_collision(self)
return True
def render(self, screen, pos):
sx, sy = pos
sx -= self.w / 2
sy -= self.h / 2
if sx < -self.w or sy < -self.h:
return
if sx > screen.get_width() or sy > screen.get_height():
return
screen.blit(self.image, (sx, sy))
class MeleeSwipe(Entity):
def on_collision(self, ent):
if isinstance(ent, Enemy):
self.player.score += 10
self.player.world.add_entity(TextParticle((ent.x, ent.y), "+10"))
ATTACK_UP = loadimage("attack-up.png")
ATTACK_DOWN = loadimage("attack-down.png")
ATTACK_LEFT = loadimage("attack-left.png")
ATTACK_RIGHT = loadimage("attack-right.png")
AOFF = 10
hurt = loadsound("hurt.wav")
class Player(Entity):
def __init__(self, *args, **kwargs):
Entity.__init__(self, *args, **kwargs)
self.score = 0
self.attack_timer = 0
self.health = 10
self.swipe = False
self.recovery_timer = 0
self.bombs = 2
def on_collision(self, ent):
if isinstance(ent, Clock):
self.score += 100
self.world.spawn_clock()
self.world.reset_timer()
elif isinstance(ent, Enemy):
if self.recovery_timer > 0:
return
hurt.play()
self.health -= 1
self.recovery_timer = 60
def tick(self):
if self.attack_timer:
self.attack_timer -= 1
if self.attack_timer == 0:
self.swipe = False
if self.recovery_timer:
self.recovery_timer -= 1
def attack(self, direction):
if self.attack_timer:
return
if direction == "UP":
self.swipe = MeleeSwipe((self.x, self.y-self.h-AOFF), ATTACK_UP)
elif direction == "DOWN":
self.swipe = MeleeSwipe((self.x, self.y+self.h+AOFF), ATTACK_DOWN)
elif direction == "LEFT":
self.swipe = MeleeSwipe((self.x-self.w-AOFF, self.y), ATTACK_LEFT)
elif direction == "RIGHT":
self.swipe = MeleeSwipe((self.x+self.w+AOFF, self.y), ATTACK_RIGHT)
self.swipe.direction = direction
self.swipe.player = self
self.world.check_collision(self.swipe)
self.attack_timer = 10
def render(self, screen, pos):
x, y = pos
Entity.render(self, screen, pos)
if self.swipe:
if self.swipe.direction == "UP":
self.swipe.render(screen, (x, y - self.h - AOFF))
elif self.swipe.direction == "DOWN":
self.swipe.render(screen, (x, y + self.h + AOFF))
elif self.swipe.direction == "LEFT":
self.swipe.render(screen, (x - self.w - AOFF, y))
elif self.swipe.direction == "RIGHT":
self.swipe.render(screen, (x + self.w + AOFF, y))
class Enemy(Entity):
KB_SPEED = 2
def __init__(self, *args, **kwargs):
Entity.__init__(self, *args, **kwargs)
self.knockbacktimer = 0
self.knockback = (0, 0)
self.health = 5
self.target = None
self.speed = 2
def tick(self):
if self.knockbacktimer:
self.knockbacktimer -= 1
kx, ky = self.knockback
self.move((self.x + kx, self.y + ky))
return
if self.target:
x, y = self.x, self.y
ox, oy = self.target.x, self.target.y
d = (x - ox)**2 + (y - oy)**2
#if d > 100000:
# return
ang = math.atan2(oy - y, ox - x)
dx, dy = math.cos(ang), math.sin(ang)
self.move((x + self.speed * dx, y + self.speed * dy))
def on_collision(self, ent):
if isinstance(ent, MeleeSwipe):
self.health -= 1
if self.health == 0:
self.die()
return
if ent.direction == "UP":
self.knockback = (0, -Enemy.KB_SPEED)
elif ent.direction == "DOWN":
self.knockback = (0, Enemy.KB_SPEED)
elif ent.direction == "LEFT":
self.knockback = (-Enemy.KB_SPEED, 0)
elif ent.direction == "RIGHT":
self.knockback = (Enemy.KB_SPEED, 0)
self.knockbacktimer = 10
elif isinstance(ent, ShrapnelParticle):
self.health = 0
self.die()
elif isinstance(ent, Player):
self.health -= 1
if self.health == 0:
self.die()
ang = math.atan2(ent.y - self.y, ent.x - self.x)
self.knockback = (-math.cos(ang)*Enemy.KB_SPEED,
-math.sin(ang)*Enemy.KB_SPEED)
self.knockbacktimer = 10
class SlowEnemy(Enemy):
def __init__(self, *args, **kwargs):
Enemy.__init__(self, *args, **kwargs)
self.health = 5
self.speed = 2
def die(self):
Entity.die(self)
if self.world:
make_deathparticles((self.x, self.y), self.world)
self.world.spawn_slowenemy()
class FastEnemy(Enemy):
def __init__(self, *args, **kwargs):
Enemy.__init__(self, *args, **kwargs)
self.health = 1
self.speed = 3
def die(self):
Entity.die(self)
if self.world:
make_deathparticles((self.x, self.y), self.world)
self.world.spawn_fastenemy()
clock_get = loadsound("clock_get.wav")
class Clock(Entity):
def on_collision(self, ent):
if isinstance(ent, Player):
clock_get.play()
self.world.add_entity(TextParticle((self.x, self.y), "+100"))
self.world.remove_entity(self)
def make_deathparticles(pos, world):
for i in range(10):
ang = random.random() * 2*math.pi
speed = random.random() * 3
e = BaseParticle(pos, (speed*math.cos(ang), speed*math.sin(ang)))
world.add_entity(e)
def sign(x):
if x == 0:
return 0
return x / abs(x)
PARTICLE_RED = coloredrect((4, 4), (187, 0, 0))
class BaseParticle(Entity):
def __init__(self, pos, vec, timer=60):
self.x, self.y = pos
self.w, self.h = PARTICLE_RED.get_width(), PARTICLE_RED.get_height()
self.image = PARTICLE_RED
self.vec = list(vec)
self.timer = timer
self.damp = 0.1
def tick(self):
self.timer -= 1
if self.timer == 0:
self.die()
self.move((self.vec[0] + self.x, self.vec[1] + self.y))
self.vec[0] -= self.damp * sign(self.vec[0])
if abs(self.vec[0]) < 0.2:
self.vec[0] = 0
self.vec[1] -= self.damp * sign(self.vec[1])
if abs(self.vec[1]) < 0.2:
self.vec[1] = 0
PARTICLE_GREY = coloredrect((4, 4), (60, 60, 60))
class ShrapnelParticle(BaseParticle):
def __init__(self, *args, **kwargs):
BaseParticle.__init__(self, *args, **kwargs)
self.image = PARTICLE_GREY
self.damp = 0.01
SMALLFONT = makefont(14)
class TextParticle(Entity):
def __init__(self, pos, text):
self.x, self.y = pos
self.text = text
self.w, self.h = 0, 0
self.opacity = 255
self.dy = -2.0
def tick(self):
self.y += self.dy
self.dy += 0.1
self.opacity -= 1
if self.opacity <= 0:
self.die()
def render(self, screen, pos):
fg = (0, 0, 0, self.opacity)
self.image = SMALLFONT.render(self.text, 1, fg)
Entity.render(self, screen, pos)
| mit | 957,194,980,910,833,500 | 29.829352 | 79 | 0.521532 | false |
NeuroRoboticTech/Jetduino | Software/Python/grove_rfid.py | 1 | 2106 |
#!/usr/bin/env python
#
# Jetduino test of writing to the GPIO lines
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import serial
import jetduino
from jetduino_pins import *
#ser = serial.Serial('/dev/ttyTHS0') #UART 1
ser = serial.Serial('/dev/ttyTHS1') #UART 2
data = ""
while True:
try:
if ser.inWaiting() > 0:
while ser.inWaiting() > 0:
data = data + ser.read()
print("Read: %s" % data);
data = ""
time.sleep(0.2)
except KeyboardInterrupt:
print("Exiting loop")
break
except IOError:
print ("Error")
ser.close()
| mit | -4,274,835,944,483,452,400 | 31.90625 | 149 | 0.724122 | false |
meteoswiss-mdr/monti-pytroll | scripts/demo_ot_bedka.py | 1 | 3988 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Test the mpop reader for overshooting top detection by Kris Bedka
"""
from mpop.satellites import GeostationaryFactory
from datetime import datetime
from mpop.projector import get_area_def
import sys
import inspect
from satpy.utils import debug_on
debug_on()
area = get_area_def("ccs4")
if len(sys.argv) == 1:
#from my_msg_module import get_last_SEVIRI_date
#tslot = get_last_SEVIRI_date(True, delay=5)
#tslot = datetime(2013, 10, 11, 11, 30)
tslot = datetime(2015, 7, 7, 15, 10)
#tslot = datetime(2015, 10, 15, 11, 00)
elif len(sys.argv) == 6:
year = int(sys.argv[1])
month = int(sys.argv[2])
day = int(sys.argv[3])
hour = int(sys.argv[4])
minute = int(sys.argv[5])
tslot = datetime(year, month, day, hour, minute)
else:
print("\n*** Error, wrong number of input arguments")
print(" usage:")
print(" python "+inspect.getfile(inspect.currentframe()))
print(" or")
print(" python "+inspect.getfile(inspect.currentframe())+" 2017 2 17 14 35\n")
quit()
print ("*** plot overshooting top detection for ", str(tslot))
glbd = GeostationaryFactory.create_scene("msg-ot", "", "Overshooting_Tops", tslot)
print ("... load sat data")
# vars_1d = ['latitude','longitude','time']
# vars_3d = ['ir_brightness_temperature',
# 'ot_rating_ir',
# 'ot_id_number',
# 'ot_anvilmean_brightness_temperature_difference',
# 'ir_anvil_detection',
# 'visible_reflectance',
# 'ot_rating_visible',
# 'ot_rating_shadow',
# 'ot_probability',
# 'surface_based_cape',
# 'most_unstable_cape',
# 'most_unstable_equilibrium_level_temperature',
# 'tropopause_temperature',
# 'surface_1km_wind_shear',
# 'surface_3km_wind_shear',
# 'surface_6km_wind_shear',
# 'ot_potential_temperature',
# 'ot_height',
# 'ot_pressure',
# 'parallax_correction_latitude',
# 'parallax_correction_longitude']
#varnames=['ir_brightness_temperature']
#varnames=['visible_reflectance']
varnames=['ir_anvil_detection']
#varnames=['ot_rating_ir']
#varnames=['ot_anvilmean_brightness_temperature_difference']
#varnames=['latitude','longitude']
glbd.load(varnames) #, area_extent=area.area_extent
varname=varnames[0]
print (glbd[varname].data.shape)
print (glbd[varname].data)
max_data = glbd[varname].data.max()
min_data = glbd[varname].data.min()
from trollimage.image import Image as trollimage
from trollimage.colormap import rainbow
colormap=rainbow
if False:
img = trollimage(glbd[varname].data, mode="L") # , fill_value=0
if colormap.values[0] == 0.0 and colormap.values[-1]==1.0: # scale normalized colormap to range of data
colormap.set_range(min_data, max_data)
img.colorize(colormap)
img.show()
area="ccs4" #
local_data = glbd.project(area)
print(local_data[varname].data.shape)
if True:
for varname in varnames:
img = trollimage(local_data[varname].data, mode="L") # , fill_value=0
if colormap.values[0] == 0.0 and colormap.values[-1]==1.0: # scale normalized colormap to range of data
colormap.set_range(min_data, max_data)
img.colorize(colormap)
#img.show()
PIL_image=img.pil_image()
outputFile = "MSG_OT-"+varname.replace("_", "-")+"-ccs4_"+tslot.strftime("%Y%m%d%H%M")+".png"
print("... display " + outputFile + " &")
PIL_image.save(outputFile, optimize=True)
#print "... show new RGB image"
#from mpop.imageo.geo_image import GeoImage
#img = GeoImage((local_data[0.8].data, local_data[1.6].data, r39 * 100), area,
# tslot, crange=((0, 100), (0, 70), (0, 30)),
# fill_value=(0, 0, 0), mode="RGB")
#img.enhance(gamma=1.7)
#img.show()
| lgpl-3.0 | -3,416,682,577,626,647,000 | 32.512605 | 112 | 0.615346 | false |
jriguera/photoplace | photoplace/lib/PhotoPlace/Actions/saveFilesAction.py | 1 | 7781 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# saveFilesAction.py
#
# Copyright 2010-2015 Jose Riguera Lopez <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__program__ = "photoplace"
__author__ = "Jose Riguera Lopez <[email protected]>"
__version__ = "0.6.3"
__date__ = "Apr 2020"
__license__ = "Apache 2.0"
__copyright__ ="(c) Jose Riguera"
import os.path
import threading
import datetime
import zipfile
import zlib
import shutil
import Interface
from PhotoPlace.Facade import Error
from PhotoPlace.definitions import *
class SaveFiles(Interface.Action, threading.Thread):
def __init__(self, state):
Interface.Action.__init__(self, state, [state.lock_geophotos])
#### Interface.Action.__init__(self, state)
threading.Thread.__init__(self)
self.outputkmz = state.outputkmz
self.dgettext['outputkmz'] = self.outputkmz #.encode(PLATFORMENCODING)
self.outputkml = state.outputkml
self.dgettext['outputkml'] = self.outputkml #.encode(PLATFORMENCODING)
self.photouri = state["photouri"]
self.outputdir = state.outputdir
self.dgettext['outputdir'] = self.outputdir.encode(PLATFORMENCODING)
self.tmpdir = state.tmpdir
self.dgettext['tmpdir'] = ''
if self.tmpdir != None:
self.dgettext['tmpdir'] = self.tmpdir.encode(PLATFORMENCODING)
self.quality = state.quality['zip']
####
self.jpgsize = state['jpgsize']
self.jpgquality = state.quality['img']
self.jpgzoom = state['jpgzoom']
self.copymode = state['copymode']
####
self.outputkmldir = os.path.dirname(self.outputkml)
self.fd = None
try:
self.fd = open(self.outputkml, 'wb')
except IOError as (errno, strerror):
self.dgettext['errno'] = errno
self.dgettext['strerror'] = strerror
msg = _("Cannot create KML file '%(outputkml)s': (%(errno)s) %(strerror)s.")
msg = msg % self.dgettext
self.logger.error(msg)
tip = _("Check if output dir '%s' exists and is writable.") % self.outputdir
raise Error(msg, tip, "IOError")
def ini(self, *args, **kwargs):
####
self.num_photos = 0
self.num_copies = 0
if (self.copymode > 0) and (self.outputdir != None):
msg = _("Generating copy of JPEG files in '%(outputdir)s' ...")
self.logger.info(msg % self.dgettext)
####
self.num_files = 0
self._notify_ini(self.fd, self.outputkml, self.outputkmz,
self.photouri, self.outputdir, self.quality)
try:
kmldom = self.state.kmldata.getKml()
kmldom.writexml(self.fd, u"", u" ", u"\n", "utf-8")
self.num_files += 1
except Exception as e:
self.dgettext['error'] = str(e)
msg = _("Cannot write to file '%(outputkml)s': %(error)s.") % self.dgettext
self.logger.error(msg)
tip = _("Check if output dir '%s' is writable.") % self.outputdir
raise Error(msg, tip, "IOError")
finally:
self.fd.close()
return kmldom
def go(self, rini):
#####
if (self.copymode > 0) and (self.outputdir != None):
for photo in self.state.geophotos:
self._notify_run(photo.path, 0)
if photo.status < self.state.status:
continue
self.dgettext['photo'] = photo.name.encode(PLATFORMENCODING)
self.dgettext['photo_lon'] = photo.lon
self.dgettext['photo_lat'] = photo.lat
self.dgettext['photo_ele'] = photo.ele
self.dgettext['photo_time'] = photo.time
if (self.copymode == 2) or ((self.copymode == 1) and photo.isGeoLocated()):
new_file = os.path.join(self.outputdir, photo.name)
self.dgettext['new_path'] = new_file.encode(PLATFORMENCODING)
try:
if photo.isGeoLocated():
photo.attrToExif()
photo.copy(new_file, True, self.jpgzoom, self.jpgsize, self.jpgquality)
self.num_copies += 1
except Exception as e:
self.dgettext['error'] = str(e)
msg = _("Cannot copy '%(photo)s' to '%(new_path)s': %(error)s.")
self.logger.error(msg % self.dgettext)
else:
self._notify_run(new_file, 1)
msg = _("Photo '%(photo)s' has been copied to '%(new_path)s'.")
self.logger.debug(msg % self.dgettext)
self.num_photos += 1
else:
msg = _("Ignoring not geolocated photo '%(photo)s' (%(photo_time)s).")
self.logger.warning(msg % self.dgettext)
self._notify_run(photo.path, -1)
#####
self.num_files += self.num_copies
kmz_out = None
if self.outputkmz == None:
self._notify_run(self.outputkml, 1)
msg = _("KML output file '%(outputkml)s' has been generated.")
self.logger.info(msg % self.dgettext)
else:
msg = _("Generating KMZ file '%(outputkmz)s' ...")
self.logger.info(msg % self.dgettext)
kmz_out = zipfile.ZipFile(self.outputkmz, "w")
self.rzip(kmz_out, self.outputkmldir)
return kmz_out
def rzip(self, zipf, folder, base=u''):
for f in os.listdir(folder):
full_path = os.path.join(folder, f)
if os.path.isfile(full_path):
base_path = os.path.join(base, f)
self.logger.debug(_("Adding file '%s' to KMZ ...") % \
base_path.encode(PLATFORMENCODING))
self._notify_run(base_path, 1)
zipf.write(full_path, base_path, self.quality)
self.num_files += 1
elif os.path.isdir(full_path):
self.rzip(zipf, full_path, f)
def end(self, rgo):
####
if (self.copymode > 0) and (self.outputdir != None):
self.dgettext['num_photos'] = self.num_photos
self.dgettext['num_copies'] = self.num_copies
msg = _("%(num_photos)d photos have been processed, %(num_copies)d were copied.")
self.logger.info(msg % self.dgettext)
####
self._notify_end(self.num_files)
if rgo:
rgo.close()
msg = _("KMZ file '%(outputkmz)s' has been generated.")
self.logger.info(msg % self.dgettext)
if self.tmpdir:
try:
shutil.rmtree(self.tmpdir, False)
msg = _("Deleting directory '%(tmpdir)s' ...")
self.logger.debug(msg % self.dgettext)
except Exception as exception:
self.dgettext['error'] = str(exception)
msg = _("Cannot delete directory '%(tmpdir)s': %(error)s.")
self.logger.error(msg % self.dgettext)
return self.num_files
# EOF
|
apache-2.0
| -5,633,822,833,549,406,000 | 39.526042 | 95 | 0.546716 | false |
sleventyeleven/badkeys_checker
|
SSHScanner.py
|
1
|
1896
|
from pssh import ParallelSSHClient
import paramiko
import argparse
import pssh.utils
parser = argparse.ArgumentParser(description='Take an SSH Key and Blast it across the network.')
parser.add_argument('-i', '--input', required=True, help='The input file containing hosts')
parser.add_argument('-s', '--sudo', default=False, action='store_true', help='Whether Sudo should be called (Default: False)')
parser.add_argument('-c', '--command', default='id', help='The Command to run (Default: id)')
parser.add_argument('-t', '--timeout', default=120, help='The timeout in seconds (Default: 120)')
parser.add_argument('-p', '--parallel', default=10, help='The number of hosts to run (Default: 10)')
parser.add_argument('-r', '--retries', default=1, help='Number of times to retry (Default: 1)')
parser.add_argument('-u', '--user', default='root', help='The username (Default: root)')
parser.add_argument('-k', '--key', required=True, help='The Key file to use')
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Output activity to stdout')
parser.add_argument('-o', '--output', required=True, help='The output file')
args = vars(parser.parse_args())
def Get_Hosts_From_File(input):
    hosts = []
    for line in open(input, 'r'):
        hosts.append(line.strip())
    return hosts
def Scan_Hosts(args):
if args['verbose']:
pssh.utils.enable_host_logger()
private_key = paramiko.RSAKey.from_private_key_file(args['key'])
client = ParallelSSHClient(Get_Hosts_From_File(args['input']), pkey=private_key, pool_size=args['parallel'], timeout=args['timeout'], num_retries=args['retries'])
output = client.run_command(args['command'], sudo=args['sudo'], stop_on_errors=False)
    f = open(args['output'], 'w')
    f.write("Host\tOutput\n")
    for host in output:
        for line in output[host]['stdout']:
            f.write(host + "\t" + line + "\n")
    f.close()
Scan_Hosts(args)
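# Example invocation (illustrative only; the host list, key path and output file
# names below are assumptions, not files shipped with this script):
#   python SSHScanner.py -i hosts.txt -k ~/.ssh/id_rsa -u root -c id -o results.txt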
|
gpl-2.0
| 7,692,294,057,496,623,000 | 46.4 | 166 | 0.67616 | false |
mit-ll/LO-PHI
|
lophi-analysis/analysis/screenshots/download_all_screenshots.py
|
1
|
2290
|
"""
Simple program to download all of the screenshots of completed analysis.
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import os
# 3rd Party
from pymongo import MongoClient
# LO-PHI
from lophi_automation.database.mongodb_al import MongoDb
ss_phys = "./ss_phys"
ss_virt = "./ss_virt"
def ensure_dir(d):
if not os.path.exists(d):
print "* Creating %s"%d
os.makedirs(d)
else:
print "* %s exists."%d
def download_screenshots(options,positionals):
"""
Download all of the screenshots from a mongoDB server
"""
uri = 'mongodb://'+options.services_host+':27017/lophi_db'
print "* Connecting to %s..."%uri
# Initialize our database connections
client = MongoClient(uri)
DB = MongoDb(uri)
ensure_dir(ss_phys)
ensure_dir(ss_virt)
# Loop over all of our analyses.
db = client.lophi_db
analyses = db.analyses
for analysis in analyses.find():
print analysis['_id']
if "output_files" in analysis:
if "screenshot_final" in analysis['output_files']:
ss_id = analysis['output_files']['screenshot_final']
print "Downloading %s..."%ss_id
if analysis['machine_type'] == 2:
DB.download_file(ss_id, os.path.join(ss_virt, analysis[
'_id']+'.ppm'))
else:
DB.download_file(ss_id, os.path.join(ss_phys, analysis[
'_id']+'.png'))
# if "memory_dump_dirty" in analysis['output_files']:
# ss_id = analysis['output_files']['memory_dump_dirty']
# print "Downloading %s..."%ss_id
# DB.download_file(ss_id,analysis['_id']+'.tar.gz')
if __name__ == "__main__":
import optparse
opts = optparse.OptionParser()
# RabbitMQ (for LARIAT, LAMBDA)
opts.add_option("-S", "--services_host", action="store", type="string",
dest="services_host", default='localhost',
help="Host for global services (MongoDB/RabbitMQ)")
(options, positionals) = opts.parse_args()
download_screenshots(options,positionals)
|
bsd-3-clause
| -4,061,412,870,850,824,000 | 29.533333 | 76 | 0.553712 | false |
emilydolson/forestcat
|
pyrobot/brain/twospiral.py
|
2
|
2577
|
from pyrobot.brain.conx import *
from pyrobot.brain.governor import *
import operator
def test(net, resolution = 30, sum = 0):
if "candidate" in [layer.name for layer in net.layers]:
net["candidate"].active = 0
for x in range(resolution):
row = ""
if sum:
size = 1
else:
size = net["output"].size
for i in range(size):
for y in range(resolution):
input = (x/float(resolution), y/float(resolution))
results = net.propagate(input = input)
if sum:
retval = reduce(operator.add, net["output"].activation) / net["output"].size
else:
retval = results[i]
c = round(retval, 1)
if c == 1.0:
c = "#"
else:
c = str(c * 10)[0]
row += "%s" % c
row += " "
print row
if "candidate" in [layer.name for layer in net.layers]:
net["candidate"].active = 1
def train(net, sweeps = 100, recruit = 0):
if "candidate" in [layer.name for layer in net.layers]:
net["candidate"].active = 1
cont = 0
test(net)
while not net.complete:
net.train(sweeps, cont=cont)
if recruit:
net.recruitBest()
test(net)
cont = 1
fp = open("two-spiral.dat", "r")
inputs = []
targets = []
for line in fp:
data = map(float, line.split())
inputs.append( data[:2] )
targets.append( data[2:] )
net0 = IncrementalNetwork()
net0.addLayers(2, 1)
net0.setInputs( inputs )
net0.setTargets( targets)
net0.tolerance = 0.4
net0.addCandidateLayer(4)
net0.reportRate = 100
#train(net0, 500, recruit=1)
net2 = GovernorNetwork(5, 2.1, 0.01, 5, 0.2) # 5, 2.1, 0.3, 5, 0.2
net2.reportHistograms = 1
net2.addThreeLayers(2, 10, 1)
net2.setInputs( inputs )
net2.setTargets( targets)
net2.tolerance = 0.4
net2.reportRate = 5
net2.doWhile = lambda a, b: 1
#train(net2)
#print net2.ravq
net3 = Network()
net3.addLayers(2, 10, 10, 1)
net3.setInputs( inputs )
net3.setTargets( targets)
net3.tolerance = 0.4
net3.batch = 1
net3.reportRate = 10
train(net3)
class MyNetwork(Network):
def getData(self, i):
patterns = {1.0: [1.0, 0.0], 0.0: [0.0, 1.0]}
data = {}
data["input"] = self.inputs[i]
data["output"] = patterns[self.targets[i][0]]
return data
net4 = MyNetwork()
net4.addLayers(2, 10, 10, 2)
net4.setInputs( inputs )
net4.setTargets( targets)
net4.tolerance = 0.4
net4.reportRate = 100
#train(net4, 100)
|
agpl-3.0
| 3,686,941,603,215,433,000 | 26.126316 | 96 | 0.565774 | false |
duyuan11/glumpy
|
examples/filter-pixelate.py
|
1
|
2167
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy.geometry import primitives
from glumpy.graphics.filter import Filter
from glumpy import gl, app, glm, gloo, data
cube_vertex = """
uniform mat4 model, view, projection;
attribute vec3 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = projection * view * model * vec4(position,1.0);
v_texcoord = texcoord;
}
"""
cube_fragment = """
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
float r = texture2D(texture, v_texcoord).r;
gl_FragColor = vec4(vec3(r),1.0);
}
"""
pixelate = Filter(512, 512, """
uniform float level;
vec4 filter(sampler2D original, sampler2D filtered, vec2 texcoord, vec2 texsize)
{
vec2 uv = (texcoord * level);
uv = (uv - fract(uv)) / level;
return texture2D(filtered, uv);
} """)
pixelate["level"] = 256.0
window = app.Window(1024,1024)
@window.event
def on_draw(dt):
global phi, theta
with pixelate:
window.clear()
gl.glEnable(gl.GL_DEPTH_TEST)
cube.draw(gl.GL_TRIANGLES, faces)
theta += 0.5
phi += 0.5
model = np.eye(4, dtype=np.float32)
glm.rotate(model, theta, 0, 0, 1)
glm.rotate(model, phi, 0, 1, 0)
cube['model'] = model
@window.event
def on_resize(width, height):
cube['projection'] = glm.perspective(45.0, width / float(height), 2.0, 100.0)
pixelate.viewport = 0, 0, width, height
@window.event
def on_mouse_scroll(x, y, dx, dy):
p = pixelate["level"]
pixelate["level"] = min(max(8, p + .01 * dy * p), 512)
# Build cube data
vertices, faces = primitives.cube()
cube = gloo.Program(cube_vertex, cube_fragment)
cube.bind(vertices)
view = np.eye(4, dtype=np.float32)
glm.translate(view, 0, 0, -3)
cube['view'] = view
cube['model'] = np.eye(4, dtype=np.float32)
cube['texture'] = data.checkerboard()
phi, theta = 0, 0
app.run()
|
bsd-3-clause
| -993,216,743,268,455,700 | 23.908046 | 81 | 0.610521 | false |
amylittleyang/OtraCAD
|
cadnano25/cadnano/gui/controllers/viewrootcontroller.py
|
1
|
1064
|
class ViewRootController():
def __init__(self, view_root, model_document):
self._view_root = view_root
self._model_document = model_document
self.connectSignals()
def connectSignals(self):
m_d = self._model_document
v_r = self._view_root
m_d.documentActiveDomainAddedSignal.connect(v_r.activeDomainAddedSlot)
m_d.documentPartAddedSignal.connect(v_r.partAddedSlot)
m_d.documentClearSelectionsSignal.connect(v_r.clearSelectionsSlot)
m_d.documentSelectionFilterChangedSignal.connect(v_r.selectionFilterChangedSlot)
m_d.documentViewResetSignal.connect(v_r.resetRootItemSlot)
def disconnectSignals(self):
m_d = self._model_document
v_r = self._view_root
m_d.documentPartAddedSignal.disconnect(v_r.partAddedSlot)
m_d.documentClearSelectionsSignal.disconnect(v_r.clearSelectionsSlot)
m_d.documentSelectionFilterChangedSignal.disconnect(v_r.selectionFilterChangedSlot)
m_d.documentViewResetSignal.disconnect(v_r.resetRootItemSlot)
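# Usage sketch (names are assumptions; cadnano supplies the actual view root and
# document instances at application setup time):
#   controller = ViewRootController(view_root, model_document)
#   controller.disconnectSignals()  # when the view is torn down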
|
mit
| 4,934,154,739,148,430,000 | 47.409091 | 91 | 0.722744 | false |
edx/opaque-keys
|
opaque_keys/edx/tests/test_locators.py
|
1
|
4865
|
"""
Tests for opaque_keys.edx.locator.
"""
import random
from unittest import TestCase
from uuid import UUID
import ddt
from bson.objectid import ObjectId
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import DefinitionKey
from opaque_keys.edx.locator import Locator, BundleDefinitionLocator, CourseLocator, DefinitionLocator, VersionTree
class LocatorTests(TestCase):
"""
Tests for :class:`.Locator`
"""
def test_cant_instantiate_abstract_class(self):
self.assertRaises(TypeError, Locator)
class DefinitionLocatorTests(TestCase):
"""
Tests for :class:`.DefinitionLocator`
"""
def test_description_locator_url(self):
object_id = '{:024x}'.format(random.randrange(16 ** 24))
definition_locator = DefinitionLocator('html', object_id)
self.assertEqual(f'def-v1:{object_id}+{DefinitionLocator.BLOCK_TYPE_PREFIX}@html',
str(definition_locator))
self.assertEqual(definition_locator, DefinitionKey.from_string(str(definition_locator)))
def test_description_locator_version(self):
object_id = '{:024x}'.format(random.randrange(16 ** 24))
definition_locator = DefinitionLocator('html', object_id)
self.assertEqual(object_id, str(definition_locator.version))
@ddt.ddt
class BundleDefinitionLocatorTests(TestCase):
"""
Tests for :class:`.BundleDefinitionLocator`
"""
@ddt.data(
'bundle-olx:4b33677f-7eb7-4376-8752-024ce057d7e8:5:html:html/introduction/definition.xml',
'bundle-olx:22825172-cde7-4fbd-ac03-a45b631e8e65:studio_draft:video:video/v1/definition.xml',
)
def test_roundtrip_from_string(self, key):
def_key = DefinitionKey.from_string(key)
serialized = str(def_key)
self.assertEqual(key, serialized)
@ddt.data(
{
"bundle_uuid": "4b33677f-7eb7-4376-8752-024ce057d7e8", # string but will be converted to UUID automatically
"block_type": "video",
"olx_path": "video/vid_001/definition.xml",
"bundle_version": 15,
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "video",
"olx_path": "video/vid_001/definition.xml",
"draft_name": "studio_draft",
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "video",
"olx_path": "video/θήτα/definition.xml",
"draft_name": "studio_draft",
},
)
def test_roundtrip_from_key(self, key_args):
key = BundleDefinitionLocator(**key_args)
serialized = str(key)
deserialized = DefinitionKey.from_string(serialized)
self.assertEqual(key, deserialized)
@ddt.data(
{
"bundle_uuid": "not-a-valid-uuid",
"block_type": "video",
"olx_path": "video/vid_001/definition.xml",
"bundle_version": 15,
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "video",
"olx_path": "video/vid_001/definition.xml",
# Missing bundle_version or draft_name
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "video",
"olx_path": "video/vid_001/definition.xml",
# Both bundle_version and draft_name:
"bundle_version": 15,
"draft_name": "studio_draft",
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "colon:in:type",
"olx_path": "video/vid_001/definition.xml",
"draft_name": "studio_draft",
},
{
"bundle_uuid": UUID("4b33677f-7eb7-4376-8752-024ce057d7e8"),
"block_type": "video",
"olx_path": "https://www.example.com", # not a valid OLX path
"draft_name": "studio_draft",
},
)
def test_invalid_args(self, key_args):
with self.assertRaises((InvalidKeyError, TypeError, ValueError)):
BundleDefinitionLocator(**key_args)
class VersionTreeTests(TestCase):
"""
Tests for :class:`.VersionTree`
"""
def test_version_tree(self):
"""
Test making a VersionTree object.
"""
with self.assertRaises(TypeError):
VersionTree("invalid")
versionless_locator = CourseLocator(org="mit.eecs", course="6.002x", run="2014")
with self.assertRaises(ValueError):
VersionTree(versionless_locator)
test_id_loc = '519665f6223ebd6980884f2b'
test_id = ObjectId(test_id_loc)
valid_locator = CourseLocator(version_guid=test_id)
self.assertEqual(VersionTree(valid_locator).children, [])
|
agpl-3.0
| -302,764,857,622,180,300 | 33.232394 | 120 | 0.604402 | false |
gatecat/prjoxide
|
tools/parse_pins.py
|
1
|
2752
|
import json, sys
# Parse a Lattice pinout CSV file to a JSON file for the database
# Usage: parse_pins.py pinout.csv iodb.json
def main():
packages = []
pads = []
with open(sys.argv[1], "r") as csvf:
for line in csvf:
sl = line.replace('"', '')
sl = sl.strip()
if len(sl) == 0 or sl.startswith('#'):
continue
splitl = sl.split(',')
if len(splitl) == 0 or splitl[0] == '':
continue
if len(packages) == 0:
# Header line
COL_PADN = 0
COL_FUNC = 1
COL_CUST_NAME = 2
COL_BANK = 3
COL_DF = 4
COL_LVDS = 5
COL_HIGHSPEED = 6
COL_DQS = 7
COL_PKG_START = 8
if splitl[0] == "index":
# new style pinout
COL_PADN = 1
COL_FUNC = 2
COL_CUST_NAME = None
COL_BANK = 3
COL_DF = 5
COL_LVDS = 6
COL_HIGHSPEED = 7
COL_DQS = 4
COL_PKG_START = 8
elif splitl[2] == "BANK":
# LIFCL-17 style pinout
COL_PADN = 0
COL_FUNC = 1
COL_CUST_NAME = None
COL_BANK = 2
COL_DF = 4
COL_LVDS = 5
COL_HIGHSPEED = 6
COL_DQS = 3
COL_PKG_START = 7
assert splitl[COL_PADN] == "PADN"
packages = splitl[COL_PKG_START:]
continue
func = splitl[COL_FUNC]
io_offset = -1
io_side = ''
io_spfunc = []
io_pio = -1
io_dqs = []
io_vref = -1
if len(func) >= 4 and func[0] == 'P' and func[1] in ('T', 'L', 'R', 'B') and func[-1] in ('A', 'B', 'C', 'D'):
# Regular PIO
io_offset = int(func[2:-1])
io_side = func[1]
io_spfunc = splitl[COL_DF].split('/')
io_pio = "ABCD".index(func[-1])
if io_spfunc == ['-']:
io_spfunc = []
io_dqs = splitl[COL_DQS]
if io_dqs == "" or io_dqs == "-":
io_dqs = []
elif io_dqs.find("DQSN") == 1:
io_dqs = [2, int(io_dqs[5:])]
elif io_dqs.find("DQS") == 1:
io_dqs = [1, int(io_dqs[4:])]
elif io_dqs.find("DQ") == 1:
io_dqs = [0, int(io_dqs[3:])]
else:
assert False, "bad DQS type"
for spf in io_spfunc:
if spf.startswith('VREF'):
bank, _, ref = spf[4:].partition('_')
assert int(bank) == int(splitl[COL_BANK])
io_vref = int(ref)
elif func.startswith('ADC_') or func.startswith('DPHY') or func.startswith('SD0') or func.startswith('JTAG_'):
# Special IO, that we still want in the db
io_spfunc = [func, ]
else:
continue
io_bank = int(splitl[COL_BANK]) if splitl[COL_BANK].isdigit() else -1
io_pins = splitl[COL_PKG_START:]
pads.append(dict(side=io_side, offset=io_offset, pio=io_pio, func=io_spfunc, bank=io_bank, dqs=io_dqs, vref=io_vref, pins=io_pins))
with open(sys.argv[2], "w") as jsf:
jsf.write(json.dumps(dict(packages=packages, pads=pads), sort_keys=True, indent=4))
jsf.write('\n')
if __name__ == '__main__':
main()
|
isc
| 5,201,386,546,822,135,000 | 26.52 | 134 | 0.547965 | false |
biosustain/optlang
|
src/optlang/inspyred_interface.py
|
1
|
12977
|
# Copyright 2013 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to the inspyred heuristic optimization framework
Wraps the inspyred framework by subclassing and extending :class:`Model`,
:class:`Variable`, and :class:`Constraint` from :mod:`interface`.
"""
import logging
import random
import types
log = logging.getLogger(__name__)
import sympy
import inspyred
import interface
class Variable(interface.Variable):
def __init__(self, *args, **kwargs):
super(Variable, self).__init__(*args, **kwargs)
class Objective(interface.Objective):
"""docstring for Objective"""
def __init__(self, expression, *args, **kwargs):
super(Objective, self).__init__(expression, *args, **kwargs)
@property
def value(self):
return self._value
def __str__(self):
if isinstance(self.expression, sympy.Basic):
return super(Objective, self).__str__()
else:
return self.expression.__str__()
# return ' '.join((self.direction, str(self.expression)))
@property
def expression(self):
return self._expression
@expression.setter
def expression(self, value):
self._expression = value
class VariableBounder(object):
"""This class defines a inspyred like Bounder.__init__.py
TODO: Make this work also for integer and binary type variables?
"""
def __init__(self, model):
self.model = model
def __call__(self, candidate, args):
variables = self.model.variables
bounded_candidate = list()
for c, variable in zip(candidate, variables):
if variable.type == 'continuous':
bounded_candidate.append(max(min(c, variable.ub), variable.lb))
elif variable.type == 'integer':
bounded_candidate.append(min(range(variable.lb, variable.ub + 1), key=lambda x: abs(x - c)))
elif variable.type == 'binary':
# print min([0, 1], key=lambda x: abs(x-c))
bounded_candidate.append(min([0, 1], key=lambda x: abs(x - c)))
return bounded_candidate
class Configuration(interface.EvolutionaryOptimizationConfiguration):
"""docstring for Configuration"""
class SubConfiguration(object):
pass
def __init__(self, *args, **kwargs):
super(Configuration, self).__init__(*args, **kwargs)
self._algorithm = inspyred.ec.GA
self._algorithm.terminator = [inspyred.ec.terminators.time_termination,
inspyred.ec.terminators.generation_termination,
inspyred.ec.terminators.evaluation_termination,
inspyred.ec.terminators.diversity_termination,
inspyred.ec.terminators.average_fitness_termination]
self.pop_size = 100
self.seeds = []
self.max_generations = 1
self.max_evaluations = None
self.max_time = None
self.selector_config = self.SubConfiguration()
self.selector_config.num_selected = None
self.selector_config.tournament_size = 2
self.selector_config.num_elites = 0
self.variator_config = self.SubConfiguration()
self.variator_config.mutation_rate = .1
self.variator_config.crossover_rate = 1.
self.variator_config.num_crossover_points = 1
self.topology_config = self.SubConfiguration()
self.topology_config.neighborhood_size = 5
self.swarm_config = self.SubConfiguration()
self.swarm_config.inertia = 0.5
self.swarm_config.cognitive_rate = 2.1
self.swarm_config.social_rate = 2.1
@property
def selector(self):
return self._algorithm.selector
@selector.setter
def selector(self, value):
self.algorithm.selector = value
@property
def variator(self):
return self._algorithm.variator
@variator.setter
def variator(self, value):
self._algorithm.variator = value
@property
def replacer(self):
return self._algorithm.replacer
@replacer.setter
def replacer(self, value):
self._algorithm.replacer = value
@property
def migrator(self):
return self._algorithm.migrator
@migrator.setter
def migrator(self, value):
self._algorithm.migrator = value
@property
def archiver(self):
return self._algorithm.archiver
@archiver.setter
def archiver(self, value):
self._algorithm.archiver = value
@property
def observer(self):
return self._algorithm.observer
@observer.setter
def observer(self, value):
self._algorithm.observer = value
@property
def terminator(self):
return self._algorithm.terminator
@terminator.setter
def terminator(self, value):
self._algorithm.terminator = value
@property
def topology(self):
return self._algorithm.topology
@topology.setter
def topology(self, value):
if value == 'Ring':
self._algorithm.topology = inspyred.swarm.topologies.ring_topology
elif value == 'Star':
self._algorithm.topology = inspyred.swarm.topologies.star_topology
elif isinstance(value, types.FunctionType):
self._algorithm.topology = value
else:
raise ValueError("%s is not a supported topology. Try 'Star' or 'Ring' instead.")
@property
def algorithm(self):
return self._algorithm
@algorithm.setter
def algorithm(self, value):
init = False
try:
previous_selector = self._algorithm.selector
previous_variator = self._algorithm.variator
previous_replacer = self._algorithm.replacer
# previous_migrator = self._algorithm.migrator
# previous_archiver = self._algorithm.archiver
# previous_observer = self._algorithm.observer
# previous_terminator = self._algorithm.terminator
except AttributeError:
init = True
if value == "EvolutionaryComputation":
            self._algorithm = inspyred.ec.EvolutionaryComputation(random)
elif value == "GeneticAlgorithm" or value == "GA":
self._algorithm = inspyred.ec.GA(random)
elif value == "ParticleSwarmOptimization" or value == "PSO":
self._algorithm = inspyred.swarm.PSO(random)
elif value == "AntColonySystem" or value == "ACS":
self._algorithm = inspyred.swarm.ACS(random)
elif value == "EvolutionaryStrategy" or value == "ES":
self._algorithm = inspyred.ec.ES(random)
elif value == "DifferentialEvolutionaryAlgorithm" or value == "DEA":
self._algorithm = inspyred.ec.DEA(random)
elif value == "SimulatedAnnealing" or value == "SA":
self._algorithm = inspyred.ec.SA(random)
elif value == "NSGA2":
self._algorithm = inspyred.emo.NSGA2(random)
elif value == "PAES":
self._algorithm = inspyred.emo.PAES(random)
elif value == "Pareto":
self._algorithm = inspyred.emo.Pareto(random)
else:
            raise ValueError(
                "%s is not supported. Try one of the following instead: "
                "'GeneticAlgorithm', 'ParticleSwarmOptimization', 'EvolutionaryStrategy'. "
                "TODO: be more specific here" % value)
# self._algorithm.terminator = self._default_terminator
if init is False:
self._algorithm.selector = previous_selector
self._algorithm.variator = previous_variator
self._algorithm.replacer = previous_replacer
# previous_migrator = self._algorithm.migrator
# previous_archiver = self._algorithm.archiver
# previous_observer = self._algorithm.observer
# previous_terminator = self._algorithm.terminator
# TODO: setting a new algorithm should recycle old variators, selectors etc.
def _evolve_kwargs(self):
"""Filter None keyword arguments. Intended to be passed on to algorithm.evolve(...)"""
valid_evolve_kwargs = (
'max_generations', 'max_evaluations', 'pop_size', 'neighborhood_size', 'tournament_size', 'mutation_rate')
filtered_evolve_kwargs = dict()
for key in valid_evolve_kwargs:
attr_value = getattr(self, key)
if attr_value is not None:
filtered_evolve_kwargs[key] = attr_value
# return filtered_evolve_kwargs
return {}
class Model(interface.Model):
"""Interface"""
def __init__(self, algorithm=None, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.configuration = Configuration()
if algorithm is None:
self.configuration.algorithm = "GA"
else:
self.configuration.algorithm = algorithm
self._bounder = VariableBounder(self)
self._generator = self._generator
def _generator(self, random, args):
individual = list()
for variable in self.variables:
if variable.type == 'continuous':
individual.append(random.uniform(variable.lb, variable.ub))
else:
individual.append(random.choice(range(variable.lb, variable.ub + 1)))
return individual
def _evaluator(self, candidates, args):
fitness = list()
for candidate in candidates:
substitution_dict = dict(zip(self.variables, candidate))
if isinstance(self.objective.expression, sympy.Basic):
fitness.append(self.objective.expression.subs(substitution_dict))
else:
fitness.append(self.objective.expression(substitution_dict))
return fitness
# @inspyred.ec.evaluators.evaluator
# def _evaluate(self, candidate, args):
# substitution_dict = dict(zip(self.variables, candidate))
# try:
# fitness = self.objective.expression.subs(substitution_dict)
# except AttributeError:
# fitness = self.objective.expression(substitution_dict)
# return fitness
def optimize(self, *args, **kwargs):
# import pdb; pdb.set_trace();
final_population = self.configuration.algorithm.evolve(
generator=self._generator,
evaluator=self._evaluator,
bounder=self._bounder,
pop_size=self.configuration.pop_size,
maximize={'max': True, 'min': False}[self.objective.direction],
max_generations=self.configuration.max_generations,
max_evaluations=self.configuration.max_evaluations,
neighborhood_size=self.configuration.topology_config.neighborhood_size,
mutation_rate=self.configuration.variator_config.mutation_rate,
tournament_size=self.configuration.selector_config.tournament_size
)
return final_population
if __name__ == '__main__':
# from optlang.interface import Objective, Variable
import numpy
import inspyred
x = Variable('x', lb=0, ub=2)
y = Variable('y', lb=0, ub=2)
rosenbrock_obj = Objective((1 - x) ** 2 + 100 * (y - x ** 2) ** 2, name="Rosenbrock function", direction='min')
print("The rosenbrock function:", rosenbrock_obj)
print("The global minimum at (x,y) = (1,1) is", rosenbrock_obj.expression.subs({x: 1, y: 1}))
problem = Model(name='rosenbrock', algorithm='PSO')
# problem = Model(name='rosenbrock')
problem.objective = rosenbrock_obj
def my_observer(population, num_generations, num_evaluations, args):
best = max(population)
print(('{0:6} -- {1} : {2}'.format(num_generations,
best.fitness,
str(best.candidate))))
problem.configuration.max_generations = 100
problem.configuration.terminator = inspyred.ec.terminators.generation_termination
problem.configuration.observer = my_observer
problem.configuration.selector = inspyred.ec.selectors.tournament_selection
final_pop = problem.optimize()
fitnesses = [individual.fitness for individual in final_pop]
print(fitnesses)
print("mean", numpy.mean(fitnesses))
print("max", numpy.max(fitnesses))
print("min", numpy.min(fitnesses))
# print numpy.std(fitnesses)
|
apache-2.0
| 5,685,292,111,426,514,000 | 35.658192 | 118 | 0.628034 | false |
bytedance/fedlearner
|
web_console_v2/api/test/fedlearner_webconsole/dataset/apis_test.py
|
1
|
13083
|
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
import time
import os
import shutil
import tempfile
import unittest
from datetime import datetime, timezone
from http import HTTPStatus
from pathlib import Path
from unittest import mock
from unittest.mock import patch, MagicMock
from collections import namedtuple
from testing.common import BaseTestCase
from fedlearner_webconsole.db import db_handler as db
from fedlearner_webconsole.dataset.models import (Dataset, DatasetType)
from tensorflow.io import gfile
FakeFileStatistics = namedtuple('FakeFileStatistics', ['length', 'mtime_nsec'])
class DatasetApiTest(BaseTestCase):
class Config(BaseTestCase.Config):
STORAGE_ROOT = tempfile.gettempdir()
def setUp(self):
super().setUp()
with db.session_scope() as session:
self.default_dataset1 = Dataset(
name='default dataset1',
dataset_type=DatasetType.STREAMING,
comment='test comment1',
path='/data/dataset/123',
project_id=1,
)
session.add(self.default_dataset1)
session.commit()
time.sleep(1)
with db.session_scope() as session:
self.default_dataset2 = Dataset(
name='default dataset2',
dataset_type=DatasetType.STREAMING,
comment='test comment2',
path=os.path.join(tempfile.gettempdir(), 'dataset/123'),
project_id=2,
)
session.add(self.default_dataset2)
session.commit()
def test_get_dataset(self):
get_response = self.get_helper(
f'/api/v2/datasets/{self.default_dataset1.id}')
self.assertEqual(get_response.status_code, HTTPStatus.OK)
dataset = self.get_response_data(get_response)
self.assertEqual(
{
'id': 1,
'name': 'default dataset1',
'dataset_type': 'STREAMING',
'comment': 'test comment1',
'path': '/data/dataset/123',
'created_at': mock.ANY,
'updated_at': mock.ANY,
'deleted_at': None,
'data_batches': [],
'project_id': 1,
}, dataset)
def test_get_dataset_not_found(self):
get_response = self.get_helper('/api/v2/datasets/10086')
self.assertEqual(get_response.status_code, HTTPStatus.NOT_FOUND)
def test_get_datasets(self):
get_response = self.get_helper('/api/v2/datasets')
self.assertEqual(get_response.status_code, HTTPStatus.OK)
datasets = self.get_response_data(get_response)
self.assertEqual(len(datasets), 2)
self.assertEqual(datasets[0]['name'], 'default dataset2')
self.assertEqual(datasets[1]['name'], 'default dataset1')
def test_get_datasets_with_project_id(self):
get_response = self.get_helper('/api/v2/datasets?project=1')
self.assertEqual(get_response.status_code, HTTPStatus.OK)
datasets = self.get_response_data(get_response)
self.assertEqual(len(datasets), 1)
self.assertEqual(datasets[0]['name'], 'default dataset1')
def test_preview_dataset_and_feature_metrics(self):
# write data
gfile.makedirs(self.default_dataset2.path)
meta_path = os.path.join(self.default_dataset2.path, '_META')
meta_data = {
'dtypes': {
'f01': 'bigint'
},
'samples': [
[1],
[0],
],
}
with gfile.GFile(meta_path, 'w') as f:
f.write(json.dumps(meta_data))
features_path = os.path.join(self.default_dataset2.path, '_FEATURES')
features_data = {
'f01': {
'count': '2',
'mean': '0.0015716767309123998',
'stddev': '0.03961485047808605',
'min': '0',
'max': '1',
'missing_count': '0'
}
}
with gfile.GFile(features_path, 'w') as f:
f.write(json.dumps(features_data))
hist_path = os.path.join(self.default_dataset2.path, '_HIST')
hist_data = {
"f01": {
"x": [
0.0, 0.1, 0.2, 0.30000000000000004, 0.4, 0.5,
0.6000000000000001, 0.7000000000000001, 0.8, 0.9, 1
],
"y": [12070, 0, 0, 0, 0, 0, 0, 0, 0, 19]
}
}
with gfile.GFile(hist_path, 'w') as f:
f.write(json.dumps(hist_data))
response = self.client.get('/api/v2/datasets/2/preview')
self.assertEqual(response.status_code, 200)
preview_data = self.get_response_data(response)
meta_data['metrics'] = features_data
self.assertEqual(preview_data, meta_data, 'should has preview data')
feat_name = 'f01'
feature_response = self.client.get(
f'/api/v2/datasets/2/feature_metrics?name={feat_name}')
self.assertEqual(response.status_code, 200)
feature_data = self.get_response_data(feature_response)
self.assertEqual(
feature_data, {
'name': feat_name,
'metrics': features_data.get(feat_name, {}),
'hist': hist_data.get(feat_name, {})
}, 'should has feature data')
@patch('fedlearner_webconsole.dataset.apis.datetime')
def test_post_datasets(self, mock_datetime):
mock_datetime.now = MagicMock(
return_value=datetime(2020, 6, 8, 6, 6, 6))
name = 'test post dataset'
dataset_type = DatasetType.STREAMING.value
comment = 'test comment'
create_response = self.post_helper('/api/v2/datasets',
data={
'name': name,
'dataset_type': dataset_type,
'comment': comment,
'project_id': 1,
})
self.assertEqual(create_response.status_code, HTTPStatus.OK)
created_dataset = self.get_response_data(create_response)
dataset_path = os.path.join(
tempfile.gettempdir(), 'dataset/20200608_060606_test-post-dataset')
self.assertEqual(
{
'id': 3,
'name': 'test post dataset',
'dataset_type': dataset_type,
'comment': comment,
'path': dataset_path,
'created_at': mock.ANY,
'updated_at': mock.ANY,
'deleted_at': None,
'data_batches': [],
'project_id': 1,
}, created_dataset)
# patch datasets
updated_comment = 'updated comment'
put_response = self.patch_helper('/api/v2/datasets/3',
data={'comment': updated_comment})
updated_dataset = self.get_response_data(put_response)
self.assertEqual(
{
'id': 3,
'name': 'test post dataset',
'dataset_type': dataset_type,
'comment': updated_comment,
'path': dataset_path,
'created_at': mock.ANY,
'updated_at': mock.ANY,
'deleted_at': None,
'data_batches': [],
'project_id': 1,
}, updated_dataset)
@patch('fedlearner_webconsole.dataset.apis.scheduler.wakeup')
def test_post_batches(self, mock_wakeup):
dataset_id = self.default_dataset1.id
event_time = int(
datetime(2020, 6, 8, 6, 8, 8, tzinfo=timezone.utc).timestamp())
files = ['/data/upload/1.csv', '/data/upload/2.csv']
move = False
comment = 'test post comment'
create_response = self.post_helper(
f'/api/v2/datasets/{dataset_id}/batches',
data={
'event_time': event_time,
'files': files,
'move': move,
'comment': comment
})
self.assertEqual(create_response.status_code, HTTPStatus.OK)
created_data_batch = self.get_response_data(create_response)
self.maxDiff = None
self.assertEqual(
{
'id': 1,
'dataset_id': 1,
'comment': comment,
'event_time': event_time,
'created_at': mock.ANY,
'updated_at': mock.ANY,
'deleted_at': None,
'file_size': 0,
'move': False,
'num_file': 2,
'num_imported_file': 0,
'path': '/data/dataset/123/batch/20200608_060808',
'state': 'NEW',
'details': {
'files': [{
'destination_path':
'/data/dataset/123/batch/20200608_060808/1.csv',
'error_message': '',
'size': '0',
'source_path': '/data/upload/1.csv',
'state': 'UNSPECIFIED'
}, {
'destination_path':
'/data/dataset/123/batch/20200608_060808/2.csv',
'error_message': '',
'size': '0',
'source_path': '/data/upload/2.csv',
'state': 'UNSPECIFIED'
}]
}
}, created_data_batch)
mock_wakeup.assert_called_once_with(
data_batch_ids=[created_data_batch['id']])
class FilesApiTest(BaseTestCase):
class Config(BaseTestCase.Config):
STORAGE_ROOT = tempfile.gettempdir()
def setUp(self):
super().setUp()
# Create a temporary directory
self._tempdir = os.path.join(tempfile.gettempdir(), 'upload')
os.makedirs(self._tempdir, exist_ok=True)
subdir = Path(self._tempdir).joinpath('s')
subdir.mkdir()
Path(self._tempdir).joinpath('f1.txt').write_text('f1')
Path(self._tempdir).joinpath('f2.txt').write_text('f2f2')
subdir.joinpath('s3.txt').write_text('s3s3s3')
# Mocks os.stat
self._orig_os_stat = os.stat
def fake_stat(path, *arg, **kwargs):
return self._get_file_stat(self._orig_os_stat, path)
gfile.stat = fake_stat
def tearDown(self):
os.stat = self._orig_os_stat
# Remove the directory after the test
shutil.rmtree(self._tempdir)
super().tearDown()
def _get_temp_path(self, file_path: str = None) -> str:
return str(Path(self._tempdir, file_path or '').absolute())
def _get_file_stat(self, orig_os_stat, path):
if path == self._get_temp_path('f1.txt') or \
path == self._get_temp_path('f2.txt') or \
path == self._get_temp_path('s/s3.txt'):
return FakeFileStatistics(2, 1613982390 * 1e9)
else:
return orig_os_stat(path)
def test_get_default_storage_root(self):
get_response = self.get_helper('/api/v2/files')
self.assertEqual(get_response.status_code, HTTPStatus.OK)
files = self.get_response_data(get_response)
self.assertEqual(sorted(files, key=lambda f: f['path']), [
{
'path': self._get_temp_path('f1.txt'),
'size': 2,
'mtime': 1613982390
},
{
'path': self._get_temp_path('f2.txt'),
'size': 2,
'mtime': 1613982390
},
{
'path': self._get_temp_path('s/s3.txt'),
'size': 2,
'mtime': 1613982390
},
])
def test_get_specified_directory(self):
dir = self._get_temp_path('s')
get_response = self.get_helper(f'/api/v2/files?directory={dir}')
self.assertEqual(get_response.status_code, HTTPStatus.OK)
files = self.get_response_data(get_response)
self.assertEqual(files, [
{
'path': self._get_temp_path('s/s3.txt'),
'size': 2,
'mtime': 1613982390
},
])
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 5,593,758,142,403,258,000 | 36.487106 | 79 | 0.522663 | false |
captainsafia/agate
|
agate/column_types.py
|
1
|
7393
|
#!/usr/bin/env python
"""
This module contains the :class:`ColumnType` class and its subclasses. These
types define how data should be converted during the creation of a
:class:`.Table`. Each subclass of :class:`ColumnType` is associated with a
subclass of :class:`.Column`. For instance, specifying that data is of
:class:`NumberType` will cause a :class:`.NumberColumn` to be created on the
table.
"""
import datetime
try:
from cdecimal import Decimal, InvalidOperation
except ImportError: #pragma: no cover
from decimal import Decimal, InvalidOperation
from dateutil.parser import parse
import pytimeparse
import six
from agate.exceptions import CastError
#: Default values which will be automatically cast to :code:`None`
DEFAULT_NULL_VALUES = ('', 'na', 'n/a', 'none', 'null', '.')
#: Default values which will be automatically cast to :code:`True`.
DEFAULT_TRUE_VALUES = ('yes', 'y', 'true', 't')
#: Default values which will be automatically cast to :code:`False`.
DEFAULT_FALSE_VALUES = ('no', 'n', 'false', 'f')
class ColumnType(object): #pragma: no cover
"""
Base class for column data types.
:param null_values: A sequence of values which should be cast to
:code:`None` when encountered with this type.
"""
def __init__(self, null_values=DEFAULT_NULL_VALUES):
self.null_values = null_values
def _create_column(self, table, index):
raise NotImplementedError
class BooleanType(ColumnType):
"""
Column type for :class:`BooleanColumn`.
:param true_values: A sequence of values which should be cast to
:code:`True` when encountered with this type.
:param false_values: A sequence of values which should be cast to
:code:`False` when encountered with this type.
"""
def __init__(self, true_values=DEFAULT_TRUE_VALUES, false_values=DEFAULT_FALSE_VALUES, null_values=DEFAULT_NULL_VALUES):
super(BooleanType, self).__init__(null_values=null_values)
self.true_values = true_values
self.false_values = false_values
def cast(self, d):
"""
Cast a single value to :class:`bool`.
:param d: A value to cast.
:returns: :class:`bool` or :code:`None`.
"""
if isinstance(d, bool) or d is None:
return d
if isinstance(d, six.string_types):
            d = d.replace(',', '').strip()
d_lower = d.lower()
if d_lower in self.null_values:
return None
if d_lower in self.true_values:
return True
if d_lower in self.false_values:
return False
raise CastError('Can not convert value %s to bool for BooleanColumn.' % d)
def _create_column(self, table, index):
from agate.columns import BooleanColumn
return BooleanColumn(table, index)
class DateType(ColumnType):
"""
Column type for :class:`DateColumn`.
"""
def __init__(self, date_format=None, null_values=DEFAULT_NULL_VALUES):
super(DateType, self).__init__(null_values=null_values)
self.date_format = date_format
def cast(self, d):
"""
Cast a single value to a :class:`datetime.date`.
:param date_format: An optional :func:`datetime.strptime`
format string for parsing dates in this column.
:returns: :class:`datetime.date` or :code:`None`.
"""
if isinstance(d, datetime.date) or d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in self.null_values:
return None
if self.date_format:
return datetime.datetime.strptime(d, self.date_format).date()
return parse(d).date()
def _create_column(self, table, index):
from agate.columns import DateColumn
return DateColumn(table, index)
class DateTimeType(ColumnType):
"""
Column type for :class:`DateTimeColumn`.
"""
def __init__(self, datetime_format=None, null_values=DEFAULT_NULL_VALUES):
super(DateTimeType, self).__init__(null_values=null_values)
self.datetime_format = datetime_format
def cast(self, d):
"""
Cast a single value to a :class:`datetime.datetime`.
:param date_format: An optional :func:`datetime.strptime`
format string for parsing datetimes in this column.
:returns: :class:`datetime.datetime` or :code:`None`.
"""
if isinstance(d, datetime.datetime) or d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in self.null_values:
return None
if self.datetime_format:
return datetime.datetime.strptime(d, self.datetime_format)
return parse(d)
def _create_column(self, table, index):
from agate.columns import DateTimeColumn
return DateTimeColumn(table, index)
class TimeDeltaType(ColumnType):
"""
Column type for :class:`datetime.timedelta`.
"""
def cast(self, d):
"""
Cast a single value to :class:`datetime.timedelta`.
:param d: A value to cast.
:returns: :class:`datetime.timedelta` or :code:`None`
"""
if isinstance(d, datetime.timedelta) or d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in self.null_values:
return None
seconds = pytimeparse.parse(d)
return datetime.timedelta(seconds=seconds)
def _create_column(self, table, index):
from agate.columns import TimeDeltaColumn
return TimeDeltaColumn(table, index)
class NumberType(ColumnType):
"""
Column type for :class:`NumberColumn`.
"""
def cast(self, d):
"""
Cast a single value to a :class:`decimal.Decimal`.
:returns: :class:`decimal.Decimal` or :code:`None`.
:raises: :exc:`.CastError`
"""
if isinstance(d, Decimal) or d is None:
return d
if isinstance(d, six.string_types):
            d = d.replace(',', '').strip()
if d.lower() in self.null_values:
return None
if isinstance(d, float):
raise CastError('Can not convert float to Decimal for NumberColumn. Convert data to string first!')
try:
return Decimal(d)
except InvalidOperation:
raise CastError('Can not convert value "%s" to Decimal for NumberColumn.' % d)
def _create_column(self, table, index):
from agate.columns import NumberColumn
return NumberColumn(table, index)
class TextType(ColumnType):
"""
Column type for :class:`TextColumn`.
"""
def cast(self, d):
"""
Cast a single value to :func:`unicode` (:func:`str` in Python 3).
:param d: A value to cast.
:returns: :func:`unicode` (:func:`str` in Python 3) or :code:`None`
"""
if d is None:
return d
if isinstance(d, six.string_types):
d = d.strip()
if d.lower() in self.null_values:
return None
return six.text_type(d)
def _create_column(self, table, index):
from agate.columns import TextColumn
return TextColumn(table, index)
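# Illustrative casts (assumed inputs, shown only as a usage sketch):
#   NumberType().cast('1,234.5') -> Decimal('1234.5')
#   BooleanType().cast('Yes') -> True
#   DateType(date_format='%m/%d/%Y').cast('03/01/2015') -> datetime.date(2015, 3, 1)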
|
mit
| 4,877,048,363,742,383,000 | 28.337302 | 124 | 0.607331 | false |
polysimtools/pysimm
|
pysimm/apps/polymatic.py
|
3
|
14112
|
# polymatic module; calls Polymatic perl code and LAMMPS
import os
import sys
import shlex
import shutil
from time import strftime
from subprocess import Popen, PIPE
from pysimm import system, lmps
rappture = True
try:
import Rappture
except ImportError:
rappture = False
def pack(script, file_in, nrep, boxl, file_out):
"""pysimm.apps.polymatic.pack
Calls Polymatic random packing code
Args:
script: name of packing script
file_in: list of file names of reference molecules to pack
nrep: list of number of monomers for each reference molecule
boxl: length of one dimension of simulation box for random packing
file_out: name of output file (packed system)
Returns:
output from perl code
"""
if not isinstance(file_in, list):
file_in = [file_in]
if not isinstance(nrep, list):
nrep = [nrep]
if len(file_in) != len(nrep) or len(file_in) == 0:
return False
cmd = 'perl %s -i ' % script
cmd += '%s ' % str(len(file_in))
for i in range(len(file_in)):
cmd += '%s %s ' % (file_in[i], nrep[i])
cmd += '-l %s -o %s' % (boxl, file_out)
o, e = Popen(shlex.split(cmd),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE).communicate()
if not e and o:
return o
else:
return False
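# Example call (illustrative only; the packing script and monomer data files are
# assumptions, not files shipped with pysimm):
#   out = pack('pack.pl', ['monomer1.lmps', 'monomer2.lmps'], [10, 10], 30.0, 'data.lmps')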
def polymatic(script, file_in, file_out):
"""pysimm.apps.polymatic.polymatic
Calls Polymatic code. polym.in and types.txt are assumed to exist.
Args:
script: name of Polymatic script
file_in: initial system file name
file_out: final system file name
Returns:
output from perl code
"""
cmd = ('perl %s -i %s -s polym.in -t types.txt -o %s'
% (script, file_in, file_out))
o, e = Popen(shlex.split(cmd),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE).communicate()
    if not e and o and o.split()[0] != 'Error:':
return True
elif not e and o:
return o
else:
return False
def run(settings):
"""pysimm.apps.polymatic.run
Runs Polymatic algorithm.
Args:
settings: object containing Polymatic settings
Returns:
(True/False, :class:`~pysimm.system.System`)
"""
if rappture:
Rappture.Utils.progress(0, 'Initializing Polymatic...')
bonds = 0
os.mkdir('logs')
polymatic(os.path.join(settings.polymatic_dir, 'polym_init.pl'),
'data.lmps',
'step_000.lmps')
s = system.read_lammps('step_000.lmps', quiet=True)
s.read_type_names('types.txt')
s.write_lammps('temp.lmps')
if rappture:
Rappture.Utils.progress(0, '%s/%s bonds made: '
'optimizing initial structure...'
% (bonds, settings.polym.max_bonds))
if not lmps_min(s, 'initial optimization', settings):
s.write_lammps('temp.lmps')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return False, s
s.write_lammps('step_000.lmps')
s.write_lammps('temp.lmps')
while bonds < settings.polym.max_bonds:
attempt = 0
while not polymatic(os.path.join(settings.polymatic_dir, 'polym.pl'),
'temp.lmps',
'temp.lmps'):
attempt += 1
if rappture:
Rappture.Utils.progress(int(float(bonds)/settings.
polym.max_bonds *
100),
'%s/%s bonds made: attempt #%s to make '
'new bond'
% (bonds, settings.polym.max_bonds,
attempt))
s = system.read_lammps('temp.lmps', quiet=True)
s.read_type_names('types.txt')
if not lmps_step_md(s, bonds, attempt, settings):
s.write_lammps('temp.lmps')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return False, s
s.write_lammps('temp.lmps')
if attempt >= settings.polym.max_md:
break
if attempt >= settings.polym.max_md:
break
bonds += 1
if rappture:
Rappture.Utils.progress(int(float(bonds)/settings.polym.max_bonds
* 100),
'%s/%s bonds made: '
'optimizing newly formed bond'
% (bonds, settings.polym.max_bonds))
s = system.read_lammps('temp.lmps', quiet=True)
s.read_type_names('types.txt')
print('%s: bond %s made successfully' % (strftime('%H:%M:%S'), bonds))
sys.stdout.flush()
if not lmps_min(s, 'bond %s optimization' % bonds, settings):
s.write_lammps('temp.lmps')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return False, s
s.write_lammps('step_%03d.lmps' % bonds)
s.write_lammps('temp.lmps')
if (bonds % settings.polym.cycle == 0 and
(bonds / settings.polym.cycle) % settings.polym.npt_freq == 0):
if rappture:
Rappture.Utils.progress(int(float(bonds)/settings.
polym.max_bonds
* 100),
'%s/%s bonds made: '
'performing npt cycle md'
% (bonds, settings.polym.max_bonds))
if not lmps_cycle_npt_md(s, bonds, settings):
s.write_lammps('temp.lmps')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return False, s
s.write_lammps('temp.lmps')
elif bonds % settings.polym.cycle == 0:
if rappture:
Rappture.Utils.progress(int(float(bonds)/settings.
polym.max_bonds
* 100),
'%s/%s bonds made: '
'performing nvt cycle md'
% (bonds, settings.polym.max_bonds))
if not lmps_cycle_nvt_md(s, bonds, settings):
s.write_lammps('temp.lmps')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return False, s
s.write_lammps('temp.lmps')
if rappture:
Rappture.Utils.progress(99, 'Finalizing Polymatic')
polymatic(os.path.join(settings.polymatic_dir, 'polym_final.pl'),
'temp.lmps',
'final.lmps')
return True, s
def lmps_min(s, name, settings):
"""pysimm.apps.polymatic.lmps_min
Runs LAMMPS minimization for the Polymatic algorithm.
Args:
s: :class:`~pysimm.system.System` to minimize
name: name of simulation
settings: object containing Polymatic settings
Returns:
result from :func:`~pysimm.lmps.minimize`
"""
if settings.polym.min.cluster:
nanohub = {'cores': int(settings.polym.min.nanohub_cores),
'walltime': int(settings.polym.min.nanohub_walltime)}
log_name = '%s' % '_'.join(name.split())
else:
nanohub = {}
log_name = 'logs/%s' % '_'.join(name.split())
if settings.polym.min.user_input:
sim = lmps.Simulation(s, name='initial optimization',
print_to_screen=False, nanohub=nanohub, custom=True)
sim.add(settings.polym.min.min_in)
sim.run(np=settings.np, nanohub=nanohub)
else:
sim = lmps.Simulation(s, name='initial optimization',
print_to_screen=False, nanohub=nanohub,
log=log_name
)
sim.add(lmps.Init(cutoff=settings.polym.min.nb_cutoff, forcefield=settings.forcefield))
sim.add_min(
min_style='sd',
etol=settings.polym.min.sd_etol,
ftol=settings.polym.min.sd_ftol,
maxiter=settings.polym.min.sd_maxiter,
maxeval=settings.polym.min.sd_maxeval,
)
sim.add_min(
min_style='cg',
etol=settings.polym.min.cg_etol,
ftol=settings.polym.min.cg_ftol,
maxiter=settings.polym.min.cg_maxiter,
maxeval=settings.polym.min.cg_maxeval,
)
sim.run(np=settings.np, nanohub=nanohub)
if settings.polym.min.cluster:
shutil.move(log_name, 'logs')
return True
def lmps_step_md(s, bonds, attempt, settings):
"""pysimm.apps.polymatic.lmps_step_md
Runs LAMMPS step md for the Polymatic algorithm.
Args:
s: :class:`~pysimm.system.System` to minimize
bonds: number of bond to be made
attempt: number of bonding attempt
settings: object containing Polymatic settings
Returns:
result from :func:`~pysimm.lmps.md`
"""
if settings.polym.step.cluster:
nanohub = {'cores': int(settings.polym.step.nanohub_cores),
'walltime': int(settings.polym.step.nanohub_walltime)}
log_name = 'step_%03d_%03d' % (bonds, attempt)
else:
nanohub = {}
log_name = 'logs/step_%03d_%03d' % (bonds, attempt)
if settings.polym.step.user_input:
sim = lmps.Simulation(s, name='bond %s attempt #%d' % (bonds + 1, attempt),
print_to_screen=False, nanohub=nanohub, custom=True)
sim.add(settings.polym.step.step_in)
sim.run(np=settings.np, nanohub=nanohub)
else:
sim = lmps.Simulation(s, name='bond %s: attempt #%d' % (bonds + 1, attempt),
print_to_screen=False, nanohub=nanohub,
log=log_name
)
sim.add(lmps.Init(cutoff=settings.polym.step.nb_cutoff, forcefield=settings.forcefield))
sim.add(lmps.Velocity(temperature=settings.polym.step.temp))
sim.add_md(
ensemble='nvt', temperature=settings.polym.step.temp,
run=settings.polym.step.length,
)
sim.run(np=settings.np, nanohub=nanohub)
if settings.polym.step.cluster:
shutil.move(log_name, 'logs')
return True
def lmps_cycle_nvt_md(s, bonds, settings):
"""pysimm.apps.polymatic.lmps_cycle_nvt_md
Runs LAMMPS nvt cycle md for the Polymatic algorithm.
Args:
s: :class:`~pysimm.system.System` to minimize
bonds: number of bond to be made
settings: object containing Polymatic settings
Returns:
result from :func:`~pysimm.lmps.md`
"""
if settings.polym.cycle_nvt.cluster:
nanohub = {'cores': int(settings.polym.cycle_nvt.nanohub_cores),
'walltime': int(settings.polym.cycle_nvt.nanohub_walltime)}
log_name = 'cycle_nvt_%03d' % bonds
else:
nanohub = {}
log_name = 'logs/cycle_nvt_%03d' % bonds
if settings.polym.cycle_nvt.user_input:
sim = lmps.Simulation(s, name='bond %d cycle nvt' % bonds,
print_to_screen=False, nanohub=nanohub, custom=True)
sim.add(settings.polym.cycle_nvt.step_in)
sim.run(np=settings.np, nanohub=nanohub)
else:
sim = lmps.Simulation(s, name='bond %d cycle nvt' % bonds,
print_to_screen=False, nanohub=nanohub,
log=log_name
)
sim.add(lmps.Init(cutoff=settings.polym.cycle_nvt.nb_cutoff, forcefield=settings.forcefield))
sim.add(lmps.Velocity(temperature=settings.polym.cycle_nvt.temp))
sim.add_md(
ensemble='nvt', temperature=settings.polym.cycle_nvt.temp,
run=settings.polym.cycle_nvt.length,
)
sim.run(np=settings.np, nanohub=nanohub)
if settings.polym.cycle_nvt.cluster:
shutil.move(log_name, 'logs')
return True
def lmps_cycle_npt_md(s, bonds, settings):
"""pysimm.apps.polymatic.lmps_cycle_npt_md
Runs LAMMPS npt cycle md for the Polymatic algorithm.
Args:
s: :class:`~pysimm.system.System` to minimize
        bonds: number of bonds to be made
        settings: object containing Polymatic settings
    Returns:
        result from :func:`~pysimm.lmps.md`
"""
if settings.polym.cycle_npt.cluster:
nanohub = {'cores': int(settings.polym.cycle_npt.nanohub_cores),
'walltime': int(settings.polym.cycle_npt.nanohub_walltime)}
log_name = 'cycle_npt_%03d' % bonds
else:
nanohub = {}
log_name = 'logs/cycle_npt_%03d' % bonds
if settings.polym.cycle_npt.user_input:
sim = lmps.Simulation(s, name='bond %d cycle npt' % bonds,
print_to_screen=False, nanohub=nanohub, custom=True)
sim.add(settings.polym.cycle_npt.step_in)
sim.run(np=settings.np, nanohub=nanohub)
else:
sim = lmps.Simulation(s, name='bond %d cycle npt' % bonds,
print_to_screen=False, nanohub=nanohub,
log=log_name
)
sim.add(lmps.Init(cutoff=settings.polym.cycle_npt.nb_cutoff, forcefield=settings.forcefield))
sim.add(lmps.Velocity(temperature=settings.polym.cycle_npt.temp))
sim.add_md(
            ensemble='npt', temperature=settings.polym.cycle_npt.temp,
run=settings.polym.cycle_npt.length,
pressure=settings.polym.cycle_npt.pressure,
)
sim.run(np=settings.np, nanohub=nanohub)
if settings.polym.cycle_npt.cluster:
shutil.move(log_name, 'logs')
return True
|
mit
| -2,959,364,420,437,632,000 | 33.335766 | 101 | 0.551091 | false |
vesloguzov/xls-lab
|
lab_1_main.py
|
1
|
1063
|
# -*- coding: UTF-8 -*-
import sys
import random
import datetime
import time
import json
from openpyxl import Workbook, load_workbook
from openpyxl.chart import ScatterChart, Series, Reference
from openpyxl.chart.reader import reader
from openpyxl.chart.layout import Layout, ManualLayout
from openpyxl.styles import *
from lab_1_create_template import lab_1_create_template
from lab_1_check_answer import lab_1_check_answer
reload(sys)
sys.setdefaultencoding('utf8')
# Создание шаблона
template_wb = Workbook()
template_ws = template_wb.active
employees = ["Иванов И.М.", "Коробова П.Н", "Морозов И.Р.", "Петров Г.Т.", "Ромашова П.Т.", "Смирнов С.И.", "Соколова О.С."]
template_ws = lab_1_create_template(template_ws)
template_wb.save('lab1_template.xlsx')
# Проверка
student_wb = load_workbook('lab1_student.xlsx')
student_wb_data_only = load_workbook('lab1_student.xlsx', data_only=True)
result = lab_1_check_answer(student_wb, student_wb_data_only)
print result
|
mit
| 4,004,395,919,724,810,000 | 27.705882 | 124 | 0.755123 | false |
littlezz/IslandCollection
|
core/analyzer.py
|
1
|
1913
|
from urllib import parse
from .islands import island_netloc_table, island_class_table, IslandNotDetectError
__author__ = 'zz'
def determine_island_name(url):
netloc = parse.urlparse(url).netloc
for url, name in island_netloc_table.items():
if url == netloc:
return name
else:
raise IslandNotDetectError('netloc is {}'.format(netloc))
def init_start_url(url):
island_name = determine_island_name(url)
island_class = island_class_table[island_name]
return island_class.init_start_url(url)
def validate_url(url):
"""
:param url:
:return:status code
status code ---> info
---------------------------
0 success
1 no scheme
2 island not support
"""
p = parse.urlparse(url)
if not p.scheme:
return 1
try:
determine_island_name(url)
except IslandNotDetectError:
return 2
else:
return 0
class Analyzer:
def __init__(self, res, max_page):
self.url = res.url
self.res = res
self.max_page = max_page
self.island_name = determine_island_name(self.url)
self._island = self._create_island_obj()
self.divs = self.split_page()
def _create_island_obj(self):
island_class = island_class_table[self.island_name]
return island_class(self.url, self.res)
def split_page(self):
return self._island.island_split_page()
def filter_divs(self, response_gt, *args):
return [div for div in self.divs if div.response_num > response_gt]
def next_page(self, current_page_url=None):
return self._island.next_page(self.max_page, current_page_url)
def get_thread_info(url, res):
island_class = island_class_table[determine_island_name(url)]
info_result = island_class.get_thread_info(url, res)
return info_result
|
mit
| 4,216,913,613,921,257,000 | 25.205479 | 82 | 0.609514 | false |
gilestrolab/ethoscope
|
prototypes/mysql/get_video_from_mysql.py
|
1
|
2729
|
import sqlite3
import sys
import base64
import io
import shutil
import cv2
import tempfile
import os
import glob
from optparse import OptionParser
import datetime
from multiprocessing import Pool
def annotate_image(args):
input, time, t0 = args
label = datetime.datetime.fromtimestamp(time/1000 + t0).strftime('%Y-%m-%d %H:%M:%S')
out = input+"_tmp.jpg"
print(label)
command = "convert %s -pointsize 50 -font Courier -background Khaki label:'%s' +swap -gravity Center -append %s" % (input, label, out)
os.system(command)
shutil.move(out,input)
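# Illustrative call (file name and timestamps are made up): annotating a frame
# taken 60 s after an experiment started at Unix time t0 requires ImageMagick's
# `convert` to be installed.
#
#     annotate_image(("/tmp/00001_60000.jpg", 60000, 1500000000.0))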
if __name__ == '__main__':
ETHOGRAM_DIR = "/ethoscope_data/results"
MACHINE_ID_FILE = '/etc/machine-id'
MACHINE_NAME_FILE = '/etc/machine-name'
parser = OptionParser()
parser.add_option("-i", "--input", dest="input", help="The input .db file")
parser.add_option("-o", "--output", dest="output", help="The output mp4")
parser.add_option("-f", "--fps", dest="fps", default=1, help="The output fps")
parser.add_option("-a", "--annotate", dest="annot", default=False, help="Whether date and time should be written on the bottom of the frames", action="store_true")
(options, args) = parser.parse_args()
option_dict = vars(options)
file = option_dict["input"]
dir = tempfile.mkdtemp(prefix="etho_video")
try:
with sqlite3.connect(file, check_same_thread=False) as conn:
cursor = conn.cursor()
sql_metadata = 'select * from METADATA'
conn.commit()
cursor.execute(sql_metadata)
t0 = 0
for field, value in cursor:
if field == "date_time":
t0 = float(value)
sql1 = 'select id,t,img from IMG_SNAPSHOTS'
conn.commit()
cursor.execute(sql1)
for i,c in enumerate(cursor):
id, t, blob = c
file_name = os.path.join(dir,"%05d_%i.jpg" % (id, t))
                file_like = io.BytesIO(blob)  # blob is binary image data, so BytesIO rather than StringIO
out_file = open(file_name, "wb")
file_like.seek(0)
shutil.copyfileobj(file_like, out_file)
pool = Pool(4)
pool_args = []
for f in glob.glob(os.path.join(dir , "*.jpg")):
t = int(os.path.basename(f).split("_")[1].split(".")[0])
pool_args.append((f,t,t0))
pool.map(annotate_image,pool_args)
# if option_dict["annot"]:
command = "ffmpeg -y -framerate %i -pattern_type glob -i '%s/*.jpg' -c:v libx264 %s" % (option_dict["fps"], dir, option_dict["output"])
os.system(command)
finally:
shutil.rmtree(dir)
print(dir)
|
gpl-3.0
| -4,299,405,739,438,316,500 | 30.011364 | 167 | 0.576402 | false |
zzir/white
|
get_content.py
|
1
|
8536
|
import time
import sqlite3
import re
from config import CONFIG
class TContents(object):
"""blog_contents数据库查询"""
def __init__(self):
self.con = sqlite3.connect(CONFIG['DB_FILE'])
self.cur = self.con.cursor()
# self.con.commit()
def check_title(self, title):
try:
res = self.cur.execute(
"select * from blog_contents where title=?", (title,)
)
contents = {}
for i in res:
contents['id'] = i[0]
return contents['id']
except:
return 'ok'
def check_title_id(self, title, pid):
try:
res = self.cur.execute(
"select * from blog_contents where title=? and id!=?",
(title, pid)
)
contents = {}
for i in res:
contents['id'] = i[0]
return contents['id']
except:
return 'ok'
def check_slug(self, slug):
try:
res = self.cur.execute(
"select * from blog_contents where slug=?", (slug,)
)
contents = {}
for i in res:
contents['id'] = i[0]
return contents['id']
except:
return 'ok'
def check_slug_id(self, slug, pid):
try:
res = self.cur.execute(
"select * from blog_contents where slug=? and id!=?",
(slug, pid)
)
contents = {}
for i in res:
contents['id'] = i[0]
return contents['id']
except:
return 'ok'
def get_post(self, limit=CONFIG['POST_PAGE'], offset=0):
"""允许显示的最近的?篇"""
try:
res = self.cur.execute(
"select id,title,slug,created,tags,column,short \
from blog_contents where status = 1 and allow_top = 0 \
order by id desc limit ? offset ?", (limit, offset)
)
post = []
for i in res:
post.append(i)
return post
except:
return []
def get_post_new(self, num=CONFIG['POST_NEW']):
"""最新修改的10篇"""
try:
res = self.cur.execute(
"select id,title,slug from blog_contents \
order by modified desc limit ? offset ?", (int(num), 0)
)
post = []
for i in res:
post.append(i)
return post
except:
return []
def get_post_top(self):
"""置顶的文章"""
try:
res = self.cur.execute(
"select id,title,slug from blog_contents \
where allow_top = 1 order by modified desc"
)
post = []
for i in res:
post.append(i)
return post
except:
return []
def get_tags(self, ban=CONFIG['BAN_TAGS'], style=0):
"""统计tags词频,去掉ban列表被禁止显示的"""
try:
res = self.cur.execute("select tags from blog_contents")
tag = []
for i in res:
for j in i[0].replace(',', ',').split(','):
tag.append(j)
tags = dict.fromkeys(tag, 0)
for k in tag:
tags[k] += 1
            # remove banned entries from tags
for m in ban:
if m in tags:
del tags[m]
return sorted(tags.items(), key=lambda x: x[1],reverse=True)
except:
return []
def get_tag_search(self, search_tag):
if search_tag in ['', ')']:
return []
try:
check_tag = self.cur.execute("select tags from blog_contents")
flag = 0
for i in check_tag:
if flag:
break
for j in i[0].replace(',', ',').split(','):
if search_tag.upper() in j.upper():
flag = 1
break
if flag:
res = self.cur.execute(
"select id,title,created,slug from blog_contents \
where tags like ? order by created desc",
('%' + search_tag + '%', )
)
search_res = []
for i in res:
search_res.append(i)
return search_res
except:
return []
def get_column(self, ban=CONFIG['BAN_COLUMN']):
"""统计column词频,去掉ban列表被禁止显示的"""
res = self.cur.execute(
"select column from blog_contents order by id desc"
)
try:
column = []
for i in res:
for j in i[0].replace(',', ',').split(','):
column.append(j)
re_column = dict.fromkeys(column, 0)
for k in column:
re_column[k] += 1
            # remove banned entries from the columns dict
for m in ban:
if m in re_column:
del re_column[m]
del re_column['']
column = []
for i in sorted(re_column.items(),key=lambda x: x[1],reverse=True):
column.append(i[0])
return column
except:
return []
def get_columns(self):
res = self.cur.execute(
"select id,title,slug,column,modified from blog_contents \
where column != '' order by id desc"
)
try:
columns = {}
for cos in res:
if cos[3] in columns:
columns[cos[3]].append(cos)
else:
columns.update({cos[3]: [cos, ]})
return columns
except:
return {}
def get_archive(self):
"""统计时间戳返回格式:2017年03月 0篇 """
res = self.cur.execute(
"select created from blog_contents order by created desc"
)
try:
archive = []
for a in res:
time_array = time.localtime(a[0])
time_format = time.strftime("%Y-%m", time_array)
archive.append(time_format)
re_archive = dict.fromkeys(archive, 0)
for b in archive:
re_archive[b] += 1
return re_archive
except:
return {}
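    # Illustrative result shape (hypothetical data): two posts created in March
    # 2017 and one in April 2017 would yield {'2017-03': 2, '2017-04': 1}.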
def get_archives(self):
"""查询归档"""
res = self.cur.execute("select id,title,created,slug \
from blog_contents order by created desc")
try:
archives = {}
for i in res:
i = list(i)
time_array = time.localtime(i[2])
time_format = time.strftime("%Y-%m-%d", time_array)
i[2] = time_format
# archives['2017']['04'] = [i,]
# '2017' in archives and '04' in archives['2017']
                # if the year/month keys already exist, append to archives['2017']['04']
                # otherwise add them via update()
if i[2][:4] in archives:
if i[2][5:7] in archives[i[2][:4]]:
archives[i[2][:4]][i[2][5:7]].append(i)
else:
archives[i[2][:4]].update({i[2][5:7]: [i, ]})
else:
archives.update({i[2][:4]: {i[2][5:7]: [i, ]}})
return archives
except:
return {}
def get_post_one(self, slug):
"""根据id或者slug返回内容"""
if slug[-5:] == '.html':
slug = slug[:-5]
style = 1
elif re.match(r"^\d+$", slug):
slug = int(slug)
style = 0
else:
return []
try:
post = []
if style == 1:
res = self.cur.execute(
"select title,slug,created,tags,column,text,allow_comment,original \
from blog_contents where slug= ?", (slug,))
else:
res = self.cur.execute(
"select title,slug,created,tags,column,text,allow_comment,original \
from blog_contents where id = ?", (slug,))
for i in res:
post.append(i)
return post[0]
except:
return []
|
mit
| 4,673,153,396,829,510,000 | 27.875 | 88 | 0.427008 | false |
pombredanne/lineup
|
lineup/__init__.py
|
1
|
5049
|
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <lineup - python distributed pipeline framework>
# Copyright (C) <2013> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import sys
import time
import json
import logging
import traceback
from pprint import pformat
from redis import StrictRedis
from threading import RLock, Thread
from lineup.datastructures import Queue
from lineup.backends.redis import JSONRedisBackend
class KeyMaker(object):
def __init__(self, step):
self.step = step
for name in ['logging', 'alive', 'error']:
setattr(self, name, self.make_key(name))
def make_key(self, suffix, prefix=None):
prefix = prefix or getattr(self, 'prefix', 'lineup')
return ":".join([prefix, self.step.name, suffix])
class Step(Thread):
def __init__(self, consume_queue, produce_queue, parent, backend=JSONRedisBackend):
super(Step, self).__init__()
self.key = KeyMaker(self)
self.consume_queue = consume_queue
self.produce_queue = produce_queue
self.logger = logging.getLogger(self.key.logging)
self.daemon = False
self.parent = parent
self.backend = backend()
self.name = getattr(self.__class__, 'label', self.taxonomy)
consume_queue.adopt_consumer(self)
produce_queue.adopt_producer(self)
def get_name(self):
return getattr(self, 'name', None) or self.taxonomy
def __str__(self):
return '<{0}>'.format(self.ancestry)
@property
def taxonomy(self):
class_name = self.__class__.__name__
module_name = self.__class__.__module__
return '.'.join([module_name, class_name])
@property
def ancestry(self):
return '|'.join([self.get_name(), self.parent.get_name()])
@property
def id(self):
return '|'.join([self.parent.id, str(self.ident), self.ancestry])
@property
def alive(self):
return self.backend.get(self.key.alive)
def log(self, message, *args, **kw):
self.logger.info(message, *args, **kw)
return self.backend.rpush(self.key.logging, {
'message': message % args % kw,
'when': time.time()
})
    def fail(self, exception, instructions=None):
error = traceback.format_exc(exception)
message = 'The worker %s failed while being processed at the pipeline "%s"'
args = (self.name, self.parent.name)
self.logger.exception(message, *args)
self.parent.enqueue_error(source_class=self.__class__, instructions=instructions, exception=exception)
return self.backend.rpush(self.key.logging, {
'message': message % args,
'when': time.time()
})
def consume(self, instructions):
        raise NotImplementedError("You must implement the consume method by yourself")
def produce(self, payload):
return self.produce_queue.put(payload)
def before_consume(self):
self.log("%s is about to consume its queue", self)
def after_consume(self, instructions):
self.log("%s is done", self)
def do_rollback(self, instructions):
try:
self.rollback(instructions)
except Exception as e:
self.fail(e, instructions)
def run(self):
self.backend.set(self.key.alive, True)
while self.alive:
self.before_consume()
instructions = self.consume_queue.get()
if not instructions:
sys.exit(1)
try:
self.consume(instructions)
except Exception as e:
error = traceback.format_exc(e)
self.log(error)
instructions.update({
'success': False,
'error': error
})
self.produce(instructions)
self.do_rollback(instructions)
continue
self.after_consume(instructions)
|
mit
| 8,066,151,196,050,668,000 | 33.108108 | 110 | 0.636688 | false |
ezoncoin/p2pool-ezon
|
p2pool/networks.py
|
1
|
1922
|
from p2pool.ezoncoin import networks
from p2pool.util import math
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
nets = dict(
ezoncoin=math.Object(
PARENT=networks.nets['ezoncoin'],
SHARE_PERIOD=20, # seconds
CHAIN_LENGTH=24*60*60//20, # shares
REAL_CHAIN_LENGTH=24*60*60//20, # shares
TARGET_LOOKBEHIND=100, # shares //with that the pools share diff is adjusting faster, important if huge hashing power comes to the pool
SPREAD=10, # blocks
IDENTIFIER='aa185015e0a384f5'.decode('hex'),
PREFIX='85fd8cff82f170de'.decode('hex'),
P2P_PORT=8888,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=False,
WORKER_PORT=3333,
BOOTSTRAP_ADDRS=''.split(' '),
ANNOUNCE_CHANNEL='#p2pool-ezon',
VERSION_CHECK=lambda v: v >= 1000000,
),
ezoncoin_testnet=math.Object(
PARENT=networks.nets['ezoncoin_testnet'],
SHARE_PERIOD=20, # seconds
CHAIN_LENGTH=24*60*60//20, # shares
REAL_CHAIN_LENGTH=24*60*60//20, # shares
TARGET_LOOKBEHIND=100, # shares //with that the pools share diff is adjusting faster, important if huge hashing power comes to the pool
SPREAD=10, # blocks
IDENTIFIER='fa417f64e92d1a3c'.decode('hex'),
PREFIX='e6fc75a2eca9f373'.decode('hex'),
P2P_PORT=28888,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=False,
WORKER_PORT=17903,
BOOTSTRAP_ADDRS=''.split(' '),
ANNOUNCE_CHANNEL='',
VERSION_CHECK=lambda v: True,
),
)
for net_name, net in nets.iteritems():
net.NAME = net_name
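# Illustrative lookup (not part of the original module): once NAME has been
# filled in above, a definition can be fetched by name, e.g.
#     net = nets['ezoncoin']
#     # net.NAME == 'ezoncoin', net.P2P_PORT == 8888, net.SHARE_PERIOD == 20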
|
gpl-3.0
| -1,384,594,421,757,907,000 | 38.22449 | 144 | 0.638398 | false |
balint256/cyberspectrum
|
spur_search.py
|
1
|
6742
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# spur_search.py
#
# Copyright 2014 Balint Seeber <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import math
import numpy
import interface
def get_spurs(bins, freq_min, freq_max, snr=6.0, percent_noise_bins=80.0):
"""
Get a list of bins sticking out of the noise floor
NOTE: This routine assumes flat noise floor with most bins as noise
@param snr the number of db a bin needs to stick out of the noise floor
@param percent_noise_bins is the minimum percentage of fft bins expected to be noise
"""
h = numpy.histogram(bins, numpy.arange(min(bins), max(bins), float(snr)/2.0))
#print len(h[0]), h[0]
#print len(h[1]), h[1]
percent = 0.0
for i in range(len(h[0])):
percent += 100.0 * float(h[0][i])/float(len(h[0]))
if percent > percent_noise_bins: break
threshold = h[1][min(len(h[1])-1,i+2)]
def _bin_to_freq(idx):
freq_range = float(freq_max - freq_min)
return idx * freq_range / (len(bins) - 1) + freq_min
spurs = list()
for i in range(len(bins)):
if bins[i] > threshold: spurs.append((_bin_to_freq(i), bins[i]))
return spurs
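# Usage sketch (illustrative, synthetic data): a flat -90 dB floor with one
# -60 dB bin should yield a single spur near the matching frequency, roughly
#     bins = numpy.full(1024, -90.0); bins[256] = -60.0
#     get_spurs(bins, freq_min=100e6, freq_max=102e6)  # ~[(100.5e6, -60.0)]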
class SpurSearch(interface.Module):
def __init__(self, config, options, *args, **kwds):
interface.Module.__init__(self, config, options, *args, **kwds)
self.spur_log_file = None
self.noise_log_file = None
self.total_spur_count = 0
def __del__(self):
if self.spur_log_file: self.spur_log_file.close()
if self.noise_log_file: self.noise_log_file.close()
def populate_options(self, parser):
parser.add_option("--spur-log", type="string", default=None, help="Spur log file [default=%default]")
parser.add_option("--ignore-lo", action="store_true", help="Ignore LO spur", default=False)
parser.add_option("--lo-tolerance", type="float", default=7.5e3, help="Ignore LO spur +/- from DC (Hz) [default: %default]")
parser.add_option("--spur-snr", type="float", default=1.0, help="Spur threshold above noise floor (dB) [default: %default]")
parser.add_option("--only-save-spurs", action="store_true", default=False, help="Only save image when spurs are detected [default: %default]")
parser.add_option("--noise-log", type="string", default=None, help="Noise floor log file [default=%default]")
def init(self, usrp, info, states, state_machines, fft_graph, scope_graph):
interface.Module.init(self, usrp, info, states, state_machines, fft_graph, scope_graph)
if not self.spur_log_file and self.options.spur_log is not None and len(self.options.spur_log) > 0:
self.spur_log_file = open(self.options.spur_log, "w")
if not self.noise_log_file and self.options.noise_log is not None and len(self.options.noise_log) > 0:
self.noise_log_file = open(self.options.noise_log, "w")
def start(self, count, current_hw_states):
interface.Module.start(self, count, current_hw_states)
self.total_spur_count = 0
def query_stop(self, channel_idx, state_machine, hw_state):
return (state_machine.loops > 0)
def query_fft(self, sample_idx, hw_state):
return True
def process(self, sample_idx, hw_state, s, fft_data, partial_name, fft_channel_graph, scope_channel_graph):
spurs_detected = []
lo_spurs = []
noise = None
freq_min = hw_state.freq - self.config.rate/2
freq_max = hw_state.freq + self.config.rate/2
fft_avg = fft_data['ave']
hz_per_bin = math.ceil(self.config.rate / len(fft_avg))
lo_bins = int(math.ceil(self.options.lo_tolerance / hz_per_bin))
#print "Skipping %i LO bins" % (lo_bins)
lhs = fft_avg[0:((len(fft_avg) + 1)/2) - ((lo_bins-1)/2)]
rhs = fft_avg[len(fft_avg)/2 + ((lo_bins-1)/2):]
#print len(fft_avg), len(lhs), len(rhs)
fft_minus_lo = numpy.concatenate((lhs, rhs))
#noise = numpy.average(numpy.array(fft_minus_lo))
noise = 10.0 * math.log10(numpy.average(10.0 ** (fft_minus_lo / 10.0))) # dB
print ("\t[%i] Noise (skipped %i LO FFT bins)" % (sample_idx, lo_bins)), noise, "dB"
lo_freq = hw_state.freq + hw_state.lo_offset
fig_name = "fft-%s.png" % (partial_name) # Same as scanner.py
if self.noise_log_file:
self.noise_log_file.write("%d,%d,%f,%f,%f,%s,%f,%s\n" % (
self.last_count,
sample_idx,
hw_state.freq,
lo_freq,
hw_state.gain,
hw_state.get_antenna(),
noise,
fig_name,
))
spurs = get_spurs(fft_avg, freq_min, freq_max) # snr=6.0, percent_noise_bins=80.0
spur_threshold = noise + self.options.spur_snr
for spur_freq, spur_level in spurs:
if spur_level > spur_threshold:
if self.options.ignore_lo and abs(lo_freq - spur_freq) < self.options.lo_tolerance:
#print "\t[%i]\tLO @ %f MHz (%03f dBm) for LO %f MHz (offset %f Hz)" % (channel, spur_freq, spur_level, lo_freq, (spur_freq-lo_freq))
lo_spurs += [(spur_freq, spur_level)]
else:
spurs_detected += [(spur_freq, spur_level)]
#d = {
# 'id': id,
# 'spur_level': spur_level,
# 'spur_freq': spur_freq,
# 'lo_freq': lo_freq,
# 'channel': channel,
# 'noise_floor': noise,
#}
#print '\t\tSpur:', d
print "\t[%i]\tSpur @ %f Hz (%03f dBFS) for LO %f MHz (offset %f Hz)" % (
sample_idx,
spur_freq,
spur_level,
lo_freq,
(spur_freq-lo_freq)
)
if self.spur_log_file:
self.spur_log_file.write("%d,%d,%f,%f,%f,%s\n" % (
self.last_count,
sample_idx,
spur_freq,
spur_level,
lo_freq,
fig_name,
))
self.total_spur_count += 1
if fft_channel_graph is not None:
fft_channel_graph.add_points(spurs_detected)
fft_channel_graph.add_horz_line(noise, 'gray', '--', id='noise')
fft_channel_graph.add_horz_line(spur_threshold, 'gray', '-', id='spur_threshold')
fft_channel_graph.add_points(lo_spurs, 'go')
def query_save(self, which):
if which == 'fft_graph':
if self.options.only_save_spurs:
return (self.total_spur_count > 0)
return None
def shutdown(self):
return
def get_modules():
return [{'class':SpurSearch, 'name':"Spur Search"}]
def main():
return 0
if __name__ == '__main__':
main()
|
gpl-2.0
| -3,284,535,333,556,331,000 | 34.114583 | 144 | 0.6504 | false |
AlexPereverzyev/spidy
|
tests/parser/regex_expressions_test.py
|
1
|
2650
|
from spidy.common import *
from expressions_test_base import ExpressionsTestBase
class RegexExpressionsTest(ExpressionsTestBase):
def test_regex1(self):
''' input string, no capturing groups - returns whole match '''
self.assertEqual(self.evaluate('"hello, John!" % "hello, [a-zA-Z]+!"'), 'hello, John!')
def test_regex2(self):
''' input string, one capturing group - returns capture '''
self.assertEqual(self.evaluate('"hello, John!" % "hello, ([a-zA-Z]+)!"'), 'John')
def test_regex3(self):
''' input string, two capturing groups - returns list of captures '''
self.assertEqual(self.evaluate('"hello, John!" % "(hello), ([a-zA-Z]+)!"'), ['hello', 'John'])
def test_regex4(self):
        ''' input string, one capturing group, regex doesn't match - returns empty string '''
self.assertEqual(self.evaluate('"hello, John!" % "hello, ([0-9]+)!"'), '')
def test_regex5(self):
''' input not string, one capturing group - failed, evaluation exception is raised '''
self.assertRaises(EvaluationException, self.evaluate, '321321 % "hello, ([a-zA-Z]+)!"')
def test_regex6(self):
''' input list of strings, no capturing groups - returns empty '''
self.assertEqual(self.evaluate('["hello, John!", "hello, Kate!"] % "hello, [a-zA-Z]+!"'), ['hello, John!', 'hello, Kate!'])
def test_regex7(self):
''' input list of strings, one capturing group - returns list of captures '''
self.assertEqual(self.evaluate('["hello, John!", "hello, Kate!"] % "hello, ([a-zA-Z]+)!"'), ['John', 'Kate'])
def test_regex8(self):
''' input list of strings, two capturing groups - returns list of lists of captures '''
self.assertEqual(self.evaluate('["hello, John!", "hello, Kate!"] % "(hello), ([a-zA-Z]+)!"'), [['hello', 'John'], ['hello', 'Kate']])
def test_regex9(self):
        ''' input list of strings, one capturing group, regex doesn't match - returns list of empty strings '''
self.assertEqual(self.evaluate('["hello, John!", "hello, Kate!"] % "hello, ([0-9]+)!"'), ['', ''])
def test_regex10(self):
''' input not string, one capturing group - failed, evaluation exception is raised '''
self.assertRaises(EvaluationException, self.evaluate, '["hello, John!", 3232] % "hello, ([a-zA-Z]+)!"')
def test_regex11(self):
        ''' input string, one capturing group, doesn't match - returns empty '''
self.assertEqual(self.evaluate('"hello, John!" % "([0-9]+)"'), '')
|
bsd-3-clause
| 8,767,682,070,943,139,000 | 52.02 | 141 | 0.584906 | false |
dzmuh97/OpenOrioks
|
vk_api/longpoll.py
|
1
|
13087
|
# -*- coding: utf-8 -*-
"""
@author: Kirill Python
@contact: https://vk.com/python273
@license Apache License, Version 2.0
Copyright (C) 2017
"""
from logging import debug
from .smileConvert import smilesTable
from .vk_api import ReSession
from enum import Enum
import time
DEFAULT_MODE = 2 + 8 + 32 + 64 + 128
CHAT_START_ID = int(2E9)  # id from which chat (conversation) peer ids start
GROUP_START_ID = int(1E9)
class VkEventType(Enum):
MESSAGE_DELETE = 0
MESSAGE_FLAGS_REPLACE = 1
MESSAGE_FLAGS_SET = 2
MESSAGE_FLAGS_RESET = 3
MESSAGE_NEW = 4
READ_ALL_INCOMING_MESSAGES = 6
READ_ALL_OUTGOING_MESSAGES = 7
USER_ONLINE = 8
USER_OFFLINE = 9
PEER_FLAGS_RESET = 10
PEER_FLAGS_REPLACE = 11
PEER_FLAGS_SET = 12
CHAT_NEW = 51
USER_TYPING = 61
USER_TYPING_IN_CHAT = 62
USER_CALL = 70
MESSAGES_COUNTER_UPDATE = 80
NOTIFICATION_SETTINGS_UPDATE = 114
class VkPlatform(Enum):
MOBILE = 1
IPHONE = 2
IPAD = 3
ANDROID = 4
WPHONE = 5
WINDOWS = 6
WEB = 7
class VkMessageType(Enum):
FROM_ME = 'from_me'
TO_ME = 'to_me'
class VkOfflineType(Enum):
EXIT = 0
AWAY = 1
MESSAGE_EXTRA_FIELDS = [
'peer_id', 'timestamp', 'subject', 'text', 'attachments', 'random_id', 'emoji'
]
EVENT_ATTRS_MAPPING = {
VkEventType.MESSAGE_DELETE: ['message_id'],
VkEventType.MESSAGE_FLAGS_REPLACE: ['message_id', 'flags'] + MESSAGE_EXTRA_FIELDS,
VkEventType.MESSAGE_FLAGS_SET: ['message_id', 'mask'] + MESSAGE_EXTRA_FIELDS,
VkEventType.MESSAGE_FLAGS_RESET: ['message_id', 'mask'] + MESSAGE_EXTRA_FIELDS,
VkEventType.MESSAGE_NEW: ['message_id', 'flags'] + MESSAGE_EXTRA_FIELDS,
VkEventType.READ_ALL_INCOMING_MESSAGES: ['peer_id', 'local_id'],
VkEventType.READ_ALL_OUTGOING_MESSAGES: ['peer_id', 'local_id'],
VkEventType.USER_ONLINE: ['user_id', 'extra'],
VkEventType.USER_OFFLINE: ['user_id', 'extra'],
VkEventType.PEER_FLAGS_RESET: ['peer_id', 'mask'],
VkEventType.PEER_FLAGS_REPLACE: ['peer_id', 'flags'],
VkEventType.PEER_FLAGS_SET: ['peer_id', 'mask'],
VkEventType.CHAT_NEW: ['chat_id', 'self'],
VkEventType.USER_TYPING: ['peer_id', 'flags'],
VkEventType.USER_TYPING_IN_CHAT: ['user_id', 'chat_id'],
VkEventType.USER_CALL: ['user_id', 'call_id'],
VkEventType.MESSAGES_COUNTER_UPDATE: ['count'],
VkEventType.NOTIFICATION_SETTINGS_UPDATE: ['peer_id', 'sound', 'disabled_until'],
}
MESSAGE_FLAGS = [
'unread', 'outbox', 'replied', 'important', 'chat', 'friends', 'spam',
'deleted', 'fixed', 'media'
]
def get_all_event_attrs():
keys = set()
for l in EVENT_ATTRS_MAPPING.values():
keys.update(l)
return tuple(keys)
ALL_EVENT_ATTRS = get_all_event_attrs()
PARSE_PEER_ID_EVENTS = [
VkEventType.MESSAGE_NEW,
VkEventType.MESSAGE_FLAGS_SET,
VkEventType.MESSAGE_FLAGS_REPLACE,
VkEventType.READ_ALL_INCOMING_MESSAGES,
VkEventType.READ_ALL_OUTGOING_MESSAGES,
VkEventType.USER_TYPING
]
class VkLongPoll(object):
__slots__ = (
'vk', 'wait', 'use_ssl', 'mode',
'url', 'session',
'key', 'server', 'ts', 'pts'
)
def __init__(self, vk, wait=25, use_ssl=True, mode=DEFAULT_MODE):
"""
https://vk.com/dev/using_longpoll
https://vk.com/dev/using_longpoll_2
        :param vk: VkApi object
        :param wait: long poll wait time (seconds)
        :param use_ssl: use encryption (HTTPS)
        :param mode: additional response options
"""
self.vk = vk
self.wait = wait
self.use_ssl = use_ssl
self.mode = mode
self.url = None
self.key = None
self.server = None
self.ts = None
self.pts = None
self.session = ReSession(True)
self.update_longpoll_server()
def update_longpoll_server(self, update_ts=True):
values = {
'use_ssl': '1' if self.use_ssl else '0',
'need_pts': '1'
}
response = self.vk.method('messages.getLongPollServer', values)
if 'error' in response:
return
self.key = response['key']
self.server = response['server']
self.url = ('https://' if self.use_ssl else 'http://') + self.server
if update_ts:
self.ts = response['ts']
self.pts = response['pts']
def get_pts(self, ts, pts):
values = {
'ts': ts,
'pts': pts,
'fields': 'friend_status',
'events_limit': 2**31 - 1,
'msgs_limit': 2**31 - 1,
}
response = self.vk.method('messages.getLongPollHistory', values)
if 'error' in response:
return []
self.pts = response['new_pts']
messages = response['messages']['items']
friends = [q['id'] for q in response['profiles'] if q['friend_status']]
for i,q in enumerate(messages):
if q['id'] in friends:
messages[i].update({ 'friend': 1 })
else:
messages[i].update({ 'friend': 0 })
return messages
def check(self):
values = {
'act': 'a_check',
'key': self.key,
'ts': self.ts,
'wait': self.wait,
'mode': self.mode,
'version': 1
}
while True:
try:
response = self.session.get(
self.url,
params=values,
timeout=self.wait + 10
)
break
except Exception as e:
debug('[RS] При запросе произошла ошибка: %s' % e)
time.sleep(5)
if response.status_code in [404, 500, 502, 503, 504, 509]:
ts, pts = self.ts, self.pts
self.update_longpoll_server(update_ts=True)
for ms in self.get_pts(ts, pts):
yield Event(ms, True)
elif response.ok:
response = response.json()
# response['failed'] = 3
if 'failed' not in response:
self.ts = response['ts']
self.pts = response['pts']
for raw_event in response['updates']:
yield Event(raw_event)
elif response['failed'] == 1:
debug('VkLongPoll ошибка 1: обновляем ts: %s - %s', self.ts, response['ts'])
self.ts = response['ts']
elif response['failed'] == 2:
debug('VkLongPoll ошибка 2: обновляем сервер..')
self.update_longpoll_server(update_ts=False)
elif response['failed'] == 3:
debug('VkLongPoll ошибка 3: используем pts: %s', self.pts)
ts, pts = self.ts, self.pts
self.update_longpoll_server(update_ts=True)
for ms in self.get_pts(ts, pts):
yield Event(ms, True)
def listen(self):
while True:
time.sleep(0.5)
events = self.check()
for event in events:
yield event
class Event(object):
__slots__ = (
'raw', 'type', 'message_flags', 'platform', 'offline_type',
'user_id', 'group_id', 'user_data', 'chat_data',
'from_user', 'from_chat', 'from_group', 'from_me', 'to_me', 'emoji'
) + ALL_EVENT_ATTRS
def __init__(self, raw, pts=False):
# Reset attrs to None
for i in self.__slots__:
self.__setattr__(i, None)
self.raw = raw
self.peer_id = None
self.from_user = False
self.from_chat = False
self.from_group = False
self.from_me = False
self.to_me = False
self.message_flags = set()
self.attachments = {}
if pts:
self.type = VkEventType.MESSAGE_NEW
self._parse_pts_messages()
self._replace_strings()
else:
try:
self.type = VkEventType(raw[0])
self._list_to_attr(raw[1:], EVENT_ATTRS_MAPPING[self.type])
except ValueError:
pass
if self.type in PARSE_PEER_ID_EVENTS:
self._parse_peer_id()
if self.type == VkEventType.MESSAGE_NEW:
self._parse_message_flags()
self._parse_message()
self._replace_strings()
elif self.type in [VkEventType.USER_ONLINE, VkEventType.USER_OFFLINE]:
self.user_id = abs(self.user_id)
self._parse_online_status()
def _parse_pts_messages(self):
self.emoji = False
if self.raw['out']:
self.from_me = True
else:
self.to_me = True
        self.raw.update({7: {}})  # kludge: emulate the raw longpoll attachments field
if 'action' in self.raw:
if self.raw['action'] == 'chat_title_update':
self.raw[7] = { 'source_act': self.raw['action'], 'source_text': self.raw['action_text']}
if self.raw['action'] == 'chat_photo_update':
self.raw[7] = { 'source_act': self.raw['action'] }
if self.raw['action'] == 'chat_photo_remove':
self.raw[7] = { 'source_act': self.raw['action'] }
if self.raw['action'] == 'chat_invite_user':
self.raw[7] = { 'source_act': self.raw['action'], 'source_mid': self.raw['action_mid'] }
if self.raw['action'] == 'chat_kick_user':
self.raw[7] = { 'source_act': self.raw['action'], 'source_mid': self.raw['action_mid'] }
self.message_flags = ('friends',) if self.raw['friend'] else ()
self.text = self.raw['body']
self.message_id = self.raw['id']
self.attachments = []
for smile in smilesTable:
if smile in self.text:
self.emoji = True
break
if 'chat_id' in self.raw:
self.from_chat = True
self.chat_id = self.raw['chat_id']
self.user_id = self.raw['user_id']
self.peer_id = self.chat_id + CHAT_START_ID
elif 'user_id' in self.raw:
self.from_user = True
self.user_id = self.raw['user_id']
self.peer_id = self.user_id
if 'attachments' in self.raw:
for attach in self.raw['attachments']:
t = attach['type']
self.attachments.append(t)
if 'fwd_messages' in self.raw:
self.attachments.append('FWD')
def _list_to_attr(self, raw, attrs):
for i in range(min(len(raw), len(attrs))):
self.__setattr__(attrs[i], raw[i])
def _parse_peer_id(self):
        if self.peer_id < 0:  # message from/to a group
self.from_group = True
self.group_id = abs(self.peer_id)
        elif CHAT_START_ID < self.peer_id:  # message from a chat (conversation)
self.from_chat = True
self.chat_id = self.peer_id - CHAT_START_ID
if 'from' in self.attachments:
self.user_id = int(self.attachments['from'])
        else:  # message from/to a user
self.from_user = True
self.user_id = self.peer_id
def _parse_message_flags(self):
x = 1
for message_flag in MESSAGE_FLAGS:
if self.flags & x:
self.message_flags.add(message_flag)
x *= 2
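    # Illustrative: given MESSAGE_FLAGS above, a raw flags value of 3 (1 + 2)
    # yields {'unread', 'outbox'}, and 24 (8 + 16) yields {'important', 'chat'}.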
def _parse_message(self):
if 'outbox' in self.message_flags:
self.from_me = True
else:
self.to_me = True
self.emoji = False
for smile in smilesTable:
if smile in self.text:
self.emoji = True
break
if self.attachments:
att = []
for q in range(11):
t = 'attach%s_type' % q
if 'attach%s_type' % q in self.attachments:
att.append('%s' % self.attachments[t])
if 'fwd' in self.attachments:
att.append('FWD')
self.attachments = att
else:
self.attachments = []
def _replace_strings(self):
self.text = self.text.replace('<br>', ' ')
self.text = self.text.replace('\\', '\\\\')
self.text = self.text.replace('"', '"')
self.text = self.text.replace('>', '>')
self.text = self.text.replace('<', '<')
self.text = self.text.replace('&', '&')
self.text = self.text.replace('Ё', 'Е')
self.text = self.text.replace('ё', 'е')
def _parse_online_status(self):
try:
if self.type == VkEventType.USER_ONLINE:
self.platform = VkPlatform(self.extra & 0xFF)
elif self.type == VkEventType.USER_OFFLINE:
self.offline_type = VkOfflineType(self.extra)
except ValueError:
pass
|
gpl-3.0
| 5,682,380,085,713,066,000 | 28.061086 | 105 | 0.530401 | false |
CantemoInternal/django-knowledge
|
knowledge/views.py
|
1
|
8718
|
import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import Q
from models import Question, Response, Category, Company, Author
from forms import QuestionForm, ResponseForm
from utils import paginate
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
ALLOWED_MODS = {
'question': [
'private', 'public',
'delete', 'lock',
'clear_accepted'
],
'response': [
'internal', 'inherit',
'private', 'public',
'delete', 'accept'
]
}
def get_my_questions(request):
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
if request.user.is_anonymous():
return None
else:
return Question.objects.can_view(request.user)\
.filter(user=request.user)
def knowledge_index(request,
template='django_knowledge/index.html'):
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
articles = Question.objects.can_view(request.user)\
.prefetch_related('responses__question')[0:20]
articles_pop = Question.objects.can_view(request.user)\
.prefetch_related('responses__question')
articles_pop = articles_pop.order_by('-hits')
articles_rec = None
if Question.objects.can_view(request.user) & Question.objects.filter(recommended=True):
articles_rec = Question.objects.filter(recommended=True)
articles_rec = articles_rec.order_by('-lastchanged')
# this is for get_responses()
[setattr(q, '_requesting_user', request.user) for q in articles]
author = ''
try:
author = Author.objects.get(user=request.user)
except:
pass
return render(request, template, {
'request': request,
'articles': articles,
'author': author,
'articles_rec': articles_rec,
'articles_pop': articles_pop,
'my_questions': get_my_questions(request),
'categories': Category.objects.all(),
'BASE_TEMPLATE' : settings.BASE_TEMPLATE,
})
def knowledge_list(request,
category_slug=None,
template='django_knowledge/list.html',
Form=QuestionForm):
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
search = request.GET.get('title', None)
questions = Question.objects.can_view(request.user)\
.prefetch_related('responses__question')
if search:
questions = questions.filter(
Q(title__icontains=search) | Q(body__icontains=search)
)
category = None
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
questions = questions.filter(categories=category)
# this is for get_responses()
[setattr(q, '_requesting_user', request.user) for q in questions]
author = ''
try:
author = Author.objects.get(user=request.user)
except:
pass
paginator = Paginator(questions, 5)
page = request.GET.get('page')
try:
articles = paginator.page(page)
except PageNotAnInteger:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
return render(request, template, {
'request': request,
'search': search,
'questions': questions,
'articles': articles,
'author': author,
'my_questions': get_my_questions(request),
'category': category,
'categories': Category.objects.all(),
'form': Form(request.user, initial={'title': search}), # prefill title
'BASE_TEMPLATE' : settings.BASE_TEMPLATE,
})
def knowledge_thread(request,
question_id,
slug=None,
template='django_knowledge/thread.html',
Form=ResponseForm):
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
try:
question = Question.objects.can_view(request.user)\
.get(id=question_id)
author_instance = ''
        company = ''
try:
author_instance = Author.objects.get(user=question.user)
company = Company.objects.get(name=author_instance.company)
except:
pass
question.hits = question.hits + 1
question.save(update_fields=['hits'])
except Question.DoesNotExist:
if Question.objects.filter(id=question_id).exists() and \
hasattr(settings, 'LOGIN_REDIRECT_URL'):
return redirect(settings.LOGIN_REDIRECT_URL)
else:
raise Http404
author = ''
responses = question.get_responses(request.user)
if request.path != question.get_absolute_url():
return redirect(question.get_absolute_url(), permanent=True)
if request.method == 'POST':
form = Form(request.user, question, request.POST)
if form and form.is_valid():
if request.user.is_authenticated() or not form.cleaned_data['phone_number']:
form.save()
return redirect(question.get_absolute_url())
else:
form = Form(request.user, question)
try:
author = Author.objects.get(user=request.user)
except:
pass
return render(request, template, {
'request': request,
'question': question,
'company': company,
'author': author,
'author_instance': author_instance,
'my_questions': get_my_questions(request),
'responses': responses,
'allowed_mods': ALLOWED_MODS,
'form': form,
'categories': Category.objects.all(),
'BASE_TEMPLATE' : settings.BASE_TEMPLATE,
})
def knowledge_moderate(
request,
lookup_id,
model,
mod,
allowed_mods=ALLOWED_MODS):
"""
An easy to extend method to moderate questions
and responses in a vaguely RESTful way.
Usage:
/knowledge/moderate/question/1/inherit/ -> 404
/knowledge/moderate/question/1/public/ -> 200
/knowledge/moderate/response/3/notreal/ -> 404
/knowledge/moderate/response/3/inherit/ -> 200
"""
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
if request.method != 'POST':
raise Http404
if model == 'question':
Model, perm = Question, 'change_question'
elif model == 'response':
Model, perm = Response, 'change_response'
else:
raise Http404
if not request.user.has_perm(perm):
raise Http404
if mod not in allowed_mods[model]:
raise Http404
instance = get_object_or_404(
Model.objects.can_view(request.user),
id=lookup_id)
func = getattr(instance, mod)
if callable(func):
func()
try:
return redirect((
instance if instance.is_question else instance.question
).get_absolute_url())
except NoReverseMatch:
# if we delete an instance...
return redirect(reverse('knowledge_index'))
def knowledge_ask(request,
template='django_knowledge/ask.html',
Form=QuestionForm):
if settings.LOGIN_REQUIRED and not request.user.is_authenticated():
return HttpResponseRedirect(settings.LOGIN_URL+"?next=%s" % request.path)
if request.method == 'POST':
form = Form(request.user, request.POST)
if form and form.is_valid():
if request.user.is_authenticated() or not form.cleaned_data['phone_number']:
question = form.save()
return redirect(question.get_absolute_url())
else:
return redirect('knowledge_index')
else:
form = Form(request.user)
return render(request, template, {
'request': request,
'my_questions': get_my_questions(request),
'form': form,
'categories': Category.objects.all(),
'BASE_TEMPLATE' : settings.BASE_TEMPLATE,
})
|
isc
| 5,901,758,226,665,609,000 | 30.701818 | 91 | 0.606791 | false |
AutorestCI/azure-sdk-for-python
|
azure-mgmt-batchai/azure/mgmt/batchai/models/image_reference.py
|
1
|
1385
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageReference(Model):
"""The image reference.
:param publisher: Publisher of the image.
:type publisher: str
:param offer: Offer of the image.
:type offer: str
:param sku: SKU of the image.
:type sku: str
:param version: Version of the image.
:type version: str
"""
_validation = {
'publisher': {'required': True},
'offer': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'publisher': {'key': 'publisher', 'type': 'str'},
'offer': {'key': 'offer', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, publisher, offer, sku, version=None):
self.publisher = publisher
self.offer = offer
self.sku = sku
self.version = version
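# Illustrative construction (publisher/offer/sku/version values are made up and
# not checked against any Azure image catalogue):
#     ref = ImageReference(publisher='Canonical', offer='UbuntuServer',
#                          sku='16.04-LTS', version='latest')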
|
mit
| -4,448,726,366,510,070,000 | 29.777778 | 76 | 0.53935 | false |
gurneyalex/odoo
|
addons/website_livechat/models/website_visitor.py
|
5
|
6372
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import json
from odoo import models, api, fields, _
from odoo.exceptions import UserError
class WebsiteVisitor(models.Model):
_inherit = 'website.visitor'
livechat_operator_id = fields.Many2one('res.partner', compute='_compute_livechat_operator_id', store=True, string='Speaking with')
livechat_operator_name = fields.Char('Operator Name', related="livechat_operator_id.name")
mail_channel_ids = fields.One2many('mail.channel', 'livechat_visitor_id',
string="Visitor's livechat channels", readonly=True)
session_count = fields.Integer('# Sessions', compute="_compute_session_count")
@api.depends('mail_channel_ids.livechat_active', 'mail_channel_ids.livechat_operator_id')
def _compute_livechat_operator_id(self):
results = self.env['mail.channel'].search_read(
[('livechat_visitor_id', 'in', self.ids), ('livechat_active', '=', True)],
['livechat_visitor_id', 'livechat_operator_id']
)
visitor_operator_map = {int(result['livechat_visitor_id'][0]): int(result['livechat_operator_id'][0]) for result in results}
for visitor in self:
visitor.livechat_operator_id = visitor_operator_map.get(visitor.id, False)
@api.depends('mail_channel_ids')
def _compute_session_count(self):
sessions = self.env['mail.channel'].read_group([('livechat_visitor_id', 'in', self.ids)], ['livechat_visitor_id'], ['livechat_visitor_id'])
sessions_count = {session['livechat_visitor_id'][0]: session['livechat_visitor_id_count'] for session in sessions}
for visitor in self:
visitor.session_count = sessions_count.get(visitor.id, 0)
def action_send_chat_request(self):
""" Send a chat request to website_visitor(s).
This creates a chat_request and a mail_channel with livechat active flag.
But for the visitor to get the chat request, the operator still has to speak to the visitor.
The visitor will receive the chat request the next time he navigates to a website page.
(see _handle_webpage_dispatch for next step)"""
# check if visitor is available
unavailable_visitors_count = self.env['mail.channel'].search_count([('livechat_visitor_id', 'in', self.ids), ('livechat_active', '=', True)])
if unavailable_visitors_count:
raise UserError(_('Recipients are not available. Please refresh the page to get latest visitors status.'))
# check if user is available as operator
for website in self.mapped('website_id'):
if not website.channel_id:
raise UserError(_('No Livechat Channel allows you to send a chat request for website %s.' % website.name))
self.website_id.channel_id.write({'user_ids': [(4, self.env.user.id)]})
# Create chat_requests and linked mail_channels
mail_channel_vals_list = []
for visitor in self:
operator = self.env.user
country = visitor.country_id
visitor_name = "%s (%s)" % (visitor.display_name, country.name) if country else visitor.display_name
mail_channel_vals_list.append({
'channel_partner_ids': [(4, operator.partner_id.id)],
'livechat_channel_id': visitor.website_id.channel_id.id,
'livechat_operator_id': self.env.user.partner_id.id,
'channel_type': 'livechat',
'public': 'private',
'email_send': False,
'country_id': country.id,
'anonymous_name': visitor_name,
'name': ', '.join([visitor_name, operator.livechat_username if operator.livechat_username else operator.name]),
'livechat_visitor_id': visitor.id,
'livechat_active': True,
})
if mail_channel_vals_list:
mail_channels = self.env['mail.channel'].create(mail_channel_vals_list)
# Open empty chatter to allow the operator to start chatting with the visitor.
mail_channels_info = mail_channels.channel_info('channel_minimize')
for mail_channel_info in mail_channels_info:
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', operator.partner_id.id), mail_channel_info)
def _handle_website_page_visit(self, response, website_page, visitor_sudo):
""" Called when the visitor navigates to a website page.
This checks if there is a chat request for the visitor.
It will set the livechat session cookie of the visitor with the mail channel information
to make the usual livechat mechanism do the rest.
(opening the chatter if a livechat session exist for the visitor)
This will only happen if the mail channel linked to the chat request already has a message.
So that empty livechat channel won't pop up at client side. """
super(WebsiteVisitor, self)._handle_website_page_visit(response, website_page, visitor_sudo)
visitor_id = visitor_sudo.id or self.env['website.visitor']._get_visitor_from_request().id
if visitor_id:
# get active chat_request linked to visitor
chat_request_channel = self.env['mail.channel'].sudo().search([('livechat_visitor_id', '=', visitor_id), ('livechat_active', '=', True)], order='create_date desc', limit=1)
if chat_request_channel and chat_request_channel.channel_message_ids:
livechat_session = json.dumps({
"folded": False,
"id": chat_request_channel.id,
"message_unread_counter": 0,
"operator_pid": [
chat_request_channel.livechat_operator_id.id,
chat_request_channel.livechat_operator_id.display_name
],
"name": chat_request_channel.name,
"uuid": chat_request_channel.uuid,
"type": "chat_request"
})
expiration_date = datetime.now() + timedelta(days=100 * 365) # never expire
response.set_cookie('im_livechat_session', livechat_session, expires=expiration_date.timestamp())
|
agpl-3.0
| -657,490,153,285,551,900 | 59.685714 | 184 | 0.628374 | false |
jackyyf/paste.py
|
src/providers/gist.py
|
1
|
11989
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import argparse
import getpass
import json
import os
import sys
import datetime
from lib.provider import ProviderBase
from common import exception
from lib import logger, uri, config
import requests
from requests import exceptions
_no_validate = lambda _=None : _
def to_bool(val):
val = val.lower()
if val in ['1', 'yes', 'true', 'on', 'y']:
return True
if val in ['0', 'no', 'false', 'off', 'n']:
return False
raise ValueError('Invalid bool value: ' + val)
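# Illustrative behaviour of to_bool (matching is case-insensitive):
#     to_bool('Yes') -> True, to_bool('off') -> False,
#     to_bool('maybe') raises ValueError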
_api_base = 'https://api.github.com'
# For future use: config part.
_config_entry = {
# You may add more entries here.
# Value can be two types : a list or a validator.
# If value is a list, user provided value should be one of the element in the list.
# Otherwise, value is validated by call the validator with the value,
# if no exception raised, the returned value is used.
# if all values are accepted, use _no_validate
# 'auth' : ['anonymous', 'basic'],
'auth' : to_bool,
'token' : _no_validate,
}
_actions = dict()
def action(action_name):
def _add(func):
global _actions
logger.debug('decorator: add_action ' + action_name)
if action_name in _actions:
logger.fatal(action_name + ' already registered!')
_actions[action_name] = func
return func
return _add
class Gist(ProviderBase):
_name = 'gist'
_info = 'Github gist (https://gist.github.com)'
def __init__(self):
logger.debug('call: gist.__init__')
self.req = requests.Session()
logger.debug('http: set user-agent.')
self.req.headers['User-Agent'] = config.full_version()
self.req.headers['Accept'] = 'application/vnd.github.v3+json'
super(Gist, self).__init__()
def add_args(self, opt):
opt_action = opt.add_subparsers(title='Actions', metavar='action', dest='action')
opt_opt = opt.add_argument_group('Options')
opt_opt.add_argument('-c', '--check', action='store_const', dest='check', default=True, const=True,
help='If stored oauth token is invalid, the process will be interrupted. (Default)')
opt_opt.add_argument('-n', '--no-check', action='store_const', dest='check', const=False,
help='Process anonymously if stored oauth token is invalid.')
action_push = opt_action.add_parser('push', help='Push one or more file to gist.', add_help=False)
push_args = action_push.add_argument_group('Arguments')
push_opts = action_push.add_argument_group('Options')
push_opts.add_argument('-h', '--help', help='Print this help message and exit.', action='help')
push_opts.add_argument('-a', '--anonymous', help='Post gist anonymously.', dest='gist.auth',
action='store_const', const=False)
push_opts.add_argument('-p', '--private', help='Hide this gist from search engine.', dest='private',
action='store_const', const=True, default=False)
push_opts.add_argument('-d', '--description', help='Add a description for the gist.', dest='description',
default='Gist by paste.py @ ' + str(datetime.datetime.now()), metavar='DESCRIPTION')
push_args.add_argument('files', nargs='*', metavar='files', help='Files to paste to gist, "-" or ignore to read from stdin.',
type=argparse.FileType('r'), default=[sys.stdin])
action_pull = opt_action.add_parser('pull', help='Pull one or more file from gist. (stub)', add_help=False)
pull_args = action_pull.add_argument_group('Arguments')
pull_opts = action_pull.add_argument_group('Options')
pull_opts.add_argument('-h', '--help', action='help', help='Print this help message and exit.')
pull_opts.add_argument('-f', '--files', help='Specify files you want to pull, may use src=dest to specify local file destination.',
dest='files', nargs='*', default=[])
pull_args.add_argument('url', help='Gist url, you may use http link or gist://gistid. '
'Note: with gist://gistid format, there are some easy ways to download specific files without -f '
'you can use gist://gistid?remote_name=local_name&remote_name2, which assumes remote_name2=remote_name2. '
'`remote_name` and `local_name` should be quoted with urllib.quote')
action_auth = opt_action.add_parser('auth', help='Add new or modify current authentication info for gist.', add_help=False)
auth_opts = action_auth.add_argument_group('Options')
auth_opts.add_argument('-h', '--help', help='Print this help message and exit.', action='help')
auth_opts.add_argument('-s', '--system', help='Add to system wide config file (/etc/paste.conf), instead of current user (~/.pasterc)',
action='store_const', dest='global', default=False, const=True)
auth_opts.add_argument('-r', '--remove', help='Remove stored authentication information.',
action='store_const', dest='remove', default=False, const=True)
auth_opts.add_argument('-f', '--force', help='Force renew token, even if it is still valid.',
action='store_const', dest='force', default=False, const=True)
def run(self):
global _actions
conf = config.getConfig()
action = conf.require('action')
if action not in _actions:
logger.fatal('No function for action: ' + action)
_actions[action](self)
@action('push')
def push(self):
# TODO: Implements push.
conf = config.getConfig()
res = self._do_auth()
if res is not None:
if not res:
if conf.getboolean('check', True):
print 'Token is invalid, please use paste.py gist auth to get a new token.'
sys.exit(1)
else:
del self.req.headers['Authorization']
files = conf.require('files')
if files.count(sys.stdin) > 1:
raise exception.InvalidValue('stdin was listed more than once!')
logger.debug('private: ' + ('yes' if conf.require('private') else 'no'))
logger.debug('description: ' + conf.require('description'))
logger.debug('files: ' + str(len(files)))
post_data = {
'public' : not conf.require('private'),
'description' : conf.require('description'),
}
file_data = dict()
try:
for file in files:
logger.info('reading file ' + file.name)
if file is sys.stdin:
print 'Type your content here, end with EOF'
print 'Use Ctrl-C to interrupt, if you have mistyped something.'
content = file.read()
logger.debug('file ' + file.name + ': %d lines, %d bytes' % (content.count('\n'), len(content)))
fname = os.path.basename(file.name)
now = 2
if fname in file_data:
if '.' in fname:
name, ext = fname.rsplit('.', 1)
else:
name, ext = fname, ''
while (name + '-' + str(now) + '.' + ext) in file_data:
now += 1
fname = (name + '-' + str(now) + '.' + ext)
logger.debug('final filename: ' + fname)
file_data[fname] = {
'content' : content,
}
except KeyboardInterrupt:
logger.warn('Ctrl-C received, exiting.')
sys.exit(1)
post_data['files'] = file_data
post_str = json.dumps(post_data)
post_url = _api_base + '/gists'
logger.debug('post url: ' + post_url)
try:
resp = self.req.post(post_url, data=post_str, headers={
'Content-Type' : 'application/json',
})
except exceptions.RequestException as e:
logger.error('Post error: ' + e.message)
raise exception.ServerException(e)
logger.debug('http ok.')
logger.info('server response: %d %s' % (resp.status_code, resp.reason))
if resp.status_code == 201:
logger.info('gist created')
url = resp.json()[u'html_url']
gistid = url.rsplit('/', 1)[1]
print 'HTTP Link: ' + url
print 'Paste.py uri: gist://' + gistid
else:
raise exception.ServerException('Server responsed with unknown status: %d %s ' % (resp.status_code, resp.reason))
@action('pull')
def pull(self):
# TODO: Implements pull
print 'Still a stub :('
sys.exit(1)
@action('auth')
def write_auth(self):
# TODO: Implements auth
conf = config.getConfig()
fileconf = config.getGlobalConfig() if conf.require('global') else config.getUserConfig()
remove = conf.require('remove')
if remove:
fileconf.remove('gist.auth')
fileconf.remove('gist.token')
print 'Authentication removed, you may delete the token from your user panel.'
return
if fileconf.get('gist.auth', False) and not conf.get('force', False):
logger.info('check current token')
try:
token = fileconf.require('gist.token')
except exception.NoSuchOption:
fileconf.remove('gist.auth')
return self.write_auth()
result = self._do_auth(token=token)
if result:
print 'Current token is valid, no auth required.'
return
print 'Current token is invalid, requesting a new token.'
token = self._perform_auth()
logger.info('auth ok.')
fileconf.set('gist.auth', True)
fileconf.set('gist.token', token)
logger.debug('saving to config file.')
fileconf.save()
print 'Done!'
def _perform_auth(self, otp_token=None):
if otp_token is None:
try:
self.user = raw_input('Username: ')
logger.debug('user: ' + self.user)
self.pwd = getpass.getpass('Password: ')
logger.debug('password ok.')
except KeyboardInterrupt:
logger.warn('Ctrl-C detected.')
sys.exit(1)
user = self.user
pwd = self.pwd
logger.info('auth: fetch new token')
post_json = {
'scopes' : ['gist'],
'note' : 'paste.py @ ' + str(datetime.datetime.now()),
'note_url' : 'https://github.com/jackyyf/paste.py',
}
post_headers = {
'Content-Type' : 'application/json',
}
if otp_token is not None:
post_headers['X-GitHub-OTP'] = otp_token
post_str = json.dumps(post_json)
post_url = _api_base + '/authorizations'
logger.debug('post_url: ' + post_url)
try:
resp = self.req.post(post_url, data=post_str, headers=post_headers, auth=(user, pwd))
except exceptions.RequestException as e:
raise exception.ServerException(e)
logger.info('http ok. response: %d %s' % (resp.status_code, resp.reason))
if resp.status_code == 201:
logger.info('auth ok.')
token = resp.json()[u'token']
logger.debug(resp.content)
self.req.headers['Authorization'] = 'token ' + token
return token
elif resp.status_code == 401:
# Two factor auth?
logger.warn('auth failed')
if 'X-GitHub-OTP' in resp.headers:
logger.warn('auth: two-factor required')
try:
token = raw_input('Two factor token from ' + resp.headers['X-Github-OTP'].replace('required; ', '') + ':')
except KeyboardInterrupt:
logger.warn('Ctrl-C detected')
sys.exit(1)
return self._perform_auth(otp_token=token)
else:
logger.error('username or password error.')
return self._perform_auth()
else:
raise exception.ServerException('Server responsed with unknown status: %d %s' % (resp.status_code, resp.reason))
def _do_auth(self, token=None):
# Authenticate to github, save some login info (user/pass, or oauth token)
conf = config.getConfig()
auth = conf.getboolean('gist.auth', False) or token is not None
if auth: # User/Pass Pair
logger.info('auth: oauth token')
if token is None:
token = conf.require('gist.token')
logger.debug('auth: test token usability')
# Try authenticate
self.req.headers['Authorization'] = 'token ' + token
# Get a time in future (1 year)
fmt_time = (datetime.datetime.now() + datetime.timedelta(days=365)).strftime('%Y-%m-%dT%H:%M:%SZ')
test_url = _api_base + '/gists?since=' + fmt_time
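            # The far-future 'since' value presumably keeps the gist listing empty/small
            # while still forcing GitHub to validate the Authorization header.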
logger.debug('test url: ' + test_url)
try:
resp = self.req.get(test_url)
except exceptions.RequestException as e:
logger.warn('http error, assume token is good.')
logger.info('[%s] %s' % (e.__class__.__name__, e.message))
return
logger.debug('http ok, response: %d %s' % (resp.status_code, resp.reason))
if resp.status_code == 401: # Invalid token
logger.warn('invalid token')
return False
elif resp.status_code == 200:
logger.info('token ok.')
return True
else:
logger.warn('unknown response status: %d %s' % (resp.status_code, resp.reason))
raise exception.ServerException('Server responsed with unknown status: %d %s' % (resp.status_code, resp.reason))
logger.info('auth: none')
return None
|
mit
| 8,551,301,403,267,670,000 | 38.179739 | 137 | 0.660606 | false |
jftaas/tegmail
|
tegmail/interface.py
|
1
|
3466
|
import curses
from tegmail import Event
class Interface(object):
def __init__(self):
self.on_key_event = Event()
self.menu_box = None
self.main_box = None
self.info_box = None
self._keys = {
13: 'KEY_ENTER',
22: 'KEY_BACKSPACE',
27: 'KEY_ESCAPE',
127: 'KEY_BACKSPACE',
258: 'KEY_DOWN',
259: 'KEY_UP',
260: 'KEY_LEFT',
261: 'KEY_RIGHT',
}
self._init_curses()
def _init_curses(self):
self._stdscr = curses.initscr()
curses.curs_set(0)
curses.noecho()
curses.cbreak()
curses.nonl()
self._stdscr.keypad(True)
self._stdscr.refresh()
# set custom color pairs
# TODO check COLORS for number of
# supported pairs
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, -1, -1)
curses.init_pair(2, -1, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_RED, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
curses.init_pair(5, curses.COLOR_BLUE, -1)
curses.init_pair(6, curses.COLOR_CYAN, -1)
curses.init_pair(7, curses.COLOR_YELLOW, -1)
curses.init_pair(8, curses.COLOR_MAGENTA, -1)
self.menu_box = curses.newwin(1, curses.COLS, 0, 0)
self.main_box = curses.newwin(curses.LINES - 2, curses.COLS, 1, 0)
self.info_box = curses.newwin(1, curses.COLS, curses.LINES - 1, 0)
self.main_box.idlok(1)
self.main_box.scrollok(True)
def _exit_curses(self):
curses.curs_set(1)
curses.echo()
curses.nocbreak()
curses.nl()
self._stdscr.keypad(False)
curses.endwin()
def _format_key(self, i):
if i in self._keys:
key = self._keys[i]
else:
try:
key = chr(i)
except ValueError:
key = ''
return key
def _change_window_color(self, win, color_index):
win.bkgd(' ', curses.A_REVERSE)
win.refresh()
def update(self):
getch = self._stdscr.getch()
key = self._format_key(getch)
self.on_key_event(key)
def close(self):
self._exit_curses()
def clear(self, win=None):
if not win:
win = self.main_box
win.erase()
win.refresh()
def print_text(self, text, win=None):
if not win:
win = self.main_box
win.addstr(text)
win.refresh()
def get_cursor_pos(self, win=None):
if not win:
win = self.main_box
return win.getyx()
# move_cursor(y_direction)
# move_cursor(y, x)
def move_cursor(self, *args, **kwargs):
if len(args) == 1:
yx = self.main_box.getyx()
y = yx[0] + args[0]
x = yx[1]
elif len(args) == 2:
y = args[0]
x = args[1]
if (y < self.main_box.getbegyx()[0] - 1 or
                y > self.main_box.getmaxyx()[0] - 1):
return
self.main_box.chgat(curses.color_pair(1))
self.main_box.move(y, x)
self.main_box.chgat(curses.A_REVERSE)
self.main_box.refresh()
def add_char(self, y, x, ch, win=None):
if not win:
win = self.main_box
win.addch(y, x, ch, curses.A_REVERSE)
win.move(y, 0)
win.refresh()
|
mit
| 1,277,838,725,827,404,000 | 25.06015 | 74 | 0.512695 | false |
gabeblack/DirectoryMaker
|
icons/addPicture.py
|
1
|
11681
|
# Copyright 2009 Gabriel Black
#
# This file is part of Directory Maker.
#
# Directory Maker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Directory Maker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Directory Maker. If not, see <http://www.gnu.org/licenses/>.
from wx.lib.embeddedimage import PyEmbeddedImage
addPicture = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABHNCSVQICAgIfAhkiAAAHGdJ"
"REFUeJztmnm8ZUV177+rqvZwzrnn3LHneaAbkaZbGhohTI7PF4OaF4ZgNGpM1KcJSRQT9RkB"
"icEQNCG+z/NhopgY1GhelIDKoDLJIJNMDT3S8+3h3r73nnlPVZU/zu0GPmlkzHt/PNbncz+7"
"Tt29a9X6rV+tqr3WhlfkFXlFXpFX5P9fkZc6wBfe/+ahci1YbhTrojB6fRiXjjVx1GeUWPE8"
"6SluyrLOzcnk1Prf/sLN7Zdj0i+nvCQAPvPOX/lCZMxvtCYm54ZaB+U4IK6NEA0MEcQxYRwT"
"hoZA6zxt79uok8bbzv3cddtersl7f3/AAw8gJ3wwf7FjvGgAPvTGY79b2OLs5sExH4iVSmR8"
"2RiJ+yo+GJoh2gTe6ECUDr0JQ4nKfWjXpt1srv6Tr/70kRejc9O3/mDZnFUn/2pp1tG/p6pL"
"Vkn9Sezum3C7f8ro/sn7J1rhz0y575GF8+c80r92ZIPM+cJzMu5FAXDeuuVv7nQ7N9brUz7S"
"WiKjfByIxMZTigLMwCx0GKKVoJXGaPFaC9oEaOXHg4pa/emv3LH3+erLbr3gxIcf33lFZWjh"
"qrkr1w7WFq5Bhl4N+R7Y9j3Yeh179kyybZ9l/5RjvEF25hnHblm5rHy2nPzVJ15WAE5fMnCa"
"9fy9K4oVSTcjDITQaEIthFoRGwirNVRYxmiFUgqtBa2011pEaYVW8vMv/eAXr30uXd57efCr"
"v/XBsT37vuysZ86ylX54wQoxAwvZv2e3f/zeu6ViuiyYgffeSbuT+anJpmzaPs7mnU0+/L4z"
"eNXi8proDVc//LIA8JYVA+/aP5X+Q7ObKWO07y8HknYztHiMUhgFgQYdRpi4jNaC0noaiENX"
"5bVWAv7L19y5+cO/TN+Df/ebV91y794PmMZOjllzLLku0e4WBEazbeMWGmPjZIWQoTnjjesw"
"yqNEKKz1j2/cybZdDd599vG7TlrRt1Ze95XxI+nQz9f4c+bPL03krWvSJB+JjBa8o50UMlAr"
"owSc9ygB7z3eO5z35EVOluWkaU6apiRpRjfNJE0zn2b50KIFwT88uTdJj6Tvzit/4zU33bPv"
"Sxs3bdfHveZVkusyYwe74AExTO4dpb9WotpXohxqdo5OMjg8TGELrENmzhjmiU07pNlR/f39"
"0b1fvvaxIy4F83wBqEftEdrFSF+AWO+9FSVGHAqYPVTiQDNHa01/X0zJCO3c0eqkdLtttGhE"
"a5QStDbe5ojHV0yniJ5N364DyScfeXxPdPLxi1FBhfUb91GKAoJAEwQZ1b4y/dWIIArBw469"
"DZrthDAwKAVpXsgbzjieW+7ezNhE/8eA//OSAEh8EQelUoi13tlCrLXEJmKkP2b+UMzcGZ79"
"kwmNJCOIKhy1cDYzF6+kb3g+u7Zu5OD+vbSaTZrNpnTaTax3G92C9gRP/kdd3vvw0+e+5pxy"
"JWDOnJn+4Sd2S5IWdJKcINAopVk+FDEwWOtttUFAM/GMT7QIg5AwNBS2QCntB6pl2bxj6uRn"
"s+t5AyA2bnpTJCJSc+DnLFgss+KUoT6D0powMCyYM0xWeA7Wu+TtOtsfu49u/gBJ7smzjDRN"
"aLQ6bNrf5Ng51at/chvFkXTtuv733/rgo7s5ZuUi/+SOMRndX0drhdaawCiMVvQvHqF/aIAg"
"DIlLJWYmlp3r95LahDg0eMA5L9009794/KD4rX92oiy79L4XDUA1M422yg4gMqPIc3n1ca+m"
"r7GJKDIEYQ91E4SEUcirgoBOkmEtKK3odnvrX/DsG2v6sVs3yMiM6LYjeR9g85bx1VPNLp0k"
"l2x/ncl6CxNojNYYpZkxVGJ4ZIBqfw1tDFEUM2+B4vFtB9m2ZYxqJQQEaz1Jkkm90YGDzdlH"
"0vW8Abh+797O65cN3ue9PzaKY7/10fvlracsIgoDgigijEOCMMRogzYGHYRoo9FBgChF1k1w"
"Rc5d922SwZKiPljdC/uPqGtsolPzHhqtLoV1NJottOoxQCnNScfNJSqX6auVMWFPb2Wwxikn"
"ea696REWzBtGqR4AWZbT7WZ0G0X/SwIAIMvdn2vF+5RSYq1lwcKZhGFIFMeEYYAOeiwwYdCb"
"WBBO/w5JkwQpMq798SOYMOKGG7Y86/HVO+88MFVv4ZzHWYstLN5nxHHI0UfNojpYozYyTBAE"
"PdCDgGPLZd573qn887V3o7XGWodznjgSvFLdI+lSLwSAn+2sPzlvZt9dznush+HBGjNmDjIw"
"XKN/uMbgSD9DI4MMzxhkcGiQ2kCVSl9MFGr6qhUGh2vkhaXVSXn86t9d92x6qn2lPUYJbzt1"
"Ee9/xyoWzJvBkgVDzJ01wAW/8zqOPnoRi1YsoTw0QlTrR8cR4gtKpYDzzj4Nh+Pk1yzknWet"
"ZrLepFqJKYfRgSPpekEMuPHSN/14oFo+5S++do+vlCKp1MqEpYgoLhGEIdoEKN07Wngs3jq8"
"CIggCB44btUyf/yrF6ACufuevz13zWsv+M5/OKXNGh7aFUUBRy0dIS6VWHPMXD720f+Ka+Vk"
"VmHKfRQmAmVw09pUIWB7ILz7nNNZt2oe7akWhXX0VyNYPGv7kWx6QQwY7C9RrpY4c91ikSCi"
"Uo0pxRFhMH3KE493BTiLeIeIA2/BOzyWwnrOetNxcszKOZJbn1TLfUfUH5b1gTgKEG2oVmPO"
"ev1KJkcP0mx1sEVK4T0oDUpN/xnQCsRh85z3/NaZRFHInvEGzjlmjVSR+RfueskAVPvKd8ZR"
"yJtOW+nfeNICsJY8S8nzlCLtBTmcxTsL1uKtBW8RVyDO4l1BXI59XjgGBisNVT5yFBwYqO02"
"WvPY5gN+sC+gFCnSdoc8ScizlMJaHOC1wiuNF/CiwFpwBT7PKVdi7nt0J9VKTH+1suHZbHpB"
"AOhA/504RzkQTj9xsU+6XYosJU+nQchTnM2BnsfFO3CuxwDvEG9R4mX27CHiUrzbmuYRz+cL"
"33rZrlIp5Ja7t0itpHFZSpFl5FlCnuU9AKwF60E8goA4oAd4kWcMDFTYtmOcSikAkZtfFgBW"
"vPua3dZmW3FW8qQreZr7Isso0oQiTQ9fbZ72mOALhGKaBRZnrccVzJk9iInMXcee+93sSHpE"
"JDdasXH7BHfcv42028XlGXnWG9tmOTbPKYoCV1i8t+B9b7nhaLdaPPTETsYPTGKMJi+K218W"
"AACc9R+dmGi4PM29yzJsnlLkGUWWkGcZeZpQZBlFnoK3+GnvgwUKbF6QdLtTA2Xzp79MjyhV"
"H6yVuPQrd3Pfo3vA5rg0O7wMbJ7iiulrluGLDJzFaM8dP9/IR/7o78ny3KOk21+pbn42PS9o"
"FwB41e9+998AveFrv5FI4SKPRnuHd7pnbBD0KO8M3lmMCVBK4UUQL6K1v7V80mdf91x6bOHG"
"EPpFhBvu2s4bT5pPo5PjvUd32xRhiIfe2L7A5x1MkeJVxC23P0EpDrFe0RfHB+ctHt7zbHpe"
"MAMOSZZmf1sUOUWW49Icm+U9JqS9WHDIS0WeYm2OOIfg8M7+5PmM72AcEbQSHto4xi8e3zvN"
"rpSs3aTotLDdFnmnje22KDpNsqTLxPgEW7cfQBlNkuWyZNGMPWd97Ncnn03PC2bAIWnVu9+I"
"4vwP4jiMnDcSOI33Bm96lNfW9uKALXA29C4oBIQ01V96PuNnaT6uBDxCtRLyzRu28KF3rCSK"
"QyrOIUrh4zLKKGyRQdpCRNi6c4KxyQ4i4sMwklUrF94ocq592QHY3R3dOLMYWd9NiuMrlRAf"
"h4Q4sArvHN5oPA7nNVJYUSkc2Nf4wat+51v15zN+u9XpFrmll7TybBttsWXHQebNrpKmGX1Z"
"TqW/H60Emyc45xmslfjbb9yL93i8Z9nSmZx55por+fy1z6rnRQNw7iWPZ1eet/L6o1cMrS2K"
"nCzLKMURURwQOIezmqLI8V7oJYm83HvbY78A8P47Go7RMBZCFF53410z77zzsZUDA7Xq0GBf"
"+CunrTnxsXsfXnvr9XeQZTmdVkKRZlz7UJv3v3kAm1hy26bd6hAaRWAMooUHN+3lia0HqZS0"
"iAj9/X0/W/iWSyZ+mR0vGgCAVuLv2LarwdyZFV9vdKRcCokPgaDU4WNqpRrJnp1T1KeS7b0n"
"ZwiMKajqf7vplv5PfPyqbyxatPCESiVmeLhGrVYjrA3Rv3ylnzjYFBu2aNYbdKd2s3M8Z6Tk"
"cEVGGGgCQy9HgCZLCkQLSikfBkY6SXLNc9nwkgCYOWJ+4UVTb6YSaKHezIjDDsZogunjcRwF"
"pN2UXVvGWLqgOh2MqgIo6Kqbfnj/ImvdmoHBGnNmD7No8UyOPmYRpTjEOy9bNu9ldO8keVEw"
"6jQqCpkxt4/ORJNOYhEVUBWoVKBcicmSHFXupcm67ew5g/xLAuD3vvr4xLcvPAnxnsA7uvUO"
"O8faaK0IjWZgIGZOf42pesKSRYNeGWn0nowFxlRKMPP1bzrhOFFiPnrh7zNrRh86KBBywLNk"
"yXycMxR5wM/vf5Sv/v01nPTmM/AHNzM8NIKuzCaIS1SaW7DSIlDCm//bb/Kz678pA4NDtJP8"
"YuB//acBADAwNJwoXBz4gvmz59LxilbqiLRj0AQY20DPUDjnRJeHBrz3Gi52cCYpwfgPr7t7"
"y513PMTr3jzKYC1i5qyYBfNLGKOo11NG93aZmsp45JHd3P2zh5h/xZdpPfQtyKYISyOMzJpF"
"vr+BL8qYeady5Xnv5sZvvs5fdekf4KUUvO+MVWdffduj//KyA+C9P32i7f54y3cviJTtoLAE"
"3lMrD6MGZxPbNkFrL973juw+zxl47Xu+mGb2gij8s2tEzFUNf0scR4EKopBr/uHrjAzXWLZ8"
"DqefvppSKeSxx7azefNudu8a5+DBKYLAYMIS4dHnELW3UrETEMaEC09DjaxGqovw3nNS9rCc"
"8ZXfI2s367Wy1Vef8eiz2vGCATjmou+E133gjE9t3LjpoiTLGD7lI162X4/KpxCfE5gyw4uX"
"YLI6+VSCzQXxBc089lvqlQXJLx5Y0G63TvvXm3+8fOu+mV8vvNbOWowo4qhMp2UZHZ3EaMW+"
"vXVsoSiXKozbSfK8oFyqUIrLhMESpl+DniHJozdRmisYU/NBWS+0Revb+Y3nfw78x03Svkve"
"ft0z3kCfd2XooosuUo2Fp16qlZwVGlnxgVOWhF4rUUqj8F4n+6TceYIHHt7Aj+4dpWwsKxeU"
"eed5Z8HM1Sxf8xa27zmAswVaKz84PCwf/OjHJlefsGbiT3/7nGUzZg4zMjJIf3+VVcctQ2vF"
"ls27abW6TE01mJiYYvueSfZ+442w7vNMBCNkXVACJoaS9dh//jBmsEQ0Zz7epkjRxWctT9EW"
"73LIJvfm9T23xg33u/LBBzrPCcAHrrq5X5ysNJr3BEo+HAaaMBAvgZJZ/SXeunKmr5RiiaKQ"
"aq2fBx5az6mv+y/0lUOKPCfpprz/d97L/olxfnDDzUTlMlorjDGECg9OlFYMDQ/6cimSUikg"
"igKWLpmFEmHn7nGKwtFNMrqdlE43Zc94h2q2jz86azXn/+opCBClB9B7HyU84W2o6ix82kJs"
"is/b+KzlxXbFucyrvCnF1BPkifpU37vuuexZATjviu8tiFX5kjCQM8LAzA8jHYaBItTiTWRE"
"a3wUBzI02Mepy+ayfLCCEuH9H/pDfnTDjZgwxDlHqz4BKFpJlygMiSpVonKJgf4BBkKo1arE"
"5TJhKcYYTRgqjNYMDfehRFFvdCjygm6S0253SJKcNHfUG022b9nEz/9sFUuPfy1SGkbKI6CC"
"3muxK/C2i2RNfNZAigTrM1TRwY49SJF4xgNfXnjuPd1nxIB3/dW1S6yNLtEi79aBRgfKmzCQ"
"MDTEkSEMtUSliHJfScrliDBS3DNaZ/3BNnMrEdt37aE6MEhc7sNZizjH1OQ45VKJNOniky5a"
"K5zNyHNoNls0mw2c94h4RAlaKaIwwIsjSTNs7gjDmDAKyXJLbhVKFD6sUJ/1GuibjYT9YPoQ"
"3au0eZshovDOIjbHi0Y7g3cOdIDojKGWfAS44jAA77vyx8c45+4wgR6yoryOSxKXAinHoe8r"
"BxJGhjAQgtAQhgqtQXlHOTSEccxHfut8xkdHqZQj9u1+kqKwVCsV5sydQ73Ze4vTShEFmsBZ"
"RAxp0qXdatNNExrNBgsWzOetZ72D5UetZPHixYyO7mbD+vVc861/YvLgJOVKhVXHrmai1WFg"
"aAhvPWJKEFQgrOJNuWeM7fYyRTaFIEaswjsNNgPRiBK88W8Crjh8UvI6/FFQ6R/K4yphX02C"
"OCYII0wQ9Gr6Iigt0yVvQSmFiKAFfFGwa/NmBKEoHMuWLmP5smWU44hdO3YQRBFGa4IwwOgA"
"haMoLM1mi8n6FFFc4rN//pd889vf59fOejsjwzNo1Ov09w/z1redzW233cPlX7iSKCpxx89u"
"pxxHhCbESwAqngahClE/RAO9ti4jOsTpEugYdAkvCpkGQPCz91y1tmwA3vu/7/tTrfXCZjf1"
"SoE3Sqaz2cihxKvqGa1VLwGrDWg/3S+C1oYgMCRJwo4dO7HW0m03e3XDKCQwASYICUKDiJCl"
"KZNTU5x8yml88Yt/zQ033cz555/L6OgoSinCwJAkKUEYsGzZUVx44Sf5l+9dz+WXXcatt/+U"
"OYtXINqglAYd4k0JiQYAwXuL0i1QIWJszxCbIzbBKzN9D0NVKKsPXHV/4Bxn99aXFZSIkl50"
"FAElgogczkCLUmglKOj1I/SFPSLJNCtEZPp5AQ+BMaggIAxMr2JTFHTaXdYcfyKf//wVfObi"
"S7ng9/87WVaw9oRTWLBgKSuPWcMJ605l7txFbNq4gXPOfhs3/PBHfObiSxkeGmGq0UT1JgSo"
"3mSnPX24T4XTfxGiAnzWhHYX7/GCr1jRVTOZqIFA2zl5UeCd93Jo9tMAiICi11AI6ml9CigF"
"GjNttNGGp3/tIPSCsmiF0QZlAkBhC0tftcoX/uqLXPzZi7nhRz/kV05/I61mg0cfeZAsabNr"
"u8ejqQ4Ms3DpSpYqxUWf+QRjY+P84z9ew7ozX98rtfheKhxnIW/1tLpedhilgbCXois60BiH"
"VhPpjwWRyBjpU87mM61zg2mSeSUiAgg9g56+Sx4y/JCHASKjUNNtESEIDlWGnr67erQo1HR5"
"23tHp9vlrLf/OnfceRff/eY1LF66nLH9+9m2dQPNyQMocYh4vM8YG93B7u1b6Ha6zJ49n6u/"
"9hUmGi0+95lP93YWl+Ndhtg2ZHXIppC81ftSRfQ0Owy+sRs/NYZvtXo1C0dgLSVVeLfIe6K8"
"cE87V/ojHQ+eOnjKdCHmacYrpQhDcxiQwyN535sDgnO9T2e63Q6/dtbb+Jsrv8jMuXNJ0oT9"
"e3eS24SgPMDA4GwqA4uolofwytOqH+TA/j1Uav1YW/Ctf7qGdetOohRMR/aii6QtSKaQZAps"
"+gwK+84BZPeD0G7gmnVcVoDDOCuxss6vLJzXzrunee7Q1T/jp5++aunt14fu8b63hwdB2GPA"
"0zDw3j2FpxfyPGfdyacyMTbO/r37qFSrdFpNut0WWodEtRmM1tvsbyom8oK4b5AgjmhOTWCL"
"gv6hEa79/rcZGJzJzBkDeNuFooXPm0hWh6LbWxJMp+PTBuy+F9eawrfqJI0OWafAgyjvIoX4"
"pbaw4MHj8Ye81kur9fr9obafztBN1yH89P2Hlo16ank8gwG9JB1ZnpMkKcetWs2+sf0UziKi"
"aDebmDBAByFJdxxXGJR1pO0maI21FhVonC0ItKHRaNCsT1GJI7AZvsjBpXhX4H3x1Nw7B2D0"
"HpjcC80JimaT/XVHkjhwYK1oA8zLnTtsFIeMmrbMTbed9zgP3gGuV/zySK9vGgxr7TR4TzMe"
"KKztVXF8Qpb2tjZjQrx4yn1VxvfvRZRGvOC8BlK8q6OCkEAFFMqA8zjXo7V3HucKRHIoMpCg"
"t9494FKkyCA5gK/vQg7uxjXHcI1JDjZzxtowqDW28OAQ5ZzUrJ327LSnvfe4Q2033Z6+Ou+w"
"rgeGm77PAc45sjTDWnuY8q4Alyk69Zy0mdHtpqRZxr69+yiXK3jrKEUlorhMkWckSRfjIQwE"
"bXKqlT5smhCX+8jzDFEwPDwDby0DAwM06uOooo7KxpHOKNLaiTS2QXsn0hlDTY7i6weQxkFa"
"jQ5jLc9EqrwJA5wF65w23rlBb13PWHjKo47DQcv5Xo3TWd8rwDqPsx5npj2PxzlHmqa98zaA"
"9sw4LWXx64WofxPtcc2WG0Im1htuv+2n/Ob570KbgPrUJP3DM2g1JjDGkDbrmLiC0Yqi3cLh"
"IMspl2sMj8xm/MAoi5Ytw9ucpLGfA2nvfTiOY4Ig7MUfW8DUKIzvwR7c75OJBrsmrOyYKJi3"
"Yq441LjP/c3elG81VsQforl3vYDmDnt7mgHWY3Wv3zqHtVBYhVYKKx47/VyW5zjn8Lkw99yD"
"vOP8mZTKY7SKFsrO4ejVZX7yrVGe/NcOadrl6KOPYcvmJzjm2LW0GlNMjo0ShDFGgwkNtvDk"
"7S6lUoXhmXMosow9u3Zy+RV/46f2PCF7HnqYxPThJaBcihmqlYlCjW1OQv0A+cQ4rjkp4w1L"
"26s9MxbPua46b/D7Bw90H1v56Yf3ABjr0Bym/iHK9652Op3VY8C0112PBdZ6nPFYBOueigHO"
"eaIVUxz/FiEsPcmAPo33L/4Of3zPDEpxmeVnROSP9nPdv13LhRd+gne98xwOju1n7rxFxHGZ"
"yYl9JJ0mvu0Q0dQGhxkcmoUo2LZ1Ayeecpo/cd1r5aHvX/7z7o4DzYbvCAiVcikb6i91aiVJ"
"ojxLpN0cr8XBtnC4f0t5wez7zr7k3gY0gK3PCNIG739unazpBRbvvQPnEe/wziHOeW+dEufB"
"OsE5sNNl/8IKWoOTQwFPwEG0rEFlIGdF+Q9ZXD4TL5bzFv9PvvbgZ0n9FGkl4Z++fjVrVq3i"
"Ly+/gk9+4uNUqzXmLVzMwOAwSbeL866X7IhLpEmH/bv3MGveIn/pxRfLk5sfv++Cv/j6c35s"
"/ZTsfNb/qJ986tQPFWnn/CLv3JV2Wnu67UbSbTZot+rSaU7RbtSlVZ+kUa9Tn5r0jYlJ6hOT"
"TB2cYGp8gonxSdqdLoMzZ6CUIU27oC3dxCHJXFQxSG4TSm4u3Y6hmxR8+H98ips37aSyeClH"
"LV/Ox//kk4RRxOYN6xndvQOPIwoC2q0mmzc8ypNbNrDu5NP9V666Strtzv5N6ze8/fkb/8vl"
"qU37oovUGc0VQ1rJXGPcwlD51Ub7dYGRY0MtS0OjCbSgg17O3wRCYBRBoAgCTWtynB987avs"
"2rqJyuop1n1Ee287LK2dKp99w/c46UsjDJUjRBRvqF3OcLwCL5A0m8yOAmbPmcett9/Obbff"
"4U0Yy9TkJFPjB1i8ZDG//o53sHbtCTyx/qHdO7esP+Vzn/vcEb/3eWkAPIec8eGL+mb1Dy42"
"Ss3GuZlRZGaJlkGl6DeBhEbpqK+/5lsHx6N9W3eWdiz56xPmLq4tbHXafmx/ytCsmmivaO/n"
"8dNrf/FtkWhCKdmLBGOuXZdy1v3kcavXrh0aHprR7aaI1pRKMSLCrp079mx49KFbPnvxp94D"
"0xW3/9sAvFBZ+3n6BfM3Rpn3Qi/AFkV+CZm9/IFL6BzpmY9fdNHcvrCypFyprAhMcFSRF1va"
"jan1SdJ88rLLLhv7z5rrf7qsvTRa9v96Dq/IK/KKHFH+HY2QgAtRqzHlAAAAAElFTkSuQmCC")
getaddPictureData = addPicture.GetData
getaddPictureImage = addPicture.GetImage
getaddPictureBitmap = addPicture.GetBitmap
|
gpl-3.0
| -328,173,910,101,158,900 | 72.00625 | 79 | 0.870559 | false |
anselmobd/fo2
|
src/logistica/migrations/0038_nfentradaagator_nfentradatussor.py
|
1
|
1086
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-17 23:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logistica', '0037_nfentrada_empresa'),
]
operations = [
migrations.CreateModel(
name='NfEntradaAgator',
fields=[
],
options={
'verbose_name': 'Nota fiscal de entrada Agator',
'verbose_name_plural': 'Notas fiscais de entrada Agator',
'proxy': True,
'indexes': [],
},
bases=('logistica.nfentrada',),
),
migrations.CreateModel(
name='NfEntradaTussor',
fields=[
],
options={
'verbose_name': 'Nota fiscal de entrada Tussor',
'verbose_name_plural': 'Notas fiscais de entrada Tussor',
'proxy': True,
'indexes': [],
},
bases=('logistica.nfentrada',),
),
]
|
mit
| -261,893,817,245,230,100 | 26.846154 | 73 | 0.490792 | false |
skalldri/gmusic-sync
|
thumbs-up.py
|
1
|
5121
|
#!/usr/bin/env python2
import logging
import argparse
import time
import os
import ConfigParser
from gmusicapi import Mobileclient
from gmusicapi import Webclient
from gmusicapi import exceptions
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('gmusic-sync')
def main():
log.setLevel(logging.INFO)
logging.getLogger('gmusicapi').setLevel(logging.INFO)
cred_path = os.path.join(os.path.expanduser('~'), '.gmusic-sync')
if not os.path.isfile(cred_path):
raise NoCredentialException(
'No username/password was specified. No config file could '
'be found either. Try creating %s and specifying your '
'username/password there. Make sure to chmod 600.'
% cred_path)
if not oct(os.stat(cred_path)[os.path.stat.ST_MODE]).endswith('00'):
raise NoCredentialException(
'Config file is not protected. Please run: '
'chmod 600 %s' % cred_path)
config = ConfigParser.ConfigParser()
config.read(cred_path)
src_user = config.get('src','username')
src_pass = config.get('src','password')
src_device = config.get('src','deviceid')
dst_user = config.get('dst','username')
dst_pass = config.get('dst','password')
dst_device = config.get('dst','deviceid')
if not src_user or not src_pass or not dst_user or not dst_pass:
raise NoCredentialException(
'No username/password could be read from config file'
': %s' % cred_path)
if not src_device or not dst_device:
raise NoCredentialException(
'No deviceId could be read from config file'
': %s' % cred_path)
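    # Illustrative ~/.gmusic-sync layout (placeholder values; keep the file chmod 600):
    #   [src]
    #   username = someone@example.com
    #   password = app-password
    #   deviceid = 0123456789abcdef
    #   [dst]
    #   username = other@example.com
    #   password = app-password
    #   deviceid = fedcba9876543210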
parser = argparse.ArgumentParser(description='gmusic-sync', add_help=False)
parser.add_argument('-d', '--dst', help='Perform operation on the dst account', action='store_true', dest='dst')
parser.add_argument('-l', '--list', help='List playlists on the src account', action='store_true', dest='lst')
parser.add_argument('-p', '--playlist', help='Playlist ID from src account to transfer', dest='playlist')
args = parser.parse_args()
api = Mobileclient()
if args.dst:
api.login(dst_user, dst_pass, dst_device)
else:
api.login(src_user, src_pass, src_device)
playlists = api.get_all_playlists()
if args.lst:
for playlist in playlists:
print playlist['name'] + ' (' + playlist['id'] + ') '
exit()
library = api.get_all_songs()
if args.playlist is None:
        print 'Error: no playlist selected'
        exit()
all_playlist_entries = api.get_all_user_playlist_contents()
selected_playlist_entries = []
for entry in all_playlist_entries:
if entry['id'] == args.playlist:
selected_playlist_entries = entry['tracks']
playlist_tracks = []
for ptrack in selected_playlist_entries:
track_found = False
for track in library:
if ptrack['trackId'] == track['id']:
playlist_tracks.append(track)
track_found = True
break
try:
if ptrack['trackId'] == track['storeId']:
playlist_tracks.append(track)
track_found = True
break
except:
pass
if not track_found:
print 'ERROR: could not find playlist entry ' + str(ptrack)
api.add_aa_track(ptrack['trackId'])
if len(playlist_tracks) != len(selected_playlist_entries):
print 'Error: could not locate all playlist tracks in src library'
exit()
failed_tracks = []
playlist_tracks_reversed = []
for track in playlist_tracks:
playlist_tracks_reversed.insert(0, track)
for track in playlist_tracks_reversed:
track['rating'] = '5'
res = api.change_song_metadata(track)
if len(res) != 1:
raise Exception('Could not change track metadata!')
time.sleep(1)
def heuristic_search(library, track, strict):
print 'Heuristics Search Start for ' + track['title'] + ' - ' + track['artist'] + ' (' + track['album'] + ')'
try:
for test_track in library:
if strict:
if test_track['album'] == track['album'] and test_track['title'] == track['title'] and test_track['artist'] == track['artist']:
print 'Strict Heuristic Match! ' + test_track['title'] + ' - ' + test_track['artist'] + ' (' + test_track['album'] + ')'
return test_track
else:
if test_track['title'] == track['title'] and test_track['artist'] == track['artist']:
print 'Weak Heuristic Match! ' + test_track['title'] + ' - ' + test_track['artist'] + ' (' + test_track['album'] + ')'
return test_track
except:
print 'Error occured performing heuristic search. Assuming track is not already in library'
return None
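# Illustrative call (placeholder track dict):
#   heuristic_search(library, {'title': 'Song', 'artist': 'Band', 'album': 'LP'}, strict=True)
# returns the first matching library entry, or None when nothing matches.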
if __name__ == '__main__':
main()
|
gpl-2.0
| 7,836,864,898,315,428,000 | 34.5625 | 143 | 0.58348 | false |
JonathanTaquet/Oe2sSLE
|
RIFF/smpl.py
|
1
|
5650
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2017 Jonathan Taquet
This file is part of Oe2sSLE (Open e2sSample.all Library Editor).
Oe2sSLE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Oe2sSLE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Oe2sSLE. If not, see <http://www.gnu.org/licenses/>
"""
import struct
import warnings
import RIFF
class RIFF_smpl(RIFF.ChunkData):
_dataMinFmt = '<9I'
_dataMinSize = struct.calcsize(_dataMinFmt)
class LoopData:
_dataFmt = '<6I'
_dataSize = struct.calcsize(_dataFmt)
"""
smpl_loop_types:
0 : 'Forward',
1 : 'Forward/Backward',
2 : 'Backward',
3-31 : 'Reserved',
>31 : 'Specific'
start, end: byte offsets
"""
def __init__(self, smpl_master, loop_num):
self.__dict__['fields']=dict()
self.__dict__['smpl'] = smpl_master
offset=smpl_master._dataMinSize+loop_num*self._dataSize
self.fields['identifier']=(offset, '<I')
offset+=struct.calcsize('I')
self.fields['type']=(offset, '<I')
offset+=struct.calcsize('I')
self.fields['start']=(offset, '<I')
offset+=struct.calcsize('I')
self.fields['end']=(offset, '<I')
offset+=struct.calcsize('I')
self.fields['fraction']=(offset, '<I')
offset+=struct.calcsize('I')
self.fields['playCount']=(offset, '<I')
offset+=struct.calcsize('I')
def __getattr__(self, name):
try:
loc, fmt = self.fields[name]
except:
raise AttributeError
else:
size = struct.calcsize(fmt)
unpacked = struct.unpack(fmt, self.smpl.rawdata[loc:loc+size])
if len(unpacked) == 1:
return unpacked[0]
else:
return unpacked
def __setattr__(self, name, value):
try:
loc, fmt = self.fields[name]
except:
self.__dict__[name] = value
else:
size = struct.calcsize(fmt)
self.__dict__['smpl'].rawdata[loc:loc+size] = struct.pack(fmt, value)
def __init__(self, file=None, chunkHeader=None):
self.__dict__['fields'] = dict()
self.__dict__['rawdata'] = bytearray()
offset = 0
self.fields['manufacturer']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['product']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['samplePeriod']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['MIDIUnityNote']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['MIDIPitchFraction']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['SMPTEFormat']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['SMPTEOffset']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['numSampleLoops']=(offset, '<I')
offset += struct.calcsize('I')
self.fields['numAdditionalBytes']=(offset, '<I')
offset += struct.calcsize('I')
self.__dict__['loops'] = []
if file:
self.read(file,chunkHeader)
else:
self.reset()
# def __len__(self):
# return len(self.rawdata)
def read(self, file, chunkHeader):
if chunkHeader.id != b'smpl':
raise TypeError("'smpl' chunk expected")
self.rawdata[:] = file.read(chunkHeader.size)
if len(self.rawdata) != chunkHeader.size:
raise EOFError('Unexpected End Of File')
for loopNum in range(self.numSampleLoops):
self.loops.append(self.LoopData(self,loopNum))
def reset(self):
self.rawdata[:] = bytes(RIFF_smpl._dataMinSize)
self.MIDIUnityNote = 60 # default to Middle C
self.loops[:] = []
# def write(self, file):
# file.write(self.rawdata)
def __getattr__(self, name):
try:
loc, fmt = self.fields[name]
except:
raise AttributeError
else:
size = struct.calcsize(fmt)
unpacked = struct.unpack(fmt, self.rawdata[loc:loc+size])
if len(unpacked) == 1:
return unpacked[0]
else:
return unpacked
def __setattr__(self, name, value):
try:
loc, fmt = self.fields[name]
except:
self.__dict__[name] = value
else:
size = struct.calcsize(fmt)
self.__dict__['rawdata'][loc:loc+size] = struct.pack(fmt, value)
def add_loop(self):
self.rawdata[len(self.rawdata):]=bytes(self.LoopData._dataSize)
self.loops.append(self.LoopData(self,len(self.loops)))
self.numSampleLoops = len(self.loops)
return self.loops[-1]
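# Minimal usage sketch (illustrative values): build an empty 'smpl' chunk and add one forward loop.
#   smpl = RIFF_smpl()               # reset() leaves MIDIUnityNote at 60 and no loops
#   loop = smpl.add_loop()
#   loop.type = 0                    # 0 = forward (see LoopData docstring)
#   loop.start, loop.end = 0, 44100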
|
gpl-2.0
| -7,203,342,894,235,420,000 | 32.662577 | 85 | 0.529735 | false |
hamogu/filili
|
filili/shmodelshelper.py
|
1
|
7184
|
"""
Module with helpers to manage Sherpa model parameters
"""
import json
import os
from copy import copy
from sherpa.astro.ui import get_model_component, get_model
from sherpa.utils.err import IdentifierErr
def save_pars(filename, modcomps=[], clobber=False):
"""
Save Sherpa model parameter attributes to an ASCII file
`filename` ASCII file name
`modcomps` list of model components (strings or objects) to save
`clobber` clobber the file if it exists
:author: Brian Refsdal
Example:
from sherpa.astro.ui import *
from save_pars import save_pars, load_pars
set_model(gauss1d.g1 + gauss1d.g2)
... set up parameters, fit
save_pars('mypars.out', [g1, g2])
or
save_pars('mypars.out', list_model_components(), clobber=True)
load_pars('mypars.out', [g1, g2])
"""
if not isinstance(filename, basestring):
raise TypeError("filename '%s' is not a string" % str(filename))
clobber = bool(clobber)
if os.path.isfile(filename) and not clobber:
raise ValueError("file '%s' exists and clobber is not set" %
str(filename))
saved = {}
for comp in modcomps:
for par in get_model_component(comp).pars:
for elem in ["val", "min", "max"]:
key = par.fullname + "." + elem
saved[key] = getattr(par, elem)
elem = "frozen"
key = par.fullname + "." + elem
saved[key] = int(getattr(par, elem))
elem = "link"
key = par.fullname + "." + elem
attr = getattr(par, elem)
if attr:
saved[key] = str(attr.fullname)
fd = file(filename, 'w')
fd.write(json.dumps(saved))
fd.close()
def set_parameter_from_dict(par, d, name='name', strict=True):
'''Set Sherpa parameter from a dictionary.
Parameters
----------
par : Sherpa parameter
d : dict
name : string
Can be 'name' (if the dictionary keys do not contain the model name)
or 'fullname' (for dictionary keys like ``mymodel.pos.val``)
strict : bool
If ``True`` this will raise a `KeyError` if any key in the dictionary
starts with the parameter name but there is no corresponding parameter
property.
Example
-------
>>> from sherpa.models import Polynom1D
>>> mdl = Polynom1D('mdl')
>>> vals = {'c0.val': 1.2, 'c0.min': 0.7, 'c2.frozen': False}
>>> set_parameter_from_dict(mdl.c0, vals)
>>> print mdl
mdl
Param Type Value Min Max Units
----- ---- ----- --- --- -----
mdl.c0 thawed 1.2 0.7 3.40282e+38
mdl.c1 frozen 0 -3.40282e+38 3.40282e+38
mdl.c2 frozen 0 -3.40282e+38 3.40282e+38
mdl.c3 frozen 0 -3.40282e+38 3.40282e+38
mdl.c4 frozen 0 -3.40282e+38 3.40282e+38
mdl.c5 frozen 0 -3.40282e+38 3.40282e+38
mdl.c6 frozen 0 -3.40282e+38 3.40282e+38
mdl.c7 frozen 0 -3.40282e+38 3.40282e+38
mdl.c8 frozen 0 -3.40282e+38 3.40282e+38
mdl.offset frozen 0 -3.40282e+38 3.40282e+38
'''
# dicts are mutable. Make a local copy, so the dict on the caller is unmodified.
d = copy(d)
for elem in ["min", "max", "val"]:
key = getattr(par, name) + "." + elem
if key in d:
setattr(par, elem, d.pop(key))
elem = "frozen"
key = getattr(par, name) + "." + elem
if key in d:
setattr(par, elem, bool(int(d.pop(key))))
elem = "link"
key = getattr(par, name) + "." + elem
attr = str(d.pop(key, ''))
if attr:
mdl, param = attr.split('.')
param = getattr(get_model_component(mdl), param)
setattr(par, elem, param)
if strict:
for k in d.keys():
if k.startswith(getattr(par, name)):
raise KeyError('The following key is not understood: {0} - Did you mean {1}?'.format(k, k + '.val'))
def load_pars(filename, modcomps=[]):
"""
Load Sherpa model parameter attributes from an ASCII file
and set the input model components with the parameter attributes.
`filename` ASCII file name
`modcomps` list of model components (strings or objects) to load
:author: Brian Refsdal
See `save_pars` for an example.
"""
if not isinstance(filename, basestring):
raise TypeError("filename '%s' is not a string" % str(filename))
if not os.path.isfile(filename):
raise IOError("file '%s' does not exist" % str(filename))
fd = open(filename, 'r')
saved = json.loads(fd.readline().strip())
fd.close()
for comp in modcomps:
for par in get_model_component(comp).pars:
set_parameter_from_dict(par, saved, name='fullname')
def copy_pars(oldcomp, newcomp, sametype=True):
"""copy parameters from one component to an onther
Both components need to be of the same type, e.g. both are gaus1d models
This routine then copies `val`, `max`, `min`, `frozen` and `link` values.
Example:
>>> from sherpa.astro.ui import *
>>> set_model(gauss1d.g1 + gauss1d.g2)
>>> g1.pos.min = 0.
>>> copy_pars(g1, g2)
Parameters
----------
:param oldcomp: Sherpa model component
component with original values
:param newcomp: Sherpa model component
values of this component will be set
TBD: replace get_model_component(oldcomp).pars with some way that iterates over names, so that parameters can be copied between two line types, even if pos is once the first and once the second parameter.
"""
if sametype:
if not (type(oldcomp) == type(newcomp)):
raise TypeError('Old and new model component must be of same type')
#
for parold, parnew in zip(oldcomp.pars, newcomp.pars):
# min cannot be above max.
# set to -+inf to avoid problems with previously set pars
setattr(parnew, "min", getattr(parnew, "hard_min"))
setattr(parnew, "max", getattr(parnew, "hard_max"))
for elem in ["val", "min", "max", "frozen", "link"]:
setattr(parnew, elem, getattr(parold, elem))
def get_model_parts(id = None):
'''obtain a list of strings for sherpa models
Iterate through all components which are part of the Sherpa model
and return their identifiers. Ignore all composite models.
Example
-------
>>> from sherpa.ui import *
>>> load_arrays(1, [1,2,3], [1,2,3]) # Set some dummy data
>>> set_model('const1d.c + gauss1d.lineg1 + gauss1d.lineg2 + gauss1d.lineg3')
>>> show_model() # doctest: +SKIP
Model: 1
(((const1d.c + gauss1d.lineg1) + gauss1d.lineg2) + gauss1d.lineg3)
...
>>> get_model_parts() # doctest: +SKIP
{'c', 'lineg1', 'lineg2', 'lineg3'}
'''
try:
return set([par.modelname for par in get_model(id).pars])
except IdentifierErr:
return set([])
|
mit
| 5,445,088,446,991,960,000 | 31.506787 | 208 | 0.577255 | false |
andrewyoung1991/supriya
|
supriya/tools/pendingugentools/VOsc.py
|
1
|
3690
|
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.PureUGen import PureUGen
class VOsc(PureUGen):
r'''
::
>>> vosc = ugentools.VOsc.ar(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc
VOsc.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = None
__slots__ = ()
_ordered_input_names = (
'bufpos',
'frequency',
'phase',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
bufpos=None,
frequency=440,
phase=0,
):
PureUGen.__init__(
self,
calculation_rate=calculation_rate,
bufpos=bufpos,
frequency=frequency,
phase=phase,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
bufpos=None,
frequency=440,
phase=0,
):
r'''Constructs an audio-rate VOsc.
::
>>> vosc = ugentools.VOsc.ar(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc
VOsc.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
bufpos=bufpos,
frequency=frequency,
phase=phase,
)
return ugen
@classmethod
def kr(
cls,
bufpos=None,
frequency=440,
phase=0,
):
r'''Constructs a control-rate VOsc.
::
>>> vosc = ugentools.VOsc.kr(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc
VOsc.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
bufpos=bufpos,
frequency=frequency,
phase=phase,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def bufpos(self):
r'''Gets `bufpos` input of VOsc.
::
>>> vosc = ugentools.VOsc.ar(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc.bufpos
Returns ugen input.
'''
index = self._ordered_input_names.index('bufpos')
return self._inputs[index]
@property
def frequency(self):
r'''Gets `frequency` input of VOsc.
::
>>> vosc = ugentools.VOsc.ar(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc.frequency
440.0
Returns ugen input.
'''
index = self._ordered_input_names.index('frequency')
return self._inputs[index]
@property
def phase(self):
r'''Gets `phase` input of VOsc.
::
>>> vosc = ugentools.VOsc.ar(
... bufpos=bufpos,
... frequency=440,
... phase=0,
... )
>>> vosc.phase
0.0
Returns ugen input.
'''
index = self._ordered_input_names.index('phase')
return self._inputs[index]
|
mit
| 3,098,371,534,855,957,000 | 20.584795 | 64 | 0.441192 | false |
ppizarror/Hero-of-Antair
|
data/doc/mapeditor/_creator.py
|
1
|
2172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Creates the _textures.txt and _images.txt files
#Pablo Pizarro, 2014
#Library imports
import os
import sys
reload(sys)
sys.setdefaultencoding('UTF8') #@UndefinedVariable
#Constant definitions
ACTUAL_FOLDER = str(os.getcwd()).replace("\\","/")+"/"
DELETE = ["_16","_32"]
FILE_TEXTURES_NAME="_textures.txt"
FILE_PASTE_NAME="_paste.txt"
FILE_PASTE_COM = "#Items\n"
FOLDER_ITEM = "DATA_IMAGES_ITEMS"
RESTRICTED = ["vacio","no_lw","no_rw","no_casco","no_pantalon","no_chaleco","no_botas"]
VALID_FILE = "gif"
#List the files in the current directory
archivos = os.listdir(ACTUAL_FOLDER)
#strip the strings listed in DELETE
for k in range(len(archivos)):
archivos[k]= str(archivos[k]).replace(DELETE[0],"").replace(DELETE[1],"")
#valid files
archivos_validos = []
#walk the files and append them to archivos_validos
for k in range(len(archivos)):
if VALID_FILE in archivos[k] and archivos[k].replace(".gif","") not in archivos_validos: archivos_validos.append(archivos[k].replace(".gif",""))
#sort the list
archivos_validos.sort()
#remove the restricted files
for r in RESTRICTED:
try: archivos_validos.remove(r)
except: pass
#generate a text file
archivo = open(FILE_TEXTURES_NAME,"w")
for i in archivos_validos:
archivo.write(i+"\n")
archivo.close() #close the file
#List the files in the current directory again
archivos = os.listdir(ACTUAL_FOLDER)
#list of links for self.images in HOA
links = []
#walk the files and check whether each one is an image
for fil in archivos:
if VALID_FILE in fil:
links.append("\t\t\""+fil.replace("."+VALID_FILE,"")+"\":PhotoImage(file="+FOLDER_ITEM+"+\""+fil+"\"),\\")
#add non-valid (placeholder) images
links.append("\t\t\"vacio_16\":PhotoImage(data=\"R0lGODlhEAAQAIAAAP///wAAACH5BAEAAAEALAAAAAAQABAAAAIOjI+py+0Po5y02ouzPgUAOw==\"),\\")
#sort the list
links.sort()
#insert the header comment
links.insert(0, FILE_PASTE_COM)
archivo2 = open(FILE_PASTE_NAME,"w")
for i in links:
archivo2.write(i+"\n")
archivo2.close()
|
gpl-2.0
| 8,161,869,249,285,306,000 | 26.973333 | 148 | 0.681253 | false |
dunkhong/grr
|
grr/server/grr_response_server/gui/api_integration_tests/flow_test.py
|
1
|
12185
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for API client and flows-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import threading
import time
import zipfile
from absl import app
from future.builtins import range
from grr_api_client import errors as grr_api_errors
from grr_api_client import utils as grr_api_utils
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server.databases import db
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_integration_test_lib
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class ApiClientLibFlowTest(api_integration_test_lib.ApiIntegrationTest):
"""Tests flows-related part of GRR Python API client library."""
def testSearchWithNoClients(self):
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testSearchClientsWith2Clients(self):
client_ids = sorted(self.SetupClients(2))
clients = sorted(
self.api.SearchClients(query="."), key=lambda c: c.client_id)
self.assertLen(clients, 2)
for i in range(2):
self.assertEqual(clients[i].client_id, client_ids[i])
self.assertEqual(clients[i].data.urn, "aff4:/%s" % client_ids[i])
def testListFlowsFromClientRef(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
flows = list(self.api.Client(client_id=client_id).ListFlows())
self.assertLen(flows, 1)
self.assertEqual(flows[0].client_id, client_id)
self.assertEqual(flows[0].flow_id, flow_id)
self.assertEqual(flows[0].data.flow_id, flow_id)
def testListFlowsFromClientObject(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
client = self.api.Client(client_id=client_id).Get()
flows = list(client.ListFlows())
self.assertLen(flows, 1)
self.assertEqual(flows[0].client_id, client_id)
self.assertEqual(flows[0].flow_id, flow_id)
self.assertEqual(flows[0].data.flow_id, flow_id)
def testCreateFlowWithUnicodeArguments(self):
unicode_str = "🐊 🐢 🦎 🐍"
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex=unicode_str, fetch_binaries=True)
client_ref = self.api.Client(client_id=client_id)
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
got_flow = client_ref.Flow(flow_id=result_flow.flow_id).Get()
self.assertEqual(got_flow.args.filename_regex, unicode_str)
def testCreateFlowFromClientRef(self):
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertEmpty(flows)
client_ref = self.api.Client(client_id=client_id)
client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].args, args)
def testCreateFlowFromClientObject(self):
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertEmpty(flows)
client = self.api.Client(client_id=client_id).Get()
client.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].args, args)
def testRunInterrogateFlow(self):
client_id = self.SetupClient(0)
client_ref = self.api.Client(client_id=client_id)
result_flow = client_ref.Interrogate()
self.assertEqual(result_flow.data.client_id, client_id)
self.assertEqual(result_flow.data.name, "Interrogate")
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].flow_class_name, "Interrogate")
def testListResultsForListProcessesFlow(self):
process = rdf_client.Process(
pid=2,
ppid=1,
cmdline=["cmd.exe"],
exe="c:\\windows\\cmd.exe",
ctime=1333718907167083,
RSS_size=42)
client_id = self.SetupClient(0)
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(processes.ListProcesses),
client_id=client_id,
client_mock=action_mocks.ListProcessesMock([process]),
token=self.token)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id)
results = list(result_flow.ListResults())
self.assertLen(results, 1)
self.assertEqual(process.AsPrimitiveProto(), results[0].payload)
def testWaitUntilDoneReturnsWhenFlowCompletes(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
self.assertEqual(result_flow.data.state, result_flow.data.RUNNING)
def ProcessFlow():
time.sleep(1)
client_mock = action_mocks.ListProcessesMock([])
flow_test_lib.FinishAllFlowsOnClient(client_id, client_mock=client_mock)
t = threading.Thread(target=ProcessFlow)
t.start()
try:
f = result_flow.WaitUntilDone()
self.assertEqual(f.data.state, f.data.TERMINATED)
finally:
t.join()
def testWaitUntilDoneRaisesWhenFlowFails(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
def ProcessFlow():
time.sleep(1)
flow_base.TerminateFlow(client_id, flow_id, "")
t = threading.Thread(target=ProcessFlow)
t.start()
try:
with self.assertRaises(grr_api_errors.FlowFailedError):
result_flow.WaitUntilDone()
finally:
t.join()
def testWaitUntilDoneRasiesWhenItTimesOut(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
with self.assertRaises(grr_api_errors.PollTimeoutError):
with utils.Stubber(grr_api_utils, "DEFAULT_POLL_TIMEOUT", 1):
result_flow.WaitUntilDone()
def _SetupFlowWithStatEntryResults(self):
client_id = self.SetupClient(0)
# Start a flow. The exact type of the flow doesn't matter:
# we'll add results manually.
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
data_store.REL_DB.WriteFlowResults([
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar1",
pathtype=rdf_paths.PathSpec.PathType.OS))),
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar2",
pathtype=rdf_paths.PathSpec.PathType.OS))),
])
return client_id, flow_id
def testGetFilesArchiveGeneratesCorrectArchive(self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
blob_size = 1024 * 1024 * 4
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "ab")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, blob_data)
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "cd")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar2"]), blob_refs, blob_data)
zip_stream = io.BytesIO()
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
prefix = "%s_flow_ListProcesses_%s" % (client_id, flow_id)
namelist = zip_fd.namelist()
self.assertCountEqual(namelist, [
"%s/MANIFEST" % prefix,
"%s/%s/client_info.yaml" % (prefix, client_id),
"%s/%s/fs/os/foo/bar1" % (prefix, client_id),
"%s/%s/fs/os/foo/bar2" % (prefix, client_id),
])
for info in zip_fd.infolist():
self.assertGreater(info.compress_size, 0)
def testGetFilesArchiveFailsWhenFirstFileBlobIsMissing(self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
_, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, [])
zip_stream = io.BytesIO()
with self.assertRaisesRegex(grr_api_errors.UnknownError,
"Could not find one of referenced blobs"):
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
def testGetFilesArchiveDropsStreamingResponsesWhenSecondFileBlobIsMissing(
self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(1024 * 1024 * 4, "abc")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, blob_data[0:2])
zip_stream = io.BytesIO()
timestamp = rdfvalue.RDFDatetime.Now()
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
with self.assertRaises(zipfile.BadZipfile):
zipfile.ZipFile(zip_stream)
# Check that notification was pushed indicating the failure to the user.
pending_notifications = list(self.api.GrrUser().ListPendingNotifications(
timestamp=timestamp.AsMicrosecondsSinceEpoch()))
self.assertLen(pending_notifications, 1)
self.assertEqual(
pending_notifications[0].data.notification_type,
int(rdf_objects.UserNotification.Type
.TYPE_FILE_ARCHIVE_GENERATION_FAILED))
self.assertEqual(pending_notifications[0].data.reference.type,
pending_notifications[0].data.reference.FLOW)
self.assertEqual(pending_notifications[0].data.reference.flow.client_id,
client_id)
self.assertEqual(pending_notifications[0].data.reference.flow.flow_id,
flow_id)
# TODO(user): These unit tests should be moved to a dedicated GrrApi test.
def testClientReprContainsClientId(self):
client_id = self.SetupClient(0)
client_ref = self.api.Client(client_id=client_id)
self.assertIn(client_id, repr(client_ref))
self.assertIn(client_id, repr(client_ref.Get()))
def testFlowReprContainsMetadata(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
flow_ref = self.api.Client(client_id=client_id).Flow(flow_id)
self.assertIn(client_id, repr(flow_ref))
self.assertIn(flow_id, repr(flow_ref))
flow = flow_ref.Get()
self.assertIn(client_id, repr(flow))
self.assertIn(flow_id, repr(flow))
self.assertIn("ListProcesses", repr(flow))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
apache-2.0
| 3,207,579,086,705,398,300 | 35.121662 | 80 | 0.691366 | false |
mrhanky17/irc3
|
examples/async_command.py
|
1
|
1077
|
# -*- coding: utf-8 -*-
from irc3.plugins.command import command
from irc3.compat import asyncio
from irc3.compat import Queue
import irc3
@irc3.plugin
class AsyncCommands(object):
"""Async commands example. This is what it's look like on irc::
<gawel> !get
<gawel> !put item
<irc3> items added to queue
<irc3> item
"""
def __init__(self, bot):
self.bot = bot
self.queue = Queue()
@command
def put(self, mask, target, args):
"""Put items in queue
%%put <words>...
"""
for w in args['<words>']:
self.queue.put_nowait(w)
yield 'items added to queue'
@command
@asyncio.coroutine
def get(self, mask, target, args):
"""Async get items from the queue
%%get
"""
messages = []
message = yield from self.queue.get()
messages.append(message)
while not self.queue.empty():
message = yield from self.queue.get()
messages.append(message)
return messages
|
mit
| 8,905,069,654,674,450,000 | 22.933333 | 67 | 0.558032 | false |
paulpflug/VASPmanager
|
VASPmanager/pbs.py
|
1
|
1256
|
import vaspconfig as vconf
import re
import subprocess as sub
def makescript(path,filename,nodes,ppn,queue,name,email,outpath,memorymultiplikator=1,hours=1,parameters=""):
temp = """\
#!/bin/bash
#
#PBS -l nodes=%d:ppn=%d%s
#PBS -l mem=%dmb
#PBS -l walltime=%d:00:00
#PBS -q %s
#PBS -j oe
#PBS -N %s
#PBS -M %s
#PBS -o %s
#
cd %s
%s
"""
return temp %(nodes,ppn,parameters,nodes*vconf.getnpar(ppn)*2048*memorymultiplikator,hours,queue,name,email,outpath,path,filename)
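# Illustrative call (placeholder paths, queue and address):
#   script = makescript("/scratch/run01", "mpirun vasp", nodes=2, ppn=8, queue="batch",
#                       name="relax", email="user@example.com", outpath="/scratch/run01/job.out")
#   open("job.pbs", "w").write(script)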
def qsub(file):
out = sub.Popen(["qsub" , file],stdout=sub.PIPE)
out, err = out.communicate()
jobid = re.search("\d{5,8}",out).group(0)
print "Job: "+jobid+" gestartet"
return jobid
def qcheck(jobdict):
finishedjobs = []
for nr,job in jobdict.iteritems():
out = sub.Popen(["qstat", job],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = out.communicate()
if err != "":
finishedjobs.append(nr)
return finishedjobs
def qrem(jobs):
for job in jobs:
out= sub.Popen(["qdel", job],stdout=sub.PIPE,stderr=sub.PIPE)
out.wait()
def deljobidfromfile(file,jobid):
s=open(file,"r").read()
s=re.sub(str(jobid)+"\n","",s,1)
open(file,"w").write(s)
|
mit
| -531,489,039,031,338,400 | 24.723404 | 134 | 0.609076 | false |
jmpews/torweb
|
app/user/user.py
|
1
|
10595
|
# coding:utf-8
import tornado.websocket
from settings.config import config
from custor.handlers.basehandler import BaseRequestHandler, BaseWebsocketHandler
from custor.decorators import login_required_json, login_required, ppeewwee
from custor.utils import get_cleaned_post_data, get_cleaned_json_data, json_result, get_cleaned_json_data_websocket, TimeUtil
from custor.logger import logger
from db.mysql_model.common import Notification
from db.mysql_model.post import Post, PostReply, CollectPost
from db.mysql_model.user import User, Profile, Follower, ChatMessage
import greenado
class UserProfileHandler(BaseRequestHandler):
"""
user profile
"""
@greenado.groutine
def get(self, user_id, *args, **kwargs):
user = User.get(User.id == user_id)
profile = Profile.get_by_user(user)
posts = Post.select().where(Post.user == user, Post.is_delete == False).limit(10)
postreplys = PostReply.select().where(PostReply.user == user).limit(10)
collectposts = CollectPost.select().where(CollectPost.user == user).limit(10)
who_follow = Follower.select(Follower.follower).where(Follower.user == user)
follow_who = Follower.select(Follower.user).where(Follower.follower == user)
        # whether to show the follow button
is_follow = True if Follower.is_follow(user, self.current_user) else False
is_online = True if WebsocketChatHandler.is_online(user.username) else False
self.render('user/profile.html',
user=user,
who_follow=who_follow,
follow_who=follow_who,
profile=profile,
posts=posts,
postreplys=postreplys,
is_follow=is_follow,
is_online=is_online,
collectposts=collectposts)
class UserProfileEditHandler(BaseRequestHandler):
"""
user profile edit
"""
@greenado.groutine
@login_required
def get(self, *args, **kwargs):
user = self.current_user
profile = Profile.get_by_user(user)
userinfo = {}
userinfo['username'] = user.username
userinfo['weibo'] = profile.weibo
self.render('user/profile_edit.html', userinfo=userinfo)
@greenado.groutine
@login_required
def post(self, *args, **kwargs):
post_data = get_cleaned_post_data(self, ['weibo',])
user = self.current_user
profile = Profile.get_by_user(user)
profile.weibo = post_data['weibo']
profile.save()
self.write(json_result(0, {'user': user.username}))
class UserAvatarEditHandler(BaseRequestHandler):
"""
    edit user avatar
"""
@greenado.groutine
@login_required
def post(self, *args, **kwargs):
user = self.current_user
        # the uploaded file
avatar = self.request.files['avatar'][0]
avatar_file_name = user.username + '.' + avatar['filename'].split('.')[-1]
avatar_file = open(config.avatar_upload_path + avatar_file_name, 'wb')
avatar_file.write(avatar['body'])
user.avatar = avatar_file_name
user.save()
self.redirect('/user/edit')
class UserNotificationHandler(BaseRequestHandler):
"""
user message notification
"""
@greenado.groutine
@login_required
def get(self, *args, **kwargs):
user = self.current_user
profile = Profile.get(Profile.user == user)
notifications = Notification.select().where(Notification.user == user)
self.render('user/profile_notification.html',
profile=profile,
notifications=notifications,
)
class UserFollowerHandler(BaseRequestHandler):
"""
user follower
"""
@greenado.groutine
def get(self, user_id, *args, **kwargs):
user = User.get(User.id == user_id)
who_follow = Follower.select(Follower.follower).where(Follower.user == user)
follow_who = Follower.select(Follower.user).where(Follower.follower == user)
profile = Profile.get_by_user(user)
is_follow = Follower.is_follow(user, self.current_user)
self.render('user/profile_follower.html',
user=user,
profile=profile,
who_follow=who_follow,
follow_who=follow_who,
is_follow=is_follow)
class UserOptHandler(BaseRequestHandler):
"""
user operation set
    Designed similarly to PostReplyOptHandler, in API style
"""
@greenado.groutine
@login_required_json(-3, 'login failed.')
def post(self, *args, **kwargs):
json_data = get_cleaned_json_data(self, ['opt', 'data'])
data = json_data['data']
opt = json_data['opt']
        # follow a user
if opt == 'follow-user':
try:
user = User.get(User.id == data['user'])
except:
                self.write(json_result(1, 'No such user'))
return
Follower.create(user=user, follower=self.current_user)
self.write(json_result(0, 'success'))
        # Unfollow a user
elif opt == 'unfollow-user':
try:
user = User.get(User.id == data['user'])
except:
                self.write(json_result(1, 'No such user'))
return
try:
f = Follower.get(Follower.user == user, Follower.follower == self.current_user)
except:
                self.write(json_result(1, 'Not following this user yet'))
return
f.delete_instance()
self.write(json_result(0, 'success'))
        # Update the avatar
elif opt == 'update-avatar':
import base64
avatar = base64.b64decode(data['avatar'])
user = self.current_user
avatar_file_name = user.username + '.png'
avatar_file = open(config.avatar_upload_path + avatar_file_name, 'wb')
avatar_file.write(avatar)
user.avatar = avatar_file_name
user.save()
self.write(json_result(0, 'success'))
        # Update the forum theme
elif opt == 'update-theme':
user = self.current_user
user.theme = data['theme']
user.save()
self.write(json_result(0, 'success'))
        # Fetch chat history
elif opt == 'realtime-chat':
user = self.current_user
other_id = data['other']
other = User.get(User.id == other_id)
result = ChatMessage.get_recent_chat_message(user, other)
self.write(json_result(0, result))
        # Send a message
elif opt == 'chat-to' :
user = self.current_user
other_id = data['other']
other = User.get(User.id == other_id)
content = data['content']
ChatMessage.create(me=user, other=other, content=content)
self.write(json_result(0, 'success'))
else:
            self.write(json_result(1, 'Unsupported opt'))
class WebsocketChatHandler(BaseWebsocketHandler):
"""
    Real-time chat over websocket
"""
# redis ?
clients = {}
def check_origin(self, origin):
return True
@greenado.groutine
@ppeewwee
def open(self, *args, **kwargs):
user = self.current_user
if user and user.username not in WebsocketChatHandler.clients.keys():
WebsocketChatHandler.clients[user.username] = self
# self.write_message(json_result(2,ChatMessage.get_not_read_log(user)))
@greenado.groutine
@ppeewwee
def on_close(self):
user = self.current_user
if user.username in WebsocketChatHandler.clients.keys():
WebsocketChatHandler.clients.pop(user.username)
else:
logger.debug("[{0}] not in Websocket.clients, but close.".format(user.username))
@staticmethod
def is_online(username):
w = WebsocketChatHandler.clients.get(username, False)
return w
@ppeewwee
def on_message(self, message):
json_data = get_cleaned_json_data_websocket(message, ['opt', 'data'])
data = json_data['data']
opt = json_data['opt']
if opt == 'update_recent_user_list':
logger.debug('update_recent_user_list...')
recent_user_list = ChatMessage.get_recent_user_list(self.current_user)
self.write_message(json_result(0,{'code': 'recent_user_list', 'data': recent_user_list}))
elif opt == 'update_recent_user_list_and_open':
recent_user_list = ChatMessage.get_recent_user_list(self.current_user)
self.write_message(json_result(0,recent_user_list))
elif opt == 'send_message':
other_id = data['user_id']
other = User.get(User.id == other_id)
content = data['content']
cl = ChatMessage.create(sender=self.current_user, receiver=other, content=content)
self.write_message(json_result(0, {'code': 'receive_a_message',
'data': {
'id': other.id,
'name': other.username,
'avatar': other.avatar,
'msg': ['>', cl.content, TimeUtil.datetime_delta(cl.time)]}}))
# send to other user
other_websocket = WebsocketChatHandler.is_online(other.username)
if other_websocket:
other_websocket.write_message(json_result(0, {'code': 'receive_a_message',
'data': {
'id': self.current_user.id,
'avatar': self.current_user.avatar,
'name': self.current_user.username,
'msg': ['<', cl.content, TimeUtil.datetime_delta(cl.time)]}}))
elif opt == 'update_recent_message_list':
other_id = data['user_id']
other = User.get(User.id == other_id)
recent_message = ChatMessage.get_recent_chat_message(self.current_user, other)
logger.debug(recent_message)
self.write_message(json_result(0,{'code': 'recent_message_list', 'data':recent_message}))
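# Minimal illustration of the request body shape the JSON handlers above expect:
# UserOptHandler and the websocket handler both read {'opt': <name>, 'data': {...}}.
# The user id below is a made-up example value, not one taken from a real database.
EXAMPLE_FOLLOW_USER_PAYLOAD = {
    'opt': 'follow-user',
    'data': {'user': 1},
}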
|
mit
| -5,671,329,620,418,602,000 | 37.127737 | 126 | 0.560448 | false |
bgarnaat/lj_401d5
|
lj_401d5/views.py
|
1
|
1251
|
from pyramid.response import Response
# from pyramid.view import view_config
import os
# @view_config(route_name='home', renderer='templates/mytemplate.pt')
# def my_view(request):
# return {'project': 'lj_401d5'}
HERE = os.path.dirname(__file__)
# def home_page(request):
# imported_text = open(os.path.join(HERE, 'sample.txt')).read()
# return Response(imported_text)
# return Response("This is a home page.")
def includeme(config):
# config.add_view(home_page, route_name='home')
config.add_view(lj_index, route_name='lj_index')
config.add_view(lj_detail, route_name='lj_detail')
config.add_view(lj_create, route_name='lj_create')
config.add_view(lj_update, route_name='lj_update')
def lj_index(request):
imported_text = open(os.path.join(HERE, 'lj_index.html')).read()
return Response(imported_text)
def lj_detail(request):
imported_text = open(os.path.join(HERE, 'lj_detail.html')).read()
return Response(imported_text)
def lj_create(request):
imported_text = open(os.path.join(HERE, 'lj_create.html')).read()
return Response(imported_text)
def lj_update(request):
imported_text = open(os.path.join(HERE, 'lj_update.html')).read()
return Response(imported_text)
|
mit
| 6,786,088,640,510,532,000 | 26.8 | 69 | 0.685052 | false |
shengshuyang/StanfordCNNClass
|
shadow_project/extract_patches.py
|
1
|
1314
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os
from math import sqrt
from os.path import expanduser
def extract_patches(path, filename, out_path, patch_size, stride, visualize):
img = mpimg.imread(path+filename)
nRows, nCols, nColor = img.shape
psx, psy = patch_size
patches = []
for r in xrange(psy/2+1, nRows - psy/2 - 1, stride):
for c in xrange(psx/2+1, nCols - psx/2 - 1, stride):
patches.append(img[r-psy/2 : r + psy/2, c-psx/2 : c+psx/2, :])
grid_size = int(sqrt(len(patches)))
name, ext = os.path.splitext(filename)
for pos in xrange(len(patches)):
plt.imsave(out_path + name + "_" + str(pos) + ext, patches[pos])
if not visualize:
return
for pos in xrange(len(patches)):
if pos + 1 < grid_size ** 2:
plt.subplot(grid_size, grid_size, pos+1)
plt.imshow(patches[pos])
plt.axis('off')
if __name__ == "__main__":
home = expanduser("~")
nyu_path = home+'/IGNORE_NYU/jpgs/'
#extract_patches(, [16,16], 100, True)
for root, dirs, files in os.walk(nyu_path, topdown=False):
for filename in files:
extract_patches(nyu_path, filename, nyu_path+"/patches/", [64,64], 100, False)
|
gpl-3.0
| -562,036,518,576,350,340 | 31.073171 | 90 | 0.593607 | false |
dreipol/meta-tagger
|
meta_tagger/migrations/0001_initial.py
|
1
|
1394
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import filer.fields.image
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
('filer', '0002_auto_20150606_2003'),
]
operations = [
migrations.CreateModel(
name='MetaTagPageExtension',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('robots_indexing', models.BooleanField(default=True, verbose_name='Allow Indexing')),
('robots_following', models.BooleanField(default=True, verbose_name='Allow Following')),
('extended_object', models.OneToOneField(editable=False, to='cms.Page', on_delete=django.db.models.deletion.CASCADE)),
('og_image', filer.fields.image.FilerImageField(null=True, to='filer.Image', blank=True, verbose_name='Open Graph image', on_delete=django.db.models.deletion.CASCADE)),
('public_extension', models.OneToOneField(editable=False, null=True, to='meta_tagger.MetaTagPageExtension', related_name='draft_extension', on_delete=django.db.models.deletion.CASCADE)),
],
options={
'verbose_name': 'Meta Tag',
},
),
]
|
bsd-3-clause
| 3,677,816,774,778,833,400 | 43.967742 | 202 | 0.632712 | false |
hasadna/OpenTrain
|
webserver/opentrain/algorithm/stops.py
|
1
|
2111
|
import os
os.environ['DJANGO_SETTINGS_MODULE']='opentrain.settings'
from scipy import spatial
import os
import config
import numpy as np
import copy
import config
import timetable.services
from utils import *
NOSTOP_ID = -1
class Stop(object):
def __init__( self, id_, name, coords ) :
self.id = id_
self.name = name
self.coords = coords
def __str__(self):
return str(self.id) + ' ' + self.name
class StopList(dict):
def __init__(self) :
super(StopList, self).__init__()
stops = timetable.services.get_all_stops_ordered_by_id()
assert len(stops) > 0, 'No stops in DB'
stops = list(stops)
self.id_list = []
stop_coords = []
for i, gtfs_stop in enumerate(stops):
coord = (gtfs_stop.stop_lat, gtfs_stop.stop_lon)
stop = Stop(gtfs_stop.gtfs_stop_id, gtfs_stop.stop_name, coord)
stop_coords.append(coord)
self.id_list.append(stop.id)
self[stop.id] = stop
coord = (None, None)
stop = Stop(NOSTOP_ID, 'nostop', coord)
self[stop.id] = stop
self.id_list.append(NOSTOP_ID)
stop_coords = np.array(stop_coords)
self.point_tree = spatial.cKDTree(stop_coords)
def __getstate__(self):
ret = self.__dict__.copy()
ret['stop_coords'] = self.point_tree.data
del ret['point_tree']
return ret
def __setstate__(self, dict):
self.point_tree = spatial.cKDTree(dict['stop_coords'])
del dict['stop_coords']
self.__dict__.update(dict)
def query_stops(self, coords, accuracies) :
res_coord_int_ids = query_coords(self.point_tree, coords, accuracies)
if len(res_coord_int_ids) == 1:
res_coord_int_ids = [res_coord_int_ids]
res_coord_ids = [self.id_list[i[0]] if i else NOSTOP_ID for i in res_coord_int_ids]
return res_coord_ids
def get_all_stops():
all_stops = StopList()
return all_stops
all_stops = get_all_stops()
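# Hedged usage sketch of StopList.query_stops(): the coordinate pairs and accuracy
# values below are invented for illustration only; real callers pass GPS fixes
# reported from devices on the trains. Guarded so it runs only when this module is
# executed directly.
if __name__ == '__main__':
    example_coords = [(32.0554, 34.7595), (31.8928, 34.8113)]  # assumed (lat, lon) pairs
    example_accuracies = [30.0, 50.0]  # assumed accuracy radii, in meters
    print(all_stops.query_stops(example_coords, example_accuracies))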
|
bsd-3-clause
| 7,088,727,018,380,412,000 | 27.917808 | 91 | 0.568925 | false |
jetskijoe/SickGear
|
sickbeard/providers/hd4free.py
|
1
|
2970
|
# coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import time
from . import generic
from sickbeard.helpers import tryInt
class HD4FreeProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, 'HD4Free')
self.url_base = 'https://hd4free.xyz/'
self.urls = {'search': self.url_base + 'searchapi.php',
'get': self.url_base + 'download.php?torrent=%s&torrent_pass=%s'}
self.url = self.url_base
self.username, self.api_key, self.freeleech, self.minseed, self.minleech = 5 * [None]
def _authorised(self, **kwargs):
return self._check_auth()
def _search_provider(self, search_params, age=0, **kwargs):
results = []
if not self._authorised():
return results
items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}
params = {'username': self.username, 'apikey': self.api_key,
'tv': 'true', 'fl': ('true', None)[not self.freeleech]}
for mode in search_params.keys():
for search_string in search_params[mode]:
params['search'] = '+'.join(search_string.split())
json_resp = self.get_url(self.urls['search'], params=params, json=True)
cnt = len(items[mode])
for k, item in json_resp.items():
if 'error' == k or not item.get('total_results'):
break
seeders, leechers, size = [tryInt(n, n) for n in [
item.get(x) for x in 'seeders', 'leechers', 'size']]
if self._peers_fail(mode, seeders, leechers):
continue
title = item.get('release_name')
tid, tpass = [item.get('torrent' + x) for x in 'id', 'pass']
download_url = all([tid, tpass]) and (self.urls['get'] % (tid, tpass))
if title and download_url:
items[mode].append((title, download_url, seeders, self._bytesizer('%smb' % size)))
self._log_search(mode, len(items[mode]) - cnt, self.session.response['url'])
time.sleep(1.1)
results = self._sort_seeding(mode, results + items[mode])
return results
provider = HD4FreeProvider()
|
gpl-3.0
| -7,057,446,645,199,407,000 | 36.594937 | 106 | 0.581481 | false |
audip/lunr
|
testlunr/unit/__init__.py
|
2
|
3386
|
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempfile import NamedTemporaryFile
from testlunr.functional import Struct
from contextlib import contextmanager
from webob import Request
import unittest
import urllib
import json
import os
import logging
from StringIO import StringIO
from lunr.common.logger import LunrLoggerAdapter, local
@contextmanager
def temp_disk_file(body=''):
path = None
try:
with NamedTemporaryFile('w', delete=False) as f:
path = f.name
f.write(body)
yield path
finally:
if path:
os.unlink(path)
@contextmanager
def patch(target, attr, new):
"""
Run in context with patched attribute on target.
:param target: real object to patch
:param attr: name of attribute to patch, a string
:param new: mock or stub to use in place
"""
original = getattr(target, attr)
setattr(target, attr, new)
try:
yield
finally:
setattr(target, attr, original)
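# Hedged, self-contained illustration of the patch() helper above; os.getcwd is just
# a convenient stand-in target, not something the real Lunr tests monkey-patch.
def _example_patch_usage():
    with patch(os, 'getcwd', lambda: '/tmp'):
        return os.getcwd()  # returns '/tmp' while inside the context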
class WsgiTestBase(unittest.TestCase):
def request(self, uri, method='GET', params=None):
encoded = urllib.urlencode(params or {})
body = ''
req = Request.blank(uri)
if method in ('PUT', 'POST'):
body = encoded
req.content_type = 'application/x-www-form-urlencoded'
else:
uri = "%s?%s" % (uri, encoded)
req.method = method
req.body = body
resp = self.app(req)
return Struct(code=resp.status_int, body=json.loads(resp.body))
class MockLogger(object):
def __init__(self):
self.local = local
self.log_file = StringIO()
self.logger = None
def get_logger(self, name):
if not self.logger:
logger = logging.getLogger(name)
            logger.setLevel(1)  # capture everything
handler = logging.StreamHandler(self.log_file)
handler.setFormatter(
logging.Formatter('%(name)s:%(levelname)s:%(message)s'))
logger.addHandler(handler)
self.logger = LunrLoggerAdapter(logger)
return self.logger
def pop_log_messages(self):
rv = self.log_file.getvalue()
self.log_file.seek(0)
self.log_file.truncate()
return rv
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
# prevent recursive lookup
logger = object.__getattribute__(self, 'logger')
if hasattr(logger, name):
return getattr(logger, name)
raise
class MockResourceLock(object):
def acquire(self, info):
pass
def remove(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, trace):
pass
|
apache-2.0
| -6,141,713,419,206,304,000 | 25.661417 | 72 | 0.626108 | false |
unreal666/outwiker
|
buildtools/builders/plugins.py
|
2
|
3061
|
# -*- coding: utf-8 -*-
import os
import shutil
import urllib.request
import urllib.error
import urllib.parse
from fabric.api import local, lcd
from .base import BuilderBase
from buildtools.defines import (PLUGINS_DIR,
PLUGINS_LIST)
from buildtools.versions import (readAppInfo,
getPluginVersionsPath,
downloadAppInfo)
class BuilderPlugins(BuilderBase):
"""
Create archives with plug-ins
"""
def __init__(self,
updatedOnly=False,
build_dir=PLUGINS_DIR,
plugins_list=PLUGINS_LIST):
super(BuilderPlugins, self).__init__(build_dir)
self._all_plugins_fname = u'outwiker-plugins-all.zip'
self._plugins_list = plugins_list
self._updatedOnly = updatedOnly
def get_plugins_pack_path(self):
return self._getSubpath(self._all_plugins_fname)
def clear(self):
super(BuilderPlugins, self).clear()
self._remove(self.get_plugins_pack_path())
def _build(self):
# Path to archive with all plug-ins
full_archive_path = self.get_plugins_pack_path()
for plugin in self._plugins_list:
# Path to plugin.xml for current plugin
xmlplugin_path = getPluginVersionsPath(plugin)
localAppInfo = readAppInfo(xmlplugin_path)
assert localAppInfo is not None
assert localAppInfo.currentVersion is not None
skip_plugin = False
# Check for update
if self._updatedOnly:
url = localAppInfo.updatesUrl
try:
siteappinfo = downloadAppInfo(url)
if localAppInfo.currentVersion == siteappinfo.currentVersion:
skip_plugin = True
except (urllib.error.URLError, urllib.error.HTTPError):
pass
# Archive a single plug-in
if not skip_plugin:
version = str(localAppInfo.currentVersion)
archive_name = u'{}-{}.zip'.format(plugin, version)
# Subpath to current plug-in archive
plugin_dir_path = self._getSubpath(plugin)
# Path to future archive
archive_path = self._getSubpath(plugin, archive_name)
os.mkdir(plugin_dir_path)
shutil.copy(xmlplugin_path, plugin_dir_path)
# Archive a single plug-in
with lcd("plugins/{}".format(plugin)):
local('7z a -r -aoa -xr!*.pyc -xr!.ropeproject -x!doc "{}" ./*'.format(archive_path))
# Add a plug-in to full archive
with lcd("plugins/{}".format(plugin)):
local('7z a -r -aoa -xr!*.pyc -xr!.ropeproject -w../ "{}" ./*'.format(full_archive_path))
def _getSubpath(self, *args):
"""
Return subpath inside current build path (inside 'build' subpath)
"""
return os.path.join(self.build_dir, *args)
|
gpl-3.0
| -6,392,681,947,784,442,000 | 33.784091 | 105 | 0.564521 | false |
JohnVinyard/zounds
|
zounds/ui/cli.py
|
1
|
2224
|
import argparse
class BasePartialArgumentParser(argparse.ArgumentParser):
def __init__(self, groupname, group_description):
super(BasePartialArgumentParser, self).__init__(add_help=False)
self.group = self.add_argument_group(groupname, group_description)
def add_argument(self, *args, **kwargs):
self.group.add_argument(*args, **kwargs)
class ObjectStorageSettings(BasePartialArgumentParser):
def __init__(self):
super(ObjectStorageSettings, self).__init__(
'object_storage',
'Rackspace object storage settings for model checkpoint storage')
self.add_argument(
'--object-storage-region',
help='the rackspace object storage region',
default='DFW')
self.add_argument(
'--object-storage-username',
help='rackspace cloud username',
required=True)
self.add_argument(
'--object-storage-api-key',
help='rackspace cloud api key',
required=True)
class AppSettings(BasePartialArgumentParser):
def __init__(self):
super(AppSettings, self).__init__(
'app',
'In-browser REPL settings')
self.add_argument(
'--app-secret',
help='app password. If not provided, REPL is public',
required=False)
self.add_argument(
'--port',
help='The port on which the In-Browser REPL app should listen',
default=8888)
class NeuralNetworkTrainingSettings(BasePartialArgumentParser):
def __init__(self):
super(NeuralNetworkTrainingSettings, self).__init__(
'training',
'Common settings for training neural networks')
self.add_argument(
'--epochs',
help='how many passes over the data should be made during training',
type=int)
self.add_argument(
'--batch-size',
help='how many examples constitute a minibatch?',
type=int,
default=64)
self.add_argument(
'--nsamples',
help='the number of samples to draw from the database for training',
type=int)
|
mit
| -4,572,883,692,200,368,600 | 33.75 | 80 | 0.589478 | false |
tensorflow/models
|
official/nlp/modeling/models/bert_pretrainer.py
|
1
|
11067
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Pre-training model."""
# pylint: disable=g-classes-have-attributes
import collections
import copy
from typing import List, Optional
from absl import logging
import gin
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@tf.keras.utils.register_keras_serializable(package='Text')
class BertPretrainer(tf.keras.Model):
"""BERT pretraining model.
[Note] Please use the new `BertPretrainerV2` for your projects.
The BertPretrainer allows a user to pass in a transformer stack, and
instantiates the masked language model and classification networks that are
used to create the training objectives.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output.
num_classes: Number of classes to predict from the classification network.
num_token_predictions: Number of tokens to predict from the masked LM.
embedding_table: Embedding table of a network. If None, the
"network.get_embedding_table()" is used.
activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
network,
num_classes,
num_token_predictions,
embedding_table=None,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a copy of the network inputs for use
# when we construct the Model object at the end of init. (We keep a copy
# because we'll be adding another tensor to the copy later.)
network_inputs = network.inputs
inputs = copy.copy(network_inputs)
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
# Note that, because of how deferred construction happens, we can't use
# the copy of the list here - by the time the network is invoked, the list
# object contains the additional input added below.
sequence_output, cls_output = network(network_inputs)
# The encoder network may get outputs from all layers.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
if isinstance(cls_output, list):
cls_output = cls_output[-1]
sequence_output_length = sequence_output.shape.as_list()[1]
if sequence_output_length is not None and (sequence_output_length <
num_token_predictions):
raise ValueError(
"The passed network's output length is %s, which is less than the "
'requested num_token_predictions %s.' %
(sequence_output_length, num_token_predictions))
masked_lm_positions = tf.keras.layers.Input(
shape=(num_token_predictions,),
name='masked_lm_positions',
dtype=tf.int32)
inputs.append(masked_lm_positions)
if embedding_table is None:
embedding_table = network.get_embedding_table()
masked_lm = layers.MaskedLM(
embedding_table=embedding_table,
activation=activation,
initializer=initializer,
output=output,
name='cls/predictions')
lm_outputs = masked_lm(
sequence_output, masked_positions=masked_lm_positions)
classification = networks.Classification(
input_width=cls_output.shape[-1],
num_classes=num_classes,
initializer=initializer,
output=output,
name='classification')
sentence_outputs = classification(cls_output)
super(BertPretrainer, self).__init__(
inputs=inputs,
outputs=dict(masked_lm=lm_outputs, classification=sentence_outputs),
**kwargs)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
config_dict = {
'network': network,
'num_classes': num_classes,
'num_token_predictions': num_token_predictions,
'activation': activation,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.encoder = network
self.classification = classification
self.masked_lm = masked_lm
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Text')
@gin.configurable
class BertPretrainerV2(tf.keras.Model):
"""BERT pretraining model V2.
Adds the masked language model head and optional classification heads upon the
transformer encoder.
Args:
encoder_network: A transformer network. This network should output a
sequence output and a classification output.
mlm_activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM. Default
to a Glorot uniform initializer.
classification_heads: A list of optional head layers to transform on encoder
sequence outputs.
customized_masked_lm: A customized masked_lm layer. If None, will create
a standard layer from `layers.MaskedLM`; if not None, will use the
specified masked_lm layer. Above arguments `mlm_activation` and
`mlm_initializer` will be ignored.
name: The name of the model.
Inputs: Inputs defined by the encoder network, plus `masked_lm_positions` as a
dictionary.
Outputs: A dictionary of `lm_output`, classification head outputs keyed by
head names, and also outputs from `encoder_network`, keyed by
`sequence_output` and `encoder_outputs` (if any).
"""
def __init__(
self,
encoder_network: tf.keras.Model,
mlm_activation=None,
mlm_initializer='glorot_uniform',
classification_heads: Optional[List[tf.keras.layers.Layer]] = None,
customized_masked_lm: Optional[tf.keras.layers.Layer] = None,
name: str = 'bert',
**kwargs):
super().__init__(self, name=name, **kwargs)
self._config = {
'encoder_network': encoder_network,
'mlm_initializer': mlm_initializer,
'classification_heads': classification_heads,
'name': name,
}
self.encoder_network = encoder_network
inputs = copy.copy(self.encoder_network.inputs)
self.classification_heads = classification_heads or []
if len(set([cls.name for cls in self.classification_heads])) != len(
self.classification_heads):
raise ValueError('Classification heads should have unique names.')
self.masked_lm = customized_masked_lm or layers.MaskedLM(
embedding_table=self.encoder_network.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
name='cls/predictions')
masked_lm_positions = tf.keras.layers.Input(
shape=(None,), name='masked_lm_positions', dtype=tf.int32)
inputs.append(masked_lm_positions)
self.inputs = inputs
def call(self, inputs):
if isinstance(inputs, list):
logging.warning('List inputs to BertPretrainer are discouraged.')
inputs = dict([
(ref.name, tensor) for ref, tensor in zip(self.inputs, inputs)
])
outputs = dict()
encoder_network_outputs = self.encoder_network(inputs)
if isinstance(encoder_network_outputs, list):
outputs['pooled_output'] = encoder_network_outputs[1]
# When `encoder_network` was instantiated with return_all_encoder_outputs
# set to True, `encoder_network_outputs[0]` is a list containing
# all transformer layers' output.
if isinstance(encoder_network_outputs[0], list):
outputs['encoder_outputs'] = encoder_network_outputs[0]
outputs['sequence_output'] = encoder_network_outputs[0][-1]
else:
outputs['sequence_output'] = encoder_network_outputs[0]
elif isinstance(encoder_network_outputs, dict):
outputs = encoder_network_outputs
else:
raise ValueError('encoder_network\'s output should be either a list '
'or a dict, but got %s' % encoder_network_outputs)
sequence_output = outputs['sequence_output']
# Inference may not have masked_lm_positions and mlm_logits is not needed.
if 'masked_lm_positions' in inputs:
masked_lm_positions = inputs['masked_lm_positions']
outputs['mlm_logits'] = self.masked_lm(
sequence_output, masked_positions=masked_lm_positions)
for cls_head in self.classification_heads:
cls_outputs = cls_head(sequence_output)
if isinstance(cls_outputs, dict):
outputs.update(cls_outputs)
else:
outputs[cls_head.name] = cls_outputs
return outputs
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder_network, masked_lm=self.masked_lm)
for head in self.classification_heads:
for key, item in head.checkpoint_items.items():
items['.'.join([head.name, key])] = item
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
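def _example_build_pretrainer():
  """Hedged usage sketch, not exercised in this module: wires BertPretrainerV2 to a
  deliberately tiny encoder. The encoder hyperparameters are arbitrary illustration
  values; only the constructor call pattern comes from the class docstring above."""
  encoder = networks.BertEncoder(
      vocab_size=100, hidden_size=16, num_layers=2, num_attention_heads=2)
  return BertPretrainerV2(
      encoder_network=encoder,
      mlm_activation=tf.nn.gelu,
      mlm_initializer='glorot_uniform')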
|
apache-2.0
| 2,978,270,782,516,456,400 | 39.390511 | 80 | 0.687991 | false |
mattgodbolt/agner
|
tests/branch.py
|
1
|
2732
|
#!/usr/bin/env python
import os
import subprocess
import sys
from lib.agner import run_test, merge_results, MergeError
SCRAMBLE_BTB = """
; Proven effective at "scrambling" the BTB/BPU for an Arrendale M520
%macro OneJump 0
mov ecx, esi
align 16
%%lp:
dec ecx
jnz %%lp
%endmacro
jmp ScrambleBTB
ScrambleBTB_i:
align 16
%REP 4096
OneJump
%ENDREP
ret
ScrambleBTB:
mov esi, 3
.lp:
call ScrambleBTB_i
dec esi
jnz .lp
"""
def branch_test(name, instr, backwards=False):
extra_begin = ""
extra_end = ""
if backwards:
extra_begin = """
jmp BranchTestEnd - 16
%REP 16
nop
%ENDREP
align 16
jmp BranchTestEnd"""
extra_end = """
align 16
BranchTestEnd:
"""
test_code = """
cmp ebp, ebp
""" + extra_begin + """
%REP 1000
align 16
""" + instr + """
%ENDREP
align 16
""" + extra_end
merge_error = None
# TODO: do we actually need this? If so, should extract and put in agner
for attempt in range(10):
results = None
try:
for counters in ([1, 9, 207, 400], [1, 9, 401, 402], [1, 9, 404]):
results = merge_results(results, run_test(test_code, counters, init_each=SCRAMBLE_BTB))
return results
except MergeError, e:
merge_error = e
raise merge_error
def branch_plot(name, results):
if not results: return
for res in results:
del res['Clock']
del res['Instruct']
del res['Core cyc']
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
fig, ax = plt.subplots()
fig.canvas.set_window_title(name)
num_samples = len(results)
num_counters = len(results[0])
width = 1.0 / (num_counters + 1)
rects = []
color = cm.rainbow(np.linspace(0, 1, num_counters))
for counter_index in range(num_counters):
counter_name = results[0].keys()[counter_index]
xs = np.arange(num_samples) + width * counter_index
ys = [a[counter_name] for a in results]
rects.append(ax.bar(xs, ys, width, color=color[counter_index]))
ax.set_ylabel("Count")
ax.set_xlabel("Run #")
ax.set_title(name)
ax.legend((x[0] for x in rects), results[0].keys())
def add_test(agner, name, instr, backwards=False):
test = lambda: branch_test(name, instr, backwards)
plot = lambda results, alt : branch_plot(name, results)
agner.add_test(name, test, plot)
def add_tests(agner):
add_test(agner, "Ahead not taken", "jne $+4")
add_test(agner, "Behind not taken", "jne $-4")
add_test(agner, "Ahead taken", "je $+4")
add_test(agner, "Behind taken", "je $-16-8", True)
|
gpl-3.0
| 5,906,651,856,811,018,000 | 23.612613 | 103 | 0.599195 | false |
mplucinski/tex-gettext
|
generate.py
|
1
|
1410
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import os.path
import re
import sqlite3
import subprocess
import sys
import time
import translator
VERSION='0.1'
logging.basicConfig(level=logging.DEBUG)
def generate(input, languages=None):
document = translator.Document.load(input)
translations = [translator.Translation(input, 'en_US')]+translator.find_translations(input, languages=languages.split(',') if languages else None)
changed = False
for i in translations:
if i.update(document):
changed = True
if changed:
        sys.stderr.write('Some translations have changed. Please update them and restart the process\n')
sys.exit(1)
outputs = []
for i in translations:
i = i.translate(document)
outputs.append(i.generate())
for i in outputs:
subprocess.check_call(['xdg-open', i])
def main():
parser = argparse.ArgumentParser(description='Documents internationalization tool (version {})'.format(VERSION))
parser.add_argument('--input', action='store',
help='Name of input file (default: input.tex)', default='input.tex')
parser.add_argument('--languages', action='store',
help='List of language codes for which outputs will be generated.'+
'Default list is built from names of found translation files', default=None)
args = parser.parse_args()
generate(input=args.input, languages=args.languages)
if __name__ == '__main__':
main()
|
bsd-2-clause
| -9,037,493,617,593,389,000 | 26.647059 | 147 | 0.734043 | false |
IntegratedAlarmSystem-Group/ias-webserver
|
alarms/models.py
|
1
|
17630
|
import time
import logging
from collections import Counter
from utils.choice_enum import ChoiceEnum
from alarms.connectors import CdbConnector
logger = logging.getLogger(__name__)
class OperationalMode(ChoiceEnum):
""" Operational Mode of a monitor point value. """
STARTUP = 0
INITIALIZATION = 1
CLOSING = 2
SHUTTEDDOWN = 3
MAINTENANCE = 4
OPERATIONAL = 5
DEGRADED = 6
UNKNOWN = 7
MALFUNCTIONING = 8
@classmethod
def options(cls):
""" Return a list of tuples with the valid options. """
return cls.get_choices()
class Value(ChoiceEnum):
""" Value of the Alarm. """
SET_CRITICAL = 4
SET_HIGH = 3
SET_MEDIUM = 2
SET_LOW = 1
CLEARED = 0
@classmethod
def options(cls):
""" Return a list of tuples with the valid options. """
return cls.get_choices()
@classmethod
def unset_options(cls):
""" Return a list of tuples with the valid options. """
return [0]
class Validity(ChoiceEnum):
""" Possible validity states of an Alarm """
RELIABLE = 1
""" The value has been provided in time and the operator can trust what the IAS shows"""
UNRELIABLE = 0
""" The values has not been produced in time either by the IAS Core or due
to network problems or any other reason."""
@classmethod
def options(cls):
""" Returns a list of tuples with the valid options. """
return cls.get_choices()
class AlarmCountManager:
""" Class to manage the counter by view. """
counter_by_view = {}
def reset_counter_by_view(self):
""" Method to clear the counter by view """
self.counter_by_view = {}
def update_counter_by_view_if_new_alarm_in_collection(self, alarm):
""" Increase counter for a new SET UNACK alarm
Note: This method is used in the AlarmCollection
"""
if alarm.is_stored():
views = alarm.views
current_views = self.counter_by_view.keys()
for view in views:
# initialize count if no key
if view not in current_views:
self.counter_by_view[view] = 0
current_views = self.counter_by_view.keys()
# update count
if alarm.value > 0:
if alarm.ack is not True:
# unacknowledged alarm in set status
self.counter_by_view[view] += 1
def update_counter_by_view_if_alarm_is_acknowledged(self, after_ack_alarm, initial_ack_state):
""" Update counter after acknowledgment action """
alarm = after_ack_alarm
if alarm.is_stored():
views = alarm.views
current_views = self.counter_by_view.keys()
for view in views:
# initialize count if no key
if view not in current_views:
self.counter_by_view[view] = 0
current_views = self.counter_by_view.keys()
if alarm.value > 0:
# set alarm
if initial_ack_state is False:
# from unack state
if alarm.ack is True:
# to ack state
self.counter_by_view[view] -= 1
else:
# cleared alarm
if initial_ack_state is False:
# from unack state
if alarm.ack is True:
# to ack state
self.counter_by_view[view] += 0
def update_counter_by_view_if_alarm_is_unacknowledged(
self, after_ack_alarm, initial_ack_state
):
""" Update counter after unacknowledgment action """
alarm = after_ack_alarm
if alarm.is_stored():
views = alarm.views
current_views = self.counter_by_view.keys()
for view in views:
# initialize count if no key
if view not in current_views:
self.counter_by_view[view] = 0
current_views = self.counter_by_view.keys()
if alarm.value > 0:
# set alarm
if initial_ack_state is True:
# from ack state
if alarm.ack is False:
# to unack state
self.counter_by_view[view] += 1
else:
# cleared alarm
if initial_ack_state is True:
# from ack state
if alarm.ack is False:
# to unack state
self.counter_by_view[view] += 0
def update_counter_by_view_if_alarm_has_value_update(
self, alarm, initial_ack_state, transition
):
""" Update counter after value (set or cleared) update """
if alarm.is_stored():
views = alarm.views
current_views = self.counter_by_view.keys()
for view in views:
# initialize count if no key
if view not in current_views:
self.counter_by_view[view] = 0
current_views = self.counter_by_view.keys()
if transition == 'clear-set':
# set alarm
if initial_ack_state is False:
# from ack state
if alarm.ack is False:
# to unack state
self.counter_by_view[view] += 1
if transition == 'set-clear':
# cleared alarm
if initial_ack_state is False:
# from ack state
if alarm.ack is False:
# to unack state
self.counter_by_view[view] -= 1
class AlarmManager(AlarmCountManager):
""" Set of auxiliary methods for the alarm model. """
class Alarm:
""" Alarm generated by some device in the observatory. """
objects = AlarmManager()
def __init__(self, core_timestamp, core_id, running_id, value=0, mode=0,
validity=0, dependencies=[], properties={}, timestamps={},
ack=False, shelved=False, state_change_timestamp=0,
description='', url='', sound='', can_shelve=False, views=[],
stored=False, value_change_timestamp=0,
value_change_transition=[0, 0]):
""" Constructor of the class,
        only executed when a new instance is created.
Receives and validates values for the attributes of the object """
self.core_timestamp = core_timestamp
""" Core timestamp of the alarm """
self.core_id = core_id
""" Core ID of the alarm """
self.running_id = running_id
""" Running ID of the alarm """
self.value = value
""" Value of the alarm """
self.mode = mode
""" Operational mode of the alarm """
self.validity = validity
""" Validity of the alarm """
        self.dependencies = dependencies  # optional
        """ Children Alarms, alarms on which this Alarm depends """
        self.properties = properties  # optional
        """ Properties of the core """
        self.timestamps = timestamps  # optional
""" Timestamps of the core """
self.ack = ack
""" True if the alarm is acknowledged, False if not """
self.shelved = shelved
""" True if the alarm is shelved, False if not """
self.state_change_timestamp = state_change_timestamp
""" Timestamp of the last important (notified) change in the alarm """
self.description = description
""" Description of the alarm """
self.url = url
""" URL to go for documentation of the alarm """
self.sound = sound
""" Sound associated to the alarm """
self.can_shelve = can_shelve
""" Flag that defines weteher or not the alarm can be shelved """
self.views = views # optional
"""List of views for which the alarm must be considered for counting"""
self.stored = stored
""" Flag that defines weteher or not the alarm is stored """
self.value_change_timestamp = value_change_timestamp
""" Timestamp of the last change in the alarm value """
self.value_change_transition = value_change_transition
"""
Transition of the last change in the alarm value
Stored as a list with 2 elements in order: [previous_value, new_value]
"""
def __str__(self):
""" Returns a string representation of the object """
return str(self.core_id) + '=' + str(self.value)
def to_dict(self):
""" Returns a dict with all the values of the different attributes """
return {
'value': self.value,
'mode': self.mode,
'validity': self.validity,
'core_timestamp': self.core_timestamp,
'state_change_timestamp': self.state_change_timestamp,
'core_id': self.core_id,
'running_id': self.running_id,
'timestamps': self.timestamps,
'properties': self.properties,
'dependencies': self.dependencies,
'ack': self.ack,
'shelved': self.shelved,
'description': self.description,
'url': self.url,
'sound': self.sound,
'can_shelve': self.can_shelve,
'value_change_timestamp': self.value_change_timestamp,
'value_change_transition': self.value_change_transition,
}
def update(self, alarm):
"""
Updates the alarm with attributes from another given alarm if the
timestamp of the given alarm is greater than the stored alarm.
Args:
alarm (Alarm): The new alarm object
Returns:
(string, string, boolean): A tuple with the state of the update
(not-updated, updated-equal, updated-different), the
transition of the alarm value (clear-set, set-clear or None) and
wether or not the dependencies of the alarm have been updated
"""
initial_ack_state = self.ack # counter by view variable
if alarm.core_timestamp <= self.core_timestamp:
logger.debug(
'alarm %s was not updated (tstamp is older than the last one)',
alarm.core_id)
return ('not-updated', None, False)
# Evaluate alarm state transition between set and unset states:
if self.value == 0 and alarm.value > 0:
transition = 'clear-set'
elif self.value > 0 and alarm.value == 0:
transition = 'set-clear'
else:
transition = None
if self.mode != alarm.mode or \
(self.state_change_timestamp == 0 and alarm.validity == 1):
self.state_change_timestamp = alarm.core_timestamp
if self.value != alarm.value:
self.state_change_timestamp = alarm.core_timestamp
self.value_change_timestamp = alarm.core_timestamp
self.value_change_transition = [self.value, alarm.value]
ignored_fields = ['core_timestamp', 'id', 'timestamps']
unchanged_fields = \
['ack', 'shelved', 'description', 'url', 'sound', 'can_shelve',
'state_change_timestamp', 'views', 'stored',
'value_change_timestamp', 'value_change_transition']
notify = 'updated-equal'
        if Counter(self.dependencies) == Counter(alarm.dependencies):
            dependencies_changed = False
        else:
            dependencies_changed = True
for field in alarm.__dict__.keys():
if field in unchanged_fields:
continue
old_value = getattr(self, field)
new_value = getattr(alarm, field)
if (field not in ignored_fields) and old_value != new_value:
notify = 'updated-different'
setattr(self, field, new_value)
# start block - counter by view
self.objects.update_counter_by_view_if_alarm_has_value_update(self, initial_ack_state, transition)
# end block - counter by view
return (notify, transition, dependencies_changed)
def update_validity(self):
"""
Calculate the validity of the alarm considering the current time,
the refresh rate and a previously defined delta time
"""
if self.validity == 0:
return self
validity_threshold = CdbConnector.validity_threshold
current_timestamp = int(round(time.time() * 1000))
if current_timestamp - self.core_timestamp > validity_threshold:
self.validity = 0
return self
else:
return self
def acknowledge(self, ack=True):
"""
Acknowledges the Alarm if its value is SET
Args:
ack (optional boolean): acknowledge status to update,
True by default
Returns:
boolean: the final ack status
"""
initial_ack_state = self.ack # counter variable
self.ack = ack
self.objects.update_counter_by_view_if_alarm_is_acknowledged(self, initial_ack_state)
return self.ack
def unacknowledge(self):
"""
Unacknowledge the Alarm
Returns:
boolean: the final ack status
"""
initial_ack_state = self.ack # counter variable
self.ack = False
self.objects.update_counter_by_view_if_alarm_is_unacknowledged(self, initial_ack_state)
return self.ack
def shelve(self):
"""
Shelves the Alarm
Returns:
int: 1 if it was shelved, 0 if not, -1 if shelving is not allowed
"""
if not self.can_shelve:
return -1
if self.shelved:
return 0
self.shelved = True
return 1
def unshelve(self):
"""
Unshelves the Alarm
Returns:
boolean: True if it was unshelved, False if not
"""
if not self.shelved:
return False
self.shelved = False
return True
def is_set(self):
""" Method to check is the alarm is set """
return True if self.value > 0 else False
def is_not_set(self):
""" Method to check is the alarm is not set """
return True if self.value == 0 else False
def is_stored(self):
""" Method to check is the alarm was stored in the collection """
return self.stored
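# Hedged illustration of the Alarm.update() contract documented above; the ids,
# running id and timestamps are made-up values chosen only to show a clear -> set
# transition and the (state, transition, dependencies_changed) return tuple.
def _example_alarm_update():
    old = Alarm(core_timestamp=1, core_id='Example-ID',
                running_id='(Example-ID:IASIO)', value=0, validity=1)
    new = Alarm(core_timestamp=2, core_id='Example-ID',
                running_id='(Example-ID:IASIO)', value=4, validity=1)
    # Expected state and transition: 'updated-different', 'clear-set'
    return old.update(new)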
class IASValue(Alarm):
""" IASValue from some device in the observatory. """
def __init__(self, core_timestamp, core_id, running_id, value, mode=0,
validity=0, timestamps={}, state_change_timestamp=0):
""" Constructor of the class,
        only executed when a new instance is created.
Receives and validates values for the attributes of the object """
Alarm.__init__(
self, core_timestamp, core_id, running_id, mode=mode,
validity=validity, timestamps=timestamps,
state_change_timestamp=state_change_timestamp
)
self.value = self.__check_value(value)
def __check_value(self, value):
""" Validates the IASValue value """
if type(value) is not str:
raise TypeError
else:
return value
def to_dict(self):
""" Returns a dict with all the values of the different attributes """
return {
'value': self.value,
'mode': self.mode,
'validity': self.validity,
'core_timestamp': self.core_timestamp,
'state_change_timestamp': self.state_change_timestamp,
'core_id': self.core_id,
'running_id': self.running_id,
'timestamps': self.timestamps
}
def update(self, ias_value):
"""
Updates the ias_value with attributes from another given ias_value if
the timestamp of the given ias_value is greater than the stored ias value.
Args:
ias_value (dict): The new ias_value object
Returns:
string: the state of the update (not-updated, updated-equal,
updated-different)
"""
if ias_value.core_timestamp <= self.core_timestamp:
logger.debug('value %s was not updated (tstamp is older than the last one)', ias_value.core_id)
return ('not-updated', None, False)
if self.mode != ias_value.mode or self.value != ias_value.value or \
(self.state_change_timestamp == 0 and ias_value.validity == 1):
self.state_change_timestamp = ias_value.core_timestamp
ignored_fields = ['core_timestamp', 'id', 'timestamps', 'properties', 'mode', 'validity']
unchanged_fields = ['ack', 'shelved', 'description', 'url', 'state_change_timestamp']
notify = 'updated-equal'
for field in ias_value.__dict__.keys():
if field in unchanged_fields:
continue
old_value = getattr(self, field)
new_value = getattr(ias_value, field)
if (field not in ignored_fields) and old_value != new_value:
notify = 'updated-different'
setattr(self, field, new_value)
return notify
|
lgpl-3.0
| 6,150,734,570,595,830,000 | 34.189621 | 107 | 0.554453 | false |
rreubenur/vmware-pyvmomi-examples
|
network_configure.py
|
1
|
4280
|
'''
Copyright 2013-2014 Reubenur Rahman
All Rights Reserved
@author: [email protected]
'''
import atexit
import argparse
import sys
import time
from pyVmomi import vim, vmodl
from pyVim import connect
from pyVim.connect import Disconnect, SmartConnect
inputs = {'vcenter_ip': '10.10.10.211',
'vcenter_password': 'Password123',
'vcenter_user': 'Administrator',
'vm_name' : 'reuben-test',
'isDHCP' : False,
'vm_ip' : '10.10.10.212',
'subnet' : '255.255.255.0',
'gateway' : '10.10.10.1',
'dns' : ['11.110.135.51', '11.110.135.52'],
'domain' : 'asiapacific.mycomp.net'
}
def get_obj(content, vimtype, name):
"""
Get the vsphere object associated with a given text name
"""
obj = None
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def wait_for_task(task, actionName='job', hideResult=False):
"""
Waits and provides updates on a vSphere task
"""
while task.info.state == vim.TaskInfo.State.running:
time.sleep(2)
if task.info.state == vim.TaskInfo.State.success:
if task.info.result is not None and not hideResult:
out = '%s completed successfully, result: %s' % (actionName, task.info.result)
print out
else:
out = '%s completed successfully.' % actionName
print out
else:
out = '%s did not complete successfully: %s' % (actionName, task.info.error)
raise task.info.error
print out
return task.info.result
def main():
#args = GetArgs()
try:
si = None
try:
print "Trying to connect to VCENTER SERVER . . ."
si = connect.Connect(inputs['vcenter_ip'], 443, inputs['vcenter_user'], inputs['vcenter_password'])
except IOError, e:
pass
atexit.register(Disconnect, si)
print "Connected to VCENTER SERVER !"
content = si.RetrieveContent()
#vm_name = args.vm
vm_name = inputs['vm_name']
vm = get_obj(content, [vim.VirtualMachine], vm_name)
if vm.runtime.powerState != 'poweredOff':
print "WARNING:: Power off your VM before reconfigure"
sys.exit()
adaptermap = vim.vm.customization.AdapterMapping()
globalip = vim.vm.customization.GlobalIPSettings()
adaptermap.adapter = vim.vm.customization.IPSettings()
isDHDCP = inputs['isDHCP']
if not isDHDCP:
"""Static IP Configuration"""
adaptermap.adapter.ip = vim.vm.customization.FixedIp()
adaptermap.adapter.ip.ipAddress = inputs['vm_ip']
adaptermap.adapter.subnetMask = inputs['subnet']
adaptermap.adapter.gateway = inputs['gateway']
globalip.dnsServerList = inputs['dns']
else:
"""DHCP Configuration"""
adaptermap.adapter.ip = vim.vm.customization.DhcpIpGenerator()
adaptermap.adapter.dnsDomain = inputs['domain']
globalip = vim.vm.customization.GlobalIPSettings()
        # For Linux. For Windows, follow sysprep.
ident = vim.vm.customization.LinuxPrep(domain=inputs['domain'], hostName=vim.vm.customization.FixedName(name=vm_name))
customspec = vim.vm.customization.Specification()
#For only one adapter
customspec.identity = ident
customspec.nicSettingMap = [adaptermap]
customspec.globalIPSettings = globalip
#Configuring network for a single NIC
        # For multiple NIC configurations, contact me.
print "Reconfiguring VM Networks . . ."
task = vm.Customize(spec=customspec)
# Wait for Network Reconfigure to complete
wait_for_task(task, si)
except vmodl.MethodFault, e:
print "Caught vmodl fault: %s" % e.msg
return 1
except Exception, e:
print "Caught exception: %s" % str(e)
return 1
# Start program
if __name__ == "__main__":
main()
|
apache-2.0
| -143,291,768,155,777,600 | 30.940299 | 134 | 0.58785 | false |
SabatierBoris/CecileWebSite
|
pyramidapp/tests/models/testright.py
|
1
|
1389
|
# vim: set fileencoding=utf-8 :
"""
This is a unit test for Right model
"""
import unittest
from pyramidapp.models.right import Right
from . import init_testing_db
class TestRight(unittest.TestCase):
"""
Test of right
"""
def setUp(self):
self.db_session = init_testing_db()
def tearDown(self):
self.db_session.remove()
def test_right_basic(self):
"""
This test all access to the bdd for Right
- Insert
- Select
- Update
- Delete
"""
# pylint: disable=E1101
self.db_session.add(Right("read"))
self.db_session.commit()
acc1 = self.db_session.query(Right).filter_by(name="read").scalar()
self.assertEquals(acc1.name, "read")
self.db_session.add(Right("edit"))
self.db_session.add(Right("delete"))
self.db_session.commit()
self.assertEquals(self.db_session.query(Right).count(), 3)
acc1 = self.db_session.query(Right).filter_by(name="read").scalar()
acc1.name = "READ"
self.db_session.commit()
acc1 = self.db_session.query(Right).filter_by(name="READ").scalar()
self.assertEquals(acc1.name, "READ")
self.db_session.delete(acc1)
self.db_session.commit()
self.assertEquals(self.db_session.query(Right).count(), 2)
# pylint: enable=E1101
|
gpl-2.0
| 7,682,956,768,483,351,000 | 24.722222 | 75 | 0.597552 | false |
cyberphilia/incidentally
|
incidentally/settings.py
|
1
|
3184
|
"""
Django settings for incidentally project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5t0-nhp3h-_da%nvz)*0f!$u=@wjr%muw*2vvnx@2!i!-3n-vv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'incidentally.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'incidentally.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -1,034,277,688,391,205,600 | 25.31405 | 91 | 0.690013 | false |
gnott/elife-tools
|
elifetools/tests/test_utils_html.py
|
1
|
8778
|
# coding=utf-8
import unittest
import os
import time
from ddt import ddt, data, unpack
os.sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import utils_html
@ddt
class TestUtilsHtml(unittest.TestCase):
def setUp(self):
self.mock_tag = type('', (object,), {})
setattr(self.mock_tag, 'name', 'foo')
@unpack
@data(
(False, None, None, None),
(True, None, None, None),
(True, u'<p><bold>A</bold> <italic>α</italic> <underline>c</underline></p>', None,
u'<p><b>A</b> <i>α</i> <span class="underline">c</span></p>'),
(False, u'<p><bold>A</bold> <italic>α</italic> <underline>c</underline></p>', None,
u'<p><bold>A</bold> <italic>α</italic> <underline>c</underline></p>'),
(True, u'<p><bold>A</bold> 1 < 2 > 1 <xref rid="bib1">>α<</xref>></p>', None,
u'<p><b>A</b> 1 < 2 > 1 <a href="#bib1">>α<</a>></p>'),
(True, u'A bad xref <xref>>α<</xref>', None,
u'A bad xref <xref>>α<</xref>'),
(True, u'Link 1 <ext-link ext-link-type="uri" xlink:href="http://example.org/example.html">http://example.org/example.html</ext-link>', None,
u'Link 1 <a href="http://example.org/example.html">http://example.org/example.html</a>'),
(True, u'Link 2 <ext-link ext-link-type="doi" xlink:href="10.7554/eLife.00001.012">http://dx.doi.org/10.7554/eLife.00001.012</ext-link>', None,
u'Link 2 <a href="https://doi.org/10.7554/eLife.00001.012">http://dx.doi.org/10.7554/eLife.00001.012</a>'),
(True, u'Bad link 1 <ext-link xlink:href="10.7554/eLife.00001.012">http://dx.doi.org/10.7554/eLife.00001.012</ext-link>', None,
u'Bad link 1 <ext-link xlink:href="10.7554/eLife.00001.012">http://dx.doi.org/10.7554/eLife.00001.012</ext-link>'),
(True, u'External link rewrite 1 <ext-link ext-link-type="uri" xlink:href="www.r-project.org">www.r-project.org</ext-link>', None,
u'External link rewrite 1 <a href="http://www.r-project.org">www.r-project.org</a>'),
(True, u'External link rewrite 2 <ext-link ext-link-type="uri" xlink:href="http://www.r-project.org">www.r-project.org</ext-link>', None,
u'External link rewrite 2 <a href="http://www.r-project.org">www.r-project.org</a>'),
(True, u'External link rewrite 3 <ext-link ext-link-type="uri" xlink:href="ftp://example.org">ftp://example.org</ext-link>', None,
u'External link rewrite 3 <a href="ftp://example.org">ftp://example.org</a>'),
(True, u'<p>The Panda database (<ext-link ext-link-type="uri" xlink:href="http://circadian.salk.edu/about.html)%20does%20not%20indicate%20restoration%20of%20Cyp2b10">http://circadian.salk.edu/about.html) does not indicate restoration of <italic>Cyp2b10</italic></ext-link> cycling by restricted feeding of clockless mice.</p>', None,
u'<p>The Panda database (<a href="http://circadian.salk.edu/about.html)%20does%20not%20indicate%20restoration%20of%20Cyp2b10">http://circadian.salk.edu/about.html) does not indicate restoration of <i>Cyp2b10</i></a> cycling by restricted feeding of clockless mice.</p>'),
(True, u'<p>An empty tag <italic/></p>', None,
u'<p>An empty tag <i></i></p>'),
(True, u'<p><email>[email protected]</email></p>', None,
u'<p><a href="mailto:[email protected]">[email protected]</a></p>'),
(True, u'<p>A first <email>[email protected]</email> and second <email>[email protected]</email></p>', None,
u'<p>A first <a href="mailto:[email protected]">[email protected]</a> and second <a href="mailto:[email protected]">[email protected]</a></p>'),
(True, u'<p><inline-graphic xlink:href="elife-00240-inf1-v1"/></p>', None,
u'<p><img src="elife-00240-inf1-v1.jpg"/></p>'),
(True, u'<p><inline-graphic xlink:href="elife-00240-inf1-v1.tiff"/></p>', None,
u'<p><img src="elife-00240-inf1-v1.jpg"/></p>'),
(True, u'<p><inline-graphic xlink:href="elife-00240-inf1-v1.tif"/>Some text <inline-graphic xlink:href="elife-00240-inf2-v1.jpg"/>><inline-graphic xlink:href="elife-00240-inf3-v1.gif"></inline-graphic></p>', 'https://example.org/',
u'<p><img src="https://example.org/elife-00240-inf1-v1.jpg"/>Some text <img src="https://example.org/elife-00240-inf2-v1.jpg"/>><img src="https://example.org/elife-00240-inf3-v1.gif"/></p>'),
(True, u'<p>Bad inline-graphic for test coverage <inline-graphic/></p>', None,
u'<p>Bad inline-graphic for test coverage <inline-graphic></inline-graphic></p>'),
(True, u'<p>Xref tag with multiple rid from 09561 v1 to <xref ref-type="fig" rid="fig3 fig4">Figures 3, 4</xref></p>', None,
u'<p>Xref tag with multiple rid from 09561 v1 to <a href="#fig3">Figures 3, 4</a></p>'),
(True, u'<break></break>', None,
u'<br/>'),
(True, u'<monospace>m</monospace>', None,
u'<span class="monospace">m</span>'),
(True, u'<table><thead><!-- Header row --><tr><!-- This header row ... --><th></th></tr></thead><tbody><!-- Table body --><tr><td>Genotype</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td>Genotype</td></tr></tbody></table>'),
# Replace a particular style pattern with a class name
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="author-callout-style-b8">RS19</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td class="author-callout-style-b8">RS19</td></tr></tbody></table>'),
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="author-callout-style-b8"/></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td class="author-callout-style-b8"></td></tr></tbody></table>'),
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="author-callout-style-b8" valign="top">RS19</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td class="author-callout-style-b8" valign="top">RS19</td></tr></tbody></table>'),
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td valign="top" style="author-callout-style-b8">RS19</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td class="author-callout-style-b8" valign="top">RS19</td></tr></tbody></table>'),
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td valign="top"><bold>R143Q</bold></td><td valign="top"/><td style="author-callout-style-b8" valign="top"/><td style="author-callout-style-b8" valign="top"/><td style="author-callout-style-b8" valign="top"/><td style="author-callout-style-b8" valign="top"/><td><bold>-2</bold></td><td valign="top"/></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td valign="top"><b>R143Q</b></td><td valign="top"></td><td class="author-callout-style-b8" valign="top"></td><td class="author-callout-style-b8" valign="top"></td><td class="author-callout-style-b8" valign="top"></td><td class="author-callout-style-b8" valign="top"></td><td><b>-2</b></td><td valign="top"></td></tr></tbody></table>'),
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="author-callout-style-b8">RS19</td><td style="author-callout-style-b8">RS19</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td class="author-callout-style-b8">RS19</td><td class="author-callout-style-b8">RS19</td></tr></tbody></table>'),
# Do not replace general styles with a class name
(True, u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="width:51.5pt;border-top:none;border-left:none; border-bottom:dashed; padding:0in 5.4pt 0in 5.4pt">CAP-Gly domain</td></tr></tbody></table>', None,
u'<table><thead><tr><th></th></tr></thead><tbody><tr><td style="width:51.5pt;border-top:none;border-left:none; border-bottom:dashed; padding:0in 5.4pt 0in 5.4pt">CAP-Gly domain</td></tr></tbody></table>'),
(True, u'<named-content content-type="author-callout-style-a1">author-callout-style-a1</named-content>', None,
u'<span class="author-callout-style-a1">author-callout-style-a1</span>'),
(True, u'<p>Bad named-content for test coverage <named-content/></p>', None,
u'<p>Bad named-content for test coverage <named-content></named-content></p>'),
)
def test_xml_to_html(self, html_flag, xml_string, base_url, expected):
self.assertEqual(utils_html.xml_to_html(html_flag, xml_string, base_url), expected)
if __name__ == '__main__':
unittest.main()
|
mit
| 3,111,444,813,102,461,000 | 72.697479 | 398 | 0.617788 | false |
jubalh/MAT
|
test/clitest.py
|
1
|
4824
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""
Unit test for the CLI interface
"""
import os
import unittest
import subprocess
import sys
import tarfile
sys.path.append('..')
from libmat import mat
import test
class TestRemovecli(test.MATTest):
"""
    test if cli correctly removes metadata
"""
def test_remove(self):
"""make sure that the cli remove all compromizing meta"""
for _, dirty in self.file_list:
subprocess.call(['../mat', '--add2archive', dirty])
current_file = mat.create_class_file(dirty, False, add2archive=True, low_pdf_quality=True)
self.assertTrue(current_file.is_clean())
def test_remove_empty(self):
"""Test removal with clean files\n"""
for clean, _ in self.file_list:
subprocess.call(['../mat', '--add2archive', clean])
current_file = mat.create_class_file(clean, False, add2archive=True, low_pdf_quality=True)
self.assertTrue(current_file.is_clean())
class TestListcli(test.MATTest):
"""
    test if cli correctly displays metadata
"""
def test_list_clean(self):
"""check if get_meta returns meta"""
for clean, _ in self.file_list:
proc = subprocess.Popen(['../mat', '-d', clean],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(str(stdout).strip('\n'), "[+] File %s \
:\nNo harmful metadata found" % clean)
def test_list_dirty(self):
"""check if get_meta returns all the expected meta"""
for _, dirty in self.file_list:
proc = subprocess.Popen(['../mat', '-d', dirty],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertNotEqual(str(stdout), "[+] File %s :\n No\
 harmful metadata found" % dirty)
class TestisCleancli(test.MATTest):
"""
    check if cli correctly reports whether a file is clean or not
"""
def test_clean(self):
"""test is_clean on clean files"""
for clean, _ in self.file_list:
proc = subprocess.Popen(['../mat', '-c', clean],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(str(stdout).strip('\n'), '[+] %s is clean' % clean)
def test_dirty(self):
"""test is_clean on dirty files"""
for _, dirty in self.file_list:
proc = subprocess.Popen(['../mat', '-c', dirty],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(str(stdout).strip('\n'), '[+] %s is not clean' % dirty)
class TestFileAttributes(unittest.TestCase):
"""
    test various file attributes (readable, writable, existing, ...)
"""
def test_not_writtable(self):
""" test MAT's behaviour on non-writable file"""
proc = subprocess.Popen(['../mat', 'not_writtable'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(str(stdout).strip('\n'), '[-] %s is not writable' % 'not_writtable')
def test_not_exist(self):
""" test MAT's behaviour on non-existent file"""
proc = subprocess.Popen(['../mat', 'ilikecookies'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertEqual(str(stdout).strip('\n'), 'Unable to process %s' % 'ilikecookies')
def test_empty(self):
""" test MAT's behaviour on empty file"""
proc = subprocess.Popen(['../mat', 'empty_file'], stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
        self.assertEqual(str(stdout).strip('\n'), 'Unable to process %s' % 'empty_file')
class TestUnsupported(test.MATTest):
def test_abort_unsupported(self):
""" test if the cli aborts on unsupported files
"""
tarpath = os.path.join(self.tmpdir, "test.tar.bz2")
tar = tarfile.open(tarpath, "w")
for f in ('../mat.desktop', '../README.security', '../setup.py'):
tar.add(f, f[3:]) # trim '../'
tar.close()
proc = subprocess.Popen(['../mat', tarpath], stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
self.assertTrue('It contains unsupported filetypes:' \
'\n- mat.desktop\n- README.security\n- setup.py\n'
in str(stdout))
def get_tests():
""" Return every clitests"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestRemovecli))
suite.addTest(unittest.makeSuite(TestListcli))
suite.addTest(unittest.makeSuite(TestisCleancli))
suite.addTest(unittest.makeSuite(TestUnsupported))
return suite
|
gpl-2.0
| -8,538,410,506,182,681,000 | 35 | 102 | 0.573798 | false |
Infinidat/infi.clickhouse_orm
|
tests/test_dictionaries.py
|
1
|
4992
|
import unittest
import logging
from infi.clickhouse_orm import *
class DictionaryTestMixin:
def setUp(self):
self.database = Database('test-db', log_statements=True)
if self.database.server_version < (20, 1, 11, 73):
raise unittest.SkipTest('ClickHouse version too old')
self._create_dictionary()
def tearDown(self):
self.database.drop_database()
def _test_func(self, func, expected_value):
sql = 'SELECT %s AS value' % func.to_sql()
logging.info(sql)
result = list(self.database.select(sql))
logging.info('\t==> %s', result[0].value if result else '<empty>')
print('Comparing %s to %s' % (result[0].value, expected_value))
self.assertEqual(result[0].value, expected_value)
return result[0].value if result else None
class SimpleDictionaryTest(DictionaryTestMixin, unittest.TestCase):
def _create_dictionary(self):
# Create a table to be used as source for the dictionary
self.database.create_table(NumberName)
self.database.insert(
NumberName(number=i, name=name)
for i, name in enumerate('Zero One Two Three Four Five Six Seven Eight Nine Ten'.split())
)
# Create the dictionary
self.database.raw("""
CREATE DICTIONARY numbers_dict(
number UInt64,
name String DEFAULT '?'
)
PRIMARY KEY number
SOURCE(CLICKHOUSE(
HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'test-db' TABLE 'numbername'
))
LIFETIME(100)
LAYOUT(HASHED());
""")
self.dict_name = 'test-db.numbers_dict'
def test_dictget(self):
self._test_func(F.dictGet(self.dict_name, 'name', F.toUInt64(3)), 'Three')
self._test_func(F.dictGet(self.dict_name, 'name', F.toUInt64(99)), '?')
def test_dictgetordefault(self):
self._test_func(F.dictGetOrDefault(self.dict_name, 'name', F.toUInt64(3), 'n/a'), 'Three')
self._test_func(F.dictGetOrDefault(self.dict_name, 'name', F.toUInt64(99), 'n/a'), 'n/a')
def test_dicthas(self):
self._test_func(F.dictHas(self.dict_name, F.toUInt64(3)), 1)
self._test_func(F.dictHas(self.dict_name, F.toUInt64(99)), 0)
class HierarchicalDictionaryTest(DictionaryTestMixin, unittest.TestCase):
def _create_dictionary(self):
# Create a table to be used as source for the dictionary
self.database.create_table(Region)
self.database.insert([
Region(region_id=1, parent_region=0, region_name='Russia'),
Region(region_id=2, parent_region=1, region_name='Moscow'),
Region(region_id=3, parent_region=2, region_name='Center'),
Region(region_id=4, parent_region=0, region_name='Great Britain'),
Region(region_id=5, parent_region=4, region_name='London'),
])
# Create the dictionary
self.database.raw("""
CREATE DICTIONARY regions_dict(
region_id UInt64,
parent_region UInt64 HIERARCHICAL,
region_name String DEFAULT '?'
)
PRIMARY KEY region_id
SOURCE(CLICKHOUSE(
HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'test-db' TABLE 'region'
))
LIFETIME(100)
LAYOUT(HASHED());
""")
self.dict_name = 'test-db.regions_dict'
def test_dictget(self):
self._test_func(F.dictGet(self.dict_name, 'region_name', F.toUInt64(3)), 'Center')
self._test_func(F.dictGet(self.dict_name, 'parent_region', F.toUInt64(3)), 2)
self._test_func(F.dictGet(self.dict_name, 'region_name', F.toUInt64(99)), '?')
def test_dictgetordefault(self):
self._test_func(F.dictGetOrDefault(self.dict_name, 'region_name', F.toUInt64(3), 'n/a'), 'Center')
self._test_func(F.dictGetOrDefault(self.dict_name, 'region_name', F.toUInt64(99), 'n/a'), 'n/a')
def test_dicthas(self):
self._test_func(F.dictHas(self.dict_name, F.toUInt64(3)), 1)
self._test_func(F.dictHas(self.dict_name, F.toUInt64(99)), 0)
def test_dictgethierarchy(self):
self._test_func(F.dictGetHierarchy(self.dict_name, F.toUInt64(3)), [3, 2, 1])
self._test_func(F.dictGetHierarchy(self.dict_name, F.toUInt64(99)), [99])
def test_dictisin(self):
self._test_func(F.dictIsIn(self.dict_name, F.toUInt64(3), F.toUInt64(1)), 1)
self._test_func(F.dictIsIn(self.dict_name, F.toUInt64(3), F.toUInt64(4)), 0)
self._test_func(F.dictIsIn(self.dict_name, F.toUInt64(99), F.toUInt64(4)), 0)
class NumberName(Model):
''' A table to act as a source for the dictionary '''
number = UInt64Field()
name = StringField()
engine = Memory()
class Region(Model):
region_id = UInt64Field()
parent_region = UInt64Field()
region_name = StringField()
engine = Memory()
|
bsd-3-clause
| 5,617,064,091,989,218,000 | 37.10687 | 106 | 0.610377 | false |
timkahlke/BASTA
|
basta/FileUtils.py
|
1
|
3618
|
import timeit
import sys
#########
#
# FileUtils.py - provide various functions for reading and writing
# files.
#
####
#
# COPYRIGHT DISCLAIMER:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# Author: Tim Kahlke, [email protected]
# Date: April 2017
#
def hit_gen(hit_file, alen, evalue, identity, config, num):
"""Generator function returning hits grouped by sequence"""
with open(hit_file, "r") as f:
hits = {}
hit = ""
try:
while True:
line = f.next()
ls = line.split("\t")
# next unless good hit
if not _check_hit(ls, alen, evalue, identity, config):
continue
nh = ls[config['query_id']]
# check if new query sequence
if hit != nh:
# check non-empty list of hits
if hits:
yield hits
hit = nh
hits = {hit: [_hit_hash(ls, config)]}
else:
if not hits:
hits[hit] = []
if num and len(hits[hit]) == num:
continue
hits[hit].append(_hit_hash(ls, config))
except StopIteration:
if hits:
yield hits
else:
return
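# Illustrative note (not part of the original module): hit_gen() only relies on the
# column indices supplied via `config`.  For a standard BLAST tabular file
# (-outfmt 6: qseqid sseqid pident length mismatch gapopen qstart qend sstart send
# evalue bitscore) an assumed, matching config and call would look like:
#
#   example_config = {'query_id': 0,      # qseqid
#                     'subject_id': 1,    # sseqid
#                     'pident': 2,        # percent identity
#                     'align_length': 3,  # alignment length
#                     'evalue': 10}       # e-value
#   for hits in hit_gen("blast_hits.tab", alen=50, evalue=1e-5, identity=80,
#                       config=example_config, num=10):
#       ...  # hits maps one query id to its list of filtered hit dicts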
def _check_hit(ls, alen, evalue, ident, config):
try:
if float(ls[config['pident']]) < ident:
return 0
if float(ls[config['evalue']]) > evalue:
return 0
if int(ls[config['align_length']]) < alen:
return 0
return 1
except IndexError:
print("\n#### [BASTA ERROR] ####\n#\n# INDEX ERROR WHILE CHECKING e-value, alingment length OR percent identity!!!.\n# Are you sure that your input file has the correct format?\n# (For details check https://github.com/timkahlke/BASTA/wiki/3.-BASTA-Usage#input-file-format)\n#\n#####\n\n")
sys.exit()
def _get_hit_name(hs):
# Figure out if the hit is of format
# >bla|accession.version|additional-string
# or
# >accession.version optional-additional-info
# or
# >gi|gi_number|ref|accession
# DIRTY! Create a better name guessing!!!!
ps = hs.split("|")
if len(ps) >= 3:
if ps[0] == 'gi':
try:
return [x for x in ps[3].split(".") if x][0]
except IndexError:
return("no_match")
else:
try:
return [x for x in ps[1].split(".") if x][0]
except IndexError:
return("no_match")
else:
try:
return [x for x in hs.replace(">", "").split(".") if x][0]
except IndexError:
return("no_match")
def _hit_hash(ls, config):
return {'id': _get_hit_name(ls[config['subject_id']]), 'identity': ls[config['pident']], 'evalue': ls[config['evalue']], 'alen': ls[config['align_length']]}
|
gpl-3.0
| 4,843,353,332,459,730,000 | 31.303571 | 297 | 0.538695 | false |
unioslo/cerebrum
|
contrib/no/uit/populate_roles.py
|
1
|
12147
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2019 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Populate role groups from SystemY
rolenames=role:<rolename from xml>
if rolename has admin attr=yes
build an admin account (999-style)
"""
import argparse
import logging
import xml.sax
import Cerebrum.logutils
import cereconf
from Cerebrum import Errors
from Cerebrum.Utils import Factory
from Cerebrum.modules import PosixGroup
from Cerebrum.modules import PosixUser
from Cerebrum.modules.no.uit.Account import UsernamePolicy
from Cerebrum.utils import transliterate
from Cerebrum.utils.argutils import add_commit_args
logger = logging.getLogger(__name__)
db = Factory.get('Database')()
db.cl_init(change_program='pop_itroles')
account = Factory.get('Account')(db)
person = Factory.get('Person')(db)
const = Factory.get('Constants')(db)
account2name = dict((x["entity_id"], x["entity_name"]) for x in
Factory.get("Group")(db).list_names(
const.account_namespace))
name_gen = UsernamePolicy(db)
class RolesXmlParser(xml.sax.ContentHandler):
"""Parserklasse for it_roles.xml."""
elements = {'roles': False,
'role': True,
'member': True,
}
def __init__(self, filename, call_back_function):
self.call_back_function = call_back_function
xml.sax.parse(filename, self)
def startElement(self, name, attrs): # noqa: N802
if name == 'roles':
pass
elif name == 'role':
self.role_attrs = {}
self.role_members = []
for k in attrs.keys():
self.role_attrs[k] = attrs.get(k)
elif name == 'member':
self._elemdata = []
else:
logger.error("UNKNOWN ELEMENT: %s", name)
def characters(self, ch):
self.var = None
tmp = ch.encode('iso8859-1').strip() # TODO: Should this be changed?
if tmp:
self.var = tmp
self._elemdata.append(tmp)
def endElement(self, name): # noqa: N802
if name == 'role':
self.call_back_function(self, name)
elif name == 'member':
self.call_back_function(self, name)
elif name == 'roles':
pass
else:
logger.error("UNKNOWN ELEMENT: %s", name)
account_cache = {}
def get_account(name):
cache_hit = account_cache.get(name)
if cache_hit:
return cache_hit
ac = Factory.get('Account')(db)
ac.find_by_name(name)
account_cache[name] = ac
return ac
def get_group(id):
gr = PosixGroup.PosixGroup(db)
if isinstance(id, int):
gr.find(id)
else:
gr.find_by_name(id)
return gr
class ITRole(object):
def __init__(self, role_name, admin, members):
self.group_name = role_name
self.buildadmins = admin
self.group_members = members
def group_creator(self):
creator_ac = get_account(cereconf.INITIAL_ACCOUNTNAME)
return creator_ac.entity_id
def maybe_create(self, group_name):
try:
return get_group(group_name)
except Errors.NotFoundError:
description = "IT role group (%s)" % group_name
pg = PosixGroup.PosixGroup(db)
pg.populate(
creator_id=self.group_creator(),
visibility=const.group_visibility_internal,
name=self.group_name,
description=description,
# TODO:
# Are these groups:
# - internal? They have group_visibility_internal for some
# reason - do they have some internal usage in Cerebrum as
# well?
# - automatic? They seem to be maintained from this script, but
# the script never removes members..
group_type=const.group_type_unknown,
)
pg.write_db()
logger.info("Created group: name=%s, id=%d, gid=%d, desc='%s'",
pg.group_name, pg.entity_id, pg.posix_gid,
pg.description)
if self.buildadmins:
pg.add_spread(const.spread_uit_ad_lit_admingroup)
else:
pg.add_spread(const.spread_uit_ad_group)
return pg
def maybe_create_admin(self, person_id):
        # this person should have two accounts.
        # a primary account, e.g. bto001
        # and an admin account, e.g. bto999
new_ac = PosixUser.PosixUser(db)
pri_ac = PosixUser.PosixUser(db)
person.clear()
person.find(person_id)
pri_account_id = person.get_primary_account()
if pri_account_id is None:
logger.warn("Primary account for person_id=%r not found, "
"account expired?", person_id)
return
pri_ac.find(pri_account_id)
existing_acc_types = pri_ac.get_account_types(owner_id=person_id,
filter_expired=False)
default_expire_date = pri_ac.expire_date
admin_priority = 920
accounts = new_ac.search(spread=const.spread_uit_ad_lit_admin,
owner_id=person_id,
expire_start=None)
if len(accounts) == 0:
# does not have account in spread ad_lit_admin, create and set
# spread
logger.debug("Create admin account for %s", person_id)
ext_id = person.get_external_id(id_type=const.externalid_fodselsnr)
# FIXME: may bang if person only from sysX !??
ssn = ext_id[0]['external_id']
full_name = person.get_name(const.system_cached, const.name_full)
new_username = name_gen.get_uit_uname(person, full_name,
regime='ADMIN')
logger.debug("GOT account_name=%r", new_username)
creator = get_account(cereconf.INITIAL_ACCOUNTNAME)
creator_id = creator.entity_id
new_ac.clear()
new_ac.populate(
name=new_username,
owner_id=person.entity_id,
owner_type=const.entity_person,
np_type=None,
creator_id=creator_id,
expire_date=default_expire_date,
posix_uid=new_ac.get_free_uid(),
gid_id=1623, # int(group.entity_id),
gecos=transliterate.for_gecos(full_name),
shell=const.posix_shell_bash,
)
new_ac.write_db()
# AD litadmin spread
new_ac.add_spread(const.spread_uit_ad_lit_admin)
new_ac.set_home_dir(const.spread_uit_ad_lit_admin)
# Set spread expire date
new_ac.set_spread_expire(spread=const.spread_uit_ad_lit_admin,
expire_date=default_expire_date)
password = new_ac.make_passwd(new_username)
new_ac.set_password(password)
new_ac.set_account_type(existing_acc_types[0]['ou_id'],
existing_acc_types[0]['affiliation'],
admin_priority)
new_ac.write_db()
return new_ac.account_name
elif len(accounts) == 1:
# sync account to person's primary account. expire date that is...
new_ac.clear()
new_ac.find(accounts[0]['account_id'])
new_ac.expire_date = default_expire_date
# Set spread expire date
new_ac.set_spread_expire(spread=const.spread_uit_ad_lit_admin,
expire_date=default_expire_date)
new_ac.write_db()
return accounts[0]['name']
else:
logger.error("TOO MANY ACCOUNTS FOUND for with "
"spread_uit_ad_lit_admin for %s!", person_id)
raise db.IntegrityError
def translate2admins(self, accountList):
admlist = []
for a in accountList:
try:
parent = get_account(a)
except Errors.NotFoundError:
logger.error(
"Account %s not found. Cannot create admin account!", a)
continue
admin = self.maybe_create_admin(parent.owner_id)
if admin:
admlist.append(admin)
return admlist
def sync_members(self):
group = self.maybe_create(self.group_name)
current_members = []
for member in group.search_members(group_id=group.entity_id,
indirect_members=True,
member_type=const.entity_account,
member_filter_expired=False):
member_id = int(member["member_id"])
if member_id not in account2name:
continue
current_members.append(account2name[member_id])
current = set(current_members)
logger.debug("CURRENT MEMBERS: %s", current)
if self.buildadmins == 'yes':
new = set(self.translate2admins(self.group_members))
else:
new = set(self.group_members)
logger.info("group: %s, members should be %s", self.group_name, new)
toAdd = new - current
toRemove = current - new
logger.info("TO ADD: %s", toAdd)
logger.info("TO REM: %s", toRemove)
for name in toRemove:
acc = get_account(name)
group.remove_member(acc.entity_id)
for name in toAdd:
logger.info("Trying to add %s", name)
try:
acc = get_account(name)
group.add_member(acc.entity_id)
except Errors.NotFoundError:
logger.error("Could not add %s to %s, account not found",
name, group.group_name)
continue
def process_role(name, attrs, members):
logger.info(
"PROCESS ROLE: name=%s, attrs=%s,members=%s", name, attrs, members)
role_prefix = 'role'
role_name = "%s:%s" % (role_prefix, attrs.get('name'))
admin = attrs.get('admin')
work = ITRole(role_name, admin, members)
work.sync_members()
def rolle_helper(obj, el_name):
if el_name == 'role':
process_role(el_name, obj.role_attrs, obj.role_members)
pass
elif el_name == 'member':
attribute = obj._elemdata
member_name = ''.join(attribute)
obj.role_members.append(member_name)
return
def main(inargs=None):
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
'-r', '--role_file',
required=True,
help='Import role groups from %(metavar)s',
metavar='<file>',
)
parser = add_commit_args(parser)
Cerebrum.logutils.options.install_subparser(parser)
args = parser.parse_args(inargs)
Cerebrum.logutils.autoconf('cronjob', args)
logger.info('Start %s', parser.prog)
logger.debug('args: %r', args)
RolesXmlParser(args.role_file, rolle_helper)
if args.commit:
logger.info("Commiting changes")
db.commit()
else:
logger.info("Dryrun, rollback changes")
db.rollback()
logger.info('Done %s', parser.prog)
if __name__ == '__main__':
main()
|
gpl-2.0
| -2,223,359,008,404,774,700 | 33.216901 | 79 | 0.565489 | false |
KMFleischer/PyEarthScience
|
Transition_examples_NCL_to_PyNGL/vectors/TRANS_vectors.py
|
1
|
2139
|
#
# File:
# TRANS_vectors.py
#
# Synopsis:
# Illustrates how to create a vector plot
#
# Categories:
# vector plot
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to create a vector plot.
#
# Effects illustrated:
# o Read netCDF data
# o Drawing a vector plot
#
# Output:
# A single visualization is produced.
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
'''
Transition Guide PyNGL Example: TRANS_vectors.py
- Read netCDF data
- Drawing a vector plot
18-09-04 kmf
'''
from __future__ import print_function
import Ngl,Nio
#-- open a file and read variables
f = Nio.open_file("../read_data/rectilinear_grid_2D.nc", "r")
u = f.variables["u10"]
v = f.variables["v10"]
ua = f.variables["u10"][0,:,:]
va = f.variables["v10"][0,:,:]
lat = f.variables["lat"]
lon = f.variables["lon"]
nlon = len(lon)
nlat = len(lat)
#-- open a workstation
wks = Ngl.open_wks("png","plot_TRANS_vectors_py")
#-- resource settings
vcres = Ngl.Resources()
vcres.nglFrame = False
vcres.vfXArray = lon[::3]
vcres.vfYArray = lat[::3]
vcres.vcMinFracLengthF = 0.3 #-- length of smallest vector
vcres.vcRefLengthF = 0.05 #-- length of reference vector
vcres.vcRefMagnitudeF = 20.0 #-- define vector ref mag
vcres.vcRefLengthF = 0.035 #-- define length of vec ref
vcres.mpFillOn = True
vcres.mpOceanFillColor = "Transparent"
vcres.mpLandFillColor = "Gray90"
vcres.mpInlandWaterFillColor = "Gray90"
#-- create the plot
plot = Ngl.vector_map(wks,ua[::3,::3],va[::3,::3],vcres)
#-- write variable long_name and units to the plot
txres = Ngl.Resources()
txres.txFontHeightF = 0.014
Ngl.text_ndc(wks,f.variables["u10"].attributes['long_name'],0.16,0.8,txres)
Ngl.text_ndc(wks,f.variables["u10"].attributes['units'], 0.95,0.8,txres)
#-- advance the frame
Ngl.frame(wks)
Ngl.end()
|
mit
| -2,994,039,357,135,409,000 | 23.306818 | 75 | 0.635811 | false |
ajparsons/useful_inkleby
|
useful_inkleby/useful_django/views/url.py
|
1
|
5631
|
'''
IntegratedURLView - Sidestep django's url.py based setup and integrate
urls directly with view classes rather than keeping them separate.
This will mix-in either with the functional inkleby view or the default
django class-based views.
In views module you set up a series of classes that inherit from
IntegratedURLView and then connect up in project url like so:
url(r'^foo/', include_view('foo.views')),
Philosophy behind this is that the current urlconf system was designed for
functional views - class-based views have to hide themselves as functions
with an as_view function, which is ugly. By moving responsibility for
generating these to the class view it avoids awkward manual repetition
and keeps all settings associated with the view in one place.
Apps then don't need a separate url.py.
'''
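# Minimal usage sketch (illustrative only; FooView and its url_* values are
# hypothetical, not part of this module):
#
#   # myapp/views.py
#   class FooView(IntegratedURLView):
#       url_pattern = r'^(\d+)/$'
#       url_name = 'foo-detail'
#
#   # project urls.py
#   urlpatterns = [url(r'^foo/', include_view('myapp.views'))]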
from django.core.urlresolvers import (RegexURLPattern,
RegexURLResolver, LocaleRegexURLResolver)
from django.core.exceptions import ImproperlyConfigured
import re
from django.conf.urls import url
import six
from importlib import import_module
from functional import FunctionalView, LogicalView
from types import ModuleType
from django.core.urlresolvers import reverse
from django.shortcuts import HttpResponseRedirect
def make_comparison(v):
text = v
for s in re.findall("\((.*?)\)", v):
text = text.replace(s, "1")
return text
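# Illustrative behaviour (the example pattern is made up): every parenthesised
# group is collapsed to a single character so patterns can later be sorted by
# the length of their literal part:
#
#   make_comparison(r'^article/(?P<pk>\d+)/$')  ->  '^article/(1)/$'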
class AppUrl(object):
def __init__(self, app_view):
"""
Given path to views (or views module) will gather all url-enabled views
"""
self.views = []
if isinstance(app_view, ModuleType):
view_module = app_view
elif isinstance(app_view, six.string_types):
view_module = import_module(app_view)
else:
raise TypeError("Not a module or module path")
for k, v in view_module.__dict__.iteritems():
if isinstance(v, type) and issubclass(v, IntegratedURLView):
self.views.append(v)
def patterns(self):
"""
return patterns of all associated views
"""
local_patterns = []
for c in self.views:
local_patterns.extend(c.get_pattern())
local_patterns.sort(key=lambda x: len(x._url_comparison), reverse=True)
return local_patterns
def has_bakeable_views(self):
for v in self.views:
if hasattr(v, "bake_args") and hasattr(v,"url_name"):
if v.url_name:
return True
return False
def bake(self, **kwargs):
"""
bake all views with a bake_path
"""
for v in self.views:
if hasattr(v, "bake_args") and hasattr(v,"url_name"):
if v.url_name:
v.bake(**kwargs)
def include_view(arg, namespace=None, app_name=None):
if app_name and not namespace:
raise ValueError('Must specify a namespace if specifying app_name.')
if isinstance(arg, tuple):
# callable returning a namespace hint
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that provides a namespace')
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = AppUrl(urlconf_module).patterns()
urlconf_module.urlpatterns = patterns
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
return
class IntegratedURLView(LogicalView):
"""
Integrate URL configuration information into the View class.
Makes app level urls.py unnecessary.
add class level variables for:
url_pattern - regex string
url_patterns - list of regex strings (optional)
url_name - name for url view (for reverse lookup)
url_extra_args - any extra arguments to be fed into the url function for this view.
"""
url_pattern = ""
url_patterns = []
url_name = ""
url_extra_args = {}
@classmethod
def redirect_response(cls, *args):
return HttpResponseRedirect(reverse(cls.url_name, args=args))
@classmethod
def get_pattern(cls):
"""
returns a list of conf.url objects for url patterns that match this object
"""
new_patterns = []
def urlformat(pattern):
uo = url(pattern, cls.as_view(),
cls.url_extra_args, name=cls.url_name)
uo._url_comparison = make_comparison(pattern)
return uo
if cls.url_patterns:
new_patterns = [urlformat(x) for x in cls.url_patterns]
if cls.url_pattern:
new_patterns.append(urlformat(cls.url_pattern))
return new_patterns
|
mit
| 5,741,831,958,367,051,000 | 31.123529 | 95 | 0.626354 | false |
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/music21/common/formats.py
|
1
|
10980
|
#-*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: common/formats.py
# Purpose: Utilities for formats
#
# Authors: Michael Scott Cuthbert
# Christopher Ariza
#
# Copyright: Copyright © 2009-2015 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Utilities for working with file formats.
almost everything here is deprecated.
'''
__all__ = ['subConverterList', 'findSubConverterForFormat', 'findFormat',
'findInputExtension', 'findFormatFile', 'findFormatExtFile',
'findFormatExtURL',
'VALID_SHOW_FORMATS', 'VALID_WRITE_FORMATS', 'VALID_AUTO_DOWNLOAD']
# used for checking preferences, and for setting environment variables
VALID_SHOW_FORMATS = ['musicxml', 'lilypond', 'text', 'textline', 'midi', 'png', 'pdf', 'svg', 'lily.pdf', 'lily.png', 'lily.svg', 'braille', 'vexflow', 'vexflow.html', 'vexflow.js', 'ipython', 'ipython.png', 'musicxml.png']
VALID_WRITE_FORMATS = ['musicxml', 'lilypond', 'text', 'textline', 'midi', 'png', 'pdf', 'svg', 'lily.pdf', 'lily.png', 'lily.svg', 'braille', 'vexflow', 'vexflow.html', 'vexflow.js', 'ipython', 'ipython.png', 'musicxml.png']
VALID_AUTO_DOWNLOAD = ['ask', 'deny', 'allow']
from music21.common.decorators import deprecated
#-------------------------------------------------------------------------------
@deprecated('May 2015', '[soonest possible]', 'Moved to converter')
def subConverterList():
'''
returns a list of subconverter classes available to music21
in converter/subConverters, including the stub SubConverter class
DEPRECATED May 2015: moved to converter. #TODO: Remove
'''
from music21 import converter
return converter.Converter().subconvertersList()
def findSubConverterForFormat(fmt):
'''
return a converter.subConverter.SubConverter subclass
for a given format -- this is a music21 format name,
not a file extension. Or returns None
>>> common.findSubConverterForFormat('musicxml')
<class 'music21.converter.subConverters.ConverterMusicXMLET'>
>>> common.findSubConverterForFormat('text')
<class 'music21.converter.subConverters.ConverterText'>
Some subconverters have format aliases
>>> common.findSubConverterForFormat('t')
<class 'music21.converter.subConverters.ConverterText'>
'''
fmt = fmt.lower().strip()
from music21 import converter
scl = converter.Converter().subconvertersList()
for sc in scl:
formats = sc.registerFormats
if fmt in formats:
return sc
#@deprecated('May 2014', '[soonest possible]', 'Moved to converter')
def findFormat(fmt):
'''
Given a format defined either by a format name, abbreviation, or
an extension, return the regularized format name as well as
    the output extensions.
DEPRECATED May 2014 -- moving to converter
All but the first element of the tuple are deprecated for use, since
the extension can vary by subconverter (e.g., lily.png)
Note that .mxl and .mx are only considered MusicXML input formats.
>>> common.findFormat('mx')
('musicxml', '.xml')
>>> common.findFormat('.mxl')
('musicxml', '.xml')
>>> common.findFormat('musicxml')
('musicxml', '.xml')
>>> common.findFormat('lily')
('lilypond', '.ly')
>>> common.findFormat('lily.png')
('lilypond', '.ly')
>>> common.findFormat('humdrum')
('humdrum', '.krn')
>>> common.findFormat('txt')
('text', '.txt')
>>> common.findFormat('textline')
('textline', '.txt')
>>> common.findFormat('midi')
('midi', '.mid')
>>> common.findFormat('abc')
('abc', '.abc')
>>> common.findFormat('scl')
('scala', '.scl')
>>> common.findFormat('braille')
('braille', '.txt')
>>> common.findFormat('vexflow')
('vexflow', '.html')
>>> common.findFormat('capx')
('capella', '.capx')
>>> common.findFormat('mx')
('musicxml', '.xml')
#>>> common.findFormat('png')
#('musicxml.png', '.png')
#>>> common.findFormat('ipython')
#('ipython', '.png')
# >>> common.findFormat('ipython.png')
# ('ipython', '.png')
# >>> common.findFormat('musicxml.png')
# ('musicxml.png', '.png')
Works the same whether you have a leading dot or not:
>>> common.findFormat('md')
('musedata', '.md')
>>> common.findFormat('.md')
('musedata', '.md')
If you give something we can't deal with, returns a Tuple of None, None:
>>> common.findFormat('wpd')
(None, None)
'''
from music21 import converter
c = converter.Converter()
fileformat = c.regularizeFormat(fmt)
if fileformat is None:
return (None, None)
scf = c.getSubConverterFormats()
sc = scf[fileformat]
if sc.registerOutputExtensions:
firstOutput = '.' + sc.registerOutputExtensions[0]
elif sc.registerInputExtensions:
firstOutput = '.' + sc.registerInputExtensions[0]
else:
firstOutput = None
return fileformat, firstOutput
# for key in sorted(list(fileExtensions)):
# if fmt.startswith('.'):
# fmt = fmt[1:] # strip .
# if fmt == key or fmt in fileExtensions[key]['input']:
# # add leading dot to extension on output
# return key, '.' + fileExtensions[key]['output']
# return None, None # if no match found
#@deprecated('May 2014', '[soonest possible]', 'Moved to converter')
def findInputExtension(fmt):
'''
Will be fully deprecated when there's an exact equivalent in converter...
Given an input format or music21 format, find and return all possible
input extensions.
>>> a = common.findInputExtension('musicxml')
>>> a
('.xml', '.mxl', '.mx', '.musicxml')
>>> a = common.findInputExtension('humdrum')
>>> a
('.krn',)
>>> common.findInputExtension('musedata')
('.md', '.musedata', '.zip')
mx is not a music21 format but it is a file format
>>> common.findInputExtension('mx')
('.xml', '.mxl', '.mx', '.musicxml')
Leading dots don't matter...
>>> common.findInputExtension('.mx')
('.xml', '.mxl', '.mx', '.musicxml')
blah is neither
>>> common.findInputExtension('blah') is None
True
'''
from music21 import converter
fmt = fmt.lower().strip()
if fmt.startswith('.'):
fmt = fmt[1:] # strip .
sc = findSubConverterForFormat(fmt)
if sc is None:
# file extension
post = []
for sc in converter.Converter().subconvertersList():
if fmt not in sc.registerInputExtensions:
continue
for ext in sc.registerInputExtensions:
if not ext.startswith('.'):
ext = '.' + ext
post.append(ext)
if post:
return tuple(post)
return None
else:
# music21 format
post = []
for ext in sc.registerInputExtensions:
if not ext.startswith('.'):
ext = '.' + ext
post.append(ext)
return tuple(post)
#@deprecated('May 2014', '[soonest possible]', 'Moved to converter')
def findFormatFile(fp):
'''
Given a file path (relative or absolute) return the format
DEPRECATED May 2014 -- moving to converter
>>> common.findFormatFile('test.xml')
'musicxml'
>>> common.findFormatFile('long/file/path/test-2009.03.02.xml')
'musicxml'
>>> common.findFormatFile('long/file/path.intermediate.png/test-2009.03.xml')
'musicxml'
On a windows networked filesystem
>>> common.findFormatFile('\\\\long\\file\\path\\test.krn')
'humdrum'
'''
fmt, unused_ext = findFormat(fp.split('.')[-1])
return fmt # may be None if no match
#@deprecated('May 2014', '[soonest possible]', 'Moved to converter')
def findFormatExtFile(fp):
'''Given a file path (relative or absolute) find format and extension used (not the output extension)
DEPRECATED May 2014 -- moving to converter
>>> common.findFormatExtFile('test.mx')
('musicxml', '.mx')
>>> common.findFormatExtFile('long/file/path/test-2009.03.02.xml')
('musicxml', '.xml')
>>> common.findFormatExtFile('long/file/path.intermediate.png/test-2009.03.xml')
('musicxml', '.xml')
>>> common.findFormatExtFile('test')
(None, None)
Windows drive
>>> common.findFormatExtFile('d:/long/file/path/test.xml')
('musicxml', '.xml')
On a windows networked filesystem
>>> common.findFormatExtFile('\\\\long\\file\\path\\test.krn')
('humdrum', '.krn')
'''
fileFormat, unused_extOut = findFormat(fp.split('.')[-1])
if fileFormat == None:
return None, None
else:
return fileFormat, '.'+fp.split('.')[-1] # may be None if no match
#@deprecated('May 2014', '[soonest possible]', 'Moved to converter')
def findFormatExtURL(url):
'''Given a URL, attempt to find the extension. This may scrub arguments in a URL, or simply look at the last characters.
DEPRECATED May 2014 -- moving to converter
>>> urlA = 'http://somesite.com/?l=cc/schubert/piano/d0576&file=d0576-06.krn&f=xml'
>>> urlB = 'http://somesite.com/cgi-bin/ksdata?l=cc/schubert/piano/d0576&file=d0576-06.krn&f=kern'
>>> urlC = 'http://somesite.com/cgi-bin/ksdata?l=cc/bach/cello&file=bwv1007-01.krn&f=xml'
>>> urlF = 'http://junk'
>>> common.findFormatExtURL(urlA)
('musicxml', '.xml')
>>> common.findFormatExtURL(urlB)
('humdrum', '.krn')
>>> common.findFormatExtURL(urlC)
('musicxml', '.xml')
>>> common.findFormatExtURL(urlF)
(None, None)
'''
from music21 import converter
ext = None
# first, look for cgi arguments
if '=xml' in url:
ext = '.xml'
elif '=kern' in url:
ext = '.krn'
# specific tag used on musedata.org
elif 'format=stage2' in url or 'format=stage1' in url:
ext = '.md'
else: # check for file that ends in all known input extensions
for sc in converter.Converter().subconvertersList():
inputTypes = sc.registerInputExtensions
for extSample in inputTypes:
if url.endswith('.' + extSample):
ext = '.' + extSample
break
# presently, not keeping the extension returned from this function
# reason: mxl is converted to xml; need to handle mxl files first
if ext != None:
fileFormat, unused_junk = findFormat(ext)
return fileFormat, ext
else:
return None, None
if __name__ == "__main__":
import music21
music21.mainTest()
#------------------------------------------------------------------------------
# eof
|
mit
| 5,272,609,764,362,257,000 | 31.675595 | 225 | 0.593861 | false |
MercenariesEngineering/Coalition
|
qarnot/disk.py
|
1
|
40259
|
"""Module for disk object."""
# Copyright 2016 Qarnot computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from qarnot import get_url, raise_on_error
from qarnot.exceptions import *
import posixpath
import os
import os.path
import time
import hashlib
import datetime
import threading
import itertools
try:
from progressbar import AnimatedMarker, Bar, ETA, Percentage, AdaptiveETA, ProgressBar, AdaptiveTransferSpeed
except:
pass
class Disk(object):
"""Represents a resource/result disk on the cluster.
This class is the interface to manage resources or results from a
:class:`qarnot.task.Task`.
.. note::
A :class:`Disk` must be created with
:meth:`qarnot.connection.Connection.create_disk`
or retrieved with :meth:`qarnot.connection.Connection.disks` or `qarnot.connection.Connection.retrieve_disk`.
.. note::
Paths given as 'remote' arguments,
(or as path arguments for :func:`Disk.directory`)
**must** be valid unix-like paths.
"""
# Creation
def __init__(self, connection, description, lock=False,
tags=None):
"""
Create a disk on a cluster.
:param :class:`qarnot.connection.Connection` connection: represents the cluster on which to create the disk
:param str description: a short description of the disk
:param bool lock: prevents the disk to be removed accidentally
:param list(str) tags: Custom tags
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
self._uuid = None
self._description = description
self._file_count = 0
self._used_space_bytes = 0
self._locked = lock
self._connection = connection
self._filethreads = {} # A dictionary containing key:value where key is
# the remote destination on disk, and value a running thread.
self._filecache = {} # A dictionary containing key:value where key is
# the remote destination on disk, and value an opened Python File.
self._add_mode = UploadMode.blocking
self._tags = tags
self._auto_update = True
self._last_auto_update_state = self._auto_update
self._update_cache_time = 5
self._last_cache = time.time()
def create(self):
"""Create the Disk on the REST API.
.. note:: This method should not be used unless if the object was created with the constructor.
"""
data = {
"description": self._description,
"locked": self._locked
}
if self._tags is not None:
data["tags"] = self._tags
response = self._connection._post(get_url('disk folder'), json=data)
if response.status_code == 403:
raise MaxDiskException(response.json()['message'])
else:
raise_on_error(response)
self._uuid = response.json()['uuid']
self.update()
@classmethod
def _retrieve(cls, connection, disk_uuid):
"""Retrieve information of a disk on a cluster.
:param :class:`qarnot.connection.Connection` connection: the cluster
to get the disk from
:param str disk_uuid: the UUID of the disk to retrieve
:rtype: :class:`qarnot.disk.Disk`
:returns: The retrieved disk.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
response = connection._get(get_url('disk info', name=disk_uuid))
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
return cls.from_json(connection, response.json())
@classmethod
def from_json(cls, connection, json_disk):
"""Create a Disk object from a json disk
:param qarnot.connection.Connection connection: the cluster connection
:param dict json_disk: Dictionary representing the disk
"""
disk = cls(connection,
json_disk['description'],
lock=json_disk['locked'],
tags=json_disk.get('tags'))
disk._update(json_disk)
return disk
# Disk Management
def update(self, flushcache=False):
"""
Update the disk object from the REST Api.
The flushcache parameter can be used to force the update, otherwise a cached version of the object
will be served when accessing properties of the object.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
if self._uuid is None:
return
now = time.time()
if (now - self._last_cache) < self._update_cache_time and not flushcache:
return
response = self._connection._get(get_url('disk info', name=self._uuid))
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
self._update(response.json())
self._last_cache = time.time()
def _update(self, json_disk):
""" Update local disk object from json
:type json_disk: dict
"""
self._uuid = json_disk["uuid"]
self._description = json_disk["description"]
self._file_count = json_disk["fileCount"]
self._used_space_bytes = json_disk["usedSpaceBytes"]
self._locked = json_disk["locked"]
self._file_count = json_disk["fileCount"]
self._used_space_bytes = json_disk["usedSpaceBytes"]
self._tags = json_disk.get("tags", None)
def delete(self):
"""Delete the disk represented by this :class:`Disk`.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
response = self._connection._delete(
get_url('disk info', name=self._uuid))
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
if response.status_code == 403:
raise LockedDiskException(response.json()['message'])
raise_on_error(response)
def get_archive(self, extension='zip', local=None):
"""Get an archive of this disk's content.
:param str extension: in {'tar', 'tgz', 'zip'},
format of the archive to get
:param str local: name of the file to output to
:rtype: :class:`str`
:returns:
The filename of the retrieved archive.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises ValueError: invalid extension format
"""
response = self._connection._get(
get_url('get disk', name=self._uuid, ext=extension),
stream=True)
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
elif response.status_code == 400:
            raise ValueError('invalid file format : {0}'.format(extension))
else:
raise_on_error(response)
local = local or ".".join([self._uuid, extension])
if os.path.isdir(local):
local = os.path.join(local, ".".join([self._uuid, extension]))
with open(local, 'wb') as f_local:
for elt in response.iter_content():
f_local.write(elt)
return local
def list_files(self):
"""List files on the whole disk.
:rtype: List of :class:`FileInfo`.
:returns: List of the files on the disk.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
self.flush()
response = self._connection._get(
get_url('tree disk', name=self._uuid))
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
return [FileInfo(**f) for f in response.json()]
def directory(self, directory=''):
"""List files in a directory of the disk. Doesn't go through
subdirectories.
:param str directory: path of the directory to inspect.
Must be unix-like.
:rtype: List of :class:`FileInfo`.
:returns: Files in the given directory on the :class:`Disk`.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
.. note::
Paths in results are relative to the *directory* argument.
"""
self.flush()
response = self._connection._get(
get_url('ls disk', name=self._uuid, path=directory))
if response.status_code == 404:
if response.json()['message'] == 'no such disk':
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
return [FileInfo(**f) for f in response.json()]
def sync_directory(self, directory, verbose=False):
"""Synchronize a local directory with the remote disks.
:param str directory: The local directory to use for synchronization
:param bool verbose: Print information about synchronization operations
.. warning::
Local changes are reflected on the server, a file present on the
disk but not in the local directory will be deleted from the disk.
A file present in the directory but not in the disk will be uploaded.
.. note::
The following parameters are used to determine whether
synchronization is required :
* name
* size
* sha1sum
"""
if not directory.endswith('/'):
directory = directory + '/'
filesdict = {}
for root, dirs, files in os.walk(directory):
for file_ in files:
filepath = os.path.join(root, file_)
name = filepath[len(directory) - 1:]
filesdict[name] = filepath
for dir_ in dirs:
filepath = os.path.join(root, dir_)
name = filepath[len(directory) - 1:]
if not name.endswith('/'):
name += '/'
filesdict[name] = filepath
self.sync_files(filesdict, verbose)
def sync_files(self, files, verbose=False, ignore_directories=False):
"""Synchronize files with the remote disks.
:param dict files: Dictionary of synchronized files
:param bool verbose: Print information about synchronization operations
:param bool ignore_directories: Ignore directories when looking for changes
Dictionary key is the remote file path while value is the local file
path.
.. warning::
Local changes are reflected on the server, a file present on the
disk but
not in the local directory will be deleted from the disk.
A file present in the directory but not in the disk will be uploaded.
.. note::
The following parameters are used to determine whether
synchronization is required :
* name
* size
* sha1sum
"""
def generate_file_sha1(filepath, blocksize=2**20):
"""Generate SHA1 from file"""
sha1 = hashlib.sha1()
with open(filepath, "rb") as file_:
while True:
buf = file_.read(blocksize)
if not buf:
break
sha1.update(buf)
return sha1.hexdigest()
def create_qfi(name, filepath):
"""Create a QFI from a file"""
if not name.startswith('/'):
name = '/' + name
mtime = os.path.getmtime(filepath)
dtutc = datetime.datetime.utcfromtimestamp(mtime)
dtutc = dtutc.replace(microsecond=0)
type = 'directory' if os.path.isdir(filepath) else 'file'
            sha1 = generate_file_sha1(filepath) if type == 'file' else 'N/A'
            size = os.stat(filepath).st_size if type == 'file' else 0
qfi = FileInfo(dtutc, name, size, type, sha1)
qfi.filepath = filepath
return qfi
localfiles = []
for name, filepath in files.items():
qfi = create_qfi(name, filepath)
localfiles.append(qfi)
if ignore_directories:
local = set([x for x in localfiles if not x.directory])
remote = set([x for x in self.list_files() if not x.directory])
else:
local = set(localfiles)
remote = set(self.list_files())
adds = local - remote
removes = remote - local
sadds = sorted(adds, key=lambda x: x.sha1sum)
groupedadds = [list(g) for _, g in itertools.groupby(
sadds, lambda x: x.sha1sum)]
for file_ in removes:
renames = [x for x in adds if x.sha1sum == file_.sha1sum and not x.directory and not file_.directory]
if len(renames) > 0:
for dup in renames:
if verbose:
print("Copy", file_.name, "to", dup.name)
self.add_link(file_.name, dup.name)
if verbose:
print("remove ", file_.name)
self.delete_file(file_.name, force=True)
remote = self.list_files()
for entry in groupedadds:
try:
rem = next(x for x in remote if x.sha1sum == entry[0].sha1sum and not x.directory and not entry[0].directory)
if rem.name == entry[0].name:
continue
if verbose:
print("Link:", rem.name, "<-", entry[0].name)
self.add_link(rem.name, entry[0].name)
except StopIteration:
if verbose:
print("Upload:", entry[0].name)
self.add_file(entry[0].filepath, entry[0].name)
if len(entry) > 1: # duplicate files
for link in entry[1:]:
if not link.directory:
if verbose:
print("Link:", entry[0].name, "<-", link.name)
self.add_link(entry[0].name, link.name)
else:
if verbose:
print("Add dir" + link.filepath + " " + str(link.name))
self.add_file(link.filepath, link.name)
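    # Illustrative call (paths are hypothetical): keys are remote paths on the
    # disk, values are local paths, as described in the docstring above:
    #
    #   disk.sync_files({'data/input.txt': '/home/user/input.txt'}, verbose=True)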
def flush(self):
"""Ensure all files added through :meth:`add_file`/:meth:`add_directory`
are on the disk.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises TypeError: trying to write on a R/O disk
:raises IOError: user space quota reached
"""
for thread in self._filethreads.values():
thread.join()
self._filethreads.clear()
for remote, file_ in self._filecache.items():
self._add_file(file_, remote)
self._filecache.clear()
def move(self, source, dest):
"""Move a file or a directory inside a disk.
Missing destination path directories can be created.
Trailing '/' for directories affect behavior.
:param str source: name of the source file
:param str dest: name of the destination file
.. warning::
No clobber on move. If dest exist move will fail.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
data = [
{
"source": source,
"dest": dest
}
]
url = get_url('move disk', name=self._uuid)
response = self._connection._post(url, json=data)
raise_on_error(response)
self.update(True)
def add_link(self, target, linkname):
"""Create link between files on the disk
:param str target: name of the existing file to duplicate
:param str linkname: name of the created file
.. warning::
           File size is counted twice; this method is meant to save upload
           time, not space.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
data = [
{
"target": target,
"linkName": linkname
}
]
url = get_url('link disk', name=self._uuid)
response = self._connection._post(url, json=data)
raise_on_error(response)
self.update(True)
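    # Usage sketch (illustrative; assumes `disk` is an existing, writable Disk
    # instance and the file names are hypothetical):
    #     disk.move("results/run1.txt", "archive/run1.txt")  # no clobber: fails if dest exists
    #     disk.add_link("inputs/big_dataset.bin", "job2/big_dataset.bin")  # cheap duplicate, saves upload time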
def _is_executable(self, file):
try:
return os.access(file.name, os.X_OK)
except IOError:
return False
def add_file(self, local_or_file, remote=None, mode=None, **kwargs):
"""Add a local file or a Python File on the disk.
.. note::
You can also use **disk[remote] = local**
.. warning::
           In non-blocking mode, you may receive an exception during another
           operation (like :meth:`flush`).
:param local_or_file: path of the local file or an opened Python File
:type local_or_file: str or File
:param str remote: name of the remote file
(defaults to *local_or_file*)
:param mode: mode with which to add the file
(defaults to :attr:`~UploadMode.blocking` if not set by
:attr:`Disk.add_mode`)
:type mode: :class:`UploadMode`
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises TypeError: trying to write on a R/O disk
:raises IOError: user space quota reached
:raises ValueError: file could not be created
"""
mode = mode or self._add_mode
if isinstance(local_or_file, str):
if os.path.isdir(local_or_file):
dest = remote or os.path.basename(local_or_file)
url = get_url('update file', name=self._uuid, path=os.path.dirname(dest))
response = self._connection._post(
url,
)
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
return
else:
file_ = open(local_or_file, 'rb')
else:
file_ = local_or_file
dest = remote or os.path.basename(file_.name)
if isinstance(dest, FileInfo):
dest = dest.name
# Ensure 2 threads do not write on the same file
previous = self._filethreads.get(dest)
if previous is not None:
previous.join()
del self._filethreads[dest]
# Do not delay a file added differently
if dest in self._filecache:
self._filecache[dest].close()
del self._filecache[dest]
if mode is UploadMode.blocking:
return self._add_file(file_, dest, **kwargs)
elif mode is UploadMode.lazy:
self._filecache[dest] = file_
else:
thread = threading.Thread(None, self._add_file, dest, (file_, dest), **kwargs)
thread.start()
self._filethreads[dest] = thread
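    # Upload-mode sketch (illustrative; `disk` is an existing writable Disk and
    # the local paths are hypothetical):
    #     disk.add_file("input.txt")                                   # blocking (default)
    #     disk.add_file("big.bin", "data/big.bin", mode=UploadMode.background)
    #     disk.add_file("late.txt", mode=UploadMode.lazy)
    #     disk.flush()  # make sure background/lazy uploads have reached the disk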
def _add_file(self, file_, dest, **kwargs):
"""Add a file on the disk.
:param File file_: an opened Python File
:param str dest: name of the remote file (defaults to filename)
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
"""
try:
file_.seek(0)
except AttributeError:
pass
if dest.endswith('/'):
dest = os.path.join(dest, os.path.basename(file_.name))
url = get_url('update file', name=self._uuid, path=os.path.dirname(dest))
try:
# If requests_toolbelt is installed, we can use its
# MultipartEncoder to stream the upload and save memory overuse
from requests_toolbelt import MultipartEncoder # noqa
m = MultipartEncoder(
fields={'filedata': (os.path.basename(dest), file_)})
response = self._connection._post(
url,
data=m,
headers={'Content-Type': m.content_type})
except ImportError:
response = self._connection._post(
url,
files={'filedata': (os.path.basename(dest), file_)})
if response.status_code == 404:
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
# Update file settings
if 'executable' not in kwargs:
kwargs['executable'] = self._is_executable(file_)
self.update_file_settings(dest, **kwargs)
self.update(True)
def add_directory(self, local, remote="", mode=None):
""" Add a directory to the disk. Does not follow symlinks.
File hierarchy is preserved.
.. note::
You can also use **disk[remote] = local**
.. warning::
           In non-blocking mode, you may receive an exception during another
           operation (like :meth:`flush`).
:param str local: path of the local directory to add
:param str remote: path of the directory on remote node
(defaults to *local*)
:param mode: the mode with which to add the directory
(defaults to :attr:`~Disk.add_mode`)
:type mode: :class:`UploadMode`
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises ValueError: one or more file(s) could not be created
:raises IOError: not a valid directory
"""
if not os.path.isdir(local):
raise IOError("Not a valid directory")
if not remote.endswith('/'):
remote += '/'
for dirpath, _, files in os.walk(local):
remote_loc = dirpath.replace(local, remote, 1)
for filename in files:
self.add_file(os.path.join(dirpath, filename),
posixpath.join(remote_loc, filename), mode)
def get_file_iterator(self, remote, chunk_size=4096, progress=None):
"""Get a file iterator from the disk.
.. note::
This function is a generator, and thus can be used in a for loop
:param remote: the name of the remote file or a QFileInfo
:type remote: str or FileInfo
:param int chunk_size: Size of chunks to be yield
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises ValueError: no such file
"""
progressbar = None
def _cb(c, total, remote):
c = max(0, min(c, 100))
progressbar.update(c)
if isinstance(remote, FileInfo):
remote = remote.name
# Ensure file is done uploading
pending = self._filethreads.get(remote)
if pending is not None:
pending.join()
if remote in self._filecache:
try:
self._filecache[remote].seek(0)
except AttributeError:
pass
while True:
chunk = self._filecache[remote].read(chunk_size)
if not chunk:
break
yield chunk
else:
response = self._connection._get(
get_url('update file', name=self._uuid, path=remote),
stream=True)
if response.status_code == 404:
if response.json()['message'] == "No such disk":
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
total_length = float(response.headers.get('content-length'))
if progress is not None:
if progress is True:
progress = _cb
try:
widgets = [
remote,
' ', Percentage(),
' ', AnimatedMarker(),
' ', Bar(),
' ', AdaptiveETA(),
' ', AdaptiveTransferSpeed(unit='B')
]
progressbar = ProgressBar(widgets=widgets, max_value=total_length)
except Exception as e:
print(str(e))
progress = None
elif progress is False:
progress = None
count = 0
for chunk in response.iter_content(chunk_size):
count += len(chunk)
if progress is not None:
progress(count, total_length, remote)
yield chunk
if progress:
progressbar.finish()
def get_all_files(self, output_dir, progress=None):
"""Get all files the disk.
:param str output_dir: local directory for the retrieved files.
:param progress: can be a callback (read,total,filename) or True to display a progress bar
:type progress: bool or function(float, float, str)
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
        .. warning:: Will overwrite *output_dir* content.
"""
for file_info in self:
outpath = os.path.normpath(file_info.name.lstrip('/'))
self.get_file(file_info, os.path.join(output_dir,
outpath), progress)
def get_file(self, remote, local=None, progress=None):
"""Get a file from the disk.
.. note::
You can also use **disk[file]**
:param remote: the name of the remote file or a QFileInfo
:type remote: str or FileInfo
:param str local: local name of the retrieved file (defaults to *remote*)
:param progress: can be a callback (read,total,filename) or True to display a progress bar
:type progress: bool or function(float, float, str)
:rtype: :class:`str`
:returns: The name of the output file.
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises ValueError: no such file
(:exc:`KeyError` with disk[file] syntax)
"""
def make_dirs(_local):
"""Make directory if needed"""
directory = os.path.dirname(_local)
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
if isinstance(remote, FileInfo):
if remote.directory:
return
remote = remote.name
if local is None:
local = os.path.basename(remote)
if os.path.isdir(local):
local = os.path.join(local, os.path.basename(remote))
make_dirs(local)
if os.path.isdir(local):
return
with open(local, 'wb') as f_local:
for chunk in self.get_file_iterator(remote, progress=progress):
f_local.write(chunk)
return local
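    # Retrieval sketch (illustrative; the file names are hypothetical):
    #     disk.get_file("results/out.csv", "local_out.csv", progress=True)
    #     disk.get_all_files("./results_dir")  # mirror the whole disk locally
    #     for chunk in disk.get_file_iterator("big.bin", chunk_size=1 << 20):
    #         pass  # stream the content without keeping it all in memory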
def update_file_settings(self, remote_path, **kwargs):
settings = dict(**kwargs)
if len(settings) < 1:
return
response = self._connection._put(
get_url('update file', name=self._uuid, path=remote_path),
json=settings)
if response.status_code == 404:
if response.json()['message'] == "No such disk":
raise MissingDiskException(response.json()['message'])
raise_on_error(response)
def delete_file(self, remote, force=False):
"""Delete a file from the disk.
.. note::
You can also use **del disk[file]**
:param str remote: the name of the remote file
:param bool force: ignore missing files
:raises qarnot.exceptions.MissingDiskException: the disk is not on the server
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
:raises ValueError: no such file
(:exc:`KeyError` with disk['file'] syntax)
"""
dest = remote.name if isinstance(remote, FileInfo) else remote
# Ensure 2 threads do not write on the same file
pending = self._filethreads.get(dest)
if pending is not None:
pending.join()
# Remove the file from local cache if present
if dest in self._filecache:
self._filecache[dest].close()
del self._filecache[dest]
# The file is not present on the disk so just return
return
response = self._connection._delete(
get_url('update file', name=self._uuid, path=dest))
if response.status_code == 404:
if response.json()['message'] == "No such disk":
raise MissingDiskException(response.json()['message'])
if force and response.status_code == 404:
pass
else:
raise_on_error(response)
self.update(True)
def commit(self):
"""Replicate local changes on the current object instance to the REST API
:raises qarnot.exceptions.QarnotGenericException: API general error, see message for details
:raises qarnot.exceptions.UnauthorizedException: invalid credentials
.. note:: When updating disks' properties, auto update will be disabled until commit is called.
"""
data = {
"description": self._description,
"locked": self._locked
}
if self._tags is not None:
data["tags"] = self._tags
self._auto_update = self._last_auto_update_state
resp = self._connection._put(get_url('disk info', name=self._uuid),
json=data)
if resp.status_code == 404:
raise MissingDiskException(resp.json()['message'])
raise_on_error(resp)
self.update(True)
@property
def uuid(self):
""":type: :class:`str`
:getter: Returns this disk's uuid
The disk's UUID."""
return self._uuid
@property
def tags(self):
""":type: :class:list(`str`)
:getter: Returns this disk's tags
:setter: Sets this disk's tags
Custom tags.
"""
if self._auto_update:
self.update()
return self._tags
@tags.setter
def tags(self, value):
self._tags = value
self._auto_update = False
@property
def add_mode(self):
""":type: :class:`UploadMode`
:getter: Returns this disk's add mode
:setter: Sets this disk's add mode
Default mode for adding files.
"""
return self._add_mode
@add_mode.setter
def add_mode(self, value):
"""Add mode setter"""
self._add_mode = value
@property
def description(self):
""":type: :class:`str`
:getter: Returns this disk's description
:setter: Sets this disk's description
The disk's description.
"""
if self._auto_update:
self.update()
return self._description
@description.setter
def description(self, value):
"""Description setter"""
self._description = value
self._auto_update = False
@property
def file_count(self):
""":type: :class:`int`
:getter: Returns this disk's file count
The number of files on the disk.
"""
if self._auto_update:
self.update()
return self._file_count
@property
def used_space_bytes(self):
""":type: :class:`int`
:getter: Returns this disk's used space in bytes
The total space used on the disk in bytes.
"""
if self._auto_update:
self.update()
return self._used_space_bytes
@property
def locked(self):
""":type: :class:`bool`
:getter: Returns this disk's locked state
:setter: Sets this disk's locked state
        The disk's lock state. If True, prevents the disk from being removed
by a subsequent :meth:`qarnot.connection.Connection.create_task` with *force*
set to True.
"""
if self._auto_update:
self.update()
return self._locked
@locked.setter
def locked(self, value):
"""Change disk's lock state."""
self._locked = value
self._auto_update = False
@property
def auto_update(self):
""":type: :class:`bool`
:getter: Returns this disk's auto update state
:setter: Sets this disk's auto update state
Auto update state, default to True
        When auto update is disabled, properties will always return the cached value
        for the object, and a call to :meth:`update` will be required to get the latest values from the REST API.
"""
return self._auto_update
@auto_update.setter
def auto_update(self, value):
"""Setter for auto_update feature
"""
self._auto_update = value
self._last_auto_update_state = self._auto_update
def __str__(self):
return (
("[LOCKED] - " if self.locked else "[NON LOCKED] - ") +
self.uuid + " - " + self.description
)
# Operators
def __getitem__(self, filename):
"""x.__getitem__(y) <==> x[y]"""
try:
return self.get_file(filename)
except ValueError:
raise KeyError(filename)
def __setitem__(self, remote, filename):
"""x.__setitem__(i, y) <==> x[i]=y"""
if os.path.isdir(filename):
return self.add_directory(filename, remote)
return self.add_file(filename, remote)
def __delitem__(self, filename):
"""x.__delitem__(y) <==> del x[y]"""
try:
return self.delete_file(filename)
except ValueError:
raise KeyError(filename)
def __contains__(self, item):
"""D.__contains__(k) -> True if D has a key k, else False"""
if isinstance(item, FileInfo):
item = item.name
return item in [f.name for f in self.list_files()]
def __iter__(self):
"""x.__iter__() <==> iter(x)"""
return iter(self.list_files())
def __eq__(self, other):
"""x.__eq__(y) <==> x == y"""
if isinstance(other, self.__class__):
return self._uuid == other._uuid
return False
def __ne__(self, other):
"""x.__ne__(y) <==> x != y"""
return not self.__eq__(other)
# Utility Classes
class FileInfo(object):
"""Information about a file."""
def __init__(self, lastChange, name, size, fileFlags, sha1Sum):
self.lastchange = None
""":type: :class:`datetime`
UTC Last change time of the file on the :class:`Disk`."""
if isinstance(lastChange, datetime.datetime):
self.lastchange = lastChange
else:
self.lastchange = datetime.datetime.strptime(lastChange,
"%Y-%m-%dT%H:%M:%SZ")
self.name = name
""":type: :class:`str`
Path of the file on the :class:`Disk`."""
self.size = size
""":type: :class:`int`
Size of the file on the :class:`Disk` (in Bytes)."""
self.directory = fileFlags == 'directory'
""":type: :class:`bool`
Is the file a directory."""
self.sha1sum = sha1Sum
""":type: :class:`str`
SHA1 Sum of the file."""
if not self.directory:
self.executable = fileFlags == 'executableFile'
""":type: :class:`bool`
Is the file executable."""
self.filepath = None # Only for sync
def __repr__(self):
template = 'FileInfo(lastchange={0}, name={1}, size={2}, '\
'directory={3}, sha1sum={4})'
return template.format(self.lastchange, self.name, self.size,
self.directory, self.sha1sum)
def __eq__(self, other):
return (self.name == other.name and
self.size == other.size and
self.directory == other.directory and
self.sha1sum == other.sha1sum)
def __hash__(self):
return (hash(self.name) ^
hash(self.size) ^
hash(self.directory) ^
hash(self.sha1sum))
class UploadMode(object):
"""How to add files on a :class:`Disk`."""
blocking = 0
"""Call to :func:`~Disk.add_file` :func:`~Disk.add_directory`
or blocks until file is done uploading."""
background = 1
"""Launch a background thread for uploading."""
lazy = 2
"""Actual uploading is made by the :func:`~Disk.flush` method call."""
|
lgpl-2.1
| -6,521,706,549,595,188,000 | 34.785778 | 125 | 0.580938 | false |
blaze/dask
|
dask/array/random.py
|
1
|
19969
|
import numbers
import warnings
from itertools import product
from numbers import Integral
from operator import getitem
import numpy as np
from .core import (
normalize_chunks,
Array,
slices_from_chunks,
asarray,
broadcast_shapes,
broadcast_to,
)
from .creation import arange
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from ..utils import ignoring, random_state_data, derived_from, skip_doctest
def doc_wraps(func):
""" Copy docstring from one function to another """
warnings.warn(
"dask.array.random.doc_wraps is deprecated and will be removed in a future version",
FutureWarning,
)
def _(func2):
if func.__doc__ is not None:
func2.__doc__ = skip_doctest(func.__doc__)
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
This object contains state to deterministically generate pseudo-random
numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Parameters
----------
seed: Number
Object to pass to RandomState to serve as deterministic seed
RandomState: Callable[seed] -> RandomState
A callable that, when provided with a ``seed`` keyword provides an
object that operates identically to ``np.random.RandomState`` (the
default). This might also be a function that returns a
``randomgen.RandomState``, ``mkl_random``, or
``cupy.random.RandomState`` object.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([10.01867852, 10.04812289, 9.89649746])
See Also
--------
np.random.RandomState
"""
def __init__(self, seed=None, RandomState=None):
self._numpy_state = np.random.RandomState(seed)
self._RandomState = RandomState
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(
self, funcname, *args, size=None, chunks="auto", extra_chunks=(), **kwargs
):
"""Wrap numpy random function to produce dask.array random function
extra_chunks should be a chunks tuple to append to the end of chunks
"""
if size is not None and not isinstance(size, (tuple, list)):
size = (size,)
args_shapes = {ar.shape for ar in args if isinstance(ar, (Array, np.ndarray))}
        args_shapes = args_shapes.union(
            {ar.shape for ar in kwargs.values() if isinstance(ar, (Array, np.ndarray))}
        )
shapes = list(args_shapes)
if size is not None:
shapes.extend([size])
# broadcast to the final size(shape)
size = broadcast_shapes(*shapes)
chunks = normalize_chunks(
chunks,
size, # ideally would use dtype here
dtype=kwargs.get("dtype", np.float64),
)
slices = slices_from_chunks(chunks)
def _broadcast_any(ar, shape, chunks):
if isinstance(ar, Array):
return broadcast_to(ar, shape).rechunk(chunks)
if isinstance(ar, np.ndarray):
return np.ascontiguousarray(np.broadcast_to(ar, shape))
# Broadcast all arguments, get tiny versions as well
# Start adding the relevant bits to the graph
dsk = {}
dsks = []
lookup = {}
small_args = []
dependencies = []
for i, ar in enumerate(args):
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[i] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[i] = name
dsk[name] = res
small_args.append(ar[tuple(0 for _ in ar.shape)])
else:
small_args.append(ar)
small_kwargs = {}
for key, ar in kwargs.items():
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[key] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[key] = name
dsk[name] = res
small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]
else:
small_kwargs[key] = ar
sizes = list(product(*chunks))
seeds = random_state_data(len(sizes), self._numpy_state)
token = tokenize(seeds, size, chunks, args, kwargs)
name = "{0}-{1}".format(funcname, token)
keys = product(
[name], *([range(len(bd)) for bd in chunks] + [[0]] * len(extra_chunks))
)
blocks = product(*[range(len(bd)) for bd in chunks])
vals = []
for seed, size, slc, block in zip(seeds, sizes, slices, blocks):
arg = []
for i, ar in enumerate(args):
if i not in lookup:
arg.append(ar)
else:
if isinstance(ar, Array):
dependencies.append(ar)
arg.append((lookup[i],) + block)
else: # np.ndarray
arg.append((getitem, lookup[i], slc))
kwrg = {}
for k, ar in kwargs.items():
if k not in lookup:
kwrg[k] = ar
else:
if isinstance(ar, Array):
dependencies.append(ar)
kwrg[k] = (lookup[k],) + block
else: # np.ndarray
kwrg[k] = (getitem, lookup[k], slc)
vals.append(
(_apply_random, self._RandomState, funcname, seed, size, arg, kwrg)
)
meta = _apply_random(
self._RandomState,
funcname,
seed,
(0,) * len(size),
small_args,
small_kwargs,
)
dsk.update(dict(zip(keys, vals)))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return Array(graph, name, chunks + extra_chunks, meta=meta)
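    # Note on extra_chunks (illustrative): distributions whose samples are
    # vectors pass a trailing chunk axis through _wrap.  For example,
    # multinomial below uses extra_chunks=((len(pvals),),), so size=(4,) with
    # three pvals yields an Array whose chunks are ((4,), (3,)).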
@derived_from(np.random.RandomState, skipblocks=1)
def beta(self, a, b, size=None, chunks="auto", **kwargs):
return self._wrap("beta", a, b, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def chisquare(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("chisquare", df, size=size, chunks=chunks, **kwargs)
with ignoring(AttributeError):
@derived_from(np.random.RandomState, skipblocks=1)
def choice(self, a, size=None, replace=True, p=None, chunks="auto"):
dependencies = []
# Normalize and validate `a`
if isinstance(a, Integral):
# On windows the output dtype differs if p is provided or
# absent, see https://github.com/numpy/numpy/issues/9867
dummy_p = np.array([1]) if p is not None else p
dtype = np.random.choice(1, size=(), p=dummy_p).dtype
len_a = a
if a < 0:
raise ValueError("a must be greater than 0")
else:
a = asarray(a)
a = a.rechunk(a.shape)
dtype = a.dtype
if a.ndim != 1:
raise ValueError("a must be one dimensional")
len_a = len(a)
dependencies.append(a)
a = a.__dask_keys__()[0]
# Normalize and validate `p`
if p is not None:
if not isinstance(p, Array):
# If p is not a dask array, first check the sum is close
# to 1 before converting.
p = np.asarray(p)
if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0):
raise ValueError("probabilities do not sum to 1")
p = asarray(p)
else:
p = p.rechunk(p.shape)
if p.ndim != 1:
raise ValueError("p must be one dimensional")
if len(p) != len_a:
raise ValueError("a and p must have the same size")
dependencies.append(p)
p = p.__dask_keys__()[0]
if size is None:
size = ()
elif not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size, dtype=np.float64)
if not replace and len(chunks[0]) > 1:
err_msg = (
"replace=False is not currently supported for "
"dask.array.choice with multi-chunk output "
"arrays"
)
raise NotImplementedError(err_msg)
sizes = list(product(*chunks))
state_data = random_state_data(len(sizes), self._numpy_state)
name = "da.random.choice-%s" % tokenize(
state_data, size, chunks, a, replace, p
)
keys = product([name], *(range(len(bd)) for bd in chunks))
dsk = {
k: (_choice, state, a, size, replace, p)
for k, state, size in zip(keys, state_data, sizes)
}
graph = HighLevelGraph.from_collections(
name, dsk, dependencies=dependencies
)
return Array(graph, name, chunks, dtype=dtype)
# @derived_from(np.random.RandomState, skipblocks=1)
# def dirichlet(self, alpha, size=None, chunks="auto"):
@derived_from(np.random.RandomState, skipblocks=1)
def exponential(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("exponential", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def f(self, dfnum, dfden, size=None, chunks="auto", **kwargs):
return self._wrap("f", dfnum, dfden, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gamma(self, shape, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gamma", shape, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def geometric(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("geometric", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gumbel", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks="auto", **kwargs):
return self._wrap(
"hypergeometric", ngood, nbad, nsample, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("laplace", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("logistic", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("lognormal", mean, sigma, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logseries(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("logseries", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def multinomial(self, n, pvals, size=None, chunks="auto", **kwargs):
return self._wrap(
"multinomial",
n,
pvals,
size=size,
chunks=chunks,
extra_chunks=((len(pvals),),),
)
@derived_from(np.random.RandomState, skipblocks=1)
def negative_binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("negative_binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_chisquare(self, df, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_chisquare", df, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_f", dfnum, dfden, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def normal(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("normal", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def pareto(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("pareto", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def permutation(self, x):
from .slicing import shuffle_slice
if isinstance(x, numbers.Number):
x = arange(x, chunks="auto")
index = np.arange(len(x))
self._numpy_state.shuffle(index)
return shuffle_slice(x, index)
@derived_from(np.random.RandomState, skipblocks=1)
def poisson(self, lam=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("poisson", lam, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def power(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("power", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def randint(self, low, high=None, size=None, chunks="auto", dtype="l", **kwargs):
return self._wrap(
"randint", low, high, size=size, chunks=chunks, dtype=dtype, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_integers(self, low, high=None, size=None, chunks="auto", **kwargs):
return self._wrap(
"random_integers", low, high, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_sample(self, size=None, chunks="auto", **kwargs):
return self._wrap("random_sample", size=size, chunks=chunks, **kwargs)
random = random_sample
@derived_from(np.random.RandomState, skipblocks=1)
def rayleigh(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("rayleigh", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_cauchy(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_cauchy", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_exponential(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_exponential", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_gamma(self, shape, size=None, chunks="auto", **kwargs):
return self._wrap("standard_gamma", shape, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_normal(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_normal", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_t(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("standard_t", df, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def tomaxint(self, size=None, chunks="auto", **kwargs):
return self._wrap("tomaxint", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def triangular(self, left, mode, right, size=None, chunks="auto", **kwargs):
return self._wrap(
"triangular", left, mode, right, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def uniform(self, low=0.0, high=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("uniform", low, high, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def vonmises(self, mu, kappa, size=None, chunks="auto", **kwargs):
return self._wrap("vonmises", mu, kappa, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def wald(self, mean, scale, size=None, chunks="auto", **kwargs):
return self._wrap("wald", mean, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def weibull(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("weibull", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def zipf(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("zipf", a, size=size, chunks=chunks, **kwargs)
def _choice(state_data, a, size, replace, p):
state = np.random.RandomState(state_data)
return state.choice(a, size=size, replace=replace, p=p)
def _apply_random(RandomState, funcname, state_data, size, args, kwargs):
"""Apply RandomState method with seed"""
if RandomState is None:
RandomState = np.random.RandomState
state = RandomState(state_data)
func = getattr(state, funcname)
return func(*args, size=size, **kwargs)
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
if hasattr(_state, "choice"):
choice = _state.choice
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
multinomial = _state.multinomial
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
permutation = _state.permutation
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
randint = _state.randint
random_integers = _state.random_integers
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
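# Module-level usage sketch (illustrative): the names above are bound methods of
# a shared RandomState, so they take the same size/chunks keywords shown in the
# class docstring, e.g.:
#     >>> import dask.array as da
#     >>> x = da.random.normal(10, 0.1, size=(10000, 10000), chunks=(1000, 1000))
#     >>> x.mean().compute()  # doctest: +SKIP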
|
bsd-3-clause
| -1,272,867,540,019,400,200 | 37.328215 | 92 | 0.595173 | false |
tommyjasmin/polar2grid
|
py/polar2grid/polar2grid/ninjo/__init__.py
|
1
|
1615
|
#!/usr/bin/env python
# encoding: utf-8
"""polar2grid NinJo tiff backend subpackage
:author: David Hoese (davidh)
:contact: [email protected]
:organization: Space Science and Engineering Center (SSEC)
:copyright: Copyright (c) 2013 University of Wisconsin SSEC. All rights reserved.
:date: Jan 2013
:license: GNU GPLv3
Copyright (C) 2013 Space Science and Engineering Center (SSEC),
University of Wisconsin-Madison.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This file is part of the polar2grid software package. Polar2grid takes
satellite observation data, remaps it, and writes it to a file format for
input into another program.
Documentation: http://www.ssec.wisc.edu/software/polar2grid/
Written by David Hoese January 2013
University of Wisconsin-Madison
Space Science and Engineering Center
1225 West Dayton Street
Madison, WI 53706
[email protected]
"""
__docformat__ = "restructuredtext en"
from .ninjo_backend import Backend
|
gpl-3.0
| 7,935,579,677,415,002,000 | 35.704545 | 84 | 0.747368 | false |
jinzishuai/learn2deeplearn
|
curveFitNN/tf_fit2.py
|
1
|
2150
|
#!/usr/bin/python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
sess = tf.InteractiveSession()
######################### Define the model ########################
## define the dimensions
nn_input_dim = 1
nn_output_dim = 1
nn_hdim = 3 #Hidden node dimension
# Create 100 phony x, y data points in NumPy, y = x**3 + x**2 + x
num_examples = 100
x = tf.placeholder(tf.float32, [None, nn_input_dim])
y_ = tf.placeholder(tf.float32, [None, nn_output_dim])
x_data = np.random.rand(num_examples, nn_input_dim).astype(np.float32)
y_data = x_data**3 + x_data**2 + x_data
plt.plot(x_data, y_data, 'ro')
W1 = tf.Variable(tf.zeros([nn_input_dim, nn_hdim]))
b1 = tf.Variable(tf.zeros([nn_hdim]))
W2 = tf.Variable(tf.zeros( [nn_hdim, nn_output_dim]))
b2 = tf.Variable(tf.zeros([nn_output_dim]))
## define forward propogation
z1 = tf.matmul(x, W1) + b1
#for a list of available tensor flow activation function, see https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html
#a1 = tf.tanh(z1) #bad
a1 = tf.sigmoid(z1) #good
#a1 = tf.nn.relu(z1) #bad
#a1 = tf.nn.relu6(z1)#bad
#a1 = tf.nn.elu(z1) #bad
#a1 = tf.nn.softplus(z1) #good
#a1 = tf.nn.softsign(z1) #bad
#a1 = tf.nn.dropout(z1, 0.1) #bad
y = tf.matmul(a1, W2) + b2
## Define loss and optimizer
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
######################### End of model definition ########################
tf.initialize_all_variables().run()
NSteps=10000
NReport = NSteps/10
for step in range(NSteps+1):
train_step.run({x:x_data, y_:y_data})
if step % NReport == 0:
print(step, loss.eval({x:x_data, y_:y_data}))
# # Test trained model
#correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels})
x_test = np.linspace(-1.0, 1.0, 1000).reshape(1000, 1)
y_test = x_test**3 + x_test**2 + x_test
plt.plot(x_test, y_test, 'g-')
plt.plot(x_test, y.eval({x:x_test}) , 'b-')
plt.show()
|
gpl-3.0
| 419,065,986,394,510,600 | 31.575758 | 127 | 0.64 | false |
smok-serwis/coolamqp
|
compile_definitions/xml_tags.py
|
1
|
3460
|
# coding=UTF-8
from __future__ import print_function, absolute_import, division
import copy
import logging
import math
from compile_definitions.xml_fields import *
from coolamqp.framing.base import BASIC_TYPES, DYNAMIC_BASIC_TYPES
logger = logging.getLogger(__name__)
def bool_int(x):
return bool(int(x))
__all__ = [
'Domain', 'Method', 'Class', 'Field', 'Constant'
]
class BaseObject(object):
FIELDS = []
# tuples of (xml name, field name, type, (optional) default value)
def __init__(self, elem):
for ft in self.FIELDS:
ft.set(self, elem)
@classmethod
def findall(cls, xml):
return [cls(p) for p in xml.findall(cls.NAME)]
def _replace(self, **kwargs):
c = copy.copy(self)
c.__dict__.update(**kwargs)
return c
class Constant(BaseObject):
NAME = 'constant'
FIELDS = [
_name,
SimpleField('value', int),
ValueField('class', 'kind', default=''),
_docs,
]
class Field(BaseObject):
NAME = 'field'
FIELDS = [
_name,
ValueField(('domain', 'type'), 'type', str),
SimpleField('label', default=None),
SimpleField('reserved', bool_int, default=0),
ComputedField('basic_type', lambda elem: elem.attrib.get('type',
'') == elem.attrib.get(
'name', '')),
_docs
]
class Domain(BaseObject):
NAME = 'domain'
FIELDS = [
_name,
SimpleField('type'),
ComputedField('elementary',
lambda a: a.attrib['type'] == a.attrib['name'])
]
class Method(BaseObject):
NAME = 'method'
FIELDS = [
_name,
SimpleField('synchronous', bool_int, default=False),
SimpleField('index', int),
SimpleField('label', default=None),
_docs,
ChildField('fields', 'field', Field),
ChildField('response', 'response', lambda e: e.attrib['name']),
ChildField('sent_by_client', 'chassis',
lambda e: e.attrib.get('name', '') == 'client',
post_exec=any),
ChildField('sent_by_server', 'chassis',
lambda e: e.attrib.get('name', '') == 'server',
post_exec=any),
ChildField('constant', 'field', lambda e: Field(e).reserved,
post_exec=all)
]
def get_static_body(self): # only arguments part
body = []
bits = 0
for field in self.fields:
if bits > 0 and field.basic_type != 'bit':
                body.append(b'\x00' * int(math.ceil(bits / 8)))
bits = 0
if field.basic_type == 'bit':
bits += 1
else:
body.append(eval(BASIC_TYPES[field.basic_type][2]))
return b''.join(body)
def is_static(self, domain_to_type=None): # is size constant?
return not any(
field.basic_type in DYNAMIC_BASIC_TYPES for field in self.fields)
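    # Worked example for get_static_body (illustrative, hypothetical field list):
    # with fields typed (bit, bit, octet), the two bits only increment `bits`;
    # the octet then flushes ceil(2/8) = 1 padding byte b'\x00' before its own
    # default bytes from BASIC_TYPES.  Trailing bits with no non-bit field after
    # them are not flushed by this method.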
_cls_method_sortkey = lambda m: (m.name.strip('-')[0], -len(m.response))
_cls_method_postexec = lambda q: sorted(q, key=_cls_method_sortkey)
class Class(BaseObject):
NAME = 'class'
FIELDS = [
_name,
SimpleField('index', int),
_docs_with_label,
ChildField('methods', 'method', Method, post_exec= \
_cls_method_postexec),
ChildField('properties', 'field', Field)
]
|
mit
| 3,059,699,210,150,156,000 | 25.821705 | 89 | 0.539306 | false |
algarecu/trollslayer
|
src/gt/groundtruth_reader.py
|
1
|
21735
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2017
# Álvaro García-Recuero, [email protected]
#
# This file is part of the Trollslayer framework
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.
"""
Ground-truth reader
Created on '26/05/15'
__author__='algarecu'
__email__='[email protected]'
"""
import getch
import textwrap
import os
import sys
import argparse
import traceback
import src.utils.colors_function as colors
import src.mining.database as db
from sqlalchemy import func, Table
from sqlalchemy.sql import select, and_, desc
from src.mining.crawler_twitter import CrawlerTwitter
from src.mining.database import engine, gt_tweets, reviewers, tweet_ranks, cursor_gt, database
from src.mining.database import tweets, replies
from colorama import init
init( strip=not sys.stdout.isatty( ) ) # strip colors if stdout is redirected
from termcolor import cprint, colored
from pyfiglet import figlet_format
from src.utils.get_api_keys import get_api_keys as gk
text_guidelines = 'To mark a tweet as abuse, we ask you to read the JTRIG techniques for online HUMINT Operations.\n'
header = "\n#### JTRIG 4 D's: Deny, Disrupt, Degrade or Deceive: \n\n"
bullets = "- Deny: encouraging self-harm to others users, promoting violence (direct or indirect), terrorism or \n" \
"similar activities.\n\n" + \
"- Disrupt: distracting provocations, denial-of-service, flooding with messages, promote abuse.\n\n" + \
"- Degrade: disclosing personal and private data of others without their approval as to harm their public \n" \
"image/reputation.\n\n" + \
"- Deceive: supplanting a known user identity (impersonation) for influencing other users behavior and \n" \
"activities, including assuming false identities (but not pseudonyms).\n\n"
example_tweet = "\nTweet:"
example_abuse = "\tI retract my awful statement of #XXXX people with batman/anime/Sin City avatars deserve death.\n" \
"\tI really meant ”frozen in time forever”. \n\n"
login = "Please enter your id below, choose something unique and that you can remember (annotations are grouped by id):"
login_warning = "If you have already annotated data, please reuse your unique identifier to continue annotations"
login_exit = "To exit: Ctrl + C"
tweet = colored(example_tweet, 'magenta')
abuse = colored(example_abuse, 'magenta')
warning = colored(login_warning, 'red', attrs=['blink'])
def cls_next_tweet():
"""
Function to reprint the header of the tool
@return:
"""
os.system(['clear', 'cls'][os.name == 'nt'])
cprint(header, "red")
cprint(bullets)
def cls():
"""
Function to reprint the header of the tool
@return:
"""
os.system(['clear', 'cls'][os.name == 'nt'])
# cprint(header, "red")
# cprint(bullets)
def print_banner():
"""
Print TrollSlayer header
:return:
"""
cls()
cprint(figlet_format('TrollSlayer', font='basic', width='90'), 'grey', attrs=['bold', 'concealed'], end='\n')
# cprint(figlet_format('Welcome, happy troll hunting', font='thin'), 'grey', attrs=['bold'])
cprint(text_guidelines)
cprint(header, "red")
cprint(bullets)
cprint('Abusive tweet matching Deny', 'red', attrs=['underline'])
cprint(tweet + abuse)
cprint(login)
cprint(warning)
cprint(login_exit)
def anonimize_users(data):
"""
Function to remove user ids from tweets we annotate
@param data: tweet text
@return: curated text
"""
words = data.split( )
for k, v in enumerate( words ):
if '@' in v:
words[k] = '@user'
data_curated = ' '.join(str(elem) for elem in words)
return data_curated
def print_replies(data, author, crawler):
"""
Function to get replies to the current tweet being annotated
@return:
"""
if data > 0:
for row in data:
tweet_id = unicode(row['tweet_id']).encode('utf-8')
id_reply = unicode(row['in_reply_to_tweet_id']).encode('utf-8')
user_id_reply = unicode(row['tweet_userid'])
text_reply = unicode(row['tweet_text']).encode('utf-8')
text_reply = anonimize_users(text_reply)
reply_date = row['tweet_creation']
if reply_date is None:
full_tweet = crawler.show_status( id=tweet_id )
crawler.handle_rate_limit('statuses', 'statuses/show/:id' )
reply_date = crawler.convert_to_datetime(full_tweet['created_at'])
db.database.updateReplyDate(reply_date, tweet_id)
if id != id_reply:
if author == user_id_reply:
print >> sys.stdout, '{.FGMAGENTA}'.format(colors.bcolors) + str(reply_date) \
+ " ----> self-reply: " + '{.ENDC}'.format( colors.bcolors), text_reply
else:
print >> sys.stdout, '{.FGMAGENTA}'.format(colors.bcolors) + str(reply_date) \
+ " ----> reply: " + '{.ENDC}'.format(colors.bcolors), text_reply
def print_in_reply_to(data, crawler):
"""
Function to get source tweet if current tweet is a in-reply-to.
@return:
"""
if data:
# id of the in_reply_to
id_tweet = unicode( data['tweet_id'] ).encode( 'utf-8' )
id_tweet_date = data['tweet_creation']
if id_tweet_date is None:
full_in_reply = crawler.show_status( id=id_tweet )
id_tweet_date = crawler.convert_to_datetime( full_in_reply['created_at'] )
l1_reply = unicode( data['tweet_text'] ).encode('utf-8')
l1_reply = anonimize_users(l1_reply)
print >> sys.stdout, '{.FGMAGENTA}'.format( colors.bcolors ) + str(id_tweet_date) \
+ " ----> in_reply_to:" + '{.ENDC}'.format(colors.bcolors), l1_reply
def print_bordered_text(text, tweet_date):
"""
Function to print tweet with border
:param date: the tweet date
:param text: the tweet text
:return:
"""
maxlen = max(len(s) for s in text)
colwidth = maxlen + 2
print '+' + '-' * colwidth + '+'
    for s in text:
print '{.FGMAGENTA}'.format(colors.bcolors) + str(tweet_date) + " :\t" + \
'%-*.*s' % (maxlen, maxlen, s) + '{.ENDC}'.format(colors.bcolors)
print '+' + '-' * colwidth + '+'
def my_text_frame(string_lst, tweet_date, width=160):
g_line = "+{0}+".format("-"*(width-2))
print g_line
print '{.FGMAGENTA}'.format(colors.bcolors) + str(tweet_date) + " :\t" + ' %-*.*s ' % (width, width, string_lst) + '{.ENDC}'.format(colors.bcolors)
print g_line
def get_context(tweet, conn, tweet_type, crawler, *args):
"""
Function to display context of user tweet
@param: tweet_id, user_id
@return:
"""
if len( args ) == 2:
data_replies = args[0]
data_in_reply_to = args[1]
else:
if len( args ) == 1:
data_replies = args[0]
# before query:
before_query = select(
[tweets.c.tweet_text, tweets.c.tweet_creation, tweets.c.tweet_userid] ).where( and_(
tweets.c.tweet_userid == tweet['tweet_userid'], tweets.c.tweet_id < tweet['tweet_id'] ) )
result_before = conn.execute( before_query.order_by( tweets.c.tweet_id.desc( ) ).limit( 5 ) ).fetchall( )
# after query:
after_query = select(
[tweets.c.tweet_text, tweets.c.tweet_creation, tweets.c.tweet_userid] ).where( and_(
tweets.c.tweet_userid == tweet['tweet_userid'], tweets.c.tweet_id > tweet['tweet_id'] ) )
result_after = conn.execute( after_query.order_by( tweets.c.tweet_id.asc( ) ).limit( 5 ) ).fetchall( )
# after (newer tweets)
for row in reversed(result_after):
tweet_after_text = row['tweet_text'].encode('utf-8')
tweet_after_date = row['tweet_creation']
tweet_after_text = anonimize_users(tweet_after_text)
# all_text_after
print str(tweet_after_date) + ": \t".format(colors.bcolors, colors.bcolors) + str(tweet_after_text)
#print textwrap.fill(all_text_after)
# current
current_tweet_text = tweet['tweet_text'].encode('utf-8')
current_tweet_author = tweet['tweet_userid']
current_tweet_date = tweet['tweet_creation']
current_tweet_text = anonimize_users(current_tweet_text)
##### The Tweet and its replies #####
if tweet_type == 'in_reply_to':
print_in_reply_to(data_in_reply_to, crawler)
# The tweet
# print_bordered_text(current_tweet_text, current_tweet_date)
string_lst = str(current_tweet_text)
my_text_frame(string_lst, current_tweet_date)
print_replies(data_replies, current_tweet_author, crawler)
##### ######################### #####
# before (older tweets)
for row in result_before:
tweet_before_text = row['tweet_text'].encode('utf-8')
tweet_before_date = row['tweet_creation']
tweet_before_text = anonimize_users( tweet_before_text)
#all_text_before
print str(tweet_before_date) + ": \t".format(colors.bcolors, colors.bcolors) + str(tweet_before_text)
#print textwrap.fill(all_text_before)
def get_key():
"""
Function to get arrow key pressed
@return: key type
"""
try:
charlist = []
key = ''
char = getch.getch( )
print >> sys.stdout, char
if char == '\x1b':
charlist.extend( char )
for i in range( 2 ):
sub = getch.getch( )
charlist.extend( sub )
for i, j in enumerate( charlist ):
charlist[i] = unicode( j ).encode( 'ascii' )
if charlist[2] == 'C':
key = 'right'
elif charlist[2] == 'B':
key = 'down'
elif charlist[2] == 'D':
key = 'left'
elif charlist[2] == 'A':
key = 'up'
elif char == 'q':
key = 'q'
else:
key = None
return key
except KeyboardInterrupt:
print >> sys.stdout, 'Goodbye: You pressed Ctrl+C!'
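# Arrow-key decoding note (illustrative): a terminal arrow key arrives as the
# three-byte escape sequence ESC '[' X, with X in {'A': up, 'B': down,
# 'C': right, 'D': left}; get_key() reads the leading '\x1b' plus two more
# bytes and maps them to the strings expected by main().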
def main():
# Connect to db
conn = engine.connect( )
ANSI_KEYS_TO_STRING = {'up': 'up', 'down': 'down', 'right': 'right', 'left': 'left', 'up': 'up'}
# Add arguments
parser = argparse.ArgumentParser( description='Crawl Twitter', prog='groundtruth_reader.py',
usage='%(prog)s [options]' )
parser.add_argument( '--auth', help='Twitter account for authentication', required=True )
try:
args = parser.parse_args( )
except Exception as e:
raise e
##### Crawler instance #####
auth = args.auth
# client_args = {'verify': True}
crawler = CrawlerTwitter( apikeys=gk(auth))
##### Start logic #####
print_banner( )
left_key = colored( '(arrow-left)', 'red', attrs=['blink'] )
right_key = colored( '(arrow-right)', 'red', attrs=['blink'] )
up_key = colored( '(arrow-up)', 'red', attrs=['blink'] )
down_key = colored( '(arrow-down)', 'red', attrs=['blink'] )
quitting = colored( '(q)', 'red', attrs=['blink'] )
try:
entered_id = raw_input( )
ins = reviewers.insert( )
if not database.checkReviewerId( entered_id ):
conn.execute( ins, reviewer_id=entered_id )
print >> sys.stdout, 'New reviewer id saved'
else:
print >> sys.stdout, 'Existing reviewer chosen'
# Subquery for not showing same tweets already annotated to the reviewer
annotated = select( [gt_tweets.c.fk_tweet_id] ). \
where( gt_tweets.c.fk_reviewer_id == entered_id ).order_by( gt_tweets.c.fk_tweet_id )
annotated_count = select( [func.count( )] ).select_from( gt_tweets ).where(
gt_tweets.c.fk_reviewer_id == entered_id )
# Query to show how many tweets are annotated by user
annotated_count_result = conn.execute( annotated_count, reviewer=entered_id )
for r in annotated_count_result:
print >> sys.stdout, 'Tweets you have already annotated: ', str( r.values( )[0] )
mentions = select( [tweets.c.tweet_id] ).where( and_( tweets.c.msg_type == 'm', tweets.c.depth == 1, tweets.c.retweet is False )).limit(1000)
# Avoid this join by creating a VIEW out of it
j = tweets.join( tweet_ranks, mentions.c.tweet_id == tweet_ranks.c.ranked_tweet_id )
query = select( [tweets] ).select_from( j ).order_by( desc( tweet_ranks.c.rank_tweet ),
desc( tweet_ranks.c.count_replies ),
desc( tweet_ranks.c.count_mentions ),
desc( tweet_ranks.c.count_hashtags ) ). \
where( tweets.c.tweet_id.notin_( annotated ) )
result = conn.execute( query, reviewer=entered_id ).fetchall( )
#### Tweets annotation loop ####
if len( result ) > 0:
print >> sys.stdout, 'To quit press key: [q]'
print >> sys.stdout, "\n"
for tweet in result:
if tweet['in_reply_to_tweet_id'] is None:
reply_id = None
else:
reply_id = tweet['in_reply_to_tweet_id']
current_tweet_id = tweet['tweet_id']
tweet_lang = tweet['tweet_lang']
if tweet_lang != 'en' or tweet['retweet'] is True or tweet['tweet_text'].startswith( 'RT' ):
gt_tweets.update( ).where( gt_tweets.c.fk_tweet_id == current_tweet_id ).values( abusive="hidden" )
continue
else:
if reply_id is None:
tweet_type = 'normal'
# SQL query to get replies to tweet: have in_reply_to_tweet_id = current_tweet_id
query_replies = select(
[replies.c.tweet_id, replies.c.tweet_text, replies.c.tweet_creation,
replies.c.in_reply_to_tweet_id,
replies.c.tweet_userid] ).where( replies.c.in_reply_to_tweet_id == current_tweet_id )
replies_content = conn.execute( query_replies ).fetchall( )
# Check for context
get_context( tweet, conn, tweet_type, crawler, replies_content )
# It is a In-reply-to, get its original tweet
else:
tweet_type = 'in_reply_to'
query_sources = select(
[tweets.c.tweet_id, tweets.c.tweet_text, tweets.c.tweet_creation,
tweets.c.in_reply_to_tweet_id,
tweets.c.tweet_userid] ).where( tweets.c.tweet_id == reply_id )
in_reply_to = conn.execute( query_sources ).fetchone( )
# ---- Replies to in_reply_to ---- #
# SQL query to get replies to tweet: have in_reply_to_tweet_id = current_tweet_id
query_replies = select(
[replies.c.tweet_id, replies.c.tweet_text, replies.c.tweet_creation,
replies.c.in_reply_to_tweet_id,
replies.c.tweet_userid] ).where( replies.c.in_reply_to_tweet_id == current_tweet_id )
replies_content = conn.execute( query_replies ).fetchall( )
get_context( tweet, conn, tweet_type, crawler, replies_content, in_reply_to )
# Bar-helper
print >> sys.stdout, '\n'
print >> sys.stdout, \
'{.BLINK}Abusive{.ENDC}'.format( colors.bcolors, colors.bcolors ) + left_key + \
',{.BLINK}Acceptable{.ENDC}'.format( colors.bcolors, colors.bcolors ) + right_key + \
',{.BLINK}Skip{.ENDC}'.format( colors.bcolors, colors.bcolors ) + down_key + \
',{.BLINK}Undo{.ENDC}'.format( colors.bcolors, colors.bcolors ) + up_key + \
',{.BLINK}Quit{.ENDC}'.format( colors.bcolors, colors.bcolors ) + quitting
key = get_key( )
if key == 'q':
                        print >> sys.stdout, 'Goodbye: you pressed quit [q]'
sys.exit( 0 )
while key not in ANSI_KEYS_TO_STRING.itervalues( ):
print >> sys.stdout, 'WHAT?'
# Bar-helper
print >> sys.stdout, '\n'
print >> sys.stdout, \
'{.BLINK}Abusive{.ENDC}'.format( colors.bcolors, colors.bcolors ) + left_key + \
',{.UNDERLINE}Acceptable{.ENDC}'.format( colors.bcolors, colors.bcolors ) + right_key + \
',{.BLINK}Skip{.ENDC}'.format( colors.bcolors, colors.bcolors ) + down_key + \
',{.BLINK}Undo{.ENDC}'.format( colors.bcolors, colors.bcolors ) + up_key + \
',{.BLINK}Quit{.ENDC}'.format( colors.bcolors, colors.bcolors ) + quitting
key = get_key( )
if key == 'q':
print >> sys.stdout, 'Goodbye: you pressed quit [q]'
sys.exit( 0 )
# Get the cursor
new_cursor = select( [cursor_gt.c.last_tweet_id] ).where( cursor_gt.c.username == entered_id )
new_cursor_value = conn.execute( new_cursor, username=entered_id,
last_tweet_id=tweet['tweet_id'] ).first( )
# Control input
if key == ANSI_KEYS_TO_STRING['right']:
ins = gt_tweets.insert( )
conn.execute( ins, fk_reviewer_id=entered_id, fk_tweet_id=tweet['tweet_id'], abusive='no' )
if new_cursor_value is None:
insert_cursor = cursor_gt.insert( )
conn.execute( insert_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
else:
update_cursor = cursor_gt.update( ).values( ).where( cursor_gt.c.username == entered_id )
conn.execute( update_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
elif key == ANSI_KEYS_TO_STRING['left']:
ins = gt_tweets.insert( )
conn.execute( ins, fk_reviewer_id=entered_id, fk_tweet_id=tweet['tweet_id'], abusive='yes' )
if new_cursor_value is None:
insert_cursor = cursor_gt.insert( )
conn.execute( insert_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
else:
update_cursor = cursor_gt.update( ).values( ).where( cursor_gt.c.username == entered_id )
conn.execute( update_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
elif key == ANSI_KEYS_TO_STRING['up']:
user_last_tweet_id = select( [cursor_gt.c.last_tweet_id] ).where(
cursor_gt.c.username == entered_id )
gt_delete = gt_tweets.delete( ).where(
and_( entered_id == gt_tweets.c.fk_reviewer_id,
user_last_tweet_id == gt_tweets.c.fk_tweet_id ) )
conn.execute( gt_delete, fk_reviewer_id=entered_id, fk_tweet_id=user_last_tweet_id )
cursor_gt_delete = cursor_gt.delete( ).where(
and_( entered_id == cursor_gt.c.username, user_last_tweet_id == cursor_gt.c.last_tweet_id )
)
conn.execute( cursor_gt_delete, fk_reviewer_id=entered_id, fk_tweet_id=user_last_tweet_id )
elif key == ANSI_KEYS_TO_STRING['down']:
ins = gt_tweets.insert( )
conn.execute( ins, fk_reviewer_id=entered_id, fk_tweet_id=tweet['tweet_id'], abusive='unknown' )
if new_cursor_value is None:
insert_cursor = cursor_gt.insert( )
conn.execute( insert_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
else:
update_cursor = cursor_gt.update( ).values( ).where( cursor_gt.c.username == entered_id )
conn.execute( update_cursor, username=entered_id, last_tweet_id=tweet['tweet_id'] )
elif key == 'q':
                        print >> sys.stdout, 'Goodbye: you pressed quit [q]'
sys.exit( 0 )
# Clean screen after we have marked the tweet
cls_next_tweet()
else:
print >> sys.stdout, 'Hurray! You have completed all possible tweet annotations'
except KeyboardInterrupt:
print >> sys.stdout, 'Goodbye: You pressed Ctrl+C!'
except Exception:
traceback.print_exc( file=sys.stdout )
sys.exit( 0 )
# Main
if __name__ == '__main__':
main( )
|
gpl-3.0
| -6,845,495,059,199,524,000 | 40.786538 | 151 | 0.557918 | false |
dongweiming/web_develop
|
chapter4/section2/app_wtf.py
|
1
|
1254
|
# coding=utf-8
from flask import Flask, render_template, request
from flask_wtf import Form
from flask_wtf.csrf import CsrfProtect
from wtforms import TextField, PasswordField
from wtforms.validators import length, Required, EqualTo
from ext import db
from users import User
app = Flask(__name__, template_folder='../../templates')
app.config.from_object('config')
CsrfProtect(app)
db.init_app(app)
class RegistrationForm(Form):
name = TextField('Username', [length(min=4, max=25)])
email = TextField('Email Address', [length(min=6, max=35)])
password = PasswordField('New Password', [
Required(), EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat Password')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm(request.form)
if request.method == 'POST' and form.validate():
user = User(name=form.name.data, email=form.email.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
        return 'register succeeded!'
return render_template('chapter4/section2/register.html', form=form)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000, debug=app.debug)
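# Hedged manual-test sketch (not part of the original example; assumes the
# third-party "requests" package is available): the csrf_token has to be scraped
# from the rendered form first, because CsrfProtect(app) rejects POSTs without it.
#
#   import requests
#   s = requests.Session()
#   html = s.get('http://localhost:9000/register').text
#   # ...extract csrf_token from html into `token`, then:
#   s.post('http://localhost:9000/register',
#          data={'name': 'alice', 'email': 'alice@example.com',
#                'password': 'secret', 'confirm': 'secret', 'csrf_token': token})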
|
gpl-3.0
| 3,855,969,728,156,356,600 | 31.153846 | 72 | 0.681818 | false |
laurentperrinet/Khoei_2017_PLoSCB
|
scripts/experiment_speed.py
|
1
|
1790
|
import MotionParticlesFLE as mp
gen_dot = mp.generate_dot
import numpy as np
import os
from default_param import *
image_speed = {}
experiment = 'speed'
speeds = np.linspace(3.75, 1., 11, endpoint=True)
# choose X_0 and the flash timing such that the dot crosses the middle of the screen at t=.5 while covering the same distance
X_0s = -speeds
dot_starts = .5 - .4 / speeds
flash_durations = .8 / speeds
stimulus_tag = stim_labels[0] # 'dot'
im_arg = stim_args[0]
# generating the movie
image_speed[stimulus_tag] = {}
image_speed[stimulus_tag]['args'] = {'Y_0':0, 'im_noise':mp.im_noise, 'dot_size':dot_size}
image_speed[stimulus_tag]['im'] = gen_dot(N_X=N_X, N_Y=N_Y, N_frame=N_frame, **image_speed[stimulus_tag]['args'])
image_speed[stimulus_tag]['result'] = {}
# running PX and MBP with 2 different latencies
for D_x, D_V, v_prior, label in zip([mp.D_x, PBP_D_x], [mp.D_V, PBP_D_V], [mp.v_prior, PBP_prior], ['MBP', 'PBP']):
figname = os.path.join(mp.figpath, experiment + '-' + stimulus_tag + '-' + label)
image_speed[stimulus_tag]['result'][label] = {}
image_speed[stimulus_tag]['args'].update(D_V=D_V, D_x=D_x, v_prior=v_prior)
kwargs_variable = mp.figure_image_variable(
figname,
N_X, N_Y, N_frame, gen_dot, order=None, do_figure=do_figure, do_video=do_video, N_quant_X=N_quant_X, N_quant_Y=N_quant_Y,
fixed_args=image_speed[stimulus_tag]['args'],
V_X=speeds, X_0=X_0s, flash_start=dot_starts, flash_duration=flash_durations)
for new_kwargs in kwargs_variable:
try:
matname = mp.make_figname(figname, new_kwargs).replace(mp.figpath, mp.matpath) + '.npy'
image_speed[stimulus_tag]['result'][label][new_kwargs['V_X']] = np.load(matname)
except:
print('no result yet for ', matname)
|
mit
| -7,396,957,845,695,132,000 | 39.704545 | 133 | 0.640782 | false |
alexmgr/tinyec
|
tinyec/registry.py
|
1
|
10114
|
# -*- coding: utf-8 -*-
import tinyec.ec as ec
EC_CURVE_REGISTRY = {"brainpoolP160r1": {"p": 0xE95E4A5F737059DC60DFC7AD95B3D8139515620F,
"a": 0x340E7BE2A280EB74E2BE61BADA745D97E8F7C300,
"b": 0x1E589A8595423412134FAA2DBDEC95C8D8675E58,
"g": (0xBED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3,
0x1667CB477A1A8EC338F94741669C976316DA6321),
"n": 0xE95E4A5F737059DC60DF5991D45029409E60FC09,
"h": 0x1},
"brainpoolP192r1": {"p": 0xC302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297,
"a": 0x6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF,
"b": 0x469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9,
"g": (0xC0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6,
0x14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F),
"n": 0xC302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1,
"h": 0x1},
"brainpoolP224r1": {"p": 0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF,
"a": 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43,
"b": 0x2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B,
"g": (0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D,
0x58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD),
"n": 0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F,
"h": 0x1},
"brainpoolP256r1": {"p": 0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377,
"a": 0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9,
"b": 0x26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6,
"g": (0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262,
0x547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997),
"n": 0xA9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7,
"h": 0x1},
"brainpoolP320r1": {"p": 0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27,
"a": 0x3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4,
"b": 0x520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6,
"g": (0x43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611,
0x14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1),
"n": 0xD35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311,
"h": 0x1},
"brainpoolP384r1": {"p": 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53,
"a": 0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826,
"b": 0x04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11,
"g": (0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E,
0x8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315),
"n": 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565,
"h": 0x1},
"brainpoolP512r1": {"p": 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3,
"a": 0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA,
"b": 0x3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723,
"g": (0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822,
0x7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892),
"n": 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069,
"h": 0x1},
"secp192r1": {"p": 0xfffffffffffffffffffffffffffffffeffffffffffffffff,
"a": 0xfffffffffffffffffffffffffffffffefffffffffffffffc,
"b": 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1,
"g": (0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012,
0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811),
"n": 0xffffffffffffffffffffffff99def836146bc9b1b4d22831,
"h": 0x1},
"secp224r1": {"p": 0xffffffffffffffffffffffffffffffff000000000000000000000001,
"a": 0xfffffffffffffffffffffffffffffffefffffffffffffffffffffffe,
"b": 0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4,
"g": (0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21,
0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34),
"n": 0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d,
"h": 0x1},
"secp256r1": {"p": 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff,
"a": 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc,
"b": 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b,
"g": (0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296,
0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5),
"n": 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551,
"h": 0x1},
"secp384r1": {"p": 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff,
"a": 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc,
"b": 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef,
"g": (0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7,
0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f),
"n": 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973,
"h": 0x1},
"secp521r1": {"p": 0x000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff,
"a": 0x000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc,
"b": 0x00000051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00,
"g": (0x000000c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66,
0x0000011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650),
"n": 0x000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409,
"h": 0x1}}
def get_curve(name):
curve_params = {}
for k, v in EC_CURVE_REGISTRY.items():
if name.lower() == k.lower():
curve_params = v
if curve_params == {}:
raise ValueError("Unknown elliptic curve name")
try:
sub_group = ec.SubGroup(curve_params["p"], curve_params["g"], curve_params["n"], curve_params["h"])
curve = ec.Curve(curve_params["a"], curve_params["b"], sub_group, name)
except KeyError:
raise RuntimeError("Missing parameters for curve %s" % name)
return curve
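# Hedged usage sketch (not part of the original file; assumes this module is used
# inside the tinyec package, so that "import tinyec.ec as ec" above resolves and the
# returned ec.Curve carries the named sub-group):
if __name__ == "__main__":
    named_curve = get_curve("secp256r1")
    print(named_curve)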
|
gpl-3.0
| -7,705,694,151,767,924,000 | 96.25 | 181 | 0.616967 | false |
ebilionis/variational-reformulation-of-inverse-problems
|
vuq/_full_optimizer.py
|
1
|
3651
|
"""
A generic optimizer class.
Author:
Ilias Bilionis
Date:
6/5/2014
"""
__all__ = ['FullOptimizer']
import math
import numpy as np
from scipy.optimize import minimize
from . import EvidenceLowerBound
class FullOptimizer(object):
"""
A generic optimizer object.
"""
# The evidence lower bound
_elbo = None
# A name for the object
__name__ = None
@property
def elbo(self):
"""
:getter: The evidence lower bound.
"""
return self._elbo
    def __init__(self, elbo, name='Optimizer'):
"""
Initialize the object.
"""
assert isinstance(elbo, EvidenceLowerBound)
self._elbo = elbo
self.__name__ = name
def __str__(self):
"""
Return a string representation of the object.
"""
s = 'Name: ' + self.__name__ + '\n'
s += 'ELBO:\n'
s += str(self.elbo)
return s
def optimize_full_mu(self, log_q):
"""
Full optimization of the mu's.
"""
def f_mu(mu, log_q, elbo):
mu_old = log_q.mu[:]
log_q.mu = mu.reshape((log_q.num_comp, log_q.num_dim))
state = elbo(log_q)
L = state['L1']
L_grad_mu = state['L_grad_mu'].reshape((log_q.num_comp * log_q.num_dim, ))
log_q.mu = mu_old
return -L, -L_grad_mu
args = (log_q, self.elbo)
res = minimize(f_mu, log_q.mu.flatten(), args=args, jac=True,
tol=1e-20)
log_q.mu = res.x.reshape((log_q.num_comp, log_q.num_dim))
def optimize_full_L(self, log_q):
"""
Full optimization of the cholesky factor of the C's.
"""
def f_L(L, log_q, elbo, idx):
C_old = log_q.C[:]
k = idx[0].shape[0]
ZZ = []
for i in xrange(log_q.num_comp):
Z = np.zeros((log_q.num_dim, log_q.num_dim))
Z[idx] = L[i * k : (i + 1) * k]
ZZ.append(Z)
C = np.dot(Z, Z.T)
log_q.comp[i].C = C
ZZ = np.array(ZZ)
state = elbo(log_q)
LL = state['L']
L_grad_C = state['L_grad_C']
L_grad_Z = 2. * np.einsum('ijk,ikl->ijl', L_grad_C, ZZ)
L_grad_Z = np.hstack([L_grad_Z[i, :, :][idx]
for i in xrange(log_q.num_comp)])
log_q.C = C_old
print L, LL
return -LL, -L_grad_Z
idx = np.tril_indices(log_q.num_dim)
L0 = np.hstack([log_q.comp[i].C[idx] for i in xrange(log_q.num_comp)])
tmp = np.ndarray((log_q.num_dim, log_q.num_dim), dtype='object')
for i in xrange(log_q.num_dim):
for j in xrange(log_q.num_dim):
if i == j:
tmp[i, j] = (0.5, None)
else:
tmp[i, j] = (None, None)
L_bounds = tuple(tmp[idx] for i in xrange(log_q.num_comp))
L_bounds = tuple(xx for x in L_bounds for xx in x)
args = (log_q, self.elbo, idx)
res = minimize(f_L, L0, args=args, jac=True, method='L-BFGS-B',
bounds=L_bounds)
k = idx[0].shape[0]
for i in xrange(log_q.num_comp):
Z = np.zeros((log_q.num_dim, log_q.num_dim))
Z[idx] = res.x[i * k : (i + 1) * k]
C = np.dot(Z, Z.T)
log_q.comp[i].C = C
def optimize(self, log_q, max_it=10):
"""
Optimize.
"""
for i in xrange(max_it):
self.optimize_full_mu(log_q)
self.optimize_full_L(log_q)
|
gpl-2.0
| 4,461,635,448,875,746,300 | 27.523438 | 86 | 0.466721 | false |
yejia/osl_notebook
|
tests/test_group.py
|
1
|
6122
|
# -*- coding: utf-8 -*-
import os
import unittest
import requests
from nose.tools import nottest, istest, raises, eq_, ok_, assert_in, assert_not_in, assert_raises
import django
from notebook.social.models import Group as G, Social_Tag as ST, Member
from notebook.notes.models import Tag as T, getW, getT
from notebook.data.data import *
class TestPrivateGroup(unittest.TestCase):
pass
class TestGroup(unittest.TestCase):
def setUp(self):
self.tearDown()
print 'test_mode:', os.environ['TEST_MODE']
self.name = 'unittest'
#create a unittest member first
creator_name = self.name
m, created = Member.objects.get_or_create(username=self.name)
groupname = 'test_maniacs'
tag_names = ['testing', 'framework', 'philosophy']
#tag_ids = [ST.objects.get_or_create(name=tag_name)[0].id for tag_name in tag_names]
self.group = create_group(groupname, tag_names, creator_name, private=False)
create_group('my_private_group', ['my_private_group'], 'unittest', private=True)
def testCreateGroup(self):
g = G.objects.get(name='test_maniacs')
eq_(g.name, 'test_maniacs')
eq_(g.private, False)
eq_(g.get_tag_names(), ['framework', 'philosophy', 'sharinggroup:test_maniacs', 'testing'])
T = getT(self.name)
W = getW(self.name)
w1 = W.objects.get(name='snippetbook')
eq_(w1.name, 'snippetbook')
w2 = W.objects.get(name='bookmarkbook')
eq_(w2.name, 'bookmarkbook')
w3 = W.objects.get(name='scrapbook')
eq_(w3.name, 'scrapbook')
w = W.objects.get(name="sharinggroup:"+g.name)
eq_(w.name, "sharinggroup:"+g.name)
for st in ['testing', 'framework', 'philosophy']:
t = T.objects.get(name=st)
eq_(t.name, st)
eq_(t.private, False)
eq_(t.name in w1.display_tags().split(','), True)
def testCreatePrivateGroup(self):
g = G.objects.get(name='my_private_group')
eq_(g.name, 'my_private_group')
eq_(g.private, True)
eq_(g.get_tag_names(), ['my_private_group', 'sharinggroup:'+g.name])
T = getT('unittest')
W = getW('unittest')
w1 = W.objects.get(name='snippetbook')
eq_(w1.name, 'snippetbook')
w2 = W.objects.get(name='bookmarkbook')
eq_(w2.name, 'bookmarkbook')
w3 = W.objects.get(name='scrapbook')
eq_(w3.name, 'scrapbook')
w = W.objects.get(name="sharinggroup:"+g.name)
eq_(w.name, "sharinggroup:"+g.name)
for st in ['my_private_group', 'sharinggroup:'+g.name]:
t = T.objects.get(name=st)
eq_(t.name, st)
eq_(t.private, True)
eq_(t.name in w1.display_tags().split(','), True)
def testPostToPrivateGroup(self):
pass
def testUpdateGroup(self):
g1 = G.objects.get(name='test_maniacs')
g1.name = 'test_maniacs_changed'
g1.desc = 'A learning group for those crazy about test.'
#TODO:test add tags
g1.save()
g2 = G.objects.get(name='test_maniacs_changed')
eq_(g2.desc, 'A learning group for those crazy about test.')
def testAddGroupTags(self):
g1 = G.objects.get(name='test_maniacs')
self.testJoinGroup()
add_tags(g1, ['functional test', 'performance test'])
assert_in('functional test', g1.get_tag_names())
assert_in('performance test', g1.get_tag_names())
T1 = getT('unittest')
T2 = getT('unittest2')
for T in [T1, T2]:
T.objects.get(name='functional test')
T.objects.get(name='performance test')
def testRemoveGroupTags(self):
g1 = G.objects.get(name='test_maniacs')
self.testJoinGroup()
self.testAddGroupTags()
remove_tags(g1, ['functional test', 'performance test'])
assert_not_in('functional test', g1.get_tag_names())
assert_not_in('performance test', g1.get_tag_names())
def testJoinGroup(self):
username2 = 'unittest2'
m, created = Member.objects.get_or_create(username=username2)
join_group('test_maniacs', username2)
g = G.objects.get(name='test_maniacs')
assert_in(m, g.members.all())
sts = g.tags.all()
T = getT('unittest2')
for st in sts:
T.objects.get(name=st.name)
@raises(django.core.exceptions.ObjectDoesNotExist)
def testDeleteGroup(self):
g1 = G.objects.get(name='test_maniacs')
username2 = 'unittest2'
m, created = Member.objects.get_or_create(username=username2)
#TODO: how to check the error msg?
assert_raises(Exception, delete_group, g1, 'unittest2')
#delete a group as the creator
result = delete_group(g1, 'unittest')
eq_(result, True)
#add a user as admin
self.setUp()
self.testAddAdmin()
result = delete_group(g1, 'unittest3')
eq_(result, True)
g1 = G.objects.get(name='test_maniacs')
def testAddAdmin(self):
g1 = G.objects.get(name='test_maniacs')
m, created = Member.objects.get_or_create(username='unittest3')
g1.admins.add(m)
g1.save()
assert_in(m, g1.admins.all())
def testRemoveMember(self):
self.testJoinGroup()
remove_group_member('test_maniacs', 'unittest', 'unittest2')
g = G.objects.get(name='test_maniacs')
assert_not_in('unittest2', g.members.all())
def tearDown(self):
print "clearing groups..."
G.objects.all().delete()
print "clearning social tags..."
ST.objects.all().delete()
#TODO:St seems not cleared
T = getT('unittest')
T.objects.all().delete()
T2 = getT('unittest2')
T2.objects.all().delete()
|
mit
| -3,152,835,355,536,219,600 | 33.393258 | 107 | 0.567298 | false |
5GExchange/mapping
|
simulation/ResourceGetter.py
|
1
|
6523
|
# Copyright 2017 Balazs Nemeth, Mark Szalay, Janos Doka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import string
from abc import ABCMeta, abstractmethod
try:
# runs when mapping files are called from ESCAPE
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
except ImportError:
# runs when mapping repo is cloned individually, and NFFG lib is in a
  # sibling directory. WARNING: circular import is not avoided by design.
import site
site.addsitedir('..')
from nffg_lib.nffg import NFFG, NFFGToolBox
try:
from generator import CarrierTopoBuilder
except ImportError:
import site
site.addsitedir('../generator')
import CarrierTopoBuilder
import alg1.MappingAlgorithms as online_mapping
class AbstractResourceGetter:
__metaclass__ = ABCMeta
@abstractmethod
def GetNFFG(self):
pass
class LoadedResourceGetter(AbstractResourceGetter):
def __init__(self, log, resource_path):
log.info("Reading loaded resource topology from file...")
# WARNING: all IDs of SG components needs to be different from the
# ID-s which will be used during the mapping! (it is a problem,
# if a dumped NFFG is used from an earlier configuration)
self.log = log
self.loaded_resource_nffg = NFFG.parse_from_file(resource_path)
self.log.info("Recreating SGhops based on flowrules in the loaded topology...")
NFFGToolBox.recreate_all_sghops(self.loaded_resource_nffg)
def getRunningSGs(self):
"""
Retrieves the set of SGs which are mapped already.
:return: list of NFFGs
"""
running_sgs = []
# merged NFFG of all SGs into one single NFFG object
SUM_req = NFFG()
for req in self.loaded_resource_nffg.reqs:
mapped_sg = NFFG()
mapped_sg.add_sap(sap_obj=copy.deepcopy(req.src.node))
if req.dst.node.id not in mapped_sg:
mapped_sg.add_sap(sap_obj=copy.deepcopy(req.dst.node))
for sg_hop_id in req.sg_path:
# retrieve the SGHop objects
for sghop in self.loaded_resource_nffg.sg_hops:
if sghop.id == sg_hop_id:
break
else:
raise Exception("SGHop with ID %s couldn't be found in loaded "
"resource topology!")
# add both ends of an SG HOP
if sghop.src.node.id not in mapped_sg:
mapped_sg.add_nf(nf=copy.deepcopy(sghop.src.node))
if sghop.dst.node.id not in mapped_sg:
mapped_sg.add_nf(nf=copy.deepcopy(sghop.dst.node))
# add the SGHop itself
if not mapped_sg.network.has_edge(sghop.src.node.id,
sghop.dst.node.id, key=sghop.id):
mapped_sg.add_sglink(sghop.src, sghop.dst, hop=copy.deepcopy(sghop))
# add the Edgereq Itself
mapped_sg.add_req(req.src, req.dst, req=copy.deepcopy(req))
SUM_req = NFFGToolBox.merge_nffgs(SUM_req, mapped_sg,
copy_shallow=False, silent=True)
running_sgs.append(mapped_sg)
# delete the NFs, which are not included in any Service Graph
# (so they would never be deleted from the resource)
_, del_nffg = NFFGToolBox.generate_difference_of_nffgs(
self.loaded_resource_nffg,
SUM_req, ignore_infras=True)
nfs_to_remove = [n.id for n in del_nffg.nfs]
if len(nfs_to_remove) > 0:
self.log.warn("Removing %s NFs from loaded topology, because they were "
"not included in any EdgeReq. Examples: %s"%
(len(nfs_to_remove), nfs_to_remove[:20]))
self.loaded_resource_nffg, _ = online_mapping.MAP(del_nffg,
self.loaded_resource_nffg, mode=NFFG.MODE_DEL,
keep_input_unchanged=True)
return running_sgs
def GetNFFG(self):
return self.loaded_resource_nffg
class PicoResourceGetter(AbstractResourceGetter):
def GetNFFG(self):
network = CarrierTopoBuilder.getPicoTopo()
return network
class GwinResourceGetter(AbstractResourceGetter):
def GetNFFG(self):
network = CarrierTopoBuilder.getSNDlib_dfn_gwin(
"../generator/dfn-gwin.gml")
return network
class CarrierTopoGetter(AbstractResourceGetter):
def GetNFFG(self):
"""
See parameter description in CarrierTopoBuilder module.
:return:
"""
topoparams = []
topoparams.append({'Retail': (2, 3, 10), 'Business': (2, 2, 15),
'CloudNFV': (2, 2, 2, 16000, 100000,
list(string.ascii_uppercase)[:10],
[80, 120, 160], [32000, 64000], [150], 4000,
4)})
topoparams = 3 * topoparams
return CarrierTopoBuilder.getCarrierTopo(topoparams, increment_port_ids=True)
class FatFreeTopoGetter(AbstractResourceGetter):
def GetNFFG(self):
network = CarrierTopoBuilder.getFatTreeTopo()
return network
class SpineLeafTopoGetter(AbstractResourceGetter):
def GetNFFG (self):
network = CarrierTopoBuilder.getSpineLeafTopology()
return network
class EdgeAndCoreComputingTopoGetter(AbstractResourceGetter):
def GetNFFG (self):
network = CarrierTopoBuilder.getSNDlib_dfn_gwin(
"../generator/dfn-gwin.gml", edge_computing=True,
edge_and_core_computing=True)
return network
if __name__ == "__main__":
carrier = CarrierTopoGetter().GetNFFG()
print "total: ", len(carrier)
print "nfs: ", len([n for n in carrier.nfs])
print "saps: ", len([n for n in carrier.saps])
print "infras: ", len([n for n in carrier.infras])
import networkx as nx
carrier_gml = nx.MultiDiGraph()
carrier_gml.add_nodes_from(carrier.network.nodes_iter())
carrier_gml.add_edges_from(carrier.network.edges_iter())
nx.write_gml(carrier_gml, "carrier"+str(len(carrier))+".gml")
|
apache-2.0
| 2,515,066,137,436,988,000 | 34.644809 | 87 | 0.638816 | false |
lsst-dm/great3-public
|
validation/plot_galaxy_badcounts.py
|
2
|
3216
|
#!/usr/bin/env python
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file plot_galaxy_badcounts.py
Convenience script for finding and plotting the histogram of object SNRs for the GREAT3 galaxy image
with an object number count furthest from the 10000 required. Uses a "badcount" dict as output by
`count_objects.py`
"""
# usage: ./plot_galaxy_badcounts.py DICTFILE
import os
from sys import argv
import cPickle
import pyfits
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
if len(argv) != 2:
print "usage: ./plot_galaxy_badcounts.py DICTFILE"
import sys
sys.exit(1)
# List for storing object counts
nobs = []
# Setup a couple of variables to get the largest nobs
biggest_nobs = 10000
biggest_nobs_field = ""
baddict = cPickle.load(open(argv[1]))
for key, subdict in baddict.iteritems():
if "starfield_image" not in os.path.split(key)[-1]:
nobs.append(subdict["nobs"])
if np.abs(nobs[-1] - 10000) > np.abs(biggest_nobs - 10000):
biggest_nobs = nobs[-1]
biggest_nobs_field = key
# Then plot the hist
plt.hist(
nobs, bins=50,
label="Worst offender: "+os.path.split(os.path.split(biggest_nobs_field)[0])[-1]+"/"+
os.path.split(biggest_nobs_field)[-1]+" with "+str(biggest_nobs))
plt.xlabel("N objects")
plt.legend()
splitname = ((argv[1]).rsplit("_"))
plt.title("Fields in "+str(splitname[1])+" with N objects != 10000")
outfile = "histogram_"+((argv[1]).rstrip(".p"))+".png"
print "Saving plot to "+outfile
plt.savefig(outfile)
plt.show()
|
bsd-3-clause
| 6,930,097,718,750,238,000 | 40.766234 | 100 | 0.709266 | false |
hakril/PythonForWindows
|
ctypes_generation/func_parser.py
|
1
|
5316
|
from winstruct import WinStruct, WinStructType, Ptr
import dummy_wintypes
from simpleparser import *
class WinFunc(object):
def __init__(self, return_type, name, params=()):
self.name = name
self.return_type = return_type
self.params = params
#if return_type not in dummy_wintypes.names:
# print("Non-standard return type {0}".format(return_type))
#for (type, name) in params:
# if type not in dummy_wintypes.names:
# print("Non-standard type {0}".format(type))
def generate_ctypes(self):
return self.generate_comment_ctypes() + "\n" + self.generate_prototype_ctypes() + "\n" + self.generate_paramflags_ctypes() + "\n"
def generate_comment_ctypes(self):
model = "#def {0}({1}):\n# return {0}.ctypes_function({1})"
ctypes_param = [name for type, name in self.params]
ctypes_param_str = ", ".join(ctypes_param)
return model.format(self.name, ctypes_param_str)
def generate_prototype_ctypes(self):
model = "{0} = WINFUNCTYPE({1})"
if isinstance(self.return_type, tuple) and self.return_type[0] == "PTR":
ctypes_param = ["POINTER({0})".format(self.return_type[1])]
else:
ctypes_param = [self.return_type]
for type, name in self.params:
if type.upper() == "POINTER(VOID)":
type = "PVOID"
ctypes_param.append(type)
#ctypes_param = [self.return_type] + [type for type, name in self.params]
ctypes_param_str = ", ".join(ctypes_param)
return model.format(self.name + "Prototype", ctypes_param_str)
def generate_paramflags_ctypes(self):
model = "{0} = {1}"
ctypes_paramflags = tuple([(1, name) for type, name in self.params])
return model.format(self.name + "Params", ctypes_paramflags)
class WinFuncParser(Parser):
known_io_info_type = ["__in", "__in_opt", "_In_", "_In_opt_", "_Inout_", "_Out_opt_", "_Out_", "_Reserved_", "_Inout_opt_", "__inout_opt", "__out", "__inout", "__deref_out", "_Outptr_"]
known_io_info_with_param = ["_Out_writes_bytes_", "_In_reads_bytes_opt_", "_In_reads_bytes_", "_Out_writes_", "_Out_writes_bytes_to_opt_"]
known_declarations = {
"WINAPI" : "WINFUNCTYPE",
"LDAPAPI" : "CFUNCTYPE"
}
default_calling_convention = "WINFUNCTYPE"
def assert_argument_io_info(self):
io_info = self.assert_token_type(NameToken)
if io_info.value not in self.known_io_info_type:
if io_info.value not in self.known_io_info_with_param:
raise ParsingError("Was expection IO_INFO got {0} instead".format(io_info))
# Ignore IO infos params.
self.assert_token_type(OpenParenthesisToken)
while type(self.peek()) is not CloseParenthesisToken:
self.next_token()
self.assert_token_type(CloseParenthesisToken)
return io_info
def parse_func_arg(self, has_winapi):
type_ptr = False
if has_winapi:
self.assert_argument_io_info()
arg_type = self.assert_token_type(NameToken)
if arg_type.value.upper() == "CONST":
arg_type = self.assert_token_type(NameToken)
if type(self.peek()) == StarToken:
type_ptr = True
self.assert_token_type(StarToken)
arg_name = self.assert_token_type(NameToken)
if not type(self.peek()) == CloseParenthesisToken:
self.assert_token_type(CommaToken)
if not type_ptr:
return (arg_type.value, arg_name.value)
return ("POINTER({0})".format(arg_type.value), arg_name.value)
def assert_winapi_token(self):
winapi = self.assert_token_type(NameToken)
if winapi.value != "WINAPI":
raise ParsingError("Was expection NameToken(WINAPI) got {0} instead".format(winapi))
return winapi
def parse_winfunc(self):
has_winapi = False
try:
return_type = self.assert_token_type(NameToken).value
except StopIteration:
raise NormalParsingTerminaison()
if type(self.peek()) == StarToken:
self.assert_token_type(StarToken)
return_type = ("PTR", return_type)
func_name = self.assert_token_type(NameToken).value
if func_name.upper() == "WINAPI":
has_winapi = True
func_name = self.assert_token_type(NameToken).value
self.assert_token_type(OpenParenthesisToken)
params = []
while type(self.peek()) != CloseParenthesisToken:
params.append(self.parse_func_arg(has_winapi))
self.assert_token_type(CloseParenthesisToken)
self.assert_token_type(SemiColonToken)
return WinFunc(return_type, func_name, params)
def parse(self):
res = []
while self.peek() is not None:
res.append(self.parse_winfunc())
return res
def dbg_lexer(data):
for i in Lexer(data).token_generation():
print i
def dbg_parser(data):
return WinFuncParser(data).parse()
def dbg_validate(data):
return validate_structs(Parser(data).parse())
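# Hedged illustration (not from the original file) of the input this parser expects:
# one C-style prototype per line with SAL-style IO annotations, for example
#   BOOL WINAPI CloseHandle(_In_ HANDLE hObject);
# Feeding such text to WinFuncParser(text).parse() yields WinFunc objects whose
# generate_ctypes() emits the WINFUNCTYPE prototype, the Params flags tuple and a
# commented wrapper stub for each declaration.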
if __name__ == "__main__":
import sys
data = open(sys.argv[1], 'r').read()
    funcs = dbg_parser(data)  # parse the declarations, then emit ctypes for each
    for func in funcs:
        print(func.generate_ctypes())
|
bsd-3-clause
| 5,275,421,062,611,060,000 | 36.43662 | 189 | 0.60064 | false |
changyaochen/zi_DAQs
|
vacmega_open_loop.py
|
1
|
3296
|
# -*- coding: utf-8 -*-
"""
This is for multiple open-loop sweeps with different vac values.
The purpose is to see how the vac will affect the
lineshape of the resonance, as well as the
extracted Q
@author: changyao chen
"""
from open_loop_sweep import *
import os
import numpy as np
import pandas as pd
import zi_processing
# get the directories right
dname = os.path.dirname(os.path.abspath(__file__))
os.chdir(dname)
device_id= 'dev267'
# ===== global setting =======
vac_list = np.linspace(0.01, 0.51, 25)
samplecount = 2000
#vac_list = [0.1]; samplecount = 100 # for debug purpose
inplace_fit = True # whether to do Lorentz fit for each sweep
# ==============================
# I will save the output into 2 types of files:
# for the first type, each vac has its own file
# the second type is the usual megasweep format (single file)
#
# with vac attached to the file name
# initilization for the type_II data
type_II = pd.DataFrame()
# initialize output file
fitted_results = []
# only to extract certain fields from the raw output
headers = ['frequency', 'x', 'y']
# prompt to ask for path to save data
handle = raw_input('saved file path/name: ')
if len(handle) == 0:
handle = 'temp'
# create a folder for the results
if not os.path.exists(handle):
os.makedirs(handle)
os.chdir(dname + '/' + handle)
elif handle == 'temp':
os.chdir(dname + '/' + handle)
else:
raise Exception('Please input a valid file path/name!')
for vac in vac_list:
# run one open loop sweep
# the return result is a list, and its [0][0] element is the dict
# that is of interest
result = open_loop_sweep(device_id = 'dev267',
start_freq = 27.00e3, stop_freq = 40.00e3,
amplitude=vac, avg_sample= 3,
samplecount = samplecount, )
type_I_temp = pd.DataFrame.from_dict({x: result[0][0][x] for x in headers})
type_I_temp['vac'] = vac
# save the type I data
type_I_temp.to_csv(handle + '_' + str(vac) + '.txt', sep = '\t',
index = False)
# do the fit if chosen
if inplace_fit:
# fit the data to a Lorentz curve
p_fit, p_err = zi_processing.fit_lorentz_sweeper(type_I_temp, showHTML = False,
figure_name = handle + '_' + str(vac),
zoom_in_fit = False)
A, f0, Q, bkg = [x for x in p_fit]
A_err, f0_err, Q_err, bkg_err = [x for x in p_err]
fitted_results.append([vac, f0, f0_err, Q, Q_err])
if type_II.size == 0: # first time
type_II = type_I_temp
else:
type_II = type_II.append(type_I_temp, ignore_index = True)
# save the type_II data
type_II.to_csv(handle + '_vacmega.txt', sep = '\t',
               index = False, header = False)
# save the fitted results
fitted_results = pd.DataFrame(fitted_results, columns = ['vac(V)', 'f0(Hz)', 'f0_err(Hz)',
                                                         'Q', 'Q_err'])
fitted_results.to_csv(handle + '_fitted_results.txt', sep = '\t',
                      index = False)
# return to the parent folder
os.chdir('..')
|
gpl-3.0
| -1,115,082,174,616,237,800 | 30.96 | 88 | 0.56432 | false |
google-research/motion_imitation
|
motion_imitation/utilities/pose3d.py
|
1
|
9138
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for 3D pose conversion."""
import math
import numpy as np
from pybullet_utils import transformations
VECTOR3_0 = np.zeros(3, dtype=np.float64)
VECTOR3_1 = np.ones(3, dtype=np.float64)
VECTOR3_X = np.array([1, 0, 0], dtype=np.float64)
VECTOR3_Y = np.array([0, 1, 0], dtype=np.float64)
VECTOR3_Z = np.array([0, 0, 1], dtype=np.float64)
# QUATERNION_IDENTITY is the multiplicative identity 1.0 + 0i + 0j + 0k.
# When interpreted as a rotation, it is the identity rotation.
QUATERNION_IDENTITY = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float64)
def Vector3RandomNormal(sigma, mu=VECTOR3_0):
"""Returns a random 3D vector from a normal distribution.
Each component is selected independently from a normal distribution.
Args:
sigma: Scale (or stddev) of distribution for all variables.
mu: Mean of distribution for each variable.
Returns:
A 3D vector in a numpy array.
"""
random_v3 = np.random.normal(scale=sigma, size=3) + mu
return random_v3
def Vector3RandomUniform(low=VECTOR3_0, high=VECTOR3_1):
"""Returns a 3D vector selected uniformly from the input box.
Args:
low: The min-value corner of the box.
high: The max-value corner of the box.
Returns:
A 3D vector in a numpy array.
"""
random_x = np.random.uniform(low=low[0], high=high[0])
random_y = np.random.uniform(low=low[1], high=high[1])
random_z = np.random.uniform(low=low[2], high=high[2])
return np.array([random_x, random_y, random_z])
def Vector3RandomUnit():
"""Returns a random 3D vector with unit length.
Generates a 3D vector selected uniformly from the unit sphere.
Returns:
A normalized 3D vector in a numpy array.
"""
longitude = np.random.uniform(low=-math.pi, high=math.pi)
sin_latitude = np.random.uniform(low=-1.0, high=1.0)
cos_latitude = math.sqrt(1.0 - sin_latitude * sin_latitude)
x = math.cos(longitude) * cos_latitude
y = math.sin(longitude) * cos_latitude
z = sin_latitude
return np.array([x, y, z], dtype=np.float64)
def QuaternionNormalize(q):
"""Normalizes the quaternion to length 1.
Divides the quaternion by its magnitude. If the magnitude is too
small, returns the quaternion identity value (1.0).
Args:
q: A quaternion to be normalized.
Raises:
ValueError: If input quaternion has length near zero.
Returns:
A quaternion with magnitude 1 in a numpy array [x, y, z, w].
"""
q_norm = np.linalg.norm(q)
if np.isclose(q_norm, 0.0):
raise ValueError(
'Quaternion may not be zero in QuaternionNormalize: |q| = %f, q = %s' %
(q_norm, q))
return q / q_norm
def QuaternionFromAxisAngle(axis, angle):
"""Returns a quaternion that generates the given axis-angle rotation.
Returns the quaternion: sin(angle/2) * axis + cos(angle/2).
Args:
axis: Axis of rotation, a 3D vector in a numpy array.
angle: The angle of rotation (radians).
Raises:
ValueError: If input axis is not a normalizable 3D vector.
Returns:
A unit quaternion in a numpy array.
"""
if len(axis) != 3:
raise ValueError('Axis vector should have three components: %s' % axis)
axis_norm = np.linalg.norm(axis)
if np.isclose(axis_norm, 0.0):
raise ValueError('Axis vector may not have zero length: |v| = %f, v = %s' %
(axis_norm, axis))
half_angle = angle * 0.5
q = np.zeros(4, dtype=np.float64)
q[0:3] = axis
q[0:3] *= math.sin(half_angle) / axis_norm
q[3] = math.cos(half_angle)
return q
def QuaternionToAxisAngle(quat, default_axis=VECTOR3_Z, direction_axis=None):
"""Calculates axis and angle of rotation performed by a quaternion.
Calculates the axis and angle of the rotation performed by the quaternion.
The quaternion should have four values and be normalized.
Args:
quat: Unit quaternion in a numpy array.
default_axis: 3D vector axis used if the rotation is near to zero. Without
this default, small rotations would result in an exception. It is
reasonable to use a default axis for tiny rotations, because zero angle
rotations about any axis are equivalent.
direction_axis: Used to disambiguate rotation directions. If the
direction_axis is specified, the axis of the rotation will be chosen such
that its inner product with the direction_axis is non-negative.
Raises:
ValueError: If quat is not a normalized quaternion.
Returns:
axis: Axis of rotation.
angle: Angle in radians.
"""
if len(quat) != 4:
raise ValueError(
'Quaternion should have four components [x, y, z, w]: %s' % quat)
if not np.isclose(1.0, np.linalg.norm(quat)):
raise ValueError('Quaternion should have unit length: |q| = %f, q = %s' %
(np.linalg.norm(quat), quat))
axis = quat[:3].copy()
axis_norm = np.linalg.norm(axis)
min_axis_norm = 1e-8
if axis_norm < min_axis_norm:
axis = default_axis
if len(default_axis) != 3:
raise ValueError('Axis vector should have three components: %s' % axis)
if not np.isclose(np.linalg.norm(axis), 1.0):
raise ValueError('Axis vector should have unit length: |v| = %f, v = %s' %
(np.linalg.norm(axis), axis))
else:
axis /= axis_norm
sin_half_angle = axis_norm
if direction_axis is not None and np.inner(axis, direction_axis) < 0:
sin_half_angle = -sin_half_angle
axis = -axis
cos_half_angle = quat[3]
half_angle = math.atan2(sin_half_angle, cos_half_angle)
angle = half_angle * 2
return axis, angle
def QuaternionRandomRotation(max_angle=math.pi):
"""Creates a random small rotation around a random axis.
Generates a small rotation with the axis vector selected uniformly
from the unit sphere and an angle selected from a uniform
distribution over [0, max_angle].
If the max_angle is not specified, the rotation should be selected
uniformly over all possible rotation angles.
Args:
max_angle: The maximum angle of rotation (radians).
Returns:
A unit quaternion in a numpy array.
"""
angle = np.random.uniform(low=0, high=max_angle)
axis = Vector3RandomUnit()
return QuaternionFromAxisAngle(axis, angle)
def QuaternionRotatePoint(point, quat):
"""Performs a rotation by quaternion.
Rotate the point by the quaternion using quaternion multiplication,
(q * p * q^-1), without constructing the rotation matrix.
Args:
point: The point to be rotated.
quat: The rotation represented as a quaternion [x, y, z, w].
Returns:
A 3D vector in a numpy array.
"""
q_point = np.array([point[0], point[1], point[2], 0.0])
quat_inverse = transformations.quaternion_inverse(quat)
q_point_rotated = transformations.quaternion_multiply(
transformations.quaternion_multiply(quat, q_point), quat_inverse)
return q_point_rotated[:3]
def IsRotationMatrix(m):
"""Returns true if the 3x3 submatrix represents a rotation.
Args:
m: A transformation matrix.
Raises:
ValueError: If input is not a matrix of size at least 3x3.
Returns:
True if the 3x3 submatrix is a rotation (orthogonal).
"""
if len(m.shape) != 2 or m.shape[0] < 3 or m.shape[1] < 3:
raise ValueError('Matrix should be 3x3 or 4x4: %s\n %s' % (m.shape, m))
rot = m[:3, :3]
eye = np.matmul(rot, np.transpose(rot))
return np.isclose(eye, np.identity(3), atol=1e-4).all()
# def ZAxisAlignedRobotPoseTool(robot_pose_tool):
# """Returns the current gripper pose rotated for alignment with the z-axis.
# Args:
# robot_pose_tool: a pose3d.Pose3d() instance.
# Returns:
# An instance of pose.Transform representing the current gripper pose
# rotated for alignment with the z-axis.
# """
# # Align the current pose to the z-axis.
# robot_pose_tool.quaternion = transformations.quaternion_multiply(
# RotationBetween(
# robot_pose_tool.matrix4x4[0:3, 0:3].dot(np.array([0, 0, 1])),
# np.array([0.0, 0.0, -1.0])), robot_pose_tool.quaternion)
# return robot_pose_tool
# def RotationBetween(a_translation_b, a_translation_c):
# """Computes the rotation from one vector to another.
# The computed rotation has the property that:
# a_translation_c = a_rotation_b_to_c * a_translation_b
# Args:
# a_translation_b: vec3, vector to rotate from
# a_translation_c: vec3, vector to rotate to
# Returns:
# a_rotation_b_to_c: new Orientation
# """
# rotation = rotation3.Rotation3.rotation_between(
# a_translation_b, a_translation_c, err_msg='RotationBetween')
# return rotation.quaternion.xyzw
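# Hedged usage sketch (illustrative only; relies on the pybullet_utils
# transformations module imported above and the [x, y, z, w] layout used here):
#   q = QuaternionFromAxisAngle(VECTOR3_Z, math.pi / 2)   # 90 degrees about z
#   p = QuaternionRotatePoint(VECTOR3_X, q)               # approximately [0, 1, 0]
#   axis, angle = QuaternionToAxisAngle(q)                # recovers VECTOR3_Z, pi/2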
|
apache-2.0
| 4,977,634,553,611,800,000 | 31.289753 | 80 | 0.687678 | false |
Christian-B/batch_tool
|
batch_tools.py
|
1
|
46695
|
import collections
import filecmp
import optparse # using optparse as hydra still python 2.6
import os
import re
import shutil
import sys
parser = None
def report_error(error):
"""Prints the error, usage (if possible) and exits -1"""
#print error
if parser:
# parser.print_help()
print
print error
sys.exit(1)
def expanderuser(path):
"""Replaces the ~ with the users home directory"""
if path.startswith("~"):
return os.path.expanduser("~") + path[1:]
return path
def check_parent(parent):
"""Checks Parent directory exists and tries to make it of not"""
if parent:
if not os.path.isdir(parent):
if os.path.isfile(parent):
report_error("parent: " + parent + " is a file.")
grand_parent = os.path.dirname(os.path.normpath(parent))
if (grand_parent):
if os.path.isdir(grand_parent):
os.mkdir(parent, 0744)
else:
report_error("grand parent (" + grand_parent + ") of " + parent + " does not exist.")
else:
os.mkdir(parent, 0744)
if not os.path.isdir(parent):
report_error("parent: " + parent + " does not exist.")
else:
raise Exception("Unexpected None parent")
def name_cleaner(root, name):
"""Creates a useable path from a root and suggested name.
    The name is first cleaned to remove special characters.
    Whenever one or more special characters are found a single underscore is used instead
"""
name = name.replace("%", "_percent_")
name = re.sub('[^0-9a-zA-Z]+', '_', name)
if name[-1] == "_":
name = name[: -1]
if name[0] == "_":
name = name[1:]
return os.path.join(root, name)
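# Illustrative example (not in the original source):
#   name_cleaner("/tmp", "My File (v2)") -> "/tmp/My_File_v2"
# The "%" sign is spelled out as "_percent_"; runs of other special characters
# collapse to a single underscore and leading/trailing underscores are trimmed.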
def copy_if_new(old_path, new_path, verbose=True):
"""Copies a file to a new location if required and safe.
Checks if there is already a file at the new paths.
Existing files are compared with the new file
Rather than overwrite a different file the program exis with an error
"""
if os.path.exists(new_path):
if filecmp.cmp(new_path, old_path):
if verbose:
print "ignoring existing", new_path
else:
report_error("Unwilling to overwrite: " + new_path + " with " + old_path)
else:
shutil.copy2(old_path, new_path)
if verbose:
print new_path, "created"
def remove_symbols(s):
if s.find("__") == -1:
return s
# Patterns used by Galaxy
s = s.replace("__cb__", ']')
s = s.replace("__cc__", '}')
s = s.replace("__dq__", '"')
s = s.replace("__lt__", '<')
s = s.replace("__gt__", '>')
s = s.replace("__ob__", '[')
s = s.replace("__oc__", '{')
s = s.replace("__sq__", "'")
# Patterns added by Christian
s = s.replace("__in__", '%in%')
s = s.replace("__not__", '!')
end = -2
# tab = 9
# | = 124
while True:
start = s.find("__", end + 2) + 2
if start == 1:
return s
end = s.find("__", start)
if end == -1:
return s
part = s[start: end]
try:
ascii = int(part)
s = s.replace("__" + part + "__", chr(ascii))
end = -2
except ValueError:
pass
return s
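# Illustrative example (not in the original source): Galaxy-style escapes and
# "__<ascii>__" codes are both expanded, e.g.
#   remove_symbols("a__sq__b__33__c") -> "a'b!c"   (33 is the ASCII code for "!")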
"""
File or directory processing methods
All will take three parameters
root: full path excluding the current directory or file
name: Name of the file or directory
verbose: flag to say if full output should be included
returns: True if and only if any subdirectories should be walked.
Note: Return is only about subdirectories and not name!
"""
def approve_none(root, name, verbose=False):
"""Simple method to do nothing and ignore subdirectores"""
return False
def approve_all(root, name, verbose=False):
"""Simple method to do nothing but walk any subdirectores"""
return True
def print_size(root, name, verbose=False):
"""Debug method to print size of a file"""
path = os.path.join(root, name)
print path, "has size", os.path.getsize(path)
class RegexChecker:
"""Check a name against one or more regex"""
def __init__(self, regexes=[".*"]):
self.patterns = []
for regex in regexes:
self.patterns.append(re.compile(regex))
def approve_name(self, root, name, verbose=False):
"""Returns true of name matches any of the regex"""
for pattern in self.patterns:
if pattern.search(name):
return True
return False
class FileSizeChecker:
"""Superclass of classes that could check the size of a file"""
def __init__(self, maximum_size=None, minimum_size=None):
if maximum_size is None:
if minimum_size is None:
self.size_wrong = self.never_wrong
else:
self.minimum_size = minimum_size
self.size_wrong = self.too_small
else:
self.maximum_size = maximum_size
if minimum_size is None:
self.size_wrong = self.too_big
else:
self.minimum_size = minimum_size
self.size_wrong = self.outside_range
def never_wrong(self, path):
return False
def too_small(self, path):
return os.path.getsize(path) < self.minimum_size
def too_big(self, path):
#print path
#print os.path.getsize(path)
#print os.path.getsize(path) > self.maximum_size
return os.path.getsize(path) > self.maximum_size
def outside_range(self, path):
size = os.path.getsize(path)
if (size < self.minimum_size):
return True
return os.path.getsize(path) > self.maximum_size
class DirectoryLister(FileSizeChecker):
"""Creates a list (to file) of the all suitable directories
A directory is conidered suitable if
Neither its name or any of its parents match the ignore_regexes
It contains ALL of the required files
"""
def __init__(self, list_file, ignore_regexes=[], required_files=[], check_path=None, list_path=None, maximum_size=None, minimum_size=None):
"""Sets up the class and stores all parameteres
ignore_regexes: Pattern of all directories to ignore
(including children directories of these)
required_files: List of files that must all be present
Missing a single one excludes the directory
check_path and list_path: Allow you to check in one place and
list in a different place
if the path starts with check_path that is replaced with list_path
"""
FileSizeChecker.__init__(self, maximum_size, minimum_size)
self.ignore_patterns = []
for regex in ignore_regexes:
self.ignore_patterns.append(re.compile(regex))
self.required_files = required_files
self.check_path = check_path
self.list_path = list_path
if self.check_path:
self.check_path = expanderuser(self.check_path)
if not self.list_path:
raise Exception("if check_path is specified a list_path is required")
else:
if self.list_path.startswith("~"):
raise Exception("Do not use ~ in list_path, as it does not work in all cases with all applications.")
else:
if self.list_path:
raise Exception("if list_path is specified a check_path is required")
self.list_file = expanderuser(list_file)
try:
afile = open(list_file, 'w')
afile.close()
except Exception as e:
print e
report_error("Unable to open file: " + list_file)
def list_directory(self, root, name, verbose=True):
"""Processes the directory as per the settings passed on init"""
for pattern in self.ignore_patterns:
if pattern.search(name):
# Ignore directory and its children
return False
path = os.path.join(root, name)
for required_file in self.required_files:
required_path = os.path.join(path, required_file)
if not os.path.exists(required_path):
# Ignore directory but check its children!
if verbose:
print "ignoring", path, "as it is missing", required_file
return True
else:
if (self.size_wrong(required_path)):
# Ignore directory but check its children!
if verbose:
print "ignoring", path, "as", required_file, "is the wrong size"
return True
if self.check_path:
if path.startswith(self.check_path):
path = self.list_path + path[len(self.check_path):]
with open(self.list_file, 'a') as f:
f.write(path)
f.write("\n")
if verbose:
print path, "added"
return True
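# Hedged usage sketch (paths and file names below are illustrative, not from the
# original source): list every directory that holds both required files, writing
# cluster-side paths into runs.txt.
#   lister = DirectoryLister("runs.txt",
#                            required_files=["reads_1.fastq", "reads_2.fastq"],
#                            check_path="/mnt/local", list_path="/cluster/data")
#   for root, dirs, files in os.walk("/mnt/local"):
#       dirs[:] = [d for d in dirs if lister.list_directory(root, d)]
# The return value doubles as the "walk the children" decision described in the
# module docstring, so it can drive os.walk pruning directly as shown.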
class Copier(FileSizeChecker):
"""Copies the files into seperate directories"""
def __init__(self, endings_mappings, target_parent=os.getcwd(), maximum_size=None, minimum_size=None):
"""Copies the files macthing endings_mappings into target_parent
endings_mappings is a dictionary of regex terms to file endings
Every time a file is found with matches the key and new file is
created that with a name based on the directory name and the ending
The program exits on an attempt to overwrite with a different file
"""
FileSizeChecker.__init__(self, maximum_size, minimum_size)
self.target_parent = expanderuser(target_parent)
check_parent(self.target_parent)
self.endings_mappings = {}
try:
if len(endings_mappings) == 0:
raise Exception("endings_mappings may not be empty")
except Exception as e:
print e
raise Exception("endings_mappings must be a dictionary")
for(regex, file_name) in endings_mappings.items():
pattern = re.compile(regex)
self.endings_mappings[pattern] = file_name
def __act_on_files__(self, old_path, new_path, verbose=True):
copy_if_new(old_path, new_path, verbose=verbose)
return True
def file_action(self, root, name, verbose=True):
"""Checks if name matches any regex pattern and if so copies the file
        As files are handled one at a time this method copies the file
even if the directory does not have all expected files
"""
for(pattern, file_name) in self.endings_mappings.items():
match = pattern.search(name)
if match:
prefix = name[: match.start()]
if prefix[-1] == ".":
prefix = prefix[: -1]
if len(prefix) == 0:
report_error("Ending regex: " + pattern.pattern + " was found at start of " + name)
oldpath = os.path.join(root, name)
if (self.size_wrong(oldpath)):
# Ignore directory but check its children!
if verbose:
print "ignoring", oldpath, "as it is the wrong size"
return False
newdir = os.path.join(self.target_parent, prefix)
if not os.path.isdir(newdir):
os.mkdir(newdir)
newpath = os.path.join(newdir, file_name)
return self.__act_on_files__(oldpath, newpath, verbose)
return False
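# Hedged usage sketch (the regex and names below are illustrative, not from the
# original source): sort sequencing files into per-sample folders.
#   copier = Copier({r"_R1\.fastq\.gz$": "reads_1.fastq.gz"}, target_parent="~/sorted")
#   for root, dirs, files in os.walk("/data/runs"):
#       for f in files:
#           copier.file_action(root, f)
# A file such as sampleA_R1.fastq.gz would be copied to <target_parent>/sampleA/
# reads_1.fastq.gz, the per-directory name coming from whatever precedes the match.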
class Linker(Copier):
"""Links the files into a file in a seperate directory"""
def __act_on_files__(self, old_path, new_path, verbose=True):
if os.path.exists(new_path):
try:
with open(new_path, "r") as old_file:
old_link = old_file.read().replace('\n', '')
if old_link != old_path:
if len(old_link) > 200:
report_error("Unwilling overwrite: " + new_path +
" it does not apear to hold a link")
report_error("Unwilling overwrite: " + new_path +
" with " + old_path +
" it currently points to " + old_link)
except Exception as e:
print e
raise Exception("Exception overwriting: " + new_path)
with open(new_path, 'w') as f:
f.write(old_path)
if verbose:
print old_path, "recorded at", new_path
return True
class File_action:
"""Superclass of classes that write details to file"""
def write_summary(self, path, root, summary):
"""Writes a line to path with root tab summary"""
directory = os.path.basename(os.path.normpath(root))
with open(path, 'a') as f:
f.write(directory)
f.write("\t")
f.write(summary)
if summary[-1] != "\n":
f.write("\n")
class Extractor(File_action):
"""Extract information from the line that starts with extract_prefix
Looks at any file whose name is in use_files
Looks for a single line that starts with extract_prefix
    Strips off whitespace if requested to
Writes this to summary_path
"""
def __init__(self, summary_path, extract_prefix, use_files, strip=True):
self.extract_prefix = extract_prefix
if not self.extract_prefix:
raise Exception("extract_prefix parameter(s) missing")
if not isinstance(self.extract_prefix, basestring):
raise Exception(self.extract_prefix, "is not a string")
self.summary_path = summary_path
try:
afile = open(self.summary_path, 'w')
afile.close()
except Exception as e:
print e
raise Exception("Unable to open file: " + self.summary_path)
self.use_files = use_files
if not self.use_files:
raise Exception("use_files parameter(s) missing or empty")
if isinstance(self.use_files, basestring):
            self.use_files = [self.use_files]
self.strip = strip
def get_summary(self, root, name, verbose=True):
"""Summarizes the single line that starts with exact_prefix"""
if name in self.use_files:
path = os.path.join(root, name)
summary = None
with open(path, 'r') as the_file:
for line in the_file:
if self.strip:
line = line.strip()
if line.startswith(self.extract_prefix):
if summary:
raise Exception("Two lines found in" + path + " which start with " + self.extract_prefix)
summary = line[len(self.extract_prefix):]
if self.strip:
summary = summary.strip()
if summary:
self.write_summary(self.summary_path, root, summary)
if verbose:
print path, "had", summary
else:
if verbose:
print self.prefix, "not found in ", path
class ByPrefixCombiner(File_action):
"""Extract information from each line that includes the divider
Looks at any file whose name is in use_files
    Looks for lines with the divider in them
    Writes what comes after the divider
    To a file whose name is based on what comes before the divider
    Strips off whitespace if requested to
"""
def __init__(self, divider, use_files, output_dir=os.getcwd(), strip=True):
"""Sets the parameters for the combiner
divider: String to act as the divider
use_files: List of file names to check
output_dir: Parent directory of all summary files
"""
self.divider = divider
if not self.divider:
raise Exception("divider_prefix parameter(s) missing")
if not isinstance(self.divider, basestring):
raise Exception(self.divider, "is not a string")
self.use_files = use_files
if not self.use_files:
raise Exception("use_files parameter(s) missing or empty")
if isinstance(self.use_files, basestring):
            self.use_files = [self.use_files]
self.output_dir = output_dir
check_parent(self.output_dir)
self.strip = strip
self.summaries = collections.Counter()
def get_summaries(self, root, name, verbose=True):
"""Extract data and appends to summary files"""
if name in self.use_files:
path = os.path.join(root, name)
count = 0
with open(path, 'r') as the_file:
for line in the_file:
parts = line.split(self.divider)
if len(parts) == 2:
if self.strip:
for index in range(len(parts)):
parts[index] = parts[index].strip()
summary_file = name_cleaner(self.output_dir, parts[0]) + ".tsv"
self.write_summary(summary_file, root, parts[1])
self.summaries[summary_file] += 1
count += 1
elif len(parts) > 2:
raise Exception("file", path, "as a line with multiple", self.divider)
if verbose:
print path, "contained", count, "lines with", self.divider
class Merger(FileSizeChecker):
"""Merges selected files into a single directory
Looks for files whose name is one of the keys in file_mappings
Copies a file to the target_parent with a name being a combination
of the directory name and value in file_mappings
"""
def __init__(self, file_mappings, target_parent=os.getcwd(), maximum_size=None, minimum_size=None):
"""Saves the parameters of a filE_mapping dictionary and target"""
FileSizeChecker.__init__(self, maximum_size, minimum_size)
self.target_parent = expanderuser(target_parent)
check_parent(self.target_parent)
self.file_mappings = file_mappings
try:
if len(file_mappings) == 0:
raise Exception("file_mappings may not be empty")
except Exception as e:
print e
raise Exception("file_mappings must be a dictionary")
def copy_files(self, root, name, verbose=True):
"""Copies the file to the target_parent"""
if name in self.file_mappings:
old_path = os.path.join(root, name)
if (self.size_wrong(old_path)):
# Ignore directory but check its children!
if verbose:
print "ignoring", old_path, "as it is the wrong size"
return False
new_name = os.path.basename(root) + self.file_mappings[name]
new_path = os.path.join(self.target_parent, new_name)
if verbose:
print "coping", old_path
copy_if_new(old_path, new_path, verbose=verbose)
class Filter(FileSizeChecker):
"""Copies the files into seperate directories"""
def __init__(self, filter_mappings, keep_regexes, remove_regexes=list(), maximum_size=None, minimum_size=None, verbose=True):
"""Copies the files matching endings_mappings based on keep_regexes and remove_regexs
replace_mapping is a dictionary of regex terms to replacements
Every time a file is found with matches the key a new file is
created with the regex matched part of the name replaced by the value
Only lines that contain at least one of the keep patterns are copied.
From these any part that matches any remove regex is replace by an empty string
The program exits on an attempt to overwrite with a different file
"""
FileSizeChecker.__init__(self, maximum_size, minimum_size)
self.filter_mappings = {}
try:
if len(filter_mappings) == 0:
raise Exception("filter_mappings may not be empty")
except Exception as e:
print e
raise Exception("filter_mappings must be a dictionary")
for(regex, replace) in filter_mappings.items():
pattern = re.compile(regex)
self.filter_mappings[pattern] = replace
self.keep_patterns = list()
try:
if len(keep_regexes) == 0:
raise Exception("keep_regexes may not be empty")
except Exception as e:
print e
raise Exception("keep_regexes must be a list")
for regex in keep_regexes:
if verbose:
print "Will keep any line that has: ", regex
pattern = re.compile(remove_symbols(regex))
self.keep_patterns.append(pattern)
self.remove_patterns = list()
for regex in remove_regexes:
if verbose:
print "Will remove any text that matches: ", regex
pattern = re.compile(remove_symbols(regex))
self.remove_patterns.append(pattern)
def __keep_line__(self, line):
for pattern in self.keep_patterns:
match = pattern.search(line)
if match:
return True
return False
def __copy_and_filter__(self, old_path, new_path, verbose=True):
if os.path.exists(new_path):
report_error("Unwilling to overwrite: " + new_path + " with " + old_path)
else:
with open(old_path, 'r') as old_f:
with open(new_path, 'w') as new_f:
for line in old_f:
if self.__keep_line__(line):
for remove_pattern in self.remove_patterns:
#print remove_pattern.pattern
line = remove_pattern.sub("", line)
#print line
line = line.strip()
new_f.write(line)
new_f.write("\n")
def file_action(self, root, name, verbose=True):
"""Checks if name matches any regex pattern and if so copies the file
"""
for(pattern, replace) in self.filter_mappings.items():
match = pattern.search(name)
if match:
oldpath = os.path.join(root, name)
if (self.size_wrong(oldpath)):
# Ignore directory but check its children!
if verbose:
print "ignoring", oldpath, "as it is the wrong size"
return False
newname = re.sub(pattern.pattern, replace, name)
newpath = os.path.join(root, newname)
return self.__copy_and_filter__(oldpath, newpath, verbose)
return False
def do_walk(source=os.getcwd(), directory_action=approve_all, file_action=print_size, onerror=None, followlinks=False, verbose=True):
"""
Walker method
Inputs are:
source Parent directory to be walked through
    directory_action method to process and check subdirectories
        method may do something useful but must return a boolean
        approve_all (default) causes all children (recursive) to be walked
        approve_none ignores all child directories
    file_action method processes each file
        can, but need not, do anything
        any return value is ignored
        print_size (default) just prints the path and size
    onerror is a function called if the underlying os.listdir fails
followlinks would allow following symbolic links
WARNING Be aware that setting followlinks to True can lead to infinite
recursion if a link points to a parent directory of itself.
walk() does not keep track of the directories it visited already.
verbose: Flag passed to action methods to provide more output
"""
source = expanderuser(source)
# Must be topdown=True otherwise walk will process subdirectories before checking them
for root, dirs, files in os.walk(source, topdown=True, onerror=onerror, followlinks=followlinks):
dirs_clone = dirs[:]
for sub in dirs_clone:
if not directory_action(root, sub, verbose=verbose):
if verbose:
print "skipping directory", os.path.join(root, sub)
dirs.remove(sub)
# do something cool with each file
for name in files:
file_action(root, name, verbose=verbose)
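# Minimal usage sketch (the source path is hypothetical): walk a tree printing file sizes,
# which is what the default file_action does.
#   do_walk(source="/data/runs", verbose=True)
# Any callable with the signature (root, name, verbose=True) can be passed as
# directory_action (must return a boolean) or file_action (return value ignored).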
"""
Methods for creating batch job file
"""
def count_lines(file_name):
"""Counts the number of lines in a file"""
lines = 0
with open(file_name) as the_file:
for line in the_file:
lines += 1
return lines
def write_loop_block(new_file, directory_list):
"""Insters the instuctions to do a job array based on a directpry file"""
count = count_lines(directory_list)
new_file.write("#$ -t 1-")
new_file.write(str(count))
new_file.write("\n")
new_file.write("DIRECTORY=`sed -n \"${SGE_TASK_ID}p\" ")
new_file.write(directory_list)
new_file.write("`\n")
new_file.write("\n")
def update_script(script, new_script, directory_list):
"""Converts a none job script into a job array script
Assumes that the first instruction is a line that starts with "DIRECTORY="
Replaces that line with the looping instructions
inluding to look for the directories in the directory_list file.
"""
directory_line_found = False
with open(script) as old_file:
with open(new_script, 'w') as new_file:
for line in old_file:
if line.startswith("DIRECTORY="):
if directory_line_found:
raise Exception("qsub_file: " + script + " has two lines that start with DIRECTORY=")
else:
write_loop_block(new_file, directory_list)
directory_line_found = True
else:
new_file.write(line)
if not directory_line_found:
raise Exception("qsub_file: " + script + " has no line that start with DIRECTORY=")
def short(term):
"""Converts a parameter name into a short flag"""
return "-" + term.lower()[0]
def longer(term):
"""Converts a parameter name into a long flag"""
return "--" + term.lower()
"""Commands for the program"""
__FIND__ = "find"
__LIST__ = "list"
__BATCH__ = "batch"
__EXTRACT__ = "extract"
__DELIMIT__ = "delimit"
__MERGE__ = "merge"
__FILTER__ = "filter"
__COMMANDS__ = [__FIND__, __LIST__, __BATCH__, __EXTRACT__, __DELIMIT__, __MERGE__, __FILTER__]
"""Parameter names"""
#__AFTER_DATE__ = "AFTER_DATE"
__BATCH_SCRIPT__ = "BATCH_SCRIPT"
__COPY__ = "COPY"
__DELIMITER__ = "DELIMITER"
__EXTRACT_PREFIX__ = "EXTRACT_PREFIX"
__FILE_LIST__ = "FILE_LIST"
__LISTp__ = "LIST"
__KEEP_REGEX__ = "KEEP_REGEX"
__MINIMUM_SIZE__ = "MINIMUM_SIZE"
__MAXIMUM_SIZE__ = "MAXIMUM_SIZE" # short = x
__OUTPUT_DIR__ = "OUTPUT_DIR"
__PARENT__ = "PARENT"
__QSUB_SCRIPT__ = "QSUB_SCRIPT"
__REMOVE_REGEX__ = "REMOVE_REGEX"
__SOURCE__ = "SOURCE"
#__UPTO_DATE__ = "UPTO_DATE"
__VERBOSE__ = "VERBOSE"
#X" used for maximum
#Directory lister ignore
#Directory lister check vs list path
if __name__ == '__main__':
usage = "usage: %prog command(s) [options] \n" + \
"Where command(s) is one or more of " + str(__COMMANDS__)
parser = optparse.OptionParser(usage=usage)
parser.add_option(short(__VERBOSE__), longer(__VERBOSE__), action="store_true", dest=__VERBOSE__, default=False,
help="If set will generate output of what the tool is doing.")
find_group = optparse.OptionGroup(parser, __FIND__,
"Searchs though the " + __SOURCE__ + " directory(and subdirectories) "
"looking for file that match the regex pattern in " + __FILE_LIST__ + "(s)."
"For file that matches the regex pattern, the part before the pattern is used as the name of "
"a subdirectory of " + __PARENT__ + " to be created/used. "
"Anything after the pattern is ignored. "
"A new file is created with either a link or copy. "
'Example: find --file_list="R1_001.fastq.gz:left.link" --file_list="R2_001.fastq.gz:right.link" ')
find_group.add_option(short(__SOURCE__), longer(__SOURCE__), dest=__SOURCE__, action="store", type="string",
default=os.getcwd(),
help=__SOURCE__ + "directory that holds the raw data files. "
"Default is the current directory")
find_group.add_option(short(__PARENT__), longer(__PARENT__), dest=__PARENT__, action="store", type="string",
default=os.getcwd(),
help=__PARENT__ + " directory of the sub directories to hold the data for each run "
"Default is the current directory")
parent_option = find_group.get_option(longer(__PARENT__))
find_group.add_option(short(__FILE_LIST__), longer(__FILE_LIST__), dest=__FILE_LIST__, action="append", type="string",
help="List of files to operate over. "
"If " + __FIND__ + " specified, format must be regex:name "
"If " + __MERGE__ + " is specified, format must be ending:name "
"If " + __FILTER__ + " is specified, format must be regex:replacement "
"Otherwise format can be just the name. "
"Multiple values allowed.")
file_list_option = find_group.get_option(longer(__FILE_LIST__))
find_group.add_option(short(__MINIMUM_SIZE__), longer(__MINIMUM_SIZE__), dest=__MINIMUM_SIZE__, action="store", type="long",
help="Minimum size in bytes that a file must have to be used.")
minimum_size_option = find_group.get_option(longer(__MINIMUM_SIZE__))
find_group.add_option("-X", longer(__MAXIMUM_SIZE__), dest=__MAXIMUM_SIZE__, action="store", type="long",
help="Maximum size in bytes that a file must have to be used.")
maximum_size_option = find_group.get_option(longer(__MAXIMUM_SIZE__))
find_group.add_option(short(__COPY__), longer(__COPY__), action="store_true", dest=__COPY__, default=False,
help="(Optional) If specified will copy the original file to the new location. "
"Otherwise just the path to the original file is saved.")
parser.add_option_group(find_group)
list_group = optparse.OptionGroup(parser, __LIST__,
"Lists all the paths to the directories "
"(including subdirectories) in " + __PARENT__ + " directory"
"But only includes the directories that contain a files in " + __FILE_LIST__ + "(s)"
"This list is writen to the path specified as " + __LISTp__ + " "
'Example: list --file_list="left.link" --file_list="right.link"')
list_group.option_list.append(parent_option)
list_group.option_list.append(file_list_option)
list_group.option_list.append(minimum_size_option)
list_group.option_list.append(maximum_size_option)
list_group.add_option(short(__LISTp__), longer(__LISTp__), dest=__LISTp__, action="store", type="string",
default="directories.txt",
help="File to hold the list of directories. "
"Default is directories.txt in the current directory.")
list_option = list_group.get_option(longer(__LISTp__))
parser.add_option_group(list_group)
batch_group = optparse.OptionGroup(parser, __BATCH__,
"Converts the " + __QSUB_SCRIPT__ + " to a batch/job array "
"The batch will run over each directory in the " + __LISTp__)
batch_group.option_list.append(list_option)
batch_group.add_option(short(__QSUB_SCRIPT__), longer(__QSUB_SCRIPT__), dest=__QSUB_SCRIPT__, action="store", type="string",
help="qsub script to be switched to batch. "
"This script should have a line which starts with DIRECTORY= "
"This line will be replaced with a loop for each directory in the " + __LISTp__)
batch_group.add_option(short(__BATCH_SCRIPT__), longer(__BATCH_SCRIPT__), dest=__BATCH_SCRIPT__, action="store", type="string",
help="Location to write batch qsub script to. "
"Default is " + __QSUB_SCRIPT__ + " + \"_batch\" ")
parser.add_option_group(batch_group)
extract_group = optparse.OptionGroup(parser, __EXTRACT__,
"Extract information from the files in " + __PARENT__ + " directory "
"(and sub directories) "
"whose name are in the " + __FILE_LIST__ + "(s)"
"Looking for a line that begins with the " + __EXTRACT_PREFIX__ + " "
"These will be written to a file called " + __EXTRACT_PREFIX__ + ".tsv "
"Placed in the " + __OUTPUT_DIR__)
extract_group.option_list.append(parent_option)
extract_group.option_list.append(file_list_option)
extract_group.add_option(short(__EXTRACT_PREFIX__), longer(__EXTRACT_PREFIX__), dest=__EXTRACT_PREFIX__,
action="append", type="string",
help="Prefix of the line to extract information from.")
extract_group.add_option(short(__OUTPUT_DIR__), longer(__OUTPUT_DIR__), dest=__OUTPUT_DIR__, action="store", type="string",
help="Directories to hold the file(s). "
"Default is " + __PARENT__)
output_option = extract_group.get_option(longer(__OUTPUT_DIR__))
parser.add_option_group(extract_group)
delimit_group = optparse.OptionGroup(parser, __DELIMIT__,
"Extract information from the files in the " + __PARENT__ + " directory "
"(and sub directories) "
"whose name are in the " + __FILE_LIST__ + "(s) "
"Looking for lines with the delimiter in them."
"Saving what comes after the " + __DELIMITER__ + " + in a file whose "
"name is based on what comes before the delimieter"
"Placed in the " + __OUTPUT_DIR__)
delimit_group.option_list.append(parent_option)
delimit_group.option_list.append(file_list_option)
delimit_group.add_option(short(__DELIMITER__), longer(__DELIMITER__), dest=__DELIMITER__, action="store", type="string",
help="Delimiter to create extract information files with."
"Will look in all files in the directories specified by parent that are in the file_list."
"For each line with this delimiter it will not the rest in a summary file."
"This data will be written to a tsv file in the parent directory.")
delimit_group.option_list.append(output_option)
parser.add_option_group(delimit_group)
merge_group = optparse.OptionGroup(parser, __MERGE__,
"Merges files found in the " + __PARENT__ + " directory (and sub directories) "
"whose name are in the name part of " + __FILE_LIST__ + "(s) "
"Coping these with a file whose name is based on the directory "
"and the ending part of " + __FILE_LIST__ + "(s) "
"Placed in the " + __OUTPUT_DIR__)
merge_group.option_list.append(parent_option)
merge_group.option_list.append(file_list_option)
merge_group.option_list.append(output_option)
merge_group.option_list.append(minimum_size_option)
merge_group.option_list.append(maximum_size_option)
parser.add_option_group(merge_group)
filter_group = optparse.OptionGroup(parser, __FILTER__,
"Filters files found in the " + __PARENT__ + " directory (and sub directories) "
"whose name match the regex part of " + __FILE_LIST__ + "(s) "
"Coping these with a file whose name is the regex part replaced with the replacement part of " + __FILE_LIST__ + ". "
"Placed in the " + __OUTPUT_DIR__)
filter_group.option_list.append(parent_option)
filter_group.option_list.append(file_list_option)
filter_group.add_option(short(__KEEP_REGEX__), longer(__KEEP_REGEX__), dest=__KEEP_REGEX__, action="append", type="string",
help="Regex pattern to idenfity a line that should be kept. "
"Multiple values are allowed in which case any line which has any of the values is kept.")
filter_group.add_option(short(__REMOVE_REGEX__), longer(__REMOVE_REGEX__), dest=__REMOVE_REGEX__, action="append", type="string",
help="Regex pattern to idenfity parts of the lines to remove. "
"Multiple values are allowed in which they are applied in the order provided."
"The same pattern can be used for both a " + __KEEP_REGEX__ + " and a " + __REMOVE_REGEX__ + ". ")
filter_group.option_list.append(output_option)
parser.add_option_group(filter_group)
(options, args) = parser.parse_args()
if len(args) == 0:
report_error("No command specified! Legal commands are: " + str(__COMMANDS__))
for arg in args:
if arg not in __COMMANDS__:
report_error("Unexpected command " + arg + " Legal commands are: " + str(__COMMANDS__))
endings_mappings = None
required_files = None
if __FIND__ in args:
if __MERGE__ in args:
report_error(__FIND__ + " and " + __MERGE__ +
" can not be combined due to different " + __FILE_LIST__ + " formats")
if __FILTER__ in args:
report_error(__FIND__ + " and " + __FILTER__ +
" can not be combined due to different " + __FILE_LIST__ + " formats")
if not options.__dict__[__FILE_LIST__]:
report_error(__FIND__ + " selected but no " + __FILE_LIST__ + " parameter provided")
endings_mappings = {}
for file_option in options.__dict__[__FILE_LIST__]:
parts = file_option.split(":")
if len(parts) != 2:
report_error(__FILE_LIST__ + " " + file_option + " not in the expected regex:name format")
endings_mappings[parts[0]] = parts[1]
required_files = endings_mappings.values()
elif __MERGE__ in args:
if __FILTER__ in args:
report_error(__MERGE__ + " and " + __FILTER__ +
" can not be combined due to different " + __FILE_LIST__ + " formats")
if not options.__dict__[__FILE_LIST__]:
report_error(__MERGE__ + " selected but no " + __FILE_LIST__ + " parameter provided")
file_mappings = {}
for file_option in options.__dict__[__FILE_LIST__]:
parts = file_option.split(":")
if len(parts) != 2:
report_error(__FILE_LIST__ + " " + file_option + " not in the expected ending:name format")
file_mappings[parts[1]] = parts[0]
elif __FILTER__ in args:
if not options.__dict__[__FILE_LIST__]:
report_error(__MERGE__ + " selected but no " + __FILE_LIST__ + " parameter provided")
filter_mappings = {}
for file_option in options.__dict__[__FILE_LIST__]:
parts = file_option.split(":")
if len(parts) != 2:
report_error(__FILE_LIST__ + " " + file_option + " not in the expected ending:name format")
filter_mappings[parts[0]] = parts[1]
elif options.__dict__[__FILE_LIST__]:
required_files = []
for file_option in options.__dict__[__FILE_LIST__]:
parts = file_option.split(":")
if len(parts) == 1:
required_files.append(parts[0])
elif len(parts) == 2:
required_files.append(parts[1])
else:
report_error("FILE_LIST " + file_option + " has more than one : in it.")
if not options.__dict__[__OUTPUT_DIR__]:
options.__dict__[__OUTPUT_DIR__] = options.__dict__[__PARENT__]
options.__dict__[__LISTp__] = expanderuser(options.__dict__[__LISTp__])
if __FIND__ in args:
# parent, source and copy have default values
# File list already checked
if options.__dict__[__COPY__]:
copier = Copier(endings_mappings, options.__dict__[__PARENT__],
minimum_size=options.__dict__[__MINIMUM_SIZE__], maximum_size=options.__dict__[__MAXIMUM_SIZE__])
print "Coping files from", options.__dict__[__SOURCE__], "to", options.__dict__[__PARENT__]
else:
copier = Linker(endings_mappings, options.__dict__[__PARENT__],
minimum_size=options.__dict__[__MINIMUM_SIZE__], maximum_size=options.__dict__[__MAXIMUM_SIZE__])
print "linking files from", options.__dict__[__SOURCE__], "in", options.__dict__[__PARENT__]
print "Using the file mappings: "
print endings_mappings
do_walk(source=options.__dict__[__SOURCE__], directory_action=approve_all, file_action=copier.file_action,
verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
print
if __LIST__ in args:
# parent has a default value
if not required_files:
report_error(__LIST__ + " selected but no " + __FILE_LIST__ + " parameter provided")
print "Writing list of directories in", options.__dict__[__PARENT__], "to", options.__dict__[__LISTp__]
print "Only directories that have all these files are included: "
print required_files
lister = DirectoryLister(list_file=options.__dict__[__LISTp__], required_files=required_files,
minimum_size=options.__dict__[__MINIMUM_SIZE__], maximum_size=options.__dict__[__MAXIMUM_SIZE__])
do_walk(source=options.__dict__[__PARENT__], directory_action=lister.list_directory, file_action=approve_none,
verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
print
if __BATCH__ in args:
if not options.__dict__[__QSUB_SCRIPT__]:
report_error(__BATCH__ + " selected but no " + __QSUB_SCRIPT__ + " parameter provided")
if options.__dict__[__BATCH_SCRIPT__]:
batch = options.__dict__[__BATCH_SCRIPT__]
else:
batch = options.__dict__[__QSUB_SCRIPT__] + "_batch"
print "Writing new", __BATCH_SCRIPT__, "to", batch
print "Based on", __QSUB_SCRIPT__, options.__dict__[__QSUB_SCRIPT__]
print "Using directory LIST", options.__dict__[__LISTp__]
update_script(options.__dict__[__QSUB_SCRIPT__], batch, options.__dict__[__LISTp__])
if options.__dict__[__VERBOSE__]:
print
if __EXTRACT__ in args:
if not options.__dict__[__EXTRACT_PREFIX__]:
report_error(__EXTRACT__ + " selected but no " + __EXTRACT_PREFIX__ + " parameter provided")
if not required_files:
report_error(__EXTRACT__ + " selected but no " + __FILE_LIST__ + " parameter provided")
for extract_prefix in options.__dict__[__EXTRACT_PREFIX__]:
summary_path = name_cleaner(options.__dict__[__OUTPUT_DIR__], extract_prefix) + ".tsv"
print "Writing extract to ", summary_path
extractor = Extractor(summary_path, extract_prefix, required_files, strip=True)
do_walk(source=options.__dict__[__PARENT__], directory_action=approve_all, file_action=extractor.get_summary,
verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
print
if __DELIMIT__ in args:
if not options.__dict__[__DELIMITER__]:
report_error(__DELIMIT__ + " selected but no " + __DELIMITER__ + " parameter provided")
if not required_files:
report_error(__DELIMIT__ + " selected but no FILE_LIST parameter provided")
print "Writing extract to ", options.__dict__[__OUTPUT_DIR__]
combiner = ByPrefixCombiner(options.__dict__[__DELIMITER__], required_files,
options.__dict__[__OUTPUT_DIR__], strip=True)
do_walk(source=options.__dict__[__PARENT__], directory_action=approve_all,
file_action=combiner.get_summaries, verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
for key in sorted(combiner.summaries):
print key, combiner.summaries[key]
if options.__dict__[__VERBOSE__]:
print
if __MERGE__ in args:
print "Coping files to ", options.__dict__[__OUTPUT_DIR__]
merger = Merger(file_mappings, options.__dict__[__OUTPUT_DIR__],
minimum_size=options.__dict__[__MINIMUM_SIZE__], maximum_size=options.__dict__[__MAXIMUM_SIZE__])
do_walk(source=options.__dict__[__PARENT__], directory_action=approve_all,
file_action=merger.copy_files, verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
print
if __FILTER__ in args:
if not options.__dict__[__PARENT__]:
report_error(__FILTER__ + " selected but no " + __PARENT__ + " parameter provided")
print "Coping and filtering files to ", options.__dict__[__PARENT__]
if not filter_mappings:
report_error(__FILTER__ + " selected but no " + __FILE_LIST__ + " parameter provided")
if not options.__dict__[__KEEP_REGEX__]:
report_error(__FILTER__ + " selected but no " + __KEEP_REGEX__ + " parameter provided")
if not options.__dict__[__REMOVE_REGEX__]:
options.__dict__[__REMOVE_REGEX__] = list()
filter = Filter(filter_mappings, options.__dict__[__KEEP_REGEX__], options.__dict__[__REMOVE_REGEX__],
minimum_size=options.__dict__[__MINIMUM_SIZE__], maximum_size=options.__dict__[__MAXIMUM_SIZE__], verbose=options.__dict__[__VERBOSE__])
do_walk(source=options.__dict__[__PARENT__], directory_action=approve_all,
file_action=filter.file_action, verbose=options.__dict__[__VERBOSE__])
if options.__dict__[__VERBOSE__]:
print
|
gpl-2.0
| -2,103,030,591,755,150,600 | 44.203291 | 160 | 0.571474 | false |
letuananh/pysemcor
|
pysemcor/semcorxml.py
|
1
|
14579
|
# -*- coding: utf-8 -*-
'''
Semcor data in XML format
Latest version can be found at https://github.com/letuananh/pysemcor
References:
Python documentation:
https://docs.python.org/
PEP 0008 - Style Guide for Python Code
https://www.python.org/dev/peps/pep-0008/
PEP 257 - Python Docstring Conventions:
https://www.python.org/dev/peps/pep-0257/
@author: Le Tuan Anh <[email protected]>
'''
# Copyright (c) 2017, Le Tuan Anh <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Le Tuan Anh"
__email__ = "<[email protected]>"
__copyright__ = "Copyright 2017, pysemcor"
__license__ = "MIT"
__maintainer__ = "Le Tuan Anh"
__version__ = "0.1"
__status__ = "Prototype"
__credits__ = []
########################################################################
import os
import logging
import json
from lxml import etree
from bs4 import BeautifulSoup
from chirptext import FileHelper
from chirptext.leutile import StringTool
from chirptext import ttl
from yawlib import SynsetID
from yawlib.helpers import get_wn
# -------------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------------
wn = get_wn()
def getLogger():
return logging.getLogger(__name__)
# -------------------------------------------------------------------------------
# Data structures
# -------------------------------------------------------------------------------
class TokenInfo:
def __init__(self, text, **kwargs):
self.text = text
self.__data = dict(kwargs)
def __contains__(self, key):
return key in self.__data
def __getitem__(self, key):
return self.__data[key]
def get(self, key, default=None):
return self[key] if key in self else default
def __setitem__(self, key, value):
self.__data[key] = value
@property
def data(self):
return self.__data.items()
@property
def lemma(self):
return self['lemma'] if 'lemma' in self else self.text
def to_json(self):
data = dict(self.__data)
data['text'] = self.text
return data
def __repr__(self):
return "{}:{}".format(self.text, self.__data)
def __str__(self):
return "{}:{}".format(self.text, self.__data)
class FileSet(object):
def __init__(self, root):
self.root = root
self.__files = []
@property
def root(self):
return self.__root
@root.setter
def root(self, value):
self.__root = FileHelper.abspath(value)
def add_all(self, path):
folderpath = os.path.join(self.root, path)
if not os.path.isdir(folderpath):
getLogger().warning("Folder {} does not exist".format(path))
else:
files = FileHelper.get_child_files(folderpath)
for f in files:
self.add(os.path.join(path, f))
def add(self, path):
self.__files.append(path)
def __getitem__(self, idx):
return self.__files[idx]
def __len__(self):
return len(self.__files)
def abspaths(self):
return [os.path.join(self.root, p) for p in self]
def abspath(self, path):
return path if os.path.isabs(path) else os.path.join(self.root, path)
class SemcorXML(object):
def __init__(self, root):
self.files = FileSet(root)
if not os.path.isdir(root):
getLogger().warning("Root {} does not exist".format(root))
self.files.add_all('brown1/tagfiles')
self.files.add_all('brown2/tagfiles')
self.files.add_all('brownv/tagfiles')
@property
def root(self):
return self.files.root
def iterparse(self, path):
tree = etree.iterparse(self.files.abspath(path), events=('start', 'end'))
filename = 'n/a'
para = 'n/a'
for event, element in tree:
if event == 'start':
if element.tag == 'context':
filename = element.get('filename')
elif element.tag == 'p':
para = element.get('pnum')
if event == 'end':
if element.tag == 's':
# found a sentence
snum = element.get('snum')
tokens = []
for token in element:
token_data = dict(token.attrib)
token_data['tag'] = token.tag
text = fix_token_text(token.text)
if token.tag == 'wf':
# create sensekey
lemma = StringTool.strip(token.get('lemma'))
lexsn = StringTool.strip(token.get('lexsn'))
sk = lemma + '%' + lexsn if lemma and lexsn else ''
sk = StringTool.strip(sk.replace('\t', ' ').replace('|', ' '))
if sk:
token_data['sk'] = sk
tokens.append(TokenInfo(text, **token_data))
elif token.tag == 'punc':
tokens.append(TokenInfo(text, **token_data))
element.clear()
s = {'para': para,
'filename': filename,
'snum': snum,
'sid': "{}-{}-{}".format(filename, para, snum),
'tokens': tokens}
yield s
elif element.tag == 'p':
para = 'n/a'
element.clear()
elif element.tag == 'context':
filename = 'n/a'
element.clear()
def iter_ttl(self, limit=None, with_nonsense=True):
sk_map = {}
# Convert sentence by sentence to TTL
with wn.ctx() as wnctx:
for f in self.files[:limit] if limit else self.files:
for sj in self.iterparse(f):
s = to_ttl(sj, with_nonsense=with_nonsense, sk_map=sk_map, wnctx=wnctx)
yield s
def convert_to_ttl(self, ttlset, limit=None, with_nonsense=True):
sk_map = {}
with wn.ctx() as wnctx:
for f in self.files[:limit] if limit else self.files:
xml2ttl(f, self, ttlset, with_nonsense=with_nonsense, sk_map=sk_map, wnctx=wnctx)
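# Usage sketch (the semcor root path is hypothetical):
#   sc = SemcorXML("~/data/semcor3.0")
#   for sent in sc.iter_ttl(limit=1):
#       print(sent.text)
# iterparse() yields raw sentence dictionaries, while iter_ttl() wraps them as ttl.Sentence
# objects with sense tags resolved to synset IDs where possible.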
# -------------------------------------------------------------------------------
# Application logic
# -------------------------------------------------------------------------------
def xml2json(inpath, scxml, scjson):
new_name = FileHelper.getfilename(inpath) + ".json"
dir_name = os.path.dirname(inpath)
outpath = scjson.abspath(os.path.join(dir_name, new_name))
if os.path.isfile(outpath):
print("SKIPPED: {} (output file exists)".format(outpath))
return
else:
print("Generating: {} => {}".format(inpath, outpath))
dirpath = os.path.dirname(outpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(outpath, 'wt') as outfile:
for sj in scxml.iterparse(inpath):
sj['tokens'] = [t.to_json() for t in sj['tokens']]
outfile.write(json.dumps(sj))
outfile.write("\n")
def xml2ttl(inpath, scxml, scttl, with_nonsense=True, sk_map=None, wnctx=None):
''' convert all semcor files in XML format to ttl format '''
new_name = FileHelper.getfilename(inpath) + ".json"
dir_name = os.path.dirname(inpath)
outpath = scttl.abspath(os.path.join(dir_name, new_name))
if os.path.isfile(outpath):
print("SKIPPED: {} (output file exists)".format(outpath))
return
else:
print("Generating: {} => {}".format(inpath, outpath))
dirpath = os.path.dirname(outpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(outpath, 'wt') as outfile:
for sj in scxml.iterparse(inpath):
s = to_ttl(sj, with_nonsense=with_nonsense, sk_map=sk_map, wnctx=wnctx)
outfile.write(json.dumps(s.to_json(), ensure_ascii=False))
outfile.write("\n")
def to_ttl(sent, with_nonsense=True, sk_map=None, wnctx=None):
tokens = sent['tokens']
text = detokenize(tokens)
s = ttl.Sentence(text=text)
s.new_tag(sent['sid'], tagtype='origid')
s.import_tokens((t.text for t in tokens))
for tinfo, tk in zip(tokens, s):
for k, v in tinfo.data:
if (k, v) == ('tag', 'wf') or k == 'sk':
continue
if k == 'lemma':
tk.lemma = v
elif k == 'pos':
tk.pos = v
else:
tk.new_tag(label=v, tagtype=k)
# if sensekey exists, add it as a concept
lemma = tinfo.lemma
sk = fix_sensekey(tinfo.get('sk'))
rdf = tinfo.get('rdf')
comment = None
if sk and (with_nonsense or not is_nonsense(lemma, sk, rdf)):
sensetag = sk
if sk_map is not None and sk in sk_map:
sensetag = sk_map[sk]
elif wnctx is not None:
# try to determine synsetID
ss = wnctx.senses.select_single('sensekey=?', (sk,))
if ss is not None:
sid = str(SynsetID.from_string(ss.synsetid))
if sk_map is not None:
sk_map[sk] = sid
sensetag = sid
else:
# sensekey not found
getLogger().warning("There is no synsetID with sensekey={} | rdf={}".format(sk, rdf))
comment = 'sensekey'
s.new_concept(clemma=lemma, tag=sensetag, tokens=(tk,), comment=comment)
return s
KNOWN_KEYS = {"n't%4:02:00::": "not%4:02:00::"}
def fix_sensekey(sk):
return KNOWN_KEYS[sk] if sk in KNOWN_KEYS else sk
NONSENSE = [('person%1:03:00::', 'person'),
('group%1:03:00::', 'group'),
('location%1:03:00::', 'location')]
def is_nonsense(lemma, sk, rdf):
return ((sk, rdf) in NONSENSE) or lemma == 'be'
def fix_token_text(tk):
tk = StringTool.strip(tk).replace('\t', ' ').replace('|', ' ').replace('_', ' ')
tk = tk.replace(" ' nuff", " 'nuff")
tk = tk.replace("Ol ' ", "Ol' ")
tk = tk.replace("O ' ", "O' ")
tk = tk.replace("ma ' am", "ma'am")
tk = tk.replace("Ma ' am", "Ma'am")
tk = tk.replace("probl ' y", "probl'y")
tk = tk.replace("ai n't", "ain't")
tk = tk.replace("holdin '", "holdin'")
tk = tk.replace("hangin '", "hangin'")
tk = tk.replace("dryin ' ", "dryin' ")
tk = tk.replace("Y ' all", "Y'all")
tk = tk.replace("y ' know", "y'know")
tk = tk.replace("c ' n", "c'n")
tk = tk.replace("l ' identite", "l'identite")
tk = tk.replace("Rue de L ' Arcade", "Rue de l'Arcade")
tk = tk.replace("p ' lite", "p'lite")
tk = tk.replace("rev ' rend", "rev'rend")
tk = tk.replace("coup d ' etat", "coup d'etat")
tk = tk.replace("t ' gethuh", "t'gethuh")
tk = tk.replace('``', "“")
tk = tk.replace("''", "”")
tk = tk.replace(" ,", ",")
tk = tk.replace("( ", "(")
tk = tk.replace(" )", ")")
tk = tk.replace(" ”", "”")
tk = tk.replace(" 's", "'s")
tk = tk.replace("o '", "o'")
tk = tk.replace("s ' ", "s' ")
tk = tk.replace(" , ", ", ")
# tk = tk.replace(" ' ", "' ")
return tk
def detokenize(tokens):
sentence_text = ' '.join([x.text for x in tokens])
sentence_text = sentence_text.replace(" , , ", ", ")
sentence_text = sentence_text.replace(' , ', ', ').replace('“ ', '“').replace(' ”', '”')
sentence_text = sentence_text.replace(' ! ', '! ').replace(" 'll ", "'ll ").replace(" 've ", "'ve ").replace(" 're ", "'re ").replace(" 'd ", "'d ")
sentence_text = sentence_text.replace(" 's ", "'s ")
sentence_text = sentence_text.replace(" 'm ", "'m ")
sentence_text = sentence_text.replace(" ' ", "' ")
sentence_text = sentence_text.replace(" ; ", "; ")
sentence_text = sentence_text.replace("( ", "(")
sentence_text = sentence_text.replace(" )", ")")
sentence_text = sentence_text.replace(" n't ", "n't ")
# sentence_text = sentence_text.replace("Never_mind_''", "Never_mind_”")
# sentence_text = sentence_text.replace("327_U._S._114_''", "327_U._S._114_”")
# sentence_text = sentence_text.replace("``", "“")
# sentence_text = sentence_text.replace("''", "”")
sentence_text = sentence_text.replace(" ", " ")
if sentence_text[-2:] in (' .', ' :', ' ?', ' !'):
sentence_text = sentence_text[:-2] + sentence_text[-1]
sentence_text = sentence_text.strip()
return sentence_text
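# Illustrative example (the tokens are hypothetical): joining the texts ["The", "dog",
# "'s", "bone", "."] with spaces gives "The dog 's bone ." and the replacement rules
# above turn it into "The dog's bone.".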
def fix_3rada(root, output_dir):
ds_3rada = SemcorXML(root)
for f in ds_3rada.files:
inpath = os.path.join(ds_3rada.root, f)
outpath = os.path.join(output_dir, f + ".xml")
fix_malformed_xml_file(inpath, outpath)
def fix_malformed_xml_file(inpath, outpath):
if os.path.isfile(outpath):
print("SKIPPED: {} (output file exists)".format(outpath))
return
print('Fixing the file: %s ==> %s' % (inpath, outpath))
with open(inpath) as infile:
soup = BeautifulSoup(infile.read(), 'lxml')
# create output dir if needed
outdir = os.path.dirname(outpath)
if not os.path.exists(outdir):
os.makedirs(outdir)
with open(outpath, 'w') as outfile:
outfile.write(soup.prettify())
|
mit
| 336,102,974,210,178,100 | 34.849754 | 152 | 0.531982 | false |
scottharney/python-mdszoning
|
library/utils.py
|
1
|
1296
|
# utils.py
#
# General utilities functions
class bcolors:
OKGREEN = '\033[92m' # Green
WARNING = '\033[93m' # Yellow
FAIL = '\033[91m' # Red
OKBLUE = '\033[94m' # Blue
HEADER = '\033[95m' # Pink
BOLD = '\033[1m' # Bold text
UNDERLINE = '\033[4m' # Underline text
ENDC = '\033[0m' # EOL
def confirm(prompt=None, resp=False):
""" Prompts yes or no response to user user.
Returns True for yes and False for no """
# Set a default message if no param was received
if prompt is None:
prompt = 'Confirm?'
    # If 'resp' was set to True the default answer will be Y (yes),
    # otherwise the default answer will be N (no)
if resp:
prompt = '%s [%s/%s]: ' % (prompt, 'Y', 'n')
else:
prompt = '%s [%s/%s]: ' % (prompt, 'y', 'N')
    # Loop reading the answer from the terminal.
    # If no answer is given, the default is returned
while True:
answer = raw_input(prompt)
if not answer:
return resp
if answer not in ['y', 'Y', 'n', 'N']:
print 'Please enter y or n.\n'
continue
if answer == 'y' or answer == 'Y':
return True
if answer == 'n' or answer == 'N':
return False
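# Example usage (apply_zoning_change is a hypothetical caller-side function):
#   if confirm(prompt='Apply zoning changes?', resp=False):
#       apply_zoning_change()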
|
apache-2.0
| -6,379,115,861,355,171,000 | 29.857143 | 80 | 0.544753 | false |
philanthropy-u/edx-platform
|
openedx/features/philu_courseware/models.py
|
1
|
5839
|
"""
All models for philu_courseware app
"""
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import UsageKeyField
from student.models import CourseEnrollment
from .constants import (
COMPETENCY_ASSESSMENT_DEFAULT_PROBLEMS_COUNT,
COMPETENCY_ASSESSMENT_TYPE_CHOICES,
CORRECT_ASSESSMENT_KEY,
CORRECTNESS_CHOICES,
PRE_ASSESSMENT_KEY
)
class CompetencyAssessmentManager(models.Manager):
"""
Manager for model CompetencyAssessmentRecord
"""
def revert_user_post_assessment_attempts(self, user, problem_id):
post_assessment_records = self.get_queryset().filter(problem_id=problem_id, user=user, assessment_type='post')
delete_result = post_assessment_records.delete()
deleted_records_count = delete_result[0]
return deleted_records_count
def get_score(self, user, chapter_id):
"""
Return competency assessments scores of user in chapter
:param user: user
:param chapter_id: chapter url_name.
:return: assessments score dictionary
"""
pre_assessment_attempted = None
pre_assessment_score = post_assessment_score = attempted_pre_assessments = attempted_post_assessments = 0
query_format = """
SELECT MAX(`id`) AS `id`, COUNT(`assessment_type`) AS `assessments_count`, `assessment_type`, `correctness`
FROM `philu_courseware_competencyassessmentrecord`
WHERE `id` IN (
SELECT MAX(`id`) FROM `philu_courseware_competencyassessmentrecord`
WHERE `chapter_id` = '{chapter_id}' and `user_id` = {user_id}
GROUP BY `problem_id`, `question_number`
) GROUP BY `correctness`, `assessment_type`
"""
assessment_records = self.raw(query_format.format(chapter_id=chapter_id, user_id=user.id))
"""
Sample result of upper query. This Query will return results of problems from latest attempt
for both "Pre" and "Post" assessments. All attempts are saved in our table and we are concerned only with
the latest one, hence sub query provides us the latest attempt of all problems
| id | assessment_count | assessment_type | correctness |
+-------+------------------+-------------------+---------------+
| 231 | 4 | post | correct |
| 229 | 4 | pre | correct |
| 232 | 1 | post | incorrect |
| 233 | 1 | pre | incorrect |
""" # pylint: disable=pointless-string-statement
for assessment in assessment_records:
if assessment.assessment_type == PRE_ASSESSMENT_KEY:
pre_assessment_attempted = True
if assessment.correctness == CORRECT_ASSESSMENT_KEY:
pre_assessment_score = assessment.assessments_count
attempted_pre_assessments += assessment.assessments_count
else:
if assessment.correctness == CORRECT_ASSESSMENT_KEY:
post_assessment_score = assessment.assessments_count
attempted_post_assessments += assessment.assessments_count
return {
'pre_assessment_score': pre_assessment_score,
'post_assessment_score': post_assessment_score,
'pre_assessment_attempted': pre_assessment_attempted,
'all_pre_assessment_attempted': attempted_pre_assessments == COMPETENCY_ASSESSMENT_DEFAULT_PROBLEMS_COUNT,
'all_post_assessment_attempted': attempted_post_assessments == COMPETENCY_ASSESSMENT_DEFAULT_PROBLEMS_COUNT,
}
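# Usage sketch (the user and chapter id are illustrative): the manager is exposed as
# CompetencyAssessmentRecord.objects, so callers can do e.g.
#   scores = CompetencyAssessmentRecord.objects.get_score(user, 'chapter_url_name')
#   scores['pre_assessment_score']   # correct answers in the latest pre-assessment attempt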
class CompetencyAssessmentRecord(TimeStampedModel):
"""
Model for storing competency assessment records
"""
chapter_id = models.TextField(max_length=255)
problem_id = UsageKeyField(max_length=255)
problem_text = models.TextField(null=False)
assessment_type = models.CharField(max_length=4, choices=COMPETENCY_ASSESSMENT_TYPE_CHOICES)
attempt = models.IntegerField()
correctness = models.CharField(max_length=9, choices=CORRECTNESS_CHOICES)
# It stores comma separated choice ids for example choice_1,choice_2,choice_3
choice_id = models.CharField(max_length=255)
choice_text = models.TextField()
score = models.FloatField()
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
question_number = models.IntegerField(default=None, blank=True, null=True)
objects = CompetencyAssessmentManager()
def __unicode__(self):
return '{problem}, question({question_number}), {username}, {assessment_type}, attempt({attempt})'.format(
problem=self.problem_id,
username=self.user.username,
assessment_type=self.assessment_type,
attempt=self.attempt,
question_number=self.question_number
)
class CourseEnrollmentMeta(models.Model):
"""
    A model to link a course enrollment to the specialisation (program) through which it was enrolled
"""
course_enrollment = models.OneToOneField(CourseEnrollment, related_name='course_enrollment_meta',
related_query_name='course_enrollment_meta', on_delete=models.CASCADE)
program_uuid = models.UUIDField(null=True, blank=True, verbose_name=_('Program UUID'))
def __unicode__(self):
return 'Program {program_uuid}, {course_id}'.format(
program_uuid=self.program_uuid,
course_id=self.course_enrollment.course_id,
)
|
agpl-3.0
| -3,636,482,258,178,601,500 | 42.574627 | 120 | 0.637095 | false |
Grumbel/dirtool
|
dirtools/file_transfer.py
|
1
|
19713
|
# dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import hashlib
import os
import sys
from enum import Enum
from abc import ABC, abstractmethod
import bytefmt
from dirtools.filesystem import Filesystem
from dirtools.format import progressbar
class CancellationException(Exception):
pass
class ConflictResolution(Enum):
CANCEL = 0 # QDialog.Rejected
OVERWRITE = 1 # QDialog.Accepted
SKIP = 2
RENAME_SOURCE = 3
RENAME_TARGET = 4
NO_CONFLICT = 5
class Overwrite(Enum):
ASK = 0
NEVER = 1
ALWAYS = 2
def sha1sum(filename: str, blocksize: int = 65536) -> str:
with open(filename, 'rb') as fin:
hasher = hashlib.sha1()
buf = fin.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fin.read(blocksize)
return hasher.hexdigest()
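# Usage sketch (the path is hypothetical): sha1sum("/tmp/example.bin") returns the hex
# SHA-1 digest of the file contents, read in 64 KiB blocks.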
class Mediator(ABC):
"""Whenever a filesystem operation would result in the destruction of data,
the Mediator is called to decide which action should be taken."""
@abstractmethod
def file_conflict(self, source: str, dest: str) -> ConflictResolution:
pass
@abstractmethod
def directory_conflict(self, sourcedir: str, destdir: str) -> ConflictResolution:
pass
@abstractmethod
def cancel_transfer(self) -> bool:
pass
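# A minimal non-interactive Mediator sketch (not part of the original code): skip every
# conflict and never cancel the transfer.
#   class SkipAllMediator(Mediator):
#       def file_conflict(self, source: str, dest: str) -> ConflictResolution:
#           return ConflictResolution.SKIP
#       def directory_conflict(self, sourcedir: str, destdir: str) -> ConflictResolution:
#           return ConflictResolution.SKIP
#       def cancel_transfer(self) -> bool:
#           return False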
class ConsoleMediator(Mediator):
def __init__(self) -> None:
self.overwrite: Overwrite = Overwrite.ASK
self.merge: Overwrite = Overwrite.ASK
def cancel_transfer(self) -> bool:
return False
def file_info(self, filename: str) -> str:
return (" name: {}\n"
" size: {}").format(filename,
bytefmt.humanize(os.path.getsize(filename)))
def file_conflict(self, source: str, dest: str) -> ConflictResolution:
if self.overwrite == Overwrite.ASK:
return self._file_conflict_interactive(source, dest)
elif self.overwrite == Overwrite.ALWAYS:
return ConflictResolution.OVERWRITE
elif self.overwrite == Overwrite.NEVER:
return ConflictResolution.SKIP
else:
assert False
def _file_conflict_interactive(self, source: str, dest: str) -> ConflictResolution:
source_sha1 = sha1sum(source)
dest_sha1 = sha1sum(dest)
if source == dest:
print("skipping '{}' same file as '{}'".format(source, dest))
return ConflictResolution.SKIP
elif source_sha1 == dest_sha1:
print("skipping '{}' same content as '{}'".format(source, dest))
return ConflictResolution.SKIP
else:
print("Conflict: {}: destination file already exists".format(dest))
print("source:\n{}\n sha1: {}".format(self.file_info(source), source_sha1))
print("target:\n{}\n sha1: {}".format(self.file_info(dest), dest_sha1))
while True:
c = input("Overwrite {} ([Y]es, [N]o, [A]lways, n[E]ver)? ".format(dest)) # [R]ename, [Q]uit
c = c.lower()
if c == 'n':
print("skipping {}".format(source))
return ConflictResolution.SKIP
elif c == 'y':
return ConflictResolution.OVERWRITE
elif c == 'a':
self.overwrite = Overwrite.ALWAYS
return ConflictResolution.OVERWRITE
elif c == 'e':
self.overwrite = Overwrite.NEVER
return ConflictResolution.SKIP
else:
pass # try to read input again
def directory_conflict(self, sourcedir: str, destdir: str) -> ConflictResolution:
if self.merge == Overwrite.ASK:
return self._directory_conflict_interactive(sourcedir, destdir)
elif self.merge == Overwrite.ALWAYS:
return ConflictResolution.OVERWRITE
elif self.merge == Overwrite.NEVER:
return ConflictResolution.SKIP
else:
assert False
def _directory_conflict_interactive(self, sourcedir: str, destdir: str) -> ConflictResolution:
print("Conflict: {}: destination directory already exists".format(destdir))
print("source: {}".format(sourcedir))
print("target: {}".format(destdir))
while True:
c = input("Merge into {} ([Y]es, [N]o, [A]lways, n[E]ver)? ".format(destdir)) # [R]ename, [Q]uit
c = c.lower()
if c == 'n':
print("skipping {}".format(sourcedir))
return ConflictResolution.SKIP
elif c == 'y':
return ConflictResolution.OVERWRITE
elif c == 'a':
self.merge = Overwrite.ALWAYS
return ConflictResolution.OVERWRITE
elif c == 'e':
self.merge = Overwrite.NEVER
return ConflictResolution.SKIP
else:
pass # try to read input again
class Progress(ABC):
@abstractmethod
def copy_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
pass
@abstractmethod
def copy_progress(self, current: int, total: int) -> None:
pass
@abstractmethod
def copy_directory(self, src: str, dst: str, resolution: ConflictResolution) -> None:
pass
@abstractmethod
def remove_file(self, src: str) -> None:
pass
@abstractmethod
def remove_directory(self, src: str) -> None:
pass
@abstractmethod
def move_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
pass
@abstractmethod
def move_directory(self, src: str, dst: str, resolution: ConflictResolution) -> None:
pass
@abstractmethod
def link_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
pass
@abstractmethod
def transfer_canceled(self) -> None:
pass
@abstractmethod
def transfer_completed(self) -> None:
pass
class ConsoleProgress(Progress):
def __init__(self):
self.verbose: bool = False
def copy_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
if self.verbose:
print("copying {} -> {}".format(src, dst))
def copy_progress(self, current: int, total: int) -> None:
progress = current / total
total_width = 50
if current != total:
sys.stdout.write("{:3d}% |{}|\r".format(
int(progress * 100),
progressbar(total_width, current, total)))
else:
sys.stdout.write(" {}\r".format(total_width * " "))
def copy_directory(self, src: str, dst: str, resolution: ConflictResolution) -> None:
if self.verbose:
print("copying {} -> {}".format(src, dst))
def remove_file(self, src: str) -> None:
if self.verbose:
print("removing {}".format(src))
def remove_directory(self, src: str) -> None:
if self.verbose:
print("removing {}".format(src))
def link_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
if self.verbose:
print("linking {} -> {}".format(src, dst))
def move_file(self, src: str, dst: str, resolution: ConflictResolution) -> None:
if self.verbose:
print("moving {} -> {}".format(src, dst))
def move_directory(self, src: str, dst: str, resolution: ConflictResolution) -> None:
if self.verbose:
print("moving {} -> {}".format(src, dst))
def transfer_canceled(self) -> None:
print("transfer canceled")
def transfer_completed(self) -> None:
print("transfer completed")
class FileTransfer:
def __init__(self, fs: Filesystem, mediator: Mediator, progress: Progress) -> None:
self._fs = fs
self._mediator = mediator
self._progress = progress
def _move_file(self, source: str, destdir: str) -> None:
assert self._fs.isreg(source) or self._fs.islink(source), "{}: unknown file type".format(source)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
base = os.path.basename(source)
dest = os.path.join(destdir, base)
self._progress.move_file(source, dest, ConflictResolution.NO_CONFLICT)
self._move_file2(source, dest, destdir)
def _move_file2(self, source: str, dest: str, destdir: str) -> None:
if self._fs.lexists(dest):
resolution = self._mediator.file_conflict(source, dest)
if resolution == ConflictResolution.SKIP:
self._progress.move_file(source, dest, resolution)
elif resolution == ConflictResolution.OVERWRITE:
try:
self._fs.overwrite(source, dest)
except OSError as err:
if err.errno == errno.EXDEV:
self._progress.copy_file(source, dest, resolution)
self._fs.copy_file(source, dest, overwrite=True, progress=self._progress.copy_progress)
self._progress.remove_file(source)
self._fs.remove_file(source)
else:
raise
elif resolution == ConflictResolution.RENAME_SOURCE:
new_dest = self._fs.generate_unique(dest)
self._move_file2(source, new_dest, destdir)
elif resolution == ConflictResolution.RENAME_TARGET:
self._fs.rename_unique(dest)
self._move_file(source, destdir)
elif resolution == ConflictResolution.CANCEL:
raise CancellationException()
else:
assert False, "unknown conflict resolution: %r" % resolution
else:
try:
self._fs.rename(source, dest)
except OSError as err:
if err.errno == errno.EXDEV:
self._fs.copy_file(source, dest, progress=self._progress.copy_progress)
self._fs.remove_file(source)
else:
raise
def _move_directory_content(self, sourcedir: str, destdir: str) -> None:
assert os.path.isdir(sourcedir), "{}: not a directory".format(sourcedir)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
for name in self._fs.listdir(sourcedir):
src = os.path.join(sourcedir, name)
# FIXME: this could be speed up by using os.scandir()
self.move(src, destdir)
def _move_directory(self, sourcedir: str, destdir: str) -> None:
assert os.path.isdir(sourcedir), "{}: not a directory".format(sourcedir)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
base = os.path.basename(sourcedir)
dest = os.path.join(destdir, base)
self._progress.move_directory(sourcedir, dest, ConflictResolution.NO_CONFLICT)
self._move_directory2(sourcedir, dest, destdir)
def _move_directory2(self, sourcedir: str, dest: str, destdir: str) -> None:
if self._fs.lexists(dest):
resolution = self._mediator.directory_conflict(sourcedir, dest)
if resolution == ConflictResolution.SKIP:
self._progress.move_directory(sourcedir, dest, resolution)
elif resolution == ConflictResolution.OVERWRITE:
self._move_directory_content(sourcedir, dest)
elif resolution == ConflictResolution.RENAME_SOURCE:
new_dest = self._fs.generate_unique(dest)
self._move_directory2(sourcedir, new_dest, destdir)
elif resolution == ConflictResolution.RENAME_TARGET:
self._fs.rename_unique(dest)
self._move_directory(sourcedir, destdir)
elif resolution == ConflictResolution.CANCEL:
raise CancellationException()
else:
assert False, "unknown conflict resolution: {}".format(resolution)
else:
try:
self._fs.rename(sourcedir, dest)
except OSError as err:
if err.errno == errno.EXDEV:
self._fs.mkdir(dest)
self._fs.copy_stat(sourcedir, dest)
self._move_directory_content(sourcedir, dest)
self._fs.rmdir(sourcedir)
else:
raise
def move(self, source: str, destdir: str) -> None:
"""Move 'source' to the directory 'destdir'. 'source' can be any file
object or directory.
"""
self.interruption_point()
if not self._fs.isdir(destdir):
raise Exception("{}: target directory does not exist".format(destdir))
if os.path.isdir(source):
self._move_directory(source, destdir)
else:
self._move_file(source, destdir)
def link(self, source: str, destdir: str) -> None:
self.interruption_point()
base = os.path.basename(source)
dest = os.path.join(destdir, base)
self._link(source, dest, destdir)
def _link(self, source: str, dest: str, destdir: str) -> None:
if self._fs.lexists(dest):
resolution = self._mediator.file_conflict(source, dest)
if resolution == ConflictResolution.SKIP:
self._progress.link_file(source, dest, resolution)
elif resolution == ConflictResolution.OVERWRITE:
self._progress.link_file(source, dest, resolution)
self._fs.remove_file(dest)
self._fs.symlink(source, dest)
elif resolution == ConflictResolution.RENAME_SOURCE:
new_dest = self._fs.generate_unique(dest)
self._link(source, new_dest, destdir)
elif resolution == ConflictResolution.RENAME_TARGET:
self._fs.rename_unique(dest)
self.link(source, destdir)
elif resolution == ConflictResolution.CANCEL:
raise CancellationException()
else:
assert False, "unknown conflict resolution: {}".format(resolution)
else:
self._progress.link_file(source, dest, ConflictResolution.NO_CONFLICT)
self._fs.symlink(source, dest)
def _copy_file(self, source: str, destdir: str) -> None:
assert self._fs.isreg(source) or self._fs.islink(source), "{}: unknown file type".format(source)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
base = os.path.basename(source)
dest = os.path.join(destdir, base)
self._copy_file2(source, dest, destdir)
def _copy_file2(self, source: str, dest: str, destdir: str) -> None:
if self._fs.lexists(dest):
resolution = self._mediator.file_conflict(source, dest)
if resolution == ConflictResolution.SKIP:
self._progress.copy_file(source, dest, resolution)
elif resolution == ConflictResolution.OVERWRITE:
self._progress.copy_file(source, dest, resolution)
self._fs.copy_file(source, dest, overwrite=True, progress=self._progress.copy_progress)
elif resolution == ConflictResolution.RENAME_SOURCE:
new_dest = self._fs.generate_unique(dest)
self._copy_file2(source, new_dest, destdir)
elif resolution == ConflictResolution.RENAME_TARGET:
self._fs.rename_unique(dest)
self._copy_file(source, destdir)
elif resolution == ConflictResolution.CANCEL:
raise CancellationException()
else:
assert False, "unknown conflict resolution: {}".format(resolution)
else:
self._progress.copy_file(source, dest, ConflictResolution.NO_CONFLICT)
self._fs.copy_file(source, dest, progress=self._progress.copy_progress)
def _copy_directory_content(self, sourcedir: str, destdir: str) -> None:
assert os.path.isdir(sourcedir), "{}: not a directory".format(sourcedir)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
for name in self._fs.listdir(sourcedir):
src = os.path.join(sourcedir, name)
# FIXME: this could be speed up by using os.scandir()
self.copy(src, destdir)
def _copy_directory(self, sourcedir: str, destdir: str) -> None:
assert os.path.isdir(sourcedir), "{}: not a directory".format(sourcedir)
assert os.path.isdir(destdir), "{}: not a directory".format(destdir)
base = os.path.basename(sourcedir)
dest = os.path.join(destdir, base)
self._copy_directory2(sourcedir, dest, destdir)
def _copy_directory2(self, sourcedir: str, dest: str, destdir: str) -> None:
if self._fs.lexists(dest):
resolution = self._mediator.directory_conflict(sourcedir, dest)
if resolution == ConflictResolution.SKIP:
self._progress.copy_directory(sourcedir, dest, resolution)
elif resolution == ConflictResolution.OVERWRITE:
                self._progress.copy_directory(sourcedir, dest, resolution)
self._copy_directory_content(sourcedir, dest)
elif resolution == ConflictResolution.RENAME_SOURCE:
new_dest = self._fs.generate_unique(dest)
self._copy_directory2(sourcedir, new_dest, destdir)
elif resolution == ConflictResolution.RENAME_TARGET:
self._fs.rename_unique(dest)
self._copy_directory(sourcedir, destdir)
elif resolution == ConflictResolution.CANCEL:
raise CancellationException()
else:
assert False, "unknown conflict resolution: {}".format(resolution)
else:
self._progress.copy_directory(sourcedir, dest, ConflictResolution.NO_CONFLICT)
self._fs.mkdir(dest)
self._fs.copy_stat(sourcedir, dest)
self._copy_directory_content(sourcedir, dest)
def copy(self, source: str, destdir: str) -> None:
self.interruption_point()
if not self._fs.isdir(destdir):
raise Exception("{}: target directory does not exist".format(destdir))
if os.path.isdir(source):
self._copy_directory(source, destdir)
else:
self._copy_file(source, destdir)
def make_relative_dir(self, source: str, destdir: str) -> str:
prefix = os.path.dirname(source)
if os.path.isabs(prefix):
prefix = os.path.relpath(prefix, "/")
actual_destdir = os.path.join(destdir, prefix)
if not os.path.isdir(actual_destdir):
self._fs.makedirs(actual_destdir)
return actual_destdir
def interruption_point(self) -> None:
if self._mediator.cancel_transfer():
raise CancellationException()
# EOF #
|
gpl-3.0
| 8,320,482,712,508,614,000 | 37.72888 | 111 | 0.59514 | false |
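The worker above funnels every name clash through a mediator that answers with a ConflictResolution value and then recurses or retries accordingly. The sketch below is illustrative only: the enum members mirror the ones used above, but the enum definition itself, CancellationException, and the resolve_copy helper with its callables (exists, ask, do_copy, make_unique) are hypothetical stand-ins for the real filesystem and mediator objects.

import enum

class ConflictResolution(enum.Enum):
    NO_CONFLICT = 0
    SKIP = 1
    OVERWRITE = 2
    RENAME_SOURCE = 3
    RENAME_TARGET = 4
    CANCEL = 5

class CancellationException(Exception):
    pass

def resolve_copy(source, dest, exists, ask, do_copy, make_unique):
    # exists/ask/do_copy/make_unique are hypothetical callables standing in
    # for the real filesystem and mediator objects.
    if not exists(dest):
        return do_copy(source, dest)
    resolution = ask(source, dest)
    if resolution == ConflictResolution.SKIP:
        return None
    if resolution == ConflictResolution.OVERWRITE:
        return do_copy(source, dest)
    if resolution == ConflictResolution.RENAME_SOURCE:
        # the incoming file is written under a new, unique destination name
        return do_copy(source, make_unique(dest))
    if resolution == ConflictResolution.RENAME_TARGET:
        # the existing target is renamed out of the way, then the copy retries
        make_unique(dest)
        return do_copy(source, dest)
    if resolution == ConflictResolution.CANCEL:
        raise CancellationException()
    raise AssertionError("unknown conflict resolution: {}".format(resolution))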
mgrygoriev/CloudFerry
|
cloudferrylib/utils/remote_runner.py
|
1
|
3149
|
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fabric import api
from oslo_config import cfg
from cloudferrylib.base import exception
from cloudferrylib.utils import retrying
from cloudferrylib.utils import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class RemoteExecutionError(exception.CFBaseException):
pass
class RemoteRunner(object):
def __init__(self, host, user, password=None, sudo=False, key=None,
ignore_errors=False, timeout=None, gateway=None):
self.host = host
if key is None:
key = CONF.migrate.key_filename
self.user = user
self.password = password
self.sudo = sudo
self.key = key
self.ignore_errors = ignore_errors
self.timeout = timeout
self.gateway = gateway
def run(self, cmd, **kwargs):
abort_exception = None
if not self.ignore_errors:
abort_exception = RemoteExecutionError
if kwargs:
cmd = cmd.format(**kwargs)
ssh_attempts = CONF.migrate.ssh_connection_attempts
with api.settings(warn_only=self.ignore_errors,
host_string=self.host,
user=self.user,
password=self.password,
abort_exception=abort_exception,
                          reject_unknown_hosts=False,
combine_stderr=False,
connection_attempts=ssh_attempts,
command_timeout=self.timeout,
gateway=self.gateway):
with utils.forward_agent(self.key):
LOG.debug("running '%s' on '%s' host as user '%s'",
cmd, self.host, self.user)
if self.sudo and self.user != 'root':
result = api.sudo(cmd)
else:
result = api.run(cmd)
LOG.debug('[%s] Command "%s" result: %s',
self.host, cmd, result)
return result
def run_ignoring_errors(self, cmd, **kwargs):
ignore_errors_original = self.ignore_errors
try:
self.ignore_errors = True
return self.run(cmd, **kwargs)
finally:
self.ignore_errors = ignore_errors_original
def run_repeat_on_errors(self, cmd, **kwargs):
retrier = retrying.Retry(
max_attempts=CONF.migrate.retry,
reraise_original_exception=True,
timeout=0,
)
return retrier.run(self.run, cmd, **kwargs)
|
apache-2.0
| -7,797,338,825,854,478,000 | 33.604396 | 74 | 0.586535 | false |
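A hedged usage sketch for the RemoteRunner above: the host, user, key path, and commands are invented values, and it assumes CONF has already been populated by the normal CloudFerry configuration loading.

# Illustrative only -- host, user, key path and commands are assumptions.
from cloudferrylib.utils.remote_runner import RemoteRunner

runner = RemoteRunner(host='192.0.2.10',
                      user='cloud',
                      key='/home/cloud/.ssh/id_rsa',
                      sudo=True,
                      timeout=300)

# Retries up to CONF.migrate.retry times before giving up.
state = runner.run_repeat_on_errors('virsh list --all')

# Substitutes kwargs into the command and ignores a non-zero exit status.
runner.run_ignoring_errors('rm -f {path}', path='/tmp/cloudferry.lock')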
qedsoftware/commcare-hq
|
corehq/apps/builds/views.py
|
1
|
6257
|
from cStringIO import StringIO
from couchdbkit import ResourceNotFound, BadValueError
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponse, Http404
from django.utils.translation import ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import render
from django.utils.decorators import method_decorator
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.style.decorators import use_jquery_ui
from corehq.util.view_utils import json_error
from dimagi.utils.web import json_request, json_response
from dimagi.utils.couch.database import get_db
from corehq.apps.api.models import require_api_user
from corehq.apps.domain.decorators import require_superuser
from .models import CommCareBuild, CommCareBuildConfig, SemanticVersionProperty
from .utils import get_all_versions, extract_build_info_from_filename
import requests
import requests.exceptions
@csrf_exempt # is used by an API
@json_error
@require_api_user
def post(request):
artifacts = request.FILES.get('artifacts')
build_number = request.POST.get('build_number')
version = request.POST.get('version')
try:
build_number = int(build_number)
except Exception:
return HttpResponseBadRequest("build_number has to be a base-10 integer")
if not artifacts:
CommCareBuild.create_without_artifacts(version, build_number)
else:
CommCareBuild.create_from_zip(artifacts, build_number=build_number, version=version)
return HttpResponse()
@require_GET
def get(request, version, build_number, path):
build = CommCareBuild.get_build(version, build_number)
try:
file = build.fetch_file(path)
except ResourceNotFound:
raise Http404()
response = HttpResponse(file)
response['Content-Disposition'] = 'attachment; filename="%s"' % path.split("/")[-1]
return response
@require_GET
@require_superuser
def get_all(request):
builds = sorted(CommCareBuild.all_builds(), key=lambda build: build.time)
return render(request, 'builds/all.html', {'builds': builds})
class EditMenuView(BasePageView):
template_name = "builds/edit_menu.html"
urlname = 'edit_menu'
doc_id = "config--commcare-builds"
page_title = ugettext_lazy("Edit CommCare Builds")
@method_decorator(require_superuser)
@use_jquery_ui
def dispatch(self, *args, **kwargs):
# different local caches on different workers
# but this at least makes it so your changes take effect immediately
# while you're editing the config
CommCareBuildConfig.clear_local_cache()
self.doc = CommCareBuildConfig.fetch()
return super(EditMenuView, self).dispatch(*args, **kwargs)
def save_doc(self):
db = get_db()
return db.save_doc(self.doc)
@property
def page_context(self):
return {
'doc': self.doc,
'all_versions': get_all_versions(
[v['build']['version'] for v in self.doc['menu']]),
'j2me_enabled_versions': CommCareBuild.j2me_enabled_build_versions()
}
@property
def page_url(self):
return reverse(self.urlname)
def post(self, request, *args, **kwargs):
request_json = json_request(request.POST)
self.doc = request_json.get('doc')
self.save_doc()
return self.get(request, success=True, *args, **kwargs)
KNOWN_BUILD_SERVER_LOGINS = {
'http://build.dimagi.com:250/': (
lambda session:
session.get('http://build.dimagi.com:250/guestLogin.html?guest=1')
)
}
@require_POST
def import_build(request):
"""
example POST params:
source: 'http://build.dimagi.com:250/repository/downloadAll/bt97/14163:id/artifacts.zip'
version: '2.13.0'
build_number: 32703
"""
source = request.POST.get('source')
version = request.POST.get('version')
build_number = request.POST.get('build_number')
try:
SemanticVersionProperty().validate(version)
except BadValueError as e:
return json_response({
'reason': 'Badly formatted version',
'info': {
'error_message': unicode(e),
'error_type': unicode(type(e))
}
}, status_code=400)
if build_number:
try:
build_number = int(build_number)
except ValueError:
return json_response({
'reason': 'build_number must be an int'
}, status_code=400)
session = requests.session()
# log in to the build server if we know how
for key in KNOWN_BUILD_SERVER_LOGINS:
if source.startswith(key):
KNOWN_BUILD_SERVER_LOGINS[key](session)
if source:
r = session.get(source)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
return json_response({
'reason': 'Fetching artifacts.zip failed',
'response': {
'status_code': r.status_code,
'content': r.content,
'headers': r.headers,
}
}, status_code=400)
try:
_, inferred_build_number = (
extract_build_info_from_filename(r.headers['content-disposition'])
)
except (KeyError, ValueError): # no header or header doesn't match
inferred_build_number = None
if inferred_build_number:
build_number = inferred_build_number
if not build_number:
return json_response({
'reason': "You didn't give us a build number "
"and we couldn't infer it"
}, status_code=400)
build = CommCareBuild.create_from_zip(
StringIO(r.content), version, build_number)
else:
build = CommCareBuild.create_without_artifacts(version, build_number)
return json_response({
'message': 'New CommCare build added',
'info': {
'version': version,
'build_number': build_number,
'_id': build.get_id,
}
})
|
bsd-3-clause
| -2,932,700,241,914,030,600 | 31.252577 | 92 | 0.635448 | false |
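A hedged sketch of exercising the import_build view above from a script. The HQ base URL and route are placeholders (only the view name import_build is taken from the code); the source, version, and build_number values are copied from the view's own docstring.

# Illustrative only -- the base URL and route are placeholders.
import requests

resp = requests.post(
    'https://hq.example.org/builds/import/',
    data={
        'source': 'http://build.dimagi.com:250/repository/downloadAll/bt97/14163:id/artifacts.zip',
        'version': '2.13.0',
        'build_number': 32703,
    },
)
print(resp.status_code, resp.content)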
ferriman/SSandSP
|
pyxel-test/venv/lib/python3.8/site-packages/pyxel/editor/field_cursor.py
|
1
|
3620
|
import pyxel
from pyxel.ui.constants import WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
class FieldCursor:
def __init__(
self,
data_getter,
pre_history_setter,
post_history_setter,
data_max_length,
data_view_length,
data_count,
):
self._get_data = data_getter
self._add_pre_history = pre_history_setter
self._add_post_history = post_history_setter
self._data_max_length = data_max_length
self._data_view_length = data_view_length
self._data_count = data_count
self._x = 0
self._y = 0
@property
def x(self):
return min(self._x, len(self.data), self._data_max_length - 1)
@property
def _max_x(self):
return min(len(self.data), self._data_max_length - 1)
@property
def y(self):
return self._y
@property
def data(self):
return self._get_data(self._y)
def move(self, x, y):
self._x = x
self._y = y
def move_left(self):
if self.x > 0:
self._x = self.x - 1
def move_right(self):
if self.x < self._max_x:
self._x += 1
def move_up(self):
cursor_view_y = self._x // self._data_view_length
if cursor_view_y > 0:
self._x -= self._data_view_length
elif self._y > 0:
self._y -= 1
data_view_y = self._max_x // self._data_view_length
self._x = (
self._data_view_length * data_view_y + self._x % self._data_view_length
)
def move_down(self):
cursor_view_y = self._x // self._data_view_length
data_view_y = self._max_x // self._data_view_length
if cursor_view_y < data_view_y:
self._x += self._data_view_length
elif self._y < self._data_count - 1:
self._y += 1
self._x %= self._data_view_length
def insert(self, value):
x = self.x
data = self.data
self._add_pre_history(self.x, self.y)
data.insert(x, value)
data[:] = data[: self._data_max_length]
self._x = x
self.move_right()
self._add_post_history(self.x, self.y)
def backspace(self):
x = self.x
data = self.data
if x == 0:
return
self._add_pre_history(self.x, self.y)
del data[x - 1]
if self._x <= self._max_x:
self.move_left()
self._add_post_history(self.x, self.y)
def delete(self):
x = self.x
data = self.data
if x >= len(data):
return
self._add_pre_history(self.x, self.y)
del data[x]
self._add_post_history(self.x, self.y)
def process_input(self):
if (
pyxel.btn(pyxel.KEY_SHIFT)
or pyxel.btn(pyxel.KEY_CONTROL)
or pyxel.btn(pyxel.KEY_ALT)
or pyxel.btn(pyxel.KEY_SUPER)
):
return
if pyxel.btnp(pyxel.KEY_LEFT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_left()
if pyxel.btnp(pyxel.KEY_RIGHT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_right()
if pyxel.btnp(pyxel.KEY_UP, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_up()
if pyxel.btnp(pyxel.KEY_DOWN, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_down()
if pyxel.btnp(pyxel.KEY_BACKSPACE, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.backspace()
if pyxel.btnp(pyxel.KEY_DELETE, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.delete()
|
gpl-3.0
| -2,487,638,061,186,023,000 | 24.492958 | 87 | 0.529282 | false |
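A hedged sketch of driving the FieldCursor above outside the editor: the backing rows and history callbacks are trivial stand-ins, and process_input() is left out because it needs a running pyxel application.

# Illustrative only -- the backing data and history callbacks are stand-ins.
from pyxel.editor.field_cursor import FieldCursor

rows = [[] for _ in range(4)]          # four editable rows
history = []

cursor = FieldCursor(
    data_getter=lambda y: rows[y],
    pre_history_setter=lambda x, y: history.append(('pre', x, y)),
    post_history_setter=lambda x, y: history.append(('post', x, y)),
    data_max_length=32,
    data_view_length=16,
    data_count=len(rows),
)

cursor.insert(7)       # rows[0] == [7], cursor moves one step right
cursor.backspace()     # removes the value again
cursor.move_down()     # switch to editing rows[1]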
marcomaio/MediaMover
|
common/decoder.py
|
1
|
7744
|
import re
from common.colors import CustomColors
from common.types import UserTypes
from common.videoElement import VideoElement
kMovieIdentifier = 'movie'
kSerieIdentifier = 'serie'
kIgnoreKeyword = 'sample'
class Decoder:
    # this method aims to identify whether the serie category entered
    # by the user approximately matches a hardcoded identifier
def isSerieCategory(self, iCategory):
        # transform the category name to lower case
aLoweredCategory = iCategory.lower()
# check if the serie identifier is contained in the category defined by the user
if kSerieIdentifier in aLoweredCategory:
return True
return False
    # this method aims to identify whether the movie category entered
    # by the user approximately matches a hardcoded identifier
def isMovieCategory(self, iCategory):
        # transform the category name to lower case
aLoweredCategory = iCategory.lower()
# check if the movie identifier is contained in the category defined by the user
if kMovieIdentifier in aLoweredCategory:
return True
return False
def setCategories(self, iCategories):
if UserTypes.movie == '' or UserTypes.serie == '':
for aCategory in iCategories.keys():
if self.isSerieCategory(aCategory):
UserTypes.serie = aCategory
elif self.isMovieCategory(aCategory):
UserTypes.movie = aCategory
# if movie or serie categories have not been found throw an exception
if UserTypes.movie == '':
raise NameError('Movie category has not been defined')
elif UserTypes.serie == '':
raise NameError('Serie category has not been defined')
print('Movie category: ' + UserTypes.movie)
print('TV serie category: ' + UserTypes.serie)
def searchDelimiter(self, iFileName):
# remove the extension
aPureFileName = iFileName[:iFileName.rfind(".")]
        # instantiate a map of non-alphanumeric characters with their occurrence counts
aDelimiterMap = {}
# init max number of occurrences and the delimiter
aMaxOccurrences = 0
aFoundDelimiter = ""
        # loop over the filename; if the current character is not alphanumeric, increment its counter
for aChar in aPureFileName:
if not aChar.isalnum():
# not needed, but just for clarity purpose
aDelimiter = aChar
if aDelimiter in aDelimiterMap.keys():
aDelimiterMap[aDelimiter] +=1
else:
# First occurrence of delimiter found, init its counter
aDelimiterMap[aDelimiter] = 1
# check if current number of occurrences is the max
if aDelimiterMap[aDelimiter] > aMaxOccurrences:
aMaxOccurrences = aDelimiterMap[aDelimiter]
aFoundDelimiter = aDelimiter
return aFoundDelimiter
def isSeriesEpisode(self, iFileName):
        # work out which character is the separator
        # Algorithm: remove the extension (including the "."), then pick the most frequent non-alphanumeric character
aDelimiter = self.searchDelimiter(iFileName)
        # when using the directory name we could overwrite a valid delimiter with an invalid one
if aDelimiter != '':
self.delimiter = aDelimiter
        # matches when a delimiter-bounded word contains two numbers separated by a single character (e.g. s01e03)
aRegExp = re.compile('.*' + aDelimiter + '.*[0-9]+[^' + aDelimiter + UserTypes.kForbiddenSerieSeparator + '0-9][0-9]+' + aDelimiter + '.*')
if aRegExp.match(iFileName):
return True
return False
def formatTitle(self, iTitle):
aStandardDelimiter = UserTypes.kStandardDelimiter
aFormattedTitle = ''
for aWord in iTitle.split(self.delimiter):
aFormattedTitle += aWord + aStandardDelimiter
aFormattedTitle = aFormattedTitle[:aFormattedTitle.rfind(aStandardDelimiter)]
return aFormattedTitle
def retrieveTitle(self, iFileName, isSerie=False):
aTitle = ''
if isSerie == True:
            # regular expression aiming to find the season and episode markers, e.g. s01e03, 1x3, etc.
aRegExp = re.compile('[' + self.delimiter + '].?[0-9]+[^' + self.delimiter + UserTypes.kForbiddenSerieSeparator + '0-9][0-9]+' + self.delimiter)
aSerieEpisodeObject = aRegExp.search(iFileName)
aRawTitle = iFileName[:aSerieEpisodeObject.start()]
else:
aRawTitle = iFileName[:iFileName.rfind(".")]
aTitle = self.formatTitle(aRawTitle)
return aTitle
def retrieveSeasonEpisodeNumber(self, iFileName):
# regular expression aiming to find the season and episode number
aRegExp = re.compile('[' + self.delimiter + '].?[0-9]+[^' + self.delimiter + UserTypes.kForbiddenSerieSeparator + '0-9][0-9]+' + self.delimiter)
aSerieEpisodeObject = aRegExp.search(iFileName)
aRegExp = re.compile('[0-9]+')
aSeasonEpisodeNumberObject = aRegExp.findall(aSerieEpisodeObject.group())
return aSeasonEpisodeNumberObject
def retrieveEpisodeTitle(self, iFileName, isFromFile=True):
# regular expression aiming to find the season and episode number
aRegExp = re.compile('[' + self.delimiter + '].?[0-9]+[^' + self.delimiter + UserTypes.kForbiddenSerieSeparator + '0-9][0-9]+' + self.delimiter)
aEpisodeTitle = iFileName[aRegExp.search(iFileName).end():]
if isFromFile:
return self.formatTitle(aEpisodeTitle[:aEpisodeTitle.rfind('.')])
return self.formatTitle(self.formatTitle(aEpisodeTitle))
def decode(self):
aVideoElement = {}
aFileName = self.fileName
aDirectoryName = self.directoryName
if kIgnoreKeyword in self.fileName.lower():
print('Ignored element: ' + self.fileName)
return
elif self.isSeriesEpisode(self.fileName):
#print(CustomColors.GREEN + 'Serie episode found: ' + self.fileName + CustomColors.ENDC)
aTitle = self.retrieveTitle(aFileName, True)
aSeasonEpisode = self.retrieveSeasonEpisodeNumber(aFileName)
aVideoElement = VideoElement(aTitle, UserTypes.serie, aFileName)
aVideoElement.setSeasonChapter(aSeasonEpisode[0], aSeasonEpisode[1], self.retrieveEpisodeTitle(aFileName))
        # the file name may not follow the series naming pattern even though its containing folder does,
        # so we perform a "last chance" check on the parent folder
elif self.isSeriesEpisode(aDirectoryName):
aTitle = self.retrieveTitle(aDirectoryName, True)
aSeasonEpisode = self.retrieveSeasonEpisodeNumber(aDirectoryName)
aVideoElement = VideoElement(aTitle, UserTypes.serie, aFileName)
aVideoElement.setSeasonChapter(aSeasonEpisode[0], aSeasonEpisode[1], self.retrieveEpisodeTitle(aDirectoryName, False))
else:
#print(CustomColors.BLUE + 'Movie found: ' + self.fileName + CustomColors.ENDC)
aTitle = self.retrieveTitle(aFileName)
aVideoElement = VideoElement(aTitle, UserTypes.movie, aFileName)
return aVideoElement
def __init__(self, iFileName, iDirectoryName, iCategories):
# set the categories
self.setCategories(iCategories)
# set inputs
self.fileName = iFileName
self.directoryName = iDirectoryName
|
mit
| 6,190,352,808,857,124,000 | 43.763006 | 156 | 0.639721 | false |
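A hedged usage sketch for the Decoder above: the file name, directory name, and category mapping are invented, but the category keys deliberately contain 'movie' and 'serie' so setCategories can resolve them.

# Illustrative only -- names and categories are made up.
from common.decoder import Decoder

aCategories = {'My movies': '/mnt/media/movies',
               'My series': '/mnt/media/series'}

aDecoder = Decoder('Some.Show.S01E03.The.Pilot.mkv',
                   'Some.Show.Season.1',
                   aCategories)

# Expected to come back as a VideoElement classified under the serie category.
aVideo = aDecoder.decode()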