repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
---|---|---|---|---|---|---|---|---|
tracim/tracim-webdav
|
wsgidav/addons/tracim/sql_resources.py
|
Python
|
mit
| 16,929 | 0.000945 |
# coding: utf8
from wsgidav.dav_provider import DAVCollection, DAVNonCollection
from wsgidav.dav_error import DAVError, HTTP_FORBIDDEN
from wsgidav import util
from wsgidav.addons.tracim import role, MyFileStream
from time import mktime
from datetime import datetime
from os.path import normpath, dirname, basename
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Root(DAVCollection):
    """Root WebDAV collection listing every Tracim workspace."""

    def __init__(self, path, environ):
        super(Root, self).__init__(path, environ)

    def __repr__(self):
        return 'Root folder'

    def getCreationDate(self):
        # The root has no stored metadata; report "now" for both dates.
        return mktime(datetime.now().timetuple())

    def getDisplayName(self):
        return 'Tracim - Home'

    def getLastModified(self):
        return mktime(datetime.now().timetuple())

    def getMemberNames(self):
        return self.provider.get_all_workspaces(only_name=True)

    def getMember(self, workspace_name):
        """Return the Workspace resource, or None if the user lacks READER rights."""
        workspace = self.provider.get_workspace({'label': workspace_name})
        user = self.environ["http_authenticator.username"]
        if self.provider.has_right(user, workspace.workspace_id, role["READER"]):
            return Workspace(self.path + workspace.label, self.environ, workspace)
        return None

    def createEmptyResource(self, name):
        # Files cannot live directly under the root.
        raise DAVError(HTTP_FORBIDDEN)

    def createCollection(self, name):
        # Workspaces are not created through WebDAV at the root level.
        raise DAVError(HTTP_FORBIDDEN)

    def getMemberList(self):
        candidates = (self.getMember(name) for name in self.getMemberNames())
        return [member for member in candidates if member is not None]
class Workspace(DAVCollection):
    """WebDAV collection mapping a Tracim workspace."""

    def __init__(self, path, environ, workspace):
        super(Workspace, self).__init__(path, environ)
        self.workspace = workspace

    def __repr__(self):
        return "Workspace: %s" % self.workspace.label

    def getCreationDate(self):
        return mktime(self.workspace.created.timetuple())

    def getDisplayName(self):
        return self.workspace.label

    def getLastModified(self):
        return mktime(self.workspace.updated.timetuple())

    def getMemberNames(self):
        # Ids of the workspace's top-level items; resolved by getMember().
        return self.provider.get_workspace_children_id(self.workspace)

    def getMember(self, item_id):
        """Return the top-level Folder for item_id, or None without READER rights."""
        item = self.provider.get_item({'id': item_id, 'child_revision_id': None})
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            item.workspace_id,
            role["READER"]
        ):
            return None
        return Folder(self.path + item.item_name, self.environ, item)

    def createEmptyResource(self, name):
        # Files may only be created inside folders, not directly in a workspace.
        raise DAVError(HTTP_FORBIDDEN)

    def createCollection(self, name):
        """Create a top-level FOLDER item; requires CONTENT_MANAGER rights."""
        assert "/" not in name
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.workspace.workspace_id,
            role["CONTENT_MANAGER"]
        ):
            raise DAVError(HTTP_FORBIDDEN)
        item = self.provider.add_item(
            item_name=name,
            item_type="FOLDER",
            workspace_id=self.workspace.workspace_id
        )
        return Folder(self.path + name, self.environ, item)

    def delete(self):
        """Delete the whole workspace; requires WORKSPACE_MANAGER rights."""
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.workspace.workspace_id,
            role["WORKSPACE_MANAGER"]
        ):
            raise DAVError(HTTP_FORBIDDEN)
        self.provider.delete_workspace(self.workspace)
        self.removeAllLocks(True)

    def copyMoveSingle(self, destpath, ismove):
        # A workspace "move" is just a rename; a "copy" creates a new, empty
        # workspace with the destination name.
        # NOTE(review): unlike moveRecursive, no rights check happens here —
        # confirm callers always go through moveRecursive for moves.
        if ismove:
            self.provider.set_workspace_label(self.workspace, basename(normpath(destpath)))
        else:
            self.provider.add_workspace(basename(normpath(destpath)))

    def supportRecursiveMove(self, destpath):
        return True

    def moveRecursive(self, destpath):
        """Rename the workspace; destination must stay directly under '/'."""
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.workspace.workspace_id,
            role["WORKSPACE_MANAGER"]
        ) or dirname(normpath(destpath)) != '/':
            raise DAVError(HTTP_FORBIDDEN)
        self.provider.set_workspace_label(self.workspace, basename(normpath(destpath)))

    def setLastModified(self, destpath, timestamp, dryrun):
        # Timestamps are managed by Tracim; refuse to set them via WebDAV.
        return False

    def getMemberList(self):
        memberlist = []
        for name in self.getMemberNames():
            member = self.getMember(name)
            if member is not None:
                memberlist.append(member)
        return memberlist
class Folder(DAVCollection):
    """WebDAV collection mapping a Tracim FOLDER item inside a workspace."""

    def __init__(self, path, environ, item):
        super(Folder, self).__init__(path, environ)
        self.item = item

    def __repr__(self):
        return "Folder: %s" % self.item.item_name

    def getCreationDate(self):
        return mktime(self.item.created.timetuple())

    def getDisplayName(self):
        return self.item.item_name

    def getLastModified(self):
        return mktime(self.item.updated.timetuple())

    def getMemberNames(self):
        # Child item ids; getMember() resolves each id to a resource.
        return self.provider.get_item_children(self.item.id)

    def getMember(self, item_id):
        # Rights are checked against this folder's workspace before the lookup.
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.item.workspace_id,
            role["READER"]
        ):
            return None
        item = self.provider.get_item({'id': item_id, 'child_revision_id': None})
        # Delegate to the provider so the child gets the right resource class.
        return self.provider.getResourceInst(self.path + item.item_name, self.environ)

    def createEmptyResource(self, name):
        # Creating a FILE requires CONTRIBUTOR rights on the workspace.
        assert "/" not in name
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.item.workspace_id,
            role["CONTRIBUTOR"]
        ):
            raise DAVError(HTTP_FORBIDDEN)
        item = self.provider.add_item(
            item_name=name,
            item_type="FILE",
            workspace_id=self.item.workspace_id,
            parent_id=self.item.id
        )
        return File(self.path + name, self.environ, item)

    def createCollection(self, name):
        # Creating a sub-FOLDER requires CONTENT_MANAGER rights.
        assert "/" not in name
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.item.workspace_id,
            role["CONTENT_MANAGER"]
        ):
            raise DAVError(HTTP_FORBIDDEN)
        item = self.provider.add_item(
            item_name=name,
            item_type="FOLDER",
            workspace_id=self.item.workspace_id,
            parent_id=self.item.id
        )
        return Folder(self.path + name, self.environ, item)

    def delete(self):
        # Deleting requires CONTENT_MANAGER rights; locks are released after.
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.item.workspace_id,
            role["CONTENT_MANAGER"]
        ):
            raise DAVError(HTTP_FORBIDDEN)
        self.provider.delete_item(self.item)
        self.removeAllLocks(True)

    def copyMoveSingle(self, destpath, ismove):
        # A destination directly under '/' would be a workspace, hence forbidden.
        if not self.provider.has_right(
            self.environ["http_authenticator.username"],
            self.item.workspace_id,
            role["CONTENT_MANAGER"]
        ) or dirname(normpath(destpath)) == '/':
            raise DAVError(HTTP_FORBIDDEN)
        if ismove:
            self.provider.move_item(self.item, destpath)
        else:
            self.provider.copy_item(self.item, destpath)

    def supportRecursiveMove(self, destpath):
        return True

    def moveRecursive(self, destpath):
        # A single provider call moves the whole subtree.
        self.copyMoveSingle(destpath, True)

    def setLastModified(self, destpath, timestamp, dryrun):
        # Timestamps are managed by Tracim; refuse to set them via WebDAV.
        return False
def getMemberList(self, copyOrMove=False):
memberlist = []
for name in self.getMemberNames():
member = self.getMember(name)
if member is not None:
memberlist.append(member)
print "j'ai : ", copyOrMove
if memberlist != [] and not copyOrMove:
memb
|
shahankhatch/aurora
|
src/main/python/apache/aurora/executor/common/announcer.py
|
Python
|
apache-2.0
| 8,388 | 0.008345 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import posixpath
import threading
import time
from abc import abstractmethod
from kazoo.client import KazooClient
from kazoo.retry import KazooRetry
from mesos.interface import mesos_pb2
from twitter.common import log
from twitter.common.concurrent.deferred import defer
from twitter.common.exceptions import ExceptionalThread
from twitter.common.metrics import LambdaGauge, Observable
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.serverset import Endpoint, ServerSet
from apache.aurora.executor.common.status_checker import (
StatusChecker,
StatusCheckerProvider,
StatusResult
)
from apache.aurora.executor.common.task_info import (
mesos_task_instance_from_assigned_task,
resolve_ports
)
def make_endpoints(hostname, portmap, primary_port):
    """
    Generate primary, additional endpoints from a portmap and primary_port.
    primary_port must be a name in the portmap dictionary.
    """
    # Do int check as stop-gap measure against incompatible downstream clients.
    additional_endpoints = {
        name: Endpoint(hostname, port)
        for name, port in portmap.items()
        if isinstance(port, int)
    }
    # It's possible for the primary port to not have been allocated if this task
    # is using autoregistration, so register with a port of 0.
    primary = Endpoint(hostname, portmap.get(primary_port, 0))
    return primary, additional_endpoints
class AnnouncerCheckerProvider(StatusCheckerProvider):
    """Abstract provider that builds an AnnouncerChecker for an assigned task."""

    def __init__(self, name=None):
        # Optional checker name; AnnouncerChecker falls back to its default.
        self.name = name
        super(AnnouncerCheckerProvider, self).__init__()

    @abstractmethod
    def make_zk_client(self):
        """Create a ZooKeeper client which can be asynchronously started"""

    @abstractmethod
    def make_zk_path(self, assigned_task):
        """Given an assigned task return the path into where we should announce the task."""

    def from_assigned_task(self, assigned_task, _):
        """Return an AnnouncerChecker for assigned_task, or None if it has no announce config."""
        mesos_task = mesos_task_instance_from_assigned_task(assigned_task)
        if not mesos_task.has_announce():
            return None
        portmap = resolve_ports(mesos_task, assigned_task.assignedPorts)
        # assigned_task.slaveHost is the --hostname argument passed into the mesos slave.
        # Using this allows overriding the hostname published into ZK when announcing.
        # If no argument was passed to the mesos-slave, the slave falls back to gethostname().
        endpoint, additional = make_endpoints(
            assigned_task.slaveHost,
            portmap,
            mesos_task.announce().primary_port().get())
        client = self.make_zk_client()
        path = self.make_zk_path(assigned_task)
        # Allow the ZK connect as much time as a full health-check failure
        # cycle would take before declaring the announcer unhealthy.
        initial_interval = mesos_task.health_check_config().initial_interval_secs().get()
        interval = mesos_task.health_check_config().interval_secs().get()
        consecutive_failures = mesos_task.health_check_config().max_consecutive_failures().get()
        timeout_secs = initial_interval + (consecutive_failures * interval)
        return AnnouncerChecker(
            client, path, timeout_secs, endpoint, additional=additional, shard=assigned_task.instanceId,
            name=self.name)
class DefaultAnnouncerCheckerProvider(AnnouncerCheckerProvider):
    """Announces into a ZooKeeper ensemble under <root>/<role>/<environment>/<name>."""

    DEFAULT_RETRY_MAX_DELAY = Amount(5, Time.MINUTES)
    DEFAULT_RETRY_POLICY = KazooRetry(
        max_tries=None,
        ignore_expire=True,
        max_delay=DEFAULT_RETRY_MAX_DELAY.as_(Time.SECONDS),
    )

    def __init__(self, ensemble, root='/aurora'):
        self.__ensemble = ensemble
        self.__root = root
        super(DefaultAnnouncerCheckerProvider, self).__init__()

    def make_zk_client(self):
        # Retry forever so transient ZK outages do not kill the announcer.
        return KazooClient(self.__ensemble, connection_retry=self.DEFAULT_RETRY_POLICY)

    def make_zk_path(self, assigned_task):
        config = assigned_task.task
        # Prefer the structured job key; fall back to the legacy owner fields.
        if config.job:
            role, environment, name = config.job.role, config.job.environment, config.job.name
        else:
            role, environment, name = config.owner.role, config.environment, config.jobName
        return posixpath.join(self.__root, role, environment, name)
class ServerSetJoinThread(ExceptionalThread):
    """Background thread to reconnect to Serverset on session expiration."""

    LOOP_WAIT = Amount(1, Time.SECONDS)

    def __init__(self, event, joiner, loop_wait=LOOP_WAIT):
        """
        :param event: threading.Event set by the owner whenever a (re)join is needed.
        :param joiner: zero-argument callable that performs the serverset join.
        :param loop_wait: poll interval while waiting for the event.
        """
        self._event = event
        self._joiner = joiner
        self._stopped = threading.Event()
        self._loop_wait = loop_wait
        super(ServerSetJoinThread, self).__init__()
        self.daemon = True

    def run(self):
        # Wait with a bounded timeout so a stop() request is noticed within
        # one loop_wait period even if the join event never fires.
        while True:
            if self._stopped.is_set():
                break
            self._event.wait(timeout=self._loop_wait.as_(Time.SECONDS))
            if not self._event.is_set():
                continue
            log.debug('Join event triggered, joining serverset.')
            self._event.clear()
            self._joiner()

    def stop(self):
        self._stopped.set()
class Announcer(Observable):
    """Owns a serverset membership and rejoins when the ZK session expires."""

    class Error(Exception): pass

    EXCEPTION_WAIT = Amount(15, Time.SECONDS)

    def __init__(self,
                 serverset,
                 endpoint,
                 additional=None,
                 shard=None,
                 clock=time,
                 exception_wait=None):
        self._membership = None
        # Timestamp of when membership was lost; None while joined.
        self._membership_termination = clock.time()
        self._endpoint = endpoint
        self._additional = additional or {}
        self._shard = shard
        self._serverset = serverset
        self._rejoin_event = threading.Event()
        self._clock = clock
        self._thread = None
        self._exception_wait = exception_wait or self.EXCEPTION_WAIT

    def disconnected_time(self):
        # Lockless membership length check
        membership_termination = self._membership_termination
        if membership_termination is None:
            return 0
        return self._clock.time() - membership_termination

    def _join_inner(self):
        return self._serverset.join(
            endpoint=self._endpoint,
            additional=self._additional,
            shard=self._shard,
            expire_callback=self.on_expiration)

    def _join(self):
        """Join the serverset, retrying forever with exception_wait backoff."""
        if self._membership is not None:
            raise self.Error("join called, but already have membership!")
        while True:
            try:
                self._membership = self._join_inner()
                self._membership_termination = None
            except Exception as e:
                log.error('Failed to join ServerSet: %s' % e)
                self._clock.sleep(self._exception_wait.as_(Time.SECONDS))
            else:
                break

    def start(self):
        """Start the background join thread and trigger the initial join."""
        self._thread = ServerSetJoinThread(self._rejoin_event, self._join)
        self._thread.start()
        self.rejoin()

    def rejoin(self):
        # Signal the join thread; the actual join happens on that thread.
        self._rejoin_event.set()

    def stop(self):
        # NOTE(review): calling stop() before start() leaves self._thread None
        # and raises AttributeError here — confirm callers always start first.
        thread, self._thread = self._thread, None
        thread.stop()
        if self._membership:
            self._serverset.cancel(self._membership)

    def on_expiration(self):
        self._membership = None
        if not self._thread:
            # Already stopped; do not attempt to rejoin.
            return
        self._membership_termination = self._clock.time()
        log.info('Zookeeper session expired.')
        self.rejoin()
class AnnouncerChecker(StatusChecker):
    # Default status-checker name used when none is supplied.
    DEFAULT_NAME = 'announcer'

    def __init__(self, client, path, timeout_secs, endpoint, additional=None, shard=None, name=None):
        """Build an announcer over a ServerSet rooted at `path` on `client`.

        The kazoo client is started asynchronously here; the connect event is
        waited on (up to timeout_secs) later during startup.
        """
        self.__client = client
        self.__connect_event = client.start_async()
        self.__timeout_secs = timeout_secs
        self.__announcer = Announcer(ServerSet(client, path), endpoint, additional=additional,
                                     shard=shard)
        self.__name = name or self.DEFAULT_NAME
        self.__status = None
        self.start_event = threading.Event()
        # Export how long we have been without ZK membership.
        self.metrics.register(LambdaGauge('disconnected_time', self.__announcer.disconnected_time))

    @property
    def status(self):
        # Last StatusResult produced (None until a check completes).
        return self.__status

    def name(self):
        # NOTE(review): unlike `status`, this is a plain method, not a
        # @property — confirm the StatusChecker interface expects a callable.
        return self.__name
def __start(self):
self.__connect_event.wait(timeout=self.__timeout_secs)
if not self.__connect_event.is_set():
self.__status = StatusResult("Creating Announcer Ser
|
larose/utt
|
utt/plugins/0_hello.py
|
Python
|
gpl-3.0
| 593 | 0.003373 |
import argparse
from ..api import _v1
class HelloHandler:
    """Command handler that records a 'hello' entry at the current time."""

    def __init__(
        self, args: argparse.Namespace, now: _v1.Now, add_entry: _v1._private.AddEntry,
    ):
        self._args = args
        self._now = now
        self._add_entry = add_entry

    def __call__(self):
        # False marks the entry as not being a break/ignored entry.
        self._add_entry(_v1.Entry(self._now, _v1.HELLO_ENTRY_NAME, False))


# Register the 'hello' command with the plugin API; it takes no extra
# arguments (the parser hook is a no-op).
hello_command = _v1.Command(
    "hello",
    "Say '{hello_entry_name}' when you arrive in the morning...".format(hello_entry_name=_v1.HELLO_ENTRY_NAME),
    HelloHandler,
    lambda p: None,
)

_v1.register_command(hello_command)
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/edx_proctoring/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 13,525 | 0.00414 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProctoredExam',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('course_id', models.CharField(max_length=255, db_index=True)),
('content_id', models.CharField(max_length=255, db_index=True)),
('external_id', models.CharField(max_length=255, null=True, db_index=True)),
('exam_name', models.TextField()),
('time_limit_mins', models.IntegerField()),
('due_date', models.DateTimeField(null=True)),
('is_proctored', models.BooleanField(default=False)),
('is_practice_exam', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=False)),
],
options={
'db_table': 'proctoring_proctoredexam',
},
),
migrations.CreateModel(
name='ProctoredExamReviewPolicy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('review_policy', models.TextField()),
('proctored_exam', models.ForeignKey(to='edx_proctoring.ProctoredExam')),
('set_by_user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'proctoring_proctoredexamreviewpolicy',
'verbose_name': 'Proctored exam review policy',
'verbose_name_plural': 'Proctored exam review policies',
},
),
migrations.CreateModel(
name='ProctoredExamReviewPolicyHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('original_id', models.IntegerField(db_index=True)),
('review_policy', models.TextField()),
('proctored_exam', models.ForeignKey(to='edx_proctoring.ProctoredExam')),
('set_by_user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'proctoring_proctoredexamreviewpolicyhistory',
'verb
|
ose_name': 'proctored exam review policy history',
},
),
migrations.CreateModel(
name='ProctoredExamSoftwareSecureComment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
|
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('start_time', models.IntegerField()),
('stop_time', models.IntegerField()),
('duration', models.IntegerField()),
('comment', models.TextField()),
('status', models.CharField(max_length=255)),
],
options={
'db_table': 'proctoring_proctoredexamstudentattemptcomment',
'verbose_name': 'proctored exam software secure comment',
},
),
migrations.CreateModel(
name='ProctoredExamSoftwareSecureReview',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('attempt_code', models.CharField(max_length=255, db_index=True)),
('review_status', models.CharField(max_length=255)),
('raw_data', models.TextField()),
('video_url', models.TextField()),
('exam', models.ForeignKey(to='edx_proctoring.ProctoredExam', null=True)),
('reviewed_by', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True)),
('student', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'proctoring_proctoredexamsoftwaresecurereview',
'verbose_name': 'Proctored exam software secure review',
},
),
migrations.CreateModel(
name='ProctoredExamSoftwareSecureReviewHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('attempt_code', models.CharField(max_length=255, db_index=True)),
('review_status', models.CharField(max_length=255)),
('raw_data', models.TextField()),
('video_url', models.TextField()),
('exam', models.ForeignKey(to='edx_proctoring.ProctoredExam', null=True)),
('reviewed_by', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True)),
('student', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'proctoring_proctoredexamsoftwaresecurereviewhistory',
'verbose_name': 'Proctored exam review archive',
},
),
migrations.CreateModel(
name='ProctoredExamStudentAllowance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('key', models.CharField(max_length=255)),
('value', models.CharField(max_length=255)),
('proctored_exam', models.ForeignKey(to='edx_proctoring.ProctoredExam')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'proctoring_proctoredexamstudentallowance',
'verbose_name': 'proctored allowance',
},
),
migration
|
dogukantufekci/supersalon
|
supersalon/users/admin.py
|
Python
|
bsd-3-clause
| 1,106 | 0.000904 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.a
|
uth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import User
class MyUserChangeForm(UserChangeForm):
    """UserChangeForm bound to this project's custom User model."""

    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """UserCreationForm bound to the custom User model; rejects duplicate usernames."""

    # NOTE(review): dict.update() returns None, so `error_message` is bound to
    # None; the useful effect is the in-place mutation of
    # UserCreationForm.error_messages, which clean_username reads below —
    # confirm this shared-mutation idiom is intentional.
    error_message = UserCreationForm.error_messages.update({
        'duplicate_username': _("This username has already been taken.")
    })

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        # EAFP: a successful lookup means the name is already taken.
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
    """Admin for the custom User model, wired to the custom change/creation forms."""
    form = MyUserChangeForm
    add_form = MyUserCreationForm
|
Rctue/DialogStateMachine
|
DialogTest_2_AskDeviceOnFinger.py
|
Python
|
gpl-2.0
| 1,079 | 0.013902 |
#!/usr/bin/env python
from sre_parse import isdigit
import sys
__author__ = 'jpijper'
import roslib; roslib.load_manifest('smach_tutorials')
import rospy
import smach_ros
from DialogStateMachine import SMDialog
def main():
    """Run the 'ask device on finger' dialog state machine."""
    # To restrict the amount of feedback to the screen, a feedback level
    # can be given on the command line.
    # Level 0 means show only the most urgent feedback and the higher the level, the more is shown.
    # Use str.isdigit() here: the previous sre_parse.isdigit is an internal,
    # single-character helper whose lexicographic compare accepts strings like
    # "5x", which then crash int().
    feedback_level = int(sys.argv[1]) if len(sys.argv) > 1 and sys.argv[1].isdigit() else 10

    rospy.init_node('sm_dialog_ask_device_on_finger')
    sm_top = SMDialog('ask_device_on_finger.csv', '192.168.0.4').sm_top

    ## inserted for smach_viewer
    # Create and start the introspection server
    #sis = smach_ros.IntrospectionServer('server_name', sm_top, '/SM_ROOT')
    #sis.start()
    ## end insert

    # Execute SMACH plan
    outcome = sm_top.execute()

    ## inserted for smach_viewer
    # Wait for ctrl-c to stop the application
    #rospy.spin()
    #sis.stop()
    ## end insert


if __name__ == '__main__':
    main()
|
JoelEager/pyTanks.Server
|
dataModels/command.py
|
Python
|
mit
| 2,194 | 0.002735 |
import json
import numbers
import html
import config
class command:
    """
    Used to store an incoming player command
    """

    def __init__(self, message):
        """
        Called with a raw message string from a client
        :raise: ValueError if the message isn't a valid command
        """
        # Try to parse it as a JSON command
        try:
            message = json.loads(message)
        except json.decoder.JSONDecodeError:
            # Message isn't valid JSON
            raise ValueError("Invalid JSON")

        # All commands must have a valid action
        if message.get("action") not in config.server.commands.validCommands:
            raise ValueError("Missing or invalid action")
        self.action = message["action"]

        # Check for a valid arg if it's required
        if self.action == config.server.commands.turn or self.action == config.server.commands.fire:
            # turn/fire take a numeric argument (angle)
            if not isinstance(message.get("arg"), numbers.Number):
                raise ValueError("Missing or invalid arg")
            self.arg = message["arg"]
        elif self.action == config.server.commands.setInfo:
            self.arg = str(message.get("arg"))
            if len(self.arg) > config.server.commands.infoMaxLen:
                raise ValueError("Info string is longer than " + str(config.server.commands.infoMaxLen) + " characters")
            # Escape HTML, then restore intentional line breaks
            self.arg = html.escape(self.arg)
            self.arg = self.arg.replace("\n", " <br /> ")
            # Parse urls: wrap each http(s)://... token in an anchor tag
            start = self.arg.find("http")
            while start != -1:
                end = self.arg.find(" ", start)
                if end == -1:
                    end = len(self.arg)
                if self.arg[start:start + 7] == "http://" or self.arg[start:start + 8] == "https://":
                    url = self.arg[start:end]
                    aTag = "<a href='" + url + "' target='_blank'>" + url + "</a>"
                    self.arg = self.arg[:start] + aTag + self.arg[end:]
                    # Skip past the inserted markup before searching again
                    end += len(aTag) - len(url)
                start = self.arg.find("http", end)
        else:
            # Remaining actions take no argument
            if "arg" in message:
                raise ValueError("Unexpected arg")
moniker-dns/debian-beaver
|
beaver/transports/sqs_transport.py
|
Python
|
mit
| 3,043 | 0.003615 |
# -*- coding: utf-8 -*-
import boto.sqs
import uuid
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class SqsTransport(BaseTransport):
    """Beaver transport that ships formatted log lines to an AWS SQS queue."""

    def __init__(self, beaver_config, logger=None):
        super(SqsTransport, self).__init__(beaver_config, logger=logger)

        self._access_key = beaver_config.get('sqs_aws_access_key')
        self._secret_key = beaver_config.get('sqs_aws_secret_key')
        self._region = beaver_config.get('sqs_aws_region')
        self._queue_name = beaver_config.get('sqs_aws_queue')

        try:
            if self._access_key is None and self._secret_key is None:
                # No explicit credentials: let boto use its credential chain.
                self._connection = boto.sqs.connect_to_region(self._region)
            else:
                self._connection = boto.sqs.connect_to_region(
                    self._region,
                    aws_access_key_id=self._access_key,
                    aws_secret_access_key=self._secret_key)

            if self._connection is None:
                self._logger.warn('Unable to connect to AWS - check your AWS credentials')
                raise TransportException('Unable to connect to AWS - check your AWS credentials')

            self._queue = self._connection.get_queue(self._queue_name)

            if self._queue is None:
                raise TransportException('Unable to access queue with name {0}'.format(self._queue_name))
        # 'except E as e' works on Python 2.6+ and 3.x, unlike 'except E, e'.
        except Exception as e:
            # NOTE(review): e.message is a Python 2-ism; str(e) would be portable.
            raise TransportException(e.message)

    def callback(self, filename, lines, **kwargs):
        """Format lines and send them to SQS in batches of up to 10."""
        timestamp = self.get_timestamp(**kwargs)
        if kwargs.get('timestamp', False):
            del kwargs['timestamp']

        message_batch = []
        for line in lines:
            message_batch.append((uuid.uuid4(), self.format(filename, line, timestamp, **kwargs), 0))
            if len(message_batch) == 10:  # SQS can only handle up to 10 messages in batch send
                self._logger.debug('Flushing 10 messages to SQS queue')
                self._send_message_batch(message_batch)
                message_batch = []

        if len(message_batch) > 0:
            self._logger.debug('Flushing last {0} messages to SQS queue'.format(len(message_batch)))
            self._send_message_batch(message_batch)
        return True

    def _send_message_batch(self, message_batch):
        """Write one batch to the queue; raise TransportException on failure."""
        try:
            result = self._queue.write_batch(message_batch)
            if not result:
                self._logger.error('Error occurred sending messages to SQS queue {0}. result: {1}'.format(
                    self._queue_name, result))
                raise TransportException('Error occurred sending message to queue {0}'.format(self._queue_name))
        except Exception as e:
            self._logger.exception('Exception occurred sending batch to SQS queue')
            raise TransportException(e.message)

    def interrupt(self):
        # Nothing to clean up on shutdown.
        return True

    def unhandled(self):
        return True
|
vongochung/ngudan
|
permission_backend_nonrel/admin.py
|
Python
|
bsd-3-clause
| 5,050 | 0.002178 |
from django import forms
from django.contrib import admin
from django.utils.translation import ugettext
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.sites import NotRegistered
from django.contrib.auth.models import User, Group, Permission
from django.contrib.admin.widgets import FilteredSelectMultiple
from .models import UserPermissionList, GroupPermissionList
from .utils import update_permissions_user, \
update_user_groups, update_permissions_group
class UserForm(forms.ModelForm):
    """Base User form excluding the relational permission/group fields."""
    class Meta:
        model = User
        exclude = ('user_permissions', 'groups')
class NonrelPermissionUserForm(UserForm):
    """User form exposing permissions and groups as plain multiple-choice
    fields backed by UserPermissionList instead of the relational M2M fields."""

    user_permissions = forms.MultipleChoiceField(required=False)
    groups = forms.MultipleChoiceField(required=False)

    def __init__(self, *args, **kwargs):
        super(NonrelPermissionUserForm, self).__init__(*args, **kwargs)
        # Rebuild the fields so each form instance gets fresh choices.
        self.fields['user_permissions'] = forms.MultipleChoiceField(required=False)
        self.fields['groups'] = forms.MultipleChoiceField(required=False)

        permissions_objs = Permission.objects.all().order_by('name')
        choices = []
        for perm_obj in permissions_objs:
            choices.append([perm_obj.id, perm_obj.name])
        self.fields['user_permissions'].choices = choices

        group_objs = Group.objects.all()
        choices = []
        for group_obj in group_objs:
            choices.append([group_obj.id, group_obj.name])
        self.fields['groups'].choices = choices

        # Pre-select the permissions/groups stored for the edited user, if any.
        try:
            user_perm_list = UserPermissionList.objects.get(
                user=kwargs['instance'])
            self.fields['user_permissions'].initial = user_perm_list.permission_fk_list
            self.fields['groups'].initial = user_perm_list.group_fk_list
        except (UserPermissionList.DoesNotExist, KeyError):
            self.fields['user_permissions'].initial = list()
            self.fields['groups'].initial = list()
class NonrelPermissionCustomUserAdmin(UserAdmin):
    """UserAdmin that persists permission/group selections via the nonrel
    UserPermissionList helpers instead of the relational M2M fields."""

    form = NonrelPermissionUserForm
    list_filter = ('is_staff', 'is_superuser', 'is_active')

    def save_model(self, request, obj, form, change):
        super(NonrelPermissionCustomUserAdmin, self).save_model(request, obj, form, change)

        # Each field is optional on the form, hence the KeyError guards.
        try:
            selected_perms = form.cleaned_data['user_permissions']
            if selected_perms:
                perms = list(Permission.objects.filter(
                    id__in=selected_perms).order_by('name'))
            else:
                perms = []
            update_permissions_user(perms, obj)
        except KeyError:
            pass

        try:
            selected_groups = form.cleaned_data['groups']
            if selected_groups:
                group_list = list(Group.objects.filter(id__in=selected_groups))
            else:
                group_list = []
            update_user_groups(obj, group_list)
        except KeyError:
            pass
class PermissionAdmin(admin.ModelAdmin):
    """Permission admin listing permissions alphabetically."""
    ordering = ('name',)
class GroupForm(forms.ModelForm):
    """Group form exposing permissions as a plain multiple-choice field,
    backed by GroupPermissionList instead of the relational M2M field."""

    permissions = forms.MultipleChoiceField(required=False)

    def __init__(self, *args, **kwargs):
        # Temporarily exclude 'permissions' as it causes an
        # unsupported query to be executed
        original_exclude = self._meta.exclude
        self._meta.exclude = ['permissions',] + (self._meta.exclude if self._meta.exclude else [])
        super(GroupForm, self).__init__(*args, **kwargs)
        # Restore the original exclude list once the base __init__ is done.
        self._meta.exclude = original_exclude

        self.fields['permissions'] = forms.MultipleChoiceField(required=False, widget=FilteredSelectMultiple(ugettext('Permissions'), False))
        permissions_objs = Permission.objects.all().order_by('name')
        choices = []
        for perm_obj in permissions_objs:
            choices.append([perm_obj.id, perm_obj.name])
        self.fields['permissions'].choices = choices
        # Pre-select the permissions currently stored for this group, if any.
        try:
            current_perm_list = GroupPermissionList.objects.get(
                group=kwargs['instance'])
            self.fields['permissions'].initial = current_perm_list.permission_fk_list
        except (GroupPermissionList.DoesNotExist, KeyError):
            self.fields['permissions'].initial = []

    class Meta:
        model = Group
        fields = ('name',)
class CustomGroupAdmin(admin.ModelAdmin):
    """Group admin that stores selected permissions on GroupPermissionList."""

    form = GroupForm
    fieldsets = None

    def save_model(self, request, obj, form, change):
        super(CustomGroupAdmin, self).save_model(request, obj, form, change)
        selected = form.cleaned_data['permissions']
        if selected:
            perms = list(Permission.objects.filter(
                id__in=selected).order_by('name'))
        else:
            perms = []
        update_permissions_group(perms, obj)
# Swap the stock User/Group admins for the non-rel-aware ones defined above.
# NotRegistered is raised when the default admin was never registered in the
# first place; that is safe to ignore.
try:
    admin.site.unregister(User)
except NotRegistered:
    pass
try:
    admin.site.unregister(Group)
except NotRegistered:
    pass
admin.site.register(User, NonrelPermissionCustomUserAdmin)
admin.site.register(Permission, PermissionAdmin)
admin.site.register(Group, CustomGroupAdmin)
|
project-rig/network_tester
|
examples/getting_started_example.py
|
Python
|
gpl-2.0
| 1,458 | 0.000686 |
"""This is (more-or-less) the example experiment described in the getting
started section of the manual.
In this experiment we simply measure the number of
|
packets received as we ramp
up the amount of traffic generated.
"""
import sys
import random
from network_tester import Experiment, to_csv
# Take the SpiNNaker board IP/hostname from the command-line
e = Experiment(sys.argv[1])
# Define a random network
cores = [e.new_core() for _ in range(64)]
flows = [e.new_flow(core, random.sample(cores, 8))
for core in cores]
e.timestep =
|
1e-5 # 10 us
# Sweep over a range of packet-generation probabilities
num_steps = 10
for step in range(num_steps):
with e.new_group() as group:
e.probability = step / float(num_steps - 1)
group.add_label("probability", e.probability)
# Run each group for 1/10th of a second (with some time for warmup cooldown)
e.warmup = 0.05
e.duration = 0.1
e.cooldown = 0.01
e.record_received = True
# When the network saturates (for particularly high packet rates) realtime
# deadlines will be missed in the packet sinks. We'll just ignore them in this
# experiment.
results = e.run(ignore_deadline_errors=True)
totals = results.totals()
# Plot the results
import matplotlib.pyplot as plt
plt.plot(totals["probability"], totals["received"])
plt.xlabel("Packet injection probability")
plt.ylabel("Packets received at sinks")
plt.show()
# Produce an R-compatible CSV file.
print(to_csv(totals))
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_runtime_features/sys_current_frames.py
|
Python
|
apache-2.0
| 1,093 | 0 |
"""Demonstrate sys._current_frames(): report where each blocked thread is
currently stopped."""
import sys
import threading
import time

io_lock = threading.Lock()   # serializes print output across threads
blocker = threading.Lock()   # deliberately held forever to block one thread


def block(i):
    """Worker that sleeps; for i != 0 it first tries to take `blocker`,
    so one worker ends up blocked forever and observable via its frame."""
    t = threading.current_thread()
    with io_lock:
        print('{} with ident {} going to sleep'.format(
            t.name, t.ident))
    if i:
        blocker.acquire()  # acquired but never released
    time.sleep(0.2)
    with io_lock:
        print(t.name, 'finishing')
    return


# Create and start several threads that "block"
threads = [
    threading.Thread(target=block, args=(i,))
    for i in range(3)
]
for t in threads:
    t.daemon = True  # setDaemon() is deprecated; assign the attribute instead
    t.start()

# Map the threads from their identifier to the thread object
threads_by_ident = dict((t.ident, t) for t in threads)

# Show where each thread is "blocked"
time.sleep(0.01)
with io_lock:
    for ident, frame in sys._current_frames().items():
        t = threads_by_ident.get(ident)
        if not t:
            # Main thread (not in our map): skip it.
            continue
        print('{} stopped in {} at line {} of {}'.format(
            t.name, frame.f_code.co_name,
            frame.f_lineno, frame.f_code.co_filename))
|
|
hughperkins/kgsgo-dataset-preprocessor
|
thirdparty/future/tests/test_future/test_requests.py
|
Python
|
mpl-2.0
| 3,385 | 0.001182 |
"""
Tests for whether the standard library hooks in ``future`` are compatible with
the ``requests`` package.
"""
from __future__ import absolute_import, unicode_literals, print_function
from future import standard_library
from future.tests.base import unittest, CodeHandler
import textwrap
import sys
import os
import io
# Don't import requests first. This avoids the problem we want to expose:
# with standard_library.suspend_hooks():
# try:
# import requests
# except ImportError:
# requests = None
class write_module(object):
    """
    A context manager to streamline the tests. Creates a temp file for a
    module designed to be imported by the ``with`` block, then removes it
    afterwards.

    :param code: source text of the module (may be indented; it is dedented
        before being written out).
    :param tempdir: directory (including trailing separator) to write to.
    """
    def __init__(self, code, tempdir):
        self.code = code
        self.tempdir = tempdir

    def __enter__(self):
        print('Creating {0}test_imports_future_stdlib.py ...'.format(self.tempdir))
        with io.open(self.tempdir + 'test_imports_future_stdlib.py', 'wt',
                     encoding='utf-8') as f:
            f.write(textwrap.dedent(self.code))
        # Prepend tempdir so the freshly written module is importable.
        sys.path.insert(0, self.tempdir)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        If an exception occurred, we leave the file for inspection.
        """
        sys.path.remove(self.tempdir)
        if exc_type is None:
            # No exception occurred
            os.remove(self.tempdir + 'test_imports_future_stdlib.py')
            try:
                os.remove(self.tempdir + 'test_imports_future_stdlib.pyc')
            except OSError:
                pass
class TestRequests(CodeHandler):
    """
    This class tests whether the requests module conflicts with the
    standard library import hooks, as in issue #19.
    """
    def test_remove_hooks_then_requests(self):
        # Install the future import hooks via a scratch module, remove them,
        # then check that requests still imports and works.
        code = """
        from future import standard_library
        standard_library.install_hooks()
        import builtins
        import http.client
        import html.parser
        """
        with write_module(code, self.tempdir):
            import test_imports_future_stdlib
        standard_library.remove_hooks()
        try:
            import requests
        except ImportError:
            # requests is optional; the test degrades to a no-op.
            print("Requests doesn't seem to be available. Skipping requests test ...")
        else:
            r = requests.get('http://google.com')
            self.assertTrue(r)
        self.assertTrue(True)

    def test_requests_cm(self):
        """
        Tests whether requests can be used importing standard_library modules
        previously with the hooks context manager
        """
        code = """
        from future import standard_library
        with standard_library.hooks():
            import builtins
            import html.parser
            import http.client
        """
        with write_module(code, self.tempdir):
            import test_imports_future_stdlib
        try:
            import requests
        except ImportError:
            # requests is optional; the test degrades to a no-op.
            print("Requests doesn't seem to be available. Skipping requests test ...")
        else:
            r = requests.get('http://google.com')
            self.assertTrue(r)
        self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
JonSteinn/Kattis-Solutions
|
src/Red Rover/Python 3/rr.py
|
Python
|
gpl-3.0
| 479 | 0.004175 |
def all_substr(string):
    """Return the set of all substrings of *string*.

    Includes the empty string (the ``string[i:i]`` slices); that is harmless
    to ``compr``/``min_enc`` because an empty pattern never yields a shorter
    encoding.
    """
    # Reconstructed: the line computing len(string) was garbled in transit.
    s = set()
    length = len(string)
    for i in range(length):
        for j in range(i, length + 1):
            s.add(string[i:j])
    return s
def compr(string, substring):
    """Encoded length of *string* when every (non-overlapping) occurrence of
    *substring* is replaced by a one-character marker, plus the cost of
    storing *substring* itself once."""
    pattern_len = len(substring)
    occurrences = string.count(substring)
    saved = occurrences * (pattern_len - 1)
    return len(string) - saved + pattern_len
|
def min_enc(string):
    """Smallest encoded length achievable over every candidate substring,
    never worse than leaving *string* unencoded."""
    best = len(string)
    for candidate in all_substr(string):
        best = min(best, compr(string, candidate))
    return best
print(min_enc(input()))
|
KaiSzuttor/espresso
|
samples/immersed_boundary/sampleImmersedBoundary.py
|
Python
|
gpl-3.0
| 3,388 | 0.001771 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Simulate the motion of a spherical red blood cell-like particle advected
in a planar Poiseuille flow, with or without volume conservation. For more
details, see :ref:`Immersed Boundary Method for soft elastic objects`.
"""
import espressomd

required_features = ["LB_BOUNDARIES", "VIRTUAL_SITES_INERTIALESS_TRACERS",
                     "EXPERIMENTAL_FEATURES"]
espressomd.assert_features(required_features)

from espressomd import lb, shapes, lbboundaries
from espressomd.virtual_sites import VirtualSitesInertialessTracers

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--no-volcons", action="store_const", dest="volcons",
    const=False, help="Disable volume conservation", default=True)
parser.add_argument(
    "--no-bending", action="store_const", dest="bending",
    const=False, help="Disable bending", default=True)
args = parser.parse_args()

if args.volcons and not args.bending:
    print('Note: removing bending will also remove volume conservation')
    args.volcons = False

# System setup
boxZ = 20
system = espressomd.System(box_l=(20, 20, boxZ))
system.time_step = 1 / 6.
system.cell_system.skin = 0.1
system.virtual_sites = VirtualSitesInertialessTracers()
print("Parallelization: " + str(system.cell_system.node_grid))

force = 0.001

from addSoft import AddSoft
k1 = 0.1
k2 = 1
AddSoft(system, 10, 10, 10, k1, k2)

# case without bending and volCons
outputDir = "outputPure"

# case with bending
if args.bending:
    from addBending import AddBending
    kb = 1
    AddBending(system, kb)
    outputDir = "outputBendPara"

# case with bending and volCons
if args.volcons:
    from addVolCons import AddVolCons
    kV = 10
    AddVolCons(system, kV)
    outputDir = "outputVolParaCUDA"

# Add LB Fluid
lbf = lb.LBFluid(agrid=1, dens=1, visc=1, tau=system.time_step,
                 ext_force_density=[force, 0, 0])
system.actors.add(lbf)

system.thermostat.set_lb(LB_fluid=lbf, gamma=1.0, act_on_virtual=False)

# Setup boundaries
walls = [lbboundaries.LBBoundary() for k in range(2)]
walls[0].set_params(shape=shapes.Wall(normal=[0, 0, 1], dist=0.5))
walls[1].set_params(shape=shapes.Wall(normal=[0, 0, -1], dist=-boxZ + 0.5))

for wall in walls:
    system.lbboundaries.add(wall)

# make directory
import os
os.makedirs(outputDir)
print('Saving data to ' + outputDir)

# Perform integration
from writeVTK import WriteVTK
WriteVTK(system, str(outputDir + "/cell_" + str(0) + ".vtk"))

stepSize = 1000
numSteps = 20
for i in range(numSteps):
    system.integrator.run(stepSize)
    WriteVTK(system, str(outputDir + "/cell_" + str(i + 1) + ".vtk"))
    print("Done " + str(i + 1) + " out of " + str(numSteps) + " steps.")
|
seftler/GPSstorage
|
manage.py
|
Python
|
mit
| 808 | 0 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "GPSstorage.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
dsajkl/123
|
lms/envs/common.py
|
Python
|
agpl-3.0
| 62,841 | 0.002803 |
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
from path import path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "StudentEDX"
CC_MERCHANT_NAME = PLATFORM_NAME
# Social / external links surfaced in footer and marketing templates.
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/studentedx"
PLATFORM_TWITTER_ACCOUNT = "@studentedx"
# NOTE(review): "studenedx" below looks like a typo for "studentedx" --
# confirm against the real account before changing (runtime value).
PLATFORM_TWITTER_URL = "https://twitter.com/studenedx"
PLATFORM_MEETUP_URL = "http://www.meetup.com/YourMeetup"#<---------------------------
PLATFORM_LINKEDIN_URL = "http://www.linkedin.com/company/YourPlatform" #<---------------------------
PLATFORM_GOOGLE_PLUS_URL = "https://plus.google.com/u/0/b/105623299540915099930/105623299540915099930/about"

COURSEWARE_ENABLED = True
ENABLE_JASMINE = False

DISCUSSION_SETTINGS = {
    'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': True,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
# extrernal access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Toggles OAuth2 authentication provider
'ENABLE_OAUTH2_PROVIDER': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# Analytics experiments - shows instructor analytics tab in LMS instructor dashboard.
# Enabling this feature depends on installation of a separate analytics server.
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# Segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': True,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
# Enable URL that shows information about the status of variuous services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
|
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable legacy instructor dashboard
'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
# Is this an edX-owned domain? (used on instructor dashboard)
'IS_EDX_DOMAIN': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
# Allow use of the hint managment instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'
|
AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT':
|
pynamodb/PynamoDB
|
pynamodb/settings.py
|
Python
|
mit
| 2,509 | 0.004384 |
import importlib.util
import logging
import os
import warnings
from os import getenv
from typing import Any, Optional, Mapping, ClassVar
log = logging.getLogger(__name__)
# Global defaults for every connection setting; the optional override module
# (see OVERRIDE_SETTINGS_PATH below) may replace any of these.
default_settings_dict = {
    'connect_timeout_seconds': 15,
    'read_timeout_seconds': 30,
    'max_retry_attempts': 3,
    'base_backoff_ms': 25,
    'region': None,
    'max_pool_connections': 10,
    'extra_headers': None,
}

# Path of the optional site-wide override module; configurable through the
# PYNAMODB_CONFIG environment variable.
OVERRIDE_SETTINGS_PATH = getenv('PYNAMODB_CONFIG', '/etc/pynamodb/global_default_settings.py')
def _load_module(name, path):
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(module) # type: ignore
return module
# Load the site-wide override module, if one exists at OVERRIDE_SETTINGS_PATH;
# otherwise fall back to default_settings_dict only.
override_settings = {}
if os.path.isfile(OVERRIDE_SETTINGS_PATH):
    override_settings = _load_module('__pynamodb_override_settings__', OVERRIDE_SETTINGS_PATH)
    if hasattr(override_settings, 'session_cls') or hasattr(override_settings, 'request_timeout_seconds'):
        # Options dropped in newer releases; warn rather than fail.
        warnings.warn("The `session_cls` and `request_timeout_second` options are no longer supported")
    log.info('Override settings for pynamo available {}'.format(OVERRIDE_SETTINGS_PATH))
else:
    log.info('Override settings for pynamo not available {}'.format(OVERRIDE_SETTINGS_PATH))
    log.info('Using Default settings value')
def get_settings_value(key: str) -> Any:
    """
    Look up a configuration value: the override module wins, then the
    defaults table; unknown keys yield ``None``.
    """
    if hasattr(override_settings, key):
        return getattr(override_settings, key)
    return default_settings_dict.get(key)
class OperationSettings:
    """
    Settings applicable to an individual operation.

    When set, the settings in this object supersede the global and model settings.
    """

    default: ClassVar['OperationSettings']

    def __init__(self, *, extra_headers: Optional[Mapping[str, Optional[str]]] = None) -> None:
        """Create per-operation settings.

        :param extra_headers: optional extra HTTP headers, merged on top of
            headers derived from global settings or the models' Meta classes;
            map a header name to `None` to delete it from the request.
        """
        self.extra_headers = extra_headers


OperationSettings.default = OperationSettings()
|
pmghalvorsen/gramps_branch
|
gramps/gen/plug/report/_reportbase.py
|
Python
|
gpl-2.0
| 2,801 | 0.002499 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001 David R. Hampton
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.grampslocale import GrampsLocale
from gramps.gen.display.name import NameDisplay
#-------------------------------------------------------------------------
#
# Report
#
#-------------------------------------------------------------------------
class Report(object):
    """
    The Report base class. This is a base class for generating
    customized reports. It cannot be used as is, but it can be easily
    sub-classed to create a functional report generator.
    """
    def __init__(self, database, options_class, user):
        self.database = database
        self.options_class = options_class
        self.doc = options_class.get_document()
        creator = database.get_researcher().get_name()
        self.doc.set_creator(creator)
        output = options_class.get_output()
        if output:
            # An output target was supplied: this report owns the document
            # lifecycle (open here, close in end_report).
            self.standalone = True
            self.doc.open(options_class.get_output())
        else:
            self.standalone = False

    def begin_report(self):
        pass

    def set_locale(self, language):
        """
        Set the translator to one selected with
        stdoptions.add_localization_option().
        """
        if language == GrampsLocale.DEFAULT_TRANSLATION_STR:
            language = None
        locale = GrampsLocale(lang=language)
        self._ = locale.translation.gettext
        self._get_date = locale.get_date
        self._get_type = locale.get_type
        self._dd = locale.date_displayer
        self._name_display = NameDisplay(locale)  # a legacy/historical name
        return locale

    def write_report(self):
        pass

    def end_report(self):
        # Only close the document if this report opened it itself.
        if self.standalone:
            self.doc.close()
|
pombreda/fMBT
|
pythonshare/pythonshare/__init__.py
|
Python
|
lgpl-2.1
| 2,903 | 0.004478 |
# fMBT, free Model Based Testing tool
# Copyright (c) 2013, Intel Corporation.
#
# Author: [email protected]
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
import client
import server
import messages
import socket
import subprocess
import urlparse as _urlparse
from messages import Exec, Exec_rv
# Connection = client.Connection
default_port = 8089 # PY
class PythonShareError(Exception):
    """Base class for all pythonshare errors."""
    pass

class AuthenticationError(PythonShareError):
    pass

class RemoteExecError(PythonShareError):
    pass

class RemoteEvalError(PythonShareError):
    pass

class AsyncStatus(object):
    """Base class for asynchronous call status markers."""
    pass

class InProgress(AsyncStatus):
    pass
# Misc helpers for client and server
def _close(*args):
for a in args:
if a:
try:
a.close()
except (socket.error, IOError):
pass
def connection(hostspec, password=None):
    """Open a pythonshare connection described by *hostspec*.

    Supported URI schemes:
      socket://[user[:password]@]host[:port]  -- TCP connection
      shell://<command>                       -- talk over a subprocess' stdio
    A bare "host[:port]" is treated as socket://.
    """
    if not "://" in hostspec:
        hostspec = "socket://" + hostspec
    scheme, netloc, _, _, _ = _urlparse.urlsplit(hostspec)
    if scheme == "socket":
        # Parse URL
        if "@" in netloc:
            userinfo, hostport = netloc.split("@", 1)
        else:
            userinfo, hostport = "", netloc
        if ":" in userinfo:
            userinfo_user, userinfo_password = userinfo.split(":", 1)
        else:
            userinfo_user, userinfo_password = userinfo, None
        if ":" in hostport:
            host, port = hostport.split(":")
        else:
            host, port = hostport, default_port
        # If userinfo has been given, authenticate using it.
        # Allow forms
        #   socket://password@host:port
        #   socket://dontcare:password@host:port
        if password == None and userinfo:
            if userinfo_password:
                password = userinfo_password
            else:
                password = userinfo
        return client.Connection(host, int(port), password=password)
    elif scheme == "shell":
        # Spawn the command and use its stdin/stdout as the message channel.
        p = subprocess.Popen(hostspec[len("shell://"):],
                             shell=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        return client.Connection(p.stdout, p.stdin)
    else:
        raise ValueError('invalid URI "%s"' % (hostspec,))
|
AdrianRibao/notifintime
|
notifintime/admin.py
|
Python
|
bsd-3-clause
| 58 | 0.017241 |
# -
|
*- coding: utf-8 -*-
#from django.contrib import admin
| |
hclivess/Stallion
|
nuitka/Cryptodome/Hash/MD2.py
|
Python
|
gpl-3.0
| 6,130 | 0.001305 |
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
_raw_md2_lib = load_pycryptodome_raw_lib(
"Cryptodome.Hash._MD2",
"""
int md2_init(void **shaState);
int md2_destroy(void *shaState);
int md2_update(void *hs,
const uint8_t *buf,
size_t len);
int md2_digest(const void *shaState,
uint8_t digest[20]);
int md2_copy(const void *src, void *dst);
""")
class MD2Hash(object):
    """An MD2 hash object.
    Do not instantiate directly. Use the :func:`new` function.

    :ivar oid: ASN.1 Object ID
    :vartype oid: string

    :ivar block_size: the size in bytes of the internal message block,
                      input to the compression function
    :vartype block_size: integer

    :ivar digest_size: the size in bytes of the resulting hash
    :vartype digest_size: integer
    """

    # The size of the resulting hash in bytes.
    digest_size = 16
    # The internal block size of the hash algorithm in bytes.
    block_size = 64
    # ASN.1 Object ID
    oid = "1.2.840.113549.2.2"

    def __init__(self, data=None):
        state = VoidPointer()
        result = _raw_md2_lib.md2_init(state.address_of())
        if result:
            raise ValueError("Error %d while instantiating MD2"
                             % result)
        # SmartPointer frees the native state via md2_destroy at GC time.
        self._state = SmartPointer(state.get(),
                                   _raw_md2_lib.md2_destroy)
        if data:
            self.update(data)

    def update(self, data):
        """Continue hashing of a message by consuming the next chunk of data.

        Args:
            data (byte string/byte array/memoryview): The next chunk of the message being hashed.
        """
        result = _raw_md2_lib.md2_update(self._state.get(),
                                         c_uint8_ptr(data),
                                         c_size_t(len(data)))
        if result:
            raise ValueError("Error %d while instantiating MD2"
                             % result)

    def digest(self):
        """Return the **binary** (non-printable) digest of the message that has been hashed so far.

        :return: The hash digest, computed over the data processed so far.
                 Binary form.
        :rtype: byte string
        """
        bfr = create_string_buffer(self.digest_size)
        result = _raw_md2_lib.md2_digest(self._state.get(), bfr)
        if result:
            raise ValueError("Error %d while instantiating MD2"
                             % result)
        return get_raw_buffer(bfr)

    def hexdigest(self):
        """Return the **printable** digest of the message that has been hashed so far.

        :return: The hash digest, computed over the data processed so far.
                 Hexadecimal encoded.
        :rtype: string
        """
        return "".join(["%02x" % bord(x) for x in self.digest()])

    def copy(self):
        """Return a copy ("clone") of the hash object.

        The copy will have the same internal state as the original hash
        object.
        This can be used to efficiently compute the digests of strings that
        share a common initial substring.

        :return: A hash object of the same type
        """
        clone = MD2Hash()
        result = _raw_md2_lib.md2_copy(self._state.get(),
                                       clone._state.get())
        if result:
            raise ValueError("Error %d while copying MD2" % result)
        return clone

    def new(self, data=None):
        # NOTE(review): ignores self's own state; kept for API compatibility
        # with the other Cryptodome hash modules.
        return MD2Hash(data)
def new(data=None):
    """Create a new hash object.

    :parameter data:
        Optional. The very first chunk of the message to hash.
        It is equivalent to an early call to :meth:`MD2Hash.update`.
    :type data: byte string/byte array/memoryview

    :Return: A :class:`MD2Hash` hash object
    """
    # Routed through MD2Hash.new() for consistency with other hash modules.
    return MD2Hash().new(data)

# The size of the resulting hash in bytes.
digest_size = MD2Hash.digest_size

# The internal block size of the hash algorithm in bytes.
block_size = MD2Hash.block_size
|
eduNEXT/edunext-platform
|
import_shims/lms/grades/exceptions.py
|
Python
|
agpl-3.0
| 374 | 0.008021 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from i
|
mport_shims.warn import warn_deprecated_import
warn_deprecated_import('grades.exceptions', 'lms.djangoapps.grades.exceptions')
from lms.djangoapps.grades.
|
exceptions import *
|
Weatherlyzer/weatherlyzer
|
base/cron.py
|
Python
|
mit
| 264 | 0.003788 |
import kronos
import random


@kronos.register('0 0 * * *')
def complain():
    """Daily (midnight) cron job that prints a canned complaint.

    Note: the original used the Python 2 ``print`` statement; the call
    syntax below prints the same single string under both Python 2 and 3.
    """
    complaints = [
        "I forgot to migrate our applications's cron jobs to our new server! Darn!",
        "I'm out of complaints! Damnit!"
    ]
    print(random.choice(complaints))
|
salvacarrion/orange3-recommendation
|
orangecontrib/recommendation/widgets/__init__.py
|
Python
|
bsd-2-clause
| 2,082 | 0.001921 |
import sysconfig
# Category metadata.
# Category icon show in the menu
ICON = "icons/star2.svg"
# Background color
|
for category background in menu
# and widget icon background in workflow.
BACKGROUND = "light-blue"
# Location of widget help files.
WIDGET_HELP_PATH = (
# No local documentation (There are problems with it, so Orange3 widgets
# usually don't use it)
# Local documentation. This fake line is needed to access to the online
# documentation
("{DEVELOP_ROOT}/doc/build/htmlhelp/index.html", None),
# Online documentation url, used when the local documen
|
tation is not available.
# Url should point to a page with a section Widgets. This section should
# includes links to documentation pages of each widget. Matching is
# performed by comparing link caption to widget name.
# IMPORTANT TO PUT THE LAST SLASH '/'
("http://orange3-recommendation.readthedocs.io/en/latest/", "")
)
"""
***************************************************************************
************************** CREDITS FOR THE ICONS **************************
***************************************************************************
- 'star.svg' icon made by [Freepik] from [www.flaticon.com]
- 'starred-list' icon made by [Freepik] from [www.flaticon.com]
- 'customer.svg' icon made by [Freepik] from [www.flaticon.com]
- 'stars.svg' icon made by [ Alfredo Hernandez] from [www.flaticon.com]
- 'star2.svg' icon made by [EpicCoders] from [www.flaticon.com]
- 'brismf.svg' icon made by [Freepik] from [www.flaticon.com]
- 'manager.svg' icon made by [Freepik] from [www.flaticon.com]
- 'ranking.svg' icon made by [Freepik] from [www.flaticon.com]
- 'candidates-ranking-graphic.svg' icon made by [Freepik] from [www.flaticon.com]
- 'trustsvd.svg' icon made by [Zurb] from [www.flaticon.com]
- 'organization.svg' icon made by [Freepik] from [www.flaticon.com]
- 'task.svg' icon made by [Freepik] from [www.flaticon.com]
- 'list.svg' icon made by [Freepik] from [www.flaticon.com]
- 'ranking.svg' icon made by [Freepik] from [www.flaticon.com]
"""
|
shakcho/Indic-language-ngram-viewer
|
demo.py
|
Python
|
mit
| 120 | 0.033333 |
a = "nabb jasj
|
jjs, jjsajdhh kjkda jj"
a1 = a.split(",")
for i in range(0,len(a1)):
print (len(a1[i].s
|
plit()))
|
obulpathi/poppy
|
poppy/transport/pecan/controllers/v1/services.py
|
Python
|
apache-2.0
| 9,877 | 0 |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import uuid
from oslo_config import cfg
import pecan
from pecan import hooks
from poppy.common import errors
from poppy.common import uri
from poppy.common import util
from poppy.transport.pecan.controllers import base
from poppy.transport.pecan import hooks as poppy_hooks
from poppy.transport.pecan.models.response import link
from poppy.trans
|
port.pecan.models.response import service as resp_service_model
from poppy.transport.validators import helpers
from poppy.transport.validators.schemas import service
from poppy.transport.validators.stoplight import decorators
from poppy.transport.validators.stoplight import exceptions
from popp
|
y.transport.validators.stoplight import helpers as stoplight_helpers
from poppy.transport.validators.stoplight import rule
LIMITS_OPTIONS = [
cfg.IntOpt('max_services_per_page', default=20,
help='Max number of services per page for list services'),
]
LIMITS_GROUP = 'drivers:transport:limits'
class ServiceAssetsController(base.Controller, hooks.HookController):
__hooks__ = [poppy_hooks.Context(), poppy_hooks.Error()]
@pecan.expose('json')
@decorators.validate(
service_id=rule.Rule(
helpers.is_valid_service_id(),
helpers.abort_with_message)
)
def delete(self, service_id):
purge_url = pecan.request.GET.get('url', '/*')
purge_all = pecan.request.GET.get('all', False)
hard = pecan.request.GET.get('hard', 'True')
if purge_url:
try:
purge_url.encode('ascii')
except (UnicodeDecodeError, UnicodeEncodeError):
pecan.abort(400, detail='non ascii character present in url')
if hard and hard.lower() == 'false':
hard = 'False'
if hard and hard.lower() == 'true':
hard = 'True'
try:
hard = ast.literal_eval(hard)
except ValueError:
pecan.abort(400, detail='hard can only be set to True or False')
if hard not in [True, False]:
pecan.abort(400, detail='hard can only be set to True or False')
purge_all = (
True if purge_all and purge_all.lower() == 'true' else False)
if purge_all and purge_url != '/*':
pecan.abort(400, detail='Cannot provide all=true '
'and a url at the same time')
services_controller = self._driver.manager.services_controller
try:
services_controller.purge(self.project_id, service_id, hard,
purge_url)
except errors.ServiceStatusNotDeployed as e:
pecan.abort(400, detail=str(e))
except LookupError as e:
pecan.abort(404, detail=str(e))
service_url = str(
uri.encode(u'{0}/v1.0/services/{1}'.format(
pecan.request.host_url,
service_id)))
return pecan.Response(None, 202, headers={"Location": service_url})
class ServicesController(base.Controller, hooks.HookController):
__hooks__ = [poppy_hooks.Context(), poppy_hooks.Error()]
def __init__(self, driver):
super(ServicesController, self).__init__(driver)
self._conf = driver.conf
self._conf.register_opts(LIMITS_OPTIONS, group=LIMITS_GROUP)
self.limits_conf = self._conf[LIMITS_GROUP]
self.max_services_per_page = self.limits_conf.max_services_per_page
# Add assets controller here
# need to initialize a nested controller with a parameter driver,
# so added it in __init__ method.
# see more in: http://pecan.readthedocs.org/en/latest/rest.html
self.__class__.assets = ServiceAssetsController(driver)
@pecan.expose('json')
def get_all(self):
marker = pecan.request.GET.get('marker', None)
limit = pecan.request.GET.get('limit', 10)
try:
limit = int(limit)
if limit <= 0:
pecan.abort(400, detail=u'Limit should be greater than 0')
if limit > self.max_services_per_page:
error = u'Limit should be less than or equal to {0}'.format(
self.max_services_per_page)
pecan.abort(400, detail=error)
except ValueError:
error = (u'Limit should be an integer greater than 0 and less'
u' or equal to {0}'.format(self.max_services_per_page))
pecan.abort(400, detail=error)
try:
if marker is not None:
marker = str(uuid.UUID(marker))
except ValueError:
pecan.abort(400, detail="Marker must be a valid UUID")
services_controller = self._driver.manager.services_controller
service_resultset = services_controller.list(
self.project_id, marker, limit)
results = [
resp_service_model.Model(s, self)
for s in service_resultset]
links = []
if len(results) >= limit:
links.append(
link.Model(u'{0}/services?marker={1}&limit={2}'.format(
self.base_url,
results[-1]['id'],
limit),
'next'))
return {
'links': links,
'services': results
}
@pecan.expose('json')
@decorators.validate(
service_id=rule.Rule(
helpers.is_valid_service_id(),
helpers.abort_with_message)
)
def get_one(self, service_id):
services_controller = self._driver.manager.services_controller
try:
service_obj = services_controller.get(
self.project_id, service_id)
except ValueError:
pecan.abort(404, detail='service %s could not be found' %
service_id)
# convert a service model into a response service model
return resp_service_model.Model(service_obj, self)
@pecan.expose('json')
@decorators.validate(
request=rule.Rule(
helpers.json_matches_service_schema(
service.ServiceSchema.get_schema("service", "POST")),
helpers.abort_with_message,
stoplight_helpers.pecan_getter))
def post(self):
services_controller = self._driver.manager.services_controller
service_json_dict = json.loads(pecan.request.body.decode('utf-8'))
service_id = None
try:
service_obj = services_controller.create(self.project_id,
self.auth_token,
service_json_dict)
service_id = service_obj.service_id
except LookupError as e: # error handler for no flavor
pecan.abort(400, detail=str(e))
except ValueError as e: # error handler for existing service name
pecan.abort(400, detail=str(e))
service_url = str(
uri.encode(u'{0}/v1.0/services/{1}'.format(
pecan.request.host_url,
service_id)))
return pecan.Response(None, 202, headers={"Location": service_url})
@pecan.expose('json')
@decorators.validate(
service_id=rule.Rule(
helpers.is_valid_service_id(),
helpers.abort_with_message)
)
def delete(self, service_id):
services_controller = self._driver.manager.services_controller
try:
services_controller.delete(self.project_id, service_id)
except LookupError as e:
pecan.abort(404, de
|
ksmit799/Toontown-Source
|
toontown/trolley/Trolley.py
|
Python
|
mit
| 7,361 | 0.005434 |
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.directnotify import DirectNotifyGlobal
class Trolley(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('Trolley')
def __init__(self, safeZone, parentFSM, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.fsm = ClassicFSM.ClassicFSM('Trolley', [
State.State('start',
self.enterStart,
self.exitStart,
['requestBoard',
'trolleyHFA',
'trolleyTFA']),
State.State('trolleyHFA',
self.enterTrolleyHFA,
self.exitTrolleyHFA,
['final']),
State.State('trolleyTFA',
self.enterTrolleyTFA,
self.exitTrolleyTFA,
['final']),
State.State('requestBoard',
self.enterRequestBoard,
self.exitRequestBoard,
['boarding']),
State.State('boarding',
self.enterBoarding,
self.exitBoarding,
['boarded']),
State.State('boarded',
self.enterBoarded,
self.exitBoarded,
['requestExit',
'trolleyLeaving',
'final']),
State.State('requestExit',
self.enterRequestExit,
self.exitRequestExit,
['exiting',
'trolleyLeaving']),
State.State('trolleyLeaving',
self.enterTrolleyLeaving,
self.exitTrolleyLeaving,
['final']),
State.State('exiting',
self.enterExiting,
self.exitExiting,
['final']),
State.State('final',
self.enterFinal,
self.exitFinal,
['start'])],
'start', 'final')
self.parentFSM = parentFSM
return None
def load(self):
self.parentFSM.getStateNamed('trolley').addChild(self.fsm)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**//InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
def unload(self):
self.parentFSM.getStateNamed('trolley').removeChild(self.fsm)
del self.fsm
del self.parentFSM
self.buttonModels.removeNode()
del self.buttonModels
del self.upButton
del self.downButton
del self.rolloverButton
def enter(self):
self.fsm.enterInitialState()
if base.localAvatar.hp > 0:
messenger.send('enterTrolleyOK')
self.fsm.request('requestBoard')
else:
self.fsm.request('trolleyHFA')
return None
def exit(self):
self.ignoreAll()
return None
def enterStart(self):
return None
def exitStart(self):
return None
def enterTrolleyHFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyHFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyHFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def enterTrolleyTFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyTFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyTFA(self):
self.ignore('noTrolleyA
|
ck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def __handleNoTrolleyAck(self):
ntbDoneStatus = self.noTrolleyBox.doneStatus
if ntbDoneStatus == 'ok':
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
else:
self.no
|
tify.error('Unrecognized doneStatus: ' + str(ntbDoneStatus))
def enterRequestBoard(self):
return None
def handleRejectBoard(self):
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
def exitRequestBoard(self):
return None
def enterBoarding(self, nodePath):
camera.wrtReparentTo(nodePath)
self.cameraBoardTrack = LerpPosHprInterval(camera, 1.5, Point3(-35, 0, 8), Point3(-90, 0, 0))
self.cameraBoardTrack.start()
return None
def exitBoarding(self):
self.ignore('boardedTrolley')
return None
def enterBoarded(self):
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: RIDETHETROLLEY: Ride the Trolley')
self.enableExitButton()
return None
def exitBoarded(self):
self.cameraBoardTrack.finish()
self.disableExitButton()
return None
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.TrolleyHopOff, text_fg=(1, 1, 0.65, 1), text_pos=(0, -0.23), text_scale=TTLocalizer.TexitButton, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0, 0, 0.8), scale=0.15, command=lambda self = self: self.fsm.request('requestExit'))
return
def disableExitButton(self):
self.exitButton.destroy()
def enterRequestExit(self):
messenger.send('trolleyExitButton')
return None
def exitRequestExit(self):
return None
def enterTrolleyLeaving(self):
camera.lerpPosHprXYZHPR(0, 18.55, 3.75, -180, 0, 0, 3, blendType='easeInOut', task='leavingCamera')
self.acceptOnce('playMinigame', self.handlePlayMinigame)
return None
def handlePlayMinigame(self, zoneId, minigameId):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
doneStatus = {}
doneStatus['mode'] = 'minigame'
doneStatus['zoneId'] = zoneId
doneStatus['minigameId'] = minigameId
messenger.send(self.doneEvent, [doneStatus])
def exitTrolleyLeaving(self):
self.ignore('playMinigame')
taskMgr.remove('leavingCamera')
return None
def enterExiting(self):
return None
def handleOffTrolley(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
return None
def exitExiting(self):
return None
def enterFinal(self):
return None
def exitFinal(self):
return None
|
t3dev/odoo
|
addons/website_sale/models/product.py
|
Python
|
gpl-3.0
| 20,625 | 0.003782 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.addons import decimal_precision as dp
from odoo.addons.website.models import ir_http
from odoo.tools.translate import html_translate
from odoo.osv import expression
class ProductStyle(models.Model):
_name = "product.style"
_description = 'Product Style'
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
def _default_website(self):
""" Find the first company's website, if there is one. """
company_id = self.env.user.company_id.id
if self._context.get('default_company_id'):
company_id = self._context.get('default_company_id')
domain = [('company_id', '=', company_id)]
return self.env['website'].search(domain, limit=1)
website_id = fields.Many2one('website', string="Website", default=_default_website)
code = fields.Char(string='E-commerce Promotional Code', groups="base.group_user")
selectable = fields.Boolean(help="Allow the end user to choose this price list")
def clear_cache(self):
# website._get_pl_partner_order() is cached to avoid to recompute at each request the
# list of available pricelists. So, we need to invalidate the cache when
# we change the config of website price list to force to recompute.
website = self.env['website']
website._get_pl_partner_order.clear_cache(website)
@api.model
def create(self, data):
if data.get('company_id') and not data.get('website_id'):
# l10n modules install will change the company currency, creating a
# pricelist for that currency. Do not use user's company in that
# case as module install are done with OdooBot (company 1)
self = self.with_context(default_company_id=data['company_id'])
res = super(ProductPricelist, self).create(data)
self.clear_cache()
return res
@api.multi
def write(self, data):
res = super(ProductPricelist, self).write(data)
self.clear_cache()
return res
@api.multi
def unlink(self):
res = super(ProductPricelist, self).unlink()
self.clear_cache()
return res
def _get_partner_pricelist_multi_search_domain_hook(self):
domain = super(ProductPricelist, self)._get_partner_pricelist_multi_search_domain_hook()
website = ir_http.get_request_website()
if website:
domain += self._get_website_pricelists_domain(website.id)
return domain
def _get_partner_pricelist_multi_filter_hook(self):
res = super(ProductPricelist, self)._get_partner_pricelist_multi_filter_hook()
website = ir_http.get_request_website()
if website:
res = res.filtered(lambda pl: pl._is_available_on_website(website.id))
return res
@api.multi
def _is_available_on_website(self, website_id):
""" To be able to be used on a website, a pricelist should either:
- Have its `website_id` set to current website (specific pricelist).
- Have no `website_id` set and should be `selectable` (generic pricelist)
or should have a `code` (generic promotion).
Note: A pricelist without a website_id, not selectable and without a
code is a backend pricelist.
Change in this method should be reflected in `_get_website_pricelists_domain`.
"""
self.ensure_one()
return self.website_id.id == website_id or (not self.website_id and (self.selectable or self.sudo().code))
def _get_website_pricelists_domain(self, website_id):
''' Check above `_is_available_on_website` for explanation.
Change in this method should be reflected in `_is_available_on_website`.
'''
return [
'|', ('website_id', '=', website_id),
'&', ('website_id', '=', False),
'|', ('selectable', '=', True), ('code', '!=', False),
]
def _get_partner_pricelist_multi(self, partner_ids, company_id=None):
''' If `property_product_pricelist` is read from website, we should use
the website's company and not the user's one.
Passing a `company_id` to super will avoid using the current user's
company.
'''
website = ir_http.get_request_website()
if not company_id and website:
company_id = website.company_id.id
return super(ProductPricelist, self)._get_partner_pricelist_multi(partner_ids, company_id)
@api.onchange('company_id')
def _onchange_company_id(self):
''' Show only the company's website '''
domain = self.company_id and [('company_id', '=', self.company_id.id)] or []
return {'domain': {'website_id': domain}}
@api.constrains('company_id', 'website_id')
def _check_websites_in_company(self):
'''Prevent misconfiguration multi-website/multi-companies.
If the record has a company, the website should be from that company.
'''
for record in self.filtered(lambda pl: pl.website_id and pl.company_id):
if record.website_id.company_id != record.company_id:
raise ValidationError(_("Only the company's websites are allowed. \
Leave the Company field empty or select a website from that company."))
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata", "website.multi.mixin"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(help="This field holds the image used as image for the category, limited to 1024x1024px.")
website_description = fields.Html('Category Description', sanitize_attributes=False, translate=html_translate)
image_medium = fields.Binary(string='Medium-sized image',
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image',
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You canno
|
t create recursive categories.'))
|
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
|
mnahm5/django-estore
|
Lib/site-packages/awscli/customizations/emr/ssh.py
|
Python
|
mit
| 7,731 | 0 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import subprocess
import tempfile
from awscli.customizations.emr import constants
from awscli.customizations.emr import emrutils
from awscli.customizations.emr import sshutils
from awscli.customizations.emr.command import Command
KEY_PAIR_FILE_HELP_TEXT = '\nA value for the variable Key Pair File ' \
'can be set in the AWS CLI config file using the ' \
'"aws configure set emr.key_pair_file <value>" command.\n'
class Socks(Command):
NAME = 'socks'
DESCRIPTION = ('Create a socks tunnel on port 8157 from your machine '
'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
]
def _run_main_command(self, parsed_args, parsed_globals):
try:
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-ND', '8157', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns]
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-N', '-D',
'8157']
print(' '.join(command))
rc = subprocess.call(command)
return rc
except KeyboardInterrupt:
print('Disabling Socks Tunnel.')
return 0
class SSH(Command):
NAME = 'ssh'
DESCRIPTION = ('SSH into master node of the cluster.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'command', 'help_text': 'Command to execute on Master Node'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns, '-t']
if parsed_args.command:
command.append(parsed_args.command)
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-t']
if parsed_args.command:
f.write(parsed_args.command)
f.write('\nread -n1 -r -p "Command completed. Press any key."')
command.append('-m')
command.append(f.name)
f.close()
print(' '.join(command))
rc = subprocess.call(command)
os.remove(f.name)
return rc
class Put(Command):
NAME = 'put'
DESCRIPTION = ('Put file onto the master node.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to put file onto'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on local machine'},
{'name': 'dest', 'help_text': 'Destination file path on remote host'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no',
'-i', parsed_args.key_pair_file, parsed_args.src,
constants.SSH_USER + '@' + master_dns]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
parsed_args.src, constants.SSH_USER + '@' + master_dns]
# if the instance is not te
|
rminated
if parsed_args.dest:
command[-1] = command[-1] + ":" + parsed_args.dest
else:
command[-1] = comm
|
and[-1] + ":" + parsed_args.src.split('/')[-1]
print(' '.join(command))
rc = subprocess.call(command)
return rc
class Get(Command):
NAME = 'get'
DESCRIPTION = ('Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to get file from'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on remote host'},
{'name': 'dest', 'help_text': 'Destination file path on your machine'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no', '-i',
parsed_args.key_pair_file, constants.SSH_USER + '@' +
master_dns + ':' + parsed_args.src]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns + ':' +
parsed_args.src]
if parsed_args.dest:
command.append(parsed_args.dest)
else:
command.append(parsed_args.src.split('/')[-1])
print(' '.join(command))
rc = subprocess.call(command)
return rc
|
baby5/Django-httpbin
|
httpbin/bin/helpers.py
|
Python
|
mit
| 1,339 | 0.004481 |
from functools import wraps
import json
from django.http import JsonResponse, HttpResponseNotAllowed
from django.utils.decorators import available_attrs
def methods(method_list):
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kw):
if request.method not in method_list:
return HttpResponseNotAllowed(method_list, 'Method Not Allow')
return func(request, *args, **kw)
return inner
return decorator
def get_headers(request):
headers = {}
for key, value in request.META.iteritems():#u
|
se iterator
if key.startswith('HTTP_'):
headers['-'.join(key.split('_')[1:]).title()] = value
elif key.startswith('CONTENT'):
headers['-'.join(key.split('_')).title()] = value
return headers
def no_get(request):
rep_dict = {
'args': request.GET,
'data': request.body,
'files': request.FILES,
'form': request.POST,
'headers': get_headers(re
|
quest),
'json': None,
'origin': request.META['REMOTE_ADDR'],
'url': request.build_absolute_uri(),
}
if 'json' in request.content_type:
try:
rep_dict['json'] = json.loads(request.body)
except:
pass
return rep_dict
|
komuW/sewer
|
sewer/dns_providers/tests/test_rackspace.py
|
Python
|
mit
| 8,481 | 0.003184 |
from unittest import mock
from unittest import TestCase
from sewer.dns_providers.rackspace import RackspaceDns
from . import test_utils
class TestRackspace(TestCase):
"""
"""
def setUp(self):
self.domain_name = "example.com"
self.domain_dns_value = "mock-domain_dns_value"
self.RACKSPACE_USERNAME = "mock_username"
self.RACKSPACE_API_KEY = "mock-api-key"
self.RACKSPACE_API_TOKEN = "mock-api-token"
with mock.patch("requests.post") as mock_requests_post, mock.patch(
"requests.get"
) as mock_requests_get, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.get_rackspace_credentials"
) as mock_get_credentials, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.find_dns_zone_id", autospec=True
) as mock_find_dns_zone_id:
mock_requests_post.return_value = test_utils.MockResponse()
mock_requests_get.return_value = test_utils.MockResponse()
mock_get_credentials.return_value = "mock-api-token", "http://example.com/"
mock_find_dns_zone_id.return_value = "mock_zone_id"
self.dns_class = RackspaceDns(
RACKSPACE_USERNAME=self.RACKSPACE_USERNAME, RACKSPACE_API_KEY=self.RACKSPACE_API_KEY
)
def tearDown(self):
pass
def test_find_dns_zone_id(self):
with mock.patch("requests.get") as mock_requests_get:
# see: https://developer.rackspace.com/docs/cloud-dns/v1/api-reference/domains/
mock_dns_zone_id = 1_239_932
mock_requests_content = {
"domains": [
{
"name": self.domain_name,
"id": mock_dns_zone_id,
"comment": "Optional domain comment...",
"updated": "2011-06-24T01:23:15.000+0000",
"accountId": 1234,
"emailAddress": "[email protected]",
"created": "2011-06-24T01:12:51.000+0000",
}
]
}
mock_requests_get.return_value = test_utils.MockResponse(200, mock_requests_content)
dns_zone_id = self.dns_class.find_dns_zone_id(self.domain_name)
self.assertEqual(dns_
|
zone_id, mock_dns_zone_id)
self.assertTrue(mock_requests_get.called)
def test_find_dns_record_id(self):
with mock.patch("requests.get") as mock_requests_get, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.find_dns_zone_id"
) as mock_find_dns_zone_id:
# see: https://developer.rackspace.com/docs/cloud-dns/v1/api-reference/records/
mock_dns_record_id = "A-123
|
4"
mock_requests_content = {
"totalEntries": 1,
"records": [
{
"name": self.domain_name,
"id": mock_dns_record_id,
"type": "A",
"data": self.domain_dns_value,
"updated": "2011-05-19T13:07:08.000+0000",
"ttl": 5771,
"created": "2011-05-18T19:53:09.000+0000",
}
],
}
mock_requests_get.return_value = test_utils.MockResponse(200, mock_requests_content)
mock_find_dns_zone_id.return_value = 1_239_932
dns_record_id = self.dns_class.find_dns_record_id(
self.domain_name, self.domain_dns_value
)
self.assertEqual(dns_record_id, mock_dns_record_id)
self.assertTrue(mock_requests_get.called)
self.assertTrue(mock_find_dns_zone_id.called)
def test_delete_dns_record_is_not_called_by_create_dns_record(self):
    """Creating a DNS record must never trigger a record deletion."""
    with mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.find_dns_zone_id"
    ) as mock_find_dns_zone_id, mock.patch("requests.post") as mock_requests_post, mock.patch(
        "requests.get"
    ) as mock_requests_get, mock.patch(
        "requests.delete"
    ) as mock_requests_delete, mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.delete_dns_record"
    ) as mock_delete_dns_record, mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.poll_callback_url"
    ) as mock_poll_callback_url:
        mock_find_dns_zone_id.return_value = "mock_zone_id"
        # GET/DELETE and delete_dns_record share a generic success response.
        mock_requests_get.return_value = (
            mock_requests_delete.return_value
        ) = mock_delete_dns_record.return_value = test_utils.MockResponse()
        # Rackspace answers record creation asynchronously: HTTP 202 plus a
        # callback URL that the client polls for completion.
        mock_requests_content = {"callbackUrl": "http://example.com/callbackUrl"}
        mock_requests_post.return_value = test_utils.MockResponse(202, mock_requests_content)
        mock_poll_callback_url.return_value = 1
        self.dns_class.create_dns_record(
            domain_name=self.domain_name, domain_dns_value=self.domain_dns_value
        )
        self.assertFalse(mock_delete_dns_record.called)
def test_rackspace_is_called_by_create_dns_record(self):
    """create_dns_record should POST the auth header and TXT value to Rackspace."""
    with mock.patch("requests.post") as mock_requests_post, mock.patch(
        "requests.get"
    ) as mock_requests_get, mock.patch("requests.delete") as mock_requests_delete, mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.delete_dns_record"
    ) as mock_delete_dns_record, mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.find_dns_zone_id"
    ) as mock_find_dns_zone_id, mock.patch(
        "sewer.dns_providers.rackspace.RackspaceDns.poll_callback_url"
    ) as mock_poll_callback_url:
        # Rackspace answers record creation asynchronously: HTTP 202 plus a
        # callback URL that the client polls for completion.
        mock_requests_content = {"callbackUrl": "http://example.com/callbackUrl"}
        mock_requests_post.return_value = test_utils.MockResponse(202, mock_requests_content)
        mock_requests_get.return_value = (
            mock_requests_delete.return_value
        ) = mock_delete_dns_record.return_value = test_utils.MockResponse()
        mock_find_dns_zone_id.return_value = "mock_zone_id"
        mock_poll_callback_url.return_value = 1
        self.dns_class.create_dns_record(
            domain_name=self.domain_name, domain_dns_value=self.domain_dns_value
        )
        # Verify both the auth headers and the record payload of the POST.
        expected = {
            "headers": {"X-Auth-Token": "mock-api-token", "Content-Type": "application/json"},
            "data": self.domain_dns_value,
        }
        self.assertDictEqual(expected["headers"], mock_requests_post.call_args[1]["headers"])
        self.assertEqual(
            expected["data"], mock_requests_post.call_args[1]["json"]["records"][0]["data"]
        )
def test_rackspace_is_called_by_delete_dns_record(self):
with mock.patch("requests.post") as mock_requests_post, mock.patch(
"requests.get"
) as mock_requests_get, mock.patch("requests.delete") as mock_requests_delete, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.find_dns_zone_id"
) as mock_find_dns_zone_id, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.poll_callback_url"
) as mock_poll_callback_url, mock.patch(
"sewer.dns_providers.rackspace.RackspaceDns.find_dns_record_id"
) as mock_find_dns_record_id:
mock_requests_content = {"callbackUrl": "http://example.com/callbackUrl"}
mock_requests_post.return_value = (
mock_requests_get.return_value
) = test_utils.MockResponse()
mock_requests_delete.return_value = test_utils.MockResponse(202, mock_requests_content)
mock_find_dns_zone_id.return_value = "mock_zone_id"
mock_poll_callback_url.return_value = 1
mock_find_dns_record_id.return_value = "mock_record_id"
self.dns_class.delete_dns_record(
domain_name=self.domain_name, domain_dns_value=self.domain_dns_value
)
expected = {
"headers": {"X-Auth-Token": "mock-api-token", "Content-Type": "applicatio
|
tchellomello/home-assistant
|
homeassistant/components/isy994/__init__.py
|
Python
|
apache-2.0
| 8,442 | 0.000711 |
"""Support the ISY-994 controllers."""
import asyncio
from functools import partial
from typing import Optional
from urllib.parse import urlparse
from pyisy import ISY
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .const import (
_LOGGER,
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_VAR_SENSOR_STRING,
DOMAIN,
ISY994_ISY,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
MANUFACTURER,
SUPPORTED_PLATFORMS,
SUPPORTED_PROGRAM_PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .helpers import _categorize_nodes, _categorize_programs, _categorize_variables
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(
CONF_IGNORE_STRING, default=DEFAULT_IGNORE_STRING
): cv.string,
vol.Optional(
CONF_SENSOR_STRING, default=DEFAULT_SENSOR_STRING
): cv.string,
vol.Optional(
CONF_VAR_SENSOR_STRING, default=DEFAULT_VAR_SENSOR_STRING
): cv.string,
vol.Required(
CONF_RESTORE_LIGHT_STATE, default=DEFAULT_RESTORE_LIGHT_STATE
): bool,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the isy994 integration from YAML."""
    hass.data.setdefault(DOMAIN, {})
    yaml_config: Optional[ConfigType] = config.get(DOMAIN)
    if not yaml_config:
        # Nothing configured via YAML; config-entry setup handles the rest.
        return True

    existing_entry = _async_find_matching_config_entry(hass)
    if existing_entry:
        # Already imported once: refresh the entry with the latest YAML values.
        hass.config_entries.async_update_entry(existing_entry, data=dict(yaml_config))
        return True

    # First import: hand the YAML data over to the config-entry import flow.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=dict(yaml_config),
        )
    )
    return True
@callback
def _async_find_matching_config_entry(hass):
    """Return the config entry previously imported from YAML, or None."""
    return next(
        (
            entry
            for entry in hass.config_entries.async_entries(DOMAIN)
            if entry.source == config_entries.SOURCE_IMPORT
        ),
        None,
    )
async def async_setup_entry(
    hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
    """Set up the ISY 994 integration.

    Connects to the ISY controller described by *entry*, categorizes its
    nodes/programs/variables into hass.data, forwards setup to the supported
    platforms, and starts the ISY event stream.  Returns False when the host
    URL scheme is invalid or the controller connection fails.
    """
    # As there currently is no way to import options from yaml
    # when setting up a config entry, we fallback to adding
    # the options to the config entry and pull them out here if
    # they are missing from the options
    _async_import_options_from_data_if_missing(hass, entry)
    hass.data[DOMAIN][entry.entry_id] = {}
    hass_isy_data = hass.data[DOMAIN][entry.entry_id]
    # Per-platform buckets the platform setups will consume.
    hass_isy_data[ISY994_NODES] = {}
    for platform in SUPPORTED_PLATFORMS:
        hass_isy_data[ISY994_NODES][platform] = []
    hass_isy_data[ISY994_PROGRAMS] = {}
    for platform in SUPPORTED_PROGRAM_PLATFORMS:
        hass_isy_data[ISY994_PROGRAMS][platform] = []
    hass_isy_data[ISY994_VARIABLES] = []
    isy_config = entry.data
    isy_options = entry.options
    # Required
    user = isy_config[CONF_USERNAME]
    password = isy_config[CONF_PASSWORD]
    host = urlparse(isy_config[CONF_HOST])
    # Optional
    tls_version = isy_config.get(CONF_TLS_VER)
    ignore_identifier = isy_options.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING)
    sensor_identifier = isy_options.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING)
    variable_identifier = isy_options.get(
        CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
    )
    # Derive https flag and default port from the configured URL scheme.
    if host.scheme == "http":
        https = False
        port = host.port or 80
    elif host.scheme == "https":
        https = True
        port = host.port or 443
    else:
        _LOGGER.error("isy994 host value in configuration is invalid")
        return False
    # Connect to ISY controller.  Run the constructor in the executor since
    # it connects synchronously.
    isy = await hass.async_add_executor_job(
        partial(
            ISY,
            host.hostname,
            port,
            username=user,
            password=password,
            use_https=https,
            tls_ver=tls_version,
            log=_LOGGER,
            webroot=host.path,
        )
    )
    if not isy.connected:
        return False
    _categorize_nodes(hass_isy_data, isy.nodes, ignore_identifier, sensor_identifier)
    _categorize_programs(hass_isy_data, isy.programs)
    _categorize_variables(hass_isy_data, isy.variables, variable_identifier)
    # Dump ISY Clock Information. Future: Add ISY as sensor to Hass with attrs
    _LOGGER.info(repr(isy.clock))
    hass_isy_data[ISY994_ISY] = isy
    await _async_get_or_create_isy_device_in_registry(hass, entry, isy)
    # Load platforms for the devices in the ISY controller that we support.
    for platform in SUPPORTED_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    def _start_auto_update() -> None:
        """Start isy auto update."""
        _LOGGER.debug("ISY Starting Event Stream and automatic updates")
        isy.auto_update = True

    # Run in the executor -- NOTE(review): enabling auto_update appears to do
    # blocking work in pyisy; confirm against the pyisy docs.
    await hass.async_add_executor_job(_start_auto_update)
    undo_listener = entry.add_update_listener(_async_update_listener)
    hass_isy_data[UNDO_UPDATE_LISTENER] = undo_listener
    # Register Integration-wide Services:
    async_setup_services(hass)
    return True
async def _async_update_listener(hass: HomeAssistant, entry: config_entries.ConfigEntry):
    """Reload the config entry whenever its options are updated."""
    await hass.config_entries.async_reload(entry.entry_id)
@callback
def _async_import_options_from_data_if_missing(
    hass: HomeAssistant, entry: config_entries.ConfigEntry
):
    """Copy legacy options stored in entry.data into entry.options."""
    importable = (
        CONF_IGNORE_STRING,
        CONF_SENSOR_STRING,
        CONF_RESTORE_LIGHT_STATE,
    )
    # Only pull over values present in data but absent from options.
    missing = {
        option: entry.data[option]
        for option in importable
        if option not in entry.options and option in entry.data
    }
    if missing:
        hass.config_entries.async_update_entry(
            entry, options={**entry.options, **missing}
        )
async def _async_get_or_create_isy_device_in_registry(
hass: HomeAssistant, entry: config_ent
|
ries.ConfigEntry, isy
) -> None:
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_N
|
ETWORK_MAC, isy.configuration["uuid"])},
identifiers={(DOMAIN, isy.configuration["uuid"])},
manufacturer=MANUFACTURER,
name=isy.configuration["name"],
model=isy.configuration["model"],
sw_version=isy.configuration["firmware"],
)
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in SUPPORTED_PLATFORMS
]
)
)
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
isy = hass_isy_data[ISY994_ISY]
def _stop_auto_update() -> None:
"""Start isy auto update."""
_LOGGER.debug("ISY Stopping Event Stream and automatic u
|
carlsonp/kaggle-TrulyNative
|
processURLS_count.py
|
Python
|
gpl-3.0
| 2,502 | 0.031575 |
from __future__ import print_function
import re, os, sys, multiprocessing, zipfile, Queue
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from urlparse import urlparse
#https://pypi.python.org/pypi/etaprogress/
from etaprogress.progress import ProgressBar
#337304 total HTML files, some are actually NOT in either the training or testing set
#process_zips = ["./data/0.zip", "./data/1.zip", "./data/2.zip", "./data/3.zip", "./data/4.zip"]
process_zips = ["./data/0.zip"]
def parseFile(contents, filename, sponsored):
    """Extract outbound link hosts from one HTML document.

    Returns a list of the form [sponsored, filename, host1, host2, ...],
    where each host is the URL's netloc with a leading "www." stripped and
    non-alphanumeric/non-dot characters removed.
    """
    nodes = [sponsored, filename]
    # use lxml parser for faster speed
    cleaned = BeautifulSoup(contents, "lxml")
    for anchor in cleaned.findAll('a', href=True):
        if anchor['href'].startswith("http"):
            try:
                parsedurl = urlparse(anchor['href'])
                parsedurl = parsedurl.netloc.replace("www.", "", 1)
                # Raw string: '[^0-9a-zA-Z\.]+' was an invalid escape sequence.
                parsedurl = re.sub(r'[^0-9a-zA-Z.]+', '', parsedurl)
                nodes.append(parsedurl)
            except ValueError:
                # urlparse can raise ValueError on malformed IPv6 netlocs.
                print("IPv6 URL?")
    return nodes
def addNodes(nodes):
    """Append each entry of *nodes* to the shared list ``q``, skipping duplicates."""
    for node in nodes:
        if node not in q:
            q.append(node)
# Known labels for the training set, and the file list for prediction.
train = pd.read_csv("./data/train.csv", header=0, delimiter=",", quoting=3)
sample = pd.read_csv("./data/sampleSubmission.csv", header=0, delimiter=",", quoting=3)

print("Starting processing...")

# Flat, de-duplicated result list filled by the worker callbacks (addNodes).
q = []

for i, zipFile in enumerate(process_zips):
    archive = zipfile.ZipFile(zipFile, 'r')
    file_paths = zipfile.ZipFile.namelist(archive)
    bar = ProgressBar(len(file_paths), max_width=40)
    # Leave one core free for the main process ("or 1" covers single-core).
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1 or 1)
    for k, file_path in enumerate(file_paths):
        data = archive.read(file_path)
        openfile = file_path[2:]  # filename (strip the leading "./")
        # Training file: pass its known 'sponsored' label to the worker.
        sponsored = train.loc[train['file'] == openfile]
        if not sponsored.empty:
            pool.apply_async(parseFile, args = (data, openfile, int(sponsored['sponsored']), ), callback = addNodes)
        # Test file: label 2 is the "unknown / to be predicted" sentinel.
        testing = sample.loc[sample['file'] == openfile]
        if not testing.empty:
            pool.apply_async(parseFile, args = (data, openfile, 2, ), callback = addNodes)
        bar.numerator = k
        print("Folder:", i, bar, end='\r')
        sys.stdout.flush()
    pool.close()
    pool.join()
    print()

print("Size: ", len(q))
#print("Sponsored pages: ", G.out_degree("SPONSORED"
|
))
#print("Normal pages: ", G.out_degree("NOTSPONSORED"))
#if G.out_degree("TESTING") != 235917:
#print("Error, invalid number of testing nodes.")
#if G.out_degree("SPONSORED") + G.out_deg
|
ree("NOTSPONSORED") != 101107:
#print("Error, invalid number of training nodes.")
|
pmarks-net/dtella
|
dtella/common/core.py
|
Python
|
gpl-2.0
| 131,965 | 0.001523 |
"""
Dtella - Core P2P Module
Copyright (C) 2008 Dtella Labs (http://www.dtella.org)
Copyright (C) 2008 Paul Marks
$Id$
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import struct
import heapq
import time
import random
import bisect
import socket
from binascii import hexlify
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor, defer
from twisted.python.runtime import seconds
import twisted.internet.error
import dtella.local_config as local
import dtella.common.crypto
from dtella.common.util import (RandSet, dcall_discard, dcall_timeleft,
randbytes, validateNick, word_wrap, md5,
parse_incoming_info, get_version_string,
parse_dtella_tag, CHECK, SSLHACK_filter_flags)
from dtella.common.ipv4 import Ad, SubnetMatcher
from dtella.common.log import LOG
from zope.interface import implements
from zope.interface.verify import verifyClass
from dtella.common.interfaces import IDtellaNickNode
# Check for some non-fatal but noteworthy conditions.
def doWarnings():
    """Log warnings (via LOG) about suboptimal but non-fatal environment issues."""
    import twisted
    from twisted.python import versions
    if (twisted.version < versions.Version('twisted', 8, 0, 0)):
        LOG.warning("You should get Twisted 8 or later. Previous versions "
                    "have some bugs that affect Dtella.")
    try:
        import dtella.bridge
    except ImportError:
        # Don't warn about GMP for clients, because verifying a signature
        # is fast enough without it (~1ms on a Core2)
        pass
    else:
        # Bridge is present: signing matters, so check for fast math support.
        import Crypto.PublicKey
        try:
            import Crypto.PublicKey._fastmath
        except ImportError:
            LOG.warning("Your version of PyCrypto was compiled without "
                        "GMP (fastmath). Signing messages will be slower.")

# Run the checks once at import time.
doWarnings()
# Miscellaneous Exceptions
class BadPacketError(Exception):
    # NOTE(review): raised for malformed packets; raise sites are outside
    # this chunk -- confirm against the packet handlers.
    pass

class BadTimingError(Exception):
    pass

class BadBroadcast(Exception):
    pass

class Reject(Exception):
    pass

class NickError(Exception):
    # Raised for nick problems, e.g. the "collision" in NickManager.addNode.
    pass

class MessageCollisionError(Exception):
    pass
# How many seconds our node will last without incoming pings
ONLINE_TIMEOUT = 30.0
# How many seconds our node will stay online without a DC client
NO_CLIENT_TIMEOUT = 60.0 * 5
# Reconnect time range. Currently 10sec .. 15min
RECONNECT_RANGE = (10, 60*15)
NODE_EXPIRE_EXTEND = 15.0
PKTNUM_BUF = 20
# Status Flags
PERSIST_BIT = 0x1
# Ping Flags
IWANT_BIT = 0x01
GOTACK_BIT = 0x02
REQ_BIT = 0x04
ACK_BIT = 0x08
NBLIST_BIT = 0x10
OFFLINE_BIT = 0x20
# Broadcast Flags
REJECT_BIT = 0x1
# Ack flags
ACK_REJECT_BIT = 0x1
# Sync Flags
TIMEDOUT_BIT = 0x1
# Chat Flags
SLASHME_BIT = 0x1
NOTICE_BIT = 0x2
# ConnectToMe Flags
USE_SSL_BIT = 0x1
# ACK Modes
ACK_PRIVATE = 1
ACK_BROADCAST = 2
# Bridge topic change
CHANGE_BIT = 0x1
# Bridge Kick flags
RE
|
JOIN_BIT = 0x1
# Bridge general flags
MODERATED_BIT = 0x1
# Init response codes
CODE_IP_OK = 0
CODE_IP_FOREIGN = 1
CODE_IP_BANNED = 2
##############################################################################
class NickManager(object):
    """Tracks the mapping from lowercased nicks to their owning nodes."""

    def __init__(self, main):
        self.main = main
        self.nickmap = {}  # {nick.lower() -> Node}

    def getNickList(self):
        """Return the nicks of all registered nodes."""
        return [n.nick for n in self.nickmap.itervalues()]

    def lookupNick(self, nick):
        """Return the node owning *nick* (case-insensitive)."""
        # Might raise KeyError
        return self.nickmap[nick.lower()]

    def removeNode(self, n, reason):
        """Unregister node *n*'s nick, notifying the state observer."""
        # Only remove if n is actually the registered owner of its nick.
        try:
            if self.nickmap[n.nick.lower()] is not n:
                raise KeyError
        except KeyError:
            return
        del self.nickmap[n.nick.lower()]
        so = self.main.getStateObserver()
        if so:
            so.event_RemoveNick(n, reason)
        # Clean up nick-specific stuff
        if n.is_peer:
            n.nickRemoved(self.main)

    def addNode(self, n):
        """Register node *n* under its nick; raises NickError on collision."""
        if not n.nick:
            return
        lnick = n.nick.lower()
        if lnick in self.nickmap:
            raise NickError("collision")
        so = self.main.getStateObserver()
        if so:
            # Might raise NickError
            so.event_AddNick(n)
            so.event_UpdateInfo(n)
        self.nickmap[lnick] = n

    def setInfoInList(self, n, info):
        """Set node *n*'s info and push the change to the observer if it changed."""
        if not n.setInfo(info):
            # dcinfo hasn't changed, so there's nothing to send
            return
        # Look for this node in the nickmap
        try:
            if self.nickmap[n.nick.lower()] is not n:
                raise KeyError
        except KeyError:
            return
        # Push new dcinfo to dch/ircs
        so = self.main.getStateObserver()
        if so:
            so.event_UpdateInfo(n)
##############################################################################
class PeerHandler(DatagramProtocol):
# Panic rate limit for broadcast traffic
CHOKE_RATE = 100000 # bytes per second
CHOKE_PERIOD = 5 # how many seconds to average over
def __init__(self, main):
    self.main = main
    # Replacement IP, set elsewhere -- NOTE(review): semantics not visible
    # in this chunk; None means no remapping.
    self.remap_ip = None
    # Start with an empty flood-choke window (consumed by sendPacket).
    self.choke_time = seconds() - self.CHOKE_PERIOD
    # Last time the choke warning was shown; far in the past so the first
    # warning fires immediately.
    self.choke_reported = seconds() - 999
    # True iff we're shutting down after a socket failure.
    self.stopping_protocol = False
def stopProtocol(self):
    """Twisted callback: our UDP port went away; reconnect unless exiting."""
    # If this is the final termination, don't do anything.
    if not reactor.running:
        return
    self.main.showLoginStatus("UDP socket was reset.")
    # Otherwise, our UDP port randomly died, so try reconnecting.
    # Disable transmits during the shutdown.
    self.stopping_protocol = True
    try:
        self.main.shutdown(reconnect='instant')
    finally:
        self.stopping_protocol = False
def getSocketState(self):
    """Return 'alive', 'dying', or 'dead' describing our UDP socket."""
    # Figure out the state of our UDP socket.
    if self.stopping_protocol:
        # Mid-shutdown after a socket failure (see stopProtocol).
        return 'dying'
    elif not self.transport:
        return 'dead'
    elif hasattr(self.transport, "d"):
        # NOTE(review): relies on a Twisted internal -- the transport grows a
        # deferred attribute 'd' while disconnecting; confirm across versions.
        return 'dying'
    else:
        return 'alive'
def sendPacket(self, data, addr, broadcast=False):
# Send a packet, passing it through the encrypter
# returns False if an error occurs
if self.stopping_protocol:
# Still cleaning up after a socket asplosion.
return False
self.main.logPacket("%s -> %s:%d" % (data[:2], addr[0], addr[1]))
data = self.main.pk_enc.encrypt(data)
# For broadcast traffic, set a safety limit on data rate,
# in order to protect the physical network from DoS attacks.
if broadcast:
now = seconds()
self.choke_time = max(self.choke_time, now - self.CHOKE_PERIOD)
penalty = (1.0 * len(data) *
self.CHOKE_PERIOD / self.CHOKE_RATE)
# Have we used up the buffer time?
if self.choke_time + penalty >= now:
# Tell the user what's going on, but only once every
# 10 seconds.
if self.choke_reported < now - 10:
self.main.showLoginStatus(
"!!! Dropping broadcast packets due to "
"excessive flood !!!")
self.choke_reported = now
# Don't send packet
return False
# Nibble something off the choke buffer
self.choke_
|
edx/edx-ora2
|
openassessment/xblock/studio_mixin.py
|
Python
|
agpl-3.0
| 20,504 | 0.003414 |
"""
Studio editing view for OpenAssessment XBlock.
"""
import copy
import logging
from uuid import uuid4
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy
from voluptuous import MultipleInvalid
from xblock.fields import List, Scope
from xblock.core import XBlock
from web_fragments.fragment import Fragment
from openassessment.xblock.data_conversion import (
create_rubric_dict,
make_django_template_key,
update_assessments_format
)
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.resolve_dates import resolve_dates, parse_date_value, DateValidationError, InvalidDateFormat
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.validation import validator
from openassessment.xblock.editor_config import AVAILABLE_EDITORS
from openassessment.xblock.load_static import LoadStatic
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class StudioMixin:
"""
Studio editing view for OpenAssessment XBlock.
"""
DEFAULT_CRITERIA = [
{
'label': '',
'options': [
{
'label': ''
},
]
}
]
NECESSITY_OPTIONS = {
"required": ugettext_lazy("Required"),
"optional": ugettext_lazy("Optional"),
"": ugettext_lazy("None")
}
# Build editor options from AVAILABLE_EDITORS
AVAILABLE_EDITOR_OPTIONS = {
key: val.get('display_name', key) for key, val in AVAILABLE_EDITORS.items()
}
STUDIO_EDITING_TEMPLATE = 'openassessmentblock/edit/oa_edit.html'
BASE_EDITOR_ASSESSMENTS_ORDER = copy.deepcopy(DEFAULT_EDITOR_ASSESSMENTS_ORDER)
# Since the XBlock problem definition contains only assessment
# modules that are enabled, we need to keep track of the order
# that the user left assessments in the editor, including
# the ones that were disabled. This allows us to keep the order
# that the user specified.
editor_assessments_order = List(
default=DEFAULT_EDITOR_ASSESSMENTS_ORDER,
scope=Scope.content,
help="The order to display assessments in the editor."
)
def studio_view(self, context=None):  # pylint: disable=unused-argument
    """
    Render the OpenAssessment XBlock for editing in Studio.

    Args:
        context: Not actively used for this view.

    Returns:
        (Fragment): An HTML fragment for editing the configuration of this XBlock.
    """
    rendered_template = get_template(
        self.STUDIO_EDITING_TEMPLATE
    ).render(self.editor_context())
    fragment = Fragment(rendered_template)
    fragment.add_javascript_url(LoadStatic.get_url('openassessment-studio.js'))
    # File-upload constraints are forwarded to the client-side editor JS.
    js_context_dict = {
        "ALLOWED_IMAGE_EXTENSIONS": self.ALLOWED_IMAGE_EXTENSIONS,
        "ALLOWED_FILE_EXTENSIONS": self.ALLOWED_FILE_EXTENSIONS,
        "FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
    }
    fragment.initialize_js('OpenAssessmentEditor', js_context_dict)
    return fragment
def editor_context(self):
"""
Update the XBlock's XML.
Returns:
dict with keys
'rubric' (unicode), 'prompt' (unicode), 'title' (unicode),
'submission_start' (unicode), 'submission_due' (unicode),
'assessments (dict)
"""
# In the authoring GUI, date and time fields should never be null.
# Therefore, we need to resolve all "default" dates to datetime objects
# before displaying them in the editor.
try:
__, __, date_ranges = resolve_dates( # pylint: disable=redeclared-assigned-name
self.start, self.due,
[
(self.submission_start, self.submission_due)
] + [
(asmnt.get('start'), asmnt.get('due'))
for asmnt in self.valid_assessments
],
self._
)
except (DateValidationError, InvalidDateFormat):
# If the dates are somehow invalid, we still want users to be able to edit the ORA,
# so just present the dates as they are.
def _parse_date_safe(date):
try:
return parse_date_value(date, self._)
except InvalidDateFormat:
return ''
date_ranges = [
(_parse_date_safe(self.submission_start), _parse_date_safe(self.submission_due))
] + [
(_parse_date_safe(asmnt.get('start')), _parse_date_safe(asmnt.get('due')))
for asmnt in self.valid_assessments
]
submission_start, submission_due = date_ranges[0]
assessments = self._assessments_editor_context(date_ranges[1:])
self.editor_assessments_order = self._editor_assessments_order_context()
# Every rubric requires one criterion. If there is no criteria
# configured for the XBlock, return one empty default criterion, with
# an empty default option.
criteria = copy.deepcopy(self.rubric_criteria_with_labels)
if not criteria:
criteria = self.DEFAULT_CRITERIA
# To maintain backwards compatibility, if there is no
# feedback_default_text configured for the xblock, use the default text
feedback_default_text = copy.deepcopy(self.rubric_feedback_default_text)
if not feedback_default_text:
feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
course_id = self.location.course_key if hasattr(self, 'location') else None
# If allowed file types haven't been explicitly set, load from a preset
white_listed_file_types = self.get_allowed_file_types_or_preset()
white_listed_file_types_string = ','.join(white_listed_file_types) if white_listed_file_types else ''
# If rubric reuse is enabled, include information about the other ORAs in this course
rubric_reuse_data = {}
if self.is_rubric_reuse_enabled:
rubric_reuse_data = self.get_other_ora_blocks_for_rubric_editor_context()
return {
'prompts': self.prompts,
'prompts_type': self.prompts_type,
'title': self.title,
'submission_due': submission_due,
'submission_start': submission_start,
'assessments': assessments,
'criteria': criteria,
'feedbackprompt': self.rubric_feedback_prompt,
'feedback_default_text': feedback_default_text,
'text_response': self.text_response if self.text_response else '',
'text_response_editor': self.text_response_editor if self.text_response_editor else 'text',
'file_upload_response': self.file_upload_response if self.file_upload_response else '',
'necessity_options': self.NECESSITY_OPTIONS,
'available_editor_options': self.AVAILABLE_EDITOR_OPTIONS,
'file_upload_type': self.file_upload_type,
'allow_multiple_files': self.allow_multiple_files,
|
'white_listed_file_types': white_listed_file_types_string,
'allow_latex': self.allow_latex,
'leaderboard_show': self.leaderboard_show,
'editor_assessments
|
_order': [
make_django_template_key(asmnt)
for asmnt in self.editor_assessments_order
],
'teams_feature_enabled': self.team_submissions_enabled,
'teams_enabled': self.teams_enabled,
'base_asset_url': self._get_base_url_path_for_course_assets(course_id),
'is_released': self.is_released(),
'teamsets': self.get_teamsets(course_id),
'selected_teamset_id': self.selected_teamset_id,
'show_rubric_during_response': self.show_rubric_during_response,
'rubric_reuse_enabled': self.is_rubric_reuse_enabled,
'rubric_reuse_data': rubric_reuse_data,
'block_l
|
boos/cppcheck
|
addons/findcasts.py
|
Python
|
gpl-3.0
| 1,197 | 0.001671 |
#!/usr/bin/env python3
#
# Locate casts in the code
#
import cppcheckdata
import sys
for arg in sys.argv[1:]:
    # Skip option flags; every other argument is a cppcheck dump file.
    if arg.startswith('-'):
        continue

    print('Checking %s...' % arg)
    data = cppcheckdata.CppcheckData(arg)

    for cfg in data.iterconfigurations():
        print('Checking %s, config %s...' % (arg, cfg.name))
        for token in cfg.tokenlist:
            # A cast is '(' with exactly one AST operand; a call has two.
            if token.str != '(' or not token.astOperand1 or token.astOperand2:
                continue

            # Is it a lambda?
            if token.astOperand1.str == '{':
                continue

            # we probably have a cast.. if there is something inside the parentheses
            # there is a cast. Otherwise this is a function call.
            typetok = token.next
            if not typetok.isName:
                continue

            # cast number => skip output
            if token.astOperand1.isNumber:
                continue

            # void cast => often used to suppress compiler warnings
            if typetok.str == 'void':
                continue

            cppcheckdata.reportError(token, 'information', 'found a cast', 'findcasts', 'cast')

sys.exit(cppcheckdata.EXIT_CODE)
|
lucashtnguyen/pybmpdb
|
pybmpdb/tests/summary_tests.py
|
Python
|
bsd-3-clause
| 29,742 | 0.001143 |
import sys
import os
from pkg_resources import resource_filename
pythonversion = sys.version_info.major
from six import StringIO
import mock
import nose.tools as nt
import numpy as np
import numpy.testing as nptest
import matplotlib.pyplot as plt
import pandas
import pandas.util.testing as pdtest
import pybmpdb
from wqio import utils
from wqio import testing
mock_figure = mock.Mock(spec=plt.Figure)
@nt.nottest
def get_data_file(filename):
return resource_filename("wqio.data", filename)
@nt.nottest
def get_tex_file(filename):
return resource_filename("pybmpdb.tex", filename)
@nt.nottest
class mock_parameter(object):
def __init__(self):
self.name = 'Carbon Dioxide'
self.tex = r'$[\mathrm{CO}_2]$'
self.units = 'mg/L'
def paramunit(self, *args, **kwargs):
return 'Carbon Dioxide (mg/L)'
@nt.nottest
class mock_location(object):
def __init__(self, include):
self.N = 25
self.ND = 5
self.min = 0.123456
self.max = 123.456
self.mean = 12.3456
self.mean_conf_interval = np.array([-1, 1]) + self.mean
self.logmean = 12.3456
self.logmean_conf_interval = np.array([-1, 1]) + self.logmean
self.geomean = 12.3456
self.geomean_conf_interval = np.array([-1, 1]) + self.geomean
self.std = 4.56123
self.logstd = 4.56123
self.cov = 5.61234
self.skew = 6.12345
self.pctl25 = 0.612345
self.median = 1.23456
self.median_conf_interval = np.array([-1, 1]) + self.median
self.pctl75 = 2.34561
self.include = include
self.exclude = not self.include
pass
@nt.nottest
class mock_dataset(object):
def __init__(self, infl_include, effl_include):
self.influent = mock_location(infl_include)
self.effluent = mock_location(effl_include)
self.n_pairs = 22
self.wilcoxon_p = 0.0005
self.mannwhitney_p = 0.456123
self.definition = {
'parameter': mock_parameter(),
'category': 'testbmp'
}
def scatterplot(self, *args, **kwargs):
return mock_figure()
def statplot(self, *args, **kwargs):
return mock_figure()
class _base_DatasetSummary_Mixin(object):
def main_setup(self):
self.known_paramgroup = 'Metals'
self.known_bmp = 'testbmp'
self.known_latex_file_name = 'metalstestbmpcarbondioxide'
self.ds_sum = pybmpdb.DatasetSummary(self.ds, self.known_paramgroup, 'testfigpath')
self.known_latex_input_tt = r"""\subsection{testbmp}
\begin{table}[h!]
\caption{test table title}
\centering
\begin{tabular}{l l l l l}
\toprule
\textbf{Statistic} & \textbf{Inlet} & \textbf{Outlet} \\
\toprule
Count & 25 & 25 \\
\midrule
Number of NDs & 5 & 5 \\
\midrule
Min; Max & 0.123; 123 & 0.123; 123 \\
\midrule
Mean & 12.3 & 12.3 \\
%%
(95\% confidence interval) & (11.3; 13.3) & (11.3; 13.3) \\
\midrule
Standard Deviation & 4.56 & 4.56 \\
\midrule
Log. Mean & 12.3 & 12.3 \\
%%
(95\% confidence interval) & (11.3; 13.3) & (11.3; 13.3) \\
\midrule
Log. Standard Deviation & 4.56 & 4.56 \\
\midrule
Geo. Mean & 12.3 & 12.3 \\
%%
(95\% confidence interval) & (11.3; 13.3) & (11.3; 13.3) \\
\midrule
Coeff. of Variation & 5.61 & 5.61 \\
\midrule
Skewness & 6.12 & 6.12 \\
\midrule
Median & 1.23 & 1.23 \\
%%
(95\% confidence interval) & (0.235; 2.23) & (0.235; 2.23) \\
\midrule
Quartiles & 0.612; 2.35 & 0.612; 2.35 \\
\toprule
Number of Pairs & \multicolumn{2}{c} {22} \\
\midrule
Wilcoxon p-value & \multicolumn{2}{c} {$<0.001$} \\
\midrule
Mann-Whitney p-value & \multicolumn{2}{c} {0.456} \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[hb] % FIGURE
\centering
\includegraphics[scale=1.00]{testfigpath/statplot/metalstestbmpcarbondioxidestats.pdf}
\caption{Box and Probability Plots of Carbon Dioxide at testbmp BMPs}
\end{figure}
\begin{figure}[hb] % FIGURE
\centering
\includegraphics[scale=1.00]{testfigpath/scatterplot/metalstestbmpcarbondioxidescatter.p
|
df}
\caption{Influent vs. Effluent Plots of Carbon Dioxide at testbmp BMPs}
\end{figure} \clearpage""" + '\n'
self.known_latex_input_ff = ''
self.known_latex_input_ft = r"""\subsection{testbmp}
\begin{table}[h!]
\caption{test table title}
\centering
\begin{tabular}{l l l l l}
\toprule
\textbf{Statistic} & \textbf{Inlet} & \textbf{Outlet} \\
\t
|
oprule
Count & NA & 25 \\
\midrule
Number of NDs & NA & 5 \\
\midrule
Min; Max & NA & 0.123; 123 \\
\midrule
Mean & NA & 12.3 \\
%%
(95\% confidence interval) & NA & (11.3; 13.3) \\
\midrule
Standard Deviation & NA & 4.56 \\
\midrule
Log. Mean & NA & 12.3 \\
%%
(95\% confidence interval) & NA & (11.3; 13.3) \\
\midrule
Log. Standard Deviation & NA & 4.56 \\
\midrule
Geo. Mean & NA & 12.3 \\
%%
(95\% confidence interval) & NA & (11.3; 13.3) \\
\midrule
Coeff. of Variation & NA & 5.61 \\
\midrule
Skewness & NA & 6.12 \\
\midrule
Median & NA & 1.23 \\
%%
(95\% confidence interval) & NA & (0.235; 2.23) \\
\midrule
Quartiles & NA & 0.612; 2.35 \\
\toprule
Number of Pairs & \multicolumn{2}{c} {NA} \\
\midrule
Wilcoxon p-value & \multicolumn{2}{c} {NA} \\
\midrule
Mann-Whitney p-value & \multicolumn{2}{c} {NA} \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[hb] % FIGURE
\centering
\includegraphics[scale=1.00]{testfigpath/statplot/metalstestbmpcarbondioxidestats.pdf}
\caption{Box and Probability Plots of Carbon Dioxide at testbmp BMPs}
\end{figure}
\begin{figure}[hb] % FIGURE
\centering
\includegraphics[scale=1.00]{testfigpath/scatterplot/metalstestbmpcarbondioxidescatter.pdf}
\caption{Influent vs. Effluent Plots of Carbon Dioxide at testbmp BMPs}
\end{figure} \clearpage""" + '\n'
def test_paramgroup(self):
nt.assert_true(hasattr(self.ds_sum, 'paramgroup'))
nt.assert_true(isinstance(self.ds_sum.paramgroup, str))
nt.assert_equal(self.ds_sum.paramgroup, self.known_paramgroup)
def test_ds(self):
nt.assert_true(hasattr(self.ds_sum, 'ds'))
nt.assert_true(isinstance(self.ds_sum.ds, mock_dataset))
def test_parameter(self):
nt.assert_true(hasattr(self.ds_sum, 'parameter'))
nt.assert_true(isinstance(self.ds_sum.parameter, mock_parameter))
def test_bmp(self):
nt.assert_true(hasattr(self.ds_sum, 'bmp'))
|
zeckalpha/mindfeed
|
mindfeed/__init__.py
|
Python
|
mit
| 75 | 0 |
from mi
|
ndfeed.mindfeed import main
if __name__ == "__main
|
__":
main()
|
qsheeeeen/Self-Driving-Car
|
rl_toolbox/agent/__init__.py
|
Python
|
mit
| 26 | 0 |
from .ppo
|
import PPOAgent
| |
uaprom-summer-2015/Meowth
|
commands/static.py
|
Python
|
bsd-3-clause
| 3,080 | 0 |
from subprocess import call
import os
from flask.ext.script import Manager
from commands.utils import perform
def alt_exec(cmd, alt=None):
    """Execute *cmd*; if its executable is missing, fall back to *alt*.

    :param cmd: argument list for the primary command.
    :param alt: optional argument list for a fallback command, tried only
        when the primary executable is not found (ENOENT).
    :raises OSError: if the primary command fails for any other reason,
        or if the fallback command itself cannot be executed.
    """
    # Local import: the original used ``os.errno``, which is not a public
    # attribute of ``os`` and raises AttributeError on modern Pythons.
    import errno
    try:
        call(cmd)
    except OSError as e:
        if e.errno == errno.ENOENT and alt:
            # Let any failure of the fallback propagate naturally; the old
            # try/except-then-reraise around it was a no-op.
            call(alt)
        else:
            raise
StaticCommand = Manager(usage='Commands to build static')
def npm():
    """Install front-end dependencies by running ``npm install``."""
    with perform(name='static npm', before='run npm install'):
        alt_exec(cmd=["npm", "install"])
@StaticCommand.option(
'--allow-root',
dest='allow_root',
default=False,
help='Force scripts to allow execution by root user',
action='store_true',
)
@StaticCommand.option(
'--silent',
dest='silent',
default=False,
help='Do not ask user anything',
action='store_true',
)
def bower(allow_root, silent):
""" Run bower install script """
with perform(
name='static bower',
before='run bower install',
):
cmd_args = list()
if allow_root:
cmd_args.append("--allow-root")
|
if silent:
cmd_args.append("--silent")
alt_exec(
cmd=["bower", "install"] + cmd_args,
alt=["./node_modules/bower/bin/bower", "install"] + cmd_args,
)
@StaticCommand.option(
'--deploy-type',
dest='deploy_type',
default="production",
help='Set deploy type '
'(production with minifying, development without minifying etc.)'
)
def gulp(deploy_type=None):
""" Run gulp build script """
with
|
perform(
name='static gulp',
before='run gulp',
):
cmd_args = list()
if deploy_type is not None:
cmd_args.append("--type")
cmd_args.append(deploy_type)
alt_exec(
cmd=["gulp"] + cmd_args,
alt=["./node_modules/gulp/bin/gulp.js"] + cmd_args,
)
@StaticCommand.option(
    '--allow-root',
    dest='allow_root',
    default=False,
    help='Force scripts to allow execution by root user',
    action='store_true',
)
@StaticCommand.option(
    '--deploy-type',
    dest='deploy_type',
    default="production",
    help='Set deploy type '
         '(production with minifying, development without minifying etc.)'
)
@StaticCommand.option(
    '--silent',
    dest='silent',
    default=False,
    help='Do not ask user anything',
    action='store_true',
)
def collect(allow_root, deploy_type, silent):
    """Build all static assets: npm install, bower install, then gulp."""
    npm()
    bower(allow_root, silent)
    gulp(deploy_type)
@StaticCommand.command
def clean():
    """Remove previously built static files via ``gulp clean``."""
    with perform(name='static clean', before='run gulp clean'):
        alt_exec(
            cmd=["gulp", "clean"],
            alt=["./node_modules/gulp/bin/gulp.js", "clean"],
        )
|
izapolsk/integration_tests
|
cfme/tests/configure/test_version.py
|
Python
|
gpl-2.0
| 824 | 0.002427 |
import pytest
from cfme import test_requirements
from cfme.configure import about
@test_requirements.appliance
@pytest.
|
mark.tier(3)
@pytest.mark.sauce
def test_appliance_version(appliance):
"""Check version presented in UI against version retrieved directly from the machine.
Version retrieved from appliance is in this format: 1.2.3.4
Version in the UI is always: 1.2.3.4.20140505xyzblabla
So we check whether the UI version starts
|
with SSH version
Polarion:
assignee: jhenner
casecomponent: Appliance
caseimportance: high
initialEstimate: 1/4h
"""
ssh_version = str(appliance.version)
ui_version = about.get_detail(about.VERSION, server=appliance.server)
assert ui_version.startswith(ssh_version), "UI: {}, SSH: {}".format(ui_version, ssh_version)
|
bskinn/run_jensen
|
run_jensen.py
|
Python
|
mit
| 65,917 | 0.008829 |
#-------------------------------------------------------------------------------
# Name: run_jensen
# Purpose: Automated execution of diatomic M-L computations in the form
# of Jensen et al. J Chem Phys 126, 014103 (2007)
#
# Author: Brian
#
# Created: 8 May 2015
# Copyright: (c) Brian 2015
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is a standalone module for execution of M-L diatomic
# computations in ORCA, approximately per the approach given in
# the above citation.
#
# http://www.github.com/bskinn/run_jensen
#
#-------------------------------------------------------------------------------
# Module-level imports
import os, logging, time, re, csv
import h5py as h5, numpy as np
from opan.const import DEF, E_Software as E_SW, E_FileType as E_FT
# Module-level variables
# Constant strings
repofname = 'jensen.h5'
csvfname = 'jensen.csv'
csv_multfname = 'jensen_mult.csv'
pausefname = 'pause'
dir_fmt = 'jensen_%Y%m%d_%H%M%S'
sep = "_"
NA_str = "NA"
fail_conv = "FAILED"
# Adjustable parameters (not all actually are adjustable yet)
exec_cmd = 'runorca_pal.bat'
opt_str = '! TIGHTOPT'
convergers = ["", "! KDIIS", "! SOSCF"] #, "! NRSCF"]
req_tag_strs = ['<MOREAD>', '<MULT>', '<XYZ>', '<OPT>', '<CONV>', \
'<CHARGE>']
init_dia_sep = 2.1 # Angstroms
fixed_dia_sep = True
ditch_sep_thresh = 4.0
geom_scale = 0.75
pausetime = 2.0
skip_atoms = False
# Class for logging information
class log_names(object):
    """Namespace of constants configuring the run logger."""
    # Log file name and record layout.
    filename = 'jensen_log.txt'
    fmt = "%(levelname)-8s [%(asctime)s] %(message)s"
    datefmt = "%H:%M:%S"
    # Registration names for the logging objects.
    loggername = 'rjLogger'
    handlername = 'rjHandler'
    formattername = 'rjFormatter'
## end class log_names
# Class for names of subgroups
class h5_names(object):
    """String keys used to lay out the HDF5 results repository."""
    # Group-name prefixes and run-level keys.
    max_mult = 'max_mult'
    mult_prfx = 'm'
    chg_prfx = 'q'
    ref_prfx = 'r'
    run_base = 'base'
    converger = 'conv'
    # Per-run output datasets.
    out_en = 'energy'
    out_zpe = 'zpe'
    out_enth = 'enthalpy'
    out_bondlen = 'bond_length'
    out_dipmom = 'dipole_moment'
    out_freq = 'freq'
    # Minimum-energy summary datasets.
    min_en = 'min_en'
    min_en_mult = 'min_en_mult'
    min_en_ref = 'min_en_ref'
    min_en_zpe = 'min_en_zpe'
    min_en_enth = 'min_en_enth'
    min_en_bondlen = 'min_en_bondlen'
    min_en_dipmom = 'min_en_dipmom'
    min_en_freq = 'min_en_freq'
## end class h5_names
# Regex patterns for quick testing
p_multgrp = re.compile("/" + h5_names.mult_prfx + "(?P<mult>[0-9]+)$")
p_refgrp = re.compile("/" + h5_names.ref_prfx + "(?P<ref>[0-9]+)$")
# Atomic numbers for elements, and the max associated unpaired electrons
metals = set(range(21,31))
nonmetals = set([1, 6, 7, 8, 9, 16, 17, 35])
cation_nms = set([1,8])
max_unpaired = {1: 1, 6: 4, 7: 3, 8: 2, 9: 1, 16: 4, 17: 1, 35: 1, \
21: 3, 22: 4, 23: 5, 24: 6, 25: 7, 26: 6, 27: 5, \
28: 4, 29: 3, 30: 2}
mult_range = range(1,13)
def do_run(template_file, wkdir=None):
    """Top-level driver for a full series of Jensen-style computations.

    Reads the input template, creates a timestamped working directory,
    runs the single-atom computations (unless ``skip_atoms`` is set) and
    then every metal/non-metal diatomic (plus monocations for the
    non-metals in ``cation_nms``), storing results in the HDF5
    repository ``repofname``.

    Parameters
    ----------
    template_file : str
        Path to the input template; must contain the tags checked by
        ``proof_template``.
    wkdir : str, optional
        Directory to switch into before starting; the previous working
        directory is restored on completion.
    """
    # Imports
    from opan.utils import make_timestamp
    from opan.const import atomSym
    # If wkdir specified, try changing there first
    if not wkdir == None:
        old_wkdir = os.getcwd()
        os.chdir(wkdir)
    ## end if
    # Pull in the template
    with open(template_file) as f:
        template_str = f.read()
    ## end with
    # Create working folder, enter
    dir_name = time.strftime(dir_fmt)
    os.mkdir(dir_name)
    os.chdir(dir_name)
    # Set up and create the log, and log wkdir
    setup_logger()
    logger = logging.getLogger(log_names.loggername)
    logger.info("Jensen calc series started: " + time.strftime("%c"))
    logger.info("Working in directory: " + os.getcwd())
    # Proofread the template
    proof_template(template_str)
    # Log the template file contents
    logger.info("Template file '" + template_file + "' contents:\n\n" + \
                template_str)
    # Log the metals and nonmetals to be processed, including those
    # nonmetals for which the monocations will be calculated.
    logger.info("Metals: " + ", ".join([atomSym[a].capitalize() \
                                            for a in metals]))
    logger.info("Non-metals: " + ", ".join([atomSym[a].capitalize() \
                                            for a in nonmetals]))
    logger.info("Cations calculated for non-metals: " + \
            ", ".join([atomSym[a].capitalize() for a in cation_nms]))
    # Log the geometry scale-down factor, if used
    if fixed_dia_sep:
        logger.info("Using fixed initial diatomic separation of " + \
                    str(init_dia_sep) + " Angstroms.")
    else:
        logger.info("Using geometry scale-down factor: " + str(geom_scale))
    ## end if
    # Store the starting time
    start_time = time.time()
    # Create the data repository (append mode, so reruns accumulate)
    repo = h5.File(repofname, 'a')
    # Log notice if skipping atoms
    if skip_atoms:
        logger.warning("SKIPPING ATOM COMPUTATIONS")
    else:
        # Loop atoms (atomic calculations)
        for at in metals.union(nonmetals):
            run_mono(at, template_str, repo)
            repo.flush()
        ## next at
    ## end if
    # Loop atom pairs (diatomics) for run execution
    for m in metals:
        for nm in nonmetals:
            # Run the diatomic optimizations
            run_dia(m, nm, 0, template_str, repo)
            # Ensure repository is updated
            repo.flush()
            # Run the diatomic monocation optimizations for hydrides, oxides
            if nm in cation_nms:
                run_dia(m, nm, 1, template_str, repo)
            ## end if
            # Ensure repository is updated
            repo.flush()
            # Clear any residual temp files from failed comps
            clear_tmp(atomSym[m].capitalize() + atomSym[nm].capitalize())
        ## next nm
    ## next m
    # Close the repository
    repo.close()
    # Exit the working directory; if wkdir not specified then just go to
    # parent directory; otherwise restore the old wd.
    if wkdir == None:
        os.chdir('..')
    else:
        os.chdir(old_wkdir)
    ## end if
    # Log end of execution
    logger.info("Calc series ended: " + time.strftime("%c"))
    logger.info("Total elapsed time: " + \
                make_timestamp(time.time() - start_time))
## end def do_run
def continue_dia(template_file, m, nm, chg, mult, ref, wkdir=None):
# Imports
from opan.utils import make_timestamp
from opan.const import atomSym
# If wkdir specified, try changing there first
if not wkdir == None:
old_wkdir = os.getcwd()
os.chdir(wkdir)
## end if
# Pull in the template
with open(template_file) as f:
template_str = f.read()
## end with
# Set up and create the log, and log wkdir
setup_logger()
logger = logging.getLogger(log_names.loggername)
logger.info("============ RESTART ============")
logger.info("Jensen calc series resumed: " + time.strftime("%c"))
logger.info("Working in directory: " + os.getcwd())
# Proofread the template
proof_template(templ
|
ate_str)
# Log the template file contents
logger.info("Template file '" + template_file + "' contents:\n\n" + \
template_str)
# Store the starting time
start_time = time.t
|
ime()
# Retrieve the data repository
repo = h5.File(repofname, 'a')
# Log the restart
logger.info("Restarting '" + build_base(m, nm, chg) + \
"' at multiplicity " + str(mult) + ", reference " + \
"multiplicity " + str(ref))
# Run the diatomic optimizations, with restart point
run_dia(m, nm, chg, template_str,
|
lqez/django-summernote
|
django_summernote/admin.py
|
Python
|
mit
| 974 | 0.001027 |
from django.contrib import admin
from django.db import models
fr
|
om django_summernote.widgets import SummernoteWidget, SummernoteInplaceWidget
from django_summernote.settings import summernote_config, get_attachment_model
__widget__ = SummernoteWidget if summernote_config['iframe'] \
else SummernoteInplaceWidget
class SummernoteInlineModelAdmin(admin.options.InlineM
|
odelAdmin):
formfield_overrides = {models.TextField: {'widget': __widget__}}
class SummernoteModelAdmin(admin.ModelAdmin):
formfield_overrides = {models.TextField: {'widget': __widget__}}
class AttachmentAdmin(admin.ModelAdmin):
    """Admin configuration for Summernote attachment uploads."""
    list_display = ['name', 'file', 'uploaded']
    search_fields = ['name']
    ordering = ('-id',)
    def save_model(self, request, obj, form, change):
        # When no explicit name was given, fall back to the file's name.
        obj.name = obj.file.name if (not obj.name) else obj.name
        super(AttachmentAdmin, self).save_model(request, obj, form, change)
admin.site.register(get_attachment_model(), AttachmentAdmin)
|
withtwoemms/pygame-explorations
|
render_particle.py
|
Python
|
mit
| 926 | 0.009719 |
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self
|
.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#--
|
RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
mpvismer/gltk
|
offscreenrender.py
|
Python
|
mit
| 3,321 | 0.013851 |
"""
For using OpenGL to render to an off screen buffer
Ref:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from OpenGL.GL import *
from OpenGL.raw.GL.VERSION import GL_1_1,GL_1_2, GL_3_0
class OffScreenRender(object):
def __init__(self, width=1920, height=1080):
super(OffScreenRender, self).__init__()
self._width = width
self._height = height
self._fbo = None
self._render_buf = None
self._init_fbo()
self._oldViewPort = (ctypes.c_int*4)*4
def __enter__(self):
self.activate()
def __exit__(self, type, value, traceback):
self.deactivate()
return False
def __del__(self):
self._cleanup()
super(OffScreenRender, self).__del__()
    def activate(self):
        """Bind the off-screen FBO and set a centered square viewport."""
        glBindFramebuffer(GL_FRAMEBUFFER, self._fbo)
        # Remember the previous viewport so deactivate() can restore it.
        self._oldViewPort = glGetIntegerv(GL_VIEWPORT)
        # Largest centered square that fits inside the buffer.
        side = min(self._width, self._height)
        glViewport(int((self._width - side)/2), int((self._height - side)/2), side, side)
    def deactivate(self):
        """Rebind the default framebuffer and restore the saved viewport."""
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        glViewport(*self._oldViewPort)
def read_into(self, buf, x=0, y=0, width=None, height=None):
glReadBuffer(GL_COLOR_ATTACHMENT0)
width = width is not None or self._width,
height = height is not None or self._height,
glReadPixels(0,
0,
width,
height,
GL_BGRA,
#GL_RGBA, alot faster, but incorrect :()
GL_UNSIGNED_BYTE,
buf)
#outputType=None)
#GL_1_1.glReadPixels(
def get_size(self):
return self._width*self._height*4
def _init_fbo(self, depth=True):
fbo = glGenFramebuffers(1)
self._fbo = fbo
glBindFramebuffer(G
|
L_DRAW_FRAMEBUFFER, fbo)
render_buf = glGenRenderbuffers(1)
self._render_buf = render_buf
glBindRenderbuffer(GL_RENDERBUFFER, render_buf)
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, self._width, self._height)
glFramebufferRenderbuffer(GL
|
_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, render_buf)
if depth:
depth = glGenRenderbuffers(1)
self._depth = depth
glBindRenderbuffer(GL_RENDERBUFFER, depth)
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, self._width, self._height)
glBindRenderbuffer(GL_RENDERBUFFER, 0)
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth)
assert GL_FRAMEBUFFER_COMPLETE == glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER)
glBindFramebuffer(GL_FRAMEBUFFER, 0);
def _cleanup(self):
if self._fbo is not None:
glDeleteFramebuffers(self._fbo)
self._fbo = None
if self._render_buf is not None:
glDeleteRenderbuffers(self._render_buf)
self._render_buf = None
|
d4l3k/compute-archlinux-image-builder
|
arch-staging.py
|
Python
|
apache-2.0
| 5,271 | 0.011762 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import utils
COMPUTE_IMAGE_PACKAGES_GIT_URL = (
'https://github.com/GoogleCloudPlatform/compute-image-packages.git')
IMAGE_FILE='disk.raw'
SETUP_PACKAGES_ESSENTIAL = 'grep file'.split()
SETUP_PACKAGES = ('pacman wget gcc make parted git setconf libaio sudo '
|
'fakeroot').split()
IMAGE_PACKAGES = ('base tar wget '
'curl sudo mkinitcpio syslinux dhcp ethtool irqbalance '
'ntp psmisc openssh udev less bash-completion zip unzip '
'python2 python3').split()
def main():
args = utils.DecodeArgs(sys.argv[1])
utils.SetupLogging(quiet=args['quiet'], verbose=args['verbose'])
logging.info('Setup Bootstrapper Environment')
utils.SetupArchLocale()
InstallPackagesForStagingEnvironment(
|
)
image_path = os.path.join(os.getcwd(), IMAGE_FILE)
CreateImage(image_path, size_gb=int(args['size_gb']))
mount_path = utils.CreateTempDirectory(base_dir='/')
image_mapping = utils.ImageMapper(image_path, mount_path)
try:
image_mapping.Map()
primary_mapping = image_mapping.GetFirstMapping()
image_mapping_path = primary_mapping['path']
FormatImage(image_mapping_path)
try:
image_mapping.Mount()
utils.CreateDirectory('/run/shm')
utils.CreateDirectory(os.path.join(mount_path, 'run', 'shm'))
InstallArchLinux(mount_path)
disk_uuid = SetupFileSystem(mount_path, image_mapping_path)
ConfigureArchInstall(
args, mount_path, primary_mapping['parent'], disk_uuid)
utils.DeleteDirectory(os.path.join(mount_path, 'run', 'shm'))
PurgeDisk(mount_path)
finally:
image_mapping.Unmount()
ShrinkDisk(image_mapping_path)
finally:
image_mapping.Unmap()
utils.Run(['parted', image_path, 'set', '1', 'boot', 'on'])
utils.Sync()
def ConfigureArchInstall(args, mount_path, parent_path, disk_uuid):
  """Run the in-image configuration step inside a chroot.

  Copies the builder into the mounted image, clones
  compute-image-packages, then chroots in and executes arch-image.py
  with the encoded parameters.  The builder directory is removed again
  afterwards.
  """
  relative_builder_path = utils.CopyBuilder(mount_path)
  utils.LogStep('Download compute-image-packages')
  packages_dir = utils.CreateTempDirectory(mount_path)
  utils.Run(['git', 'clone', COMPUTE_IMAGE_PACKAGES_GIT_URL, packages_dir])
  utils.CreateDirectory(os.path.join(mount_path, ''))
  # Re-express the packages dir relative to the image root so the
  # chrooted script sees it at '/<packages_dir>'.
  packages_dir = os.path.relpath(packages_dir, mount_path)
  params = {
      'packages_dir': '/%s' % packages_dir,
      'device': parent_path,
      'disk_uuid': disk_uuid,
      'accounts': args['accounts'],
      'debugmode': args['debugmode'],
  }
  params.update(args)
  config_arch_py = os.path.join(
      '/', relative_builder_path, 'arch-image.py')
  utils.RunChroot(mount_path,
                  '%s "%s"' % (config_arch_py, utils.EncodeArgs(params)),
                  use_custom_path=False)
  utils.DeleteDirectory(os.path.join(mount_path, relative_builder_path))
def InstallPackagesForStagingEnvironment():
  """Install the tooling needed by the bootstrap/staging environment."""
  utils.InstallPackages(SETUP_PACKAGES_ESSENTIAL)
  utils.InstallPackages(SETUP_PACKAGES)
  utils.SetupArchLocale()
  # Extra tools pulled from the AUR and used later in the build.
  utils.AurInstall(name='multipath-tools-git')
  utils.AurInstall(name='zerofree')
def CreateImage(image_path, size_gb=10, fs_type='ext4'):
  """Create a blank MSDOS-labelled disk image with one primary partition."""
  utils.LogStep('Create Image')
  size_mb = str(int(size_gb) * 1024)
  for step in (['rm', '-f', image_path],
               ['truncate', image_path, '--size=%sG' % size_gb],
               ['parted', image_path, 'mklabel', 'msdos'],
               ['parted', image_path, 'mkpart', 'primary',
                fs_type, '1', size_mb]):
    utils.Run(step)
def FormatImage(image_mapping_path):
  """Create a filesystem on the mapped partition and sync to disk."""
  utils.LogStep('Format Image')
  utils.Run(['mkfs', image_mapping_path])
  utils.Sync()
def InstallArchLinux(base_dir):
  """Pacstrap the base Arch system plus IMAGE_PACKAGES into base_dir."""
  utils.LogStep('Install Arch Linux')
  utils.Pacstrap(base_dir, IMAGE_PACKAGES)
def SetupFileSystem(base_dir, image_mapping_path):
  """Write /etc/fstab and stamp the filesystem UUID; returns the UUID.

  NOTE(review): /etc/fstab is written twice -- first with the full
  genfstab output, then with a single UUID root entry.  Presumably only
  the second write is intended; confirm whether utils.WriteFile
  overwrites or appends.
  """
  utils.LogStep('File Systems')
  _, fstab_contents, _ = utils.Run(['genfstab', '-p', base_dir],
                                   capture_output=True)
  utils.WriteFile(os.path.join(base_dir, 'etc', 'fstab'), fstab_contents)
  # Read the partition's UUID so the root entry survives device renames.
  _, disk_uuid, _ = utils.Run(['blkid', '-s', 'UUID',
                               '-o', 'value',
                               image_mapping_path],
                              capture_output=True)
  disk_uuid = disk_uuid.strip()
  utils.WriteFile(os.path.join(base_dir, 'etc', 'fstab'),
                  'UUID=%s / ext4 defaults 0 1' % disk_uuid)
  utils.Run(['tune2fs', '-i', '1', '-U', disk_uuid, image_mapping_path])
  return disk_uuid
def PurgeDisk(mount_path):
  """Remove caches, logs and pacman sync DBs from the mounted image.

  Bug fix: the original joined ``mount_path`` with *absolute* paths
  ('/var/cache', ...); os.path.join discards every component before an
  absolute one, so it would have deleted the HOST's directories instead
  of the image's.  The paths are now relative to the mount point.
  """
  paths = ['var/cache', 'var/log', 'var/lib/pacman/sync']
  for path in paths:
    utils.DeleteDirectory(os.path.join(mount_path, path))
def ShrinkDisk(image_mapping_path):
  """Zero unused filesystem blocks with zerofree (aids later compression)."""
  utils.LogStep('Shrink Disk')
  utils.Run(['zerofree', image_mapping_path])
main()
|
mancoast/CPythonPyc_test
|
fail/331_test_re.py
|
Python
|
gpl-3.0
| 54,426 | 0.002536 |
from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
cpython_only
import io
import re
from re import Scanner
import sys
import string
import traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_keep_buffer(self):
# See bug 14212
b = bytearray(b'x')
it = re.finditer(b'a', b)
with self.assertRaises(BufferError):
b.extend(b'x'*400)
list(it)
del it
gc_collect()
b.extend(b'x'*400)
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re
|
.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual
|
(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self
|
ffakhraei/pProj
|
python/codility/ch15/countDistinctSlices.py
|
Python
|
gpl-3.0
| 350 | 0.04 |
#/bin/env/python
A=[3,4,5,5,2]
M=6
def solution(M, A) :
n=len(A)
|
total = 0
for back in xrange(n) :
front = back
while front < n and A[front] not in A[back:front] :
total += 1
front += 1
if total >= 1000000000 :
return 1000000000
retu
|
rn total
print solution(M, A)
|
gregoil/rotest
|
src/rotest/common/django_utils/fields.py
|
Python
|
mit
| 2,999 | 0 |
"""Contain common module fields."""
# pylint: disable=too-many-public-methods
from __future__ import absolute_import
import re
from django.db import models
from django.core.exceptions import ValidationError
class NameField(models.CharField):
    """Item name string field.
    This field is limited to 150 characters by default (see ``MAX_LEN``)
    and contains the name (string).
    Good examples:
    * "name_lastname"
    * "name@lastname"
    * a 150-character name
    Bad examples:
    * a 151-character name
    """
    # Default maximum length; callers may override via ``max_length``.
    MAX_LEN = 150
    def __init__(self, max_length=MAX_LEN, *args, **kwargs):
        # Forward to CharField with the (possibly overridden) max length.
        super(NameField, self).__init__(*args, max_length=max_length,
                                        **kwargs)
class DynamicIPAddressField(models.CharField):
    """DNS name or IP address, stored as a string of up to 64 characters."""
    # Default maximum length; callers may override via ``max_length``.
    MAX_LEN = 64
    def __init__(self, max_length=MAX_LEN, *args, **kwargs):
        # Forward to CharField with the (possibly overridden) max length.
        super(DynamicIPAddressField, self).__init__(*args,
                                                    max_length=max_length,
                                                    **kwargs)
class MACAddressField(models.CharField):
"""MAC address field."""
MAX_LE
|
N = 17 # enables writing exactly 16 characters
MAC_ADDRESS_REGEX = '(([0-9a-fA-F]{2}):
|
){5}[0-9a-fA-F]{2}'
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(MACAddressField, self).__init__(*args,
max_length=max_length,
**kwargs)
def validate(self, value, model_instance):
"""Validate that the input value is a MAC address."""
super(MACAddressField, self).validate(value, model_instance)
if re.match(self.MAC_ADDRESS_REGEX, value) is None:
raise ValidationError('The input MAC address does not match the '
'pattern of a MAC address')
class PathField(models.CharField):
    r"""File-system path string field.
    This field is limited to 200 characters and contains string path split by
    slashes or backslashes.
    Good examples:
    * "/mnt/home/code/a.txt"
    * "/./a"
    * "c:\\windows\\temp"
    Bad examples:
    * "//mnt//@%$2"
    * "c:\;"
    """
    # Default maximum length; callers may override via ``max_length``.
    MAX_LEN = 200
    def __init__(self, max_length=MAX_LEN, *args, **kwargs):
        # Forward to CharField with the (possibly overridden) max length.
        super(PathField, self).__init__(*args, max_length=max_length,
                                        **kwargs)
class VersionField(models.CharField):
    """Item version string field.
    This field is limited to 10 characters and contains numbers and characters
    separated by dots.
    Good examples:
    * "4.12F"
    * "1.1423"
    Bad examples:
    * "4,12F"
    * "1/1423"
    """
    # Default maximum length; callers may override via ``max_length``.
    MAX_LEN = 10
    def __init__(self, max_length=MAX_LEN, *args, **kwargs):
        # Forward to CharField with the (possibly overridden) max length.
        super(VersionField, self).__init__(*args, max_length=max_length,
                                           **kwargs)
class PortField(models.PositiveSmallIntegerField):
    """Port number field (for IP connections).
    Thin alias over PositiveSmallIntegerField kept so the schema reads
    clearly; no extra validation is added here.
    """
    pass
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/connectivity/disjoint_paths.py
|
Python
|
gpl-3.0
| 14,544 | 0.000619 |
"""Flow based node and edge disjoint paths."""
import networkx as nx
from networkx.exception import NetworkXNoPath
# Define the default maximum flow function to use for the undelying
# maximum flow computations
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
default_flow_func = edmonds_karp
# Functions to build auxiliary data structures.
from .utils import build_auxiliary_node_connectivity
from .utils import build_auxiliary_edge_connectivity
from itertools import filterfalse as _filterfalse
__all__ = [
"edge_disjoint_paths",
"node_disjoint_paths",
]
def edge_disjoint_paths(
G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
):
"""Returns the edges disjoint paths between source and target.
Edge disjoint paths are paths that do not share any edge. The
number of edge disjoint paths between source and target is equal
to their edge connectivity.
Parameters
----------
G : NetworkX graph
s : node
Source node for the flow.
t : node
Sink node for the flow.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. The choice of the default function
may change from version to version and should not be relied on.
Default value: None.
cutoff : int
Maximum number of paths to yield. Some of the maximum flow
algorithms, such as :meth:`edmonds_karp` (the default) and
:meth:`shortest_augmenting_path` support the cutoff parameter,
and will terminate when the flow value reaches or exceeds the
cutoff. Other algorithms will ignore this parameter.
Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based edge connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
Returns
-------
paths : generator
A generator of edge independent paths.
Raises
------
NetworkXNoPath
If there is no path between source and target.
NetworkXError
If source or target are not in the graph G.
See also
--------
:meth:`node_disjoint_paths`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
Examples
--------
We use in this example the platonic icosahedral graph, which has node
edge connectivity 5, thus there are 5 edge disjoint paths between any
pair of nodes.
>>> G = nx.icosahedral_graph()
>>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
5
If you need to compute edge disjoint paths on several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for edge connectivity, and the residual
network for the underlying maximum flow computation.
Example of how to compute edge disjoint paths among all pairs of
nodes of the platonic icosahedral graph reusing the data
structures.
>>> import itertools
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_edge_connectivity)
>>> H = build_auxiliary_edge_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> result = {n: {} for n in G}
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as arguments
>>> for u, v in itertools.combinations(G, 2):
... k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
... result[u][v] = k
>>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
True
You can also use alternative flow algorithms for computing edge disjoint
paths. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
5
Notes
-----
This is a flow based implementation of edge disjoint paths. We compute
the maximum flow between source and target on an auxiliary directed
network. The saturated edges in the residual network after running the
maximum flow algorithm correspond to edge disjoint paths between source
and target in the original network. This function handles both directed
and undirected graphs, and can use all flow algorithms from NetworkX flow
package.
"""
if s not in G:
raise nx.NetworkXError(f"node {s} not in graph")
if t not in G:
raise nx.NetworkXError(f"node {t} not in graph")
if flow_func is None:
flow_func = default_flow_func
if auxiliary is None:
H = build_auxiliary_edge_connectivity(G)
else:
H = auxiliary
# Maximum possible edge disjoint paths
possible = min(H.out_degree(s), H.in_degree(t))
if not possible:
raise NetworkXNoPath
if cutoff is None:
cutoff = possible
else:
cutoff = min(cutoff, possible)
# Compute maximum flow between source and target. Flow functions in
# NetworkX return a residual network.
kwargs = dict(
capacity="capacity", residual=residual, cutoff=cutoff, value_only=True
)
if flow_func is preflow_push:
del kwargs["cutoff"]
if flow_func is shortest_augmenting_path:
kwargs["two_phase"] = True
R = flow_func(H, s, t, **kwargs)
if R.graph["flow_value"] == 0:
raise NetworkXNoPath
# Saturated edges in the residual network form the edge disjoint paths
# between source and target
cutset = [
(u, v)
for u, v, d in R.edges(data=True)
if d["capacity"] == d["f
|
low"] and d["flow"] > 0
]
# This is equivalent of what flow.utils.build_flow_dict returns, but
# only for the nodes with saturated edges and without reporting 0 flows.
flow_dict = {n: {} for edge in cutset for n in edge}
for u, v in cutset:
flow_dict[u][v] = 1
# Rebuild the edge disjoint paths from the flow dictionary.
paths_found = 0
for v in list(flow_dict[s]):
if paths_f
|
ound >= cutoff:
# preflow_push does not support cutoff: we have to
# keep track of the paths founds and stop at cutoff.
break
path = [s]
if v == t:
path.append(v)
yield path
continue
u = v
while u != t:
path.append(u)
try:
u, _ = flow_dict[u].popitem()
except KeyError:
break
else:
path.append(t)
yield path
paths_found += 1
def node_disjoint_paths(
G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
):
|
tmenjo/cinder-2015.1.1
|
cinder/tests/objects/test_objects.py
|
Python
|
apache-2.0
| 36,876 | 0 |
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import mock
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from testtools import matchers
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import base
from cinder.objects import fields
from cinder import test
from cinder.tests import fake_notifier
class MyOwnedObject(base.CinderPersistentObject, base.CinderObject):
VERSION = '1.0'
fields = {'baz': fields.Field(fields.
|
Integer())}
class MyObj(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.Field(fields.Integer(), default=1),
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
'readonly': fields.Field(fields.Integer(), read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable
|
=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self, context):
return 'polo'
@base.remotable
def _update_test(self, context):
if context.project_id == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@base.remotable
def save(self, context):
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self, context):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.TestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.CinderObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.CinderObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.BooleanField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.Boolean)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.TestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.CinderObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.CinderObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.CinderObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
class TestObjMakeList(test.TestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.CinderObject):
pass
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a CinderObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterate
|
siosio/intellij-community
|
python/testData/override/qualified.py
|
Python
|
apache-2.0
| 57 | 0.035088 |
imp
|
ort turtle
class C(turtle.TurtleScreenBase):
pa
|
ss
|
gditzler/BigLS2014-Code
|
src/bmu.py
|
Python
|
gpl-3.0
| 2,945 | 0.021053 |
#!/usr/bin/env python
import json
import numpy
import scipy.sparse as sp
from optparse import OptionParser
__author__ = "Gregory Ditzler"
__copyright__ = "Copyright 2014, EESI Laboratory (Drexel University)"
__credits__ = ["Gregory Ditzler"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Gregory Ditzler"
__email__ = "[email protected]"
def load_biom(fname):
"""
load a biom file and return a dense matrix
:fname - string containing the path to the biom file
:data - numpy array containing the OTU matrix
:samples - list containing the sample IDs (important for knowing
the labels in the data matrix)
:features - list containing the feature names
"""
o = json.loads
|
(open(fname,"U").read())
if o["matrix_type"] == "sparse":
data = load_sparse(o)
else:
data = load_dense(o)
samples = []
for sid in o["columns"]:
samples.append(sid["id"])
features = []
for sid in o["rows"]:
# check to see if the taxonomy is listed, this will generally lead to more
# descriptive names for the taxonomies.
if sid.has_key("metadata") and sid["metadata"] != None:
if sid["metadata"].has_key("taxonomy"):
#features.appe
|
nd(str( \
# sid["metadata"]["taxonomy"]).strip( \
# "[]").replace(",",";").replace("u'","").replace("'",""))
features.append(json.dumps(sid["metadata"]["taxonomy"]))
else:
features.append(sid["id"])
else:
features.append(sid["id"])
return data, samples, features
def load_dense(obj):
"""
load a biom file in dense format
:obj - json dictionary from biom file
:data - dense data matrix
"""
n_feat,n_sample = obj["shape"]
data = np.array(obj["data"], order="F")
return data.transpose()
def load_sparse(obj):
"""
load a biom file in sparse format
:obj - json dictionary from biom file
:data - dense data matrix
"""
n_feat,n_sample = obj["shape"]
data = numpy.zeros((n_feat, n_sample),order="F")
for val in obj["data"]:
data[val[0], val[1]] = val[2]
data = data.transpose()
return data
def load_map(fname):
"""
load a map file. this function does not have any dependecies on qiime's
tools. the returned object is a dictionary of dictionaries. the dictionary
is indexed by the sample_ID and there is an added field for the the
available meta-data. each element in the dictionary is a dictionary with
the keys of the meta-data.
:fname - string containing the map file path
:meta_data - dictionary containin the mapping file information
"""
f = open(fname, "U")
mfile = []
for line in f:
mfile.append(line.replace("\n","").replace("#","").split("\t"))
meta_data_header = mfile.pop(0)
meta_data = {}
for sample in mfile:
sample_id = sample[0]
meta_data[sample_id] = {}
for identifier, value in map(None, meta_data_header, sample):
meta_data[sample_id][identifier] = value
return meta_data
|
hmmlearn/hmmlearn
|
scripts/benchmark.py
|
Python
|
bsd-3-clause
| 8,715 | 0.000115 |
"""
A script for testing / benchmarking HMM Implementations
"""
import argparse
import collections
import logging
import time
import hmmlearn.hmm
import numpy as np
import sklearn.base
LOG = logging.getLogger(__file__)
class Benchmark:
def __init__(self, repeat, n_iter, verbose):
self.repeat = repeat
self.n_iter = n_iter
self.verbose = verbose
def benchmark(self, sequences, lengths, model, tag):
elapsed = []
for i in range(self.repeat):
start = time.time()
cloned = sklearn.base.clone(model)
cloned.fit(sequences, lengths)
end = time.time()
elapsed.append(end-start)
self.log_one_run(start, end, cloned, tag)
return np.asarray(elapsed)
def generate_training_sequences(self):
pass
def new_model(self, implementation):
pass
def run(self, results_file):
runtimes = collections.defaultdict(dict)
sequences, lengths = self.generate_training_sequences()
for implementation in ["scaling", "log"]:
model = self.new_model(implementation)
LOG.info(f"{model.__class__.__name__}: testing {implementation}")
key = f"{model.__class__.__name__}|EM|hmmlearn-{implementation}"
elapsed = self.benchmark(sequences, lengths, model, key)
runtimes[key]["mean"] = elapsed.mean()
runtimes[key]["std"] = elapsed.std()
with open(results_file, mode="w") as fd:
fd.write("configuration,mean,std,n_iterations,repeat\n")
for key, value in runtimes.items():
fd.write(f"{key},{value['mean']},{value['std']},"
f"{self.n_iter},{self.repeat}\n")
def log_one_run(self, start, end, model, tag):
LOG.info(f"Training Took {end-start} seconds {tag}")
LOG.info(f"startprob={model.startprob_}")
LOG.info(f"transmat={model.transmat_}")
class GaussianBenchmark(Benchmark):
def new_model(self, implementation):
return hmmlearn.hmm.GaussianHMM(
n_components=4,
n_iter=self.n_iter,
covariance_type="full",
implementation=implementation,
verbose=self.verbose
)
def generate_training_sequences(self):
sampler = hmmlearn.hmm.GaussianHMM(
n_components=4,
covariance_type="full",
init_params="",
verbose=self.verbose
)
sampler.startprob_ = np.asarray([0, 0, 0, 1])
sampler.transmat_ = np.asarray([
[.2, .2, .3, .3],
[.3, .2, .2, .3],
[.2, .3, .3, .2],
[.3, .3, .2, .2],
])
sampler.means_ = np.asarray([
-1.5,
0,
1.5,
3
]).reshape(4, 1)
sampler.covars_ = np.asarray([
.5,
.5,
.5,
.5
]).reshape(4, 1, 1,)
sequences, states = sampler.sample(50000)
lengths = [len(sequences)]
return sequences, lengths
def log_one_run(self, start, end, model, tag):
super().log_one_run(start, end, model, tag)
LOG.info(f"means={model.means_}")
LOG.info(f"covars={model.covars_}")
class MultinomialBenchmark(Benchmark):
def new_model(self, implementation):
return hmmlearn.hmm.MultinomialHMM(
n_components=3,
n_iter=self.n_iter,
verbose=self.verbose,
implementation=implementation
)
def generate_training_sequences(self):
sampler = hmmlearn.hmm.MultinomialHMM(n_components=3)
sampler.startprob_ = np.array([0.6, 0.3, 0.1])
sampler.transmat_ = np.array([[0.6, 0.2, 0.2],
[0.3, 0.5, 0.2],
[0.4, 0.3, 0.3]])
sampler.emissionprob_ = np.array([
[.1, .5, .1, .3],
[.1, .2, .4, .3],
[0, .5, .5, .0],
])
sequences, states = sampler.sample(50000)
lengths = [len(sequences)]
return sequences, lengths
def log_one_run(self, start, end, model, tag):
super().log_one_run(start, end, model, tag)
LOG.info(f"emissions={model.emissionprob_}")
class MultivariateGaussianBenchmark(GaussianBenchmark):
def generate_training_sequences(self):
sampler = hmmlearn.hmm.GaussianHMM(
n_components=4,
covariance_type="full",
init_params=""
)
sampler.startprob_ = np.asarray([0, 0, 0, 1])
sampler.transmat_ = np.asarray([
[.2, .2, .3, .3],
[.3, .2, .2, .3],
[.2, .3, .3, .2],
[.3, .3, .2, .2],
])
sampler.means_ = np.asarray([
[-1.5, 0],
[0, 0],
[1.5, 0],
[3, 0]
])
sampler.covars_ = np.asarray([
[[.5, 0],
[0, .5]],
[[.5, 0],
[0, 0.5]],
[[.5, 0],
[0, .5]],
[[0.5, 0],
[0, 0.5]],
])
observed, hidden = sampler.sample(50000)
lengths = [len(observed)]
return observed, lengths
class GMMBenchmark(GaussianBenchmark):
def generate_training_sequences(self):
sampler = hmmlearn.hmm.GMMHMM(
n_components=4,
n_mix=3,
covariance_type="full",
init_params=""
)
sampler.startprob_ = [.25, .25, .25, .25]
sampler.transmat_ = [
[.1, .3, .3, .3],
[.3, .1, .3, .3],
[.3, .3, .1, .3],
[.3, .3, .3, .1],
]
sampler.weights_ = [
[.2, .2, .6],
[.6, .2, .2],
[.2, .6, .2],
[.1, .1, .8],
]
sampler.means_ = np.asarray([
[[-10], [-12], [-9]],
[[-5], [-4], [-3]],
[[-1.5], [0], [1.5]],
[[5], [7], [9]],
])
sampler.covars_ = np.asarray([
[[[.125]], [[.125]], [[.125]]],
[[[.125]], [[.125]], [[.125]]],
[[[.125]], [[.125]], [[.125]]],
[[[.125]], [[.125]], [[.125]]],
])
n_sequences = 10
length = 5_000
sequences = []
for i in range(n_sequences):
sequences.append(sampler.sample(5000)[0])
return np.concatenate(sequences), [length] * n_sequences
def new_model(self, implementation):
return hmmlearn.hmm.GMMHMM(
n_components=4,
n_mix=3,
n_iter=self.n_iter,
covariance_type="full",
verbose=self.verbose,
implementation=implementation
)
def log_one_run(self, start, end, model, tag):
super().log_one_run(start, end, model, tag)
LOG.info(f"weights_={model.weights_}")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--all", action="store_true")
parser.add_argument("--categorical", action="store_true")
parser.add_argument("--gaussian", action="store_true")
parser.add_argument("--multivariate-gaussian", action="store_true")
parser.add_argument("--gaussian-mixture", action="store_t
|
rue")
parser.add_argument("--repeat", type=int, default=10)
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--n-iter", type=int, default=100)
args = parser.parse_args()
if args.all:
args.categorical = True
args.gaussian = True
args.multivariate_gaussian = True
args.gaussian_mixture = True
if args.categorical:
bench = MultinomialBenchmark(
repeat=args.repeat,
|
n_iter=args.n_iter,
verbose=args.verbose,
)
bench.run("categorical.benchmark.csv")
if args.gaussian:
bench = GaussianBenchmark(
repeat=args.repeat,
n_iter=args.n_iter,
verbose=args.verbose,
)
bench.run("gaussian.benchmark.csv")
if args.multivariate_gaussian:
bench = MultivariateGaussianBenchmark(
|
Elfhir/apero-imac
|
apero_imac/views.py
|
Python
|
mpl-2.0
| 255 | 0.023529 |
from apero_imac.models import *
from django.shortcuts import render_to_response
from datetime import datetime
def home(request):
return render_to_response('home.html', locals())
def about(request):
return render_to_response('about.html', locals()
|
)
|
|
FCP-INDI/C-PAC
|
CPAC/surface/surf_preproc.py
|
Python
|
bsd-3-clause
| 6,597 | 0.00864 |
import os
import nipype.interfaces.utility as util
from CPAC.utils.interfac
|
es.function import Function
from CPAC.pipeline import nipype_pipeline_engine as pe
def run_surface(post_freesurfer_folder,
freesurfer_folder,
subject,
t1w_restore_image,
atlas_space_t1w_image,
atlas_transform,
inverse_atlas_transform,
atlas_space_bold,
scout_bold,
|
surf_atlas_dir,
gray_ordinates_dir,
gray_ordinates_res,
high_res_mesh,
low_res_mesh,
subcortical_gray_labels,
freesurfer_labels,
fmri_res,
smooth_fwhm):
import os
import subprocess
freesurfer_folder = os.path.join(freesurfer_folder, 'recon_all')
# DCAN-HCP PostFreeSurfer
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/PostFreeSurfer/PostFreeSurferPipeline.sh
cmd = ['bash', '/code/CPAC/surface/PostFreeSurfer/run.sh', '--post_freesurfer_folder', post_freesurfer_folder, \
'--freesurfer_folder', freesurfer_folder, '--subject', subject, \
'--t1w_restore', t1w_restore_image, '--atlas_t1w', atlas_space_t1w_image, \
'--atlas_transform', atlas_transform, '--inverse_atlas_transform', inverse_atlas_transform, \
'--surfatlasdir', surf_atlas_dir, '--grayordinatesdir', gray_ordinates_dir, '--grayordinatesres', gray_ordinates_res, \
'--hiresmesh', high_res_mesh, '--lowresmesh', low_res_mesh, \
'--subcortgraylabels', subcortical_gray_labels, '--freesurferlabels', freesurfer_labels]
subprocess.check_output(cmd)
# DCAN-HCP fMRISurface
# https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRISurface/GenericfMRISurfaceProcessingPipeline.sh
cmd = ['bash', '/code/CPAC/surface/fMRISurface/run.sh', '--post_freesurfer_folder', post_freesurfer_folder,\
'--subject', subject, '--fmri', atlas_space_bold, '--scout', scout_bold,
'--lowresmesh', low_res_mesh, '--grayordinatesres', gray_ordinates_res,
'--fmrires', fmri_res, '--smoothingFWHM', smooth_fwhm]
subprocess.check_output(cmd)
out_file = os.path.join(post_freesurfer_folder, 'MNINonLinear/Results/task-rest01/task-rest01_Atlas.dtseries.nii')
return out_file
def surface_connector(wf, cfg, strat_pool, pipe_num, opt):
surf = pe.Node(util.Function(input_names=['post_freesurfer_folder',
'freesurfer_folder',
'subject',
't1w_restore_image',
'atlas_space_t1w_image',
'atlas_transform',
'inverse_atlas_transform',
'atlas_space_bold',
'scout_bold',
'surf_atlas_dir',
'gray_ordinates_dir',
'gray_ordinates_res',
'high_res_mesh',
'low_res_mesh',
'subcortical_gray_labels',
'freesurfer_labels',
'fmri_res',
'smooth_fwhm'],
output_names=['out_file'],
function=run_surface),
name=f'post_freesurfer_{pipe_num}')
surf.inputs.subject = cfg['subject_id']
surf.inputs.post_freesurfer_folder = os.path.join(cfg.pipeline_setup['working_directory']['path'],
'cpac_'+cfg['subject_id'],
f'post_freesurfer_{pipe_num}')
surf.inputs.surf_atlas_dir = cfg.surface_analysis['post_freesurfer']['surf_atlas_dir']
surf.inputs.gray_ordinates_dir = cfg.surface_analysis['post_freesurfer']['gray_ordinates_dir']
surf.inputs.subcortical_gray_labels = cfg.surface_analysis['post_freesurfer']['subcortical_gray_labels']
surf.inputs.freesurfer_labels = cfg.surface_analysis['post_freesurfer']['freesurfer_labels']
# convert integers to strings as subprocess requires string inputs
surf.inputs.gray_ordinates_res = str(cfg.surface_analysis['post_freesurfer']['gray_ordinates_res'])
surf.inputs.high_res_mesh = str(cfg.surface_analysis['post_freesurfer']['high_res_mesh'])
surf.inputs.low_res_mesh = str(cfg.surface_analysis['post_freesurfer']['low_res_mesh'])
surf.inputs.fmri_res = str(cfg.surface_analysis['post_freesurfer']['fmri_res'])
surf.inputs.smooth_fwhm = str(cfg.surface_analysis['post_freesurfer']['smooth_fwhm'])
node, out = strat_pool.get_data('freesurfer-subject-dir')
wf.connect(node, out, surf, 'freesurfer_folder')
node, out = strat_pool.get_data('desc-restore_T1w')
wf.connect(node, out, surf, 't1w_restore_image')
node, out = strat_pool.get_data('space-template_desc-head_T1w')
wf.connect(node, out, surf, 'atlas_space_t1w_image')
node, out = strat_pool.get_data('from-T1w_to-template_mode-image_xfm')
wf.connect(node, out, surf, 'atlas_transform')
node, out = strat_pool.get_data('from-template_to-T1w_mode-image_xfm')
wf.connect(node, out, surf, 'inverse_atlas_transform')
node, out = strat_pool.get_data('space-template_desc-brain_bold')
wf.connect(node, out, surf, 'atlas_space_bold')
node, out = strat_pool.get_data('space-template_desc-scout_bold')
wf.connect(node, out, surf, 'scout_bold')
outputs = {
'space-fsLR_den-32k_bold-dtseries': (surf, 'out_file')
}
return wf, outputs
def surface_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "surface_preproc",
"config": ["surface_analysis", "post_freesurfer"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": ["freesurfer-subject-dir",
"desc-restore_T1w",
"space-template_desc-head_T1w",
"from-T1w_to-template_mode-image_xfm",
"from-template_to-T1w_mode-image_xfm",
"space-template_desc-brain_bold",
"space-template_desc-scout_bold"],
"outputs": ["space-fsLR_den-32k_bold-dtseries"]}
'''
wf, outputs = surface_connector(wf, cfg, strat_pool, pipe_num, opt)
return (wf, outputs)
|
bergolho1337/URI-Online-Judge
|
Basicos/Python/1060/main.py
|
Python
|
gpl-2.0
| 173 | 0 |
# -*- coding: utf-8 -
|
*-
cont = 0
v = []
for i in range(
|
6):
v.append(float(raw_input()))
if (v[i] > 0):
cont = cont + 1
print("%d valores positivos" % cont)
|
sgraham/nope
|
tools/grit/grit/tool/build_unittest.py
|
Python
|
bsd-3-clause
| 11,256 | 0.00613 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the 'grit build' tool.
'''
import codecs
import os
import sys
import tempfile
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import build
class BuildUnittest(unittest.TestCase):
def testFindTranslationsWithSubstitutions(self):
# This is a regression test; we had a bug where GRIT would fail to find
# messages with substitutions e.g. "Hello [IDS_USER]" where IDS_USER is
# another <message>.
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
builder.Run(DummyOpts(), ['-o', output_dir])
def testGenerateDepFile(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file = os.path.join(output_dir, 'substitute.grd.d')
builder.Run(DummyOpts(), ['-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file])
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_output_file, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual("resource.h", dep_output_file)
self.failUnlessEqual(1, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
def testGenerateDepFileWithResourceIds(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute_no_ids.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file = os.path.join(output_dir, 'substitute_no_ids.grd.d')
builder.Run(DummyOpts(),
['-f', util.PathFromRoot('grit/testdata/resource_ids'),
'-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file])
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_output_file, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual("resource.h", dep_output_file)
self.failUnlessEqual(2, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
self.failUnlessEqual(deps[1],
util.PathFromRoot('grit/testdata/resource_ids'))
def testAssertOutputs(self):
output_dir = tempfile.mkdtemp()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
# Incomplete output file list should fail.
builder_fail = build.RcBuilder()
self.failUnlessEqual(2,
builder_fail.Run(DummyOpts(), [
'-o', output_dir,
'-a', os.path.abspath(
os.path.join(output_dir, 'en_generated_resources.rc'))]))
# Complete output file list should succeed.
builder_ok = build.RcBuilder()
self.failUnlessEqual(0,
builder_ok.Run(DummyOpts(), [
'-o', output_dir,
'-a', os.path.abspath(
os.path.join(output_dir, 'en_generated_resources.rc')),
'-a', os.path.abspath(
os.path.join(output_dir, 'sv_generated_resources.rc')),
'-a', os.path.abspath(
os.path.join(output_dir, 'resource.h'))]))
def _verifyWhitelistedOutput(self,
filename,
whitelisted_ids,
non_whitelisted_ids,
encoding='utf8'):
self.failUnless(os.path.exists(filename))
whitelisted_ids_found = []
non_whitelisted_ids_found = []
with codecs.open(filename, encoding=encoding) as f:
for line in f.readlines():
for whitelisted_id in whitelisted_ids:
if whitelisted_id in line:
whitelisted_ids_found.append(whitelisted_id)
for non_whitelisted_id in non_whitelisted_ids:
if non_whitelisted_id in line:
non_whitelisted_ids_found.append(non_whitelisted_id)
self.longMessage = True
self.assertEqual(whitelisted_ids,
whitelisted_ids_found,
'\nin file {}'.format(os.path.basename(filename)))
non_whitelisted_msg = ('Non-Whitelisted IDs {} found in {}'
.format(non_whitelisted_ids_found, os.path.basename(filename)))
self.assertFalse(non_whitelisted_ids_found, non_whitelisted_msg)
def testWhitelistStrings(self):
    """Build with a whitelist file and check that only whitelisted
    string ids appear in the generated header and .rc outputs."""
    out_dir = tempfile.mkdtemp()

    class DummyOpts(object):
        def __init__(self):
            self.input = util.PathFromRoot('grit/testdata/whitelist_strings.grd')
            self.verbose = False
            self.extra_verbose = False

    build.RcBuilder().Run(DummyOpts(), [
        '-o', out_dir,
        '-w', util.PathFromRoot('grit/testdata/whitelist.txt')])

    wanted = ['IDS_MESSAGE_WHITELISTED']
    unwanted = ['IDS_MESSAGE_NOT_WHITELISTED']
    # The header is utf8; the .rc file is emitted as utf16.
    self._verifyWhitelistedOutput(
        os.path.join(out_dir, 'whitelist_test_resources.h'),
        wanted,
        unwanted,
    )
    self._verifyWhitelistedOutput(
        os.path.join(out_dir, 'en_whitelist_test_strings.rc'),
        wanted,
        unwanted,
        encoding='utf16'
    )
def testWhitelistResources(self):
    """Build with a whitelist file and verify that only whitelisted
    resource ids survive in the generated header and map source."""
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()

    class DummyOpts(object):
        def __init__(self):
            self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
            self.verbose = False
            self.extra_verbose = False

    whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '-w', whitelist_file])
    header = os.path.join(output_dir, 'whitelist_test_resources.h')
    map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
    # Reconstructed from a corrupted line: the .h companion of map_cc.
    map_h = os.path.join(output_dir, 'whitelist_test_resources_map.h')
    pak = os.path.join(output_dir, 'whitelist_test_resources.pak')

    # Ensure the resource map header and .pak files exist, but don't verify
    # their content.
    self.failUnless(os.path.exists(map_h))
    self.failUnless(os.path.exists(pak))

    whitelisted_ids = [
        'IDR_STRUCTURE_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
        'IDR_INCLUDE_WHITELISTED',
    ]
    non_whitelisted_ids = [
        'IDR_STRUCTURE_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
        'IDR_INCLUDE_NOT_WHITELISTED',
    ]
    for output_file in (header, map_cc):
        self._verifyWhitelistedOutput(
            output_file,
            whitelisted_ids,
            non_whitelisted_ids,
        )
def testOutputAllResourceDefinesTrue(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
self.verbose = False
self.extra_verbose = False
whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
builder.Run(DummyOpts(), ['-o', output_dir,
'-w', whitelist_file,
'--output-all-reso
|
ganeshrn/ansible
|
test/units/parsing/yaml/test_loader.py
|
Python
|
gpl-3.0
| 17,230 | 0.002266 |
# coding: utf-8
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from units.compat import unittest
from ansible import errors
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing import vault
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.parsing.yaml.dumper import AnsibleDumper
from units.mock.yaml_helper import YamlTestUtils
from units.mock.vault_helper import TextVaultSecret
from yaml.parser import ParserError
from yaml.scanner import ScannerError
class NameStringIO(StringIO):
    """StringIO whose ``name`` attribute can be assigned.

    In py2.6 ``StringIO`` inherited ``name`` as a read-only property,
    so instances could not be given a file name; declaring a writable
    class-level ``name`` restores assignability.
    """

    # Default file name; callers overwrite this per instance.
    name = None

    def __init__(self, *args, **kwargs):
        super(NameStringIO, self).__init__(*args, **kwargs)
class TestAnsibleLoaderBasic(unittest.TestCase):
    """Parse small YAML documents with AnsibleLoader and verify both the
    decoded values and the (file, line, column) info it attaches.

    NOTE(review): the 16-space indentation inside the StringIO literals
    below is reconstructed from the column numbers the assertions expect
    (column 17); the original whitespace was lost in transit.
    """

    def test_parse_number(self):
        stream = StringIO(u"""
                1
                """)
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, 1)
        # No line/column info saved yet

    def test_parse_string(self):
        stream = StringIO(u"""
                Ansible
                """)
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, u'Ansible')
        self.assertIsInstance(data, text_type)
        self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))

    def test_parse_utf8_string(self):
        stream = StringIO(u"""
                Cafè Eñyei
                """)
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, u'Cafè Eñyei')
        self.assertIsInstance(data, text_type)
        self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))

    def test_parse_dict(self):
        stream = StringIO(u"""
                webster: daniel
                oed: oxford
                """)
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
        self.assertEqual(len(data), 2)
        self.assertIsInstance(list(data.keys())[0], text_type)
        self.assertIsInstance(list(data.values())[0], text_type)

        # Beginning of the first key
        self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
        self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26))
        self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22))

    def test_parse_list(self):
        stream = StringIO(u"""
                - a
                - b
                """)
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, [u'a', u'b'])
        self.assertEqual(len(data), 2)
        self.assertIsInstance(data[0], text_type)

        self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
        self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
        self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))

    def test_parse_short_dict(self):
        stream = StringIO(u"""{"foo": "bar"}""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, dict(foo=u'bar'))

        self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
        self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9))

        stream = StringIO(u"""foo: bar""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, dict(foo=u'bar'))

        self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
        self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6))

    def test_error_conditions(self):
        # An unterminated flow mapping must raise a parser error.
        stream = StringIO(u"""{""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        self.assertRaises(ParserError, loader.get_single_data)

    def test_tab_error(self):
        # A tab used for indentation is rejected by the scanner.
        stream = StringIO(u"""---\nhosts: localhost\nvars:\n  foo: bar\n\tblip: baz""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        self.assertRaises(ScannerError, loader.get_single_data)

    def test_front_matter(self):
        stream = StringIO(u"""---\nfoo: bar""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, dict(foo=u'bar'))

        self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1))
        self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6))

        # Initial indent (See: #6348)
        stream = StringIO(u""" - foo: bar\n   baz: qux""")
        loader = AnsibleLoader(stream, 'myfile.yml')
        data = loader.get_single_data()
        self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}])

        self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2))
        self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4))
        self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9))
        self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9))
class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils):
def setUp(self):
    """Build a VaultLib with one password registered under two vault ids."""
    self.vault_password = "hunter42"
    vault_secret = TextVaultSecret(self.vault_password)
    # The same secret is reachable by its explicit id and as the default.
    self.vault_secrets = [('vault_secret', vault_secret),
                          ('default', vault_secret)]
    self.vault = vault.VaultLib(self.vault_secrets)
@property
def vault_secret(self):
    """First secret usable for encryption, per match_encrypt_secret."""
    return vault.match_encrypt_secret(self.vault_secrets)[1]
def test_wrong_password(self):
    """Decrypting data vaulted with a different password must fail."""
    plaintext = u"Ansible"
    bob_password = "this is a different password"

    bobs_secret = TextVaultSecret(bob_password)
    bobs_secrets = [('default', bobs_secret)]
    bobs_vault = vault.VaultLib(bobs_secrets)

    ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1])

    try:
        self.vault.decrypt(ciphertext)
    except Exception as e:
        # NOTE(review): if decrypt unexpectedly succeeds, no assertion
        # fires at all -- both checks live inside the except block.
        self.assertIsInstance(e, errors.AnsibleError)
        self.assertEqual(e.message, 'Decryption failed (no vault secrets were found that could decrypt)')
def _encrypt_plaintext(self, plaintext):
    """Return *plaintext* encrypted and wrapped as a ``!vault`` YAML scalar."""
    # Construct a yaml repr of a vault by hand
    vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret)

    # add yaml tag
    vaulted_var = vaulted_var_bytes.decode()
    lines = vaulted_var.splitlines()
    lines2 = []
    for line in lines:
        # Indent each ciphertext line so it nests under the block scalar.
        # NOTE(review): the indent width inside this literal may have been
        # wider in the original source -- confirm against upstream.
        lines2.append(' %s' % line)

    vaulted_var = '\n'.join(lines2)
    tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var
    return tagged_vaulted_var
def _build_stream(self, yaml_text):
    """Wrap *yaml_text* in a NameStringIO carrying a fake file name."""
    stream = NameStringIO(yaml_text)
    stream.name = 'my.yml'
    return stream
def _loader(self, stream):
    """AnsibleLoader over *stream* using this test's vault secrets."""
    return AnsibleLoader(stream, vault_secrets=self.vault.secrets)
def _load_yaml(self, yaml_text, password):
    """Parse *yaml_text* and return the single decoded document.

    NOTE(review): *password* is unused here; the secrets come from
    self.vault set up in setUp().
    """
    stream = self._build_stream(yaml_text)
    loader = self._loader(stream)

    data_from_yaml = loader.get_single_data()

    return data_from_yaml
def test_dump_loa
|
Nitrate/Nitrate
|
src/tcms/logs/managers.py
|
Python
|
gpl-2.0
| 527 | 0 |
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.db import models
class TCMSLogManager(models.Manager):
    """Manager exposing log queries scoped to a model class or instance."""

    def for_model(self, model):
        """
        QuerySet for all comments for a particular model (either an instance or
        a class).
        """
        ct = ContentType.objects.get_for_model(model)
        qs = self.get_queryset().filter(content_type=ct)
        if isinstance(model, models.Model):
            # An instance narrows the result down to that single object.
            qs = qs.filter(object_pk=model.pk)
        return qs
|
astroclark/numrel_bursts
|
nrburst_utils/nrburst_pickle_preserve.py
|
Python
|
gpl-2.0
| 2,870 | 0.007666 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 James Clark <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
nrburst_pickle_preserve.py
Crunch together pickles from nrburst_match.py
"""
import sys
import glob
import cPickle as pickle
import numpy as np
# Merge per-sample-run pickles (one per simulation / sample window) into a
# single (nSims x nsampls) set of arrays and re-pickle the result.
# NOTE: py2-era script (cPickle, xrange, zip-as-list indexing).
pickle_files = glob.glob(sys.argv[1]+'*pickle')
user_tag = sys.argv[2]

delta_samp=100
sample_pairs=zip(range(0,1000,delta_samp), range(delta_samp-1,1000,delta_samp))

# Get numbers for pre-allocation
sim_instances = [name.split('-')[1] for name in pickle_files]
sim_names = np.unique(sim_instances)

# XXX: assume all sample runs have same number of jobs..
n_sample_runs = sim_instances.count(sim_names[0])

# Load first one to extract data for preallocation
current_matches, current_masses, current_inclinations, config, \
        simulations = pickle.load(open(pickle_files[0],'r'))

nSims = len(sim_names)
nsampls = config.nsampls * n_sample_runs

# --- Preallocate
matches = np.zeros(shape=(nSims, nsampls))
masses = np.zeros(shape=(nSims, nsampls))
inclinations = np.zeros(shape=(nSims, nsampls))

# be a bit careful with the simulations object
setattr(simulations, 'simulations', [])
setattr(simulations, 'nsimulations', nSims)

for f, name in enumerate(sim_names):
    startidx = 0
    endidx = len(current_matches[0])
    for s in xrange(n_sample_runs):
        if n_sample_runs > 1:
            # Pick the pickle for this simulation and sample window.
            file = glob.glob('*%s-minsamp_%d-maxsamp_%d*'%(
                name, min(sample_pairs[s]), max(sample_pairs[s])))[0]
        else:
            file = pickle_files[f]

        current_matches, current_masses, current_inclinations, config, \
                current_simulations = pickle.load(open(file,'r'))

        # Stack this window's samples into the preallocated row.
        matches[f,startidx:endidx] = current_matches[0]
        masses[f,startidx:endidx] = current_masses[0]
        inclinations[f,startidx:endidx] = current_inclinations[0]

        startidx += len(current_matches[0])
        endidx = startidx + len(current_matches[0])

    # Reconstructed from a corrupted line: keep one simulation object per name.
    simulations.simulations.append(current_simulations.simulations[0])

filename = user_tag+'_'+config.algorithm+'.pickle'
pickle.dump([matches, masses, inclinations, config, simulations],
        open(filename, "wb"))
|
redpawfx/massiveImporter
|
python/ns/tests/TestGenotype.py
|
Python
|
mit
| 2,335 | 0.02227 |
# The MIT License
#
# Copyright (c) 2008 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import unittest
import difflib
import filecmp
import ns.bridge.io.CDLReader as CDLReader
import ns.evolve.Genotype as Genotype
class TestGenotype(unittest.TestCase):
    """Exercise Genotype output-channel discovery from an embedded CDL."""

    def setUp(self):
        # Scratch location any test may write a CDL file to.
        self.scratchFile = "R:/scratch/out.cdl"

    def tearDown(self):
        try:
            os.remove(self.scratchFile)
        except OSError:
            # The scratch file may never have been created; that's fine.
            pass

    def testOutputChannels(self):
        ''' Test that the Genotype calculates the right output channels. '''
        input = "R:/massive/testdata/cdl/man/CDL/embedded_simple.cdl"
        agentSpec = CDLReader.read(input, CDLReader.kEvolveTokens)
        geno = Genotype.Genotype(agentSpec)
        # "shirt_map" reconstructed from a corrupted, split string literal.
        expectedChannels = [ "ty", "rx", "ry", "rz",
                             "height", "leg_length", "shirt_map",
                             "walk", "walk_45L", "walk_sad",
                             "walk->walk_45L", "walk->walk_sad",
                             "walk_45L->walk", "walk_45L->walk_sad",
                             "walk_sad->walk", "walk_sad->walk_45L",
                             "walk:rate", "walk_45L:rate", "walk_sad:rate" ]
        self.assertEqual(sorted(expectedChannels), sorted(geno.outputChannels()))
        #os.system("xdiff.exe %s %s" % (input, self.scratchFile))
suite = unittest.TestLoader().loadTestsFromTestCase(TestGenotype)
|
bozzmob/dxr
|
dxr/plugins/python/menus.py
|
Python
|
mit
| 674 | 0 |
from dxr.lines import Ref
from dxr.utils import search_url
class _PythonPluginAttr(object):
    """Mixin that tags a ref as belonging to the python plugin."""

    plugin = 'python'
class ClassRef(Ref, _PythonPluginAttr):
    """A reference attached to a class definition"""

    def menu_items(self):
        """Yield context-menu entries for navigating the class hierarchy."""
        # menu_data carries the fully qualified name of the class.
        qualname = self.menu_data
        yield {'html': 'Find subclasses',
               'title': 'Find subclasses of this class',
               'href': search_url(self.tree, '+derived:' + qualname),
               'icon': 'type'}
        yield {'html': 'Find base classes',
               'title': 'Find base classes of this class',
               'href': search_url(self.tree, '+bases:' + qualname),
               'icon': 'type'}
|
zengchunyun/s12
|
day9/temp/ext/twisted_client.py
|
Python
|
gpl-2.0
| 935 | 0.00107 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
from twisted.internet import reactor
from twisted.internet import protocol
class EchoClient(protocol.Protocol):
    """Sends one greeting, prints the echoed reply, then disconnects."""

    def connectionMade(self):
        # Fire the payload as soon as the TCP connection is established.
        self.transport.write(bytes("hello zengchunyun", "utf8"))

    def dataReceived(self, data):
        print("server said: {}".format(data))
        # One round-trip is all we want; hang up after the reply.
        self.transport.loseConnection()

    def connectionLost(self, reason):
        print("Connection lost")
class EchoFactory(protocol.ClientFactory):
    """Builds EchoClient connections; stops the reactor when the
    connection fails or is lost so the script can exit."""

    protocol = EchoClient

    def clientConnectionFailed(self, connector, reason):
        print("Connection failed - goodbye")
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print("Connection lost - goodbye")
        # Reconstructed from a corrupted line ("react|or.stop()").
        reactor.stop()
def main():
    """Connect the echo client to localhost:1234 and run the reactor."""
    f = EchoFactory()
    reactor.connectTCP("localhost", 1234, f)
    # Blocks until reactor.stop() is called by the factory callbacks.
    reactor.run()

if __name__ == "__main__":
    main()
|
tobiashelfenstein/wuchshuellenrechner
|
wuchshuellenrechner/lib/xml.py
|
Python
|
gpl-3.0
| 6,767 | 0.005619 |
"""
xml.py -
Copyright (C) 2016 Tobias Helfenstein <[email protected]>
Copyright (C) 2016 Anton Hammer <[email protected]>
Copyright (C) 2016 Sebastian Hein <[email protected]>
Copyright (C) 2016 Hochschule für Forstwirtschaft Rottenburg <[email protected]>
This file is part of Wuchshüllenrechner.
Wuchshüllenrechner is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Wuchshüllenrechner is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xml.etree.ElementTree as ElementTree
import logging
import os
import lib.files as FileHandling
from lib.variants import Fence, Tube, VariantItem, Project
class XMLFileWriter():
    """
    Serializes a project and its variant item tree to an XML file.

    The writer emits a <project> root with a <header> holding the
    project fields and a <data> element holding one <variant> entry
    per item (nested <children> for sub-items).
    """

    def __init__(self):
        """The constructor initializes the class XMLFileWriter."""
        pass

    def writeXMLFile(self, project, items, xmlfile):
        """Write *project*/*items* to *xmlfile*; True on success.

        Raises ValueError when *xmlfile* is not a valid .xml name.
        When the target already exists, writes to a *_tmp.xml sibling
        first and renames over the original only after a clean write.
        """
        # check, if the filename is valid
        if not FileHandling.filename_is_valid(xmlfile, "xml"):
            raise ValueError("file name not valid")

        newfile = xmlfile.strip()

        # temporarily add _tmp to the filename, if the file already exists
        exists = os.path.isfile(newfile)
        if exists:
            newfile = newfile.replace(".xml", "_tmp.xml", 1)

        # write all XML data to the file
        try:
            xml = self.createXMLData(project, items)
            xml.write(newfile, xml_declaration=True, encoding="utf-8")

            if exists:
                # Replace the old file only after the new one is complete.
                os.remove(xmlfile.strip())
                os.rename(newfile, xmlfile.strip())
        except OSError:
            return False

        return True

    def createXMLData(self, project, items):
        """Build and return the ElementTree for *project* and *items*."""
        # create root element with its project information
        root = ElementTree.Element("project")
        header = ElementTree.SubElement(root, "header")

        for p in project:
            data = ElementTree.SubElement(header, p)
            data.text = str(project[p]).strip()

        # create item list with its entries
        dataset = ElementTree.SubElement(root, "data")
        self.writeItemTree(items, dataset)

        return ElementTree.ElementTree(root)

    def writeItemTree(self, tree, element):
        """Recursively serialize *tree*'s children under *element*."""
        for child in tree.children:
            item, plant, protection = child.prepareSerialization()

            # first serialize item's general data without an own element
            variantEntry = self.writeTags(item, "variant")
            element.append(variantEntry)

            # serialize plant data with its own element
            plantElement = self.writeTags(plant, "plant")
            variantEntry.append(plantElement)

            # at last serialize protection data with its own element
            protectionElement = self.writeTags(protection, "protection")
            variantEntry.append(protectionElement)

            if child.hasChildren():
                childrenElement = ElementTree.SubElement(variantEntry, "children")
                self.writeItemTree(child, childrenElement)

    def writeTags(self, dictionary, name):
        """Return a new Element *name* with one child per dict entry."""
        # avoid side effects
        element = ElementTree.Element(name.strip())

        for key, value in dictionary.items():
            data = ElementTree.SubElement(element, key)
            data.text = str(value).strip()

        return element
class XMLFileReader():
    """Reads a project and its variant item tree back from an XML file
    previously produced by XMLFileWriter."""

    def __init__(self):
        """The constructor initializes the class XMLFileReader."""
        # Parsed document root; stays None until readXMLFile() succeeds.
        self.root = None

    def readXMLFile(self, xmlfile):
        """Parse *xmlfile* and remember its root; False on parse errors.

        Raises ValueError when *xmlfile* is not a valid .xml name.
        """
        # check, if the filename is valid
        if not FileHandling.filename_is_valid(xmlfile, "xml"):
            raise ValueError("file name not valid")

        try:
            xml = ElementTree.parse(xmlfile)
            self.root = xml.getroot()
        except ElementTree.ParseError:
            return False

        return True

    def readItemTree(self, element):
        """Deserialize every <variant> entry under *element*, recursing
        into nested <children> elements; returns a flat item list."""
        items = []
        for variantEntry in element:
            kind = int(variantEntry.findtext("type", "-1"))
            child = VariantItem(protection=kind)
            item, plant, protection = child.prepareSerialization()

            # first read item's general data
            item = self.readTags(item, variantEntry)

            # read plant data
            plant = self.readTags(plant, variantEntry.find("plant"))

            # at last read protection data
            protection = self.readTags(protection, variantEntry.find("protection"))

            # add item to list
            child.deserialize((item, plant, protection))
            items.append(child)

            childrenElement = variantEntry.find("children")
            # Explicit None check: an Element with no sub-elements is
            # falsy, so a bare truth test would skip an empty <children>
            # (and raises a FutureWarning in newer ElementTree versions).
            if childrenElement is not None:
                items.extend(self.readItemTree(childrenElement))
        return items

    def readTags(self, dictionary, element):
        """Return a new dict with *dictionary*'s keys re-read from
        *element*'s child texts, coerced to each value's current type."""
        # avoid side effects
        tags = {}
        for key in dictionary:
            kind = type(dictionary[key])
            data = element.findtext(key)
            # Empty/missing texts are skipped, keeping the key absent.
            if data:
                tags[key] = kind(data)
        return tags

    def getProject(self):
        """Project header fields as a dict, or None if unavailable."""
        # Identity check instead of truthiness: a childless root Element
        # is falsy but still a successfully loaded document.
        if self.root is None:
            return None

        project = Project()
        header = self.root.find("header")
        if header is not None:
            return self.readTags(project.__dict__, header)
        else:
            return None

    def getItems(self):
        """Flat list of deserialized items, or None if unavailable."""
        if self.root is None:
            return None

        dataset = self.root.find("data")
        try:
            items = self.readItemTree(dataset)
        except TypeError:
            # No <data> element: find() returned None, which is not iterable.
            return None
        return items
|
glomex/gcdt-bundler
|
tests/test_bundler_utils.py
|
Python
|
mit
| 2,286 | 0.000875 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import fnmatch
import logging
from gcdt_bundler.bundler_utils import glob_files, get_path_info
from . import here
ROOT_DIR = here('./resources/static_files')
log = logging.getLogger(__name__)
def test_find_two_files():
    """Globbing a/** yields both files under a/ (order unspecified)."""
    result = list(glob_files(ROOT_DIR, ['a/**']))
    # Order of results is not guaranteed, hence membership checks below.
    #assert list(result) == [
    #    (ROOT_DIR + '/a/aa.txt', 'a/aa.txt'),
    #    (ROOT_DIR + '/a/ab.txt', 'a/ab.txt')
    #]
    assert (ROOT_DIR + '/a/aa.txt', 'a/aa.txt') in result
    assert (ROOT_DIR + '/a/ab.txt', 'a/ab.txt') in result
def test_default_include():
    """Without include patterns, every file under ROOT_DIR is matched."""
    result = list(glob_files(ROOT_DIR))
    assert (ROOT_DIR + '/a/aa.txt', 'a/aa.txt') in result
    assert (ROOT_DIR + '/a/ab.txt', 'a/ab.txt') in result
    assert (ROOT_DIR + '/b/ba.txt', 'b/ba.txt') in result
    assert (ROOT_DIR + '/b/bb.txt', 'b/bb.txt') in result
def test_later_include_has_precedence():
    """Overlapping include patterns still yield each file once."""
    # note: this testcase is not exactly relevant any more since the tag
    # mechanism has been removed
    result = list(glob_files(ROOT_DIR, ['**', 'a/**']))
    assert (ROOT_DIR + '/b/ba.txt', 'b/ba.txt') in result
    assert (ROOT_DIR + '/b/bb.txt', 'b/bb.txt') in result
    assert (ROOT_DIR + '/a/aa.txt', 'a/aa.txt') in result
    assert (ROOT_DIR + '/a/ab.txt', 'a/ab.txt') in result
def test_exclude_file():
    """An exclude pattern removes the matching file from the results."""
    result = glob_files(ROOT_DIR, ['a/**'], ['a/aa.txt'])
    # Reconstructed from a corrupted, split tuple literal.
    assert list(result) == [
        (ROOT_DIR + '/a/ab.txt', 'a/ab.txt')
    ]
def test_exclude_file_with_gcdtignore():
    """A gcdtignore entry excludes files the include pattern matched."""
    result = glob_files(ROOT_DIR, ['a/**'],
                        gcdtignore=['aa.txt'])
    assert list(result) == [
        (ROOT_DIR + '/a/ab.txt', 'a/ab.txt')
    ]
def test_how_crazy_is_it():
    """Sanity check: fnmatch's '**' wildcard spans path separators."""
    path, pattern = '/a/b/c/d.txt', '/a/**/d.txt'
    assert fnmatch.fnmatchcase(path, pattern)
def test_get_path_info_relative():
    """A relative source resolves against cwd; empty target maps to '/'."""
    path = {'source': 'codedeploy', 'target': ''}
    base, ptz, target = get_path_info(path)
    assert base == os.getcwd()
    assert ptz == 'codedeploy'
    assert target == '/'
def test_get_path_info_abs():
    """An absolute source splits into cwd base and relative tail."""
    path = {'source': os.getcwd() + '/codedeploy', 'target': ''}
    base, ptz, target = get_path_info(path)
    assert base == os.getcwd()
    assert ptz == 'codedeploy'
    assert target == '/'
|
Arno-Nymous/pyload
|
module/plugins/accounts/OverLoadMe.py
|
Python
|
gpl-3.0
| 1,824 | 0.001096 |
# -*- coding: utf-8 -*-
from ..internal.misc import json
from ..internal.MultiAccount import MultiAccount
class OverLoadMe(MultiAccount):
    """Account plugin for the Over-Load.me multi-hoster service."""

    __name__ = "OverLoadMe"
    __type__ = "account"
    __version__ = "0.13"
    __status__ = "testing"

    __config__ = [("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
                  ("mh_list", "str", "Hoster list (comma separated)", ""),
                  ("mh_interval", "int", "Reload interval in hours", 12)]

    __description__ = """Over-Load.me account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("marley", "[email protected]")]

    def grab_hosters(self, user, password, data):
        """Return the hoster domains supported by the service."""
        html = self.load("https://api.over-load.me/hoster.php",
                         get={'auth': "0001-cb1f24dadb3aa487bda5afd3b76298935329be7700cd7-5329be77-00cf-1ca0135f"})
        # Response is a quoted, comma-separated list; strip and drop empties.
        return [x for x in map(
            str.strip, html.replace("\"", "").split(",")) if x]

    def grab_info(self, user, password, data):
        """Return premium status, expiry and traffic for the account."""
        html = self.load("https://api.over-load.me/account.php",
                         get={'user': user,
                              'auth': password}).strip()
        data = json.loads(html)
        self.log_debug(data)

        #: Check for premium
        if data['membership'] == "Free":
            return {'premium': False, 'validuntil': None, 'trafficleft': None}
        else:
            # -1 traffic means unlimited in pyLoad's account model.
            return {'premium': True,
                    'validuntil': data['expirationunix'],
                    'trafficleft': -1}

    def signin(self, user, password, data):
        """Validate the credentials; fail the login on an API error."""
        html = self.load("https://api.over-load.me/account.php",
                         get={'user': user,
                              'auth': password}).strip()
        data = json.loads(html)
        if data['err'] == 1:
            self.fail_login()
|
bigswitch/snac-nox-zaku
|
src/nox/lib/packet/dhcp.py
|
Python
|
gpl-3.0
| 8,935 | 0.005708 |
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
#
# DHCP Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | op (1) | htype (1) | hlen (1) | hops (1) |
# +---------------+---------------+---------------+---------------+
# | xid (4) |
# +-------------------------------+-------------------------------+
# | secs (2) | flags (2) |
# +-------------------------------+-------------------------------+
# | ciaddr (4) |
# +---------------------------------------------------------------+
# | yiaddr (4) |
# +---------------------------------------------------------------+
# | siaddr (4) |
# +---------------------------------------------------------------+
# | giaddr (4) |
# +---------------------------------------------------------------+
# | |
# | chaddr (16) |
# | |
# | |
# +---------------------------------------------------------------+
# | |
# | sname (64) |
# +---------------------------------------------------------------+
# | |
# | file (128) |
# +---------------------------------------------------------------+
# | |
# |                          options (variable)                   |
# +---------------------------------------------------------------+
#
#======================================================================
import struct
from packet_utils import *
from packet_exceptions import *
from array import *
from packet_base import packet_base
class dhcp(packet_base):
    "DHCP Packet struct"

    # End of the fixed-layout prefix unpacked in parse() (28 bytes).
    STRUCT_BOUNDARY = 28
    # Fixed header plus magic cookie (see the layout diagram above).
    MIN_LEN = 240

    SERVER_PORT = 67
    CLIENT_PORT = 68

    BROADCAST_FLAG = 0x8000

    # op field values.
    BOOTREQUEST = 1
    BOOTREPLY = 2

    # DHCP message-type option (option 53) and its message codes.
    MSG_TYPE_OPT = 53
    NUM_MSG_TYPES = 8
    DISCOVER_MSG = 1
    OFFER_MSG = 2
    REQUEST_MSG = 3
    DECLINE_MSG = 4
    ACK_MSG = 5
    NAK_MSG = 6
    RELEASE_MSG = 7
    INFORM_MSG = 8

    # Commonly used option codes.
    SUBNET_MASK_OPT = 1
    GATEWAY_OPT = 3
    DNS_SERVER_OPT = 6
    HOST_NAME_OPT = 12
    DOMAIN_NAME_OPT = 15
    MTU_OPT = 26
    BCAST_ADDR_OPT = 28
    REQUEST_IP_OPT = 50
    REQUEST_LEASE_OPT = 51
    OVERLOAD_OPT = 52
    SERVER_ID_OPT = 54
    PARAM_REQ_OPT = 55
    T1_OPT = 58
    T2_OPT = 59
    CLIENT_ID_OPT = 61

    # Option-stream framing: padding and end-of-options markers.
    PAD_OPT = 0
    END_OPT = 255

    # DHCP magic cookie bytes (99.130.83.99).
    MAGIC = array('B', '\x63\x82\x53\x63')
def __init__(self, arr=None, prev=None):
    """Create an empty packet (no *prev*) or parse *arr* in place.

    arr  -- raw packet bytes (str or array('B')); parsed immediately
            when *prev* is given.
    prev -- the enclosing packet layer, if any.
    """
    self.prev = prev
    if self.prev == None:
        # Fresh, zeroed packet to be filled in by the caller.
        self.op = 0
        self.htype = 0
        self.hlen = 0
        self.hops = 0
        self.xid = 0
        self.secs = 0
        self.flags = 0
        self.ciaddr = 0
        self.yiaddr = 0
        self.siaddr = 0
        self.giaddr = 0
        self.chaddr = array('B')
        self.sname = array('B')
        self.file = array('B')
        self.magic = array('B')
        self.options = array('B')
        self.parsedOptions = {}
    else:
        # Normalize str input to a byte array before parsing.
        if type(arr) == type(''):
            arr = array('B', arr)
        assert(type(arr) == array)
        self.arr = arr
        self.parse()
def __str__(self):
    """Human-readable dump of the header fields; empty when unparsed."""
    if self.parsed == False:
        return ""

    return ' '.join(('[','op:'+str(self.op),'htype:'+str(self.htype), \
                'hlen:'+str(self.hlen),'hops:'+str(self.hops), \
                'xid:'+str(self.xid),'secs:'+str(self.secs), \
                'flags:'+str(self.flags), \
                'ciaddr:'+ip_to_str(self.ciaddr), \
                'yiaddr:'+ip_to_str(self.yiaddr), \
                'siaddr:'+ip_to_str(self.siaddr), \
                'giaddr:'+ip_to_str(self.giaddr), \
                'chaddr:'+mac_to_str(self.chaddr[:self.hlen]), \
                'magic:'+str(self.magic), \
                'options:'+str(self.options),']'))
def parse(self):
dlen = len(self.arr)
if dlen < dhcp.MIN_LEN:
print '(dhcp parse) warning DHCP packet data too short to parse header: data len %u' % dlen
return None
(self.op, self.htype, self.hlen, self.hops, self.xid, self.secs, \
self.flags, self.ciaddr, self.yiaddr, self.siaddr, self.giaddr) \
= struct.unpack('!BBBBIHHIIII', self.arr[:28])
self.chaddr = self.arr[28:44]
self.sname = self.arr[44:108]
self.file = self.arr[102:236]
self.magic = self.arr[236:240]
self.hdr_len = dlen
self.parsed = True
if self.hlen > 16:
print '(dhcp parse) DHCP hlen %u too long' % self.hlen
return
for i in range(4):
if dhcp.MAGIC[i] != self.magic[i]:
print '(dhcp parse) bad DHCP magic value %s' % str(self.magic)
return
self.parsedOptions = {}
self.options = self.arr[240:]
self.parseOptions()
self.parsed = True
def parseOptions(self):
    """Parse self.options; honor option 52 (overload), which redirects
    option parsing into the file and/or sname header fields."""
    self.parsedOptions = {}
    self.parseOptionSegment(self.options)
    if self.parsedOptions.has_key(dhcp.OVERLOAD_OPT):
        opt_val = self.parsedOptions[dhcp.OVERLOAD_OPT]
        if opt_val[0] != 1:
            print 'DHCP overload option has bad len %u' % opt_val[0]
            return
        # Overload value 1 = options continue in 'file', 2 = in 'sname',
        # 3 = in both (RFC 2132, section 9.3).
        if opt_val[1] == 1 or opt_val[1] == 3:
            self.parseOptionSegment(self.file)
        if opt_val[1] == 2 or opt_val[1] == 3:
            self.parseOptionSegment(self.sname)
def parseOptionSegment(self, barr):
    """Parse one TLV option segment into self.parsedOptions.

    Each entry maps option code -> raw value bytes; duplicates keep
    the first occurrence. Returns False on a truncated option.
    """
    ofs = 0;
    # NOTE(review): 'len' shadows the builtin for the rest of this
    # method (buffer_info()[1] is the element count of the array).
    len = barr.buffer_info()[1]
    while ofs < len:
        opt = barr[ofs]
        if opt == dhcp.END_OPT:
            return
        ofs += 1
        if opt == dhcp.PAD_OPT:
            continue
        if ofs >= len:
            print 'DHCP option ofs extends past segment'
            return
        opt_len = barr[ofs]
        ofs += 1         # Account for the length octet
        if ofs + opt_len > len:
            # Truncated option value.
            return False
        if self.parsedOptions.has_key(opt):
            print '(parseOptionSegment) ignoring duplicate DHCP option: %d' % opt
        else:
            self.parsedOptions[opt] = barr[ofs:ofs+opt_len]
        ofs += opt_len
    # NOTE(review): reached whenever the loop exhausts the segment
    # without an END option -- including ordinary exhaustion.
    print 'DHCP end of option segment before END option'
def hdr(self):
fmt = '!BBBBIHHIIII16s64s128s4s%us' % self.options.buffer_info()[1]
retu
|
supermari0/config
|
utils/pwgen.py
|
Python
|
gpl-2.0
| 183 | 0.005464 |
#!/usr/local/bin/python3
# Print one 16-character random password drawn from letters, digits and
# punctuation, using the cryptographically secure 'secrets' module.
import secrets, string

length = 16
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
print(''.join(secrets.choice(chars) for i in range(length)))
|
yourcelf/btb
|
printing/print_mail.py
|
Python
|
agpl-3.0
| 4,703 | 0.002126 |
import os
import sys
import glob
import json
import subprocess
from collections import defaultdict
from utils import UnicodeReader, slugify, count_pages, combine_pdfs, parser
import addresscleaner
from click2mail import Click2MailBatch
parser.add_argument("directory", help="Path to downloaded mail batch")
parser.add_argument("--skip-letters", action='store_true', default=False)
parser.add_argument("--skip-postcards", action='store_true', default=False)
def fix_lines(address):
    """
    Click2Mail screws up addresses with 3 lines. If we have only one address
    line, put it in "address1". If we have more, put the first in
    "organization", and subsequent ones in "addressN".
    """
    keys = ('organization', 'address1', 'address2', 'address3')
    # Collect the non-empty lines in display order.
    present = [value for value in (address.get(key) for key in keys) if value]
    if present:
        # A lone line belongs in address1, so shift it past organization.
        if len(present) == 1:
            present = [''] + present
        # Pad to exactly four slots and write them back in order.
        padded = (present + ['', '', '', ''])[:4]
        for key, value in zip(keys, padded):
            address[key] = value
    return address
def collate_letters(mailing_dir, letters, page=1):
    """Group letters by (recipient, sender) and assign contiguous page ranges.

    mailing_dir -- directory holding the letter PDF files
    letters     -- list of dicts with 'recipient', 'sender' and 'file' keys
    page        -- first page number to assign

    Returns (files, jobs, next_page) where files is the ordered list of PDF
    paths and jobs is a list of job dicts sorted by starting page.
    """
    # Sort by recipient.
    recipient_letters = defaultdict(list)
    for letter in letters:
        recipient_letters[(letter['recipient'], letter['sender'])].append(letter)
    # Assemble list of files and jobs.
    files = []
    jobs = {}
    # .items() instead of py2-only .iteritems() keeps this 2/3 compatible.
    for (recipient, sender), pair_letters in recipient_letters.items():
        count = 0
        for letter in pair_letters:
            filename = os.path.join(mailing_dir, letter["file"])
            files.append(filename)
            count += count_pages(filename)
        end = page + count
        jobs[recipient] = {
            "startingPage": page,
            "endingPage": end - 1,
            "recipients": [fix_lines(addresscleaner.parse_address(recipient))],
            "sender": addresscleaner.parse_address(sender),
            "type": "letter"
        }
        page = end
    # sorted() works on py3 dict views; list.sort() on .values() does not.
    vals = sorted(jobs.values(), key=lambda j: j['startingPage'])
    return files, vals, page
def collate_postcards(postcards, page=1):
    """Group postcards by (type, sender); each group becomes a one-page job.

    postcards -- list of dicts with 'type', 'sender' and 'recipient' keys
    page      -- first page number to assign

    Returns (files, jobs, next_page); each job references the single PDF
    template page for its postcard type.
    """
    # Collate postcards into a list per type and sender.
    type_sender_postcards = defaultdict(list)
    for letter in postcards:
        key = (letter['type'], letter['sender'])
        type_sender_postcards[key].append(letter)
    files = []
    jobs = []
    # .items() instead of py2-only .iteritems() keeps this 2/3 compatible.
    for (postcard_type, sender), letters in type_sender_postcards.items():
        files.append(os.path.join(
            os.path.dirname(__file__),
            "postcards",
            "{}.pdf".format(postcard_type)
        ))
        jobs.append({
            # The template is one page, so the job starts and ends on it.
            "startingPage": page + len(files) - 1,
            "endingPage": page + len(files) - 1,
            "recipients": [
                fix_lines(addresscleaner.parse_address(letter['recipient']))
                for letter in letters
            ],
            "sender": addresscleaner.parse_address(sender),
            "type": "postcard",
        })
    return files, jobs, page + len(files)
def run_batch(args, files, jobs):
filename = combine_pdfs(files)
|
print "Building job with", filename
batch = Click2MailBatch(
username=args.username,
password=args.password,
filename=filename,
jobs=jobs,
staging=args.staging)
if batch.run(args.dry_run):
os.remove(filename)
def main():
args = parser.parse_args()
if args.directory.endswith(".zip"):
directory = os.path.abspath(args.directory[0:-len(".zip")])
if not os.path.exists(di
|
rectory):
subprocess.check_call([
"unzip", args.directory, "-d", os.path.dirname(args.directory)
])
else:
directory = args.directory
with open(os.path.join(directory, "manifest.json")) as fh:
manifest = json.load(fh)
if manifest["letters"] and not args.skip_letters:
lfiles, ljobs, lpage = collate_letters(directory, manifest["letters"], 1)
print "Found", len(ljobs), "letter jobs"
if ljobs:
run_batch(args, lfiles, ljobs)
if manifest["postcards"] and not args.skip_postcards:
pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
print "Found", len(pjobs), "postcard jobs"
if pjobs:
run_batch(args, pfiles, pjobs)
if __name__ == "__main__":
main()
|
ChristianKniep/QNIB
|
serverfiles/usr/local/lib/networkx-1.6/networkx/tests/test_relabel.py
|
Python
|
gpl-2.0
| 5,962 | 0.032372 |
#!/usr/bin/env python
from nose.tools import *
from networkx import *
from networkx.convert import *
from networkx.algorithms.operators import *
from networkx.generators.classic import barbell_graph,cycle_graph
class TestRelabel():
    def test_convert_node_labels_to_integers(self):
        """Exercise convert_node_labels_to_integers: empty graphs, offsets,
        and every supported node ordering option."""
        # test that empty graph converts fine for all options
        G=empty_graph()
        H=convert_node_labels_to_integers(G,100)
        assert_equal(H.name, '(empty_graph(0))_with_int_labels')
        assert_equal(H.nodes(), [])
        assert_equal(H.edges(), [])
        for opt in ["default", "sorted", "increasing degree",
                    "decreasing degree"]:
            G=empty_graph()
            H=convert_node_labels_to_integers(G,100, ordering=opt)
            assert_equal(H.name, '(empty_graph(0))_with_int_labels')
            assert_equal(H.nodes(), [])
            assert_equal(H.edges(), [])
        # A 'paw' graph: triangle A-B-C plus pendant D hanging off C,
        # so the degree sequence is (1, 2, 2, 3).
        G=empty_graph()
        G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
        G.name="paw"
        H=convert_node_labels_to_integers(G)
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        # Relabeling with a first label of 1000 preserves the degree sequence.
        H=convert_node_labels_to_integers(G,1000)
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        assert_equal(H.nodes(), [1000, 1001, 1002, 1003])
        # "increasing degree": node 0 has the smallest degree, node 3 the largest.
        H=convert_node_labels_to_integers(G,ordering="increasing degree")
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        assert_equal(degree(H,0), 1)
        assert_equal(degree(H,1), 2)
        assert_equal(degree(H,2), 2)
        assert_equal(degree(H,3), 3)
        # "decreasing degree": the order above is reversed.
        H=convert_node_labels_to_integers(G,ordering="decreasing degree")
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        assert_equal(degree(H,0), 3)
        assert_equal(degree(H,1), 2)
        assert_equal(degree(H,2), 2)
        assert_equal(degree(H,3), 1)
        # discard_old_labels=False keeps the old->new mapping in H.node_labels.
        H=convert_node_labels_to_integers(G,ordering="increasing degree",
                                          discard_old_labels=False)
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        assert_equal(degree(H,0), 1)
        assert_equal(degree(H,1), 2)
        assert_equal(degree(H,2), 2)
        assert_equal(degree(H,3), 3)
        mapping=H.node_labels
        # C (degree 3) must map to 3, D (degree 1) to 0; A and B (both
        # degree 2) may map to 1 and 2 in either order.
        assert_equal(mapping['C'], 3)
        assert_equal(mapping['D'], 0)
        assert_true(mapping['A']==1 or mapping['A']==2)
        assert_true(mapping['B']==1 or mapping['B']==2)
        # "sorted" ordering relabels nodes in sorted label order.
        G=empty_graph()
        G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
        G.name="paw"
        H=convert_node_labels_to_integers(G,ordering="sorted")
        degH=H.degree().values()
        degG=G.degree().values()
        assert_equal(sorted(degH), sorted(degG))
        H=convert_node_labels_to_integers(G,ordering="sorted",
                                          discard_old_labels=False)
        mapping=H.node_labels
        assert_equal(mapping['A'], 0)
        assert_equal(mapping['B'], 1)
        assert_equal(mapping['C'], 2)
        assert_equal(mapping['D'], 3)
        # An unknown ordering string must raise NetworkXError.
        assert_raises(networkx.exception.NetworkXError,
                      convert_node_labels_to_integers, G,
                      ordering="increasing age")
def test_relabel_nodes_copy(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_function(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
# function mapping no longer encouraged but works
def mapping(n):
return ord(n)
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), [65, 66, 67, 68])
def test_relabel_nodes_graph(self):
G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_digraph(self):
G=DiGraph([('A','B'),('A','C'),('B','C'),('C',
|
'D')])
mapping={'A':'aard
|
vark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_multigraph(self):
G=MultiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_nodes_multidigraph(self):
G=MultiDiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
    @raises(KeyError)
    def test_relabel_nodes_missing(self):
        """A mapping that covers none of the graph's nodes raises KeyError
        when relabeling in place (copy=False)."""
        G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
        # Key 0 does not exist in G; nodes are 'A'..'D'.
        mapping={0:'aardvark'}
        G=relabel_nodes(G,mapping,copy=False)
def test_relabel_toposort(self):
K5=nx.complete_graph(4)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
|
zniper/django-quickadmin
|
setup.py
|
Python
|
mit
| 1,119 | 0.000894 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-quickadmin',
version='0.1.2',
description='Django application automatically registers all found models into admin area',
long_description=long_description,
url='https://github.com/zniper/django-quickadmin',
author='Ha Pham',
auth
|
or_email='[email protected]',
license='MIT',
|
classifiers=[
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='django admin models register python',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=[''],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
|
timdiels/0publish
|
validator.py
|
Python
|
lgpl-2.1
| 2,847 | 0.038286 |
import os
from zeroinstall.injector import namespaces
from zeroinstall.injector.reader import InvalidInterface, load_feed
from xml.dom import minidom, Node, XMLNS_NAMESPACE
import tempfile
from logging import warn, info
# Attributes allowed on both <group> and <implementation> elements;
# groups let implementations share these values.
group_impl_attribs = ['version', 'version-modifier', 'released', 'main', 'stability', 'arch', 'license', 'doc-dir', 'self-test', 'langs', 'local-path']
# Whitelist of Zero Install feed elements mapped to their allowed
# attributes.  Used by checkElement() to warn about unknown (and
# un-namespaced) elements or attributes.
known_elements = {
    'interface' : ['uri', 'min-injector-version', 'main'],	# (main is deprecated)
    'name' : [],
    'summary' : [],
    'description' : [],
    'needs-terminal' : [],
    'homepage' : [],
    'category' : ['type'],
    'icon' : ['type', 'href'],
    'feed' : ['src', 'arch'],
    'feed-for' : ['interface'],
    'group' : group_impl_attribs,
    'implementation' : ['id'] + group_impl_attribs,
    'package-implementation' : ['package', 'main', 'distributions'],
    'manifest-digest' : ['sha1new', 'sha256'],
    'command' : ['name', 'path', 'shell-command'],
    'arg' : [],
    'archive' : ['href', 'size', 'extract', 'type', 'start-offset'],
    'recipe' : [],
    'requires' : ['interface', 'use'],
    'runner' : ['interface', 'use', 'command'],
    'version' : ['not-before', 'before'],
    'environment' : ['name', 'insert', 'value', 'default', 'mode'],
    'executable-in-var' : ['name', 'command'],
    'executable-in-path' : ['name', 'command'],
    #'overlay' : ['src', 'mount-point'],
}
def checkElement(elem):
if elem.namespaceURI != namespaces.XMLNS_IFACE:
info("Note: Skipping unknown (but namespaced) element <%s>", elem.localName)
return # Namespaces elements are OK
if elem.localName not in known_elements:
warn("Unknown Zero Install element <%s>.\nNon Zero-Install elements should be namespaced.", elem.localName)
return
known_attrs = known_elements[elem.localName]
for (uri, name), value in elem.attributes.itemsNS():
if uri == XMLNS_NAMESPACE:
continue # Namespace declarations are fine
if uri:
info("Note: Skipping unknown (but namespaced) attribute '%s'", name)
continue
if name not in known_attrs:
warn("Unknown Zero Install attribute '%s' on <%s>.\nNon Zero-Install attributes should be namespaced.",
name
|
, elem.localName)
for child in elem.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
checkEl
|
ement(child)
def check(data, warnings = True, implementation_id_alg=None, generate_sizes=False):
    """Validate feed XML by writing it to a temp file and loading it.

    data -- the feed XML as a string
    warnings -- when True, also walk the DOM warning about unknown
                elements/attributes via checkElement()
    Returns the parsed feed; raises InvalidInterface on failure.
    """
    fd, tmp_name = tempfile.mkstemp(prefix = '0publish-validate-')
    os.close(fd)
    try:
        # open() + 'with' replaces the py2-only file() builtin and
        # guarantees the handle is closed even if write() fails.
        with open(tmp_name, 'w') as tmp_file:
            tmp_file.write(data)
        try:
            feed = load_feed(tmp_name, local=True, implementation_id_alg=implementation_id_alg, generate_sizes=generate_sizes)
        except InvalidInterface:
            # Already the right exception type; propagate unchanged.
            raise
        except Exception as ex:
            # Wrap unexpected errors so callers only need to catch one type.
            warn("Internal error: %s", ex)
            raise InvalidInterface(str(ex))
    finally:
        os.unlink(tmp_name)
    if warnings:
        doc = minidom.parseString(data)
        checkElement(doc.documentElement)
    return feed
|
artolaluis/restblog
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 7,320 | 0.007104 |
# -*- coding: utf-8 -*-
#
# restblog documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 12 17:19:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.ap
|
pend(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx exten
|
sion module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'restblog'
copyright = u'2010, Luis Artola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = os.getenv( 'VERSION', '0.0.0' )
# The short X.Y version.
version = release.rsplit( '.', 1 )[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'restblogdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'restblog.tex', u'restblog Documentation',
u'Luis Artola', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'restblog', u'restblog Documentation',
[u'Luis Artola'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
fakdora/flaksy-upto-login
|
app/__init__.py
|
Python
|
mit
| 1,012 | 0 |
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_obj
|
ect(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as
|
auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
|
tridvaodin/Assignments-Valya-Maskaliova
|
LPTHW/ex43.py
|
Python
|
gpl-2.0
| 5,149 | 0.004273 |
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet configured. Subclass it and implement enrer()."
exit(1)
class Engine(object):
    """Drives the game: runs scenes until the 'finished' scene is reached."""

    def __init__(self, scene_map):
        # scene_map: a Map instance resolving scene names to Scene objects.
        self.scene_map = scene_map

    def play(self):
        """Main loop: enter each scene and follow the name it returns."""
        current_scene = self.scene_map.opening_scene()
        last_scene = self.scene_map.next_scene('finished')
        while current_scene != last_scene:
            next_scene_name = current_scene.enter()
            current_scene = self.scene_map.next_scene(next_scene_name)
        # be sure to print out the last scene
        current_scene.enter()
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your mum would be proud..if she were smarter.",
"Such a luser.",
"I have a small puppy that's better at this."
]
def enter(self):
print Death.quips[randint(0, len(self.quips)-1)]
exit(1)
class CentralCorridor(Scene):
def enter(self):
print "The Gothons of Planet #25 have invaded your ship and destroyed your entire crew. You are the last surviving member and your last mission is to get the neutron destruct bomb from the Weapons Armory, put it in the bridge, and blow the ship up after getting into an escape pod."
print "\n"
print "You're running down the central corridor to the Weapons Armory when a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costumes flowing around his hate filled body. He's blocking the door to the Armory and about to pull a weapon to blast you."
action = raw_input("> ")
if action == "shoot!":
print "Quick on the drae you yank out your blaster anf fire it at the Gothon. His clown costume is flowing and moving around his body, which throws off your aim. Your laser hits his costume but misses him entirly. This makes him fly into an insane rage and blast you repeadedly in the face until you are dead. Then he eats you."
return 'death'
elif action == "dodge!":
print "Like a world class boxer you dodge, weave, slip and slide right as the Gothon's blaster cracks a laser past
|
your head. In the middle of your artful dodge your foor slips and you bang your head on the metal wall and pass out. You wake up shortly after only to die as the Gothon stomps on your head and eats you."
return 'death'
elif action == "tell a joke":
print "Lucky for you they made you learn Gothon insults in the academy. You tell the one Gothon joke you know: \nLbhe zbgure vf fb sbg, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur. \n The Gotho
|
n stops, tries not to laugh, then busts out laughing and can't stop. While he's laughing you run up and shoot him square in the head putting him down, then jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "DOES NOT COMPUTE!"
return 'central_corridor'
class LaserWeaponArmory(Scene):
    """Keypad puzzle: guess a random 3-digit code to reach the bridge."""

    def enter(self):
        print "A lot of things happen in here. Blablabla."
        # Random 3-digit code; each digit is 1-9.
        code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9))
        guess = raw_input("[keypad]> ")
        guesses = 0
        # Up to 10 retries after the first guess.
        while guess != code and guesses < 10:
            print "BZZZZZEED!"
            guesses += 1
            guess = raw_input("[keypad]> ")
        if guess == code:
            print "Go to the bridge."
            return 'the_bridge'
        else:
            print "Ups. Ypu die."
            return 'death'
class TheBridge(Scene):
    """Bomb-placement scene; the wrong action is fatal."""

    def enter(self):
        print "You have a bomb under your arm and haven't pulled your weapon yet as more Gorthons emerge."
        action = raw_input("> ")
        if action == "throw the bomb":
            print "You die."
            return 'death'
        elif action == "slowly place the bomb":
            print "You run to the escape podto get off this tin can."
            return 'escape_pod'
        else:
            # Unknown input: replay this scene.
            print "DOES NOT COMPUTE!"
            return 'the_bridge'
class EscapePod(Scene):
def enter(self):
print "There's 5 pods, which one do you take?"
good_pot = randint(1,5)
guess = raw_input("[pod #]> ")
if int(guess) != good_pod:
print "You die."
return 'death'
else:
print "You won!"
return 'finished'
class Finished(Scene):
    """Winning end scene; returning 'finished' stops the Engine loop."""

    def enter(self):
        print "You won! Good job!"
        return 'finished'
class Map(object):
    """Maps scene names to Scene instances and remembers the opening scene."""

    # Shared registry of every scene in the game, keyed by name.
    scenes = {
        'central_corridor': CentralCorridor(),
        'laser_weapon_armory': LaserWeaponArmory(),
        'the_bridge': TheBridge(),
        'escape_pod': EscapePod(),
        'death': Death(),
        'finished': Finished(),
    }

    def __init__(self, start_scene):
        # start_scene: name (a key of Map.scenes) of the opening scene.
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        """Return the Scene registered under scene_name, or None."""
        # The original bound this to a temporary; return it directly.
        return Map.scenes.get(scene_name)

    def opening_scene(self):
        """Return the Scene the game starts in."""
        return self.next_scene(self.start_scene)
# Wire up the map and start the game at the central corridor.
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
VisTrails/VisTrails
|
vistrails/packages/spreadsheet/cell_rc.py
|
Python
|
bsd-3-clause
| 211,544 | 0.000151 |
# -*- coding: utf-8 -*-
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Resource object code
#
# Created: Thu Mar 20 11:39:03 2014
# by: The Resource Compiler for PyQt (Qt v4.8.5)
#
# WARNING! All changes made in this file will be lost!
from __future__ import division
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x33\x77\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x20\x00\x49\x44\x41\x54\x78\x9c\xed\xbd\x79\x9c\x1d\x57\
\x79\xe7\xfd\x7d\x4e\xd5\x5d\x7a\xd7\xda\xda\x2d\xdb\x92\x77\x79\
\x37\xb6\x71\x26\x36\x06\x13\xe2\x04\xc2\x92\x18\x48\x08\x0a\x19\
\xf2\x4e\x98\x30\x21\x21\x04\xb2
|
\xcd\xe7\x93\x99\x24\x84\x21\x0c\
\x49\xde\x37\x13\x66\x86\x84\x64\x42\xc0\x01\x4c\x20\x4c\x06\x08\
\x13\x36\x93\x78\x03\x1b\x6f\xb2\x8c\x65\xc9\x96\xd5\x52\x4b\xea\
\x56\xb7\x7a\xbb\x5b\xd5\x39\xcf\xf3\xfe\x71\xea\xde\xbe\x6a\xc9\
\x2b\x96\x5a\xc6\xfa\x7d\x54\xaa\x7b\xeb\x56\xdd\x3e\x75\x9f\xdf\
\x79\xb6\xf3\x9c\x53\x70\x0a\xa7\x70\x0a\xa7\x70\x0a\xa7\x70\x0a\
\xa7\x70\x0a\xa7\x70\x0a\x2f\x2e\
|
x88\x99\x2d\x76\x1b\x4e\x61\x11\
\xe1\x16\xbb\x01\xa7\xb0\xb8\x38\x45\x80\x17\x39\x4e\x11\xe0\x45\
\x8e\x74\xb1\x1b\xb0\x98\xb8\xe2\x8a\x2b\x3a\x0e\xd0\xdd\x77\xdf\
\x2d\x8b\xd9\x96\xc5\xc2\x29\x0d\xf0\x22\xc7\x8b\x5a\x03\x74\x43\
\x44\x56\x03\x55\xa0\x09\x4c\x99\x59\x73\x91\x9b\x74\x42\x70\x8a\
\x00\xf3\xb8\x04\x58\x0d\x1c\x00\xee\x2b\xf6\x3f\xf0\x38\x45\x80\
\x79\xac\x06\x4e\x2f\x5e\x57\x17\xb1\x1d\x27\x14\xa7\x7c\x80\x17\
\x39\x5e\xb4\x1a\x60\xec\xcd\x72\xd3\x0f\xf7\x9f\xd9\x79\xff\xfa\
\x4b\x79\xc9\x7f\xbc\x97\xf1\x45\x6c\xd2\xa2\xe0\x07\x36\x15\x3c\
\xf6\x66\x59\x0f\xbc\x14\xb8\x1a\xd8\x00\xdc\xf4\x1c\xbe\xe6\x16\
\x60\x04\xb8\x13\xb8\x63\xf8\x53\xb6\xf7\xf9\x6b\xe1\xc9\x81\x1f\
\x28\x02\x8c\xbd\x59\x6e\x22\x0a\xfa\xb9\x08\xfb\x99\xe2\x16\xe0\
\x96\xe1\x4f\xd9\x2d\xc7\xf1\x6f\x9c\x30\xbc\xe0\x09\x30\xf6\x66\
\xb9\x1a\xf8\x35\x9e\xa5\xd0\x67\x72\xbe\x74\x7f\x6b\xe9\x8f\xb5\
\xdf\xaf\x08\x87\xef\x3d\x6f\x88\x4b\x9f\xe5\x9f\xbf\x05\xf8\xe3\
\xe1\x4f\xd9\x9d\xcf\xf2\xba\x93\x06\x2f\x58\x02\x8c\xbd\x59\xde\
\x0d\xbc\x9b\xa8\xde\x8f\x82\xc1\xde\x66\xe0\xbb\xdf\x36\xf2\x3d\
\x46\xf2\x59\xc7\x80\x29\xa9\x19\xa9\x4b\x19\x2e\x57\x39\x6b\xe1\
\x35\xcd\x39\xf6\xfa\x8c\x29\x71\xe4\x22\x84\x1b\x73\x5a\xab\x81\
\x4b\x85\x74\x6d\x2f\x9b\x7a\x12\x56\x3c\x49\x73\x46\x80\x3f\x19\
\xfe\x94\xfd\xc9\xf3\x76\x83\x27\x08\x2f\x38\x02\x14\x6a\xfe\xc3\
\x1c\x43\xf0\xb3\xc6\x97\xbf\x04\x33\x9f\x12\x06\x5d\xc2\x65\x2e\
\x61\xd5\xf3\xf5\x77\x7d\xce\x74\x6b\x8e\xdd\x3f\xea\x69\xdc\xe0\
\xe8\x3d\x7f\x80\x8b\x16\x9e\x63\xb0\x57\x8d\xf7\xac\xf9\xb4\x7d\
\xe6\xf9\xfa\xbb\xc7\x1b\x2f\x18\x02\x14\xaa\xfe\x8f\x89\x8e\x5d\
\x07\x19\x7c\xf7\x0b\xf0\xbd\x4f\x25\x9c\xe3\x12\x2e\x5f\x78\xdd\
\xf0\x86\x61\x00\x56\x6d\x58\xd9\x39\x56\xe9\xad\x30\xb4\x7c\xe8\
\xa8\xbf\x31\x3d\x31\x4d\xab\xde\xea\xbc\x3f\x38\x12\x83\x82\xb1\
\x91\xb1\xa3\xce\x6d\xce\x31\xf2\xf2\x3a\x63\x6f\xaa\xb2\x72\x6d\
\x95\xd3\xba\x3f\xf3\xc6\xb7\x9b\x81\x5f\x3b\xf3\xb3\x76\xdb\xb3\
\xb9\xc7\xc5\xc0\x0b\x82\x00\x63\x6f\x96\xcf\xb0\xc0\xc6\xd7\xe1\
\x8b\xbf\xe3\x18\xdf\x9b\xf2\xb6\xee\xe3\xc3\x1b\x86\x59\xb5\x61\
\x25\x9b\xcf\xbc\x88\x25\x43\x2b\x58\x39\xb0\x91\xde\x4a\x14\xf6\
\xca\x81\x8d\xf4\x55\x8e\x16\x7c\xad\x35\xcd\xf8\xec\x13\xf1\x7b\
\x8b\xd7\xf5\xd6\x0c\x87\x66\xf7\x74\xce\x69\xd6\x5b\xcc\x4c\x4c\
\x73\x70\x64\xfc\x28\x42\x54\x0f\x72\xf7\x7b\x13\xfa\x2e\x1a\xe4\
\xbc\xee\xe3\xb9\xf2\xf9\xf5\xb7\x70\x93\x99\x85\xef\xe3\xf6\x8f\
\x2b\x4e\x6a\x02\x14\xa1\xdc\x67\xe8\xea\xf5\x39\xdc\xf3\xff\x39\
\x76\xdd\x99\xf2\xc6\xf6\xb1\xe1\x0d\xc3\x5c\x7c\xf1\x95\x9c\xb5\
\xf9\x62\x4e\x5b\xbe\x85\x15\x03\xa7\x1d\xeb\xeb\x9e\x13\x0e\xcd\
\xee\x61\xcf\xc4\x36\x0e\xcd\x8e\x30\x31\x37\x32\xdf\xb6\x91\xb1\
\xa3\xc8\x70\xe6\x21\xee\xff\x0f\x25\x56\x6c\xe8\x61\x5d\xfb\x58\
\xa6\xdc\x73\xcf\x04\x3f\xfd\xba\xaf\xb3\xcb\xcc\xf4\x79\x6b\xd8\
\xf3\x84\x93\x96\x00\x85\xca\xff\x0c\x5d\xb6\xfe\x13\x8e\x2f\x7d\
\x21\xa5\xe3\xb9\x5f\x70\xd9\x45\xbc\xf4\xea\x57\xb1\x65\xe3\xb5\
\xf4\x96\x07\x8f\x7b\x9b\xea\xd9\x0c\x8f\xec\xbf\x9d\x91\x89\xed\
\x34\xb2\x19\x20\x6a\x86\x91\x1d\x23\xec\x79\x64\x9e\x1c\x2f\x9d\
\xe4\xe1\x5f\xeb\x9f\xd7\x06\x99\x72\xf0\xbe\x49\x7e\xfe\x35\x5f\
\xe3\x6b\x66\x96\x1d\xf7\x86\x3e\x0b\x9c\x94\x04\x28\x7a\x7e\xe7\
\x17\x55\x18\xfd\x4f\x25\xd2\x87\x85\x61\x80\x4b\xaf\xb9\x82\xeb\
\xaf\x79\x3d\x67\xaf\xbe\x7a\xd1\xda\xb8\xe3\xc0\x9d\xec\x38\xf8\
\xed\x0e\x11\x00\xf6\x3c\x32\xc2\xa3\xf7\xed\x04\xe0\xac\x9c\xd9\
\xdf\xcc\xb1\xc1\x94\x0e\x33\xff\xeb\x43\xbc\xf4\x43\xdb\xb8\x0f\
\x68\xd9\x49\xf2\xc3\x9f\x74\x04\x28\x84\x7f\x3b\x45\xcf\x9f\x16\
\x76\xfc\x46\xca\xd9\x13\x02\x43\xcb\x07\xf9\xc9\x9f\x7a\x3b\x97\
\x6d\xfe\xd1\xc5\x6d\x64\x17\x1e\x1c\xf9\x1a\x3b\x0f\x7e\xa7\xf3\
\xbe\x59\x6f\xb1\xed\xf6\x6d\x4c\x4f\xcc\xb0\xdc\xe0\x37\x6b\x1c\
\x3a\xbd\x1c\xc3\xc7\x46\xe0\xd0\x1f\x3e\xc0\x4d\x1f\xdd\xc1\x3d\
\x40\xed\x64\x30\x09\x27\x23\x01\x6e\xa7\xb0\xf9\x2d\x98\xf8\xd9\
\x32\xcb\x01\xce\xbb\x64\x0b\x6f\x7c\xcd\xbb\x58\xd6\xb7\x76\x51\
\xdb\x77\x2c\x4c\xd6\x46\xb9\xfb\xf1\x7f\xa4\x99\xcf\x75\x8e\x3d\
\x7a\xdf\xce\x8e\x59\xf8\xcb\x06\xf5\xa1\x84\x5e\x80\xf1\x26\x8f\
\x6e\xf9\x02\x37\x01\x8f\x01\x73\x8b\xad\x09\x4e\x2a\x02\x8c\xbd\
\x59\x3e\x4c\xcc\xea\xd1\x82\x89\x5f\x29\xb1\x7c\x42\xe0\xaa\xeb\
\x5f\xca\x6b\x5f\xf6\x4e\x7a\x4a\x03\x8b\xdc\xc2\x27\x47\x23\x9f\
\xe5\xb6\x47\x3f\x7d\x04\x09\xc6\x46\xc6\x78\xf0\xf6\x87\x58\x6e\
\xf0\xc1\xe6\x3c\x09\xee\x9e\xe0\xcb\x3f\xfe\x55\xde\x07\x3c\x0e\
\xd4\x17\x93\x04\x27\x0d\x01\x16\xda\xfd\xdf\x29\xc1\x0e\x81\x2b\
\xaf\xbf\x9a\xd7\x5c\xfb\x0e\xaa\xa5\xfe\x45\x6c\xdd\x33\x43\x33\
\x9f\xe3\xce\x5d\x9f\xa5\x99\xd7\x3a\xc7\xda\x24\x38\xdb\xe0\xfd\
\xf9\xfc\xb9\x37\x7d\x93\x5f\xfa\xd6\x41\xbe\x0e\x3c\xb1\x98\xd5\
\x47\x27\xd3\x70\xf0\xbb\xdb\x2f\x6e\x4e\x98\xda\x21\x2c\xb9\xf0\
\x9a\x0b\xb8\xfc\xa2\x97\x53\x4a\x2a\x04\xcd\x9f\xea\xda\x93\x02\
\xa5\xa4\xc2\x69\xcb\x2f\x62\xc7\x81\x3b\x3a\xc7\x86\x37\x0c\x73\
\xe1\x35\xf0\xe0\xed\x0f\xf1\x71\x68\x6e\x2d\x8a\x4d\x7e\xf9\x3c\
\xde\xf4\xad\x83\xec\x03\x1a\x22\xb2\xdf\xcc\x16\xe5\x06\x4f\x26\
\x02\xdc\x04\xd0\x80\xa9\xcf\x27\x2c\x19\x5a\x3e\xc8\xe6\xb3\x2f\
\x60\xfd\xd2\x73\x5f\x10\xc2\x6f\x63\xfd\xd2\x73\x39\x30\xbd\x93\
\x99\xc6\x7c\x69\xc1\xf0\x86\x61\x86\x96\x8f\xf0\x8f\x13\x33\xd5\
\x9f\x68\xd0\x5c\x92\x5
|
ajn123/Python_Tutorial
|
Python Version 2/Advanced/exceptions.py
|
Python
|
mit
| 1,154 | 0.034662 |
"""
An exception is what occurs when you have a run time error in your
program.
what you can do is "try" a statement wether it is invalid or not
if an error does occur withan that statement you can catch that error
in the except clause and print out a corresponding message
(or you can print out the error message).
"""
#made up imports will cause an imput error.
try:
import madeup
except ImportError:
print ImportError
|
def main():
while True:
#Similar to Try and catch, tries an error and catches it.
try:
x = int(raw_input("Please enter a number: "))
break
except ValueError:
print "Oops! That was no valid number. Try again..."
    # A lookup error occurs when an incorrect lookup happens in a dictionary.
dict = {"string": "a word representation"}
try:
dict["ERROR"]
except LookupError:
|
print LookupError
"""
You can catch an exception and then use a finally clause
to take care of your program
A finally statement is executed no matter if an exception is
thrown.
"""
try:
a = 4 / 2
except Exception, e:
raise e
finally:
print "finally clause raised"
if __name__ == '__main__':
main()
|
MarekSuchanek/repocribro
|
repocribro/github.py
|
Python
|
mit
| 9,565 | 0 |
import hashlib
import hmac
import json
import requests
class GitHubResponse:
"""Wrapper for GET request response from GitHub"""
def __init__(self, response):
self.response = response
@property
def is_ok(self):
"""Check if request has been successful
:return: if it was OK
:rtype: bool
"""
return self.response.status_code < 300
@property
def data(self):
"""Response data as dict/list
:return: data of response
:rtype: dict|list
"""
return self.response.json()
@property
def url(self):
"""URL of the request leading to this response
:return: URL origin
:rtype: str
"""
return self.response.url
@property
def links(self):
"""Response header links
:return: URL origin
:rtype: dict
"""
return self.response.links
@property
def is_first_page(self):
"""Check if this is the first page of data
:return: if it is the first page of data
:rtype: bool
"""
return 'first' not in self.links
@property
def is_last_page(self):
"""Check if this is the last page of data
:return: if it is the last page of data
:rtype: bool
"""
return 'last' not in self.links
@property
def is_only_page(self):
"""Check if this is the only page of data
:return: if it is the only page page of data
:rtype: bool
"""
return self.is_first_page and self.is_last_page
    @property
    def total_pages(self):
        """Number of pages
        :return: number of pages
        :rtype: int
        """
        # When the response carries no 'last' link header there are no
        # further pages, so the current page number is also the total.
        if 'last' not in self.links:
            return self.actual_page
        return self.parse_page_number(self.links['last']['url'])
@property
def actual_page(self):
"""Actual page number
:return: actual page number
:rtype: int
"""
return self.parse_page_number(self.url)
@staticmethod
def parse_page_number(url):
"""Parse page number from GitHub GET URL
:param url: URL used for GET request
:type url: str
:return: page number
:rtype: int
"""
if '?' not in url:
|
return 1
params = url.split('?')[1].split('=')
params = {k: v for k, v in zip(params[0::2], params[1::2])}
if 'page' not in params:
return 1
return int(pa
|
rams['page'])
class GitHubAPI:
"""Simple GitHub API communication wrapper
It provides simple way for getting the basic GitHub API
resources and special methods for working with webhooks.
.. todo:: handle if GitHub is out of service, custom errors,
better abstraction, work with extensions
"""
#: URL to GitHub API
API_URL = 'https://api.github.com'
#: URL for OAuth request at GitHub
AUTH_URL = 'https://github.com/login/oauth/authorize?scope={}&client_id={}'
#: URL for OAuth token at GitHub
TOKEN_URL = 'https://github.com/login/oauth/access_token'
#: Scopes for OAuth request
SCOPES = ['user', 'repo', 'admin:repo_hook']
#: Required webhooks to be registered
WEBHOOKS = ['push', 'release', 'repository']
#: Controller for incoming webhook events
WEBHOOK_CONTROLLER = 'webhooks.gh_webhook'
#: URL for checking connections within GitHub
CONNECTIONS_URL = 'https://github.com/settings/connections/applications/{}'
    def __init__(self, client_id, client_secret, webhooks_secret,
                 session=None, token=None):
        """Store OAuth application credentials and prepare an HTTP session.

        :param client_id: OAuth application client ID
        :param client_secret: OAuth application client secret
        :param webhooks_secret: shared secret configured on created webhooks
        :param session: optional HTTP session to reuse (a fresh
                        ``requests.Session`` is created when not given)
        :param token: optional OAuth token for already-authorized use
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self.webhooks_secret = webhooks_secret
        # Reuse the caller-supplied session if any; otherwise open a new one.
        self.session = session or requests.Session()
        self.token = token
        # Granted OAuth scopes; populated by a successful login().
        self.scope = []
    def _get_headers(self):
        """Prepare auth header fields (empty if no token provided)
        :return: Headers for the request
        :rtype: dict
        """
        if self.token is None:
            return {}
        return {
            'Authorization': 'token {}'.format(self.token),
            # NOTE(review): the custom media type looks like a GitHub API
            # preview opt-in -- confirm against the GitHub REST docs.
            'Accept': 'application/vnd.github.mercy-preview+json'
        }
def get_auth_url(self):
"""Create OAuth request URL
:return: OAuth request URL
:rtype: str
"""
return self.AUTH_URL.format(' '.join(self.SCOPES), self.client_id)
def login(self, session_code):
"""Authorize via OAuth with given session code
:param session_code: The session code for OAuth
:type session_code: str
:return: If the auth procedure was successful
:rtype: bool
.. todo:: check granted scope vs GH_SCOPES
"""
response = self.session.post(
self.TOKEN_URL,
headers={
'Accept': 'application/json'
},
data={
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': session_code,
}
)
if response.status_code != 200:
return False
data = response.json()
self.token = data['access_token']
self.scope = [x for x in data['scope'].split(',')]
return True
def get(self, what, page=0):
"""Perform GET request on GitHub API
:param what: URI of requested resource
:type what: str
:param page: Number of requested page
:type page: int
:return: Response from the GitHub
:rtype: ``repocribro.github.GitHubResponse``
"""
uri = self.API_URL + what
if page > 0:
uri += '?page={}'.format(page)
return GitHubResponse(self.session.get(
uri,
headers=self._get_headers()
))
def webhook_get(self, full_name, hook_id):
"""Perform GET request for repo's webhook
:param full_name: Full name of repository that contains the hook
:type full_name: str
:param hook_id: GitHub ID of hook to be get
:type hook_id: int
:return: Data of the webhook
:rtype: ``repocribro.github.GitHubResponse``
"""
return self.get('/repos/{}/hooks/{}'.format(full_name, hook_id))
def webhooks_get(self, full_name):
"""GET all webhooks of the repository
:param full_name: Full name of repository
:type full_name: str
:return: List of returned webhooks
:rtype: ``repocribro.github.GitHubResponse``
"""
return self.get('/repos/{}/hooks'.format(full_name))
    def webhook_create(self, full_name, hook_url, events=None):
        """Create new webhook for specified repository
        :param full_name: Full name of the repository
        :type full_name: str
        :param hook_url: URL where the webhook data will be sent
        :type hook_url: str
        :param events: List of requested events for that webhook
                       (defaults to :attr:`WEBHOOKS` when not given)
        :type events: list of str
        :return: The created webhook data
        :rtype: dict or None
        """
        if events is None:
            events = self.WEBHOOKS
        data = {
            # NOTE(review): 'web' appears to be the fixed hook type GitHub
            # requires for URL-delivery webhooks -- confirm in the API docs.
            'name': 'web',
            'active': True,
            'events': events,
            'config': {
                'url': hook_url,
                'content_type': 'json',
                # Shared secret; presumably used by GitHub to sign payload
                # deliveries so the receiver can verify them.
                'secret': self.webhooks_secret
            }
        }
        response = self.session.post(
            self.API_URL + '/repos/{}/hooks'.format(full_name),
            data=json.dumps(data),
            headers=self._get_headers()
        )
        # 201 Created signals success; any other status yields None.
        if response.status_code == 201:
            return response.json()
        return None
def webhook_tests(self, full_name, hook_id):
"""Perform test request for repo's webhook
:param full_name: Full name of repository that contains the hook
:type full_name: str
:param hook_id: GitHub ID of hook to be tested
:type hook_id: int
:return: If request was successful
:rtype: bool
"""
response = self.session.delete(
self.API_URL + '
|
beernarrd/gramps
|
gramps/gen/filters/rules/person/_hascitation.py
|
Python
|
gpl-2.0
| 1,883 | 0.006373 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Filter rule to match persons with a particular citation.
"""
#-------------------------
|
------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hascitationbase import HasCitationBase
#-------------------------------------------------------------------------
#
# HasEvent
#
#-------------------------------------------------------------------------
class HasCitation(HasCitationBase):
    """Rule that checks for a person with a particular value"""

    labels = [
        _('Volume/Page:'),
        _('Date:'),
        _('Confidence level:'),
    ]
    name = _('People with the <citation>')
    description = _("Matches people with a citation of a particular "
                    "value")
|
gundalow/ansible-modules-extras
|
windows/win_webpicmd.py
|
Python
|
gpl-3.0
| 1,852 | 0.00162 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: win_webpicmd
version_added: "2.0"
short_description: Installs packages using Web Platform Installer command-line
description:
- Installs packages using W
|
eb Platform Installer command-line (http://www.iis.net/
|
learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release).
- Must be installed and present in PATH (see win_chocolatey module; 'webpicmd' is the package name, and you must install 'lessmsi' first too)
- Install IIS first (see win_feature module)
notes:
- accepts EULAs and suppresses reboot - you will need to check manage reboots yourself (see win_reboot module)
options:
name:
description:
- Name of the package to be installed
required: true
author: Peter Mounce
'''
EXAMPLES = '''
# Install URLRewrite2.
win_webpicmd:
name: URLRewrite2
'''
|
rolando-contrib/scrapy
|
tests/__init__.py
|
Python
|
bsd-3-clause
| 1,205 | 0 |
"""
tests: this package contains all Scrapy unittests
see http://doc.scrapy.org/en/latest/contributing.html#running-tests
"""
import os
# ignore system-wide proxies for tests
# which would send requests to a totally unsuspecting server
# (e.g. because urllib does not fully understand the proxy spec)
os.environ['http_proxy'] = ''
os.environ['https_proxy'] = ''
os.environ['ftp_proxy'] = ''
# Absolutize paths to coverage config and output file because tests that
# spawn subprocesses also changes current working directory.
_sourceroot = os.path.dirname(os.path.dirname(os.
|
path.abspath(__file__)))
if 'COV_CORE_CONFIG' in os.
|
environ:
os.environ['COVERAGE_FILE'] = os.path.join(_sourceroot, '.coverage')
os.environ['COV_CORE_CONFIG'] = os.path.join(_sourceroot,
os.environ['COV_CORE_CONFIG'])
try:
import unittest.mock as mock
except ImportError:
import mock
# Directory holding the binary fixtures used by the test suite.
tests_datadir = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), 'sample_data')


def get_testdata(*paths):
    """Read and return the raw bytes of a fixture under ``sample_data``."""
    target = os.path.join(tests_datadir, *paths)
    with open(target, 'rb') as fixture:
        return fixture.read()
|
Forage/Gramps
|
gramps/gui/widgets/linkbox.py
|
Python
|
gpl-2.0
| 1,754 | 0.005701 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
#
#
|
This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Ge
|
neral Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
__all__ = ["LinkBox"]
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger(".widgets.linkbox")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# LinkBox class
#
#-------------------------------------------------------------------------
class LinkBox(Gtk.HBox):
    """Horizontal box showing a link widget plus an optional button."""

    def __init__(self, link, button):
        GObject.GObject.__init__(self)
        self.set_spacing(6)
        # The link is always packed; the button only when one was supplied.
        widgets = [link]
        if button:
            widgets.append(button)
        for widget in widgets:
            self.pack_start(widget, False, True, 0)
        self.show()
|
ResearchSoftwareInstitute/MyHPOM
|
hs_tracking/tests.py
|
Python
|
bsd-3-clause
| 15,515 | 0.001482 |
from datetime import datetime, timedelta
import csv
from cStringIO import StringIO
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
from django.http import HttpRequest, QueryDict, response
from mock import patch, Mock
from .models import Variable, Session, Visitor, SESSION_TIMEOUT, VISITOR_FIELDS
from .views import AppLaunch
import utils
import urllib
class ViewTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='testuser',
email='[email protected]')
self.user.set_password('password')
self.user.save()
profile = self.user.userprofile
profile_data = {
'country': 'USA',
}
for field in profile_data:
setattr(profile, field, profile_data[field])
profile.save()
self.visitor = Visitor.objects.create()
self.session = Session.objects.create(visitor=self.visitor)
def createRequest(self, user=None):
self.request = Mock()
if user is not None:
self.request.user = user
# sample request with mocked ip address
self.request.META = {
'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0,' +
'127.0.0.1, 198.84.193.157, '
'177.139.233.139',
'HTTP_X_REAL_IP': '177.139.233.132',
'REMOTE_ADDR': '177.139.233.133',
}
self.request.method = 'GET'
self.request.session = {}
return self.request
def test_get(self):
# check that there are no logs for app_launch
app_lauch_cnt = Variable.objects.filter(name='app_launch').count()
self.assertEqual(app_lauch_cnt, 0)
# create a mock request object
r = self.createRequest(self.user)
# build request 'GET'
res_id = 'D7a7de92941a044049a7b8ad09f4c75bb'
res_type = 'GenericResource'
app_name = 'test'
request_url = 'https://apps.hydroshare.org/apps/hydroshare-gis/' \
'?res_id=%s&res_type=%s' % (res_id, res_type)
app_url = urllib.quote(request_url)
href = 'url=%s;name=%s' % (app_url, app_name)
r.GET = QueryDict(href)
# invoke the app logging endpoint
app_logging = AppLaunch()
url_redirect = app_logging.get(r)
# validate response
self.assertTrue(type(url_redirect) == response.HttpResponseRedirect)
self.assertTrue(url_redirect.url == request_url)
# validate logged data
app_lau
|
ch_cnt = Variable.objects.filter(name='app_launch').count()
self.assertEqual(app_
|
lauch_cnt, 1)
data = list(Variable.objects.filter(name='app_launch'))
values = dict(tuple(pair.split('=')) for pair in data[0].value.split('|'))
self.assertTrue('res_type' in values.keys())
self.assertTrue('name' in values.keys())
self.assertTrue('user_email_domain' in values.keys())
self.assertTrue('user_type' in values.keys())
self.assertTrue('user_ip' in values.keys())
self.assertTrue('res_id' in values.keys())
self.assertTrue(values['res_type'] == res_type)
self.assertTrue(values['name'] == app_name)
self.assertTrue(values['user_email_domain'] == self.user.email[-3:])
self.assertTrue(values['user_type'] == 'Unspecified')
self.assertTrue(values['user_ip'] == '198.84.193.157')
self.assertTrue(values['res_id'] == res_id)
class TrackingTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='testuser',
email='[email protected]')
self.user.set_password('password')
self.user.save()
profile = self.user.userprofile
profile_data = {
'country': 'USA',
}
for field in profile_data:
setattr(profile, field, profile_data[field])
profile.save()
self.visitor = Visitor.objects.create()
self.session = Session.objects.create(visitor=self.visitor)
def createRequest(self, user=None):
request = Mock()
if user is not None:
request.user = user
# sample request with mocked ip address
request.META = {
'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0, ' +
'127.0.0.1, 198.84.193.157, '
'177.139.233.139',
'HTTP_X_REAL_IP': '177.139.233.132',
'REMOTE_ADDR': '177.139.233.133',
}
return request
def test_record_variable(self):
self.session.record('int', 42)
self.session.record('float', 3.14)
self.session.record('true', True)
self.session.record('false', False)
self.session.record('text', "Hello, World")
self.assertEqual("42", self.session.variable_set.get(name='int').value)
self.assertEqual("3.14", self.session.variable_set.get(name='float').value)
self.assertEqual("true", self.session.variable_set.get(name='true').value)
self.assertEqual("false", self.session.variable_set.get(name='false').value)
self.assertEqual('Hello, World', self.session.variable_set.get(name='text').value)
def test_record_bad_value(self):
self.assertRaises(TypeError, self.session.record, 'bad', ['oh no i cannot handle arrays'])
def test_get(self):
self.assertEqual(42, Variable(name='var', value='42', type=0).get_value())
self.assertEqual(3.14, Variable(name='var', value='3.14', type=1).get_value())
self.assertEqual(True, Variable(name='var', value='true', type=3).get_value())
self.assertEqual(False, Variable(name='var', value='false', type=3).get_value())
self.assertEqual("X", Variable(name='var', value='X', type=2).get_value())
self.assertEqual(None, Variable(name='var', value='', type=4).get_value())
def test_for_request_new(self):
request = self.createRequest(user=self.user)
request.session = {}
session = Session.objects.for_request(request)
self.assertIn('hs_tracking_id', request.session)
self.assertEqual(session.visitor.user.id, self.user.id)
def test_for_request_existing(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
session2 = Session.objects.for_request(request)
self.assertEqual(session1.id, session2.id)
def test_for_request_expired(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
with patch('hs_tracking.models.datetime') as dt_mock:
dt_mock.now.return_value = datetime.now() + timedelta(seconds=SESSION_TIMEOUT)
session2 = Session.objects.for_request(request)
self.assertNotEqual(session1.id, session2.id)
self.assertEqual(session1.visitor.id, session2.visitor.id)
def test_for_other_user(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
user2 = User.objects.create(username='testuser2', email='[email protected]')
request = self.createRequest(user=user2)
request.session = {}
session2 = Session.objects.for_request(request)
self.assertNotEqual(session1.id, session2.id)
self.assertNotEqual(session1.visitor.id, session2.visitor.id)
def test_export_visitor_info(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
info = session1.visitor.export_visitor_information()
self.assertEqual(info['country'], 'USA')
self.assertEqual(info['username'], 'testuser')
def test_tracking_view(self):
self.user.is_staff = True
self.user.save()
client = Client()
client.login(username=self.user.username, password='password')
response = client.get('/hydr
|
SpeedProg/eve-inc-waitlist
|
waitlist/blueprints/xup/__init__.py
|
Python
|
mit
| 52 | 0 |
from .
|
blueprint
|
import bp
from .submission import *
|
jfillmore/Omega-API-Engine
|
clients/python/omega/__init__.py
|
Python
|
mit
| 402 | 0.002488 |
#!/usr/bin/python
# omega - python client
# https://github.com/jfillmore/Omega-API-Engine
#
# Copyright 2011, J
|
onathon Fillmore
# Licensed under the MIT license. See LICENSE file.
# http://www.opensource.org/licenses/mit-license.php
"""Omega core library."""
__all__ = ['client', 'dbg', 'browser', 'box_factory', 'error', 'util', 'shell']
import dbg
from util import *
from error import Exc
|
eption
|
jkandasa/integration_tests
|
cfme/fixtures/cli.py
|
Python
|
gpl-2.0
| 4,212 | 0.002374 |
from cfme.utils.version import get_stream
from collections import namedtuple
from contextlib import contextmanager
from cfme.test_framework.sprout.client import SproutClient
from cfme.utils.conf import cfme_data, credentials
from cfme.utils.log import logger
import pytest
from wait_for import wait_for
from cfme.test_framework.sprout.client import SproutException
from fixtures.appliance import temp_appliances
TimedCommand = namedtuple('TimedCommand', ['command', 'timeout'])
@pytest.yield_fixture(scope="function")
def dedicated_db_appliance(app_creds, appliance):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
if appliance.version > '5.7':
with temp_appliances(count=1, preconfigured=False) as apps:
pwd = app_creds['password']
opt = '5' if apps[0].version >= "5.8" else '8'
command_set = ('ap', '', opt, '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
apps[0].appliance_console.run_commands(command_set)
wait_for(lambda: apps[0].db.is_dedicated_active)
yield apps[0]
else:
raise Exception("Can't setup dedicated db on appliance below 5.7 builds")
""" The Following fixtures are for provisioning one preconfigured or unconfigured appliance for
testing from an FQDN provider unless there are no provisions available"""
@contextmanager
def fqdn_appliance(appliance, preconfigured):
sp = SproutClient.from_config()
available_providers = set(sp.call_method('available_providers'))
required_providers = set(cfme_data['fqdn_providers'])
usable_providers = available_providers & required_providers
version = appliance.version.vstring
stream = get_stream(appliance.version)
for provider in usable_providers:
try:
apps, pool_id = sp.provision_appliances(
count=1, preconfigured=preconfigured, version=version, stream=stream,
provider=provider
)
break
except Exception as e:
logger.warning("Couldn't provision appliance with following error:")
logger.warning("{}".format(e))
continue
else:
logger.error("Couldn't provision an appliance at all")
|
raise SproutException('No provision available')
yield apps[0]
apps[0].ssh_client.close()
sp.destroy_pool(pool_id)
@pyt
|
est.yield_fixture()
def unconfigured_appliance(appliance):
with fqdn_appliance(appliance, preconfigured=False) as app:
yield app
@pytest.yield_fixture()
def configured_appliance(appliance):
    """Yield a preconfigured appliance provisioned from an FQDN provider."""
    with fqdn_appliance(appliance, preconfigured=True) as provisioned:
        yield provisioned
@pytest.yield_fixture()
def ipa_crud(configured_appliance, ipa_creds):
    """Configure IPA authentication on a preconfigured appliance, then yield it."""
    cli = configured_appliance.appliance_console_cli
    cli.configure_ipa(
        ipa_creds['ipaserver'],
        ipa_creds['username'],
        ipa_creds['password'],
        ipa_creds['domain'],
        ipa_creds['realm'],
    )
    yield configured_appliance
@pytest.fixture()
def app_creds():
    """Function-scoped database and SSH credentials pulled from config."""
    db_creds = credentials['database']
    ssh_creds = credentials['ssh']
    return {
        'username': db_creds['username'],
        'password': db_creds['password'],
        'sshlogin': ssh_creds['username'],
        'sshpass': ssh_creds['password'],
    }
@pytest.fixture(scope="module")
def app_creds_modscope():
return {
'username': credentials['database']['username'],
'password': credentials['database']['password'],
'sshlogin': credentials['ssh']['username'],
'sshpass': credentials['ssh']['password']
}
@pytest.fixture()
def ipa_creds():
    """Assemble IPA server connection details from the test configuration."""
    ext_ipa = cfme_data['auth_modes']['ext_ipa']
    # The first dot splits the FQDN into host and domain parts.
    hostname, domain = ext_ipa['ipaserver'].split('.', 1)
    ipa_credentials = credentials[ext_ipa['credentials']]
    return {
        'hostname': hostname,
        'domain': domain,
        'realm': ext_ipa['iparealm'],
        'ipaserver': ext_ipa['ipaserver'],
        'username': ipa_credentials['principal'],
        'password': ipa_credentials['password'],
    }
|
Repythory/Libraries
|
amolf/plotting/__init__.py
|
Python
|
bsd-2-clause
| 236 | 0.008475 |
import os
import glob
|
SOURCE_FILES = glob.glob(os.path.dirname(__file__) + "/*.py")
__all__ = [os.path.basename(
|
f)[: -3] for f in SOURCE_FILES]
__doc__ = """\
Module for advanced plotting based on matplotlib
"""
from .popups import *
|
RingCentralVuk/ringcentral-python
|
ringcentral/subscription/__init__test.py
|
Python
|
mit
| 2,725 | 0.001835 |
#!/usr/bin/env python
# encoding: utf-8
import unittest
from ..test import TestCase, Spy
from ..http.mocks.presence_subscription_mock import PresenceSubscriptionMock
from ..http.mocks.subscription_mock import SubscriptionMock
from . import *
class TestSubscription(TestCase):
def test_presence_decryption(self):
sdk = self.get_sdk()
sdk.get_context().get_mocks().add(PresenceSubscriptionMock())
aes_message = 'gkw8EU4G1SDVa2/hrlv6+0ViIxB7N1i1z5MU/Hu2xkIKzH6yQzhr3vIc27IAN558kTOkacqE5DkLpRdnN1orwtIBsUHm' + \
'PMkMWTOLDzVr6eRk+2Gcj2Wft7ZKrCD+FCXlKYIoa98tUD2xvoYnRwxiE2QaNywl8UtjaqpTk1+WDImBrt6uabB1WICY' + \
|
'/qE0It3DqQ6vdUWISoTfjb+vT5h9kfZxWYUP4ykN2UtUW1biqCjj1Rb6GWGnTx6jP
|
qF77ud0XgV1rk/Q6heSFZWV/GP2' + \
'3/iytDPK1HGJoJqXPx7ErQU='
s = sdk.get_subscription()
spy = Spy()
s.add_events(['/restapi/v1.0/account/~/extension/1/presence'])
s.on(EVENTS['notification'], spy)
s.register()
s._get_pubnub().receive_message(aes_message)
expected = {
"timestamp": "2014-03-12T20:47:54.712+0000",
"body": {
"extensionId": 402853446008,
"telephonyStatus": "OnHold"
},
"event": "/restapi/v1.0/account/~/extension/402853446008/presence",
"uuid": "db01e7de-5f3c-4ee5-ab72-f8bd3b77e308"
}
self.assertEqual(expected, spy.args[0])
s.destroy()
def test_plain_subscription(self):
sdk = self.get_sdk()
sdk.get_context().get_mocks().add(SubscriptionMock())
s = sdk.get_subscription()
spy = Spy()
expected = {
"timestamp": "2014-03-12T20:47:54.712+0000",
"body": {
"extensionId": 402853446008,
"telephonyStatus": "OnHold"
},
"event": "/restapi/v1.0/account/~/extension/402853446008/presence",
"uuid": "db01e7de-5f3c-4ee5-ab72-f8bd3b77e308"
}
s.add_events(['/restapi/v1.0/account/~/extension/1/presence'])
s.on(EVENTS['notification'], spy)
s.register()
s._get_pubnub().receive_message(expected)
self.assertEqual(expected, spy.args[0])
s.destroy()
    def test_subscribe_with_events(self):
        """Events passed directly to register() must appear in the event filters."""
        sdk = self.get_sdk()
        # Stub the transport so register() receives a canned subscription reply.
        sdk.get_context().get_mocks().add(SubscriptionMock())
        s = sdk.get_subscription()
        res = s.register(events=['/restapi/v1.0/account/~/extension/1/presence'])
        self.assertEqual('/restapi/v1.0/account/~/extension/1/presence', res.get_json().eventFilters[0])
        s.destroy()
if __name__ == '__main__':
unittest.main()
|
remontees/EliteHebergPanel
|
home/forms.py
|
Python
|
lgpl-3.0
| 442 | 0.020362 |
#-*- coding: utf-8 -*-
from django import forms
from models import User
from captcha.fields import CaptchaField
class UserFo
|
rm(forms.ModelForm):
captcha = CaptchaField()
cgu = forms.BooleanField()
class Meta:
model = User
widgets = { 'pseudo': forms.TextInput(attrs={'class':'form-control'}), 'nom': forms.TextInput(attrs={'class':'form-control'}), 'email': forms.TextInput(attrs={'class'
|
:'form-control'}) }
|
SINGROUP/pycp2k
|
pycp2k/classes/_nmr1.py
|
Python
|
lgpl-3.0
| 844 | 0.00237 |
from pycp2k.inputsection import InputSection
from ._print55 import _print55
from ._interpolator10 import _interpolator10
class _nmr1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_par
|
ameters = None
self.Interpolate_shift = None
self.Nics = None
self.Nics_file_name = None
self.Restart_nmr = None
self.Shift_gapw_radius = None
self.PRINT = _print55()
self.INTERPOLATOR = _interpolator10()
self._name = "NMR"
self._keywords = {'Restart_nmr': 'RESTART_NMR', 'Nics': 'NICS', 'Nics_file_name': 'NICS_FILE_NAME', 'Interpolate_shift': 'INTERPOLATE_SHIFT', 'Shift_ga
|
pw_radius': 'SHIFT_GAPW_RADIUS'}
self._subsections = {'INTERPOLATOR': 'INTERPOLATOR', 'PRINT': 'PRINT'}
self._attributes = ['Section_parameters']
|
sniemi/SamPy
|
plot/interactive_correlation_plot.py
|
Python
|
bsd-2-clause
| 11,714 | 0.015025 |
### Interactively plot points
### to show the correlation between the x and y directions.
### By Rajeev Raizada, Jan.2011.
### Requires Python, with the Matplotlib and SciPy modules.
### You can download Python and those modules for free from
### http://www.python.org/download
### http://scipy.org
### http://matplotlib.sourceforge.net
###
### Please feel more than free to use this code for teaching.
### If you use it, I'd love to hear from you!
### If you have any questions, comments or feedback,
### please send them to me: rajeev dot raizada at dartmouth dot edu
###
### Some tutorial exercises which might be useful to try:
### 1. Click to make a few points in more or less a straight line.
### What is the correlation value?
### Now add a point far away from the line.
### What does adding that point do to the correlation value?
### Try deleting the point by clicking on it, then re-adding it, to compare.
### 2. Click outside the axes to reset the plot.
### Now put in about 10 points in a oval-ish cloud,
### deleting and adjusting them so that you get a correlation
### of around r=0.6.
### What is the size of the p-value associated with this correlation?
### (This p-value is the probability of observing this r-value
### if the population the points were sampled from actually had zero correlation).
### Now add another 10 points, so that there are 20 in all,
### while keeping the correlation value at r=0.6.
### What is the p-value now?
### 3. Click outside the axes to reset the plot.
### Now make in turn, approximately, each of the four plots
### shown in Anscombe's Quartet:
### http://en.wikipedia.org/wiki/Anscombe's_quartet
### What does this tell you how only knowing a correlation-value
### might give you a misleading picture of the data?
###########################################
# First, we import the modules that we need
import pylab
import scipy
import scipy.stats # We need this one for the norm.pdf function
#####################################################
# Next, we define the functions that the program uses
### This function clears the figure and empties the points list
def clear_the_figure_and_empty_points_list():
    """Reset the interactive demo: forget all clicked points and redraw empty axes."""
    global coords_array
    global point_handles_array
    # Reset our variables to be empty
    coords_array = scipy.array([])
    point_handles_array = scipy.array([])
    # NOTE(review): the original also assigned a local
    # handle_of_regression_line_plot = [] here; without a `global`
    # declaration that assignment was a no-op, so it has been removed.
    ### Clear the figure window
    pylab.clf()  # clf means "clear the figure"
    ### In order to keep the boundaries of the figure fixed in place,
    ### we will draw a white box around the region that we want.
    pylab.plot(axis_range*scipy.array([-1, 1, 1, -1]),
               axis_range*scipy.array([-1, -1, 1, 1]), 'w-')
    ### We want a long title, so we put a \n in the middle,
    ### to start a new line of title-text
    multiline_title_string = 'Click to add points, on old points to delete,' \
                             ' outside axes to reset.\n' \
                             ' The red line is the linear regression best-fit.'
    pylab.title(multiline_title_string)
    pylab.grid(True)  # Add a grid on to the figure window
    pylab.axis('equal')  # Make the tick-marks equally spaced on x- and y-axes
    pylab.axis(axis_range*scipy.array([-1, 1, -1, 1]))
# This is the function which gets called when the mouse is clicked in the figure window
def do_this_when_the_mouse_is_clicked(this_event):
    """Matplotlib mouse-click callback: add, delete, or reset click-points.

    A click outside the axes resets everything; a click on an existing
    point deletes it; any other click adds a new point. With two or more
    points the regression statistics are (re)drawn.
    """
    global coords_array
    global point_handles_array
    x = this_event.xdata
    y = this_event.ydata
    ### If the click is outside the range, then clear figure and points list
    if this_event.xdata is None: # This means we clicked outside the axis
        clear_the_figure_and_empty_points_list()
    else: # We clicked inside the axis
        number_of_points = scipy.shape(coords_array)[0]
        if number_of_points > 0:
            point_to_be_deleted = check_if_click_is_on_an_existing_point(x,y)
            if point_to_be_deleted != -1: # We delete a point
                # We will delete that row from coords_array. The rows are axis 0
                coords_array = scipy.delete(coords_array,point_to_be_deleted,0)
                # We will also hide that point on the figure, by finding its handle
                handle_of_point_to_be_deleted = point_handles_array[point_to_be_deleted]
                pylab.setp(handle_of_point_to_be_deleted,visible=False)
                # Now that we have erased the point with that handle,
                # we can delete that handle from the handles list
                point_handles_array = scipy.delete(point_handles_array,point_to_be_deleted)
            else: # We make a new point
                coords_array = scipy.vstack((coords_array,[x,y]))
                new_point_handle = pylab.plot(x,y,'*',color='blue')
                point_handles_array = scipy.append(point_handles_array,new_point_handle)
        # First-ever point: coords_array is still the empty 1-D array, so
        # vstack above cannot be used; start the 2-D array from scratch.
        if number_of_points == 0:
            coords_array = scipy.array([[x,y]])
            new_point_handle = pylab.plot(x,y,'*',color='blue')
            point_handles_array = scipy.append(point_handles_array,new_point_handle)
        ### Now plot the statistics that this program is demonstrating
        number_of_points = scipy.shape(coords_array)[0] # Recount how many points we have now
        if number_of_points > 1:
            plot_the_correlation()
        ### Finally, check to see whether we have fewer than two points
        ### as a result of any possible point-deletions above.
        ### If we do, then delete the stats info from the plot,
        ### as it isn't meaningful for just one data point
        number_of_points = scipy.shape(coords_array)[0]
        if number_of_points < 2: # Stats need two or more points to be meaningful
            pylab.setp(handle_of_regression_line_plot,visible=False)
            pylab.xlabel('')
            pylab.ylabel('')
        # Set the axis back to its original value, in case Python has changed it during plotting
        pylab.axis('equal') # Make the tick-marks equally spaced on x- and y-axes
        pylab.axis(axis_range*scipy.array([-1, 1, -1, 1]))
# This is the function which calculates and plots the statistics
def plot_the_correlation():
    """Fit a regression line through the clicked points and display r and p.

    Draws the least-squares best-fit line in red and writes the number of
    points, the correlation coefficient r, and its p-value into the x-label.
    (Two garbled dump lines that had split comments in this function were
    reassembled; the executable code is unchanged.)
    """
    # First, delete any existing regression line plots from the figure
    global handle_of_regression_line_plot
    pylab.setp(handle_of_regression_line_plot,visible=False)
    #### Next, calculate and plot the stats
    number_of_points = scipy.shape(coords_array)[0]
    x_coords = coords_array[:,0]  # Python starts counting from zero
    y_coords = coords_array[:,1]
    #### To get the best-fit line, we'll do a regression
    slope, y_intercept, r_from_regression, p_from_regression, std_err = (
                scipy.stats.linregress(x_coords,y_coords) )
    #### Plot the best-fit line in red
    handle_of_regression_line_plot = pylab.plot(axis_range*scipy.array([-1,1]),
                y_intercept + slope*axis_range*scipy.array([-1,1]),'r-')
    #### Uncomment the next two lines if you want to verify
    #### that the stats we get from regression and from correlation are the same.
    # r_from_corr,p_from_corr = scipy.stats.pearsonr(x_coords,y_coords)
    # print r_from_regression,r_from_corr,p_from_regression,p_from_corr
    #### In order to make the p-values format nicely
    #### even when they have a bunch of zeros at the start, we do this:
    p_value_string = "%1.2g" % p_from_regression
    pylab.xlabel(str(number_of_points) + ' points: ' +
                 ' p-value of corr = ' + p_value_string +
                 '   Correlation, r = ' + str(round(r_from_regression,2)) )
    # The ',2' means show 2 decimal places
    # Set the axis back to its original value, in case Python has changed it during plotting
    pylab.axis('equal')  # Make the tick-marks equally spaced on x- and y-axes
    pylab.axis(axis_range*scipy.array([-1, 1, -1, 1]))
# This is the function which deletes existing points if you click on them
def check_if_click_is_on_an_existing_point(mouse_x_coord,mouse_y_c
|
maoterodapena/pysouliss
|
souliss/Typicals.py
|
Python
|
mit
| 9,295 | 0.00979 |
import logging
import struct
_LOGGER = logging.getLogger(__name__)
# Lookup table of Souliss "typical" type codes. Each entry maps a type id to:
#   desc       - human-readable description,
#   size       - payload size in bytes,
#   name       - (some entries) a short display name,
#   state_desc - (T1x entries) mapping of raw state byte to a state label,
#   units      - (T5x entries) measurement unit string.
typical_types = {
    0x11: {
        "desc": "T11: ON/OFF Digital Output with Timer Option", "size": 1,
        "name": "Switch Timer",
        "state_desc": { 0x00: "off",
                        0x01: "on"}
    },
    0x12: {"desc": "T12: ON/OFF Digital Output with AUTO mode",
           "size": 1,
           "name": "Switch auto",
           "state_desc": { 0x00: "off",
                           0x01: "on",
                           0xF0: "on/auto",
                           0xF1: "off/auto"
                         }
    },
    0x13: {"desc": "T13: Digital Input Value",
           "size": 1,
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    0x14: {"desc": "T14: Pulse Digital Output",
           "size": 1,
           "name": "Switch",
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    0x15: {"desc": "T15: RGB Light",
           "size": 2,
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    # NOTE(review): 0x00 is mapped to "on" here, unlike every sibling entry
    # where 0x00 means "off" — looks like a typo; confirm before relying on it.
    0x16: {"desc": "T16: RGB LED Strip",
           "size": 4,
           "state_desc": { 0x00: "on",
                           0x01: "on"}
    },
    0x18: {"desc": "T18: ON/OFF Digital Output (Step Relay)",
           "size": 1,
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    0x19: {"desc": "T19: Single Color LED Strip",
           "size": 2,
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    0x1A: {"desc": "T1A: Digital Input Pass Through",
           "size": 1,
           "state_desc": { 0x00: "off",
                           0x01: "on"}
    },
    0x1B: {"desc": "T1B: Position Constrained ON/OFF Digital Output", "size": 1},
    0x21: {"desc": "T21: Motorized devices with limit switches", "size": 1},
    0x22: {"desc": "T22: Motorized devices with limit switches and middle position", "size": 1},
    0x31: {"desc": "T31: Temperature control with cooling and heating mode", "size": 5},
    0x32: {"desc": "T32: Air Conditioner", "size": 2},
    0x41: {"desc": "T41: Anti-theft integration -Main-", "size": 1},
    0x42: {"desc": "T42: Anti-theft integration -Peer-", "size": 1},
    0x51: {"desc": "T51: Analog input, half-precision floating point",
           "size": 2,
           "units": "units"},
    0x52: {"desc": "T52: Temperature measure (-20, +50) C",
           "size": 2,
           "units": "C"},
    0x53: {"desc": "T53: Humidity measure (0, 100) ",
           "size": 2,
           "units": "%"},
    0x54: {"desc": "T54: Light Sensor (0, 40) kLux",
           "size": 2,
           "units": "kLux"},
    0x55: {"desc": "T55: Voltage (0, 400) V",
           "size": 2,
           "units": "V"},
    0x56: {"desc": "T56: Current (0, 25) A",
           "size": 2,
           "units": "A"},
    0x57: {"desc": "T57: Power (0, 6500) W",
           "size": 2,
           "units": "W"},
    0x58: {"desc": "T58: Pressure measure (0, 1500) hPa",
           "size": 2,
           "units": "hPa"},
    0x61: {"desc": "T61: Analog setpoint, half-precision floating point", "size": 2},
    0x62: {"desc": "T62: Temperature measure (-20, +50) C", "size": 2},
    0x63: {"desc": "T63: Humidity measure (0, 100) ", "size": 2},
    0x64: {"desc": "T64: Light Sensor (0, 40) kLux", "size": 2},
    0x65: {"desc": "T65: Voltage (0, 400) V", "size": 2},
    0x66: {"desc": "T66: Current (0, 25) A", "size": 2},
    0x67: {"desc": "T67: Power (0, 6500) W", "size": 2},
    0x68: {"desc": "T68: Pressure measure (0, 1500) hPa", "size": 2}
}
class Typical(object):
    """Base class for a Souliss "typical" (one logical device in a node slot).

    Holds the raw state bytes for the slot, notifies registered listeners on
    change, and knows its node/slot placement once assigned.
    """

    def __init__(self, ttype):
        self.ttype = ttype
        self.description = typical_types[ttype]['desc']
        self.size = typical_types[ttype]['size']
        self.slot = -1  # undefined until assigned to a slot
        self.node = -1  # undefined until assigned to a slot
        # initial state; it will be overwritten with the first update
        self.state = b'\x00\x00\x00\x00\x00\x00\x00'
        self.listeners = []

    def add_listener(self, callback):
        """Register a callable invoked with this typical on each state change."""
        self.listeners.append(callback)

    @staticmethod
    def factory_type(ttype):
        """Instantiate the concrete subclass appropriate for `ttype`."""
        if ttype in [0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1A, 0x1B]:
            return TypicalT1n(ttype)
        elif ttype in [0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58]:
            return TypicalT5n(ttype)
        else:
            return TypicalNotImplemented(ttype)

    def update(self, value):
        """Store a new raw state (truncated to this typical's size) and
        notify listeners when it actually changed."""
        value = value[:self.size]
        if value != self.state:
            old_state = self.state
            self.state = value
            self.state_description = value
            # Bug fix: the original log call had five % placeholders but only
            # four arguments (and fed `description` to a %d), which raised at
            # logging time. Capture the old state *before* overwriting so the
            # "from ... to ..." message is meaningful; use lazy %-args.
            _LOGGER.info("Node %d: Typical %d - %s updated from %s to %s",
                         self.node, self.index, self.description,
                         ':'.join("{:02x}".format(c) for c in old_state[:self.size]),
                         ':'.join("{:02x}".format(c) for c in value[:self.size]))
            for listener in self.listeners:
                listener(self)

    def set_node_slot_index(self, node, slot, index):
        """Record where this typical lives: node id, slot number, flat index."""
        self.node = node
        self.slot = slot
        self.index = index

    def to_dict(self):
        """Serializable summary: description, slot and type code."""
        return {'ddesc': self.description,
                'slo': self.slot,
                'typ': self.ttype}
class TypicalT1n(Typical):
def __init__(self, ttype):
super(TypicalT1n,self).__init__(ttype)
self.state_desc = typical_types[ttype]['state_desc']
def update(self, value):
value = value[:self.size]
if value != self.state:
self.state = value
if self.size > 1: # Raw description for Typicals T15, T16 and T19
self.state_description = ':'.join("{:02x}".format(c) for c in self.state)
else:
if ord(value) in self.state_desc.keys():
self.state_description = self.state_desc[ord(value)]
else:
_LOGGER.warning("Unknow value!")
self.state_description = "Unknow value!"
_LOGGER.info("Node %d: Typical %d - %s updated to %s" % (self.node, self.index,
self.description,
self.state_description))
for listener in self.listeners:
listener(self)
def send_command(self, command):
# TODO: Handle different T1 behaviour
if command == 0x01: # Toggle
if self.state == chr(1):
|
PriviPK/privipk-sync-engine
|
kgc/kgc-service.py
|
Python
|
agpl-3.0
| 1,469 | 0.001361 |
#!/usr/bin/env python2.7
import click
from common import KGC_PORT
from server import KgcRpcServer
import privipk
from privipk.keys import LongTermSecretKey
@click.group()
def cli():
    """
    Entry point for the KGC command group.

    Use this tool to spawn a KGC server and/or to generate
    a private key for a KGC server.
    """
    pass
@cli.command()
@click.option('--port', '-p', required=False, metavar='PORT', default=KGC_PORT,
              help='TCP port for the KGC to listen on.')
@click.argument('secret_key_path', required=True)
@click.argument('group_params_path', required=False)
def start(port, secret_key_path, group_params_path):
    """
    Starts a KGC server. Loads the private key from the specified file.
    Setting the group parameters is not yet implemented.
    """
    # (Reassembled a docstring line that a dump artifact had split in two.)
    params = privipk.parameters.default
    # Deserialize the server's long-term secret key from disk.
    lts = LongTermSecretKey.unserialize(params, open(secret_key_path).read())
    server = KgcRpcServer(lts, port)
    server.start()
@cli.command()
@click.argument('secret_key_path', required=True)
def genkey(secret_key_path):
    """
    Generates a private key and stores it in the specified file. Also
    stores the public key in another file named by appending .pub
    to the first file.
    """
    params = privipk.parameters.default
    lts = LongTermSecretKey(params)
    # Use context managers so files are flushed and closed deterministically
    # (the original left both file objects unclosed).
    with open(secret_key_path, 'w') as key_file:
        key_file.write(lts.serialize())
    ltp = lts.getPublicKey()
    with open(secret_key_path + '.pub', 'w') as pub_file:
        pub_file.write(ltp.serialize())
if __name__ == '__main__':
cli()
|
vishnu-kumar/PeformanceFramework
|
tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py
|
Python
|
apache-2.0
| 2,878 | 0 |
# Copyright 2015: Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.nova import keypairs
from tests.unit import test
class NovaKeypairTestCase(test.ScenarioTestCase):
    """Unit tests for the NovaKeypair scenarios (mocked atomic actions).

    Two assertion statements in this class had been split mid-identifier by
    dump artifacts; they are reassembled here, otherwise unchanged.
    """

    def test_create_and_list_keypairs(self):
        scenario = keypairs.NovaKeypair(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._create_keypair = mock.MagicMock(return_value="foo_keypair")
        scenario._list_keypairs = mock.MagicMock()

        scenario.create_and_list_keypairs(fakearg="fakearg")
        scenario._create_keypair.assert_called_once_with(fakearg="fakearg")
        scenario._list_keypairs.assert_called_once_with()

    def test_create_and_delete_keypair(self):
        scenario = keypairs.NovaKeypair(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._create_keypair = mock.MagicMock(return_value="foo_keypair")
        scenario._delete_keypair = mock.MagicMock()

        scenario.create_and_delete_keypair(fakearg="fakearg")
        scenario._create_keypair.assert_called_once_with(fakearg="fakearg")
        scenario._delete_keypair.assert_called_once_with("foo_keypair")

    def test_boot_and_delete_server_with_keypair(self):
        scenario = keypairs.NovaKeypair(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._create_keypair = mock.MagicMock(return_value="foo_keypair")
        scenario._boot_server = mock.MagicMock(return_value="foo_server")
        scenario._delete_server = mock.MagicMock()
        scenario._delete_keypair = mock.MagicMock()

        fake_server_args = {
            "foo": 1,
            "bar": 2,
        }

        scenario.boot_and_delete_server_with_keypair(
            "img", 1, server_kwargs=fake_server_args,
            fake_arg1="foo", fake_arg2="bar")

        scenario._create_keypair.assert_called_once_with(
            fake_arg1="foo", fake_arg2="bar")

        scenario._boot_server.assert_called_once_with(
            "img", 1, foo=1, bar=2, key_name="foo_keypair")

        scenario._delete_server.assert_called_once_with("foo_server")

        scenario._delete_keypair.assert_called_once_with("foo_keypair")
|
CajetanP/code-learning
|
Python/Learning/Tips/args_and_kwargs.py
|
Python
|
mit
| 319 | 0 |
def test_var_args(f_arg, *argv):
print("normal:"
|
, f_arg)
for arg in argv:
print("another one:", arg)
def greet_me(**kwargs):
    """Print every keyword argument as a "key = value" line, in call order."""
    for name in kwargs:
        print("{0} = {1}".format(name, kwargs[name]))
# Demonstrate both helpers when the module is executed.
# (The 'Kenobi' literal had been split in two by a dump artifact.)
test_var_args('hello', 'there', 'general', 'Kenobi')
greet_me(name="test", surname="me")
|
akpeker/sayi-tahmin-cozucu
|
py/sayitahminCozucu.py
|
Python
|
mit
| 5,341 | 0.02385 |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 14:10:30 2017
@author: WİN7
"""
# Generate every 4-digit string (leading zeros included) whose digits are
# pairwise distinct, and return them as a list of 5040 strings.
def sayilariUret():
    S = []
    for n in range(10000):
        ss = "%04d" % n
        # A candidate qualifies exactly when its four characters are distinct.
        if len(set(ss)) == 4:
            S.append(ss)
    return S
# Compare the guess `tahmin` against the secret `sayi` and return (A, B):
# A = digits that are correct and in the right position,
# B = correct digits that are in the wrong position.
def getMatches(sayi, tahmin):
    A = 0
    B = 0
    for pos, ch in enumerate(tahmin):
        if ch not in sayi:
            continue
        if sayi[pos] == ch:
            A += 1
        else:
            B += 1
    return A, B
# From the candidate list S, keep only those codes that would have produced
# exactly the observed feedback (A, B) for the guess `tahmin` — i.e. filter
# the alternatives so they stay consistent with the result received.
def filt(S, tahmin, A, B):
    return [ss for ss in S if getMatches(ss, tahmin) == (A, B)]
# Try the guess `tahmin` against every code in S and count how often each
# feedback outcome occurs. Outcomes are encoded as "AB" strings.
# hist (dict) : {outcome: occurrences}, e.g. {"00": 70, "01": 45, "30": 5, "40": 1}
def getHist(S, tahmin):
    hist = {}
    for ss in S:
        A, B = getMatches(ss, tahmin)
        outcome = str(A) + str(B)
        hist[outcome] = hist.get(outcome, 0) + 1
    return hist
import math
def getEntropies(S):
    """Return a 10000-element list: for every possible guess 0000-9999, the
    Shannon entropy of the feedback distribution over the candidate set S.

    Higher entropy means the guess splits the candidates more evenly, i.e.
    it is more informative. Guesses with entropy above 3.5 are printed as
    they are found. (Two junk lines that a dump artifact had inserted into
    the loop body were removed; logic is unchanged.)
    """
    E = [0]*10000
    # try all possible guesses (n) and find entropy of results for each guess
    for n in range(10000):
        tahmin = "%04d" % n
        hist = getHist(S, tahmin)
        tot = 0
        for a in hist:
            tot += hist[a]
        entr = 0
        for a in hist:
            p = 1.0*hist[a]/tot
            entr -= p*math.log(p, 2)
        E[n] = entr
        if entr > 3.5:
            print("%04d : %f" % (n, entr))
    return E
def getEntropiesDict(S,UNI,yaz=True,tr=1.9):
    """Entropy of the feedback distribution for each guess in UNI, vs set S.

    Returns an OrderedDict {guess: entropy}. When `yaz` is true, guesses
    with entropy >= `tr` are printed, as are guesses whose histogram
    contains the winning outcome "40".
    NOTE(review): relies on OrderedDict imported further down the module;
    that import runs before any call, but keep the ordering in mind.
    """
    EN = OrderedDict()
    # try all possible guesses (n) and find entropy of results for each guess
    for n in range(len(UNI)):
        #tahmin = "%04d"%n
        tahmin = UNI[n]
        hist = getHist(S,tahmin)
        tot = 0
        for a in hist:
            tot += hist[a]
        entr = 0
        for a in hist:
            p = 1.0*hist[a]/tot
            entr -= p*math.log(p,2)
        EN[tahmin] = entr
        if yaz and entr >= tr:
            print("%s : %f"%(tahmin,entr), end=", ")
            print(hist)
        if yaz and "40" in hist:
            print("Possible guess:%s : %f"%(tahmin,entr), end=", ")
            print(hist)
    return EN
def ENInfo(EN):
    """Print the key with the highest entropy and how many keys tie for it."""
    best = max(EN, key=EN.get)
    best_value = EN[best]
    ties = sum(1 for t in EN if EN[t] == best_value)
    print("Max: %s -> %f. (%d times)" % (best, best_value, ties))
def ENSortInfo(EN, top=10):
    """Print the `top` highest-entropy entries of EN, best first.

    Like the original, raises IndexError when top exceeds len(EN).
    """
    ranked = sorted(EN, key=lambda t: EN[t], reverse=True)
    for i in range(top):
        key = ranked[i]
        print("%d. %s -> %f" % (i, key, EN[key]))
from collections import OrderedDict
def game():
    """Replay a fixed, hard-coded sequence of guesses/results, filtering the
    candidate set after each step, then return the entropy dict of the
    remaining candidates."""
    # guess -> observed (A, B) feedback, in play order
    tahs = OrderedDict()
    tahs["1234"] = (1,1)
    tahs["0235"] = (1,1)
    tahs["2637"] = (2,1)
    tahs["2738"] = (1,1)
    #tahs["9786"] = (0,1)
    S = sayilariUret()
    print("Starting with: %d"%len(S))
    S0 = S
    cnt = 0
    for t in tahs:
        res = tahs[t]
        # Keep only the candidates consistent with this guess's feedback.
        S1 = filt(S0,t,res[0],res[1])
        cnt += 1
        S0 = S1
        print("S%d : tahmin: %s, res: %d%d, len: %d"%(cnt,t,res[0],res[1],len(S1)))
        if len(S1) < 20:
            print("Listing remaining candidates:")
            for t in S1:
                print(t,end=" -> ")
                print(getHist(S1,t))
    EN = getEntropiesDict(S1,S, False)
    return EN
def game1():
    """Interactive solver loop: prompt for the feedback of each guess, filter
    the candidate set, and suggest the next guess by entropy ranking.
    An empty guess ends the loop; returns the guess->feedback history."""
    history = OrderedDict()
    S = sayilariUret()
    print("Starting with: %d"%len(S))
    S0 = S
    cnt = 1
    tahmin = "1234"
    while tahmin != "":
        print("-"*20)
        # Feedback is typed as two digits, e.g. "21" for A=2, B=1.
        restr = input( "%d. Result for %s? "%(cnt, tahmin) )
        history[tahmin] = restr
        res = ( int(restr[0]), int(restr[1]) )
        S1 = filt( S0, tahmin, res[0], res[1] )
        cnt += 1
        S0 = S1
        print("tahmin: %s, res: %d%d, len: %d"%(tahmin,res[0],res[1],len(S1)))
        if len(S1) < 20:
            print("Listing remaining candidates:")
            for t in S1:
                print(t,end=" -> ")
                print(getHist(S1,t))
        EN = getEntropiesDict(S1,S, False)
        ENInfo(EN)
        ENSortInfo(EN,15)
        tahmin = input("Next tahmin? ")
    print(history)
    return history
def dene0():
    """Worked example: filter the candidate set with a fixed sequence of
    guesses/results, then compute entropies of the survivors."""
    S = sayilariUret()
    print(len(S))
    S1 = filt(S,"1234",0,2)
    print(len(S1))
    S2 = filt(S1,"5678",0,2)
    print(len(S2))
    S3 = filt(S2,"7812",0,2)
    print(len(S3))
    #E3 = getEntropiesDict(S3,S)
    S4 = filt(S3,"2370",0,3)
    print(len(S4))
    #E4 = getEntropiesDict(S4,S)
    S5 = filt(S4,"9786",0,1)
    print(len(S5))
    # Bug fix: this called getEntropies1(), which is not defined anywhere in
    # this module (NameError at runtime). getEntropiesDict() is the
    # two-argument entropy helper defined above.
    E5 = getEntropiesDict(S5,S)
#EN = game()
#ENInfo(EN)
#ENSortInfo(EN,15)
# Launch the interactive solver loop when the module is imported/run.
game1()
|
tomka/CATMAID
|
django/applications/catmaid/control/graph2.py
|
Python
|
gpl-3.0
| 24,814 | 0.004796 |
# -*- coding: utf-8 -*-
from collections import defaultdict
from functools import partial
from itertools import count
import json
import networkx as nx
from networkx.algorithms import weakly_connected_component_subgraphs
from numpy import subtract
from numpy.linalg import norm
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union
from django.db import connection
from django.http import JsonResponse
from rest_framework.decorators import api_view
from catmaid.models import UserRole
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import (get_relation_to_id_map, get_request_bool,
get_request_list)
from catmaid.control.link import KNOWN_LINK_PAIRS, UNDIRECTED_LINK_TYPES
from catmaid.control.tree_util import simplify
from catmaid.control.synapseclustering import tree_max_density
def make_new_synapse_count_array() -> List[int]:
    """Return a fresh five-element zero list (one bucket per confidence 1-5)."""
    return [0] * 5
def basic_graph(project_id, skeleton_ids, relations=None,
        source_link:str="presynaptic_to", target_link:str="postsynaptic_to",
        allowed_connector_ids=None) -> Dict[str, Tuple]:
    """Build a skeleton-level connectivity graph for the given skeletons.

    Returns {'edges': ((source_skid, target_skid, [c1..c5 counts]), ...)}
    where the 5-element list counts links per confidence level (1-5).
    Raises ValueError when no skeleton IDs are given.
    """
    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    cursor = connection.cursor()

    if not relations:
        relations = get_relation_to_id_map(project_id, (source_link, target_link), cursor)
    source_rel_id, target_rel_id = relations[source_link], relations[target_link]

    # A link type is "undirected" when both ends use an undirected relation;
    # then every connection would otherwise appear twice in the join below.
    undirected_links = source_link in UNDIRECTED_LINK_TYPES and \
            target_link in UNDIRECTED_LINK_TYPES

    # Find all links in the passed in set of skeletons. If a relation is
    # reciprocal, we need to avoid getting two result rows back for each
    # treenode-connector-treenode connection. To keep things simple, we will add
    # a "skeleton ID 1" < "skeleton ID 2" test for reciprocal links.
    cursor.execute(f"""
        SELECT t1.skeleton_id, t2.skeleton_id, LEAST(t1.confidence, t2.confidence)
        FROM treenode_connector t1,
             treenode_connector t2
        WHERE t1.skeleton_id = ANY(%(skeleton_ids)s::bigint[])
          AND t1.relation_id = %(source_rel)s
          AND t1.connector_id = t2.connector_id
          AND t2.skeleton_id = ANY(%(skeleton_ids)s::bigint[])
          AND t2.relation_id = %(target_rel)s
          AND t1.id <> t2.id
          {'AND t1.skeleton_id < t2.skeleton_id' if undirected_links else ''}
          {'AND t1.connector_id = ANY(%(allowed_c_ids)s::bigint[])' if allowed_connector_ids else ''}
    """, {
        'skeleton_ids': list(skeleton_ids),
        'source_rel': source_rel_id,
        'target_rel': target_rel_id,
        'allowed_c_ids': allowed_connector_ids,
    })

    # edges[source_skid][target_skid] -> per-confidence counter list
    edges:DefaultDict = defaultdict(partial(defaultdict, make_new_synapse_count_array))
    for row in cursor.fetchall():
        # row[2] is the lower of the two link confidences (1-5); bucket it.
        edges[row[0]][row[1]][row[2] - 1] += 1

    return {
        'edges': tuple((s, t, count)
                for s, edge in edges.items()
                for t, count in edge.items())
    }
def confidence_split_graph(project_id, skeleton_ids, confidence_threshold,
        relations=None, source_rel:str="presynaptic_to",
        target_rel:str="postsynaptic_to", allowed_connector_ids=None) -> Dict[str, Any]:
    """ Assumes 0 < confidence_threshold <= 5.

    Build a graph where each skeleton is split into sub-skeletons at edges
    whose confidence is below the threshold. Returns {'nodes': [...],
    'edges': [(pre, post, [c1..c5 counts]), ...]}.
    """
    if not skeleton_ids:
        raise ValueError("No skeleton IDs provided")

    # We need skeleton IDs as a list
    skeleton_ids = list(skeleton_ids)

    cursor = connection.cursor()
    if not relations:
        relations = get_relation_to_id_map(project_id, (source_rel, target_rel), cursor)
    source_rel_id, target_rel_id = relations[source_rel], relations[target_rel]

    # Fetch (valid) synapses of all skeletons
    cursor.execute(f'''
    SELECT skeleton_id, treenode_id, connector_id, relation_id, confidence
    FROM treenode_connector
    WHERE project_id = %(project_id)s
      AND skeleton_id = ANY(%(skids)s::bigint[])
      AND relation_id IN (%(source_rel_id)s, %(target_rel_id)s)
      {'AND connector_id = ANY(%(allowed_c_ids)s::bigint[])' if allowed_connector_ids else ''}
    ''', {
        'project_id': int(project_id),
        'skids': skeleton_ids,
        'source_rel_id': source_rel_id,
        'target_rel_id': target_rel_id,
        'allowed_c_ids': allowed_connector_ids,
    })

    stc:DefaultDict[Any, List] = defaultdict(list)
    for row in cursor.fetchall():
        stc[row[0]].append(row[1:]) # skeleton_id vs (treenode_id, connector_id, relation_id, confidence)

    # Fetch all treenodes of all skeletons
    cursor.execute('''
    SELECT skeleton_id, id, parent_id, confidence
    FROM treenode
    WHERE project_id = %(project_id)s
      AND skeleton_id = ANY(%(skeleton_ids)s::bigint[])
    ORDER BY skeleton_id
    ''', {
        'project_id': project_id,
        'skeleton_ids': skeleton_ids,
    })

    # Dictionary of connector_id vs relation_id vs list of sub-skeleton ID
    connectors:DefaultDict = defaultdict(partial(defaultdict, list))

    # All nodes of the graph
    nodeIDs:List = []

    # Read out into memory only one skeleton at a time
    current_skid = None
    tree:Optional[nx.DiGraph] = None
    for row in cursor.fetchall():
        if row[0] == current_skid:
            # Build the tree, breaking it at the low-confidence edges
            if row[2] and row[3] >= confidence_threshold:
                # mypy cannot prove this will be a DiGraph by here
                tree.add_edge(row[2], row[1]) # type: ignore
            continue

        if tree:
            nodeIDs.extend(split_by_confidence(current_skid, tree, stc[current_skid], connectors))

        # Start the next tree
        current_skid = row[0]
        tree = nx.DiGraph()
        # Bug fix: this branch used a strict '>' while the same test above
        # uses '>=', so the first edge of every skeleton was held to a
        # stricter threshold. Use '>=' consistently.
        if row[2] and row[3] >= confidence_threshold:
            tree.add_edge(row[2], row[1])

    if tree:
        nodeIDs.extend(split_by_confidence(current_skid, tree, stc[current_skid], connectors))

    # Create the edges of the graph from the connectors, which was populated as a side effect of 'split_by_confidence'
    edges:DefaultDict = defaultdict(partial(defaultdict, make_new_synapse_count_array)) # pre vs post vs count
    for c in connectors.values():
        for pre in c[source_rel_id]:
            for post in c[target_rel_id]:
                edges[pre[0]][post[0]][min(pre[1], post[1]) - 1] += 1

    return {
        'nodes': nodeIDs,
        'edges': [(s, t, count)
                for s, edge in edges.items()
                for t, count in edge.items()]
    }
def dual_split_graph(project_id, skeleton_ids, confidence_threshold, bandwidth,
expand, relations=None, source_link="presynaptic_to",
target_link="postsynaptic_to", allowed_connector_ids=None) -> Dict[str, Any]:
""" Assumes bandwidth > 0 and some skeleton_id in expand. """
cursor = connection.cursor()
skeleton_ids = set(skeleton_ids)
expand = set(expand)
if not skeleton_ids:
raise ValueError("No skeleton IDs provided")
if not relations:
relations = get_relation_to_id_map(project_id, (source_link, target_link), cursor)
source_rel_id, target_rel_id = relations[source_link], relations[target_link]
# Fetch synapses of all skeletons
cursor.execute(f'''
SELECT skeleton_id, treenode_id, connector_id, relation_id, confidence
FROM treenode_connector
WHERE project_id = %(project_id)s
AND skeleton_id = ANY(%(skids)s::bigint[])
AND relation_id IN (%(source_rel_id)s, %(target_rel_id)s)
{'AND
|
connector_id = ANY(%(allowed_c_ids)s::bigint[])' if allowed_connector_ids else ''}
''', {
'project_id':
|
int(project_id),
'skids': list(skeleton_ids),
'source_rel_id': source_rel_id,
'target_rel_id': target_rel_id,
'allowed_c_ids': allowed_connector_ids,
})
stc:DefaultDict[Any, List] = defaultdict(list)
for row in cursor.fetchall():
stc[row[0]].append(row[1:]) # skeleton_id vs (treenode_id, connector_id, relation_id)
# Dictionary of connector_id vs relation_id vs list of sub-skeleton ID
connectors:DefaultDict = defaultdict(partial(defaultdict, list))
|
ATNF/askapsdp
|
Tools/Dev/rbuild/askapdev/rbuild/utils/create_init_file.py
|
Python
|
gpl-2.0
| 2,053 | 0.002923 |
## Package for various utility functions to execute build and shell commands
#
# @copyright (c) 2007 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# [email protected]
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# @author Malte Marquarding <[email protected]>
#
from __future__ import with_statement
import os
def create_init_file(name, env):
    '''
    Create package initialization file. e.g. init_package_env.sh

    :param name: file name to be created.
    :type name: string
    :param env: the environment object.
    :type env: :class:`.Environment`

    :return: None
    '''
    aroot = os.environ["ASKAP_ROOT"]
    inittxt= """#
# ASKAP auto-generated file
#
ASKAP_ROOT=%s
export ASKAP_ROOT
""" % aroot

    vartxt = """if [ "${%(key)s}" != "" ]
then
    %(key)s=%(value)s:${%(key)s}
else
    %(key)s=%(value)s
fi
export %(key)s
"""
    with open(name, 'w') as out:
        out.write(inittxt)
        for key, value in env.items():
            # Unset variables are skipped entirely.
            if not value:
                continue
            # The loader-path variable gets a platform-specific prefix
            # supplied by the environment object.
            if key == "LD_LIBRARY_PATH":
                key = env.ld_prefix + key
            substituted = value.replace(aroot, '${ASKAP_ROOT}')
            out.write(vartxt % {'key': key, 'value': substituted})
|
runekaagaard/django-contrib-locking
|
django/forms/models.py
|
Python
|
bsd-3-clause
| 54,740 | 0.00148 |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
import warnings
from django.core.exceptions import (
ImproperlyConfigured, ValidationError, NON_FIELD_ERRORS, FieldError)
from django.forms.fields import Field, ChoiceField
from django.forms.forms import DeclarativeFieldsMetaclass, BaseForm
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import smart_text, force_text
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    from django.db import models
    opts = instance._meta

    cleaned_data = form.cleaned_data
    file_field_list = []
    for f in opts.fields:
        # Skip non-editable fields, auto PKs, and fields the form did not
        # clean (e.g. fields that failed validation or were not submitted).
        if not f.editable or isinstance(f, models.AutoField) \
                or f.name not in cleaned_data:
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        # Defer saving file-type fields until after the other fields, so a
        # callable upload_to can use the values from other fields.
        if isinstance(f, models.FileField):
            file_field_list.append(f)
        else:
            f.save_form_data(instance, cleaned_data[f.name])

    for f in file_field_list:
        f.save_form_data(instance, cleaned_data[f.name])

    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """
    Saves bound Form ``form``'s cleaned_data into model instance ``instance``.

    If commit=True, then the changes to ``instance`` will be saved to the
    database. Returns ``instance``.

    If construct=False, assume ``instance`` has already been constructed and
    just needs to be saved.

    Raises ValueError when the form has validation errors.
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)
    opts = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (opts.object_name, fail_message))

    # Wrap up the saving of m2m data as a function.
    def save_m2m():
        cleaned_data = form.cleaned_data
        # Note that for historical reasons we want to include also
        # virtual_fields here. (GenericRelation was previously a fake
        # m2m field).
        for f in chain(opts.many_to_many, opts.virtual_fields):
            if not hasattr(f, 'save_form_data'):
                continue
            if fields and f.name not in fields:
                continue
            if exclude and f.name in exclude:
                continue
            if f.name in cleaned_data:
                f.save_form_data(instance, cleaned_data[f.name])
    if commit:
        # If we are committing, save the instance and the m2m data immediately.
        instance.save()
        save_m2m()
    else:
        # We're not committing. Add a method to the form to allow deferred
        # saving of m2m data.
        form.save_m2m = save_m2m
    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    opts = instance._meta
    data = {}
    for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
        if not getattr(f, 'editable', False):
            continue
        # Compare against None (not truthiness) so an explicit empty
        # ``fields`` list selects no fields, consistent with how
        # fields_for_model() and construct_instance() treat ``fields``.
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if isinstance(f, ManyToManyField):
            # If the object doesn't have a primary key yet, just use an empty
            # list for its m2m fields. Calling f.value_from_object will raise
            # an exception.
            if instance.pk is None:
                data[f.name] = []
            else:
                # MultipleChoiceWidget needs a list of pks, not object instances.
                qs = f.value_from_object(instance)
                if qs._result_cache is not None:
                    # Queryset already evaluated; reuse the cached objects.
                    data[f.name] = [item.pk for item in qs]
                else:
                    # Not evaluated yet; fetch only the pks from the database.
                    data[f.name] = list(qs.values_list('pk', flat=True))
        else:
            data[f.name] = f.value_from_object(instance)
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
|
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a func
|
tion or cal
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.