repo_name
stringlengths 5
92
| path
stringlengths 4
232
| copies
stringclasses 22
values | size
stringlengths 4
7
| content
stringlengths 626
1.05M
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 5.21
99.9
| line_max
int64 12
999
| alpha_frac
float64 0.25
0.96
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
johnyburd/glucometer
|
classes/bg_screen.py
|
1
|
1434
|
# Class for the bg_screen. Manages testing prompts and popups
from kivy.uix.screenmanager import Screen
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.popup import Popup
from .data_manager import DataManager
from .blood_glucose_tester import BloodGlucoseTester
from kivy.lang import Builder
Builder.load_file('kvfiles/bg_screen.kv')
class BGScreen(Screen):
def __init__(self, **kwargs):
super(BGScreen, self).__init__(**kwargs)
self.bgt = BloodGlucoseTester(self)
def open_popup(self):
popup = BGPopup(self.bgt)
popup.open()
class BGPopup(Popup):
def __init__(self, bgtester, **kwargs):
super(BGPopup, self).__init__(**kwargs)
self.bgt = bgtester
def start_pb(self):
event = Clock.schedule_interval(self.update_pb, 1 / 60.)
def update_pb(self, dt):
self.ids.pb.value = self.ids.pb.value + (1/3.)
if self.ids.pb.value >= 100:
self.display_BG('106')
self.ids.pb.value = 0
return False
def display_BG(self, value):
popup = Popup(title='BG',
content=Label(text=value,font_size=25),
size_hint=(None, None), size=(125, 125))
popup.bind(on_dismiss=self.dismiss_both)
popup.open()
def dismiss_both(self,instance):
self.dismiss()
return False
|
mit
| -230,565,130,032,434,340 | 31.590909 | 64 | 0.646444 | false |
pbanaszkiewicz/amy
|
amy/autoemails/tests/test_admin_preview.py
|
1
|
7861
|
from datetime import date, timedelta
from django.test import TestCase
from django.urls import reverse
from autoemails import admin
from autoemails.actions import NewInstructorAction
from autoemails.models import EmailTemplate, RQJob, Trigger
from autoemails.tests.base import FakeRedisTestCaseMixin, dummy_job
from autoemails.utils import compare_emails, scheduled_execution_time
from workshops.models import Event, Organization, Person, Role, Tag, Task
from workshops.tests.base import SuperuserMixin
class TestAdminJobPreview(SuperuserMixin, FakeRedisTestCaseMixin, TestCase):
def setUp(self):
super().setUp()
self._setUpSuperuser() # creates self.admin
# save scheduler and connection data
self._saved_scheduler = admin.scheduler
# overwrite
admin.scheduler = self.scheduler
# fake RQJob
self.email = EmailTemplate.objects.create(slug="test-1")
self.trigger = Trigger.objects.create(
action="new-instructor", template=self.email
)
self.rqjob = RQJob.objects.create(job_id="fake-id", trigger=self.trigger)
def tearDown(self):
super().tearDown()
# bring back saved scheduler
admin.scheduler = self._saved_scheduler
def prepare_data(self):
"""Create some real data (real Event, Task, Person, or action)."""
# totally fake Task, Role and Event data
Tag.objects.bulk_create(
[
Tag(name="SWC"),
Tag(name="DC"),
Tag(name="LC"),
]
)
self.event = Event.objects.create(
slug="test-event",
host=Organization.objects.first(),
start=date.today() + timedelta(days=7),
end=date.today() + timedelta(days=8),
country="GB",
venue="Ministry of Magic",
address="Underground",
latitude=20.0,
longitude=20.0,
url="https://test-event.example.com",
)
self.event.tags.set(Tag.objects.filter(name__in=["SWC", "DC", "LC"]))
self.person = Person.objects.create(
personal="Harry", family="Potter", email="[email protected]"
)
self.role = Role.objects.create(name="instructor")
self.task = Task.objects.create(
event=self.event, person=self.person, role=self.role
)
def test_view_access_by_anonymous(self):
url = reverse("admin:autoemails_rqjob_preview", args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 302)
def test_view_access_by_admin(self):
# log admin user
self._logSuperuserIn()
# try accessing the view again
url = reverse("admin:autoemails_rqjob_preview", args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 200)
def test_preview_job_nonexist(self):
# log admin user
self._logSuperuserIn()
url = reverse("admin:autoemails_rqjob_preview", args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 200)
# We can't fetch a non-existing Job (id: "fake-id"), so almost all
# fields are None'd.
self.assertEqual(rv.context["rqjob"], self.rqjob)
self.assertEqual(rv.context["job"], None)
self.assertEqual(rv.context["job_scheduled"], None)
self.assertEqual(rv.context["instance"], None)
self.assertEqual(rv.context["trigger"], None)
self.assertEqual(rv.context["template"], None)
self.assertEqual(rv.context["email"], None)
self.assertEqual(rv.context["adn_context"], None)
def test_preview_job_properties_nonexist(self):
# create some dummy job
job = self.queue.enqueue(dummy_job)
self.rqjob.job_id = job.id
self.rqjob.save()
# log admin user
self._logSuperuserIn()
url = reverse("admin:autoemails_rqjob_preview", args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 200)
# We can fetch the Job (id isn't fake anymore), but almost all
# fields are None'd.
self.assertEqual(rv.context["rqjob"], self.rqjob)
self.assertEqual(rv.context["job"], job)
self.assertEqual(rv.context["job_scheduled"], None)
self.assertEqual(rv.context["instance"], None)
self.assertEqual(rv.context["trigger"], None)
self.assertEqual(rv.context["template"], None)
self.assertEqual(rv.context["email"], None)
self.assertEqual(rv.context["adn_context"], None)
def test_preview_scheduled_job(self):
# prepare fake data
self.prepare_data()
# schedule a real job (NewInstructorAction)
action = NewInstructorAction(
trigger=self.trigger,
objects=dict(event=self.event, task=self.task),
)
# it's important to call `action._email()`, because it prepares
# `action.context`
email = action._email()
job = self.scheduler.enqueue_in(timedelta(minutes=10), action)
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
scheduled = scheduled_execution_time(job.id, scheduler=self.scheduler)
# log admin user
self._logSuperuserIn()
url = reverse("admin:autoemails_rqjob_preview", args=[rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 200)
# We can fetch the Job, it's coming from NewInstructorAction.__call__
self.assertEqual(rv.context["rqjob"], rqjob)
self.assertEqual(rv.context["job"], job)
self.assertEqual(rv.context["job_scheduled"], scheduled)
self.assertEqual(rv.context["instance"], action)
self.assertEqual(rv.context["trigger"], self.trigger)
self.assertEqual(rv.context["template"], self.trigger.template)
# can't compare emails directly, __eq__ is not implemented
self.assertTrue(compare_emails(rv.context["email"], email))
self.assertEqual(rv.context["adn_context"], action.context)
def test_preview_invoked_job(self):
# prepare fake data
self.prepare_data()
# schedule a real job (NewInstructorAction)
action = NewInstructorAction(
trigger=self.trigger,
objects=dict(event=self.event, task=self.task),
)
# it's important to call `action._email()`, because it prepares
# `action.context`
email = action._email()
# some cheating, normally the `action.email` is implemented in
# `__call__`
action.email = email
job = self.scheduler.enqueue_in(timedelta(minutes=10), action)
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
# Speed up the job! Enqueue and run immediately.
self.scheduler.enqueue_job(job)
scheduled = scheduled_execution_time(job.id, scheduler=self.scheduler)
# log admin user
self._logSuperuserIn()
url = reverse("admin:autoemails_rqjob_preview", args=[rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 200)
# We can fetch the Job, it's coming from NewInstructorAction.__call__
self.assertEqual(rv.context["rqjob"], rqjob)
self.assertEqual(rv.context["job"], job)
self.assertEqual(rv.context["job_scheduled"], scheduled)
self.assertEqual(rv.context["instance"], action)
self.assertEqual(rv.context["trigger"], self.trigger)
self.assertEqual(rv.context["template"], self.trigger.template)
# can't compare emails directly, __eq__ is not implemented
self.assertTrue(compare_emails(rv.context["email"], email))
self.assertEqual(rv.context["adn_context"], action.context)
|
mit
| -7,180,478,265,559,647,000 | 38.502513 | 81 | 0.630073 | false |
praekelt/vumi-http-proxy
|
setup.py
|
1
|
1181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name="vumi-http-proxy",
version="0.1.1",
description=(
"HTTP Proxy in Python Twisted to prevent unauthorized access to "
"blacklisted ips"),
long_description=readme,
author="Praekelt Foundation",
author_email='[email protected]',
url='https://github.com/praekelt/vumi-http-proxy',
packages=find_packages() + [
'twisted.plugins',
],
package_data={'twisted.plugins': ['twisted/plugins/*.py']},
include_package_data=True,
install_requires=[
'zope.interface',
'Click',
'Twisted',
'PyYAML',
],
entry_points='''
[console_scripts]
queen-of-ni=vumi_http_proxy.queen_of_ni:cli
''',
license="BSD",
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
bsd-3-clause
| 3,339,556,010,058,792,000 | 26.465116 | 73 | 0.602032 | false |
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python/test_topi_l2norm.py
|
1
|
1592
|
"""Test code for L2 normalization"""
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
import topi.testing
def verify_l2_normalize(ishape, eps, axis=None):
A = tvm.placeholder(ishape, name='A')
B = topi.nn.l2_normalize(A, eps, axis)
dtype = A.dtype
a_np = np.random.uniform(size=ishape).astype(dtype)
b_np = topi.testing.l2_normalize_python(a_np, eps, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
if device == 'llvm':
s = topi.generic.schedule_l2_normalize([B])
else:
s = topi.cuda.schedule_l2_normalize([B])
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan', 'nvptx']:
check_device(device)
def test_l2_normalize():
verify_l2_normalize((1, 3, 20, 20), 0.001)
verify_l2_normalize((1, 3, 20, 20), 0.001, (1,))
verify_l2_normalize((1, 3, 20, 20), 0.001, (1, 2))
verify_l2_normalize((1, 3, 20, 20), 0.001, (2, 3))
verify_l2_normalize((1, 3, 20, 20), 0.001, (0, 3))
verify_l2_normalize((1, 3, 20, 20), 0.001, (0, 2, 3))
if __name__ == "__main__":
test_l2_normalize()
|
apache-2.0
| 6,448,700,709,703,494,000 | 32.87234 | 81 | 0.576005 | false |
apache/bloodhound
|
bloodhound_dashboard/bhdashboard/model.py
|
2
|
13396
|
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from trac.db import Table, Column
from trac.core import TracError
from trac.resource import ResourceNotFound, ResourceSystem
from trac.ticket.api import TicketSystem
def dict_to_kv_str(env, data=None, sep=' AND '):
"""Converts a dictionary into a string and a list suitable for using as part
of an SQL where clause like:
('key0=%s AND key1=%s', ['value0','value1'])
The sep argument allows ' AND ' to be changed for ',' for UPDATE purposes
"""
if data is None:
return ('', [])
qfn = env.get_read_db().quote
return (sep.join('%s=%%s' % qfn(k) for k in data.keys()),
data.values())
def fields_to_kv_str(env, fields, data, sep=' AND '):
"""Converts a list of fields and a dictionary containing those fields into a
string and a list suitable for using as part of an SQL where clause like:
('key0=%s,key1=%s', ['value0','value1'])
"""
return dict_to_kv_str(env, dict((f, data[f]) for f in fields),sep)
class ModelBase(object):
"""Base class for the models to factor out common features
Derived classes should provide a meta dictionary to describe the table like:
_meta = {'table_name':'mytable',
'object_name':'WhatIWillCallMyselfInMessages',
'key_fields':['id','id2'],
'non_key_fields':[
'thing',
{
name:"field_name_x",
type='int64',
size=None,
key_size=None,
auto_increment=False
}],
'auto_inc_fields': ['id',],
}
key_fields and non_key_fields parameters may contain field name only (for
text columns) or dict with detailed column specification. In case of
detailed column specification 'name' parameter is obligatory).
"""
def __init__(self, env, keys=None):
"""Initialisation requires an environment to be specified.
If keys are provided, the Model will initialise from the database
"""
# make this impossible to instantiate without telling the class details
# about itself in the self.meta dictionary
self._old_data = {}
self._data = {}
self._exists = False
self._env = env
self._key_fields = self._get_field_names(self._meta['key_fields'])
self._non_key_fields = self._get_field_names(
self._meta['non_key_fields'])
self._all_fields = self._key_fields + self._non_key_fields
self._unique_fields = self._meta['unique_fields']
self._auto_inc_fields = self._get_auto_inc_field_names()
if keys is not None:
self._get_row(keys)
else:
self._update_from_row(None)
def update_field_dict(self, field_dict):
"""Updates the object's copy of the db fields (no db transaction)"""
self._data.update(field_dict)
def __getattr__(self, name):
"""Overridden to allow table.field style field access."""
try:
if name in self._all_fields:
return self._data[name]
except KeyError:
raise AttributeError(name)
raise AttributeError(name)
def __setattr__(self, name, value):
"""Overridden to allow table.field = value style field setting."""
data = self.__dict__.get('_data')
fields = self.__dict__.get('_all_fields')
if data and fields and name in fields:
self._data[name] = value
else:
dict.__setattr__(self, name, value)
@classmethod
def get_table_name(cls):
return cls._meta["table_name"]
def _update_from_row(self, row = None):
"""uses a provided database row to update the model"""
fields = self._all_fields
self._exists = row is not None
if row is None:
row = [None]*len(fields)
self._data = dict([(fields[i], row[i]) for i in range(len(row))])
self._old_data = {}
self._old_data.update(self._data)
def _get_row(self, keys):
"""queries the database and stores the result in the model"""
row = None
where, values = fields_to_kv_str(self._env, self._key_fields, keys)
fields = ','.join(self._all_fields)
sdata = {'fields':fields,
'where':where}
sdata.update(self._meta)
sql = """SELECT %(fields)s FROM %(table_name)s
WHERE %(where)s""" % sdata
with self._env.db_query as db:
for row in db(sql, values):
self._update_from_row(row)
break
else:
raise ResourceNotFound(
('No %(object_name)s with %(where)s' % sdata)
% tuple(values))
def delete(self):
"""Deletes the matching record from the database"""
if not self._exists:
raise TracError('%(object_name)s does not exist' % self._meta)
where, values = fields_to_kv_str(self._env, self._key_fields,
self._data)
sdata = {'where': where}
sdata.update(self._meta)
sql = """DELETE FROM %(table_name)s
WHERE %(where)s""" % sdata
with self._env.db_transaction as db:
db(sql, values)
self._exists = False
TicketSystem(self._env).reset_ticket_fields()
ResourceSystem(self._env).resource_deleted(self)
self._data = dict([(k, None) for k in self._data.keys()])
self._old_data.update(self._data)
def insert(self):
"""Create new record in the database"""
sdata = None
if self._exists or len(self.select(self._env, where =
dict([(k,self._data[k])
for k in self._key_fields]))):
sdata = {'keys':','.join(["%s='%s'" % (k, self._data[k])
for k in self._key_fields])}
elif self._unique_fields and len(self.select(self._env, where =
dict([(k,self._data[k])
for k in self._unique_fields]))):
sdata = {'keys':','.join(["%s='%s'" % (k, self._data[k])
for k in self._unique_fields])}
if sdata:
sdata.update(self._meta)
sdata['values'] = self._data
raise TracError('%(object_name)s %(keys)s already exists %(values)s' %
sdata)
for key in self._key_fields:
if self._data[key] is None and key not in self._auto_inc_fields:
sdata = {'key':key}
sdata.update(self._meta)
raise TracError('%(key)s required for %(object_name)s' %
sdata)
fields = [field for field in self._all_fields
if field not in self._auto_inc_fields]
sdata = {'fields':','.join(fields),
'values':','.join(['%s'] * len(fields))}
sdata.update(self._meta)
sql = """INSERT INTO %(table_name)s (%(fields)s)
VALUES (%(values)s)""" % sdata
with self._env.db_transaction as db:
cursor = db.cursor()
cursor.execute(sql, [self._data[f] for f in fields])
for auto_in_field in self._auto_inc_fields:
self._data[auto_in_field] = db.get_last_id(
cursor, sdata["table_name"], auto_in_field)
self._exists = True
self._old_data.update(self._data)
TicketSystem(self._env).reset_ticket_fields()
ResourceSystem(self._env).resource_created(self)
def _update_relations(self, db, author=None):
"""Extra actions due to update"""
pass
def update(self, author=None):
"""Update the matching record in the database"""
if self._old_data == self._data:
return
if not self._exists:
raise TracError('%(object_name)s does not exist' % self._meta)
for key in self._meta['no_change_fields']:
if self._data[key] != self._old_data[key]:
raise TracError('%s cannot be changed' % key)
for key in self._key_fields + self._unique_fields:
if self._data[key] != self._old_data[key]:
if len(self.select(self._env, where = {key:self._data[key]})):
raise TracError('%s already exists' % key)
setsql, setvalues = fields_to_kv_str(self._env, self._non_key_fields,
self._data, sep=',')
where, values = fields_to_kv_str(self._env, self._key_fields,
self._data)
sdata = {'where': where,
'values': setsql}
sdata.update(self._meta)
sql = """UPDATE %(table_name)s SET %(values)s
WHERE %(where)s""" % sdata
old_values = dict((k, v) for k, v in self._old_data.iteritems()
if self._data.get(k) != v)
with self._env.db_transaction as db:
db(sql, setvalues + values)
self._update_relations(db, author)
self._old_data.update(self._data)
TicketSystem(self._env).reset_ticket_fields()
ResourceSystem(self._env).resource_changed(self, old_values)
@classmethod
def select(cls, env, db=None, where=None, limit=None, order_by=None):
"""
Query the database to get a set of records back
* order_by: is list of fields with optional sort direction
("asc" or "desc") e.g. ["field1", "field2 desc"]
"""
rows = []
fields = cls._get_all_field_names()
sdata = {'fields': ','.join(env.get_read_db().quote(f)
for f in fields),}
sdata.update(cls._meta)
sql = r'SELECT %(fields)s FROM %(table_name)s' % sdata
wherestr, values = dict_to_kv_str(env, where)
if wherestr:
wherestr = ' WHERE ' + wherestr
final_sql = sql + wherestr
if limit is not None:
final_sql += ' LIMIT ' + str(int(limit))
if order_by:
final_sql += "\nORDER BY " + ', '.join(order_by)
for row in env.db_query(final_sql, values):
# we won't know which class we need until called
model = cls.__new__(cls)
data = dict([(fields[i], row[i]) for i in range(len(fields))])
model.__init__(env, data)
rows.append(model)
return rows
@classmethod
def _get_all_field_names(cls):
return cls._get_field_names(
cls._meta['key_fields']+cls._meta['non_key_fields'])
@classmethod
def _get_field_names(cls, field_specs):
def get_field_name(field_spec):
if isinstance(field_spec, dict):
return field_spec["name"]
return field_spec
return [get_field_name(field_spec) for field_spec in field_specs]
@classmethod
def _get_all_field_columns(cls):
auto_inc = cls._meta.get('auto_inc_fields', [])
columns = []
all_fields_spec = cls._meta['key_fields'] + cls._meta['non_key_fields']
for field_spec in all_fields_spec:
#field_spec can be field name string or dictionary with detailed
#column specification
if isinstance(field_spec, dict):
column_spec = field_spec
else:
column_spec = dict(
name = field_spec,
auto_increment=field_spec in auto_inc)
columns.append(column_spec)
return columns
@classmethod
def _get_auto_inc_field_names(cls):
return [field_spec["name"] for field_spec
in cls._get_all_field_columns()
if field_spec.get("auto_increment")]
@classmethod
def _get_schema(cls):
"""Generate schema from the class meta data"""
fields = [Column(
column_spec["name"],
type=column_spec.get("type", "text"),
size=column_spec.get("size"),
key_size=column_spec.get("key_size"),
auto_increment=column_spec.get("auto_increment", False))
for column_spec in cls._get_all_field_columns()]
return Table(cls._meta['table_name'], key=set(cls._meta['key_fields'] +
cls._meta['unique_fields'])) [fields]
|
apache-2.0
| 9,015,189,767,177,019,000 | 40.092025 | 82 | 0.546208 | false |
IPetr0v/compromutator
|
tests/prediction.py
|
1
|
1172
|
#!/usr/bin/env python
import sys
from random import randint
from mininet.topolib import Topo
from mininet.link import Link, TCLink
from mininet.topo import LinearTopo
from performance import PredictionTest
if __name__ == '__main__':
min_switches = int(sys.argv[1])
max_switches = int(sys.argv[2])
if len(sys.argv) > 3:
delay = int(sys.argv[3])
else:
delay = 0
if not delay:
topologies = [LinearTopo(n, 1) for n in range(
min_switches, max_switches + 1, 5)]
else:
topologies = [LinearTopo(n, 1, lopts={'cls': TCLink, 'delay': '%dus' % delay}) for n in range(
min_switches, max_switches + 1, 5)]
bandwidth_list = [int(b*1000000) for b in [0.1, 1, 10, 100, 1000]]
print '--- Prediction Test ---'
result_file = 'experiments/prediction_%d_%d_delay%d_id%d.csv' % (
min_switches, max_switches, delay, randint(1000, 9999))
test = PredictionTest(topologies, result_file=result_file,
flow_num=25, bandwidth_list=bandwidth_list,
delay=delay, run_times=1)
delays = test.run()
print 'Predictions'
print delays
|
apache-2.0
| 8,123,134,937,591,448,000 | 30.675676 | 102 | 0.605802 | false |
ebmdatalab/openprescribing
|
openprescribing/matrixstore/tests/matrixstore_factory.py
|
1
|
1707
|
import mock
import sqlite3
from frontend.price_per_unit.substitution_sets import get_substitution_sets
from matrixstore.connection import MatrixStore
from matrixstore import db
from matrixstore.tests.import_test_data_fast import import_test_data_fast
def matrixstore_from_data_factory(data_factory, end_date=None, months=None):
"""
Returns a new in-memory MatrixStore instance using the data from the
supplied DataFactory
"""
# We need this connection to be sharable across threads because
# LiveServerTestCase runs in a separate thread from the main test code
connection = sqlite3.connect(":memory:", check_same_thread=False)
end_date = max(data_factory.months)[:7] if end_date is None else end_date
months = len(data_factory.months) if months is None else months
import_test_data_fast(connection, data_factory, end_date, months=months)
return MatrixStore(connection)
def patch_global_matrixstore(matrixstore):
"""
Temporarily replace the global MatrixStore instance (as accessed via
`matrixstore.db.get_db`) with the supplied matrixstore
Returns a function which undoes the monkeypatching
"""
patcher = mock.patch("matrixstore.connection.MatrixStore.from_file")
mocked = patcher.start()
mocked.return_value = matrixstore
# There are memoized functions so we clear any previously memoized value
db.get_db.cache_clear()
db.get_row_grouper.cache_clear()
get_substitution_sets.cache_clear()
def stop_patching():
patcher.stop()
db.get_db.cache_clear()
db.get_row_grouper.cache_clear()
get_substitution_sets.cache_clear()
matrixstore.close()
return stop_patching
|
mit
| 1,399,860,575,519,518,200 | 36.108696 | 77 | 0.731107 | false |
Wireless-Innovation-Forum/Spectrum-Access-System
|
src/harness/testcases/WINNF_FT_S_PCR_testcase.py
|
1
|
41049
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of PCR tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
from datetime import datetime, timedelta
import time
import os
import uuid
from shapely import ops
from six import string_types as basestring
import sas_testcase
from full_activity_dump_helper import getFullActivityDumpSasUut
from reference_models.ppa import ppa
from reference_models.geo import drive, utils
from util import configurable_testcase, loadConfig, \
makePalRecordsConsistent, writeConfig, getCertificateFingerprint, \
makePpaAndPalRecordsConsistent, getCertFilename, json_load
from request_handler import HTTPError
SAS_TEST_HARNESS_URL = 'https://test.harness.url.not.used/v1.2'
def getSasUutClaimedPpaBoundaryFilePath(config_filename):
"""Get the absolute file path SAS UUT claimed PPA boundary to be stored.
Args:
config_filename: The absolute file path of the configuration file used in PCR.1 test.
It is required to differentiate SAS UUT claimed PPA boundary stored as result of
PCR.1 test executed with different configuration file.
Returns:
An expected absolute file path of PPA zone to be stored which is received SAS UUT
as part of PCR.1 test.
"""
ppa_zone_data_dir_path = os.path.join('testcases', 'output',
'test_WINNF_FT_S_PCR_1')
ppa_zone_data_file_name = 'sas_uut_claimed_ppa_boundary_' + \
config_filename + '.json'
ppa_zone_data_file_path = os.path.join(ppa_zone_data_dir_path,
ppa_zone_data_file_name)
return ppa_zone_data_file_path
def isPpaWithinServiceArea(pal_records, ppa_zone_geometry):
"""Check if the ppa zone geometry with in service area then return True.
Checks the ppa zone geometry's boundary and interior intersect only with the
interior of the service area (not its boundary or exterior).
Args:
pal_records: A list of pal records to compute service area based on
census_tracts.
ppa_zone_geometry: A PPA polygon dictionary in GeoJSON format.
Returns:
A value is the boolean with the value as True if the ppa zone geometry's
boundary and interior intersect with in the interior of the service
area otherwise value as false.
"""
# Get the census tract for each pal record and convert it to Shapely
# geometry.
census_tracts_for_pal = [
utils.ToShapely(drive.census_tract_driver.GetCensusTract(
pal['license']['licenseAreaIdentifier'])
['features'][0]['geometry']) for pal in pal_records]
pal_service_area = ops.cascaded_union(census_tracts_for_pal)
# Convert GeoJSON dictionary to Shapely object.
ppa_zone_shapely_geometry = utils.ToShapely(ppa_zone_geometry)
return ppa_zone_shapely_geometry.buffer(-1e-6).within(pal_service_area)
def assertRegConditionalsForPpaRefModel(registration_requests,
conditional_registration_data):
"""Check the REG Conditionals for PPA creation model and raises an exception.
Performs the assert to check installationParam present in
registrationRequests or conditional registration data and raises an exception.
PpaCreationModel requires the input registrationRequests to have
'installationParam'. But this parameter is removed for devices where
conditionals are pre-loaded. Adding the 'installationParam' into
registrationRequests by taking the corresponding values from
conditionalRegistrationData.
Args:
registration_requests: A list of individual CBSD registration
requests (each of which is itself a dictionary).
conditional_registration_data: A list of individual CBSD registration
data that need to be preloaded into SAS (each of which is a dictionary).
the fccId and cbsdSerialNumber fields are required, other fields are
optional but required for ppa reference model.
Raises:
Exception: If the installationParam object and required
fields is not found in conditionalRegistrationData and registrationRequests
for category B then raises an exception.
"""
for device in registration_requests:
if 'installationParam' not in device:
install_param_assigned = False
for conditional_params in conditional_registration_data:
# Check if FCC_ID+Serial_Number present in registrationRequest
# and conditional_params match and add the 'installationParam'.
if (conditional_params['fccId'] == device['fccId'] and
conditional_params['cbsdSerialNumber'] == device['cbsdSerialNumber']):
device.update({'installationParam': conditional_params['installationParam']})
install_param_assigned = True
# If the cbsdCategory is not present in registration request then
# assign it to the cbsdCategory in conditional_params.
if 'cbsdCategory' not in device:
# Causes KeyError: 'cbsdCategory' if 'cbsdCategory' does not exist
device['cbsdCategory'] = conditional_params['cbsdCategory']
break
# Raise an exception if InstallationParam is not found in the conditionals.
if not install_param_assigned:
raise Exception("InstallationParam not found in conditionals for device "
"%s:%s" % (device['fccId'], device['cbsdSerialNumber']))
class PpaCreationTestcase(sas_testcase.SasTestCase):
"""Implementation of PCR tests.
Checks the area of the non-overlapping difference between the maximum PPA boundary
created by SAS UUT shall be no more than 10% of the area of the maximum PPA boundary
created by the Reference Model for different varying condition and verify the SAS UUT
is able to create PPA zone or error.
"""
def triggerFadGenerationAndRetrievePpaZone(self, ssl_cert, ssl_key):
"""Triggers FAD and Retrieves PPA Zone Record.
Pulls FAD from SAS UUT. Retrieves the ZoneData Records from FAD,
checks that only one record is present.
Args:
ssl_cert: Path to SAS type cert file to be used for pulling FAD record.
ssl_key: Path to SAS type key file to be used for pulling FAD record.
Returns:
A PPA record of format of ZoneData Object.
"""
# Notify the SAS UUT about the SAS Test Harness.
certificate_hash = getCertificateFingerprint(ssl_cert)
self._sas_admin.InjectPeerSas({'certificateHash': certificate_hash,
'url': SAS_TEST_HARNESS_URL})
# As SAS is reset at the beginning of the test, the FAD records should
# contain only one zone record containing the PPA that was generated.
# Hence the first zone record is retrieved.
uut_fad = getFullActivityDumpSasUut(self._sas, self._sas_admin, ssl_cert,
ssl_key)
# Check if the retrieved FAD that has valid and at least one
# PPA zone record.
uut_ppa_zone_data = uut_fad.getZoneRecords()
self.assertEquals(len(uut_ppa_zone_data), 1,
msg='There is no single PPA Zone record received from SAS'
' UUT')
return uut_ppa_zone_data[0]
def generate_PCR_1_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 1."""
# Load PAL records.
pal_record_a = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_b = json_load(
os.path.join('testcases', 'testdata', 'pal_record_2.json'))
# Use the FIPS codes of adjacent census tracts.
pal_record_a['fipsCode'] = 20063955100
pal_record_b['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_c = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Set the same user ID for all devices
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Device_a is Category A.
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device_b is Category B with conditionals pre-loaded.
self.assertEqual(device_b['cbsdCategory'], 'B')
# Make PAL records consistent.
pal_records = makePalRecordsConsistent([pal_record_a, pal_record_b],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
# Set the locations of devices to reside with in service area.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
# placed the device_c in between device_a and device_b within service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 39.0426, -100.4457
device_c['installationParam']['heightType'] = 'AGL'
# Set the AntennaGain and EIRP capability.
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records,
'sasTestHarnessCert': getCertFilename('sas.cert'),
'sasTestHarnessKey': getCertFilename('sas.key')
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_1_default_config)
def test_WINNF_FT_S_PCR_1(self, config_filename):
"""Successful Maximum PPA Creation.
Checks PPA generated by SAS UUT shall be fully contained within the service area.
"""
# Load the Config file
config = loadConfig(config_filename)
# Very light checking of the config file.
self.assertValidConfig(
config, {
'registrationRequests': list,
'conditionalRegistrationData': list,
'palRecords': list,
'sasTestHarnessCert': basestring,
'sasTestHarnessKey': basestring
})
# Register devices and check response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Asserts the REG-Conditional value doesn't exist in the registrationRequest,
# it required to be exist in the registrationRequest data.
assertRegConditionalsForPpaRefModel(config['registrationRequests'],
config['conditionalRegistrationData'])
# Trigger PPA creation to calculate maximum PPA boundary and check if any errors
# encountered in PPA creation reference model.
test_harness_ppa_geometry = ppa.PpaCreationModel(config['registrationRequests'],
config['palRecords'])
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Trigger SAS UUT to create a PPA boundary.
pal_ids = [record['palId'] for record in config['palRecords']]
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Notify SAS UUT about SAS Harness and trigger Full Activity Dump and
# retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=config['sasTestHarnessCert'],
ssl_key=config['sasTestHarnessKey'])
# Write SAS UUT PPA to output directory of PCR.1 test.
# PPA Zone received from SAS UUT in PCR.1 test will be considered as input
# for PCR 3,PCR 6 and PCR 7 tests.
ppa_zone_data_file_path = getSasUutClaimedPpaBoundaryFilePath(
config_filename.split('/')[-1])
ppa_zone_data_dir_path = os.path.dirname(ppa_zone_data_file_path)
if not os.path.exists(ppa_zone_data_dir_path):
os.makedirs(ppa_zone_data_dir_path)
with open(ppa_zone_data_file_path, 'w') as file_handle:
file_handle.write(
json.dumps(uut_ppa_zone_data['zone'], indent=2, sort_keys=False,
separators=(',', ': ')))
# Check if the PPA generated by the SAS UUT is fully contained within the
# service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("Reference model PPA - retrieved through PpaCreationModel:%s",
json.dumps(json.loads(test_harness_ppa_geometry), indent=2,
sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
# Check the Equation 8.3.1 in Test Specfification is satisified w.r t
# [n.12, R2-PAL-05]. Check the area of the non-overlapping difference between
# the maximum PPA boundary created by SAS UUT shall be no more than 10% of the area
# of the maximum PPA boundary created by the Reference Model.
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry,
uut_ppa_geometry))
def generate_PCR_2_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 2."""
# Load PAL records.
pal_record_a = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_b = json_load(
os.path.join('testcases', 'testdata', 'pal_record_2.json'))
# Use the FIPS codes of adjacent census tracts.
pal_record_a['fipsCode'] = 20063955100
pal_record_b['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_c = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Set the same user ID for all devices.
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Device_a is Category A.
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device_b is Category B with conditionals pre-loaded.
self.assertEqual(device_b['cbsdCategory'], 'B')
# Make PAL record consistent.
pal_records = makePalRecordsConsistent([pal_record_a, pal_record_b],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
# Set the values of CBSD location, antenna gain, and EIRP limit such that a
# single PPA can be formed.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 38.74021, -100.53862
# At least one of the CBSDs is close to the boundary of the Service Area,
# so that -96 dBm/10 MHz protection contour extends beyond the service area boundary.
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 38.70645, -100.46034
# placed the device_c in between device_a and device_b within service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 38.72281, -100.50103
device_c['installationParam']['heightType'] = 'AGL'
# Set the AntennaGain and EIRP capability in a way that only
# one PPA zone is created by those CBDSs using PPA Creation Reference Model.
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records,
'sasTestHarnessCert': getCertFilename('sas.cert'),
'sasTestHarnessKey': getCertFilename('sas.key')
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_2_default_config)
def test_WINNF_FT_S_PCR_2(self, config_filename):
"""Successful Maximum PPA Boundary Creation Clipped by Service Area Boundary.
Checks the maximum PPA boundary, being clipped by the Service Area composed of
one or more adjacent Census Tracts.
Checks PPA generated by SAS UUT shall be fully contained within the service area.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Register devices and check response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Asserts the REG-Conditional value doesn't exist in the registrationRequest,
# it is required to be exist in the registrationRequest data.
assertRegConditionalsForPpaRefModel(config['registrationRequests'],
config['conditionalRegistrationData'])
# Trigger PPA creation to calculate maximum PPA boundary and check if any errors
# encountered in PPA creation reference model.
test_harness_ppa_geometry = ppa.PpaCreationModel(config['registrationRequests'],
config['palRecords'])
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Trigger Full Activity Dump and retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=config['sasTestHarnessCert'],
ssl_key=config['sasTestHarnessKey'])
# Check if the PPA generated by the SAS UUT is fully contained within the service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("Reference model PPA - retrieved through PpaCreationModel:%s",
json.dumps(json.loads(test_harness_ppa_geometry), indent=2, sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
# Check the Equation 8.3.1 in Test Specfification is satisified w.r t
# [n.12, R2-PAL-05]. Check the area of the non-overlapping difference between
# the maximum PPA boundary created by SAS UUT shall be no more than 10% of the area
# of the maximum PPA boundary created by the Reference Model.
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry,
uut_ppa_geometry))
def generate_PCR_3_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 3."""
# File path where SAS UUT claimed ppa boundary generated in PCR.1 test.
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
# SAS UUT claimed ppa boundary generated in PCR.1 test.
try:
user_claimed_ppa_contour = json_load(sas_uut_claimed_ppa_boundary_file_path)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Shrink the user claimed ppa boundary by approximately 1 kilometer.
user_claimed_ppa_contour_feature_collection = utils.InsureFeatureCollection(
utils.ShrinkAndCleanPolygon(
user_claimed_ppa_contour['features'][0]['geometry'], 1e-2),
as_dict=True)
# Create the actual config
config = {
'configPCR_1': pcr_1_test_config_file_path,
'userClaimedPpaContour': user_claimed_ppa_contour_feature_collection
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_3_default_config)
def test_WINNF_FT_S_PCR_3(self, config_filename):
"""Successful PPA Confirmation with Claimed Boundary by PAL Holder.
Checks PPA generated by SAS UUT shall be fully contained within the service area.
Checks SAS UUT shall confirm a valid PPA boundary claimed by the PAL holder,
composed of one or more adjacent Census Tracts.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
# Create PPA creation request with user claimed ppa contour.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids,
"providedContour": config['userClaimedPpaContour']
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Trigger Full Activity Dump and retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=pcr_1_test_config['sasTestHarnessCert'],
ssl_key=pcr_1_test_config['sasTestHarnessKey'])
# Check if the PPA generated by the SAS UUT is fully contained within the service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("User claimed PPA boundary:%s",
json.dumps(config['userClaimedPpaContour'], indent=2, sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(pcr_1_test_config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
# Check the maximum PPA boundary created by SAS UUT is identical with the maximum
# PPA claimed boundary.
test_harness_ppa_geometry = config['userClaimedPpaContour']['features'][0]['geometry']
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry, uut_ppa_geometry))
def generate_PCR_4_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 4."""
# Load PAL records.
pal_record_1 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_2 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_2.json'))
# Use the FIPS codes of adjacent census tracts.
pal_record_1['fipsCode'] = 20063955100
pal_record_2['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_c = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Set the same user ID for all devices
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Make PAL records consistent.
pal_records = makePalRecordsConsistent([pal_record_1, pal_record_2],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
# Set the values of CBSD location, antenna gain, and EIRP limit such that a
# single PPA can be formed.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
# At least one of the CBSDs is located outside the service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 39.09755, -99.9179
device_c['installationParam']['heightType'] = 'AGL'
# Set the AntennaGain and EIRP capability chosen in a way that only one PPA zone is created
# by those CBDSs
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_c['installationParam']['eirpCapability'] = 30
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
device_c['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_4_default_config)
def test_WINNF_FT_S_PCR_4(self, config_filename):
"""Unsuccessful PPA Creation with one or more CBSDs Outside Service Area.
Checks SAS UUT rejects creation of a PPA boundary if at least one of the CBSDs
included in the CBSD cluster list is located outside PAL holder service area.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
# Create PPA creation request to SAS UUT.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT and expect the failure response.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_5_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 5."""
# Load PAL records.
pal_record_1 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_2 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_2.json'))
# Use the FIPS codes of adjacent census tracts.
pal_record_1['fipsCode'] = 20063955100
pal_record_2['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# light check to ensure CBSD userId is not same.
self.assertNotEqual(device_a['userId'], device_b['userId'])
# The userId of at least one of the CBSDs is not associated to the userId of
# the PAL Holder configured in the PAL record for this service area.
pal_records = makePalRecordsConsistent([pal_record_1, pal_record_2], pal_low_frequency,
pal_high_frequency,
device_a['userId'])
# CBSDs are located inside the service area.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_5_default_config)
def test_WINNF_FT_S_PCR_5(self, config_filename):
"""Unsuccessful PPA Creation with one or more CBSDs Outside Service Area.
Checks SAS UUT rejects creation of a PPA boundary if at least one of
the CBSDs included in the CBSD cluster list does not belong to the PAL holder.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
# Create PPA creation request to SAS UUT.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT and expect the failure response.
# SAS does not create a PPA and generates an error.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_6_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 6."""
# File path where SAS UUT claimed ppa boundary generated in PCR.1 test
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
# Load SAS UUT claimed ppa boundary and check if any error while retrieving
# SAS UUT claimed ppa boundary generated in PCR.1 test.
try:
user_claimed_ppa_contour = json_load(sas_uut_claimed_ppa_boundary_file_path)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Expand the user claimed ppa boundary by approximately 1 kilometer.
user_claimed_ppa_contour_feature_collection = utils.InsureFeatureCollection(
utils.ShrinkAndCleanPolygon(
user_claimed_ppa_contour['features'][0]['geometry'], -1e-2),
as_dict=True)
# Create the actual config.
config = {
'configPCR_1': pcr_1_test_config_file_path,
'userClaimedPpaContour': user_claimed_ppa_contour_feature_collection
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_6_default_config)
def test_WINNF_FT_S_PCR_6(self, config_filename):
"""Unsuccessful PPA boundary Claimed by PAL Holder Not contained within Maximum PPA Boundary.
SAS UUT shall reject a PPA boundary claimed by the PAL holder,
that is not fully contained within the maximum PPA boundary created by SAS UUT.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
# Create PPA creation request with user claimed ppa contour.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids,
"providedContour": config['userClaimedPpaContour']
}
# Trigger PPA Creation to SAS UUT.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_7_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 7."""
# File path where SAS UUT claimed ppa boundary generated in PCR.1 test
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
# Load SAS UUT claimed ppa boundary and check if any error while retrieving
# SAS UUT claimed ppa boundary generated in PCR.1 test.
try:
overlapping_ppa_contour = json_load(sas_uut_claimed_ppa_boundary_file_path)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Shrink the user claimed ppa boundary by approximately 1 kilometer.
overlapping_ppa_contour_geometry = utils.ShrinkAndCleanPolygon(
overlapping_ppa_contour['features'][0]['geometry'], 1e-2)
# Create ppa_record where user claimed PPA contour will be replaced.
overlapping_ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
# Update the user_claimed ppa contour geometry required for overlaps ppa.
overlapping_ppa_record['zone'] = {'type':'FeatureCollection',
'features': [
{'type': 'Feature',
'properties': {},
'geometry': overlapping_ppa_contour_geometry}
]}
# Load PCR.1 configuration.
pcr_1_test_config = loadConfig(pcr_1_test_config_file_path)
# Set the pal_record used in PCR.1 tests.
pcr_1_pal_records = pcr_1_test_config['palRecords']
    # Update the PPA record based on the PAL records.
    overlapping_ppa_record['ppaInfo']['palId'] = [pal['palId'] for pal in pcr_1_pal_records]
    overlapping_ppa_record['id'] = 'zone/ppa/%s/%s/%s' % (overlapping_ppa_record['creator'],
                                                          overlapping_ppa_record['ppaInfo']['palId'][0],
                                                          uuid.uuid4().hex)
    overlapping_ppa_record['ppaInfo']['ppaBeginDate'] = pcr_1_pal_records[0]['license']['licenseDate']
    overlapping_ppa_record['ppaInfo']['ppaExpirationDate'] = pcr_1_pal_records[0]['license']['licenseExpiration']
# Create the actual config.
config = {
'configPCR_1': pcr_1_test_config_file_path,
'overlapPpaRecord': overlapping_ppa_record
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_7_default_config)
def test_WINNF_FT_S_PCR_7(self, config_filename):
"""Overlapping PPA Boundaries.
    Checks that the SAS UUT does not create a PPA zone within the service area
    when the PPA boundary claimed by the PAL holder, composed of one or more
    adjacent Census Tracts, is overlapped by another PPA zone.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Inject the overlap ppa zone into SAS UUT.
zone_id = self._sas_admin.InjectZoneData({'record': config['overlapPpaRecord']})
self.assertTrue(zone_id)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
# Create PPA creation request with device which is already part of
# existing PPA zone.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT and check SAS UUT should not create PPA boundary
# claimed by PAL holder was overlapped by PPA zone.
self.assertPpaCreationFailure(ppa_creation_request)
|
apache-2.0
| -8,652,789,533,087,778,000 | 42.346357 | 114 | 0.661039 | false |
seecr/meresco-rdf
|
meresco/rdf/graph/_utils.py
|
1
|
1172
|
## begin license ##
#
# Meresco RDF contains components to handle RDF data.
#
# Copyright (C) 2015 Drents Archief http://www.drentsarchief.nl
# Copyright (C) 2015 Seecr (Seek You Too B.V.) http://seecr.nl
#
# This file is part of "Meresco RDF"
#
# "Meresco RDF" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco RDF" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco RDF"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
def unique(g, key=None):
    """Yields the items of iterable `g` in their original order, skipping
    duplicates; if `key` is given, items with equal key values count as
    duplicates."""
    seen = set()
    if key is None:
        return (x for x in g if x not in seen and not seen.add(x))
    return (x for x in g if key(x) not in seen and not seen.add(key(x)))
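# A minimal usage sketch (not part of the original module): `unique` yields the
# items of an iterable in their original order, dropping duplicates (by `key`
# value when `key` is given).
if __name__ == '__main__':
    print(list(unique([3, 1, 3, 2, 1])))                  # [3, 1, 2]
    print(list(unique(['aa', 'b', 'cc', 'd'], key=len)))  # ['aa', 'b']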
|
gpl-2.0
| 8,431,025,589,879,382,000 | 38.1 | 76 | 0.71587 | false |
Zokol/CaveJet
|
cavejet.py
|
1
|
16546
|
import random
import time
#SCREEN_TYPE = "UNICORN"
#SCREEN_TYPE = "SCROLL"
SCREEN_TYPE = "SCROLLHD"
RGB_ENABLED = False
STUDY_LOOP = False
LOOP = False
TUNNEL_GAP_MIN = 2
TUNNEL_GAP_MAX = 3
TUNNEL_GAP_DIFF_MAX = 1
TUNNEL_MOVE_DIFF_MAX = 1
AI_VISIBILITY_DEPTH = 4
AI_REROUTE_DEPTH = 4
if SCREEN_TYPE == "UNICORN":
import unicornhat as unicorn
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(0)
unicorn.brightness(0.7)
SCREEN_WIDTH, SCREEN_HEIGHT = unicorn.get_shape()
screen_size = (SCREEN_WIDTH, SCREEN_HEIGHT)
RGB_ENABLED = True
BG_COLOR = [0, 150, 200]
BG_NOISE = 2
CAVE_COLOR = [244, 164, 96]
CAVE_NOISE = 10
PLAYER_COLOR = [255, 0, 0]
if SCREEN_TYPE == "SCROLL":
import scrollphat
SCREEN_WIDTH = 11
SCREEN_HEIGHT = 5
scrollphat.set_brightness(1)
if SCREEN_TYPE == "SCROLLHD":
import scrollphathd as scrollphat
SCREEN_WIDTH = 17
SCREEN_HEIGHT = 7
scrollphat.set_brightness(0.25)
screen_size = (SCREEN_WIDTH, SCREEN_HEIGHT)
class GameOver(Exception):
def __init__(self):
print("AI found no possible moves")
#input("quit?")
class Field:
def __init__(self, field_size):
self.buffer = [[0] * field_size[1]] * field_size[0]
self.gap_buffer = [(1, 3)]
"""
tunnel_gen - Tunnel Generator
creates the next column for the tunnel
Requires two random integers;
diff_place: in range of +-1, determines where the gap is placed in relation to the last gap
diff_width: in range of +-1, determines the gap width in relation to the last gap width
"""
def tunnel_gen(self):
if self.gap_buffer[-1][0] == 0: # Is the current place at screen edge?
diff_place = random.randint(0, TUNNEL_MOVE_DIFF_MAX) # Go away or stay at screen edge
elif self.gap_buffer[-1][0] == SCREEN_HEIGHT - TUNNEL_MOVE_DIFF_MAX: # Is the current place at screen edge?
diff_place = random.randint(-TUNNEL_MOVE_DIFF_MAX, 0) # Go away or stay at screen edge
else:
diff_place = random.randint(-TUNNEL_MOVE_DIFF_MAX, TUNNEL_MOVE_DIFF_MAX) # Not at screen edge, can move freely
if self.gap_buffer[-1][1] == TUNNEL_GAP_MIN: # Is gap at minimum?
diff_width = random.randint(0, TUNNEL_GAP_DIFF_MAX) # Go larger or stay at same
elif self.gap_buffer[-1][1] == TUNNEL_GAP_MAX: # Is gap at maximum?
diff_width = random.randint(-TUNNEL_GAP_DIFF_MAX, 0) # Go smaller or stay at same
else:
diff_width = random.randint(-TUNNEL_GAP_DIFF_MAX, TUNNEL_GAP_DIFF_MAX) # Adjust freely
self.gap_buffer.append((self.gap_buffer[-1][0] + diff_place, self.gap_buffer[-1][1] + diff_width))
if len(self.gap_buffer) > SCREEN_WIDTH: self.gap_buffer.pop(0)
col = [1] * len(self.buffer[0])
for pixel_i in range(0, self.gap_buffer[-1][1] + 1):
try: col[pixel_i + self.gap_buffer[-1][0]] = 0
except IndexError: pass
return col
def update(self):
new_col = self.tunnel_gen()
self.buffer.append(new_col)
self.buffer.pop(0)
class Player:
def __init__(self):
self.x = 1
self.y = 2
class Game:
def __init__(self, move_weight={-1: -1, 0: 0, 1: -1}, next_layer_weight=3, speed=0):
self.distance = 0
self.field = Field(screen_size)
self.ai = AI(self.field, move_weight, next_layer_weight)
self.speed = speed
self.run = True
def start(self):
while self.run:
self.step()
if SCREEN_TYPE == "UNICORN": self.print_unicorn()
if SCREEN_TYPE == "SCROLL" or SCREEN_TYPE == "SCROLLHD": self.print_scroll()
time.sleep(self.speed)
return self.distance
def step(self):
self.distance += 1
if self.speed is None:
start = time.time()
self.ai.move()
self.speed = time.time() - start
else:
self.ai.move()
self.field.update()
if self.field.buffer[self.ai.player.x][self.ai.player.y] == 1:
if SCREEN_TYPE == "UNICORN": self.game_over_unicorn()
if SCREEN_TYPE == "SCROLL" or SCREEN_TYPE == "SCROLLHD": self.game_over_scroll()
def game_over_unicorn(self):
width, height = unicorn.get_shape()
for x in range(width):
time.sleep(0.05)
for y in range(height):
r, g, b = [200, 0, 0]
unicorn.set_pixel(x, y, r, g, b)
time.sleep(0.5)
self.run = False
def game_over_scroll(self):
for i in range(1):
self.set_checker(0)
time.sleep(0.5)
self.set_checker(1)
time.sleep(0.5)
self.run = False
def set_checker(self, offset):
scrollphat.clear()
n = offset
for y in range(SCREEN_HEIGHT):
for x in range(SCREEN_WIDTH):
scrollphat.set_pixel(x, y, n % 2 == 0)
n += 1
if SCREEN_TYPE == "SCROLL": scrollphat.update()
if SCREEN_TYPE == "SCROLLHD": scrollphat.show()
def print_unicorn(self):
unicorn.clear()
for x, col in enumerate(self.field.buffer):
for y, pixel in enumerate(col):
if pixel: r, g, b = [i + random.randint(0, CAVE_NOISE) for i in CAVE_COLOR]
else: r, g, b = [i + random.randint(0, BG_NOISE) for i in BG_COLOR]
unicorn.set_pixel(x, y, r, g, b)
r, g, b = PLAYER_COLOR
unicorn.set_pixel(self.ai.player.x, self.ai.player.y, r, g, b)
unicorn.show()
def print_scroll(self):
scrollphat.clear()
for x, col in enumerate(self.field.buffer):
for y, pixel in enumerate(col):
scrollphat.set_pixel(x, y, pixel)
scrollphat.set_pixel(self.ai.player.x, self.ai.player.y, 1)
if SCREEN_TYPE == "SCROLL": scrollphat.update()
if SCREEN_TYPE == "SCROLLHD": scrollphat.show()
def print_field(self):
for col in self.field.buffer:
print(col, self.field.gap_buffer[-1])
class AI:
def __init__(self, field, move_weight={-1: -1, 0: 0, 1: -1}, next_layer_weight=3):
self.player = Player()
self.next_moves = []
self.field = field
self.move_weight = move_weight # Every decision is dependent on these numbers. Change these and you will get better or worse AI.
self.next_layer_weight = next_layer_weight # This is also very important.
"""
Filter possible moves
    This is a simple function to figure out where the player can move
"""
def filter_moves(self, layer, player_y):
possible_moves = [0, -1, 1] # List all possible moves player can take; move up, move down or keep the place
# Detect that the player is next to the screen edge
if player_y == SCREEN_HEIGHT - 1:
try: possible_moves.remove(1)
except ValueError: pass
if layer[player_y - 1] == 1:
try: possible_moves.remove(-1)
except ValueError: pass
        elif 1 <= player_y <= SCREEN_HEIGHT - 2:  # any interior row
if layer[player_y - 1] == 1:
try: possible_moves.remove(-1)
except ValueError: pass
if layer[player_y + 1] == 1:
try: possible_moves.remove(1)
except ValueError: pass
elif player_y == 0:
try: possible_moves.remove(-1)
except ValueError: pass
if layer[player_y + 1] == 1:
try: possible_moves.remove(1)
except ValueError: pass
"""
if len(possible_moves) == 0:
print("No possible moves to next layer!!")
if len(possible_moves) == 1:
print("Only one possible move")
"""
return possible_moves
"""
Evaluate path
This function gives score for each path
"""
def evaluate_path(self, moves):
score = 0
for move in moves:
score += (self.move_weight[move] + self.next_layer_weight)
return score
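    # Illustration: with the default weights move_weight={-1: -1, 0: 0, 1: -1}
    # and next_layer_weight=3, the path [0, 1, 0] scores (0+3) + (-1+3) + (0+3) = 8,
    # so every survived layer adds the layer reward while each up/down move
    # subtracts its cost.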
"""
Move player
Uses some available AI-algo to figure out the next move
"""
def move(self):
self.player_coords = {'x': self.player.x, 'y': self.player.y}
if len(self.next_moves) <= AI_REROUTE_DEPTH:
#self.player.y += self.next_move()[0]
#self.player.y += self.better_move(50)[0]
possible_paths = self.even_better_move(AI_VISIBILITY_DEPTH, [])
if possible_paths is None:
#print("Path finder returned None")
raise GameOver
if len(possible_paths) == 0:
#print("Path finder returned empty list of paths")
raise GameOver
for path in possible_paths:
path_score = self.evaluate_path(path)
#print("Path:", path, "Score:", path_score)
self.next_moves = max(possible_paths, key=lambda x: self.evaluate_path(x)) # Selecting the best path using evaluation-function
#print("Selected path:", self.next_moves, "With value:", self.evaluate_path(self.next_moves))
#print("selected path length:", len(self.next_moves))
#next_moves = self.better_move(50)
if len(self.next_moves) == 0:
#print("Selected empty list of paths")
raise GameOver
self.player.y += self.next_moves.pop(0)
"""
Myopic AI
This algorithm is very very near-sighted, and it's flying in fog, and it's raining... wait, can that happen? *wanders off to r/askreddit/*
"""
def next_move(self):
next_col = self.field.buffer[self.player.x + 1]
if next_col[self.player.y] == 1:
if self.player.y < 4:
if next_col[self.player.y + 1] == 0:
return [1]
if self.player.y > 0:
if next_col[self.player.y - 1] == 0:
return [-1]
"""
Random path finding AI
    This algorithm takes all the possible moves and, whenever there is a choice,
    picks a random direction and continues.
    The algorithm is given a number of iterations (computing time) to calculate paths.
    Each path is evaluated based on predetermined weights (a more direct path is better,
    a path that gives the player the best chances of survival is also better).
    This is a better AI than nothing at all, but it still misses a lot of cleverness.
    It always chooses a random path, but nothing prevents it from taking the same path twice.
    So, this AI is wasting valuable computing time, most likely comparing very similar paths.
"""
def better_move(self, iterations):
best = {"score": 0, "moves": []}
for iteration in range(iterations): # Iterations to find most of the move combinations
self.player_coords = {'x': self.player.x, 'y': self.player.y}
moves = []
for layer in self.field.buffer[self.player_coords['x']:]:
if layer[self.player_coords['y']] == 1:
break
possible_moves = self.filter_moves(layer, self.player_coords['y'])
move = random.choice(possible_moves)
self.player_coords['y'] += move
moves.append(move)
self.player_coords['x'] += 1
score = self.evaluate_path(moves)
if best["score"] < score:
best["score"] = score
best["moves"] = moves
return best["moves"]
"""
Recursive AI
Now this starts to be something you can call efficient. (If you disagree, take a look at the other two)
    The idea is simple:
    1) We take a path that has no intersections and continue until we have to make a choice or until the player hits a wall.
    2) Start new instances of this function for each choice and wait for them to return their moves.
    3) Calculate scores for the routes we received.
    4) Move the player according to the best move we know.
    It is quite clear that if we calculate this after every step the player takes, and the field is giving us lots of room to move,
    this recursive tactic spirals out of control pretty fast. It's dependent on how much memory and CPU-power we have, but there
    should be some way of controlling this recursion.
    Thus, the only parameter given to the algorithm is very very very important. It behaves like the TTL-value in network packets.
    The depth_limit is reduced by one each time it is passed onwards and checked if it reaches zero. If it does, the recursion ends
    and the result is passed to the parent-function.
    By increasing the depth_limit, more possible routes will be evaluated, but the code will also run more slowly.
"""
def even_better_move(self, depth_limit, moves=[]):
depth_limit -= 1
if depth_limit == 0: # Hit depth limit without hitting the wall
"""
print(" " * self.player_coords['x'] + ''.join(str(move) for move in moves))
for y in range(SCREEN_HEIGHT):
row = ""
for x, layer in enumerate(self.field.buffer):
if x == self.player_coords['x'] and y == self.player_coords['y']:
row += "X"
elif layer[y] == 1:
row += "*"
else:
row += " "
print(row)
"""
return moves # Returing route
else:
next_layer = self.field.buffer[self.player_coords['x'] + len(moves)]
player_y = self.player_coords['y']
if len(moves) > 0:
for layer_i, y_move in enumerate(moves):
move_layer = self.field.buffer[self.player_coords['x'] + layer_i + 1]
player_y += y_move
if move_layer[player_y] == 1: # Hit a wall with this route
#print("layer:", move_layer)
#print("This path", moves, "hits the wall, returning None")
return None # Returning None
possible_moves = self.filter_moves(next_layer, player_y)
paths = []
for move in possible_moves:
returned_path = self.even_better_move(depth_limit, moves + [move])
if returned_path is not None: # Path did not hit the wall
if type(returned_path[0]) != list: # Check if we got list of paths or just one path
paths.append(returned_path) # Adding path to list of possible paths
else:
paths += returned_path # Return is already list of paths, summing it to the path list
if len(paths) > 0:
return paths
else:
return None # Found no paths that would not hit the wall
def run_game():
try:
#move_cost = -16
#layer_reward = 34
move_cost = -1
layer_reward = 10
game = Game(move_weight={-1: move_cost, 0: 0, 1: move_cost}, next_layer_weight=layer_reward)
game.start()
except GameOver:
print({"score": game.distance, "move_cost": move_cost, "layer_reward": layer_reward})
except KeyboardInterrupt:
print("Quitting")
def study_loop():
logfile = open("cavejet_AI.log", 'a+')
record = 0
history = []
while True:
try:
move_cost = random.randint(-18, -15)
layer_reward = random.randint(32, 36)
game = Game(move_weight={-1: move_cost, 0: 0, 1: move_cost}, next_layer_weight=layer_reward, speed=0)
game.start()
except GameOver:
history.append({"score": game.distance, "move_cost": move_cost, "layer_reward": layer_reward})
logfile.write(str(game.distance) + ',' + str(move_cost) + ',' + str(layer_reward) + '\r\n')
if record < game.distance:
record = game.distance
print("Score:", game.distance, " | Best score:", record)
except KeyboardInterrupt:
logfile.close()
raise
if __name__ == "__main__":
if STUDY_LOOP:
study_loop()
elif LOOP:
while True:
run_game()
else:
run_game()
|
mit
| 2,995,318,860,571,699,000 | 37.389791 | 142 | 0.562916 | false |
boh1996/LectioAPI
|
importers/importRooms.py
|
1
|
2399
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scrapers'))
sys.path.append("..")
from datetime import datetime
from database import *
import error
import sync
import rooms as roomsApi
def importRooms ( school_id, branch_id ):
try:
objectList = roomsApi.rooms({
"school_id" : school_id,
"branch_id" : branch_id
})
if objectList is None:
error.log(__file__, False, "Unknown Object")
return False
if not "status" in objectList:
error.log(__file__, False, "Unknown Object")
return False
if objectList["status"] == "ok":
for row in objectList["rooms"]:
unique = {
"room_id" : str(row["room_id"])
}
terms = []
			existsing = db.rooms.find(unique).limit(1)
if existsing.count() > 0:
existsing = existsing[0]
if "terms" in existsing:
terms = existsing["terms"]
if not objectList["term"]["value"] in terms:
terms.append(objectList["term"]["value"])
element = {
"room_id" : str(row["room_id"]),
"name" : row["name"],
"alternative_name" : row["number"],
"school_id" : str(row["school_id"]),
"branch_id" : str(row["branch_id"]),
"type" : row["type"],
"terms" : terms
}
status = sync.sync(db.rooms, unique, element)
'''if sync.check_action_event(status) == True:
for url in sync.find_listeners('room', unique):
sync.send_event(url, status["action"], element)
for url in sync.find_listeners('school', {"school" : school_id, "branch_id" : branch_id}):
sync.send_event(url, "room", element)
for url in sync.find_general_listeners('room_general'):
sync.send_event(url, status["action"], element)'''
'''deleted = sync.find_deleted(db.rooms, {"school_id" : school_id, "branch_id" : branch_id}, ["room_id"], objectList["rooms"])
for element in deleted:
for url in sync.find_listeners('room', {"room_id" : element["room_id"]}):
sync.send_event(url, 'deleted', element)
for url in sync.find_listeners('school', {"school" : school_id, "branch_id" : branch_id}):
sync.send_event(url, "room_deleted", element)'''
return True
else:
if "error" in objectList:
error.log(__file__, False, objectList["error"])
return False
else:
error.log(__file__, False, "Unknown error")
return False
except Exception, e:
error.log(__file__, False, str(e))
return False
|
mit
| -6,514,764,516,903,362,000 | 26.272727 | 129 | 0.61609 | false |
enzzc/simplempv
|
simplempv/simplempv.py
|
1
|
1205
|
import os
import os.path
import socket
import json
from functools import partial
class Error(Exception):
pass
class SocketError(Error):
pass
class MpvError(Error):
pass
class Mpv(object):
commands = ['']
def __init__(self, sockfile='/tmp/mpvsock'):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.connect(sockfile)
except OSError as e:
raise SocketError from e
self.fd = s
def execute(self, command):
data = json.dumps(command) + '\r\n'
data = bytes(data, encoding='utf-8')
try:
self.fd.send(data)
buf = self.fd.recv(1024)
except OSError as e:
raise SocketError from e
print('DEBUG', buf)
result = json.loads(buf.decode('utf-8'))
status = result['error']
if status == 'success':
return result
raise MpvError(status)
def command(self, command, *args):
return self.execute({'command': [command, *args]})
def close(self):
self.fd.close()
def __getattr__(self, name):
mpv_name = name.replace('_', '-')
return partial(self.command, mpv_name)
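# A minimal usage sketch (not part of the original module). It assumes an mpv
# player was started with an IPC socket, for example:
#   mpv --input-ipc-server=/tmp/mpvsock some_video.mkv
# The socket path and file name are placeholders, not values from this project.
if __name__ == '__main__':
    player = Mpv('/tmp/mpvsock')
    try:
        # Attribute access maps underscores to hyphens, so `player.cycle('pause')`
        # sends the mpv command ["cycle", "pause"].
        player.cycle('pause')
        print(player.command('get_property', 'playback-time'))
    finally:
        player.close()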
|
mit
| -7,761,629,143,300,644,000 | 22.627451 | 61 | 0.569295 | false |
richrr/scripts
|
python/summarize_topk_taxon.py
|
1
|
3307
|
import os
import sys
from utils import *
import operator
from time import localtime, strftime
import argparse
import re
import numpy as np
# Usage: python ~/scripts/python/summarize_topk_taxon.py -i top-10k-otus-taxons-rosana-16srrna-row_sum_sanitized_summary_otu_table_mc2_w_tax_no_pynast_failures.biom.txt-rep_set_tax_assign.txt -o summarize_topk_taxon_out.txt -f 5 -t 7 -s 0 1
########## add code to print header row in the output file
def main(args):
parser = argparse.ArgumentParser(description='Summarize number of seqs in a sample having same taxonomy/OTU')
parser.add_argument('-i', '--infile')
parser.add_argument('-o', '--outfile', default="summarize_topk_taxon_out.txt") # output filename
parser.add_argument('-f', '--fromcolumn', default=0, type=int) # 0-index based column from where the samples start
parser.add_argument('-t', '--tillcolumn', default=99999, type=int) # 0-index based column where the samples end (inclusive)
parser.add_argument('-s', '--summarizecolumn', nargs='*', type =int, default=[0]) # creates a list of items (0-index based column) used for summarizing
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
args = parser.parse_args()
if len(sys.argv)==1 :
parser.print_help()
sys.exit('\natleast one argument required\n')
infile = args.infile
outfile = args.outfile
fromcol = args.fromcolumn
tillcol = args.tillcolumn
summarcol = args.summarizecolumn
delim = args.delimiter
d_list=list()
comment_list=list()
temp_dict=dict()
for l in open(infile):
if '#' not in l:
d_list.append(l.strip('\n'))
else:
comment_list.append(l.strip())
'''
for col in np.arange(fromcol, tillcol+1):
temp_dict=dict()
for l in d_list:
contents = l.strip().split(delim)
key = delim.join([contents[i] for i in summarcol])
#key = key.replace('[' , '_').replace(']', '_')
if key not in temp_dict:
temp_dict[key] = contents[col]
else:
temp_dict[key] = temp_dict[key] + contents[col]
# list of tuples, ordered as per the index/value of the dictionary
sorted_temp_dict = sorted(temp_dict.iteritems(), key=operator.itemgetter(1))
print [i for i in sorted_temp_dict[:5]]
'''
for l in d_list:
contents = l.strip('\n').split(delim)
#print contents
key = delim.join([contents[i] for i in summarcol])
try:
value= [float(contents[col]) for col in np.arange(fromcol, tillcol+1)]
except:
print l
if key not in temp_dict:
temp_dict[key] = value
else:
temp_dict[key] = [x+y for x,y in zip(temp_dict[key], value)]
sorted_temp_dict = sorted(temp_dict.iteritems(), key=operator.itemgetter(1), reverse=True)
out = open(outfile, 'w')
for i in sorted_temp_dict:
val = [str(j) for j in i[1]]
out.write ('%s%s%s\n' %(i[0], delim, delim.join(val)))
out.close()
#print len(sorted_temp_dict)
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
|
gpl-3.0
| 4,516,134,482,446,276,000 | 34.945652 | 240 | 0.608406 | false |
yannrouillard/weboob
|
modules/youjizz/backend.py
|
1
|
2836
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Roger Philibert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import ICapVideo, BaseVideo
from weboob.capabilities.collection import ICapCollection, CollectionNotFound
from weboob.tools.backend import BaseBackend
from .browser import YoujizzBrowser
from .video import YoujizzVideo
__all__ = ['YoujizzBackend']
class YoujizzBackend(BaseBackend, ICapVideo, ICapCollection):
NAME = 'youjizz'
MAINTAINER = u'Roger Philibert'
EMAIL = '[email protected]'
VERSION = '0.i'
DESCRIPTION = 'YouJizz pornographic video streaming website'
LICENSE = 'AGPLv3+'
BROWSER = YoujizzBrowser
def get_video(self, _id):
video = self.browser.get_video(_id)
return video
def search_videos(self, pattern, sortby=ICapVideo.SEARCH_RELEVANCE, nsfw=False):
if not nsfw:
return set()
return self.browser.search_videos(pattern)
def fill_video(self, video, fields):
if fields != ['thumbnail']:
# if we don't want only the thumbnail, we probably want also every fields
with self.browser:
video = self.browser.get_video(YoujizzVideo.id2url(video.id), video)
if 'thumbnail' in fields and video.thumbnail:
with self.browser:
video.thumbnail.data = self.browser.readurl(video.thumbnail.url)
return video
def iter_resources(self, objs, split_path):
if BaseVideo in objs:
collection = self.get_collection(objs, split_path)
if collection.path_level == 0:
yield self.get_collection(objs, [u'latest_nsfw'])
if collection.split_path == [u'latest_nsfw']:
for video in self.browser.latest_videos():
yield video
def validate_collection(self, objs, collection):
if collection.path_level == 0:
return
if BaseVideo in objs and collection.split_path == [u'latest_nsfw']:
collection.title = u'Latest Youjizz videos (NSFW)'
return
raise CollectionNotFound(collection.split_path)
OBJECTS = {YoujizzVideo: fill_video}
|
agpl-3.0
| -6,394,259,530,830,417,000 | 34.45 | 85 | 0.674189 | false |
des-testbed/des_chan
|
graph.py
|
1
|
18151
|
#!/usr/bin/python
"""
DES-CHAN: A Framework for Channel Assignment Algorithms for Testbeds
This module provides a class to represent network graphs and conflict graphs.
Authors: Matthias Philipp <[email protected]>,
Felix Juraschek <[email protected]>
Copyright 2008-2013, Freie Universitaet Berlin (FUB). All rights reserved.
These sources were developed at the Freie Universitaet Berlin,
Computer Systems and Telematics / Distributed, embedded Systems (DES) group
(http://cst.mi.fu-berlin.de, http://www.des-testbed.net)
-------------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see http://www.gnu.org/licenses/ .
--------------------------------------------------------------------------------
For further information and questions please use the web site
http://www.des-testbed.net
"""
import re
import sys
class Graph:
def __init__(self, vertices=[]):
# initialize internal data structures
self.values = dict()
self.distances = dict()
# set vertex names and distances
for vertex in vertices:
self.add_vertex(vertex)
def add_vertex(self, new_vertex):
"""Adds the given vertex to the graph.
"""
# do nothing if vertex already exists
if new_vertex in self.get_vertices():
return
# otherwise set up data structures for new vertex
self.values[new_vertex] = dict()
self.distances[new_vertex] = dict()
for old_vertex in self.get_vertices():
self.set_edge_value((old_vertex, new_vertex), None, False)
self.set_distance(old_vertex, new_vertex, sys.maxint)
# distance to itself is 0
self.set_distance(new_vertex, new_vertex, 0)
def remove_vertex(self, vertex):
"""Removes the given vertex from the graph.
"""
del self.values[vertex]
del self.distances[vertex]
for v in self.get_vertices():
del self.values[v][vertex]
del self.distances[v][vertex]
def get_vertices(self):
"""Returns a list that contains all vertices of the graph.
"""
return set(self.values.keys())
def set_edge_value(self, edge, value, update=True):
"""Sets the value of the given edge. The edge is represented by a tuple
of two vertices.
"""
v1, v2 = edge
self.values[v1][v2] = value
# we implement an undirected graph
self.values[v2][v1] = value
# update distance information
# None, "", False, and 0 correspond to no edge
if value:
self.set_distance(v1, v2, 1)
# other shortest paths may have changed
if update:
self.update_distances()
def set_distance(self, v1, v2, d):
"""Sets the distance between the two vertices.
"""
self.distances[v1][v2] = d
# we implement an undirected graph
self.distances[v2][v1] = d
def get_edge_value(self, edge):
"""Returns the value of the given edge. The edge is represented by a tuple
of two vertices.
"""
v1, v2 = edge
return self.values[v1][v2]
def get_distance(self, v1, v2):
"""Returns the distance between v1 and v2.
"""
return self.distances[v1][v2]
def get_edges(self, get_all=False):
"""Returns a dictionary that contains all edges as keys and the
corresponding edge values as values. Only edges that have a value are
returned. By default the graph is assumed to be undirected. If the
optional parameter get_all is True, all vertices are returned.
"""
edges = dict()
remaining_vertices = self.get_vertices()
for v1 in self.get_vertices():
for v2 in remaining_vertices:
value = self.get_edge_value((v1, v2))
# None, "", False, and 0 correspond to no edge
if value:
edges[(v1, v2)] = value
# graph is assumed to be undirected, therefore discard
# duplicate edges if not explicitly requested
if not get_all:
remaining_vertices.remove(v1)
return edges
def merge(self, graph):
"""Merges the current graph with the specified graph. The new graph
contains the union of both vertex sets and the corresponding edge
values.
"""
# add missing vertices
for vertex in graph.get_vertices() - self.get_vertices():
self.add_vertex(vertex)
# set edge values
for edge, edge_value in graph.get_edges().items():
self.set_edge_value(edge, edge_value, False)
self.update_distances()
def get_adjacency_matrix(self):
"""Returns the graph's adjacency matrix as a formatted string.
"""
vertices = self.values.keys()
maxlen = 4
# get maximum length of vertex names for proper layout
for vertex in vertices:
if len(str(vertex)) > maxlen:
maxlen = len(str(vertex))
# print column heads
matrix = "".rjust(maxlen) + " |"
for vertex in vertices:
matrix += " " + str(vertex).rjust(maxlen) + " |"
# print without trailing |
matrix = matrix[:-1] + "\n"
# generate row separator
matrix += "-" * maxlen + "-+"
for i in range(len(vertices)):
matrix += "-" + "-" * maxlen + "-+"
# print without trailing +
matrix = matrix[:-1] + "\n"
# print rows
for v1 in vertices:
matrix += str(v1).ljust(maxlen) + " |"
for v2 in vertices:
matrix += " " + self._get_edge_value_as_text((v1, v2)).rjust(maxlen) + " |"
# print without trailing |
matrix = matrix[:-1] + "\n"
return matrix
def get_graphviz(self, label=""):
"""Returns a string representation of the graph in the dot language from
the graphviz project.
"""
left_vertices = set(self.values.keys())
graph = "Graph G {\n"
if label != "":
graph += "\tgraph [label = \"%s\", labelloc=t]\n" % label
for v1 in self.values.keys():
for v2 in left_vertices:
if self.get_edge_value((v1, v2)):
graph += "\t\"" + str(v1) + "\" -- \"" + str(v2) + "\" "
graph += "[label = \"" + str(self.get_edge_value((v1, v2))) + "\"]\n"
# undirected graph, therefore discard double connections
left_vertices.remove(v1)
graph += "}\n"
return graph
def write_to_file(self, file_name, use_graphviz=False):
"""Writes a textual representation of the graph to the specified file.
If the optional parameter use_graphviz is True, the graph is represented
in the dot language from the graphviz project.
"""
file = open(file_name, 'w')
if use_graphviz:
file.write(self.get_graphviz())
else:
file.write(self.get_adjacency_matrix())
file.close()
def read_from_dotfile(self, file_name):
"""Reads the graph from a graphviz dot file.
"""
file = open(file_name, 'r')
# clear current data
self.__init__()
# match following lines
# "t9-035" -- "t9-146" [label = "1"]
edge_re = re.compile('\s*"(.+)" -- "(.+)" \[label = "(.+)"\]')
for line in file:
edge_ma = edge_re.match(line)
if edge_ma:
v1 = edge_ma.group(1)
v2 = edge_ma.group(2)
value = edge_ma.group(3)
self.add_vertex(v1)
self.add_vertex(v2)
self.set_edge_value((v1, v2), value, False)
file.close()
self.update_distances()
def read_from_file(self, file_name):
"""Reads the graph from a file containing an adjacency matrix as
generated by get_adjacency_matrix() or write_to_file(). The dot format
is not supported.
"""
file = open(file_name, 'r')
# line counter
i = 0;
vertices = list()
for line in file:
i += 1
# first line contains the vertex names
if i == 1:
# first element is empty, therefore discard it
for vertex in line.split("|")[1:]:
vertices.append(vertex.strip())
# clear current data and set new vertices
self.__init__(vertices)
if i > 2:
row = line.split("|")
# first element is the vertex name
v1 = row[0].strip()
# remaining elements are edge values
row = row[1:]
for v2 in vertices:
value = row[vertices.index(v2)].strip()
if value == '':
value = None
self.set_edge_value((v1, v2), value, False)
file.close()
self.update_distances()
def update_distances(self):
"""Updates the distance matrix with the number of hops between all
vertex pairs.
"""
# Floyd Warshall algorithm
# calculate all shortest paths
for k in self.get_vertices():
remaining_vertices = self.get_vertices()
for v1 in self.get_vertices():
for v2 in remaining_vertices:
d = min(self.get_distance(v1, v2),
self.get_distance(v1, k) + self.get_distance(k, v2))
self.set_distance(v1, v2, d)
remaining_vertices.remove(v1)
def get_neighbors(self, vertex):
"""Returns a set that contains all direct neighbors of the given vertex.
"""
neighbors = set()
for v1, v2 in self.get_edges().keys():
if v1 == vertex:
neighbors.add(v2)
elif v2 == vertex:
neighbors.add(v1)
return neighbors
def copy(self):
"""Returns a new Graph object that contains the same vertices and edges.
"""
remaining_vertices = self.get_vertices()
g = Graph(list(remaining_vertices))
for v1 in self.get_vertices():
for v2 in remaining_vertices:
g.set_edge_value((v1, v2), self.get_edge_value((v1, v2)))
g.set_distance(v1, v2, self.get_distance(v1, v2))
remaining_vertices.remove(v1)
return g
def copy_fast(self):
"""Returns a new Graph object that contains the same vertices and edges.
"""
remaining_vertices = self.get_vertices()
g = Graph(list(remaining_vertices))
        for (v1, v2), value in self.get_edges().iteritems():
            g.set_edge_value((v1, v2), value, update=False)
g.set_distance(v1, v2, self.get_distance(v1, v2))
return g
def _get_edge_value_as_text(self, edge):
"""Returns a textual representation of the value of the given edge. The
edge is represented by a tuple of two vertices.
"""
v1, v2 = edge
if not self.values[v1][v2]:
return ""
else:
return str(self.values[v1][v2])
class ConflictGraphVertex:
def __init__(self, conflict_graph, nw_graph_edge):
self.conflict_graph = conflict_graph
self.nw_graph_edge = nw_graph_edge
self.channels = None
def __str__(self):
return "%s_%s" % (self.nw_graph_edge)
def get_channel(self):
"""Returns the channel of the link in the network graph that corresponds
to this vertex.
"""
return int(self.conflict_graph.network_graph.get_edge_value(self.nw_graph_edge))
def set_channel(self, channel):
"""Sets the channel in the network graph and computes the resulting
conflict graph.
"""
# update network graph
self.conflict_graph.network_graph.set_edge_value(self.nw_graph_edge,
str(channel))
# update conflict graph
# NOTE the change: We do not have to recalculate ALL edges, just the onces
# adjacent to the changed one are enough! gives us O(n) instead of O(n*n)
self.conflict_graph.update_edge(self)
# self.conflict_graph.update_edges()
def get_nw_graph_neighbor(self, node_name):
"""Returns the neigbor in the network graph corresponding to the link.
"""
if node_name not in self.nw_graph_edge:
return None
if self.nw_graph_edge[0] == node_name:
return self.nw_graph_edge[1]
else:
return self.nw_graph_edge[0]
class ConflictGraph(Graph):
def __init__(self, network_graph, interference_model):
# store the original network graph for later reference
self.network_graph = network_graph
self.interference_model = interference_model
vertices = set()
# each edge in the network graph corresponds to a vertex in the conflict
# graph
for edge in network_graph.get_edges().keys():
vertices.add(ConflictGraphVertex(self, edge))
# call constructor of the super-class with new vertex set
Graph.__init__(self, vertices)
# set edges according to interference model
self.update_edges()
def update_edges(self):
"""Updates all edges of the ConflictGraph regarding the current channel
assignment and the applied interference model.
"""
remaining_vertices = self.get_vertices()
for v1 in self.get_vertices():
for v2 in remaining_vertices:
# get edge value according to the interference model
value = self.interference_model.get_interference(self.network_graph,
v1.nw_graph_edge,
v2.nw_graph_edge)
self.set_edge_value((v1, v2), value, False)
# graph is undirected
remaining_vertices.remove(v1)
def update_edge(self, cg_vertex):
"""Updates all edges that are adjacent to the supplied cg_vertex.
"""
remaining_vertices = self.get_vertices()
for v2 in self.get_vertices():
# get edge value according to the interference model
value = self.interference_model.get_interference(self.network_graph,
cg_vertex.nw_graph_edge,
v2.nw_graph_edge)
self.set_edge_value((cg_vertex, v2), value, False)
def get_vertices_for_node(self, node_name):
"""Returns a set containing all vertices that correspond to links that
are incident to the given node.
"""
vertices = set()
for vertex in self.get_vertices():
if vertex.nw_graph_edge[0] == node_name or \
vertex.nw_graph_edge[1] == node_name:
vertices.add(vertex)
return vertices
def get_vertex(self, node1, node2):
"""Returns the vertex that corresponds to the link between the two given
node names, or None, if such vertex does not exist.
"""
for vertex in self.get_vertices():
if (vertex.nw_graph_edge[0] == node1 and \
vertex.nw_graph_edge[1] == node2) or \
(vertex.nw_graph_edge[0] == node2 and \
vertex.nw_graph_edge[1] == node1):
return vertex
return None
def get_interference_sum(self):
"""Returns the overall interference which is calculated by summing up
all edge values.
"""
sum = 0
for value in self.get_edges().values():
sum += value
return sum
def get_vertex_names(self):
vertex_names = set()
for vertex in self.get_vertices():
vertex_names.add(str(vertex))
return vertex_names
def update(self, network_graph):
old_edges = self.network_graph.get_edges()
new_edges = network_graph.get_edges()
# do nothing if graphs are equal
if new_edges == old_edges:
return
old_edges_keys = set(old_edges.keys())
new_edges_keys = set(new_edges.keys())
# assign new network graph
self.network_graph = network_graph
# update conflict graph
for new_edge in new_edges_keys - old_edges_keys:
# create a new conflict graph vertex for each new network graph edge
self.add_vertex(ConflictGraphVertex(self, new_edge))
for obsolete_edge in old_edges_keys - new_edges_keys:
# remove conflict graph vertices for obsolete edges
self.remove_vertex(self.get_vertex(obsolete_edge[0],
obsolete_edge[1]))
self.update_edges()
# this only runs if the module was *not* imported
if __name__ == '__main__':
g = Graph(["a", "b", "c", "d", "e"])
g.set_edge_value(("a", "b"), 40)
g.set_edge_value(("b", "c"), 40)
g.set_edge_value(("c", "d"), 40)
g.set_edge_value(("d", "e"), 40)
print g.get_adjacency_matrix()
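    # Additional illustrative checks (not part of the original demo): every
    # set_edge_value() call above also refreshed the all-pairs distances.
    print g.get_distance("a", "e")   # 4 hops along the chain a-b-c-d-e
    print g.get_neighbors("c")       # set(['b', 'd']) (order may vary)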
|
gpl-3.0
| 8,558,417,932,781,254,000 | 33.705545 | 91 | 0.557545 | false |
ToraxXx/pytocl
|
tests/converttests.py
|
1
|
8141
|
import unittest
from pytocl import *
"""Tests for Parameters"""
def one_dim():
i = get_global_id(0)
def two_dim():
i = get_global_id(0)
j = get_global_id(1)
def three_dim():
i = get_global_id(0)
j = get_global_id(1)
k = get_global_id(2)
def array_params(float_array, int_array):
i = get_global_id(0)
def scalar_params(float, int):
i = get_global_id(0)
def writable_params(float, int):
i = get_global_id(0)
class TestParameters(unittest.TestCase):
def test_one_dim(self):
kernel = func_to_kernel(CLFuncDesc(one_dim, (1,)))
expected_header = "kernel void one_dim()"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
def test_two_dim(self):
kernel = func_to_kernel(CLFuncDesc(two_dim, (1,1)))
expected_header = "kernel void two_dim()"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
expected_two_dim = "int j=get_global_id(1);"
self.assertIn(expected_two_dim, kernel)
def test_three_dim(self):
kernel = func_to_kernel(CLFuncDesc(three_dim, (1,1,1)))
expected_header = "kernel void three_dim()"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
expected_two_dim = "int j=get_global_id(1);"
self.assertIn(expected_two_dim, kernel)
expected_two_dim = "int k=get_global_id(2);"
self.assertIn(expected_two_dim, kernel)
def test_array(self):
kernel = func_to_kernel(CLFuncDesc(array_params, (1,)).arg(CLArgDesc(CLArgType.float32_array, 100)).arg(CLArgDesc(CLArgType.int32_array, 10)))
expected_header = "kernel void array_params(const global float* float_array,const global int* int_array)"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
def test_scalar(self):
kernel = func_to_kernel(CLFuncDesc(scalar_params, (1,)).arg(CLArgDesc(CLArgType.float32, 100)).arg(CLArgDesc(CLArgType.int32, 10)))
expected_header = "kernel void scalar_params(const float float,const int int)"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
def test_writable(self):
kernel = func_to_kernel(CLFuncDesc(writable_params, (1,)).arg(CLArgDesc(CLArgType.float32_array, 100), False).arg(CLArgDesc(CLArgType.int32_array, 10), False))
expected_header = "kernel void writable_params(global float* float,global int* int)"
self.assertIn(expected_header, kernel)
expected_one_dim = "int i=get_global_id(0);"
self.assertIn(expected_one_dim, kernel)
"""Tests for Literals"""
def num_literal():
i = 5
f = 3.4
f_dec_pre = .4
f_dec_post = 4.
def name_constant_literal():
b_true = True
b_false = False
# TODO: Test None
class TestLiterals(unittest.TestCase):
def test_num(self):
kernel = func_to_kernel(CLFuncDesc(num_literal, (1,)))
expected_int = "i=5"
self.assertIn(expected_int, kernel)
expected_float = "f=3.4f"
self.assertIn(expected_float, kernel)
expected_float_dec_pre = "f_dec_pre=0.4f"
self.assertIn(expected_float_dec_pre, kernel)
expected_float_dec_post = "f_dec_post=4.0f"
self.assertIn(expected_float_dec_post, kernel)
def test_name_constant(self):
kernel = func_to_kernel(CLFuncDesc(name_constant_literal, (1,)))
expected_true = "b_true=true;"
self.assertIn(expected_true, kernel)
expected_false = "b_false=false;"
self.assertIn(expected_false, kernel)
"""Tests for Comparisons"""
def comparisons():
a = 3
b = 4
b_is_greater = a > b
b_is_equal = a == b
b_is_less = a < b
b_is_not_equal = a != b
class TestComparisons(unittest.TestCase):
def test_comparisons(self):
kernel = func_to_kernel(CLFuncDesc(comparisons, (1,)))
expected_greater = "bool b_is_greater=(a>b);"
self.assertIn(expected_greater, kernel)
expected_is_equal = "bool b_is_equal=(a==b);"
self.assertIn(expected_is_equal, kernel)
expected_is_less = "bool b_is_less=(a<b);"
self.assertIn(expected_is_less, kernel)
expected_is_not_equal = "bool b_is_not_equal=(a!=b);"
self.assertIn(expected_is_not_equal, kernel)
"""Tests for For loops"""
def for_loop_one_arg():
for i in range(10):
pass
def for_loop_two_arg():
for i in range(10, 20):
pass
def for_loop_three_arg():
for i in range(10, 20, 2):
pass
class TestForLoop(unittest.TestCase):
def test_one_arg(self):
kernel = func_to_kernel(CLFuncDesc(for_loop_one_arg, (1,)))
expected_for = "for(int i=0;i<10;i++)"
self.assertIn(expected_for, kernel)
def test_two_arg(self):
kernel = func_to_kernel(CLFuncDesc(for_loop_two_arg, (1,)))
expected_for = "for(int i=10;i<20;i++)"
self.assertIn(expected_for, kernel)
def test_three_arg(self):
kernel = func_to_kernel(CLFuncDesc(for_loop_three_arg, (1,)))
expected_for = "for(int i=10;i<20;i+=2)"
self.assertIn(expected_for, kernel)
"""Tests for While loops"""
def while_loop(dim):
while True:
pass
class TestWhileLoop(unittest.TestCase):
def test_while_loop(self):
kernel = func_to_kernel(CLFuncDesc(while_loop, (1,)))
expected_while = "while(true)"
self.assertIn(expected_while, kernel)
"""Tests for If statements"""
def if_statement():
if True:
pass
def if_else_statement():
if True:
pass
else:
pass
def if_comparison():
i = get_global_id(0)
if i > 4:
pass
class TestIfStatement(unittest.TestCase):
def test_if_statement(self):
kernel = func_to_kernel(CLFuncDesc(if_statement, (1,)))
expected_if = "if(true)"
self.assertIn(expected_if, kernel)
def test_if_else_statement(self):
kernel = func_to_kernel(CLFuncDesc(if_else_statement, (1,)))
expected_if = "if(true)"
self.assertIn(expected_if, kernel)
expected_else = "else"
self.assertIn(expected_else, kernel)
def test_if_comparison(self):
kernel = func_to_kernel(CLFuncDesc(if_comparison, (1,)))
expected_if = "if((i>4))"
self.assertIn(expected_if, kernel)
""" Tests for OpenCL barriers """
def test_local_barrier():
cl_call("barrier", cl_inline("CLK_LOCAL_MEM_FENCE"))
def test_global_barrier():
cl_call("barrier", cl_inline("CLK_GLOBAL_MEM_FENCE"))
def test_local_and_global_barrier():
cl_call("barrier", cl_inline("CLK_LOCAL_MEM_FENCE|CLK_GLOBAL_MEM_FENCE"))
class TestBarrier(unittest.TestCase):
def test_local_barrier(self):
kernel = func_to_kernel(CLFuncDesc(test_local_barrier, (1,)))
expected = "barrier(CLK_LOCAL_MEM_FENCE);"
self.assertIn(expected, kernel)
def test_global_barrier(self):
kernel = func_to_kernel(CLFuncDesc(test_global_barrier, (1,)))
expected = "barrier(CLK_GLOBAL_MEM_FENCE);"
self.assertIn(expected, kernel)
def test_local_and_global_barrier(self):
kernel = func_to_kernel(CLFuncDesc(test_local_and_global_barrier, (1,)))
expected = "barrier(CLK_LOCAL_MEM_FENCE|CLK_GLOBAL_MEM_FENCE);"
self.assertIn(expected, kernel)
""" Tests for miscalleneous things """
class SomeClass:
def class_func():
pass
def return_func():
return
class TestMisc(unittest.TestCase):
def test_class_func(self):
kernel = func_to_kernel(CLFuncDesc(SomeClass.class_func, (1,)))
def test_return_func(self):
kernel = func_to_kernel(CLFuncDesc(return_func, (1,)))
expected_return = "return;"
self.assertIn(expected_return, kernel)
if __name__ == "__main__":
unittest.main()
|
mit
| 1,394,027,190,289,902,600 | 31.177866 | 167 | 0.628178 | false |
kyleconroy/hn-comments
|
fabfile.py
|
1
|
10359
|
from fabric.api import *
from lxml import html
import datetime
import urllib
import time
import datetime
import re
import os
import json
import csv
DOWNLOAD_DATE = datetime.datetime(2012, 1, 28)
def story_id(url):
_, story = url.split("id=")
return story
def save_story(url, directory):
story_url = "http://news.ycombinator.com"
story_url += url.split("http://news.ycombinator.com")[1]
story = story_id(url)
created = False
filename = os.path.join(directory, "{}.html".format(story))
if not os.path.exists(filename):
created = True
urllib.urlretrieve(story_url, filename)
return filename, created
def parse_stories(frontpage_path):
try:
parsed = html.fromstring(open(frontpage_path).read())
except:
return []
pattern = re.compile("http:\/\/web\.archive\.org\/web\/\d+\/"
"http:\/\/news\.ycombinator\.com\/(item|comments)\?id=(\d+)")
urls = []
for link in parsed.cssselect("td.subtext a"):
href = link.attrib["href"]
if pattern.match(href) and "id=363" not in href:
urls.append(href.replace("comments", "item"))
return urls
def transform_frontpages():
if not os.path.isdir("data"):
os.makedirs("data")
if os.path.exists("data/frontpages.json"):
puts("Already created frontpages.json, stopping")
return
hn_dir = os.path.expanduser("~/clocktower/news.ycombinator.com")
frontpages = []
for frontpage in os.listdir(hn_dir):
filename, ext = os.path.splitext(frontpage)
ts = datetime.datetime.fromtimestamp(int(filename))
readable = ts.strftime('%Y-%m-%d %H:%M:%S')
puts("Transforming frontpage on {}".format(readable))
urls = parse_stories(os.path.join(hn_dir, frontpage))
stories = [story_id(url) for url in urls]
if not stories:
continue
frontpages.append({
"timestamp": int(filename),
"stories": stories,
})
json.dump(frontpages, open("data/frontpages.json", "w"))
def parse_comment(td):
comment = {
"id": None,
"author": None,
"url": None,
"body": None,
"score": 1,
"timestamp": None,
}
try:
match = re.search("(\d+) days ago", td.text_content())
if match:
days_ago = int(match.group(1))
submitted = DOWNLOAD_DATE - datetime.timedelta(days=days_ago)
comment["timestamp"] = time.mktime(submitted.timetuple())
except IndexError:
pass
try:
fragment = td.cssselect("span.comhead a")[1].attrib["href"]
comment["id"] = int(fragment.split("id=")[1])
comment["link"] = "http://news.ycombinator.com/" + fragment
except IndexError:
pass
try:
color = td.cssselect("span.comment font")[0].attrib["color"]
worst = int("0xe6e6e6", 0)
comment["score"] = 1 - int(color.replace("#", "0x"), 0) / float(worst)
except IndexError:
pass
try:
comment["author"] = td.cssselect("span.comhead a")[0].text_content()
except IndexError:
pass
try:
comment["body"] = td.cssselect("span.comment")[0].text_content()
except IndexError:
pass
return comment
def parse_comments(parsed):
return [parse_comment(td) for td in parsed.cssselect("td.default")]
def parse_story(parsed):
story = {
"title": None,
"url": None,
"dead": False,
"points": 0,
"submitter": None,
"timestamp": None,
"comments": parse_comments(parsed),
}
try:
title = parsed.cssselect("td.title")[0].text_content()
if title == "[deleted]" or title == "[dead]":
story["dead"] = True
return story
except IndexError:
pass
try:
link = parsed.cssselect("td.title a")[0]
story["title"] = link.text_content()
story["url"] = link.attrib["href"]
except IndexError:
pass
try:
span = parsed.cssselect("td.subtext span")[0]
story["points"] = int(span.text_content().replace(" points", ""))
except IndexError:
pass
try:
td = parsed.cssselect("td.subtext")[0]
match = re.search("(\d+) days ago", td.text_content())
if match:
days_ago = int(match.group(1))
submitted = DOWNLOAD_DATE - datetime.timedelta(days=days_ago)
story["timestamp"] = time.mktime(submitted.timetuple())
except IndexError:
pass
try:
link = parsed.cssselect("td.subtext a")[0]
story["submitter"] = link.text_content()
except IndexError:
pass
return story
def transform_stories():
if not os.path.isdir("data/stories"):
os.makedirs("data/stories")
for story in os.listdir("comments/raw"):
if story.startswith("."):
continue # Damn you .DS_Store
story_id, ext = os.path.splitext(story)
story_path = os.path.join("comments/raw", story)
#puts("Parsing story {}".format(story))
json_path = "data/stories/{}.json".format(story_id)
if os.path.exists(json_path):
puts("Already created {}".format(json_path))
continue
try:
parsed = html.fromstring(open(story_path).read())
except:
continue # Couldn't parse the html
story = parse_story(parsed)
story["id"] = int(story_id)
json.dump(story, open("data/stories/{}.json".format(story_id), "w"))
puts("Created {}".format(json_path))
@task
def transform():
transform_frontpages()
transform_stories()
def analyze_comment_length():
if not os.path.isdir("data/graphs"):
os.makedirs("data/graphs")
puts("Generating comment length data")
writer = csv.writer(open("data/graphs/number_comments.csv", "w"))
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
if story["timestamp"]:
writer.writerow([story["timestamp"], len(story["comments"])])
def analyze_story_points():
if not os.path.isdir("data/graphs"):
os.makedirs("data/graphs")
puts("Generating stories points data")
writer = csv.writer(open("data/graphs/story_points.csv", "w"))
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
writer.writerow([story["timestamp"], story["points"]])
def analyze_comment_score_versus_length():
if not os.path.isdir("data/graphs"):
os.makedirs("data/graphs")
puts("Generating comment score versus length data")
writer = csv.writer(open("data/graphs/comment_length_vs_score.csv", "w"))
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
for comment in story["comments"]:
if comment["body"]:
writer.writerow([len(comment["body"]), comment["score"]])
def analyze_comment_case():
scores = []
lowercase = []
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
for comment in story["comments"]:
scores.append(comment["score"])
if comment.get("body", None) and comment["body"] == comment["body"].lower():
lowercase.append(comment["score"])
all_avg = sum(scores) / float(len(scores))
puts("Total number of all comments: {}".format(len(scores)))
puts("Average score for all comments: {}".format(all_avg))
lowercase_avg = sum(lowercase) / float(len(lowercase))
puts("Total number of lowercase comments: {}".format(len(lowercase)))
puts("Average score for lowercase comments: {}".format(lowercase_avg))
def analyze_worst_comments():
scores = []
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
for comment in story["comments"]:
scores.append((comment["score"], comment))
scores.sort()
for score, body in scores[:10]:
print score
        print body
print
def analyze_comment_numbers():
if not os.path.isdir("data/graphs"):
os.makedirs("data/graphs")
puts("Generating comment totals data")
writer = csv.writer(open("data/graphs/comment_length.csv", "w"))
for story_file in os.listdir("data/stories"):
if story_file.startswith("."):
continue # Damn you .DS_Store
story = json.load(open(os.path.join("data/stories", story_file)))
for comment in story["comments"]:
if comment["timestamp"] and comment["body"]:
writer.writerow([comment["timestamp"], len(comment["body"])])
@task
def report():
#analyze_comment_case()
analyze_worst_comments()
@task
def analyze():
analyze_comment_length()
analyze_comment_numbers()
analyze_story_points()
analyze_comment_score_versus_length()
@task
def download():
if not os.path.isdir("comments/raw"):
os.makedirs("comments/raw")
hn_dir = os.path.expanduser("~/clocktower/news.ycombinator.com")
for frontpage in os.listdir(hn_dir):
filename, ext = os.path.splitext(frontpage)
ts = datetime.datetime.fromtimestamp(int(filename))
readable = ts.strftime('%Y-%m-%d %H:%M:%S')
puts("Parsing frontpage on {}".format(readable))
stories = parse_stories(os.path.join(hn_dir, frontpage))
for url in set(stories):
filename, created = save_story(url, "comments/raw")
if created:
puts("Saved {} at {}".format(filename, time.time()))
time.sleep(1)
else:
puts("Already saved {}".format(filename))
|
mit
| 9,100,840,845,654,076,000 | 27.380822 | 88 | 0.58915 | false |
jackromo/mathLibPy
|
mathlibpy/functions/polynomial.py
|
1
|
3304
|
"""
Polynomial class.
Author: Jack Romo <[email protected]>
"""
import function
class Polynomial(function.Function):
"""
Generic dense one-variable polynomial.
"""
def __init__(self, coeffs):
"""
Args:
coeffs (list): List of all coefficients, indexed by degree. ie. coeffs[n] = nth degree coefficient.
"""
self.coeffs = coeffs
@property
def coeffs(self):
return self._coeffs
@property
def degree(self):
return len(self.coeffs) - 1
@coeffs.setter
def coeffs(self, c):
if not isinstance(c, list):
raise TypeError("must provide list as arg")
elif len(c) == 0:
raise ValueError("arg length must be > 0")
        else:
            for i, d in enumerate(list(reversed(c))):
                if d != 0:
                    # trim trailing zero coefficients so degree reflects the leading term
                    self._coeffs = c[:len(c) - i]
                    break
            else:
                # all coefficients are zero: keep a single zero term so degree is 0
                self._coeffs = [0]
def _evaluate(self, x):
return sum(self[i]*(x**i) for i in range(self.degree+1))
def __str__(self):
"""
Returns:
str: String of following form: '#x^0 +- #x^1 +- ... +- #x^n'
"""
result = ""
for i, c in enumerate(self.coeffs):
if i == 0:
result += str(c)
elif c == 0:
continue
elif c < 0:
result += " - {0}x^{1}".format(-c, i)
else:
result += " + {0}x^{1}".format(c, i)
return result
def __eq__(self, other):
if not isinstance(other, Polynomial):
return False
elif self.degree != other.degree:
return False
else:
            return all(self[i] == other[i] for i in range(self.degree + 1))
def __getitem__(self, item):
"""
Gets nth coefficient of polynomial.
Args:
item (int): Degree of the term whose coefficient will be retrieved.
Returns:
number: Coefficient of term with degree equal to provided index.
"""
if not isinstance(item, int):
raise TypeError("Tried to index with a non-integer")
elif item < 0:
raise ValueError("Tried to index term of exponent < 0")
elif item > self.degree:
return 0 # Polynomial has infinite terms, but ones > degree have coefficient 0
else:
return self.coeffs[item]
def __setitem__(self, key, value):
"""
Resets nth coefficient of polynomial to prescribed value.
Args:
key (int): Degree of the term whose coefficient will be altered.
value (number): New coefficient value.
"""
if not isinstance(key, int):
raise TypeError("Tried to index with a non-integer")
elif key < 0:
raise ValueError("Tried to index term of exponent < 0")
elif key > self.degree:
self.coeffs.extend([0 for _ in range(key - self.degree - 1)] + [value])
# Degree automatically updated by coeffs setter
else:
self.coeffs[key] = value
def get_derivative(self):
if self.degree == 0:
return function.Constant(0)
else:
return Polynomial([self.coeffs[i]*i for i in range(1, self.degree+1)])
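# --- Illustrative usage sketch (not part of the original module) ---
# Exercises only the methods defined above; the coefficient list is indexed by degree.
if __name__ == "__main__":
    p = Polynomial([1, 0, 3])                      # represents 1 + 3x^2
    assert str(p) == "1 + 3x^2"
    assert p.degree == 2
    assert p[2] == 3 and p[5] == 0                 # coefficients beyond the degree are 0
    assert str(p.get_derivative()) == "0 + 6x^1"   # d/dx (1 + 3x^2) = 6x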
|
mit
| -5,076,760,681,048,782,000 | 28.5 | 111 | 0.524213 | false |
heuer/segno
|
tests/test_utils_iterverbose.py
|
1
|
12108
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Tests against the ``utils.matrix_iter_verboses`` function.
"""
from __future__ import absolute_import, unicode_literals, print_function
import io
import os
import pytest
from segno import encoder, utils, consts
def read_matrix(name):
"""\
Helper function to read a matrix from /ref_matrix. The file extension .txt
is added automatically.
:return: A tuple of bytearrays
"""
matrix = []
with io.open(os.path.join(os.path.dirname(__file__),
'feature_decompose/{0}.txt'.format(name)), 'rt') as f:
for row in f:
matrix.append(bytearray([int(i) for i in row if i != '\n']))
return matrix
def test_finder_pattern_dark_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_FINDER_PATTERN_DARK] for v in row]))
expected = read_matrix('v1-finder-dark')
assert expected == res
def test_finder_pattern_dark_light_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_FINDER_PATTERN_LIGHT] for v in row]))
expected = read_matrix('v1-finder-light')
assert expected == res
def test_finder_pattern_dark_and_light_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_FINDER_PATTERN_DARK,
consts.TYPE_FINDER_PATTERN_LIGHT)] for v in row])) # noqa: E501
expected = read_matrix('v1-finder-dark-and-light')
assert expected == res
def test_finder_pattern_dark_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_FINDER_PATTERN_DARK] for v in row]))
expected = read_matrix('m2-finder-dark')
assert expected == res
def test_finder_pattern_dark_light_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_FINDER_PATTERN_LIGHT] for v in row]))
expected = read_matrix('m2-finder-light')
assert expected == res
def test_finder_pattern_dark_and_light_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_FINDER_PATTERN_DARK,
consts.TYPE_FINDER_PATTERN_LIGHT)] for v in row])) # noqa: E501
expected = read_matrix('m2-finder-dark-and-light')
assert expected == res
def test_separator_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_SEPARATOR] for v in row]))
expected = read_matrix('v1-separator')
assert expected == res
def test_separator_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_SEPARATOR] for v in row]))
expected = read_matrix('m2-separator')
assert expected == res
def test_darkmodule_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_DARKMODULE] for v in row]))
expected = read_matrix('v1-darkmodule')
assert expected == res
def test_no_darkmodule_mqr():
# Micro QR Codes don't have a dark module.
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.extend([v == consts.TYPE_DARKMODULE for v in row])
assert True not in res
def test_timing_dark_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_TIMING_DARK] for v in row]))
expected = read_matrix('v1-timing-dark')
assert expected == res
def test_timing_light_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_TIMING_LIGHT] for v in row]))
expected = read_matrix('v1-timing-light')
assert expected == res
def test_timing_dark_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_TIMING_DARK] for v in row]))
expected = read_matrix('m2-timing-dark')
assert expected == res
def test_timing_light_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_TIMING_LIGHT] for v in row]))
expected = read_matrix('m2-timing-light')
assert expected == res
def test_timing_dark_and_light_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_TIMING_DARK,
consts.TYPE_TIMING_LIGHT)] for v in row]))
expected = read_matrix('m2-timing-dark-and-light')
assert expected == res
def test_alignment_dark():
qr = encoder.encode('A', version=12)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_ALIGNMENT_PATTERN_DARK] for v in row]))
expected = read_matrix('v12-alignment-dark')
assert expected == res
def test_alignment_light():
qr = encoder.encode('A', version=12)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_ALIGNMENT_PATTERN_LIGHT] for v in row]))
expected = read_matrix('v12-alignment-light')
assert expected == res
def test_alignment_dark_and_light():
qr = encoder.encode('A', version=12)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_ALIGNMENT_PATTERN_LIGHT,
consts.TYPE_ALIGNMENT_PATTERN_DARK)] for v in row])) # noqa: E501
expected = read_matrix('v12-alignment-dark-and-light')
assert expected == res
def test_version_dark():
qr = encoder.encode('A', version=7)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_VERSION_DARK] for v in row]))
expected = read_matrix('v7-version-dark')
assert expected == res
def test_version_light():
qr = encoder.encode('A', version=7)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_VERSION_LIGHT] for v in row]))
expected = read_matrix('v7-version-light')
assert expected == res
def test_version_dark_and_light():
qr = encoder.encode('A', version=7)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_VERSION_LIGHT,
consts.TYPE_VERSION_DARK)] for v in row])) # noqa: E501
expected = read_matrix('v7-version-dark-and-light')
assert expected == res
def test_version_no_version():
# The version information is not available in QR Codes < 7
qr = encoder.encode('A', version=6)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.extend([v in (consts.TYPE_VERSION_LIGHT, consts.TYPE_VERSION_DARK) for v in row])
assert True not in res
def test_format_dark_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v == consts.TYPE_FORMAT_DARK] for v in row]))
expected = read_matrix('v1-format-dark')
assert expected == res
def test_format_light_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_FORMAT_LIGHT] for v in row]))
expected = read_matrix('v1-format-light')
assert expected == res
def test_format_dark_and_light_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=0):
res.append(bytearray([(0x2, 0x1)[v in (consts.TYPE_FORMAT_DARK,
consts.TYPE_FORMAT_LIGHT)] for v in row]))
expected = read_matrix('v1-format-dark-and-light')
assert expected == res
def test_quietzone_default_qr():
qr = encoder.encode('A', micro=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_QUIET_ZONE] for v in row]))
expected = read_matrix('v1-quietzone-4')
assert expected == res
def test_quietzone_custom_qr():
qr = encoder.encode('A', micro=False)
border = 1
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=border):
res.append(bytearray([(0x2, 0x0)[v == consts.TYPE_QUIET_ZONE] for v in row]))
expected = read_matrix('v1-quietzone-1')
assert expected == res
def test_quietzone_default_mqr():
qr = encoder.encode('A', micro=True)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version):
res.append(bytearray([(0x1, 0x0)[v == consts.TYPE_QUIET_ZONE] for v in row]))
expected = read_matrix('m2-quietzone-2')
assert expected == res
def test_quietzone_custom_mqr():
qr = encoder.encode('A', micro=True)
border = 5
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version, border=border):
res.append(bytearray([(0x1, 0x0)[v == consts.TYPE_QUIET_ZONE] for v in row]))
expected = read_matrix('m2-quietzone-5')
assert expected == res
def test_convert_to_boolean_true():
qr = encoder.encode('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ',
error='m', mask=4, boost_error=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version):
res.append(bytearray([int(v >> 8 > 0) for v in row]))
expected = read_matrix('iso-fig-29')
assert expected == res
def test_convert_to_boolean_false():
qr = encoder.encode('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ',
error='m', mask=4, boost_error=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version):
res.append(bytearray([not int(v >> 8 == 0) for v in row]))
expected = read_matrix('iso-fig-29')
assert expected == res
def test_convert_to_boolean():
qr = encoder.encode('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ',
error='m', mask=4, boost_error=False)
res = []
for row in utils.matrix_iter_verbose(qr.matrix, qr.version):
res.append(bytearray([bool(v >> 8) for v in row]))
expected = read_matrix('iso-fig-29')
assert expected == res
if __name__ == '__main__':
pytest.main([__file__])
|
bsd-3-clause
| -163,168,455,473,727,740 | 35.36036 | 113 | 0.632144 | false |
felixmatt/shyft
|
shyft/tests/api/test_calibration_types.py
|
1
|
17544
|
import math
import numpy as np
import unittest
from shyft import api
from shyft.api import pt_gs_k
from shyft.api import pt_hs_k
from shyft.api import pt_ss_k
from shyft.api import hbv_stack
from shyft.api import pt_hps_k
class ShyftApi(unittest.TestCase):
"""
Verify basic SHyFT api calibration related functions and structures
"""
def verify_parameter_for_calibration(self, param, expected_size, valid_names, test_dict=None):
min_p_value = -1e+10
max_p_value = +1e+10
test_dict = test_dict or dict()
self.assertEqual(expected_size, param.size(), "expected parameter size changed")
pv = api.DoubleVector([param.get(i) for i in range(param.size())])
for i in range(param.size()):
v = param.get(i)
self.assertTrue(v > min_p_value and v < max_p_value)
if i not in test_dict:
pv[i] = v * 1.01
param.set(pv) # set the complete vector, only used during C++ calibration, but we verify it here
x = param.get(i)
self.assertAlmostEqual(v * 1.01, x, 3, "Expect new value when setting value")
else:
pv[i] = test_dict[i]
param.set(pv)
x = param.get(i)
self.assertAlmostEqual(x, test_dict[i], 1, "Expect new value when setting value")
p_name = param.get_name(i)
self.assertTrue(len(p_name) > 0, "parameter name should exist")
self.assertEqual(valid_names[i], p_name)
def test_pt_hs_k_param(self):
pthsk_size = 17
pthsk = pt_hs_k.PTHSKParameter()
self.assertIsNotNone(pthsk)
self.assertEqual(pthsk.size(), pthsk_size)
pthsk.hs.lw = 0.23
self.assertAlmostEqual(pthsk.hs.lw, 0.23)
snow = api.HbvSnowParameter(
tx=0.2) # ordered .. keyword does work now! TODO: verify if we can have boost provide real kwargs
self.assertIsNotNone(snow)
snow.lw = 0.2
self.assertAlmostEqual(snow.lw, 0.2)
valid_names = [
"kirchner.c1",
"kirchner.c2",
"kirchner.c3",
"ae.ae_scale_factor",
"hs.lw",
"hs.tx",
"hs.cx",
"hs.ts",
"hs.cfr",
"gm.dtf",
"p_corr.scale_factor",
"pt.albedo",
"pt.alpha",
"routing.velocity",
"routing.alpha",
"routing.beta",
"gm.direct_response"
]
self.verify_parameter_for_calibration(pthsk, pthsk_size, valid_names)
def test_hbv_stack_param(self):
hbv_size = 21
hbv = hbv_stack.HbvParameter()
self.assertIsNotNone(hbv)
self.assertEqual(hbv.size(), hbv_size)
valid_names = [
"soil.fc",
"soil.beta",
"ae.lp",
"tank.uz1",
"tank.kuz2",
"tank.kuz1",
"tank.perc",
"tank.klz",
"hs.lw",
"hs.tx",
"hs.cx",
"hs.ts",
"hs.cfr",
"p_corr.scale_factor",
"pt.albedo",
"pt.alpha",
"gm.dtf",
"routing.velocity",
"routing.alpha",
"routing.beta",
"gm.direct_response"
]
self.verify_parameter_for_calibration(hbv, hbv_size, valid_names)
def test_pt_gs_k_param(self):
ptgsk_size = 30
valid_names = [
"kirchner.c1",
"kirchner.c2",
"kirchner.c3",
"ae.ae_scale_factor",
"gs.tx",
"gs.wind_scale",
"gs.max_water",
"gs.wind_const",
"gs.fast_albedo_decay_rate",
"gs.slow_albedo_decay_rate",
"gs.surface_magnitude",
"gs.max_albedo",
"gs.min_albedo",
"gs.snowfall_reset_depth",
"gs.snow_cv",
"gs.glacier_albedo",
"p_corr.scale_factor",
"gs.snow_cv_forest_factor",
"gs.snow_cv_altitude_factor",
"pt.albedo",
"pt.alpha",
"gs.initial_bare_ground_fraction",
"gs.winter_end_day_of_year",
"gs.calculate_iso_pot_energy",
"gm.dtf",
"routing.velocity",
"routing.alpha",
"routing.beta",
"gs.n_winter_days",
"gm.direct_response"
]
p = pt_gs_k.PTGSKParameter()
special_values = {22: 130, 28: 221}
self.verify_parameter_for_calibration(p, ptgsk_size, valid_names,special_values)
# special verification of bool parameter
p.gs.calculate_iso_pot_energy = True
self.assertTrue(p.gs.calculate_iso_pot_energy)
self.assertAlmostEqual(p.get(23), 1.0, 0.00001)
p.gs.calculate_iso_pot_energy = False
self.assertFalse(p.gs.calculate_iso_pot_energy)
self.assertAlmostEqual(p.get(23), 0.0, 0.00001)
pv = api.DoubleVector.from_numpy([p.get(i) for i in range(p.size())])
pv[23] = 1.0
p.set(pv)
self.assertTrue(p.gs.calculate_iso_pot_energy)
pv[23] = 0.0
p.set(pv)
self.assertFalse(p.gs.calculate_iso_pot_energy)
# checkout new parameters for routing
p.routing.velocity = 1 / 3600.0
p.routing.alpha = 1.1
p.routing.beta = 0.8
self.assertAlmostEqual(p.routing.velocity, 1 / 3600.0)
self.assertAlmostEqual(p.routing.alpha, 1.1)
self.assertAlmostEqual(p.routing.beta, 0.8)
utc = api.Calendar()
self.assertTrue(p.gs.is_snow_season(utc.time(2017, 1, 1)))
self.assertFalse(p.gs.is_snow_season(utc.time(2017, 8, 1)))
p.gs.n_winter_days = 100
self.assertFalse(p.gs.is_snow_season(utc.time(2017, 11, 31)))
self.assertTrue(p.gs.is_snow_season(utc.time(2017, 2, 1)))
def test_pt_hps_k_param(self):
ptgsk_size = 23
valid_names = [
"kirchner.c1",
"kirchner.c2",
"kirchner.c3",
"ae.ae_scale_factor",
"hps.lw",
"hps.tx",
"hps.cfr",
"hps.wind_scale",
"hps.wind_const",
"hps.surface_magnitude",
"hps.max_albedo",
"hps.min_albedo",
"hps.fast_albedo_decay_rate",
"hps.slow_albedo_decay_rate",
"hps.snowfall_reset_depth",
"hps.calculate_iso_pot_energy",
"gm.dtf",
"p_corr.scale_factor",
"pt.albedo",
"pt.alpha",
"routing.velocity",
"routing.alpha",
"routing.beta"
]
p = pt_hps_k.PTHPSKParameter()
self.verify_parameter_for_calibration(p, ptgsk_size, valid_names)
# special verification of bool parameter
#p.us.calculate_iso_pot_energy = True
#self.assertTrue(p.us.calculate_iso_pot_energy)
#self.assertAlmostEqual(p.get(23), 1.0, 0.00001)
#p.us.calculate_iso_pot_energy = False
#self.assertFalse(p.us.calculate_iso_pot_energy)
#self.assertAlmostEqual(p.get(23), 0.0, 0.00001)
pv = api.DoubleVector.from_numpy([p.get(i) for i in range(p.size())])
#pv[23] = 1.0
#p.set(pv)
#self.assertTrue(p.us.calculate_iso_pot_energy)
#pv[23] = 0.0;
p.set(pv)
#self.assertFalse(p.us.calculate_iso_pot_energy)
# checkout new parameters for routing
p.routing.velocity = 1 / 3600.0
p.routing.alpha = 1.1
p.routing.beta = 0.8
self.assertAlmostEqual(p.routing.velocity, 1 / 3600.0)
self.assertAlmostEqual(p.routing.alpha, 1.1)
self.assertAlmostEqual(p.routing.beta, 0.8)
def test_pt_ss_k_param(self):
ptssk_size = 20
valid_names = [
"kirchner.c1",
"kirchner.c2",
"kirchner.c3",
"ae.ae_scale_factor",
"ss.alpha_0",
"ss.d_range",
"ss.unit_size",
"ss.max_water_fraction",
"ss.tx",
"ss.cx",
"ss.ts",
"ss.cfr",
"p_corr.scale_factor",
"pt.albedo",
"pt.alpha",
"gm.dtf",
"routing.velocity",
"routing.alpha",
"routing.beta",
"gm.direct_response"
]
self.verify_parameter_for_calibration(pt_ss_k.PTSSKParameter(), ptssk_size, valid_names)
def _create_std_ptgsk_param(self):
ptp = api.PriestleyTaylorParameter(albedo=0.85, alpha=1.23)
ptp.albedo = 0.9
ptp.alpha = 1.26
aep = api.ActualEvapotranspirationParameter(ae_scale_factor=1.5)
aep.ae_scale_factor = 1.1
gsp = api.GammaSnowParameter(winter_end_day_of_year=99, initial_bare_ground_fraction=0.04, snow_cv=0.44,
tx=-0.3, wind_scale=1.9, wind_const=0.9, max_water=0.11, surface_magnitude=33.0,
max_albedo=0.88, min_albedo=0.55, fast_albedo_decay_rate=6.0,
slow_albedo_decay_rate=4.0, snowfall_reset_depth=6.1, glacier_albedo=0.44
) # TODO: This does not work due to boost.python template arity of 15, calculate_iso_pot_energy=False)
gsp.calculate_iso_pot_energy = False
gsp.snow_cv = 0.5
gsp.initial_bare_ground_fraction = 0.04
kp = api.KirchnerParameter(c1=-2.55, c2=0.8, c3=-0.01)
kp.c1 = 2.5
kp.c2 = -0.9
kp.c3 = 0.01
spcp = api.PrecipitationCorrectionParameter(scale_factor=0.9)
gm = api.GlacierMeltParameter(dtf=5.9) # verify we can construct glacier parameter
ptgsk_p = pt_gs_k.PTGSKParameter(ptp, gsp, aep, kp, spcp, gm) # passing optional gm parameter here
ptgsk_p.ae.ae_scale_factor = 1.2 # sih: just to demo ae scale_factor can be set directly
return ptgsk_p
def test_precipitation_correction_constructor(self):
spcp = api.PrecipitationCorrectionParameter(scale_factor=0.9)
self.assertAlmostEqual(0.9, spcp.scale_factor)
def test_create_ptgsk_param(self):
ptgsk_p = self._create_std_ptgsk_param()
copy_p = pt_gs_k.PTGSKParameter(ptgsk_p)
self.assertTrue(ptgsk_p != None, "should be possible to create a std param")
self.assertIsNotNone(copy_p)
def _create_std_geo_cell_data(self):
geo_point = api.GeoPoint(1, 2, 3)
ltf = api.LandTypeFractions()
ltf.set_fractions(0.2, 0.2, 0.1, 0.3)
geo_cell_data = api.GeoCellData(geo_point, 1000.0 ** 2, 0, 0.7, ltf)
geo_cell_data.radiation_slope_factor = 0.7
return geo_cell_data
def test_create_ptgsk_grid_cells(self):
geo_cell_data = self._create_std_geo_cell_data()
param = self._create_std_ptgsk_param()
cell_ts = [pt_gs_k.PTGSKCellAll, pt_gs_k.PTGSKCellOpt]
for cell_t in cell_ts:
c = cell_t()
c.geo = geo_cell_data
c.set_parameter(param)
m = c.mid_point()
self.assertTrue(m is not None)
c.set_state_collection(True)
def test_create_region_environment(self):
region_env = api.ARegionEnvironment()
temp_vector = api.TemperatureSourceVector()
region_env.temperature = temp_vector
self.assertTrue(region_env is not None)
def test_create_TargetSpecificationPts(self):
t = api.TargetSpecificationPts()
t.scale_factor = 1.0
t.calc_mode = api.NASH_SUTCLIFFE
t.calc_mode = api.KLING_GUPTA
t.calc_mode = api.ABS_DIFF
t.calc_mode = api.RMSE
t.s_r = 1.0 # KGEs scale-factors
t.s_a = 2.0
t.s_b = 3.0
self.assertIsNotNone(t.uid)
t.uid = 'test'
self.assertEqual(t.uid, 'test')
self.assertAlmostEqual(t.scale_factor, 1.0)
# create a ts with some points
cal = api.Calendar()
start = cal.time(2015, 1, 1, 0, 0, 0)
dt = api.deltahours(1)
tsf = api.TsFactory()
times = api.UtcTimeVector()
times.push_back(start + 1 * dt)
times.push_back(start + 3 * dt)
times.push_back(start + 4 * dt)
values = api.DoubleVector()
values.push_back(1.0)
values.push_back(3.0)
values.push_back(np.nan)
tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt), times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval time-axis, as needed by calibration
tst = api.TsTransform()
tsa = tst.to_average(start, dt, 24, tsp)
# tsa2 = tst.to_average(start,dt,24,tsp,False)
# tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
# tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
# stuff it into the target spec.
# also show how to specify snow-calibration
cids = api.IntVector([0, 2, 3])
t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.SNOW_COVERED_AREA,
'test_uid')
self.assertEqual(t2.uid, 'test_uid')
t2.catchment_property = api.SNOW_WATER_EQUIVALENT
self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
t2.catchment_property = api.CELL_CHARGE
self.assertEqual(t2.catchment_property, api.CELL_CHARGE)
self.assertIsNotNone(t2.catchment_indexes)
for i in range(len(cids)):
self.assertEqual(cids[i], t2.catchment_indexes[i])
t.ts = api.TimeSeries(tsa) # target spec is now a regular TimeSeries
tv = api.TargetSpecificationVector()
tv[:] = [t, t2]
# now verify we got something ok
self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # average of the linear segment over hour 1..2
        self.assertAlmostEqual(tv[0].ts.value(2), 2.5)  # average of the linear segment over hour 2..3
# self.assertAlmostEqual(tv[0].ts.value(3), 3.0) # original flat out at end, but now:
self.assertTrue(math.isnan(tv[0].ts.value(3))) # strictly linear between points.
# and that the target vector now have its own copy of ts
tsa.set(1, 3.0)
self.assertAlmostEqual(tv[0].ts.value(1), 1.5) # make sure the ts passed onto target spec, is a copy
self.assertAlmostEqual(tsa.value(1), 3.0) # and that we really did change the source
# Create a clone of target specification vector
tv2 = api.TargetSpecificationVector(tv)
self.assertEqual(2, tv2.size())
        self.assertAlmostEqual(tv2[0].ts.value(1), 1.5)  # clone keeps the same averaged values
        self.assertAlmostEqual(tv2[0].ts.value(2), 2.5)
        self.assertTrue(math.isnan(tv2[0].ts.value(3)))  # last interval is still nan in the clone
tv2[0].scale_factor = 10.0
self.assertAlmostEqual(tv[0].scale_factor, 1.0)
self.assertAlmostEqual(tv2[0].scale_factor, 10.0)
# test we can create from breakpoint time-series
ts_bp = api.TimeSeries(
api.TimeAxis(api.UtcTimeVector([0, 25, 20]), 30),
fill_value=2.0,
point_fx=api.POINT_AVERAGE_VALUE
)
tspec_bp = api.TargetSpecificationPts(
ts_bp,
cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.CELL_CHARGE,
'test_uid'
)
self.assertIsNotNone(tspec_bp)
def test_create_target_spec_from_std_time_series(self):
"""
Verify we can create target-spec giving ordinary ts,
and that passing a non-fixed time-axis raises exception
"""
cal = api.Calendar()
ta = api.TimeAxis(cal.time(2017, 1, 1), api.deltahours(1), 24)
ts = api.TimeSeries(ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
cids = api.IntVector([0, 2, 3])
t0 = api.TargetSpecificationPts(ts, cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.SNOW_COVERED_AREA,
'test_uid')
self.assertAlmostEqual(t0.ts.value(0), ts.value(0))
rid = 0
t1 = api.TargetSpecificationPts(ts, rid, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, 'test_uid')
self.assertAlmostEqual(t1.ts.value(0), ts.value(0))
tax = api.TimeAxis(api.UtcTimeVector.from_numpy(ta.time_points[:-1]), ta.total_period().end)
tsx = api.TimeSeries(tax, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
tx = api.TargetSpecificationPts(tsx, rid, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, 'test_uid')
self.assertIsNotNone(tx)
def test_IntVector(self):
        v1 = api.IntVector()  # empty
v2 = api.IntVector([i for i in range(10)]) # by list
v3 = api.IntVector([1, 2, 3]) # simple list
self.assertEqual(v2.size(), 10)
self.assertEqual(v1.size(), 0)
self.assertEqual(len(v3), 3)
def test_DoubleVector(self):
        v1 = api.DoubleVector([i for i in range(10)])  # from a Python list
v2 = api.DoubleVector.FromNdArray(np.arange(0, 10.0, 0.5))
v3 = api.DoubleVector(np.arange(0, 10.0, 0.5))
self.assertEqual(len(v1), 10)
self.assertEqual(len(v2), 20)
self.assertEqual(len(v3), 20)
self.assertAlmostEqual(v2[3], 1.5)
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
| 4,587,426,790,893,696,500 | 39.419355 | 141 | 0.563049 | false |
RichardOfWard/discharge
|
tests/test_0006_cmd.py
|
1
|
2276
|
import os
import subprocess
import urllib2
import time
import threading
class TestCmd(object):
source_path = os.path.join(os.path.dirname(__file__), 'sites/test_cmd')
def test_no_command(self):
p = subprocess.Popen(['discharge'],
cwd=self.source_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
assert p.wait() != 0
def test_bad_command(self):
p = subprocess.Popen(['discharge', 'nosuchcommand'],
cwd=self.source_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
assert p.wait() != 0
def test_build(self):
p = subprocess.Popen(['discharge', 'build'],
cwd=self.source_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
print(out, err)
assert False, "discharge build failed"
assert os.path.exists(self.source_path + '/testfile.html')
def test_serve(self):
p = subprocess.Popen(['discharge', 'serve'],
cwd=self.source_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pt = ProcessThread(p)
pt.start()
time.sleep(1)
try:
urllib2.urlopen('http://127.0.0.1:8000/testfile.html')
except Exception as e:
if p.returncode is None:
try:
urllib2.urlopen('http://127.0.0.1:8000/!SHUTDOWN!')
except:
pass
pt.join()
print(pt.out)
print(pt.err)
raise e
else:
if p.returncode is None:
try:
urllib2.urlopen('http://127.0.0.1:8000/!SHUTDOWN!')
except:
pass
pt.join()
class ProcessThread(threading.Thread):
def __init__(self, p):
super(ProcessThread, self).__init__()
self.p = p
def run(self):
self.out, self.err = self.p.communicate()
|
bsd-3-clause
| 3,167,018,456,134,187,000 | 29.756757 | 75 | 0.476274 | false |
vladimiroff/humble-media
|
humblemedia/resources/models.py
|
1
|
3629
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from taggit.managers import TaggableManager
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from payments.models import Payment
class Attachment(models.Model):
file = models.FileField(upload_to='attachments')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.file.name
def document_(self):
return format(str(self.file).split('/')[-1])
class License(models.Model):
name = models.CharField(max_length=64)
text = models.TextField()
url = models.URLField(null=True, blank=True)
def __str__(self):
return self.name
class ResourceManager(models.Manager):
def __init__(self, resource_type):
super().__init__()
self.resource_type = resource_type
def get_queryset(self):
ct = ContentType.objects.get_for_model(self.model)
resource_ids = Attachment.objects.filter(
content_type=ct, previews__preview_type=self.resource_type
).values_list('object_id', flat=True).distinct()
return super().get_queryset().filter(id__in=resource_ids)
class Resource(models.Model):
title = models.CharField(max_length=64)
description = models.TextField()
author = models.ForeignKey('auth.User', related_name='resources')
min_price = models.PositiveIntegerField(default=1, blank=True)
is_published = models.BooleanField(default=False, blank=True)
is_verified = models.BooleanField(default=True, blank=True)
created_at = models.DateTimeField(default=timezone.now)
modified_at = models.DateTimeField(default=timezone.now)
causes = models.ManyToManyField('causes.Cause', related_name='resources')
license = models.ForeignKey('License', related_name='resources', null=True, blank=True)
objects = models.Manager()
audios = ResourceManager('audio')
videos = ResourceManager('video')
images = ResourceManager('image')
documents = ResourceManager('document')
others = ResourceManager('unknown')
tags = TaggableManager()
class Meta:
ordering = ['-modified_at']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('resources:detail', kwargs={'pk': self.pk})
def save(self, *args, **kwargs):
self.modified_at = timezone.now()
return super().save(*args, **kwargs)
def is_bought_by(self, user):
return Payment.objects.filter(resource=self, user=user).exists()
def get_attachments(self):
ct = ContentType.objects.get_for_model(self)
return Attachment.objects.filter(object_id=self.id, content_type=ct)
def get_previews(self):
previews = []
attachments = self.get_attachments()
for attachment in attachments:
preview = attachment.previews.first()
if preview is not None:
previews.append(preview.preview_file.name)
return previews
class Preview(models.Model):
TYPES = (
('image', 'Image'),
('video', 'Video'),
('audio', 'Audio'),
('document', 'Document'),
('unknown', 'Unknown'),
)
attachment = models.ForeignKey(Attachment, related_name="previews")
preview_file = models.FileField(upload_to="previews")
preview_type = models.CharField(max_length=32, choices=TYPES, default='unknown')
|
mit
| -5,848,695,045,198,825,000 | 33.235849 | 91 | 0.672913 | false |
Letractively/aha-gae
|
aha/model/lazyloadingmodel.py
|
1
|
4444
|
## -*- coding: utf-8 -*-
#
# lazyloadingmodel.py
# A collection of classes that load their properties lazily.
#
# Copyright 2010 Atsushi Shibata
#
__author__ = 'Atsushi Shibata <[email protected]>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
from google.appengine.ext import db
KEY_PREFIX = 'lazykey_'
class LazyPropertyBase(db.Model):
"""
A base class of lazy property,
which loads data only when the attribute accessed
"""
parent_keyname = db.StringProperty(required = False, default = '')
def get_data(self, key):
"""
A method to obtain data itself.
"""
if key:
try:
d = self.get(key)
return d.data
except db.datastore_errors.BadKeyError, e:
pass
return None
def set_data(self, model, key, data):
"""
A method to put data to the datastore.
"""
d = None
if key:
try:
d = self.get(key)
d.data = data
except db.datastore_errors.BadKeyError, e:
pass
if not d:
d = self.__class__(data = data)
try:
key = model.key()
d.parent_keyname = str(key)
except db.NotSavedError, e:
pass
d.put()
return str(d.key())
def get_parent(self, key):
"""
A method to obtain parent model.
"""
d = self.get(key)
if not d.parent_keyname:
raise ValueError('Parent has not been set yet.')
return db.get(d.parent_keyname)
class LazyLoadingMetaclass(db.PropertiedClass):
"""
A metaclass which adds property(s) to store key information of
lazily loading property.
"""
def __new__(cls, name, bases, attrs):
"""
A method to create new class dynamically
"""
keys = attrs.keys()
lazyprops = []
for n in keys:
if isinstance(attrs[n], LazyPropertyBase):
# adding property for key of lazy property
attrs[KEY_PREFIX+n] = db.StringProperty(required = False,
default = '')
lazyprops.append(n)
attrs['lazy_properties'] = lazyprops
new_class = super(LazyLoadingMetaclass, cls).__new__(cls, name,
bases, attrs)
return new_class
class LazyModelBase(db.Model):
"""
A base class to hold lazy property.
"""
__metaclass__ = LazyLoadingMetaclass
def __getattribute__(self, key):
"""
A method to set attribute.
"""
attr = db.Model.__getattribute__(self, key)
if isinstance(attr, LazyPropertyBase):
ds_key = db.Model.__getattribute__(self, KEY_PREFIX+key)
value = attr.get_data(ds_key)
return value
else:
return attr
def __setattr__(self, key, value):
"""
A method to set attribute.
"""
ds_key_id = KEY_PREFIX+key
if hasattr(self, ds_key_id):
attr = db.Model.__getattribute__(self, key)
ds_key = db.Model.__getattribute__(self, KEY_PREFIX+key)
origv = attr.get_data(ds_key)
new_key = attr.set_data(self, ds_key, value)
if not origv:
db.Model.__setattr__(self, KEY_PREFIX+key, new_key)
else:
db.Model.__setattr__(self, key, value)
class LazyStringProperty(LazyPropertyBase):
"""
A lazy property which store string data.
"""
MODEL = db.StringProperty
data = db.StringProperty(required = False)
'''
class LazyListProperty(LazyPropertyBase):
"""
A lazy property which store list data.
"""
MODEL = db.ListProperty
data = db.ListProperty(required = False)
'''
class LazyStringListProperty(LazyPropertyBase):
"""
A lazy property which store string list data.
"""
MODEL = db.StringListProperty
data = db.StringListProperty()
class LazyTextProperty(LazyPropertyBase):
"""
A lazy property which store text data.
"""
MODEL = db.TextProperty
data = db.TextProperty(required = False)
class LazyBlobProperty(LazyPropertyBase):
"""
A lazy property which store blob data.
"""
MODEL = db.BlobProperty
data = db.BlobProperty(required = False)
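# --- Illustrative sketch (not part of the original module; model name is hypothetical) ---
# A model mixing an eager StringProperty with a lazily loaded body. The metaclass above
# transparently adds a hidden 'lazykey_body' StringProperty that stores the key of the
# side entity, so the large text is only fetched when 'body' is actually accessed.
# Requires a running App Engine datastore, hence shown as a commented sketch:
#
# class Article(LazyModelBase):
#     title = db.StringProperty()
#     body = LazyTextProperty()
#
# article = Article(title='hello')
# article.body = 'a very large text ...'   # persisted in its own LazyTextProperty entity
# article.put()
# print article.body                       # loaded from the datastore on attribute access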
|
bsd-3-clause
| -4,916,361,267,980,582,000 | 25.933333 | 75 | 0.549055 | false |
videntity/python-omhe
|
omhe/tests/pts_test.py
|
1
|
2504
|
__author__ = 'mark'
from omhe.tests.OMHETestCase import OMHETestCase
import unittest
from omhe.core.parseomhe import parseomhe
from omhe.core.validators.validator_errors import *
from test_utils import *
"""
_pts_
*Points*
points, gems
# tags are allowed
Points or rewards
Range: negative or positive integer
pts=-50, points=100 points
"""
TESTS_DEBUG = True
class pts_test(OMHETestCase):
validValues = ('pts10', 'pts=1005', 'pts=-50', 'pts=2000','gems=500',
'points=200', 'points250','gems-100','points=-90', 'pts=0')
invalidOutOfRangeValues = ('pts=400.35','points=150.6#tags','pts=200#no tags')
invalidCommand = ('foo120/80p60#eee', 'bar=120','gem=50','point=35')
valid_parse_val_1="points"
if TESTS_DEBUG==True:
print "================== START of PTS TEST ================"
def test_ValidValues_AlwaysContains_Points_NumericValue(self):
"""parse() of validValues should always return points in dict."""
if TESTS_DEBUG==True:
display_function_info()
display_data_set("Valid Values Set:",self.validValues)
for i in self.validValues:
p=parseomhe()
result = p.parse(i)
if TESTS_DEBUG==True:
display_test_result(i,result)
self.assertDictContains(result, self.valid_parse_val_1)
def test_Invalid_OutOfRangeValues(self):
"""validate() of invalidOutOfRangeValues should always raise InvalidValueError."""
if TESTS_DEBUG==True:
display_function_info()
display_data_set("Invalid Out of Range Values Set:",self.invalidOutOfRangeValues)
for i in self.invalidOutOfRangeValues:
p=parseomhe()
d=p.split(i)
if TESTS_DEBUG==True:
display_test_result(i,d)
self.assertRaises(InvalidValueError, p.validate, splitdict=d)
def test_InvalidMessage_Raises_InvalidMessageError(self):
"""split() of invalidCommand should always raise InvalidMessageError."""
if TESTS_DEBUG==True:
display_function_info()
display_data_set("Invalid Command Set:",self.invalidCommand)
for i in self.invalidCommand:
p=parseomhe()
if TESTS_DEBUG==True:
display_test_result(i,p)
self.assertRaises(InvalidMessageError, p.split, message=i)
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| -7,066,382,498,487,291,000 | 27.146067 | 93 | 0.607029 | false |
scienceopen/igrf12py
|
src/igrf/utils.py
|
1
|
2200
|
import typing
import datetime
from dateutil.parser import parse
import numpy as np
# %% utility functions
def mag_vector2incl_decl(x: float, y: float, z: float) -> typing.Tuple[float, float]:
"""
Inputs:
-------
vectors of the geomagnetic field.
    x: north component
y: east component
z: down (by convention) component
outputs:
--------
declination [degrees]
inclination [degrees]
http://geomag.nrcan.gc.ca/mag_fld/comp-en.php
"""
decl = np.degrees(np.arctan2(y, x))
incl = np.degrees(np.arctan2(z, np.hypot(x, y)))
return decl, incl
def latlon2colat(glat: float, glon: float) -> typing.Tuple[np.ndarray, np.ndarray]:
# atleast_1d for iteration later
colat = 90 - np.atleast_1d(glat)
elon = (360 + np.atleast_1d(glon)) % 360
return colat, elon
def latlonworldgrid(latstep: int = 5, lonstep: int = 5) -> typing.Tuple[np.ndarray, np.ndarray]:
lat = np.arange(-90.0, 90 + latstep, latstep)
lon = np.arange(-180.0, 180 + lonstep, lonstep)
glon, glat = np.meshgrid(lon, lat)
return glat, glon
def datetime2yeardec(time: typing.Union[str, datetime.datetime, datetime.date]) -> float:
"""
Convert a datetime into a float. The integer part of the float should
represent the year.
Order should be preserved. If adate<bdate, then d2t(adate)<d2t(bdate)
time distances should be preserved: If bdate-adate=ddate-cdate then
dt2t(bdate)-dt2t(adate) = dt2t(ddate)-dt2t(cdate)
"""
if isinstance(time, float):
# assume already year_dec
return time
if isinstance(time, str):
t = parse(time)
elif isinstance(time, datetime.datetime):
t = time
elif isinstance(time, datetime.date):
t = datetime.datetime.combine(time, datetime.datetime.min.time())
elif isinstance(time, (tuple, list, np.ndarray)):
return np.asarray([datetime2yeardec(t) for t in time])
else:
raise TypeError("unknown input type {}".format(type(time)))
year = t.year
boy = datetime.datetime(year, 1, 1)
eoy = datetime.datetime(year + 1, 1, 1)
return year + ((t - boy).total_seconds() / ((eoy - boy).total_seconds()))
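# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == "__main__":
    assert datetime2yeardec("2020-01-01") == 2020.0
    mid_2020 = datetime2yeardec(datetime.datetime(2020, 7, 2))   # day 183 of a 366-day year
    assert int(mid_2020) == 2020 and abs(mid_2020 - 2020.5) < 1e-9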
|
mit
| 4,507,464,114,959,676,000 | 27.947368 | 96 | 0.645455 | false |
openmrslab/suspect
|
suspect/core.py
|
1
|
1054
|
def adjust_phase(data, zero_phase, first_phase=0, fixed_frequency=0):
"""
Adjust the phase of an MRSBase object
Parameters
----------
data : MRSSpectrum
The MRSSpectrum object to be phased
zero_phase : scalar
The change to the zero order phase, in radians
first_phase : scalar, optional
The change to the first order phase, in radians per Hz
fixed_frequency : scalar, optional
The frequency, in Hz, which is unchanged by the first order
phase shift
Returns
-------
out : MRSSpectrum
A new MRSSpectrum object with adjusted phase.
"""
return data.adjust_phase(zero_phase, first_phase, fixed_frequency)
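# Illustrative usage (not part of the original module): adjust_phase(spectrum, math.pi / 2)
# would apply a constant 90-degree phase shift, while adjust_phase(spectrum, 0, 1e-3) additionally
# tilts the phase by 1 mrad per Hz away from fixed_frequency; 'spectrum' is assumed to be an
# MRSSpectrum instance.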
def adjust_frequency(data, frequency_shift):
"""
Adjust the centre frequency of an MRSBase object.
Parameters
----------
frequency_shift: float
The amount to shift the frequency, in Hertz.
Returns
-------
out : MRSData
Frequency adjusted FID
"""
return data.adjust_frequency(frequency_shift)
|
mit
| -2,198,145,725,456,612,000 | 26.025641 | 70 | 0.63852 | false |
justinh5/CipherBox
|
Arithmetic/Numbers/Primality/PrimeTest.py
|
1
|
4092
|
# Baillie–PSW primality test
# Source: http://codegolf.stackexchange.com/questions/10701/fastest-code-to-find-the-next-prime
# primes less than 212
small_primes = set([
2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
179, 181, 191, 193, 197, 199, 211])
# pre-calced sieve of eratosthenes for n = 2, 3, 5, 7
indices = [
1, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
89, 97, 101, 103, 107, 109, 113, 121, 127, 131,
137, 139, 143, 149, 151, 157, 163, 167, 169, 173,
179, 181, 187, 191, 193, 197, 199, 209]
# distances between sieve values
offsets = [
10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6,
6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4,
2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6,
4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10, 2]
max_int = 2147483647
# legendre symbol (a|m)
# note: returns m-1 if a is a non-residue, instead of -1
def legendre(a, m):
return pow(a, (m - 1) >> 1, m)
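# e.g. legendre(2, 7) == 1 since 2 is a quadratic residue mod 7 (3*3 = 2 mod 7),
# while legendre(3, 7) == 6 (i.e. m-1), marking 3 as a non-residue (illustrative note)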
# strong probable prime
def is_sprp(n, b=2):
d = n - 1
s = 0
while d & 1 == 0:
s += 1
d >>= 1
x = pow(b, d, n)
if x == 1 or x == n - 1:
return True
for r in range(1, s):
x = (x * x) % n
if x == 1:
return False
elif x == n - 1:
return True
return False
# lucas probable prime
# assumes D = 1 (mod 4), (D|n) = -1
def is_lucas_prp(n, D):
P = 1
Q = (1 - D) >> 2
# n+1 = 2**r*s where s is odd
s = n + 1
r = 0
while s & 1 == 0:
r += 1
s >>= 1
# calculate the bit reversal of (odd) s
# e.g. 19 (10011) <=> 25 (11001)
t = 0
while s > 0:
if s & 1:
t += 1
s -= 1
else:
t <<= 1
s >>= 1
# use the same bit reversal process to calculate the sth Lucas number
# keep track of q = Q**n as we go
U = 0
V = 2
q = 1
# mod_inv(2, n)
inv_2 = (n + 1) >> 1
while t > 0:
if t & 1 == 1:
# U, V of n+1
U, V = ((U + V) * inv_2) % n, ((D * U + V) * inv_2) % n
q = (q * Q) % n
t -= 1
else:
# U, V of n*2
U, V = (U * V) % n, (V * V - 2 * q) % n
q = (q * q) % n
t >>= 1
# double s until we have the 2**r*sth Lucas number
while r > 0:
U, V = (U * V) % n, (V * V - 2 * q) % n
q = (q * q) % n
r -= 1
# primality check
# if n is prime, n divides the n+1st Lucas number, given the assumptions
return U == 0
# an 'almost certain' primality check
def is_prime(n):
if n < 212:
return n in small_primes
for p in small_primes:
if n % p == 0:
return False
# if n is a 32-bit integer, perform full trial division
if n <= max_int:
i = 211
while i * i < n:
for o in offsets:
i += o
if n % i == 0:
return False
return True
# Baillie-PSW
# this is technically a probabalistic test, but there are no known pseudoprimes
if not is_sprp(n): return False
a = 5
s = 2
while legendre(a, n) != n - 1:
s = -s
a = s - a
return is_lucas_prp(n, a)
# next prime strictly larger than n
def next_prime(n):
if n < 2:
return 2
# first odd larger than n
n = (n + 1) | 1
if n < 212:
while True:
if n in small_primes:
return n
n += 2
# find our position in the sieve rotation via binary search
x = int(n % 210)
s = 0
e = 47
m = 24
while m != e:
if indices[m] < x:
s = m
m = (s + e + 1) >> 1
else:
e = m
m = (s + e) >> 1
i = int(n + (indices[m] - x))
# adjust offsets
offs = offsets[m:] + offsets[:m]
while True:
for o in offs:
if is_prime(i):
return i
i += o
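# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == "__main__":
    assert is_prime(2) and is_prime(211) and not is_prime(221)   # 221 = 13 * 17
    assert next_prime(14) == 17
    assert next_prime(1000000) == 1000003      # exercises the 32-bit trial-division path
    assert is_prime(2 ** 61 - 1)               # Mersenne prime M61, exercises the Baillie-PSW path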
|
mit
| 1,237,812,149,612,064,500 | 22.505747 | 95 | 0.446455 | false |
MattWellie/PAGE_MPO
|
jenny_scan_park.py
|
1
|
1701
|
import cPickle, os, csv
# script to read in a list of gene names, and check a list of de novo variants
# from DF1 against them to find any overlaps
input_file = 'jenny_query.tsv'
pli_store = {}
files = [file for file in os.listdir('.') if file.split('_')[0] == 'park']
# Create a dictionary object to hold the full contents of the file, indexed by gene
jenny = {}
with open(input_file, 'r') as handle:
for line in handle:
line_list = line.split('\t')
if line_list[0] == 'id':
pass
else:
line_gene_list = line_list[7].split('|')
for gene in line_gene_list:
if gene not in jenny:
jenny[gene] = []
jenny[gene].append(line)
# Easier to strip out this placeholder index afterwards
del jenny['.']
# Open each Park file in turn, identifying any lines of the file which overlap with the corresponding gene set
for file in files:
with open(file, 'r') as handle:
count = 0
print '\n------------------------------------------\
\nCurrent file: ', file.split('.')[0], \
'\n------------------------------------------'
for gene in handle:
gene=gene.rstrip()
if gene in jenny:
for x in jenny[gene]:
print x
count += 1
print 'Overlapping variants in ', file.split('.')[0], ' = ', count
# Now import the appropriate columns from the pLI file into a dict
with open('pli.txt', 'rU') as handle:
pli_dict = csv.DictReader(handle, delimiter='\t')
for row in pli_dict:
gene = row['gene']
pli_value = float(row['pLI'])
pli_store[gene] = pli_value
|
apache-2.0
| 4,703,149,821,520,379,000 | 33.02 | 110 | 0.537919 | false |
kulisty/sova
|
features.py
|
1
|
2069
|
import repository
import model
import json
import csv
import os
import itertools
import random
def retrieve(repository):
features = repository.retrieve_features()
names = {
(n,repository.address_functions(f,n,l),ff[0])
for (n,f,l,ff) in features
}
files = {
(f,repository.address_files(f))
for (n,f,l,ff) in features
}
ni = list(a for (n,a,c) in names)
nf = list(a for (f,a) in files)
idx = ni+nf+['.']
#print(idx)
#
graph = model.Graph([],[])
#
for (n,a,c) in names:
graph.nodes.append( model.Node(name = n, group = 'Function', id = idx.index(a), url = a, visibility = 2, complexity = 5+float(c)/3) )
for (f,a) in files:
graph.nodes.append( model.Node(name = f, group = 'File', id = idx.index(a), url = a, visibility = 1, complexity = 4.0))
#
graph.nodes.append( model.Node(name = '.', group = 'File', id = idx.index('.'), url = repository.origin, complexity = 6.0) )
#
for (n,f,l,ff) in features:
graph.links.append(model.Link(idx.index(repository.address_files(f)), idx.index(repository.address_functions(f,n,l)), 1, visibility = 2))
for (f,a) in files:
graph.links.append(model.Link(idx.index(a), idx.index('.'), 1, visibility = 1))
#for (c, p) in parents:
# graph.links.append(model.Link( idx.index(c), idx.index(p), 2))
return graph
def output(repository, file):
graph = retrieve(repository)
graph.project = model.Project(repository.origin, repository.commit, repository.owner, repository.name)
#
# write json
with open(file+'.json', 'wb+') as out_json:
json.dump(graph, out_json, default=model.default, indent=2)
#
# write csv
out_csv = csv.writer(open(file+'.csv', 'wb+'), delimiter=';')
out_csv.writerow(['name', 'group', 'id', 'url', 'complexity', 'quality'])
for i in range(len(graph.nodes)):
out_csv.writerow([graph.nodes[i].name, graph.nodes[i].group, graph.nodes[i].id, graph.nodes[i].url, graph.nodes[i].complexity, graph.nodes[i].quality])
|
mit
| 658,648,571,640,588,200 | 38.037736 | 159 | 0.61479 | false |
tikismoke/domogik-plugin-nabaztag
|
bin/nbz_tts.py
|
1
|
6701
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" This file is part of B{Domogik} project (U{http://www.domogik.org}).
License
=======
B{Domogik} is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
B{Domogik} is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Domogik. If not, see U{http://www.gnu.org/licenses}.
Plugin purpose
==============
Nabaztag TTS support
Implements
==========
- NBZNotificationListener
@author: Kriss1
@copyright: (C) 2007-2012 Domogik project
@license: GPL(v3)
@organization: Domogik
"""
from domogik.xpl.common.xplconnector import Listener
from domogik.xpl.common.plugin import XplPlugin
from domogik_packages.xpl.lib.nbz_tts import NBZNotification
from domogik.xpl.common.queryconfig import Query
class NBZNotificationListener(XplPlugin):
""" Create listener for xPL messages about Nabaztag TTS notification
"""
def __init__(self):
""" Create listener for Nabaztag TTS notification
"""
XplPlugin.__init__(self, name = 'nbz_tts')
# Create logger
self.log.debug("Listener for Nabaztag TTS notification created")
# Configuration : list of nabaztag whith serial, token and voice
self.alias_list = {}
num = 1
loop = True
self._config = Query(self.myxpl, self.log)
while loop == True:
nabaztag = self._config.query('nbz_tts', 'name-%s' % str(num))
server = self._config.query('nbz_tts', 'server-%s' % str(num))
serial = self._config.query('nbz_tts', 'serial-%s' % str(num))
token = self._config.query('nbz_tts', 'token-%s' % str(num))
voice = self._config.query('nbz_tts', 'voice-%s' % str(num))
if nabaztag != None:
mess="Configuration : nabaztag=" + str(nabaztag) + " , server=" + str(server) + " , serial=" + str(serial) + ", token=" + str(token) + ", voice=" + str(voice)
self.log.info(mess)
print(mess)
self.alias_list[nabaztag] = {"nabaztag" : nabaztag, "server" : server, "serial" : serial, "token" : token, "voice" : voice}
num += 1
else:
loop = False
# no nabaztag configured
if num == 1:
msg = "No nabaztag configured. Exiting plugin"
self.log.info(msg)
print(msg)
self.force_leave()
return
# Check server
for alias in self.alias_list:
if str(self.alias_list[alias]['server']) != "None":
self.log.debug("Server for nabaztag " + str(self.alias_list[alias]['nabaztag']) + " is " + str(self.alias_list[alias]['server']))
else:
self.log.error("Can't find the server adress for the nabaztag " + str(self.alias_list[alias]['nabaztag']) + " , please check the configuration page of this plugin")
self.force_leave()
return
# Check serial
for alias in self.alias_list:
if str(self.alias_list[alias]['serial']) != "None":
self.log.debug("Serial for nabaztag " + str(self.alias_list[alias]['nabaztag']) + " is " + str(self.alias_list[alias]['serial']))
else:
self.log.error("Can't find the serial for the nabaztag " + str(self.alias_list[alias]['nabaztag']) + " , please check the configuration page of this plugin")
self.force_leave()
return
# Check token
for alias in self.alias_list:
if str(self.alias_list[alias]['token']) != "None":
self.log.debug("Token for nabaztag " + str(self.alias_list[alias]['nabaztag']) + " is " + str(self.alias_list[alias]['token']))
else:
self.log.error("Can't find the Token for the nabaztag " + str(self.alias_list[alias]['nabaztag']) + " , please check the configuration page of this plugin")
self.force_leave()
return
# Check voice
for alias in self.alias_list:
if str(self.alias_list[alias]['voice']) != "None":
self.log.debug("Voice for nabaztag " + str(self.alias_list[alias]['nabaztag']) + " is " + str(self.alias_list[alias]['voice']))
else:
self.log.error("Can't find the Voice for the nabaztag " + str(self.alias_list[alias]['nabaztag']) + " , please check the configuration page of this plugin")
self.force_leave()
return
# Create NBZNotification object
self.nbz_notification_manager = NBZNotification(self.log)
# Create listeners
Listener(self.nbz_notification_cb, self.myxpl, {'schema': 'sendmsg.push', 'xpltype': 'xpl-cmnd'})
self.enable_hbeat()
def nbz_notification_cb(self, message):
""" Call Nabaztag TTS lib
@param message : message to send
"""
self.log.debug("Call nbz_notification_cb")
# mandatory keys
if 'to' in message.data:
to = message.data['to']
for alias in self.alias_list:
try:
if str(self.alias_list[alias]['nabaztag']) == str(to):
serverkey = self.alias_list[alias]['server']
serialkey = self.alias_list[alias]['serial']
tokenkey = self.alias_list[alias]['token']
voicekey = self.alias_list[alias]['voice']
except :
self.log.debug("Can't find the recipient, please check the configuration page of this plugin")
self.force_leave()
return
else:
self.log.warning("No recipient was found in the xpl message")
return
if 'body' in message.data:
body = message.data['body']
else:
self.log.warning("No message was found in the xpl message")
return
self.log.debug("Call send_tts with following parameters : server=" + serverkey + ", serial=" + serialkey + ", token=" + tokenkey + ", message=" + body + ", voice=" + voicekey)
self.nbz_notification_manager.send_tts(serverkey, serialkey, tokenkey, body, voicekey)
if __name__ == "__main__":
XN = NBZNotificationListener()
|
gpl-3.0
| 6,125,135,970,552,623,000 | 39.612121 | 183 | 0.584241 | false |
Open511/open511
|
open511/utils/timezone.py
|
1
|
1428
|
# Taken from django.utils.timezone
import datetime
from pytz import utc
def now():
return datetime.datetime.utcnow().replace(tzinfo=utc)
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize') and value not in (datetime.datetime.min, datetime.datetime.max):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
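# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == "__main__":
    aware = now()
    assert is_aware(aware) and not is_naive(aware)
    naive = make_naive(aware, utc)
    assert is_naive(naive)
    assert make_aware(naive, utc) == aware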
|
mit
| -7,766,526,149,958,879,000 | 27.019608 | 101 | 0.689076 | false |
kernoelpanic/PurePyECC
|
purepyecc/test/test_ec.py
|
1
|
3888
|
#!/usr/bin/env python
import unittest
import sys
sys.path.append("../") # to run it from test/
sys.path.append("./") # to run it from ../
import arithmetic0 as ar
import ec as ec
class TestEC(unittest.TestCase):
def test_EC_affine_point_mul_on_kc_small_k_pos(self):
gx=0x503213f78ca44883f1a3b8162f188e553cd265f23c1567a16876913b0c2ac2458492836L
gy=0x1ccda380f1c9e318d90f95d07e5426fe87e45c0e8184698e45962364e34116177dd2259L
f=0x800000000000000000000000000000000000000000000000000000000000000000010a1L
r=0x1ffffffffffffffffffffffffffffffffffe9ae2ed07577265dff7f94451e061e163c61L
kc = ec.KoblitzEC(283, 0b0,r, gx, gy, f, 6)
k = 1
x = 0x0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC2458492836L
y = 0x01CCDA380F1C9E318D90F95D07E5426FE87E45C0E8184698E45962364E34116177DD2259L
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "1 Incorrect point/scalar multiplication Q=k*G")
k = 2
x = 0x030AE969B9792D44BFDAE086DC6FA1039E52A459A545E78B57A1C9D749C1DC6FAEAF80CFL
y = 0x059D726AA1B70C5E9FFA46D6A1F912B31480BC3D8E0CAB1666497F16B970256427B2FC02L
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "2 Incorrect point/scalar multiplication Q=k*G")
k = 3
x = 0x015DCCC30A8B1F5146412D51FEC337741090321408AAC521391AD36C5912E280124FE3B5L
y = 0x053FC9BED137312952AD97F6A98C4C7AC1B421635FBAFE28898E9213D979D5B4D279F192L
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "3 Incorrect point/scalar multiplication Q=k*G")
k = 4
x = 0x03949AFAEDDDE457A6B7F17129776A4EA5C5C671594A553C5F1DFC1C2C6C5D36CC6F7B91L
y = 0x0286EE1883F14F990BD23310F6212E0CB2578DE1DC43C6B52729D57A5FE072317C1AFB8EL
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "4 Incorrect point/scalar multiplication Q=k*G")
k = 112233445566778899
x = 0x02A97071496676C2FF9F345FA007C678FC9D86B423C8AB17AD9A1374936847AFE60611F4L
y = 0x0034DA6E836869547366E006FDDACBB27ABD7DD5C8EFA4F17CFDCF92C033DAF9D2812FCBL
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "5 Incorrect point/scalar multiplication Q=k*G")
def test_EC_affine_point_mul_on_kc_xxl_k_pos(self):
gx=0x503213f78ca44883f1a3b8162f188e553cd265f23c1567a16876913b0c2ac2458492836L
gy=0x1ccda380f1c9e318d90f95d07e5426fe87e45c0e8184698e45962364e34116177dd2259L
f=0x800000000000000000000000000000000000000000000000000000000000000000010a1L
r=0x1ffffffffffffffffffffffffffffffffffe9ae2ed07577265dff7f94451e061e163c61L
kc = ec.KoblitzEC(283, 0b0,r, gx, gy, f, 6)
k = 3885337784451458141838923813647037813284811733793061324295874997529815829704422603872
x = 0x0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC2458492836L
y = 0x04CFFB0777D6DAB9B28AC2DC6514CA8ABBB3639FCBD910E2F2DE0B25FEF6BD452F940A6FL
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "1 Incorrect point/scalar multiplication Q=k*G")
k = 3885337784451458141838923813647037813284811733793061324295874997529815829704422603871
x = 0x030AE969B9792D44BFDAE086DC6FA1039E52A459A545E78B57A1C9D749C1DC6FAEAF80CFL
y = 0x06979B0318CE211A2020A6507D96B3B08AD218642B494C9D31E8B6C1F0B1F90B891D7CCDL
Q = kc.affine_point_mul(k, gx, gy)
self.assertEqual(Q, (x,y), "2 Incorrect point/scalar multiplication Q=k*G")
def test_EC_affine_point_mul_on_kc_neg(self):
# Not meant to be called with other values than int or long
#self.failUnlessRaises(TypeError, ar._testBit, 0.0, 1)
#self.failUnlessRaises(TypeError, ar._testBit, 0b101, 1.0)
pass
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| 9,057,137,990,319,432,000 | 50.84 | 97 | 0.742027 | false |
xfire/pydzen
|
plugins/timer.py
|
1
|
1485
|
#
# Copyright (C) 2008 Rico Schiekel (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import time
import datetime
import logging
from pydzen import utils
logger = logging.getLogger('plugin.cpu')
def format_td(seconds):
td = datetime.timedelta(seconds = seconds)
sec = td.days * 24 * 60 * 60 + td.seconds
min, sec = divmod(sec, 60)
hrs, min = divmod(min, 60)
return '%02d:%02d:%02d' % (hrs, min, sec)
@utils.cache(5)
def update():
try:
uptime, idletime = [float(field) for field in open('/proc/uptime').read().split()]
return ['^fg()^bg()%s ' % time.strftime('%Y-%m-%d - %H:%M'),
'^fg()^bg()uptime: %s ' % format_td(uptime)]
except StandardError, e:
logger.warn(e)
return None
|
gpl-2.0
| -5,611,993,249,013,805,000 | 32 | 90 | 0.682155 | false |
T-002/pycast
|
pycast/tests/weightedmapetest.py
|
1
|
2864
|
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# required external modules
import unittest
# required modules from pycast
from pycast.errors.weightedmeanabsolutepercentageerror import WeightedMeanAbsolutePercentageError
from pycast.common.timeseries import TimeSeries
class WeightedMeanAbsolutePercentageErrorTest(unittest.TestCase):
"""Test class containing all tests for WeightedMeanAbsolutePercentageError."""
def local_error_test(self):
orgValues = [11, 33.1, 2.3, 6.54, 123.1, 12.54, 12.9]
calValues = [24, 1.23, 342, 1.21, 4.112, 9.543, 3.54]
resValues = ['118.181', '192.567', '14769.5', '162.996', '193.319', '47.7990', '145.116']
wmape = WeightedMeanAbsolutePercentageError()
for idx in xrange(len(orgValues)):
localError = wmape.local_error([orgValues[idx]], [calValues[idx]])
assert str(resValues[idx]) == str(localError)[:7], str(resValues[idx]) + '!=' + str(localError)[:7]
def error_calculation_test(self):
"""Test the calculation of the MeanAbsolutePercentageError."""
dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]
dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]
# abs difference: 2 3 3 1 0 NA 5 0 NA 1
# local errors: 200 150 200 50 0 NA 125 0 NA 20
# sum: 745
tsOrg = TimeSeries.from_twodim_list(dataOrg)
tsCalc = TimeSeries.from_twodim_list(dataCalc)
wmape = WeightedMeanAbsolutePercentageError()
wmape.initialize(tsOrg, tsCalc)
assert str(wmape.get_error())[:6] == "93.125"
|
mit
| -2,882,767,016,465,337,000 | 47.542373 | 111 | 0.65852 | false |
heuer/segno-mimos
|
segno_mimos/qrcode/image/base.py
|
1
|
1850
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011, Lincoln Loop
# All rights reserved.
#
# License: BSD License
#
class BaseImage(object):
"""
Base QRCode image output class.
"""
kind = None
allowed_kinds = None
def __init__(self, border, width, box_size, *args, **kwargs):
self.border = border
self.width = width
self.box_size = box_size
self.pixel_size = (self.width + self.border*2) * self.box_size
self._img = self.new_image(**kwargs)
def drawrect(self, row, col):
"""
Draw a single rectangle of the QR code.
"""
raise NotImplementedError("BaseImage.drawrect")
def save(self, stream, kind=None):
"""
Save the image file.
"""
raise NotImplementedError("BaseImage.save")
def pixel_box(self, row, col):
"""
A helper method for pixel-based image generators that specifies the
four pixel coordinates for a single rect.
"""
x = (col + self.border) * self.box_size
y = (row + self.border) * self.box_size
return [(x, y), (x + self.box_size - 1, y + self.box_size - 1)]
def new_image(self, **kwargs): # pragma: no cover
"""
Build the image class. Subclasses should return the class created.
"""
return None
def check_kind(self, kind, transform=None):
"""
Get the image type.
"""
if kind is None:
kind = self.kind
allowed = not self.allowed_kinds or kind in self.allowed_kinds
if transform:
kind = transform(kind)
if not allowed:
allowed = kind in self.allowed_kinds
if not allowed:
raise ValueError(
"Cannot set %s type to %s" % (type(self).__name__, kind))
return kind
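# --- Illustrative sketch (not part of the original module) -----------------
# A minimal, hypothetical subclass showing how the BaseImage hooks fit
# together: new_image() builds the backing store, drawrect() marks one QR
# module, save() serialises the result.  Real backends (PIL, SVG) live in
# sibling modules of the qrcode package; this text backend is only a sketch.
class TextImage(BaseImage):
    kind = "TXT"
    allowed_kinds = ("TXT",)
    def new_image(self, **kwargs):
        # one boolean per pixel, initially all "white"
        return [[False] * self.pixel_size for _ in range(self.pixel_size)]
    def drawrect(self, row, col):
        (x0, y0), (x1, y1) = self.pixel_box(row, col)
        for y in range(y0, y1 + 1):
            for x in range(x0, x1 + 1):
                self._img[y][x] = True
    def save(self, stream, kind=None):
        self.check_kind(kind)
        for line in self._img:
            stream.write("".join("#" if px else " " for px in line) + "\n")
# Usage (hypothetical): img = TextImage(border=4, width=21, box_size=1)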
|
bsd-3-clause
| -7,150,356,902,161,042,000 | 28.365079 | 75 | 0.552432 | false |
kobotoolbox/kobocat
|
onadata/libs/utils/viewer_tools.py
|
1
|
8723
|
# coding: utf-8
import os
import json
import traceback
import requests
import zipfile
from tempfile import NamedTemporaryFile
from xml.dom import minidom
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.mail import mail_admins
from django.utils.translation import ugettext as _
from onadata.libs.utils import common_tags
SLASH = "/"
class MyError(Exception):
pass
class EnketoError(Exception):
pass
def image_urls_for_form(xform):
return sum([
image_urls(s) for s in xform.instances.all()
], [])
def get_path(path, suffix):
fileName, fileExtension = os.path.splitext(path)
return fileName + suffix + fileExtension
# TODO VERIFY IF STILL USED
def image_urls(instance):
image_urls_dict_ = image_urls_dict(instance)
return image_urls_dict_.values()
def image_urls_dict(instance):
"""
    Return a dict of attachments keyed by base filename, with values that
    link through the `kobocat` redirector.
    Only the `suffix` (thumbnail) version is exposed; it is created on the
    fly by the redirector.
:param instance: Instance
:return: dict
"""
urls = dict()
# Remove leading dash from suffix
suffix = settings.THUMB_CONF['medium']['suffix'][1:]
for a in instance.attachments.all():
urls[a.filename] = a.secure_url(suffix=suffix)
return urls
def parse_xform_instance(xml_str):
"""
'xml_str' is a str object holding the XML of an XForm
instance. Return a python object representation of this XML file.
"""
xml_obj = minidom.parseString(xml_str)
root_node = xml_obj.documentElement
# go through the xml object creating a corresponding python object
# NOTE: THIS WILL DESTROY ANY DATA COLLECTED WITH REPEATABLE NODES
# THIS IS OKAY FOR OUR USE CASE, BUT OTHER USERS SHOULD BEWARE.
survey_data = dict(_path_value_pairs(root_node))
assert len(list(_all_attributes(root_node))) == 1, \
_("There should be exactly one attribute in this document.")
survey_data.update({
common_tags.XFORM_ID_STRING: root_node.getAttribute("id"),
common_tags.INSTANCE_DOC_NAME: root_node.nodeName,
})
return survey_data
def _path(node):
n = node
levels = []
while n.nodeType != n.DOCUMENT_NODE:
levels = [n.nodeName] + levels
n = n.parentNode
return SLASH.join(levels[1:])
def _path_value_pairs(node):
"""
Using a depth first traversal of the xml nodes build up a python
object in parent that holds the tree structure of the data.
"""
if len(node.childNodes) == 0:
# there's no data for this leaf node
yield _path(node), None
elif len(node.childNodes) == 1 and \
node.childNodes[0].nodeType == node.TEXT_NODE:
# there is data for this leaf node
yield _path(node), node.childNodes[0].nodeValue
else:
# this is an internal node
for child in node.childNodes:
for pair in _path_value_pairs(child):
yield pair
def _all_attributes(node):
"""
Go through an XML document returning all the attributes we see.
"""
if hasattr(node, "hasAttributes") and node.hasAttributes():
for key in node.attributes.keys():
yield key, node.getAttribute(key)
for child in node.childNodes:
for pair in _all_attributes(child):
yield pair
def report_exception(subject, info, exc_info=None):
if exc_info:
cls, err = exc_info[:2]
info += _("Exception in request: %(class)s: %(error)s") \
% {'class': cls.__name__, 'error': err}
info += "".join(traceback.format_exception(*exc_info))
if settings.DEBUG:
print(subject)
print(info)
else:
mail_admins(subject=subject, message=info)
def django_file(path, field_name, content_type):
# adapted from here: http://groups.google.com/group/django-users/browse_th\
# read/thread/834f988876ff3c45/
f = open(path, 'rb')
return InMemoryUploadedFile(
file=f,
field_name=field_name,
name=f.name,
content_type=content_type,
size=os.path.getsize(path),
charset=None
)
def export_def_from_filename(filename):
# TODO fix circular import and move to top
from onadata.apps.viewer.models.export import Export
path, ext = os.path.splitext(filename)
ext = ext[1:]
# try get the def from extension
mime_type = Export.EXPORT_MIMES[ext]
return ext, mime_type
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def enketo_url(form_url, id_string, instance_xml=None,
instance_id=None, return_url=None, instance_attachments=None):
if not hasattr(settings, 'ENKETO_URL')\
and not hasattr(settings, 'ENKETO_API_SURVEY_PATH'):
return False
if instance_attachments is None:
instance_attachments = {}
url = settings.ENKETO_URL + settings.ENKETO_API_SURVEY_PATH
values = {
'form_id': id_string,
'server_url': form_url
}
if instance_id is not None and instance_xml is not None:
url = settings.ENKETO_URL + settings.ENKETO_API_INSTANCE_PATH
values.update({
'instance': instance_xml,
'instance_id': instance_id,
'return_url': return_url
})
for key, value in instance_attachments.items():
values.update({
'instance_attachments[' + key + ']': value
})
req = requests.post(url, data=values,
auth=(settings.ENKETO_API_TOKEN, ''), verify=False)
if req.status_code in [200, 201]:
try:
response = req.json()
except ValueError:
pass
else:
if 'edit_url' in response:
return response['edit_url']
if settings.ENKETO_OFFLINE_SURVEYS and ('offline_url' in response):
return response['offline_url']
if 'url' in response:
return response['url']
else:
try:
response = req.json()
except ValueError:
pass
else:
if 'message' in response:
raise EnketoError(response['message'])
return False
def create_attachments_zipfile(attachments, output_file=None):
if not output_file:
output_file = NamedTemporaryFile()
else:
# Disable seeking in a way understood by Python's zipfile module. See
# https://github.com/python/cpython/blob/ca2009d72a52a98bf43aafa9ad270a4fcfabfc89/Lib/zipfile.py#L1270-L1274
# This is a workaround for https://github.com/kobotoolbox/kobocat/issues/475
# and https://github.com/jschneier/django-storages/issues/566
def no_seeking(*a, **kw):
raise AttributeError(
'Seeking disabled! See '
'https://github.com/kobotoolbox/kobocat/issues/475'
)
output_file.seek = no_seeking
storage = get_storage_class()()
with zipfile.ZipFile(output_file, 'w', zipfile.ZIP_STORED, allowZip64=True) as zip_file:
for attachment in attachments:
if storage.exists(attachment.media_file.name):
try:
with storage.open(attachment.media_file.name, 'rb') as source_file:
zip_file.writestr(attachment.media_file.name, source_file.read())
except Exception as e:
report_exception("Error adding file \"{}\" to archive.".format(attachment.media_file.name), e)
return output_file
def _get_form_url(username):
if settings.TESTING_MODE:
http_host = 'http://{}'.format(settings.TEST_HTTP_HOST)
username = settings.TEST_USERNAME
else:
# Always use a public url to prevent Enketo SSRF from blocking request
http_host = settings.KOBOCAT_URL
# Internal requests use the public url, KOBOCAT_URL already has the protocol
return '{http_host}/{username}'.format(
http_host=http_host,
username=username
)
def get_enketo_edit_url(request, instance, return_url):
form_url = _get_form_url(instance.xform.user.username)
instance_attachments = image_urls_dict(instance)
url = enketo_url(
form_url, instance.xform.id_string, instance_xml=instance.xml,
instance_id=instance.uuid, return_url=return_url,
instance_attachments=instance_attachments)
return url
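# --- Illustrative usage sketch (not part of the original module) -----------
# parse_xform_instance() works on any single-rooted XML instance string with
# exactly one attribute on the root; the form id and fields below are
# hypothetical example values.
if __name__ == "__main__":
    xml = '<data id="example_form"><name>Ada</name><age>36</age></data>'
    print(parse_xform_instance(xml))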
|
bsd-2-clause
| -4,834,902,923,868,209,000 | 30.490975 | 116 | 0.633039 | false |
nos86/VSCP-Helper-for-Python
|
vscphelper/vscplib.py
|
1
|
11086
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Musumeci Salvatore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from vscphelper import websocket
from vscphelper.exception import *
from eventlet.timeout import Timeout
from time import sleep, time
import vscphelper.VSCPConstant as constant
import socket
import signal
import collections
import datetime
from hashlib import md5
import logging
logger = logging.getLogger(__name__)
class vscpEvent:
def __init__(self,
vscp_class,
vscp_type,
vscp_data=[],
GUID='FF:FF:FF:FF:FF:FF:FF:FC:00:00:00:00:00:00:00:00',
head=0,
obid=0,
timestamp=None):
""" Parameters are:
head
vscp_class int: VSCP Event Class
vscp_type int: VSCP Event Type
obid
timestamp int: timestamp of message
guid string: Global Unique IDentifier
data int[8]: Data of message (variable length up to 8)
"""
self.head = int(head)
self.vscp_class = int(vscp_class)
self.vscp_type = int(vscp_type)
self.obid = int(obid)
if timestamp == None:
self.timestamp = int(time())
else:
self.timestamp = int(timestamp)
self.guid = str(GUID)
self.data = []
for i in range(0,len(vscp_data)):
try:
self.data.append(int(vscp_data[i]))
except ValueError:
self.data.append(int(vscp_data[i],16))
def getHead(self):
return self.head
def getClass(self):
return self.vscp_class
def getType(self, ):
return self.vscp_type
def getNodeId(self):
return int(self.guid[-2:],16)
def getObID(self, ):
return self.obid
def getGUID(self, ):
return self.guid
def getTimestamp(self, ):
return self.timestamp
def getUTCDateTime(self, format="%d-%m-%Y %H:%M:%S"):
return datetime.datetime.utcfromtimestamp(int(self.timestamp)).strftime(format)
def getLocalDateTime(self, format="%d-%m-%Y %H:%M:%S"):
return datetime.datetime.fromtimestamp(int(self.timestamp)).strftime(format)
def getDataLength(self):
return len(self.data)
def getData(self, ):
return self.data
def __str__(self):
data = [self.head, self.vscp_class, self.vscp_type, self.obid, self.timestamp, self.guid]
for i in range(0, len(self.data)):
data.append(self.data[i])
for i in range(0, len(data)):
data[i] = str(data[i])
return "E;"+','.join(data)
@classmethod
def fromAnswer(cls, answer):
if not isinstance(answer, websocket.answer):
raise ValueError("Answer is not a vscphelper.websocket.answer object")
if answer.getType()!="Event":
raise ValueError("Impossible to init a vscpEvent using an answer that is not an Event one")
temp = str(answer.msg[1]).split(",")
data = []
if len(temp)>5:
for i in range(6, len(temp)):
data.append(temp[i])
return cls(head = temp[0],
vscp_class = temp[1],
vscp_type = temp[2],
obid = temp[3],
timestamp = temp[4],
GUID = temp[5],
vscp_data = data)
class vscp:
def __init__(self, hostname='127.0.0.1', port='8080', user='admin', password='secret', domain='mydomain.com', timeout=2):
self.queue = collections.deque()
self.eventStreaming = False
self.authenticated = False
self.ws = websocket.websocket(hostname=hostname, port=port, eventCallback = self.__eventCallback) #Open websocket
self.ws.setTimeout(timeout)
self.user = user
self.handler = None
for i in range(0,50):
sleep(0.01)
if self.ws.seed is not None:
break
if self.ws.seed == None:
raise VSCPNoCommException("No AUTH0 is received by websocket")
else:
key = self.calculateKey(user, password, domain)
command = ";".join(["AUTH", self.user, key])
answer = self.ws.send("C;"+command).isPositiveAnswer()
if answer != constant.VSCP_ERROR_SUCCESS:
raise VSCPException(constant.VSCP_ERROR_NOT_AUTHORIZED)
self.authenticated = True
def calculateKey(self, user, password, domain):
passwordHash = md5((user+":"+domain+":"+password).encode('utf-8')).hexdigest()
return md5((user+":"+passwordHash+":"+self.ws.seed).encode('utf-8')).hexdigest()
def setHandler(self, fnct):
if callable(fnct) == False:
raise ValueError("argument must be a callable function")
self.handler = fnct
def setResponseTimeOut(self, timeout=2):
"""This is the timeout in seconds used when checking for replies after commands has been sent to the server.
The value is also used multiplied with three as a connection timeout.
It can be changed anytime during a communication session.
Default value is 2 seconds.
"""
if timeout>0:
self.ws.setTimeout(timeout)
else:
raise ValueError("Timeout must be be greater than 0")
def isConnected(self):
"""Check if the session is active or not and returns:
VSCP_ERROR_SUCCESS if websocket is opened and user is authenticated
VSCP_ERROR_ERROR if at least user is not autheticated
"""
if self.ws.connected and self.authenticated:
return constant.VSCP_ERROR_SUCCESS
else:
return constant.VSCP_ERROR_ERROR
def doCommand(self, command="NOOP"):
"""Send a command over the communication link.
The response from the server will be checked for +;COMMAND
Return value should be:
VSCP_ERROR_SUCCESS if the VSCP daemon respond with +OK after it has received the command
VSCP_ERROR_ERROR if not (-OK) or no response before the timeout expires.
VSCP_ERROR_CONNECTION is returned if the communication channel is not open.
"""
if not (self.isConnected()==constant.VSCP_ERROR_SUCCESS):
return constant.VSCP_ERROR_CONNECTION
try:
answer = self.ws.send("C;"+command)
return answer.isPositiveAnswer()
except VSCPException:
return constant.VSCP_ERROR_ERROR
def clearDaemonEventQueue(self):
""" Clear the receiving side (to us) event queue on the VSCP daemon
VSCP_ERROR_SUCCESS if the VSCP daemon cleared the queue
VSCP_ERROR_ERROR if not or no response is received before the timeout expires.
VSCP_ERROR_CONNECTION is returned if the communication channel is not open
"""
return self.doCommand("CLRQUEUE")
def enterReceiveLoop(self):
"""Enter the receive loop.
Return
VSCP_ERROR_SUCCESS on success
VSCP_ERROR_ERROR on failure.
VSCP_ERROR_CONNECTION If the connection is closed.
"""
self.eventStreaming = True
return self.doCommand("OPEN")
def quitReceiveLoop(self):
"""Quit the receive loop.
Return
VSCP_ERROR_SUCCESS on success
VSCP_ERROR_ERROR on failure.
VSCP_ERROR_CONNECTION If the connection is closed.
"""
if self.eventStreaming:
self.doCommand("CLOSE")
self.eventStreaming = False
def noop(self):
"""This is a command that can be used for test purposes.
It does not do anything else then to send a command over the interfaces and check the result.
Returns
VSCP_ERROR_SUCCESS on success
VSCP_ERROR_ERROR on failure.
VSCP_ERROR_CONNECTION If the connection is closed.
"""
self.doCommand("NOOP")
def sendEvent(self, event):
"""Send an event over the communication link.
The response from the server will be checked for +;EVENT
Return value should be:
VSCP_ERROR_SUCCESS if the VSCP daemon respond with +OK after it has received the command
VSCP_ERROR_ERROR if not (-OK) or no response before the timeout expires.
VSCP_ERROR_CONNECTION is returned if the communication channel is not open.
"""
if not isinstance(event, vscpEvent):
raise ValueError("event must be a vscpEvent object")
if not (self.isConnected()==constant.VSCP_ERROR_SUCCESS):
return constant.VSCP_ERROR_CONNECTION
try:
answer = self.ws.send(str(event))
return answer.isPositiveAnswer()
except VSCPException:
return constant.VSCP_ERROR_ERROR
def sendSimpleEvent(self, vscp_class=0, vscp_type=2, vscp_data=[1]):
"""Fast Method to send an event.
It will create the event object and it'll send it over CAN
Expected return / exceptions are the same of sendEvent method
"""
event = vscpEvent(vscp_class, vscp_type, vscp_data)
return self.sendEvent(event)
def receiveEvent(self):
"""Receive one VSCP event from the remote VSCP daemon.
Returns VSCPEvent object if available, otherwise None is returned
"""
if self.isDataAvailable():
return self.queue.pop()
else:
return None
def isDataAvailable(self):
"""Check the number of events (if any) that are available in the remote input queue
Returns number of events ready to be read
"""
return len(self.queue)>0
def blockingReceiveEvent(self, timeout = 2):
"""Blocking receive event.
        As soon as a new event is received, it is returned to the caller.
        If the connection is closed or the user is not authenticated, VSCPException(VSCP_ERROR_CONNECTION) is raised.
        If the receive loop is closed, VSCPException(VSCP_ERROR_OPERATION_FAILED) is raised.
        To avoid waiting forever, a timeout is used; if no message arrives within that time,
        VSCPException(VSCP_ERROR_TIMEOUT) is raised.
"""
if not isinstance(timeout, int):
raise ValueError("Timeout must be a non-negative integer number")
if timeout < 0:
raise ValueError("Timeout must be a non-negative integer number")
if self.eventStreaming == False:
raise VSCPException(constant.VSCP_ERROR_OPERATION_FAILED)
timer = Timeout(timeout, self.__errorOnTimeout)
while(self.isDataAvailable()==0):
if self.authenticated == False or self.ws.connected == False:
raise VSCPException(constant.VSCP_ERROR_CONNECTION)
sleep(0.1)
timer.cancel()
return self.receiveEvent()
def setFilter(self):
pass
def __errorOnTimeout(self, ):
raise VSCPException(constant.VSCP_ERROR_TIMEOUT)
def __eventCallback(self, answer):
self.queue.appendleft(vscpEvent.fromAnswer(answer))
if not self.handler is None:
self.handler()
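# --- Illustrative usage sketch (not part of the original module) -----------
# Assumes a VSCP daemon with its websocket interface reachable on
# localhost:8080 with the default credentials; the class/type/data values
# below are arbitrary example values.
if __name__ == "__main__":
    con = vscp(hostname="127.0.0.1", port="8080",
               user="admin", password="secret", domain="mydomain.com")
    if con.isConnected() == constant.VSCP_ERROR_SUCCESS:
        con.enterReceiveLoop()
        con.sendSimpleEvent(vscp_class=20, vscp_type=3, vscp_data=[0, 0, 0])
        try:
            print(con.blockingReceiveEvent(timeout=5))
        except VSCPException:
            pass
        con.quitReceiveLoop()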
|
mit
| 6,163,762,167,187,106,000 | 33.883495 | 122 | 0.689789 | false |
jalanb/dotsite
|
pysyte/types/test/test_paths.py
|
1
|
4059
|
"""Test paths module"""
import random
from unittest import TestCase
from pysyte.types import paths
class SourcePath(paths.FilePath):
# pylint: disable=abstract-method
pass
SourcePath.__file_class__ = SourcePath
class MockFilePathWithLines(paths.FilePath):
# pylint: disable=abstract-method
"""Mock some known lines into a file"""
def lines(self, encoding=None, errors='strict', retain=True):
return [
'\n',
'line ends with spaces ',
'# comment\n',
'Normal line\n'
]
class TestPaths(TestCase):
def setUp(self):
self.path = paths.path(__file__).extend_by('py')
self.dir = self.path.parent
self.source = paths.path(paths.__file__.rstrip('c')).extend_by('py')
def test_path_error(self):
self.assertRaises(
paths.PathError,
paths.path('/not/a/path').assert_exists)
def test_not_path_error(self):
p = paths.path('/')
self.assertEqual(p, p.assert_exists())
def test_assert_isfile(self):
self.assertEqual(
self.path,
self.path.assert_isfile())
def test_assert_not_isfile(self):
self.assertRaises(
paths.PathError,
self.dir.assert_isfile)
def test_assert_isdir(self):
self.assertEqual(
self.dir,
self.dir.assert_isdir())
def test_assert_not_isdir(self):
self.assertRaises(
paths.PathError,
self.path.assert_isdir)
def test_join_with_nothing(self):
self.assertEqual(
self.dir,
self.dir / '')
def test_file_class(self):
path = SourcePath('').as_existing_file(str(self.source))
self.assertTrue(path.isfile)
self.assertIs(path.__file_class__, SourcePath)
def test_dirnames(self):
self.assertIn(self.dir.name, self.path.dirnames())
def test_stripped_lines(self):
path = MockFilePathWithLines('/not/a/real/file')
line = random.choice(path.lines())
self.assertTrue(line[-1].isspace())
line = random.choice(path.stripped_lines())
if line:
self.assertFalse(line[-1].isspace())
def test_stripped_whole_lines(self):
path = MockFilePathWithLines('/not/a/real/file')
self.assertTrue([l for l in path.stripped_lines() if not l])
self.assertFalse([l for l in path.stripped_whole_lines() if not l])
def test_non_comment_lines(self):
path = MockFilePathWithLines('/not/a/real/file')
self.assertTrue([l for l in path.stripped_whole_lines()
if l.startswith('#')])
self.assertFalse([l for l in path.non_comment_lines()
if l.startswith('#')])
def test_has_line(self):
path = MockFilePathWithLines('/not/a/real/file')
self.assertTrue(path.has_line('Normal line'))
self.assertFalse(path.has_line('Normal'))
self.assertFalse(path.has_line('Abnormal line'))
def test_any_line_has(self):
path = MockFilePathWithLines('/not/a/real/file')
self.assertTrue(path.any_line_has('Normal line'))
self.assertTrue(path.any_line_has('Normal'))
self.assertFalse(path.any_line_has('Abnormal line'))
def test_directory(self):
self.assertEqual(self.path.directory(), self.path.parent)
def test_no_div_for_file(self):
path = paths.FilePath(__file__)
with self.assertRaises(paths.PathError):
path / 'fred'
def test_directory_iteration(self):
for item in self.path.directory():
if item == self.path:
break
else:
self.fail('Could not find %s' % self.path)
def test_vcs_dirs(self):
self.assertTrue(paths.path('/usr/.git/fred').has_vcs_dir())
self.assertTrue(paths.path('/usr/local/.svn').has_vcs_dir())
self.assertTrue(paths.path('/.hg/etc').has_vcs_dir())
self.assertFalse(paths.path('/usr/local/bin').has_vcs_dir())
|
mit
| -4,556,598,913,420,792,300 | 29.984733 | 76 | 0.595467 | false |
epfl-idevelop/jahia2wp
|
src/crawler/tests/test_crawler.py
|
1
|
5461
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017
Testing the crawl.py script
"""
import os
import pytest
import requests_mock
import settings
from datetime import datetime
from importlib import reload
from crawler import JahiaConfig, SessionHandler, JahiaCrawler, download_many
CURRENT_DIR = os.path.dirname(__file__)
TEST_FILE = "one-site_export_2017-10-11-05-03.zip"
TEST_JAHIA_SITE = "one-site"
TEST_USER = "foo"
TEST_PASSWORD = "bar"
TEST_HOST = "localhost"
@pytest.fixture()
def delete_environment(request):
"""
Delete all env. vars
"""
for env_var in ["JAHIA_USER", "JAHIA_PASSWORD", "JAHIA_HOST"]:
if os.environ.get(env_var):
del os.environ[env_var]
reload(settings)
@pytest.fixture()
def environment(request):
"""
Load fake environment variables for every test
"""
os.environ["JAHIA_HOST"] = TEST_HOST
os.environ["JAHIA_USER"] = TEST_USER
os.environ["JAHIA_PASSWORD"] = TEST_PASSWORD
reload(settings)
return os.environ
@pytest.fixture(scope='module')
def session_handler(request):
url = '{}://localhost/administration?redirectTo=%2Fadministration%3Fnull&do=processlogin'\
.format(settings.JAHIA_PROTOCOL)
# data_file = 'session.data'
# with requests_mock.Mocker() as mocker, open(data_file, 'r') as input:
with requests_mock.Mocker() as mocker:
# set mock response
mocker.post(url, text="session")
# make query
handler = SessionHandler(username=TEST_USER, password=TEST_PASSWORD)
handler.session
return handler
class TestConfig(object):
def test_with_no_env(self, delete_environment):
config = JahiaConfig(TEST_JAHIA_SITE)
assert config.host == "localhost"
def test_with_var_env(self, environment):
config = JahiaConfig(TEST_JAHIA_SITE, date=datetime(2017, 10, 11, 5, 3))
assert config.host == TEST_HOST
assert config.file_url == "{}://{}/{}/one-site_export_2017-10-11-05-03.zip"\
.format(settings.JAHIA_PROTOCOL, TEST_HOST, JahiaConfig.JAHIA_DOWNLOAD_URI)
def test_config_with_kwargs(self, environment):
config = JahiaConfig(TEST_JAHIA_SITE, host="epfl.ch", date=datetime(2017, 10, 11, 5, 3))
assert config.host == "epfl.ch"
assert config.file_url == "{}://epfl.ch/{}/one-site_export_2017-10-11-05-03.zip"\
.format(settings.JAHIA_PROTOCOL, JahiaConfig.JAHIA_DOWNLOAD_URI)
def test_existing_files(self, environment):
config = JahiaConfig(TEST_JAHIA_SITE, zip_path=CURRENT_DIR)
assert config.already_downloaded is True
assert config.existing_files[-1].endswith(TEST_FILE)
def test_non_existing_files(self):
config = JahiaConfig("not-downloaded-site", zip_path=CURRENT_DIR)
assert config.already_downloaded is False
class TestSession(object):
def test_fail_with_missing_env(self, delete_environment):
with pytest.raises(Exception):
SessionHandler()
def test_default_parameters(self, delete_environment):
os.environ["JAHIA_PASSWORD"] = TEST_PASSWORD
session = SessionHandler()
assert session.username == settings.JAHIA_USER
assert session.password == TEST_PASSWORD
assert session.host == settings.JAHIA_HOST
assert session.post_url == "{}://{}/administration".format(settings.JAHIA_PROTOCOL, settings.JAHIA_HOST)
assert session.credentials == {
'login_username': settings.JAHIA_USER,
'login_password': TEST_PASSWORD
}
def test_session_with_kwargs(self, environment):
session = SessionHandler(username="bob", password="bob's secret", host="epfl.ch")
assert session.username == "bob"
assert session.password == "bob's secret"
assert session.host == "epfl.ch"
assert session.post_url == "{}://epfl.ch/administration".format(settings.JAHIA_PROTOCOL)
assert session.credentials == {
'login_username': "bob",
'login_password': "bob's secret"
}
def test_session(self, session_handler):
assert session_handler.session
assert session_handler._session is not None
class TestCrawler(object):
def test_download_existing(self, session_handler):
crawler = JahiaCrawler(TEST_JAHIA_SITE, zip_path=CURRENT_DIR)
assert crawler.download_site().endswith(TEST_FILE)
def test_download_non_existing(self, session_handler):
url = '{}://localhost/{}/non-existing-site_export_2017-10-11-05-03.zip?' \
'do=sites&sitebox=non-existing-site&exportformat=site' \
.format(settings.JAHIA_PROTOCOL, JahiaConfig.JAHIA_DOWNLOAD_URI)
zip_path = os.path.join(CURRENT_DIR, TEST_FILE)
with requests_mock.Mocker() as mocker, open(zip_path, 'rb') as input:
# set mock response
mocker.post(url, body=input)
# make query
crawler = JahiaCrawler("non-existing-site", session=session_handler, date=datetime(2017, 10, 11, 5, 3))
downloaded_path = crawler.download_site()
assert downloaded_path.endswith('non-existing-site_export_2017-10-11-05-03.zip')
os.remove(downloaded_path)
def test_download_many(self, session_handler):
assert TEST_JAHIA_SITE in download_many([TEST_JAHIA_SITE], zip_path=CURRENT_DIR, session=session_handler)
|
mit
| 3,712,136,505,517,033,000 | 37.188811 | 115 | 0.659769 | false |
jajcayn/pyclits
|
pyclits/functions.py
|
1
|
5808
|
"""
created on Sep 22, 2017
@author: Nikola Jajcay, jajcay(at)cs.cas.cz
"""
import numpy as np
def cross_correlation(a, b, max_lag):
"""
Cross correlation with lag.
When computing cross-correlation, the first parameter, a, is
in 'future' with positive lag and in 'past' with negative lag.
"""
a = (a - np.mean(a)) / (np.std(a, ddof = 1) * (len(a) - 1))
b = (b - np.mean(b)) / np.std(b, ddof = 1)
cor = np.correlate(a, b, 'full')
return cor[len(cor)//2 - max_lag : len(cor)//2 + max_lag+1]
def kdensity_estimate(a, kernel = 'gaussian', bandwidth = 1.0):
"""
Estimates kernel density. Uses sklearn.
kernels: 'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'
"""
from sklearn.neighbors import KernelDensity
a = a[:, None]
x = np.linspace(a.min(), a.max(), 100)[:, None]
kde = KernelDensity(kernel = kernel, bandwidth = bandwidth).fit(a)
logkde = kde.score_samples(x)
return np.squeeze(x), np.exp(logkde)
def detrend_with_return(arr, axis = 0):
"""
Removes the linear trend along the axis, ignoring Nans.
"""
a = arr.copy()
rnk = len(a.shape)
# determine axis
if axis < 0:
axis += rnk # axis -1 means along last dimension
    # reshape so that the chosen axis becomes the 1st dimension and all other dimensions are rolled into the 2nd dimension
newdims = np.r_[axis, 0:axis, axis + 1:rnk]
newdata = np.reshape(np.transpose(a, tuple(newdims)), (a.shape[axis], np.prod(a.shape, axis = 0) // a.shape[axis]))
newdata = newdata.copy()
# compute linear fit as least squared residuals
x = np.arange(0, a.shape[axis], 1)
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, newdata)[0]
# remove the trend from the data along 1. axis
for i in range(a.shape[axis]):
newdata[i, ...] = newdata[i, ...] - (m*x[i] + c)
# reshape back to original shape
tdshape = np.take(a.shape, newdims, 0)
ret = np.reshape(newdata, tuple(tdshape))
vals = list(range(1,rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = np.transpose(ret, tuple(olddims))
# return detrended data and linear coefficient
return ret, m, c
def partial_corr(a):
"""
Computes partial correlation of array a.
    Array of shape (dim, time); the partial correlation is computed between the first two variables (rows), conditioned on the remaining ones.
"""
from scipy import linalg, stats
array = a.copy()
D, T = array.shape
if np.isnan(array).sum() != 0:
raise ValueError("nans in the array!")
# Standardize
array -= array.mean(axis=1).reshape(D, 1)
array /= array.std(axis=1).reshape(D, 1)
if np.isnan(array).sum() != 0:
raise ValueError("nans after standardizing, "
"possibly constant array!")
x = array[0, :]
y = array[1, :]
if len(array) > 2:
confounds = array[2:, :]
ortho_confounds = linalg.qr(
np.fastCopyAndTranspose(confounds), mode='economic')[0].T
x -= np.dot(np.dot(ortho_confounds, x), ortho_confounds)
y -= np.dot(np.dot(ortho_confounds, y), ortho_confounds)
val, pvalwrong = stats.pearsonr(x, y)
df = float(T - D)
if df < 1:
pval = np.nan
raise ValueError("D > T: Not enough degrees of freedom!")
else:
# Two-sided p-value accouting for degrees of freedom
trafo_val = val*np.sqrt(df/(1. - np.array([val])**2))
pval = stats.t.sf(np.abs(trafo_val), df)*2
return val, pval
def get_haar_flucs(ts, min_dt = 2, run_backwards = True, spacings = [2, 4, 8, 16], rms = True):
"""
Computes Haar fluctuations of the data -- scaling.
    If run_backwards is True, the function runs twice, the second time with the time series reversed;
    this is used for better statistics.
    spacings: either None for linear, evenly spaced sampling (takes too long), or a sequence such as
    [2, 4, 8, 16], where the first 1/n of the time scales is sampled with a step of 2, the next 1/n
    with a step of 4, and so on.
    rms: boolean, whether to compute RMS Haar or absolute Haar fluctuations.
"""
min_dt = min_dt
max_dt = ts.shape[0]
if spacings is None:
dts = np.arange(min_dt, max_dt, 2) # only even as we are dividing the interval into two
else:
dts = np.concatenate([np.arange(i*int(np.ceil(max_dt//len(spacings) / 2) * 2), (i+1)*int(np.ceil(max_dt//len(spacings) / 2) * 2), sp) for sp, i in zip(spacings, range(len(spacings)))])
dts = dts[1:] # dts starts with index 0, we need to start with 2
runs = 2 if run_backwards else 1
haar = np.zeros((dts.shape[0], runs), dtype = np.float32)
for run in range(runs):
if run == 1:
ts = ts[::-1]
for i in range(dts.shape[0]):
# split index every dt
split_ndx = list(np.arange(dts[i], max_dt, dts[i]))
# split array, result is array with shape [x,dt]
if ts.shape[0] % dts[i] == 0:
splitted = np.array(np.split(ts, split_ndx))
else:
# if last window is shorter, just omit it
splitted = np.array(np.split(ts, split_ndx)[:-1])
# split into two equal parts for averaging -- dt/2, shape is [x, dt/2, 2]
splitted = splitted.reshape((splitted.shape[0], dts[i]//2, 2), order = "F")
# average parts over second axis [the dt/2 one]
means = np.mean(splitted, axis = 1)
# compute Haar squared with C = 2
haars = (2*means[:, 1] - 2*means[:, 0])
if rms:
haars = haars**2
else:
haars = np.abs(haars)
haar[i, run] = np.mean(haars)
if rms:
return dts, np.mean(np.sqrt(haar), axis = 1)
else:
return dts, np.mean(haar, axis = 1)
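# --- Illustrative usage sketch (not part of the original module) -----------
# A quick smoke test of the helpers above on synthetic data.  Index
# `max_lag` of the cross-correlation output corresponds to zero lag.
if __name__ == "__main__":
    ts = np.cumsum(np.random.randn(2048))  # random-walk test series
    dts, fluc = get_haar_flucs(ts, spacings=[2, 4, 8, 16])
    print("Haar fluctuations computed for %d time scales" % dts.shape[0])
    a = np.random.randn(512)
    cc = cross_correlation(a, a, max_lag=10)
    print("zero-lag autocorrelation: %.3f" % cc[10])  # ~1.0 by construction
    x = np.random.randn(3, 1000)  # rows: x, y and one confounding series
    r, p = partial_corr(x)
    print("partial corr(x, y | z) = %.3f" % r)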
|
mit
| -1,540,109,556,385,848,000 | 35.080745 | 192 | 0.5823 | false |
darfire/screp
|
screp/source.py
|
1
|
1326
|
import urllib2
class BaseDataSource(object):
name = 'Unknown'
def read_data(self):
pass
class OpenedFileDataSource(object):
def __init__(self, name, ofile):
self._file = ofile
self.name = name
def read_data(self):
return self._file.read()
class URLDataSource(object):
def __init__(self, url, user_agent=None, proxy=True):
self._url = url
self.name = url
self._user_agent = user_agent
self._proxy = proxy
def _make_request(self):
request = urllib2.Request(self._url)
if self._user_agent is not None:
request.add_header('User-Agent', self._user_agent)
return request
def _setup_urllib(self):
if self._proxy:
proxy_opener = urllib2.ProxyHandler()
else:
proxy_opener = urllib2.ProxyHandler({})
urllib2.install_opener(urllib2.build_opener(proxy_opener))
def read_data(self):
self._setup_urllib()
request = self._make_request()
data = urllib2.urlopen(request).read()
return data
class FileDataSource(object):
def __init__(self, fname):
self._fname = fname
self.name = fname
def read_data(self):
with open(self._fname, 'r') as f:
return f.read()
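# --- Illustrative usage sketch (not part of the original module) -----------
# All three sources expose the same read_data() interface; the file path and
# URL below are hypothetical placeholders.
if __name__ == "__main__":
    from StringIO import StringIO
    src = OpenedFileDataSource("inline-example", StringIO("<html>example</html>"))
    print src.read_data()
    # FileDataSource("/tmp/page.html") and
    # URLDataSource("http://example.com/", user_agent="screp-example")
    # behave the same way.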
|
gpl-3.0
| -7,309,112,707,587,113,000 | 20.047619 | 66 | 0.575415 | false |
thesharp/botogram
|
botogram/api.py
|
1
|
5991
|
"""
botogram.api
Wrapper for the Telegram Bot API
Copyright (c) 2015 Pietro Albini <[email protected]>
Released under the MIT license
"""
import os
import requests
# These API methods sends something to a chat
# This list is used to filter which method to check for unavailable chats
SEND_TO_CHAT_METHODS = (
"sendMessage",
"forwardMessage",
"sendPhoto",
"sendAudio",
"sendDocument",
"sendSticker",
"sendVideo",
"sendVoice",
"sendLocation",
"sendChatAction",
"getChat",
)
class APIError(Exception):
"""Something went wrong with the API"""
def __init__(self, response):
self.error_code = response["error_code"]
self.description = response["description"]
msg = "Request failed with code %s. Response from Telegram: \"%s\"" % (
self.error_code, self.description
)
super(APIError, self).__init__(msg)
class ChatUnavailableError(APIError):
"""A chat is unavailable, which means you can't send messages to it"""
def __init__(self, reason, chat_id):
self.reason = reason
self.chat_id = chat_id
if reason == "blocked":
msg = "The user with ID %s blocked your bot" % chat_id
elif reason == "account_deleted":
msg = "The user with ID %s deleted his account" % chat_id
elif reason == "not_contacted":
msg = "The user with ID %s didn't contact you before" % chat_id
elif reason == "not_found":
msg = "The chat with ID %s doesn't exist" % chat_id
elif reason == "kicked":
msg = "The bot was kicked from the group with ID %s" % chat_id
elif reason == "chat_moved":
msg = "The chat with ID %s moved, and the old ID is no longer " \
"valid" % chat_id
else:
raise ValueError("Unknown reason: %s" % reason)
Exception.__init__(self, msg)
class TelegramAPI:
"""Main interface to the Telegram API"""
def __init__(self, api_key, endpoint=None):
# Fill the default API endpoint
if endpoint is None:
endpoint = "https://api.telegram.org/"
self._api_key = api_key
self._endpoint = endpoint
self._session_cache = None
self._session_pid = -1
def _session(self):
"""Get the current requests session"""
# Ensure a new session is created if the PID changes. This is because
# sessions behaves badly if you use them after fork()
if self._session_pid != os.getpid() or self._session_cache is None:
self._session_cache = requests.Session()
self._session_pid = os.getpid()
return self._session_cache
def call(self, method, params=None, files=None, expect=None):
"""Call a method of the API"""
url = self._endpoint + "bot%s/%s" % (self._api_key, method)
response = self._session().get(url, params=params, files=files)
content = response.json()
if not content["ok"]:
status = content["error_code"]
message = content["description"]
# Special handling for unavailable chats
if method in SEND_TO_CHAT_METHODS:
reason = None
# This happens when the bot tries to send messages to an user
# who blocked the bot
if status == 403 and "blocked" in message:
# Error code # 403
# Bot was blocked by the user
reason = "blocked"
# This happens when the user deleted its account
elif status == 403 and "deleted" in message:
# Error code # 403
# Forbidden: user is deleted
reason = "account_deleted"
# This happens, as @BotSupport says, when the Telegram API
# isn't able to determine why your bot can't contact an user
elif status == 400 and "PEER_ID_INVALID" in message:
# Error code # 400
# Bad request: PEER_ID_INVALID
reason = "not_found"
# This happens when the bot can't contact the user or the user
# doesn't exist
elif status == 400 and "not found" in message:
# Error code # 400
# Bad Request: chat not found
reason = "not_found"
# This happens when the bot is kicked from the group chat it's
# trying to send messages to
elif status == 403 and "kicked" in message:
# Error code # 403
# Forbidden: bot was kicked from the group chat
# Forbidden: bot was kicked from the supergroup chat
reason = "kicked"
# This happens when the ID points to a group chat, which was
# migrated to a supergroup chat, thus changing its ID
elif status == 400 and "migrated" in message:
# Error code # 400
# Bad Request: group chat is migrated to a supergroup chat
reason = "chat_moved"
if reason is not None:
raise ChatUnavailableError(reason, params["chat_id"])
raise APIError(content)
# If no special object is expected, return the decoded json.
# Else, return the "pythonized" result
if expect is None:
return content
else:
wrapped = expect(content["result"])
if hasattr(wrapped, "set_api"):
wrapped.set_api(self)
return wrapped
def file_content(self, path):
"""Get the content of an user-submitted file"""
url = self._endpoint + "file/bot%s/%s" % (self._api_key, path)
response = requests.get(url)
return response.content
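# --- Illustrative usage sketch (not part of the original module) -----------
# Requires a real bot API key; the token and chat id below are placeholders.
if __name__ == "__main__":
    api = TelegramAPI("123456:EXAMPLE-TOKEN")
    try:
        me = api.call("getMe")
        print(me["result"]["username"])
        api.call("sendMessage", {"chat_id": 12345678, "text": "hello"})
    except (ChatUnavailableError, APIError) as e:
        print(e)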
|
mit
| -5,768,657,739,455,037,000 | 34.035088 | 79 | 0.552829 | false |
cmusatyalab/gabriel
|
examples/round_trip/server.py
|
1
|
1058
|
import argparse
import common
from gabriel_protocol import gabriel_pb2
from gabriel_server import local_engine
from gabriel_server import cognitive_engine
class DisplayEngine(cognitive_engine.Engine):
def handle(self, input_frame):
status = gabriel_pb2.ResultWrapper.Status.SUCCESS
result_wrapper = cognitive_engine.create_result_wrapper(status)
result = gabriel_pb2.ResultWrapper.Result()
result.payload_type = gabriel_pb2.PayloadType.IMAGE
result.payload = input_frame.payloads[0]
result_wrapper.results.append(result)
return result_wrapper
def main():
common.configure_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'source_name', nargs='?', default=common.DEFAULT_SOURCE_NAME)
args = parser.parse_args()
def engine_factory():
return DisplayEngine()
local_engine.run(engine_factory, args.source_name, input_queue_maxsize=60,
port=common.WEBSOCKET_PORT, num_tokens=2)
if __name__ == '__main__':
main()
|
apache-2.0
| 8,300,970,384,304,075,000 | 28.388889 | 78 | 0.690926 | false |
indykish/servo
|
tests/wpt/harness/wptrunner/wptmanifest/parser.py
|
1
|
17689
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#default_value:foo
#include: other.manifest
#
#[test_name.js]
# expected: ERROR
#
# [subtest 1]
# expected:
# os == win: FAIL #This is a comment
# PASS
#
# TODO: keep comments in the tree
import types
from cStringIO import StringIO
from node import *
class ParseError(Exception):
pass
eol = object
group_start = object
group_end = object
digits = "0123456789"
open_parens = "[("
close_parens = "])"
parens = open_parens + close_parens
operator_chars = "=!"
unary_operators = ["not"]
binary_operators = ["==", "!=", "and", "or"]
operators = ["==", "!=", "not", "and", "or"]
def decode(byte_str):
return byte_str.decode("string_escape").decode("utf8")
def precedence(operator_node):
return len(operators) - operators.index(operator_node.data)
class TokenTypes(object):
def __init__(self):
for type in ["group_start", "group_end", "paren", "separator", "ident", "string", "number", "eof"]:
setattr(self, type, type)
token_types = TokenTypes()
class Tokenizer(object):
def __init__(self):
self.reset()
def reset(self):
self.indent_levels = [0]
self.state = self.line_start_state
self.next_state = self.data_line_state
self.line_number = 0
def tokenize(self, stream):
self.reset()
if type(stream) in types.StringTypes:
stream = StringIO(stream)
for i, line in enumerate(stream):
self.state = self.line_start_state
self.line_number = i + 1
self.index = 0
self.line = line.rstrip()
if self.line:
while self.state != self.eol_state:
tokens = self.state()
if tokens:
for token in tokens:
yield token
while True:
yield (token_types.eof, None)
def char(self):
if self.index == len(self.line):
return eol
return self.line[self.index]
def consume(self):
if self.index < len(self.line):
self.index += 1
def peek(self, length):
return self.line[self.index:self.index + length]
def skip_whitespace(self):
while self.char() == " ":
self.consume()
def eol_state(self):
pass
def line_start_state(self):
self.skip_whitespace()
assert self.char() != eol
if self.index > self.indent_levels[-1]:
self.indent_levels.append(self.index)
yield (token_types.group_start, None)
else:
while self.index < self.indent_levels[-1]:
self.indent_levels.pop()
yield (token_types.group_end, None)
# This is terrible; if we were parsing an expression
# then the next_state will be expr_or_value but when we deindent
# it must always be a heading or key next so we go back to data_line_state
self.next_state = self.data_line_state
if self.index != self.indent_levels[-1]:
raise ParseError("Unexpected indent")
self.state = self.next_state
def data_line_state(self):
if self.char() == "[":
yield (token_types.paren, self.char())
self.consume()
self.state = self.heading_state
else:
self.state = self.key_state
def heading_state(self):
index_0 = self.index
skip_indexes = []
while True:
c = self.char()
if c == "\\":
self.consume()
c = self.char()
if c == eol:
raise ParseError("Unexpected EOL in heading")
elif c == "]":
skip_indexes.append(self.index - 1)
self.consume()
elif c == "]":
break
elif c == eol:
raise ParseError("EOL in heading")
else:
self.consume()
self.state = self.line_end_state
index_1 = self.index
parts = []
min_index = index_0
for index in skip_indexes:
parts.append(self.line[min_index:index])
min_index = index + 1
parts.append(self.line[min_index:index_1])
yield (token_types.string, decode("".join(parts)))
yield (token_types.paren, "]")
self.consume()
self.state = self.line_end_state
self.next_state = self.data_line_state
def key_state(self):
index_0 = self.index
while True:
c = self.char()
if c == " ":
index_1 = self.index
self.skip_whitespace()
if self.char() != ":":
raise ParseError("Space in key name")
break
elif c == ":":
index_1 = self.index
break
elif c == eol:
raise ParseError("EOL in key name (missing ':'?)")
else:
self.consume()
yield (token_types.string, decode(self.line[index_0:index_1]))
yield (token_types.separator, ":")
self.consume()
self.state = self.after_key_state
def after_key_state(self):
self.skip_whitespace()
c = self.char()
if c == "#":
self.next_state = self.expr_or_value_state
self.state = self.comment_state
elif c == eol:
self.next_state = self.expr_or_value_state
self.state = self.eol_state
else:
self.state = self.value_state
def value_state(self):
self.skip_whitespace()
index_0 = self.index
if self.char() in ("'", '"'):
quote_char = self.char()
self.consume()
yield (token_types.string, decode(self.read_string(quote_char)))
else:
index_1 = self.index
while True:
c = self.char()
if c == "\\":
self.consume()
if self.char() == eol:
raise ParseError("EOL in character escape")
elif c == "#":
self.state = self.comment_state
break
elif c == " ":
# prevent whitespace before comments from being included in the value
pass
elif c == eol:
break
else:
index_1 = self.index
self.consume()
yield (token_types.string, decode(self.line[index_0:index_1 + 1]))
self.state = self.line_end_state
def comment_state(self):
self.state = self.eol_state
def line_end_state(self):
self.skip_whitespace()
c = self.char()
if c == "#":
self.state = self.comment_state
elif c == eol:
self.state = self.eol_state
else:
raise ParseError("Junk before EOL c")
def read_string(self, quote_char):
index_0 = self.index
while True:
c = self.char()
if c == "\\":
self.consume()
                if self.char() == eol:
raise ParseError("EOL following quote")
self.consume()
elif c == quote_char:
break
elif c == eol:
raise ParseError("EOL in quoted string")
else:
self.consume()
rv = self.line[index_0:self.index]
self.consume()
return rv
def expr_or_value_state(self):
if self.peek(3) == "if ":
self.state = self.expr_state
else:
self.state = self.value_state
def expr_state(self):
self.skip_whitespace()
c = self.char()
if c == eol:
raise ParseError("EOL in expression")
elif c in "'\"":
self.consume()
yield (token_types.string, decode(self.read_string(c)))
elif c == "#":
raise ParseError("Comment before end of expression")
elif c == ":":
yield (token_types.separator, c)
self.consume()
self.state = self.value_state
elif c in parens:
self.consume()
yield (token_types.paren, c)
elif c in ("!", "="):
self.state = self.operator_state
elif c in digits:
self.state = self.digit_state
else:
self.state = self.ident_state
def operator_state(self):
# Only symbolic operators
index_0 = self.index
while True:
c = self.char()
if c == eol:
break
elif c in operator_chars:
self.consume()
else:
self.state = self.expr_state
break
yield (token_types.ident, self.line[index_0:self.index])
def digit_state(self):
index_0 = self.index
seen_dot = False
while True:
c = self.char()
if c == eol:
break
elif c in digits:
self.consume()
elif c == ".":
if seen_dot:
raise ParseError("Invalid number")
self.consume()
seen_dot = True
elif c in parens:
break
elif c in operator_chars:
break
elif c == " ":
break
elif c == ":":
break
else:
raise ParseError("Invalid character in number")
self.state = self.expr_state
yield (token_types.number, self.line[index_0:self.index])
def ident_state(self):
index_0 = self.index
while True:
c = self.char()
if c == eol:
break
elif c == ".":
break
elif c in parens:
break
elif c in operator_chars:
break
elif c == " ":
break
elif c == ":":
break
else:
self.consume()
self.state = self.expr_state
yield (token_types.ident, self.line[index_0:self.index])
class Parser(object):
def __init__(self):
self.reset()
def reset(self):
self.token = None
self.unary_operators = "!"
self.binary_operators = frozenset(["&&", "||", "=="])
self.tokenizer = Tokenizer()
self.token_generator = None
self.tree = Treebuilder(DataNode(None))
self.expr_builder = None
self.expr_builders = []
def parse(self, input):
self.reset()
self.token_generator = self.tokenizer.tokenize(input)
self.consume()
self.manifest()
return self.tree.node
def consume(self):
self.token = self.token_generator.next()
def expect(self, type, value=None):
if self.token[0] != type:
raise ParseError
if value is not None:
if self.token[1] != value:
raise ParseError
self.consume()
def manifest(self):
self.data_block()
self.expect(token_types.eof)
def data_block(self):
while self.token[0] == token_types.string:
self.tree.append(KeyValueNode(self.token[1]))
self.consume()
self.expect(token_types.separator)
self.value_block()
self.tree.pop()
while self.token == (token_types.paren, "["):
self.consume()
if self.token[0] != token_types.string:
raise ParseError
self.tree.append(DataNode(self.token[1]))
self.consume()
self.expect(token_types.paren, "]")
if self.token[0] == token_types.group_start:
self.consume()
self.data_block()
self.eof_or_end_group()
self.tree.pop()
def eof_or_end_group(self):
if self.token[0] != token_types.eof:
self.expect(token_types.group_end)
def value_block(self):
if self.token[0] == token_types.string:
self.value()
elif self.token[0] == token_types.group_start:
self.consume()
self.expression_values()
if self.token[0] == token_types.string:
self.value()
self.eof_or_end_group()
else:
raise ParseError
def expression_values(self):
while self.token == (token_types.ident, "if"):
self.consume()
self.tree.append(ConditionalNode())
self.expr_start()
self.expect(token_types.separator)
if self.token[0] == token_types.string:
self.value()
else:
raise ParseError
self.tree.pop()
def value(self):
self.tree.append(ValueNode(self.token[1]))
self.consume()
self.tree.pop()
def expr_start(self):
self.expr_builder = ExpressionBuilder()
self.expr_builders.append(self.expr_builder)
self.expr()
expression = self.expr_builder.finish()
self.expr_builders.pop()
self.expr_builder = self.expr_builders[-1] if self.expr_builders else None
if self.expr_builder:
self.expr_builder.operands[-1].children[-1].append(expression)
else:
self.tree.append(expression)
self.tree.pop()
def expr(self):
self.expr_operand()
while (self.token[0] == token_types.ident and self.token[1] in binary_operators):
self.expr_bin_op()
self.expr_operand()
def expr_operand(self):
if self.token == (token_types.paren, "("):
self.consume()
self.expr_builder.left_paren()
self.expr()
self.expect(token_types.paren, ")")
self.expr_builder.right_paren()
elif self.token[0] == token_types.ident and self.token[1] in unary_operators:
self.expr_unary_op()
self.expr_operand()
elif self.token[0] in [token_types.string, token_types.ident]:
self.expr_value()
elif self.token[0] == token_types.number:
self.expr_number()
else:
raise ParseError
def expr_unary_op(self):
if self.token[1] in unary_operators:
self.expr_builder.push_operator(UnaryOperatorNode(self.token[1]))
self.consume()
else:
raise ParseError()
def expr_bin_op(self):
if self.token[1] in binary_operators:
self.expr_builder.push_operator(BinaryOperatorNode(self.token[1]))
self.consume()
else:
raise ParseError()
def expr_value(self):
node_type = {token_types.string: StringNode,
token_types.ident: VariableNode}[self.token[0]]
self.expr_builder.push_operand(node_type(self.token[1]))
self.consume()
if self.token == (token_types.paren, "["):
self.consume()
self.expr_builder.operands[-1].append(IndexNode())
self.expr_start()
self.expect(token_types.paren, "]")
def expr_number(self):
self.expr_builder.push_operand(NumberNode(self.token[1]))
self.consume()
class Treebuilder(object):
def __init__(self, root):
self.root = root
self.node = root
def append(self, node):
self.node.append(node)
self.node = node
return node
def pop(self):
node = self.node
self.node = self.node.parent
return node
class ExpressionBuilder(object):
def __init__(self):
self.operands = []
self.operators = [None]
def finish(self):
while self.operators[-1] is not None:
self.pop_operator()
rv = self.pop_operand()
assert self.is_empty()
return rv
def left_paren(self):
self.operators.append(None)
def right_paren(self):
while self.operators[-1] is not None:
self.pop_operator()
if not self.operators:
raise ParseError("Unbalanced parens")
assert self.operators.pop() is None
def push_operator(self, operator):
assert operator is not None
while self.precedence(self.operators[-1]) > self.precedence(operator):
self.pop_operator()
self.operators.append(operator)
def pop_operator(self):
operator = self.operators.pop()
if isinstance(operator, BinaryOperatorNode):
operand_1 = self.operands.pop()
operand_0 = self.operands.pop()
self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1))
else:
operand_0 = self.operands.pop()
self.operands.append(UnaryExpressionNode(operator, operand_0))
def push_operand(self, node):
self.operands.append(node)
def pop_operand(self):
return self.operands.pop()
def is_empty(self):
return len(self.operands) == 0 and all(item is None for item in self.operators)
def precedence(self, operator):
if operator is None:
return 0
return precedence(operator)
def parse(stream):
p = Parser()
return p.parse(stream)
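# --- Illustrative sketch (not part of the original module) ---
# Shows the call sequence the Parser performs on ExpressionBuilder for a
# simple binary expression such as `a == "b"`. VariableNode, StringNode,
# BinaryOperatorNode and the module-level precedence() are assumed to behave
# as referenced in the code above.
def _example_expression_build():
    builder = ExpressionBuilder()
    builder.push_operand(VariableNode("a"))
    builder.push_operator(BinaryOperatorNode("=="))
    builder.push_operand(StringNode("b"))
    # finish() pops the pending operator and wraps both operands in a
    # BinaryExpressionNode, which is what expr_start() appends to the tree.
    return builder.finish()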
|
mpl-2.0
| 243,515,875,209,743,070 | 29.134583 | 107 | 0.51908 | false |
jwalgran/otm-core
|
opentreemap/opentreemap/settings/__init__.py
|
3
|
1290
|
# The purpose of this package is to provide fine-grained control
# over how settings are initialized and overridden
#
# file summary
# * ./__init__.py - the canonical place to manage importing settings
# * ./default_settings.py - the canonical place to add new settings
# * ./local_settings.py - the canonical place to override settings
#
# WARNING: !!! DO NOT ADD SETTINGS TO THIS FILE !!!
# WARNING: !!! USE THIS FILE EXCLUSIVELY TO MANAGE SETTING IMPORTS !!!
STORAGE_UNITS = {}
DISPLAY_DEFAULTS = {}
MIDDLEWARE = ()
RESERVED_INSTANCE_URL_NAMES = ()
INSTALLED_APPS = ()
from opentreemap.settings.default_settings import * # NOQA
EXTRA_URLS = (
# Mount extra urls. These should be a
# tuple of (url path, url module). Something like:
#
    # ('/extra_api/', 'apiv2.urls'),
    # ('/local/', 'local.urls'),
)
EXTRA_APPS = ()
EXTRA_MIDDLEWARE = ()
EXTRA_RESERVED_INSTANCE_URL_NAMES = ()
EXTRA_UI_TESTS = ()
EXTRA_DISPLAY_DEFAULTS = {}
EXTRA_STORAGE_UNITS = {}
from opentreemap.settings.local_settings import * # NOQA
INSTALLED_APPS = EXTRA_APPS + INSTALLED_APPS
MIDDLEWARE = EXTRA_MIDDLEWARE + MIDDLEWARE
RESERVED_INSTANCE_URL_NAMES += EXTRA_RESERVED_INSTANCE_URL_NAMES
DISPLAY_DEFAULTS.update(EXTRA_DISPLAY_DEFAULTS)
STORAGE_UNITS.update(EXTRA_STORAGE_UNITS)
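# Illustrative sketch (not part of this file): a hypothetical
# opentreemap/settings/local_settings.py override that the wildcard import
# above would pick up; the app and module names are assumptions.
#
#   EXTRA_APPS = ('my_local_app',)
#   EXTRA_MIDDLEWARE = ('my_local_app.middleware.AuditMiddleware',)
#   EXTRA_URLS = (('/local/', 'my_local_app.urls'),)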
|
gpl-3.0
| -3,212,290,943,003,899,400 | 29 | 76 | 0.705426 | false |
shucommon/little-routine
|
python/python-crash-course/hello.py
|
1
|
4949
|
print("hello world")
msg1='I told my friend, "Python is my favorite language!"'
msg2="The language 'Python' is named after Monty Python, not the snake."
msg3="One of Python's strengths is its diverse and supportive community."
print(msg1)
print(msg2)
print(msg3)
name='hello world'
print(name.title())
print(name.upper())
print(name.lower())
name=" hello "
name.rstrip() # end
name.lstrip() # start
name.strip() # both
bicycles = ['trek', 'cannondale', 'redline', 'specialized']
print(bicycles)
bicycles.append("new_one")
print(bicycles)
bicycles.insert(1, "insert-in-1")
print(bicycles)
del bicycles[0]
print(bicycles)
poped=bicycles.pop()
print(bicycles)
print(poped)
poped=bicycles.pop(1)
print(bicycles)
print(poped)
bicycles.remove("redline")
print(bicycles)
cars = ['bmw', 'audi', 'toyota', 'subaru']
print(cars)
cars.sort()
print(cars)
cars.sort(reverse=True)
print(cars)
print("reverse interate")
cars.reverse()
print(cars)
cars = ['bmw', 'audi', 'toyota', 'subaru']
print("Here is the original list:")
print(cars)
print("Here is the sorted list:")
print(sorted(cars))
print("Here is the original list again:")
print(cars)
print(sorted(cars, reverse=True))
print("Here is the original list again:")
print(cars)
print("len of list cars:")
print(len(cars))
magicians = ['alice', 'david', 'carolina']
for magician in magicians:
print(magician)
numbers = list(range(1,6))
print(numbers)
even_numbers = list(range(2,11,2))
print(even_numbers)
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
print("min ", min(digits))
print("max ", max(digits))
print("sum ", sum(digits))
squares = [value**2 for value in range(1,11)]
print(squares)
alien = {'color' : 'green', 'points' : '5'}
print(alien['color'])
print(alien['points'])
alien = {1 : 'green', 2 : 5}
print(alien[1])
print(alien[2])
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
'''
message = input("Tell me something, and I will repeat it back to you: ")
print(message)
age = input("how old are you ?")
age = int(age)
print(age)
current_number = 1
while current_number <= 5:
print(current_number)
current_number += 1
'''
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
print(pets)
while 'cat' in pets:
print("find")
pets.remove('cat')
print(pets)
def func(*parm):
#print(parm)
for p in parm:
print(p)
func('a')
func('a', 'b')
def build_profile(first, last, **user_info):
profile = {}
profile['first_name'] = first
profile['last_name'] = last
for key, value in user_info.items():
profile[key] = value
return profile
user_profile = build_profile('albert', 'einstein',
location='princeton',
field='physics')
print(user_profile)
class Dog():
def __init__(self, name, age):
self.name = name
self.age = age
def sit(self):
print(self.name.title() + " is now sitting.")
def roll_over(self):
print(self.name.title() + " rolled over!")
my_dog = Dog('willie', 6)
my_dog.sit()
my_dog.roll_over()
class Battery():
def __init__(self, battery_size=70):
self.battery_size = battery_size
def describe_battery(self):
print("This car has a " + str(self.battery_size) + "-kWh battery.")
def get_range(self):
if self.battery_size == 70:
range = 240
elif self.battery_size == 85:
range = 270
message = "This car can go approximately " + str(range)
message += " miles on a full charge."
print(message)
class Car():
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
self.odometer_reading = 0
def get_descriptive_name(self):
long_name = str(self.year) + ' ' + self.make + ' ' + self.model
return long_name.title()
def read_odometer(self):
print("This car has " + str(self.odometer_reading) + " miles on it.")
def update_odometer(self, mileage):
if mileage >= self.odometer_reading:
self.odometer_reading = mileage
else:
print("You can't roll back an odometer!")
def increment_odometer(self, miles):
self.odometer_reading += miles
class ElectricCar(Car):
def __init__(self, make, model, year):
        super().__init__(make, model, year) # init parent's attributes
        #self.battery_size = 70 # then add child class property
        self.battery = Battery()
    def describe_battery(self):
        print("This car has a " + str(self.battery.battery_size) + "-kWh battery.")
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
my_tesla.read_odometer()
#my_tesla.describe_battery()
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
|
gpl-3.0
| 5,006,750,513,838,343,000 | 23.621891 | 77 | 0.624975 | false |
bparzella/secsgem
|
secsgem/secs/functions/s12f12.py
|
1
|
1608
|
#####################################################################
# s12f12.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""Class for stream 12 function 12."""
from secsgem.secs.functions.base import SecsStreamFunction
from secsgem.secs.data_items import MDACK
class SecsS12F12(SecsStreamFunction):
"""
map data type 3 - acknowledge.
**Data Items**
- :class:`MDACK <secsgem.secs.data_items.MDACK>`
**Structure**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS12F12
MDACK: B[1]
**Example**::
>>> import secsgem.secs
>>> secsgem.secs.functions.SecsS12F12(secsgem.secs.data_items.MDACK.FORMAT_ERROR)
S12F12
<B 0x1> .
:param value: parameters for this function (see example)
:type value: byte
"""
_stream = 12
_function = 12
_data_format = MDACK
_to_host = False
_to_equipment = True
_has_reply = False
_is_reply_required = False
_is_multi_block = False
|
lgpl-2.1
| 132,408,518,408,366,960 | 26.724138 | 89 | 0.618781 | false |
nathdwek/vhdltree
|
test/test_integration.py
|
1
|
2494
|
import os
import pytest
from vhdltree.logic import find_ext, _vhdltree, vhdltree
@pytest.fixture()
def cd_proot():
proot = os.path.join(os.path.dirname(__file__), 'dummy_project')
os.chdir(proot)
@pytest.mark.usefixtures("cd_proot")
class TestIntegration:
def test_find_vhd(self):
assert dict(find_ext('./', 'vhd')) == {
'e2': './e2.vhd',
'e3': './lib/E3.vhd',
'e4': './lib/deep/e4.vhd',
'e5': './lib/e5.vhd',
'e6': './lib/deep/e6.vhd',
'e7': './e7.vhd',
'e_1': './e_1.vhd',
'long_component_name5678': './long_component_name5678.vhd',
'main': './main.vhd'
}
def test_vhdltree_logic(self):
vhd_files = {
'e2': './e2.vhd',
'e3': './lib/E3.vhd',
'e4': './lib/deep/e4.vhd',
'e5': './lib/e5.vhd',
'e6': './lib/deep/e6.vhd',
'e7': './e7.vhd',
'e_1': './e_1.vhd',
'long_component_name5678': './long_component_name5678.vhd',
'main': './main.vhd'
}
assert tuple(_vhdltree(0, './main.vhd', vhd_files)) == (
(0, 'e1i1', './long_component_name5678.vhd'),
(1, 'long_ent17y_n4m3_with_numbers4567', './lib/deep/e4.vhd'),
(0, 'e1i2', './long_component_name5678.vhd'),
(1, 'long_ent17y_n4m3_with_numbers4567', './lib/deep/e4.vhd'),
(0, 'e2I1', './e2.vhd'),
(1, 'NO_PREFIX', './lib/E3.vhd'),
(1, 'bad_prefix', './lib/E3.vhd'),
(0, 'E3_i1', './lib/E3.vhd'),
(0, 'truncate_before_dot', './lib/deep/e4.vhd'),
(0, 'the', './e_1.vhd'),
(1, 'chain', './lib/e5.vhd'),
(2, 'goes', './lib/deep/e6.vhd'),
(3, 'on', './e7.vhd'),
(0, 'not_found', '')
)
def test_integration(self, capsys):
vhdltree('./main.vhd', './')
out, err = capsys.readouterr()
assert not err
assert out == """e1i1 : ./long_component_name5678.vhd
long_ent17y_n4m3_with_numbers4567 : ./lib/deep/e4.vhd
e1i2 : ./long_component_name5678.vhd
long_ent17y_n4m3_with_numbers4567 : ./lib/deep/e4.vhd
e2I1 : ./e2.vhd
NO_PREFIX : ./lib/E3.vhd
bad_prefix : ./lib/E3.vhd
E3_i1 : ./lib/E3.vhd
truncate_before_dot : ./lib/deep/e4.vhd
the : ./e_1.vhd
chain : ./lib/e5.vhd
goes : ./lib/deep/e6.vhd
on : ./e7.vhd
not_found : Not found
"""
|
mit
| -1,429,446,865,932,973,300 | 32.253333 | 74 | 0.481957 | false |
binxio/cfn-secret-provider
|
tests/test_cfn_keypair_provider.py
|
1
|
7425
|
import sys
import boto3
import uuid
from cfn_keypair_provider import KeyPairProvider
from secrets import handler
def test_defaults():
request = Request("Create", "abc")
r = KeyPairProvider()
r.set_request(request, {})
assert r.is_valid_request()
assert r.get("Name") == "abc"
assert r.get("PublicKeyMaterial") is not None
request["ResourceProperties"] = {"Name": "abc"}
r.set_request(request, {})
assert not r.is_valid_request(), "PublicKeyMaterial is required"
request["ResourceProperties"] = {"PublicKeyMaterial": "abc"}
r.set_request(request, {})
assert not r.is_valid_request(), "Name is required"
def test_key_name_from_physical_resource_id():
request = Request(
"Update",
"abc",
"arn:aws:ec2:eu-central-1:245111612214:keypair/kb062b200-4b67-4eee-8933-44d76c0a199a",
)
provider = KeyPairProvider()
provider.set_request(request, {})
assert (
provider.key_name_from_physical_resource_id()
== "kb062b200-4b67-4eee-8933-44d76c0a199a"
)
request = Request("Update", "abc", "sdfasdfsdfsf")
provider = KeyPairProvider()
provider.set_request(request, {})
assert provider.key_name_from_physical_resource_id() is None
def get_finger_print(name):
ec2 = boto3.resource("ec2")
key_pair = ec2.KeyPair(name)
key_pair.load()
return key_pair.key_fingerprint
def test_create_and_public():
# create a test parameter
provider = KeyPairProvider()
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = provider.handle(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert provider.is_valid_cfn_response(), response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert "Data" in response
assert "Arn" in response["Data"]
assert "Name" in response["Data"]
assert response["Data"]["Arn"] == physical_resource_id
assert response["Data"]["Name"] == name
finger_print_1 = get_finger_print(name)
assert finger_print_1 is not None
# update the material
request = Request(
"Update", name, physical_resource_id, KeyPair().public_key_material
)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert response["Data"]["Name"] == name
finger_print_2 = get_finger_print(name)
assert finger_print_2 is not None
assert finger_print_1 != finger_print_2
assert response["Data"]["Name"] == name
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_create():
    # request duplicate create
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_update_name():
# create a keypair
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert response["Data"]["Name"] == name
# update keypair name
name_2 = "k2%s" % name
request = Request("Update", name_2, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
assert response["Data"]["Name"] == name_2
physical_resource_id_2 = response["PhysicalResourceId"]
assert physical_resource_id != physical_resource_id_2
# delete the keypairs
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_through_update():
# update parameter name
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
name_2 = "k2%s" % name
request = Request("Create", name_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id_2 = response["PhysicalResourceId"]
request = Request("Update", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
class KeyPair(object):
def __init__(self):
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import (
default_backend as crypto_default_backend,
)
self.key = rsa.generate_private_key(
backend=crypto_default_backend(), public_exponent=65537, key_size=2048
)
self.private_key = self.key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption(),
).decode("ascii")
self.public_key = (
self.key.public_key()
.public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH,
)
.decode("ascii")
)
@property
def public_key_material(self):
return self.public_key
class Request(dict):
def __init__(
self,
request_type,
name,
physical_resource_id=str(uuid.uuid4()),
public_key_material=KeyPair().public_key_material,
):
self.update(
{
"RequestType": request_type,
"ResponseURL": "https://httpbin.org/put",
"StackId": "arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid",
"RequestId": "request-%s" % uuid.uuid4(),
"ResourceType": "Custom::KeyPair",
"LogicalResourceId": "MyKey",
"PhysicalResourceId": physical_resource_id,
"ResourceProperties": {
"Name": name,
"PublicKeyMaterial": public_key_material,
},
}
)
|
apache-2.0
| 5,258,234,521,446,483,000 | 32.90411 | 94 | 0.633266 | false |
hellfish90/HeyLily
|
Client_Server/OkLilyServer/OkLilyServer/models.py
|
1
|
3576
|
from django.db import models
import subprocess
import requests
class Job(models.Model):
create = "0"
received = "1"
sent = "2"
error = "3"
success = "4"
not_understand = "5"
you_say_something = "6"
fact = "7"
doing = "8"
tell_me = "9"
no_connection_with_server = "11"
bad_job = "12"
Type_State = (
(create, 'created'),
(received, 'received'),
(sent, 'sent'),
(success, 'success'),
(error, 'error')
)
name = models.CharField(max_length=50, verbose_name="name", default='no value')
module = models.CharField(max_length=50, verbose_name="name_module", default='no value')
plugin = models.CharField(max_length=50, verbose_name="plugin_name", default='no value')
instruction = models.CharField(max_length=50, verbose_name="name_instruction", default='no value')
parameter = models.CharField(max_length=50, verbose_name="parameter_instruction", default='no value')
command = models.CharField(max_length=50, verbose_name="value_instruction_command", default='no value')
state = models.IntegerField(choices=Type_State, default=create, verbose_name="command state")
def __init__(self, name, module, plugin, instruction, parameter, command, state):
self.name = name
self.module = module
self.plugin = plugin
self.instruction = instruction
self.command = command
self.state = state
self.parameter = parameter
def do_job(self):
try:
my_file = open('/tmp/chanel', 'w')
my_file.write(self.parameter)
my_file.close()
self.state = subprocess.check_output(str(self.command), shell=True)
my_file = open('callback_proces.html','w')
my_file.write(self.state)
my_file.close()
except Exception as e:
my_file = open('callback_proces.html', 'w')
my_file.write("State: "+self.state+" Error: "+self.command+"</p>"+ self.name + "state: " +self.state + "parameter:" + self.parameter + "Exception:" + e.message )
my_file.close()
self.state = "-1"
if self.state != "-1":
return self.success
else:
return self.error
def send_job(self, ip):
payload = {'name': self.name, 'plugin': self.plugin,
'module': self.module, 'instruction': self.instruction,
'parameter':self.parameter, 'command': self.command,
'state': self.state}
my_file = open('command.html', 'w')
my_file.write(self.command)
my_file.close()
try:
r = requests.post("http://" + ip + "/client/job/receiver/", data=payload)
callback = r.text
if self.bad_response(r.text):
# if the response is bad
callback = r.text
else:
# if the response is well
callback = r.text
my_file = open('callback_send_job.html', 'w')
my_file.write(callback)
my_file.close()
if len(callback) > 4:
return "-1"
else:
return callback
except Exception as e:
my_file = open('callback_send_job.html', 'w')
my_file.write("Error on send job")
my_file.close()
callback = e.message
return "-1"
def save_job_to_log(self):
pass
def bad_response(self, data):
return len(data)>4
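# --- Illustrative sketch (not part of the original module) ---
# Hypothetical construction and dispatch of a Job; the module, plugin,
# command and target IP are all made-up values.
def _example_dispatch():
    job = Job(name='say_hello', module='speech', plugin='tts',
              instruction='speak', parameter='hello world',
              command='echo hello', state=Job.create)
    return job.send_job('192.168.1.10:8000')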
|
gpl-2.0
| 7,365,925,454,104,061,000 | 30.646018 | 173 | 0.550336 | false |
zhmcclient/python-zhmcclient
|
zhmcclient/_storage_group_template.py
|
1
|
12763
|
# Copyright 2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :class:`~zhmcclient.StorageGroupTemplate` object represents a
:term:`storage group template` object and can be used to create
:term:`storage group` objects with the properties of the template, including an
initial set of storage volumes, using the
:meth:`zhmcclient.StorageGroupTemplateManager.create` method.
Storage group template objects can be created, updated and deleted.
Storage group template objects can only be defined in CPCs that are in
DPM mode and that have the "dpm-storage-management" feature enabled.
"""
from __future__ import absolute_import
import copy
from ._manager import BaseManager
from ._resource import BaseResource
from ._storage_volume_template import StorageVolumeTemplateManager
from ._logging import logged_api_call
from ._utils import matches_filters, divide_filter_args
__all__ = ['StorageGroupTemplateManager', 'StorageGroupTemplate']
class StorageGroupTemplateManager(BaseManager):
"""
Manager providing access to the
:term:`storage group templates <storage group template>` of the HMC.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable:
* :attr:`~zhmcclient.Console.storage_group_templates` of a
:class:`~zhmcclient.Console` object.
"""
def __init__(self, console):
# This function should not go into the docs.
# Parameters:
# console (:class:`~zhmcclient.Console`):
# CPC or HMC defining the scope for this manager.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
query_props = [
'cpc-uri',
'name',
'type',
]
super(StorageGroupTemplateManager, self).__init__(
resource_class=StorageGroupTemplate,
class_name='storage-template',
session=console.manager.session,
parent=console,
base_uri='/api/storage-templates',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
self._console = console
@property
def console(self):
"""
:class:`~zhmcclient.Console`: The Console object representing the HMC.
"""
return self._console
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the storage group templates defined in the HMC.
Storage group templates for which the authenticated user does not have
object-access permission are not included.
Authorization requirements:
* Object-access permission to any storage group templates to be
included in the result.
Parameters:
full_properties (bool):
Controls that the full set of resource properties for each returned
storage group template is being retrieved, vs. only the following
short set: "object-uri", "cpc-uri", "name", and "type".
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen.
Returns:
: A list of :class:`~zhmcclient.StorageGroupTemplate` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
resource_obj_list = []
if filter_args is None:
filter_args = {}
resource_obj = self._try_optimized_lookup(filter_args)
if resource_obj:
resource_obj_list.append(resource_obj)
# It already has full properties
else:
query_parms, client_filters = divide_filter_args(
self._query_props, filter_args)
uri = '{}{}'.format(self._base_uri, query_parms)
result = self.session.get(uri)
if result:
props_list = result['storage-templates']
for props in props_list:
resource_obj = self.resource_class(
manager=self,
uri=props[self._uri_prop],
name=props.get(self._name_prop, None),
properties=props)
if matches_filters(resource_obj, client_filters):
resource_obj_list.append(resource_obj)
if full_properties:
resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
@logged_api_call
def create(self, properties):
"""
Create a storage group template.
        The new storage group template will be associated with the CPC
        identified by the `cpc-uri` input property.
Authorization requirements:
* Object-access permission to the CPC that will be associated with
the new storage group template.
* Task permission to the "Configure Storage - System Programmer" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create Storage Template' in the :term:`HMC API` book.
The 'cpc-uri' property identifies the CPC to which the new
storage group template will be associated, and is required to be
specified in this parameter.
Returns:
:class:`~zhmcclient.StorageGroupTemplate`:
The resource object for the new storage group template.
The object will have its 'object-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if properties is None:
properties = {}
result = self.session.post(self._base_uri, body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
storage_group_template = StorageGroupTemplate(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return storage_group_template
class StorageGroupTemplate(BaseResource):
"""
Representation of a :term:`storage group template`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.StorageGroupTemplateManager`).
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.StorageGroupTemplateManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, StorageGroupTemplateManager), \
"StorageGroupTemplate init: Expected manager type %s, got %s" % \
(StorageGroupTemplateManager, type(manager))
super(StorageGroupTemplate, self).__init__(
manager, uri, name, properties)
# The manager objects for child resources (with lazy initialization):
self._storage_volume_templates = None
self._cpc = None
@property
def storage_volume_templates(self):
"""
:class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group.
"""
# We do here some lazy loading.
if not self._storage_volume_templates:
self._storage_volume_templates = StorageVolumeTemplateManager(self)
return self._storage_volume_templates
@property
def cpc(self):
"""
:class:`~zhmcclient.Cpc`: The :term:`CPC` to which this storage group
template is associated.
The returned :class:`~zhmcclient.Cpc` has only a minimal set of
properties populated.
"""
# We do here some lazy loading.
if not self._cpc:
cpc_uri = self.get_property('cpc-uri')
cpc_mgr = self.manager.console.manager.client.cpcs
self._cpc = cpc_mgr.resource_object(cpc_uri)
return self._cpc
@logged_api_call
def delete(self):
"""
Delete this storage group template and its storage volume template
resources on the HMC.
Storage groups and their volumes that have been created from the
template that is deleted, are not affected.
Authorization requirements:
* Object-access permission to this storage group template.
* Task permission to the "Configure Storage - System Programmer" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(uri=self.uri)
self.manager._name_uri_cache.delete(
self._properties.get(self.manager._name_prop, None))
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this storage group template.
This includes the `storage-template-volumes` property which contains
requests for creations, deletions and updates of
:class:`~zhmcclient.StorageVolumeTemplate` resources of this storage
group template.
As an alternative to this bulk approach for managing storage volume
templates, each :class:`~zhmcclient.StorageVolumeTemplate` resource
can individually be created, deleted and updated using the respective
methods on
:attr:`~zhmcclient.StorageGroupTemplate.storage_volume_templates`.
Authorization requirements:
* Object-access permission to this storage group template.
* Task permission to the "Configure Storage - System Programmer" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are listed for operation
'Modify Storage Template Properties' in the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
uri = '{}/operations/modify'.format(self.uri)
self.manager.session.post(uri, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self._properties.update(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri)
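# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical calling pattern; `client` is an existing
# zhmcclient.Client and `cpc` a zhmcclient.Cpc in DPM mode with the
# "dpm-storage-management" feature enabled. The name and type values are
# assumptions.
def _example_storage_group_template_usage(client, cpc):
    console = client.consoles.console
    # Create a template associated with the CPC (property names as per the
    # create() docstring above).
    template = console.storage_group_templates.create({
        'cpc-uri': cpc.uri,
        'name': 'example-fcp-template',
        'type': 'fcp',
    })
    # Rename it via the bulk property update, then delete it again.
    template.update_properties({'name': 'example-fcp-template-renamed'})
    template.delete()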
|
apache-2.0
| 1,258,159,227,317,339,600 | 35.887283 | 79 | 0.637154 | false |
dials/dials
|
command_line/plot_scan_varying_model.py
|
1
|
14162
|
import errno
import os
import matplotlib
from libtbx.phil import parse
import dials.util
from dials.algorithms.refinement.rotation_decomposition import (
solve_r3_rotation_for_angles_given_axes,
)
matplotlib.use("Agg")
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
phil_scope = parse(
"""
output {
directory = .
.type = str
.help = "The directory to store the results"
format = *png pdf
.type = choice
debug = False
.help = "print tables of values that will be plotted"
.type = bool
.expert_level = 1
}
orientation_decomposition
.help = "Options determining how the orientation matrix"
"decomposition is done. The axes about which to decompose"
"the matrix into three rotations are chosen here, as well"
"as whether the rotations are relative to the reference"
"orientation, taken from the static crystal model"
{
e1 = 1. 0. 0.
.type = floats(size = 3)
e2 = 0. 1. 0.
.type = floats(size = 3)
e3 = 0. 0. 1.
.type = floats(size = 3)
relative_to_static_orientation = True
.type = bool
}
"""
)
help_message = """
Generate plots of scan-varying models, including crystal orientation, unit cell
and beam centre, from the input refined.expt
Examples::
dials.plot_scan_varying_model refined.expt
"""
def ensure_directory(path):
"""Make the directory if not already there."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Script:
"""Class to run script."""
def __init__(self):
"""Setup the script."""
from dials.util.options import OptionParser
usage = "usage: dials.plot_scan_varying_model [options] refined.expt"
self.parser = OptionParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
check_format=False,
epilog=help_message,
)
def run(self, args=None):
"""Run the script."""
from scitbx import matrix
from dials.util.options import flatten_experiments
params, options = self.parser.parse_args(args)
if len(params.input.experiments) == 0:
self.parser.print_help()
return
experiments = flatten_experiments(params.input.experiments)
# Determine output path
self._directory = os.path.join(params.output.directory, "scan-varying_model")
self._directory = os.path.abspath(self._directory)
ensure_directory(self._directory)
self._format = "." + params.output.format
self._debug = params.output.debug
# Decomposition axes
self._e1 = params.orientation_decomposition.e1
self._e2 = params.orientation_decomposition.e2
self._e3 = params.orientation_decomposition.e3
# cell plot
dat = []
for iexp, exp in enumerate(experiments):
crystal = exp.crystal
scan = exp.scan
if crystal.num_scan_points == 0:
print("Ignoring scan-static crystal")
continue
scan_pts = list(range(crystal.num_scan_points))
cells = [crystal.get_unit_cell_at_scan_point(t) for t in scan_pts]
cell_params = [e.parameters() for e in cells]
a, b, c, aa, bb, cc = zip(*cell_params)
phi = [scan.get_angle_from_array_index(t) for t in scan_pts]
vol = [e.volume() for e in cells]
cell_dat = {
"phi": phi,
"a": a,
"b": b,
"c": c,
"alpha": aa,
"beta": bb,
"gamma": cc,
"volume": vol,
}
try:
cell_esds = [
crystal.get_cell_parameter_sd_at_scan_point(t) for t in scan_pts
]
sig_a, sig_b, sig_c, sig_aa, sig_bb, sig_cc = zip(*cell_esds)
cell_dat["sig_a"] = sig_a
cell_dat["sig_b"] = sig_b
cell_dat["sig_c"] = sig_c
cell_dat["sig_aa"] = sig_aa
cell_dat["sig_bb"] = sig_bb
cell_dat["sig_cc"] = sig_cc
except RuntimeError:
pass
if self._debug:
print(f"Crystal in Experiment {iexp}")
print("Phi\ta\tb\tc\talpha\tbeta\tgamma\tVolume")
msg = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}"
line_dat = zip(phi, a, b, c, aa, bb, cc, vol)
for line in line_dat:
print(msg.format(*line))
dat.append(cell_dat)
if dat:
self.plot_cell(dat)
# orientation plot
dat = []
for iexp, exp in enumerate(experiments):
crystal = exp.crystal
scan = exp.scan
if crystal.num_scan_points == 0:
print("Ignoring scan-static crystal")
continue
scan_pts = list(range(crystal.num_scan_points))
phi = [scan.get_angle_from_array_index(t) for t in scan_pts]
Umats = [matrix.sqr(crystal.get_U_at_scan_point(t)) for t in scan_pts]
if params.orientation_decomposition.relative_to_static_orientation:
# factor out static U
Uinv = matrix.sqr(crystal.get_U()).inverse()
Umats = [U * Uinv for U in Umats]
# NB e3 and e1 definitions for the crystal are swapped compared
# with those used inside the solve_r3_rotation_for_angles_given_axes
# method
angles = [
solve_r3_rotation_for_angles_given_axes(
U, self._e3, self._e2, self._e1, deg=True
)
for U in Umats
]
phi3, phi2, phi1 = zip(*angles)
angle_dat = {"phi": phi, "phi3": phi3, "phi2": phi2, "phi1": phi1}
if self._debug:
print(f"Crystal in Experiment {iexp}")
print("Image\tphi3\tphi2\tphi1")
msg = "{0}\t{1}\t{2}\t{3}"
line_dat = zip(phi, phi3, phi2, phi1)
for line in line_dat:
print(msg.format(*line))
dat.append(angle_dat)
if dat:
self.plot_orientation(dat)
# beam centre plot
dat = []
for iexp, exp in enumerate(experiments):
beam = exp.beam
detector = exp.detector
scan = exp.scan
if beam.num_scan_points == 0:
print("Ignoring scan-static beam")
continue
scan_pts = range(beam.num_scan_points)
phi = [scan.get_angle_from_array_index(t) for t in scan_pts]
p = detector.get_panel_intersection(beam.get_s0())
if p < 0:
print("Beam does not intersect a panel")
continue
panel = detector[p]
s0_scan_points = [
beam.get_s0_at_scan_point(i) for i in range(beam.num_scan_points)
]
bc_scan_points = [panel.get_beam_centre_px(s0) for s0 in s0_scan_points]
bc_x, bc_y = zip(*bc_scan_points)
dat.append({"phi": phi, "beam_centre_x": bc_x, "beam_centre_y": bc_y})
if dat:
self.plot_beam_centre(dat)
def plot_cell(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(4, 2, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_a" in cell:
ax.errorbar(
cell["phi"][0::20], cell["a"][0::20], yerr=cell["sig_a"][0::20]
)
plt.plot(cell["phi"], cell["a"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("a")
ax = plt.subplot(gs[0, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_aa" in cell:
ax.errorbar(
cell["phi"][0::20], cell["alpha"][0::20], yerr=cell["sig_aa"][0::20]
)
plt.plot(cell["phi"], cell["alpha"])
# choose the widest y range
ymin = min(ymin, min(cell["alpha"]) - 0.1)
ymax = max(ymax, max(cell["alpha"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\alpha$")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_b" in cell:
ax.errorbar(
cell["phi"][0::20], cell["b"][0::20], yerr=cell["sig_b"][0::20]
)
plt.plot(cell["phi"], cell["b"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("b")
ax = plt.subplot(gs[1, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_bb" in cell:
ax.errorbar(
cell["phi"][0::20], cell["beta"][0::20], yerr=cell["sig_bb"][0::20]
)
plt.plot(cell["phi"], cell["beta"])
# choose the widest y range
ymin = min(ymin, min(cell["beta"]) - 0.1)
ymax = max(ymax, max(cell["beta"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\beta$")
ax = plt.subplot(gs[2, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_c" in cell:
ax.errorbar(
cell["phi"][0::20], cell["c"][0::20], yerr=cell["sig_c"][0::20]
)
plt.plot(cell["phi"], cell["c"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("c")
ax = plt.subplot(gs[2, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_cc" in cell:
ax.errorbar(
cell["phi"][0::20], cell["gamma"][0::20], yerr=cell["sig_cc"][0::20]
)
plt.plot(cell["phi"], cell["gamma"])
# choose the widest y range
ymin = min(ymin, min(cell["gamma"]) - 0.1)
ymax = max(ymax, max(cell["gamma"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\gamma$")
ax = plt.subplot2grid((4, 2), (3, 0), colspan=2)
ax.ticklabel_format(useOffset=False)
for cell in dat:
plt.plot(cell["phi"], cell["volume"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"volume $\left(\AA^3\right)$")
plt.title("Cell volume")
basename = os.path.join(self._directory, "unit_cell")
fullname = basename + self._format
print(f"Saving unit cell plot to {fullname}")
plt.savefig(fullname)
def plot_orientation(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(3, 1, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi1"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_1$")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi2"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_2$")
ax = plt.subplot(gs[2, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi3"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_3$")
basename = os.path.join(self._directory, "orientation")
fullname = basename + self._format
print(f"Saving orientation plot to {fullname}")
plt.savefig(fullname)
def plot_beam_centre(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(2, 1, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 0.0, 0.0
for bc in dat:
plt.plot(bc["phi"], bc["beam_centre_x"])
ymin = max(ymin, min(bc["beam_centre_x"]) - 0.1)
ymax = max(ymax, max(bc["beam_centre_x"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"X (pixels)")
plt.title(r"Beam centre X (pixels)")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 0.0, 0.0
for bc in dat:
plt.plot(bc["phi"], bc["beam_centre_y"])
ymin = max(ymin, min(bc["beam_centre_y"]) - 0.1)
ymax = max(ymax, max(bc["beam_centre_y"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"Y (pixels)")
plt.title(r"Beam centre Y (pixels)")
basename = os.path.join(self._directory, "beam_centre")
fullname = basename + self._format
print(f"Saving beam centre plot to {fullname}")
plt.savefig(fullname)
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
|
bsd-3-clause
| 8,369,115,470,694,126,000 | 33.207729 | 88 | 0.520972 | false |
bauman/python-idzip
|
idzip/decompressor.py
|
1
|
11157
|
import os
import struct
import zlib
import itertools
from io import BytesIO, open
from idzip import compressor, caching
from idzip._stream import IOStreamWrapperMixin
GZIP_CRC32_LEN = 4
SELECTED_CACHE = caching.OneItemCache
class IdzipReader(IOStreamWrapperMixin):
def __init__(self, filename=None, fileobj=None):
if filename is None:
if fileobj:
self._fileobj = fileobj
self._should_close = False
try:
self.name = fileobj.name
except AttributeError:
self.name = ''
else:
raise ValueError("Must provide a filename or a fileobj argument")
else:
self.name = filename
self._should_close = True
self._fileobj = open(filename, "rb")
# The current position in the decompressed data.
self._pos = 0
self._members = []
self._last_zstream_end = None
self._chunks = []
self._cache = SELECTED_CACHE()
self._read_member_header()
@property
def stream(self):
return self._fileobj
def _read_member_header(self):
"""Extends self._members and self._chunks
by the read header data.
"""
header = _read_gzip_header(self._fileobj)
offset = self._fileobj.tell()
if "RA" not in header["extra_field"]:
try:
if self._fileobj.seekable():
self.stream.seek(0)
except AttributeError:
pass
raise IOError("Not an idzip file: %r" % self.name)
dictzip_field = _parse_dictzip_field(header["extra_field"]["RA"])
num_member_chunks = len(dictzip_field["zlengths"])
start_chunk_index = len(self._chunks)
for zlen in dictzip_field["zlengths"]:
self._chunks.append((offset, zlen))
offset += zlen
self._last_zstream_end = offset
chlen = dictzip_field["chlen"]
sure_size = chlen * (num_member_chunks - 1)
self._add_member(chlen, start_chunk_index, sure_size)
def _add_member(self, chlen, start_chunk_index, sure_size):
if len(self._members) > 0:
prev_member = self._members[-1]
start_pos = prev_member.start_pos + prev_member.isize
else:
start_pos = 0
self._members.append(_Member(chlen, start_pos, start_chunk_index,
sure_size))
def read(self, size=-1):
"""Reads the given number of bytes.
It returns less bytes if EOF was reached.
A negative size means unlimited reading.
"""
chunk_index, prefix_size = self._index_pos(self._pos)
prefixed_buffer = []
try:
if size < 0:
while True:
prefixed_buffer.append(self._readchunk(chunk_index))
chunk_index += 1
else:
need = prefix_size + size
while need > 0:
chunk_data = self._readchunk(chunk_index)
prefixed_buffer.append(chunk_data[:need])
need -= len(chunk_data)
chunk_index += 1
except EOFError:
# The read data will be returned.
pass
prefixed_buffer = b"".join(prefixed_buffer)
result = prefixed_buffer[prefix_size:]
self._pos += len(result)
return result
def readline(self, size=-1):
chunk_index, prefix_size = self._index_pos(self._pos)
line = b""
while True:
try:
data = self._readchunk(chunk_index)
except EOFError:
break
chunk_index += 1
eol_pos = data.find(b"\n", prefix_size)
if eol_pos != -1:
line += data[prefix_size:eol_pos+1]
break
line += data[prefix_size:]
prefix_size = 0
if size >= 0 and len(line) >= size:
break
if size >= 0:
line = line[:size]
self._pos += len(line)
return line
def flush(self):
"""No-op, but needed by IdzipFile.flush(), which is called
if wrapped in TextIOWrapper."""
pass
def close(self):
if self._should_close:
self._fileobj.close()
self._cache = None
def _index_pos(self, pos):
"""Returns (chunk_index, remainder) index
for the given position in uncompressed data.
"""
member = self._select_member(pos)
pos_in_member = (pos - member.start_pos)
member_chunk_index = pos_in_member // member.chlen
chunk_index = member.start_chunk_index + member_chunk_index
remainder = pos_in_member % member.chlen
return (chunk_index, remainder)
def _select_member(self, pos):
"""Returns a member that covers the given pos.
If the pos is after the EOF, the last member is returned.
The EOF will be hit when reading from it.
"""
try:
for i in itertools.count():
if i >= len(self._members):
return self._members[-1]
member = self._members[i]
if pos < member.start_pos + member.sure_size:
return member
if member.isize is None:
self._parse_next_member()
if pos < member.start_pos + member.isize:
return member
except EOFError:
return self._members[-1]
def _readchunk(self, chunk_index):
"""Reads the specified chunk or throws EOFError.
"""
chunk = self._cache.get(chunk_index)
if chunk is not None:
return chunk
chunk = self._uncached_readchunk(chunk_index)
self._cache.put(chunk_index, chunk)
return chunk
def _uncached_readchunk(self, chunk_index):
while chunk_index >= len(self._chunks):
self._parse_next_member()
offset, zlen = self._chunks[chunk_index]
self._fileobj.seek(offset)
compressed = _read_exactly(self._fileobj, zlen)
deobj = zlib.decompressobj(-zlib.MAX_WBITS)
return deobj.decompress(compressed)
def _parse_next_member(self):
self._reach_member_end()
self._read_member_header()
def _reach_member_end(self):
"""Seeks the _fileobj at the end of the last known member.
"""
self._fileobj.seek(self._last_zstream_end)
# The zlib stream could end with an empty block.
deobj = zlib.decompressobj(-zlib.MAX_WBITS)
extra = b""
while deobj.unused_data == b"" and not extra:
extra += deobj.decompress(self._fileobj.read(3))
extra += deobj.flush()
if extra != b"":
raise IOError("Found extra compressed data after chunks.")
self._fileobj.seek(GZIP_CRC32_LEN - len(deobj.unused_data),
os.SEEK_CUR)
isize = _read32(self._fileobj)
self._members[-1].set_input_size(isize)
def tell(self):
return self._pos
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
new_pos = offset
elif whence == os.SEEK_CUR:
new_pos = self._pos + offset
elif whence == os.SEEK_END:
raise ValueError("Seek from the end not supported")
else:
raise ValueError("Unknown whence: %r" % whence)
if new_pos < 0:
raise ValueError("Invalid pos: %r" % new_pos)
self._pos = new_pos
def __repr__(self):
return "<idzip %s file %r at %s>" % (
"open" if not self.closed else "closed",
self.name,
hex(id(self)))
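# --- Illustrative usage sketch (not part of the original module) ---
# Random access into an idzip-compressed file; the path is hypothetical.
def _example_random_access(path="data.dz"):
    reader = IdzipReader(filename=path)
    reader.seek(4096)          # seek in *uncompressed* coordinates
    data = reader.read(100)    # decompresses only the chunks it needs
    reader.close()
    return data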
class _Member(object):
def __init__(self, chlen, start_pos, start_chunk_index, sure_size):
self.chlen = chlen
self.start_pos = start_pos
self.start_chunk_index = start_chunk_index
self.sure_size = sure_size
self.isize = None
def set_input_size(self, isize):
assert isize >= self.sure_size
self.isize = isize
def _read_gzip_header(input):
"""Returns a parsed gzip header.
The position of the input is advanced beyond the header.
EOFError is thrown if there is not enough of data for the header.
"""
header = {
"extra_field": {}
}
magic, flags, mtime = struct.unpack("<3sBIxx", _read_exactly(input, 10))
if magic != compressor.GZIP_DEFLATE_ID:
raise IOError("Not a gzip-deflate file.")
if compressor.FRESERVED & flags:
raise IOError("Unknown reserved flags: %s" % flags)
if compressor.FEXTRA & flags:
xlen = _read16(input)
extra_field = input.read(xlen)
header["extra_field"] = _split_subfields(extra_field)
if compressor.FNAME & flags:
_skip_cstring(input)
if compressor.FCOMMENT & flags:
_skip_cstring(input)
if compressor.FHCRC & flags:
# Skips header CRC
input.read(2)
return header
def _read_exactly(input, size):
data = input.read(size)
if len(data) != size:
raise EOFError("Reached EOF")
return data
def _read16(input):
"""Reads next two bytes as an unsigned integer.
"""
return struct.unpack("<H", _read_exactly(input, 2))[0]
def _read32(input):
"""Reads next four bytes as an unsigned integer.
"""
return struct.unpack("<I", _read_exactly(input, 4))[0]
def _split_subfields(extra_field):
"""Returns a dict with {sub_id: subfield_data} entries.
The extra field contains a variable number of bytes
for each subfields:
+---+---+---+---+===============================+
|SUB_ID | LEN | LEN bytes of subfield data ...|
+---+---+---+---+===============================+
"""
input = BytesIO(extra_field)
sub_fields = {}
while True:
sub_id = input.read(2)
if not sub_id:
return sub_fields
data_len = _read16(input)
sub_id = sub_id.decode("UTF-8")
sub_fields[sub_id] = input.read(data_len)
def _skip_cstring(input):
"""Reads and discards a zero-terminated string.
"""
while True:
c = input.read(1)
if not c or c == b"\0":
return
def _parse_dictzip_field(subfield):
"""Returns a dict with:
chlen ... length of each uncompressed chunk,
zlengths ... lengths of compressed chunks.
The dictzip subfield consists of:
+---+---+---+---+---+---+==============================================+
| VER=1 | CHLEN | CHCNT | CHCNT 2-byte lengths of compressed chunks ...|
+---+---+---+---+---+---+==============================================+
"""
input = BytesIO(subfield)
ver, chlen, chunk_count = struct.unpack("<HHH", input.read(6))
if ver != 1:
raise IOError("Unsupported dictzip version: %s" % ver)
zlengths = []
for i in range(chunk_count):
zlengths.append(_read16(input))
return dict(chlen=chlen, zlengths=zlengths)
IdzipFile = IdzipReader
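# --- Illustrative sketch (not part of the original module) ---
# Builds a minimal version-1 dictzip "RA" subfield with two compressed
# chunks and feeds it to the parser above; the lengths are made-up values.
def _example_parse_dictzip_field():
    subfield = struct.pack("<HHHHH", 1, 58315, 2, 1200, 1048)
    parsed = _parse_dictzip_field(subfield)
    assert parsed == {"chlen": 58315, "zlengths": [1200, 1048]}
    return parsed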
|
mit
| -9,082,215,199,775,828,000 | 29.735537 | 81 | 0.546204 | false |
dasmarci/check_mk
|
fireeye/checks/fireeye_psu.py
|
1
|
1163
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
def inventory_fireeye_psu(info):
if info:
inventory = []
for (name, status_txt, healthy) in info:
inventory.append((name, None))
return inventory
def check_fireeye_psu(item, _params, info):
for line in info:
name, status_txt, healthy = line
if name == item:
rc = 0
label = ''
msgtxt = 'healthy'
healthy = fireeye_truefalse[healthy]
if not healthy:
rc = 1
label = '(!)'
msgtxt = "un%s" % msgtxt
return rc, "PowerSupply %s is %s (\"%s\")%s" % (name, msgtxt, status_txt, label)
return 3, "item not found in SNMP data"
check_info["fireeye_psu"] = {
"check_function" : check_fireeye_psu,
"inventory_function" : inventory_fireeye_psu,
"has_perfdata" : False,
"service_description" : "FE PowerSupply %s",
"snmp_info" : (".1.3.6.1.4.1.25597.11.3.1.3.1", [ 1, 2, 3 ]),
"snmp_scan_function" : fireeye_snmp_scan,
"includes" : [ "fireeye.include" ],
}
|
gpl-2.0
| -697,093,828,381,229,300 | 26.690476 | 92 | 0.517627 | false |
foglamp/FogLAMP
|
tests/system/lab/scripts/trendc.py
|
1
|
1687
|
# -*- coding: utf-8 -*-
""" Predict up/down trend in data which has momentum
"""
import json
# exponential moving average rate default values
# short-term: include 15% of current value in ongoing average (and 85% of history)
rate_short = 0.15
# long-term: include 7% of current value
rate_long = 0.07
# short-term and long-term averages.
ema_short = ema_long = None
# trend of data: 5: down / 10: up. Start with up.
trend = 10
# get configuration if provided.
# set this JSON string in configuration:
# {"rate_short":0.15, "rate_long":0.07}
def set_filter_config(configuration):
global rate_short, rate_long
filter_config = json.loads(configuration['config'])
if 'rate_short' in filter_config:
rate_short = filter_config['rate_short']
if 'rate_long' in filter_config:
rate_long = filter_config['rate_long']
return True
# Process a reading
def doit(reading):
global rate_short, rate_long # config
global ema_short, ema_long, trend # internal variables
for attribute in list(reading):
if not ema_long:
ema_long = ema_short = reading[attribute]
else:
ema_long = reading[attribute] * rate_long + ema_long * (1 - rate_long)
reading[b'ema_long'] = ema_long
ema_short = reading[attribute] * rate_short + ema_short * (1 - rate_short)
reading[b'ema_short'] = ema_short
if(trend == 10) != (ema_short > ema_long):
trend = 5 if trend == 10 else 10
reading[b'trend'] = trend
# process one or more readings
def trendc(readings):
for elem in list(readings):
doit(elem['reading'])
return readings
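# --- Illustrative sketch (not part of the original filter) ---
# Feeds a few synthetic readings through trendc() to show the ema_short,
# ema_long and trend keys it adds; the reading structure is an assumption.
def _example_run():
    sample = [{'reading': {'sinusoid': v}} for v in (0.0, 0.5, 1.0, 0.5, 0.0)]
    return trendc(sample)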
|
apache-2.0
| 7,264,927,168,722,952,000 | 29.125 | 86 | 0.630705 | false |
reckbo/ppl
|
scripts/makesoft.py
|
1
|
8257
|
#!/usr/bin/env python
from __future__ import print_function
from plumbum import local, FG, cli
from plumbum.cmd import git, cmake, make, chmod, curl, tar
import logging
from util import logfmt, TemporaryDirectory
import sys
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format=logfmt(__file__))
def downloadGithubArchive(ownerrepo, commit='master'):
"""Makes 'repo-<commit>' directory."""
url = 'https://github.com/{ownerrepo}/archive/{commit}.tar.gz'.format(**locals())
repo = ownerrepo.split('/')[1]
(curl['-L', url] | tar['xz'])()
return local.path(repo+'-'+commit)
def getCommitInfo(repo_path):
with local.cwd(local.path(repo_path)):
sha = git('rev-parse', '--short', 'HEAD')[:-1]
date = git('show', '-s', '--format=%cd', '--date=short')[:-1]
return (sha, date)
def downloadGithubRepo(ownerrepo, commit='master'):
url = 'https://github.com/{ownerrepo}.git'.format(**locals())
repo = ownerrepo.split('/')[1]
if not local.path(repo).exists():
git('clone', url)
with local.cwd(repo):
git('checkout', commit)
return local.path(repo)
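# --- Illustrative sketch (not part of the original script) ---
# Shows the intended calling pattern of the two download helpers above; the
# destination path and the pinned repository are assumptions.
def _example_fetch(dest):
    with local.cwd(dest):
        repo = downloadGithubRepo('demianw/tract_querier', 'master')
        sha, date = getCommitInfo(repo)
        return repo, sha, date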
class MakeSoftware(cli.Application):
"""Software installer."""
dest = cli.SwitchAttr(['-d', '--dest'], cli.ExistingDirectory, help="Root directory in which to install repo", envname='soft')
commit = cli.SwitchAttr(['-c', '--commit'], help='GitHub hash commit. If omitted will get latest commit from the master branch.'
,mandatory=False
,default="master")
def main(self, *args):
if args:
print("Unknown command {0!r}".format(args[0]))
return 1
if not self.nested_command:
print("No command given")
return 1 # error exit code
@MakeSoftware.subcommand("brainstools")
class BrainsTools(cli.Application):
"""Downloads and compiles BRAINSTools binaries. Output is 'BRAINSTools-bin-<hash>'."""
def main(self):
blddir = self.parent.dest / "BRAINSTools-build"
with local.cwd(self.parent.dest):
repo = downloadGithubRepo('BRAINSia/BRAINSTools', self.parent.commit)
sha, date = getCommitInfo(repo)
logging.info("Build code:")
blddir.mkdir()
with local.cwd(blddir):
cmake(repo
,"-DBRAINSTools_INSTALL_DEVELOPMENT=OFF"
,"-DBRAINSTools_MAX_TEST_LEVEL=0"
,"-DBRAINSTools_SUPERBUILD=ON"
,"-DBRAINSTools_USE_QT=OFF"
,"-DBRAINS_DEBUG_IMAGE_WRITE=OFF"
,"-DBUILD_STYLE_UTILS=OFF"
,"-DBUILD_TESTING=OFF"
,"-DCMAKE_BUILD_TYPE=Release"
,"-DCMAKE_COLOR_MAKEFILE=ON"
,"-DCMAKE_EXE_LINKER_FLAGS=' '"
,"-DCMAKE_EXE_LINKER_FLAGS_DEBUG="
,"-DCMAKE_EXE_LINKER_FLAGS_MINSIZEREL="
,"-DCMAKE_EXE_LINKER_FLAGS_RELEASE="
,"-DCMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO="
,"-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF"
,"-DCMAKE_INSTALL_PREFIX:PATH=/usr/local"
,"-DCMAKE_MODULE_LINKER_FLAGS=' '"
,"-DCMAKE_MODULE_LINKER_FLAGS_DEBUG="
,"-DCMAKE_MODULE_LINKER_FLAGS_MINSIZEREL="
,"-DCMAKE_MODULE_LINKER_FLAGS_RELEASE="
,"-DCMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO="
,"-DCMAKE_PROJECT_NAME:STATIC=SuperBuild_BRAINSTools"
,"-DCMAKE_SHARED_LINKER_FLAGS=' '"
,"-DCMAKE_SHARED_LINKER_FLAGS_DEBUG="
,"-DCMAKE_SHARED_LINKER_FLAGS_MINSIZEREL="
,"-DCMAKE_SHARED_LINKER_FLAGS_RELEASE="
,"-DCMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO="
,"-DCMAKE_SKIP_INSTALL_RPATH=NO"
,"-DCMAKE_SKIP_RPATH=NO"
,"-DCMAKE_STATIC_LINKER_FLAGS="
,"-DCMAKE_STATIC_LINKER_FLAGS_DEBUG="
,"-DCMAKE_STATIC_LINKER_FLAGS_MINSIZEREL="
,"-DCMAKE_STATIC_LINKER_FLAGS_RELEASE="
,"-DCMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO="
,"-DCMAKE_USE_RELATIVE_PATHS=OFF"
,"-DCMAKE_VERBOSE_MAKEFILE=FALSE"
,"-DCOVERAGE_EXTRA_FLAGS=-l"
,"-DCTEST_SUBMIT_RETRY_COUNT=3"
,"-DCTEST_SUBMIT_RETRY_DELAY=5"
,"-DDART_TESTING_TIMEOUT=1500"
,"-DEXTERNAL_PROJECT_BUILD_TYPE=Release"
,"-DFORCE_EXTERNAL_BUILDS=OFF"
,"-DITK_VERSION_MAJOR=4"
,"-DSuperBuild_BRAINSTools_BUILD_DICOM_SUPPORT=ON"
,"-DSuperBuild_BRAINSTools_USE_CTKAPPLAUNCHER=OFF"
,"-DSuperBuild_BRAINSTools_USE_GIT_PROTOCOL=ON"
,"-DUSE_ANTS=ON"
,"-DUSE_AutoWorkup=OFF"
,"-DUSE_BRAINSABC=OFF"
,"-DUSE_BRAINSConstellationDetector=OFF"
,"-DUSE_BRAINSContinuousClass=OFF"
,"-DUSE_BRAINSCreateLabelMapFromProbabilityMaps=OFF"
,"-DUSE_BRAINSCut=OFF"
,"-DUSE_BRAINSDWICleanup=OFF"
,"-DUSE_BRAINSDemonWarp=OFF"
,"-DUSE_BRAINSFit=OFF"
,"-DUSE_BRAINSInitializedControlPoints=OFF"
,"-DUSE_BRAINSLabelStats=OFF"
,"-DUSE_BRAINSLandmarkInitializer=OFF"
,"-DUSE_BRAINSMultiModeSegment=OFF"
,"-DUSE_BRAINSMultiSTAPLE=OFF"
,"-DUSE_BRAINSMush=OFF"
,"-DUSE_BRAINSPosteriorToContinuousClass=OFF"
,"-DUSE_BRAINSROIAuto=OFF"
,"-DUSE_BRAINSResample=OFF"
,"-DUSE_BRAINSSnapShotWriter=OFF"
,"-DUSE_BRAINSStripRotation=OFF"
,"-DUSE_BRAINSSurfaceTools=OFF"
,"-DUSE_BRAINSTalairach=OFF"
,"-DUSE_BRAINSTransformConvert=OFF"
,"-DUSE_ConvertBetweenFileFormats=ON"
,"-DUSE_DWIConvert=ON"
,"-DUSE_DebugImageViewer=OFF"
,"-DUSE_GTRACT=OFF"
,"-DUSE_ICCDEF=OFF"
,"-DUSE_ImageCalculator=OFF"
,"-DUSE_ReferenceAtlas=OFF"
,"-DUSE_SYSTEM_DCMTK=OFF"
,"-DUSE_SYSTEM_ITK=OFF"
,"-DUSE_SYSTEM_SlicerExecutionModel=OFF"
,"-DUSE_SYSTEM_VTK=OFF"
,"-DVTK_GIT_REPOSITORY=git://vtk.org/VTK.git"
)
make['all'] & FG
out = self.parent.dest / 'BRAINSTools-bin-'+sha
symlink = self.parent.dest / 'BRAINSTools-bin-'+date
(blddir / 'bin').move(out)
(blddir / 'ANTs/Scripts/antsRegistrationSyN.sh').copy(out)
chmod('a-w', out // '*')
chmod('a-w', out)
out.symlink(symlink)
def installTraining(repo, commit):
archive = downloadGithubArchive('pnlbwh/'+repo, commit)
archive.move(dest/repo)
with local.cwd(dest/repo):
from plumbum.cmd import bash
bash('./mktrainingcsv.sh', '.')
chmod('a-w', '*')
@MakeSoftware.subcommand("t2s")
class T2s(cli.Application):
"""Downloads t2 training set (has masks only). Makes '<dest>/trainingDataT2Masks"""
def main(self):
installTraining('trainingDataT2Masks', self.parent.commit)
@MakeSoftware.subcommand("t1s")
class T1s(cli.Application):
"""Downloads t1 training set. Has masks, amygdala-hippocampus (left/right), and cingulate (left/right). Makes '<dest>/trainingDataT1AHCC'"""
def main(self):
installTraining('trainingDataT1AHCC', self.parent.commit)
@MakeSoftware.subcommand("tractquerier")
class App(cli.Application):
"""Makes a read-only version of tract_querier. Output is '<dest>/tract_querier-<commit>'."""
def main(self):
with TemporaryDirectory() as tmpdir, local.cwd(tmpdir):
repo = downloadGithubRepo('demianw/tract_querier', self.parent.commit)
sha, date = getCommitInfo(repo)
# save space
(repo / 'doc').delete()
(repo / '.git').delete()
outdir = local.path(self.parent.dest / 'tract_querier-' + sha)
if outdir.exists():
logging.warning(outdir + ' already exists, quitting.')
sys.exit(0)
logging.info("Make '{outdir}'".format(**locals()))
repo.move(outdir)
chmod('-R', 'a-w', outdir)
chmod('a-w', outdir)
date_symlink = self.parent.dest / 'tract_querier-' + date
outdir.symlink(date_symlink)
if __name__ == '__main__':
MakeSoftware.run()
|
bsd-3-clause
| -7,629,800,266,403,862,000 | 40.70202 | 144 | 0.589439 | false |
ScoffM/ITESO-Word2Vec
|
Salaries_Woeization.py
|
1
|
3115
|
import pandas as pd
import numpy as np
import math
import time
#Replace the nan values with the string True_nan in a dataframe's column
def eliminate_nan(col):
trueNan = pd.isnull(col)
indexs = trueNan[ trueNan == True].index.tolist()
col[indexs] = 'True_nan'
return col
#colnames is a list of names of the columns to be transformed
#Should either:
# a) Be ["ContractType", "ContractTime", "Category", "SourceName"]
# b) Pass data with only the above columns and use colnames.values
# The NaN's might have to be transformed before woeization can be completed.
#This function returns a dataframe with woe values just with the specified columns
def woeization(data, target_variable, colnames):
import numpy as np
import math
my_median = math.floor(data[target_variable].median())
true_all = sum(data[target_variable] >= my_median)
false_all = sum(data[target_variable] < my_median)
for x in range(len(colnames)):
#If the column has any nan value, the nan replacement is applied
if data[colnames[x]].isnull().values.any() == True:
data[colnames[x]] = eliminate_nan(data[colnames[x]])
xx = data[colnames[x]] # In each loop, set xx for an entire column
my_cat = np.unique(xx).tolist() # List of unique categories on my column xx
for y in range(len(my_cat)):
true = sum((xx == my_cat[y]) & (data[target_variable] >= my_median))
false = sum((xx == my_cat[y]) & (data[target_variable] < my_median))
# If the data is completely skewed towards a "side"
# Make it slightly larger than 0 to get out of the undefined zones of log(x) and 1/x
if true == 0:
true = 0.001
if false == 0:
false = 0.001
# Calculate WoE
true_per = float(true) / true_all
false_per = float(false) / false_all
div = float(true_per) / false_per
woe = math.log(div)
data.loc[data[colnames[x]] == my_cat[y], colnames[x]] = woe
data = data[(colnames + [target_variable])]
return data
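# Editor's sketch (not part of the original script): a minimal, hypothetical
# illustration of the woeization call on a toy frame; the column and salary
# values below are made up purely to show the call signature.
def _woeization_example():
    toy = pd.DataFrame({'ContractType': ['full_time', 'part_time', None, 'full_time'],
                        'SalaryNormalized': [42000, 18000, 25000, 51000]})
    return woeization(data=toy, target_variable='SalaryNormalized',
                      colnames=['ContractType'])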
# Run as standalone to get a modified dataframe, else import to get the modified features
def main():
global_start = time.time()
path = "data/Train_Rev1.csv"
target_variable = "SalaryNormalized"
colnames = ['ContractType', 'ContractTime', 'Category', 'SourceName']
def identity(x):
return x
# This allegedly speeds up loading, as it tells pandas to load those columns as strings
converters = { "FullDescription" : identity
, "Title": identity
, "LocationRaw": identity
, "LocationNormalized": identity
}
print "Loading Data..."
data = pd.read_csv(path, converters=converters)
print "Done!"
print "Initializing Data Transformation"
data_woe= woeization(data=data, target_variable=target_variable, colnames=colnames)
data_woe.to_csv('data/WoE_Features.csv')
if __name__=="__main__":
main()
|
gpl-3.0
| 3,483,683,236,759,404,000 | 39.533333 | 98 | 0.609952 | false |
hugoatease/encas
|
model/transaction.py
|
1
|
5155
|
# Encas Sales Management Server
# Copyright 2013 - Hugo Caille
#
# This file is part of Encas.
#
# Encas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Encas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Encas. If not, see <http://www.gnu.org/licenses/>.
from database import db
from database import Transaction as TransactionModel
from database import Account as AccountModel
from errors import ApiError
from math import isinf
import account
import config
class Transaction:
@staticmethod
def available(account_id):
if db.session.query(TransactionModel).filter_by(account=account_id).count() == 0:
return 1
else:
result = db.session.query(TransactionModel).filter_by(account=account_id).order_by("operation desc").first()
return result.operation + 1
@staticmethod
def get(transaction_id):
result = db.session.query(TransactionModel).get(transaction_id)
if result is None:
raise ApiError("Transaction not found")
return result
@staticmethod
def list():
transactions = db.session.query(TransactionModel).join(AccountModel, AccountModel.id == TransactionModel.account) \
.filter(AccountModel.deleted == False).order_by("date desc").all()
for transaction in transactions:
acc = account.Account.get(transaction.account)
transaction.account_number = acc.number
transaction.account_firstname = acc.firstname
transaction.account_lastname = acc.lastname
transaction.to_serialize += ['account_number', 'account_firstname', 'account_lastname']
return transactions
@staticmethod
def getByAccount(account_id, max=None, exclude_revoked=False):
account.Account.get(account_id) # Raises exception if account doesn't exist.
query = db.session.query(TransactionModel).filter_by(account=account_id)
if exclude_revoked:
query = query.filter_by(revoked=False)
query = query.order_by("operation desc")
if max is not None:
query = query.limit(max)
return query.all()
@classmethod
def getBalance(self, account_id, verify=True, verify_depth=5):
transactions = self.getByAccount(account_id, verify_depth)
if len(transactions) == 0:
return 0
balance = transactions[0].balance
if verify:
transactions.reverse()
cash = transactions[0].balance
for transaction in transactions[1:]:
cash += transaction.cash
if transaction.balance != cash:
message = "Account balance verification failed: " \
+ "Operation " + str(transaction.operation) + " is corrupted."
raise ApiError(message)
if cash != balance:
raise ApiError("Account balance verification failed.")
return balance
@classmethod
def calculateBalance(self, account_id):
transactions = self.getByAccount(account_id, None, False)
cash = 0
for transaction in transactions:
cash += transaction.cash
if isinf(cash):
raise ApiError("Transaction can't be created : new account balance is out of bounds.")
return cash
@classmethod
def add(self, account_id, cash):
if isinf(cash):
raise ApiError("Transaction can't be created : entered price is too big.")
try:
last = self.getByAccount(account_id, 1)[0]
except:
last = None
if last is None:
balance = cash
else:
balance = last.balance + cash
if isinf(balance):
raise ApiError("Transaction can't be created : new account balance is out of bounds.")
if balance > config.MAX_BALANCE or balance < - config.MAX_BALANCE:
raise ApiError("Transaction makes account balance beyond the " + str(config.MAX_BALANCE) + " limit.")
transaction = TransactionModel(account=account_id, operation=self.available(account_id), cash=cash, balance=balance)
db.session.add(transaction)
db.session.commit()
return transaction
def __init__(self, transaction_id):
self.transaction = self.get(transaction_id)
def revoke(self):
self.transaction.revoked = True
db.session.add(self.transaction)
inverse = self.add(self.transaction.account, -self.transaction.cash)
inverse.revokes = self.transaction.id
db.session.add(inverse)
db.session.commit()
return self.transaction
|
gpl-3.0
| 2,599,808,672,490,054,700 | 34.558621 | 124 | 0.642871 | false |
3ll3d00d/vibe
|
backend/src/analyser/common/uploadcontroller.py
|
1
|
9122
|
import glob
import logging
import os
import shutil
from pathlib import Path
import numpy as np
from core.reactor import Reactor
logger = logging.getLogger('analyser.upload')
CONVERT_WAV = 'cw'
class UploadController(object):
def __init__(self, uploadCfg):
self._tmpDir = uploadCfg['tmpDir']
self._uploadDir = uploadCfg['uploadDir']
self._watchdogInterval = uploadCfg['watchdogInterval']
self._uploadCache = self._scanUpload()
self._tmpCache = []
self._conversionCache = []
self._reactor = Reactor(name='converter', threads=uploadCfg['converterThreads'])
self._reactor.register(CONVERT_WAV, self._convertTmp)
self._findNewFiles()
def get(self):
"""
:return: metadata about all files in the cache.
"""
return self._uploadCache + self._tmpCache + self._conversionCache
def getEntry(self, name):
"""
:param name: the named wav.
:return: the cached info.
"""
return self._getCacheEntry(name)
def loadSignal(self, name, start=None, end=None):
"""
Loads the named entry from the upload cache as a signal.
:param name: the name.
:param start: the time to start from in HH:mm:ss.SSS format
:param end: the time to end at in HH:mm:ss.SSS format.
:return: the signal if the named upload exists.
"""
entry = self._getCacheEntry(name)
if entry is not None:
from analyser.common.signal import loadSignalFromWav
return loadSignalFromWav(entry['path'], start=start, end=end)
else:
return None
def _getCacheEntry(self, name):
"""
:param name: the name of the cache entry.
:return: the entry or none.
"""
return next((x for x in self._uploadCache if x['name'] == name), None)
def _scanUpload(self):
return [self._extractMeta(p, 'loaded') for p in glob.iglob(self._uploadDir + '/*.wav')]
def _extractMeta(self, fullPath, status):
from soundfile import info
sf = info(fullPath)
p = Path(fullPath)
return {
'status': status,
'path': sf.name,
'name': p.name,
'size': p.stat().st_size,
'duration': sf.duration,
'fs': sf.samplerate
}
def _watch(self):
import threading
from datetime import datetime
nextRun = datetime.utcnow().timestamp() + self._watchdogInterval
tilNextTime = max(nextRun - datetime.utcnow().timestamp(), 0)
logger.debug("Scheduling next scan in " + str(round(tilNextTime, 3)) + " seconds")
threading.Timer(tilNextTime, self._findNewFiles).start()
def _findNewFiles(self):
for tmp in self._scanTmp():
if not any(x['path'] == tmp['path'] for x in self._tmpCache):
self._tmpCache.append(tmp)
self._reactor.offer(CONVERT_WAV, (tmp,))
self._watch()
def _convertTmp(self, tmpCacheEntry):
"""
Moves a tmp file to the upload dir, resampling it if necessary, and then deletes the tmp entry.
:param tmpCacheEntry: the cache entry.
:return:
"""
from analyser.common.signal import loadSignalFromWav
tmpCacheEntry['status'] = 'converting'
logger.info("Loading " + tmpCacheEntry['path'])
signal = loadSignalFromWav(tmpCacheEntry['path'])
logger.info("Loaded " + tmpCacheEntry['path'])
if Path(tmpCacheEntry['path']).exists():
logger.info('Deleting ' + tmpCacheEntry['path'])
os.remove(tmpCacheEntry['path'])
else:
logger.warning('Tmp cache file does not exist: ' + tmpCacheEntry['path'])
self._tmpCache.remove(tmpCacheEntry)
self._conversionCache.append(tmpCacheEntry)
srcFs = signal.fs
completeSamples = signal.samples
outputFileName = os.path.join(self._uploadDir, tmpCacheEntry['name'])
if srcFs > 1024:
self.writeOutput(outputFileName, completeSamples, srcFs, 1000)
else:
self.writeOutput(outputFileName, completeSamples, srcFs, srcFs)
tmpCacheEntry['status'] = 'loaded'
self._conversionCache.remove(tmpCacheEntry)
self._uploadCache.append(self._extractMeta(outputFileName, 'loaded'))
def _scanTmp(self):
return [self._extractMeta(p, 'tmp') for p in glob.iglob(self._tmpDir + '/*.wav')]
def writeChunk(self, stream, filename, chunkIdx=None):
"""
Streams an uploaded chunk to a file.
:param stream: the binary stream that contains the file.
:param filename: the name of the file.
:param chunkIdx: optional chunk index (for writing to a tmp dir)
:return: no of bytes written or -1 if there was an error.
"""
import io
more = True
outputFileName = filename if chunkIdx is None else filename + '.' + str(chunkIdx)
outputDir = self._uploadDir if chunkIdx is None else self._tmpDir
chunkFilePath = os.path.join(outputDir, outputFileName)
if os.path.exists(chunkFilePath) and os.path.isfile(chunkFilePath):
logger.error('Uploaded file already exists: ' + chunkFilePath)
return -1
else:
chunkFile = open(chunkFilePath, 'xb')
count = 0
while more:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
chunkLen = len(chunk)
count += chunkLen
if chunkLen == 0:
more = False
else:
chunkFile.write(chunk)
return count
def finalise(self, filename, totalChunks, status):
"""
Completes the upload by concatenating the uploaded chunks into a single
file, which is later converted to a 1kHz sample rate output file.
:param filename: the name of the uploaded file.
:param totalChunks: the number of chunks that were uploaded.
:param status: 'true' if the client reports the upload succeeded.
:return:
"""
def getChunkIdx(x):
try:
return int(x.suffix[1:])
except ValueError:
return -1
def isChunkFile(x):
return x.is_file() and -1 < getChunkIdx(x) <= totalChunks
asSingleFile = os.path.join(self._tmpDir, filename)
if status.lower() == 'true':
chunks = [(getChunkIdx(file), str(file)) for file in
Path(self._tmpDir).glob(filename + '.*') if isChunkFile(file)]
# TODO if len(chunks) != totalChunks then error
with open(asSingleFile, 'xb') as wfd:
for f in [x[1] for x in sorted(chunks, key=lambda tup: tup[0])]:
with open(f, 'rb') as fd:
logger.info("cat " + f + " with " + asSingleFile)
shutil.copyfileobj(fd, wfd, 1024 * 1024 * 10)
self.cleanupChunks(filename, isChunkFile, status)
def cleanupChunks(self, filename, isChunkFile, status):
if status.lower() != 'true':
logger.warning('Upload failed for ' + filename + ', deleting all uploaded chunks')
toDelete = [file for file in Path(self._tmpDir).glob(filename + '.*') if isChunkFile(file)]
for file in toDelete:
if file.exists():
logger.info('Deleting ' + str(file))
os.remove(str(file))
def writeOutput(self, filename, samples, srcFs, targetFs):
"""
Resamples the signal to the targetFs and writes it to filename.
:param filename: the filename.
:param samples: the samples to resample.
:param srcFs: the source sample rate.
:param targetFs: the target fs.
:return: None
"""
import librosa
inputLength = samples.shape[-1]
if srcFs != targetFs:
if inputLength < targetFs:
logger.info("Input signal is too short (" + str(inputLength) +
" samples) for resampling to " + str(targetFs) + "Hz")
outputSamples = samples
targetFs = srcFs
else:
logger.info("Resampling " + str(inputLength) + " samples from " + str(srcFs) + "Hz to " +
str(targetFs) + "Hz")
outputSamples = librosa.resample(samples, srcFs, targetFs, res_type='kaiser_fast')
else:
outputSamples = samples
logger.info("Writing output to " + filename)
maxv = np.iinfo(np.int32).max
librosa.output.write_wav(filename, (outputSamples * maxv).astype(np.int32), targetFs)
logger.info("Output written to " + filename)
def delete(self, name):
"""
Deletes the named entry.
:param name: the entry.
:return: the deleted entry.
"""
i, entry = next(((i, x) for i, x in enumerate(self._uploadCache) if x['name'] == name), (None, None))
if entry is not None:
logger.info("Deleting " + name)
os.remove(str(entry['path']))
del self._uploadCache[i]
return entry
else:
logger.info("Unable to delete " + name + ", not found")
return None
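# Editor's sketch (illustrative only, not part of the original controller):
# driving the chunked upload protocol above locally. The wav path and chunk
# count are hypothetical; only writeChunk/finalise are real methods.
def _chunked_upload_example(controller, wav_path, chunk_count=4):
    import io
    data = open(wav_path, 'rb').read()
    size = len(data) // chunk_count + 1
    name = os.path.basename(wav_path)
    for idx in range(chunk_count):
        # each chunk lands in the tmp dir as <name>.<idx>
        chunk = data[idx * size:(idx + 1) * size]
        controller.writeChunk(io.BytesIO(chunk), name, chunkIdx=idx)
    # 'true' tells finalise the upload succeeded so the chunks get concatenated
    controller.finalise(name, chunk_count, 'true')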
|
mit
| 2,325,367,406,157,679,000 | 37.817021 | 109 | 0.578711 | false |
rhpit/paws
|
paws/providers/libvirt_kvm.py
|
1
|
24880
|
#
# paws -- provision automated windows and services
# Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from logging import getLogger
from subprocess import PIPE
from xml.etree import ElementTree as ET
import libvirt
from click import style
from click.termui import progressbar
from libvirt import libvirtError
from os import environ, getenv
from os.path import join, exists
from requests import HTTPError, RequestException, get
from paws.compat import urlopen
from paws.constants import LIBVIRT_OUTPUT, LIBVIRT_AUTH_HELP, \
ANSIBLE_INVENTORY_FILENAME
from paws.helpers import get_ssh_conn, file_mgmt, subprocess_call, cleanup, \
retry
from paws.lib.remote import create_inventory, inventory_init
"""
Libvirt provider, It is a wrapper interacting with Libvirt
QEMU-KVM through API based on official documentation
http://libvirt.org/html/libvirt-libvirt-domain.html
_kvm was appended to the module name to avoid conflict with official
libvirt-python module
PAWS assumptions to use libvirt provider in your system:
* libvirt is already installed
* qemu driver is installed (libvirt-daemon-driver-qemu)
* libvirt service is running
* libvirt authentication rule or policy is in place
* windows*.qcow file exists at path specified in resources.yaml
* windows*.xml file exists at path specified in resources.yaml
@attention: Libvirt requires permissions to execute some API calls and
to write new domains/vms. If you run PAWS as root or with sudo you can
skip the Libvirt authentication notes below, otherwise choose one of the
2 alternatives and set it up on the system where PAWS will be executed.
@note: Alternative 1 (RECOMMENDED) - polkit rule
Configure libvirt virt-manager without asking password. It is needed
to paws be able to communicate with libvirt without need
to run PAWS as sudo/root
--create new polkit rule
sudo vim /etc/polkit-1/rules.d/80-libvirt-manage.rules
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage"
&& subject.local && subject.active && subject.isInGroup("wheel")) {
return polkit.Result.YES;
}
});
--add your user to wheel group
usermod -a -G wheel $USER
source: https://goldmann.pl/blog/2012/12/03/\
configuring-polkit-in-fedora-18-to-access-virt-manager/
@note: Alternative 2 - Configuring libvirtd.conf
Change the configuration in /etc/libvirt/libvirtd.conf as follows:
1.In case it does not exist, create the group which should own the socket:
$ sudo groupadd libvirt
2. Add the desired users to the group:
$ sudo usermod -a -G libvirt username
or example using wheel group;
$ sudo usermod -a -G wheel username
3. Change the configuration in /etc/libvirt/libvirtd.conf as follows:
unix_sock_group = "libvirt" # or wheel if you prefer
unix_sock_rw_perms = "0770"
auth_unix_rw = "none"
4. Restart libvirtd:
$ sudo systemctl restart libvirtd
5. set LIBVIRT_DEFAULT_URI variable. It is handled by PAWS so you can skip
this step if you are interacting with libvirt by PAWS
$ export LIBVIRT_DEFAULT_URI=qemu:///system
6. testing, if there is any domain/vm in your system you should see after
run the command below without sudo:
$ virsh list --all
"""
LOG = getLogger(__name__)
# TODO: add variables back in constants
API_GET = ''
API_FIND = ''
class Libvirt(object):
""" Libvirt PAWS main class"""
__provider_name__ = 'libvirt'
def __init__(self, args):
self.args = args
self.userdir = args.userdir
self.resources = args.resources
self.credentials = args.credentials
self.resources_paws_file = args.resources_paws_file
self.verbose = args.verbose
self.util = Util(self)
self.inventory = join(self.userdir, ANSIBLE_INVENTORY_FILENAME)
self.resources_paws = None
# Libvirt domain/VM states
self.states = {
libvirt.VIR_DOMAIN_NOSTATE: 'no state',
libvirt.VIR_DOMAIN_RUNNING: 'running',
libvirt.VIR_DOMAIN_BLOCKED: 'blocked on resource',
libvirt.VIR_DOMAIN_PAUSED: 'paused by user',
libvirt.VIR_DOMAIN_SHUTDOWN: 'being shut down',
libvirt.VIR_DOMAIN_SHUTOFF: 'shut off',
libvirt.VIR_DOMAIN_CRASHED: 'crashed',
}
@property
def name(self):
"""Return provider name."""
return self.__provider_name__
def set_libvirt_env_var(self):
"""Set LIBVIRT_DEFAULT_URI system variable. It is required for some
API calls"""
if getenv('LIBVIRT_DEFAULT_URI', False) is False:
environ['LIBVIRT_DEFAULT_URI'] = self.credentials['qemu_instance']
def garbage_collector(self):
"""Garbage collector method. it knows which files to collect for this
provider and when asked/invoked it return all files to be deletec
in a list
:return garbage: all files to be deleted from this provider
:rtypr garbage: list
"""
garbage = [self.inventory,
join(self.userdir, LIBVIRT_OUTPUT)]
return garbage
def clean_files(self):
"""Clean files genereated by paws for Openstack provider. It will only
clean generated files when in non verbose mode.
"""
if not self.verbose:
cleanup(self.garbage_collector())
def provision(self):
""" Provision system resource(s) in Libvirt provider"""
# -- basic flow --
# assume qcow and xml files required already exist as declared in
# resources.yaml file
# check if files exists (windows*.qcow and windows*.xml)
# check if VM name based on resources.yaml already exists
# Start VM
# show VM info (network, etc)
self.set_libvirt_env_var()
# Create empty inventory file for localhost calls
inventory_init(self.inventory)
# check libvirt connection - validating authentication
conn = self.util.get_connection()
for elem in self.resources:
LOG.info('Working to provision %s VM on %s' % (elem['name'],
elem['provider']))
# check for required files
LOG.debug("Checking %s exist" % elem['disk_source'])
if not exists(elem['disk_source']):
LOG.error('File %s not found' % elem['disk_source'])
LOG.warn('check PAWS documentation %s' %
LIBVIRT_AUTH_HELP)
raise SystemExit(1)
# check for VM and delete/undefine in case already exist
if self.util.vm_exist(conn, elem['name']):
vm = self.util.find_vm_by_name(conn, elem['name'])
if vm:
self.util.stop_vm(vm)
self.util.delete_vm(conn, vm, flag=None)
self.util.create_vm_virtinstall(elem)
# get VM
vm = self.util.find_vm_by_name(conn, elem['name'])
try:
# get vm info
vm_info = self.util.get_vm_info(conn, elem)
# loop to get SSH connection with auto-retry
try:
get_ssh_conn(vm_info['ip'], elem['win_username'],
elem['win_password'])
except Exception as ex:
LOG.error(ex)
except Exception:
LOG.debug("An error happened during provision VM %s trying \
forced teardown" % elem['name'])
self.teardown()
# @attention Libvirt provider doesn't need hosts inventory file but
# it is required by Winsetup and Group.
# preparing resource to be compatible with ansible create inventory
elem['ip'] = vm_info['ip'] # append ip to resource
res = {}
list_resources = [elem]
res['resources'] = list_resources
create_inventory(self.inventory, res)
self.util.generate_resources_paws(conn)
return self.resources_paws
def teardown(self):
""" Provision system resource(s) in Openstack provider"""
self.set_libvirt_env_var()
conn = self.util.get_connection()
for elem in self.resources:
# get vm object and continue with teardown process (stop and del)
if self.util.vm_exist(conn, elem['name']):
vm = self.util.find_vm_by_name(conn, elem['name'])
if vm:
self.util.stop_vm(vm)
self.util.delete_vm(conn, vm, flag=None)
self.clean_files()
return {'resources': self.resources}
def show(self):
""" Provision system resource(s) in Openstack provider"""
self.set_libvirt_env_var()
conn = self.util.get_connection()
self.util.generate_resources_paws(conn)
return self.resources_paws
class Util(object):
"""
Util methods for Libvirt provider
"""
def __init__(self, args):
self.args = args
def get_connection(self):
""" Get connection with libvirt using QEMU driver and system
context
:return conn: connection with libvirt
:rtype conn: libvirt connection
"""
creds = self.args.credentials
if 'username' in creds:
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
creds['username'], None]
conn = libvirt.openAuth(creds['qemu_instance'], auth, 0)
else:
conn = libvirt.open(creds['qemu_instance'])
if conn is None:
LOG.error('Failed to open connection to %s',
creds['qemu_instance'])
LOG.warn('check PAWS documentation %s' % LIBVIRT_AUTH_HELP)
raise SystemExit(1)
LOG.debug("connected successfully to %s" % creds['qemu_instance'])
return conn
@staticmethod
def vm_exist(conn, vm_name):
""" check if the domain exists, may or may not be active
:param conn: Libvirt connection
:type conn: object
:param vm_name: name of vm or domain to check
:type vm_name: str
:return Boolean True|False
"""
all_vms = conn.listAllDomains()
for vm in all_vms:
if vm_name == vm.name():
return True
return False
@staticmethod
def download(link, file_dst, label):
"""Create HTTP request to download a file"""
# pad labels to the same size for a better visualization
# when printing the progress bar
fill_out = 20 - len(label)
extra = " " * fill_out
pbar_label = label + extra
LOG.debug("Starting download %s" % link)
try:
req = urlopen(link)
remote_file_size = int(req.headers.get('content-length'))
CHUNK = 16 * 1024
with open(file_dst, 'wb') as fp:
with progressbar(length=remote_file_size,
fill_char=style('#', fg='green'),
empty_char=' ',
label=pbar_label,
show_percent=True) as bar:
while True:
chunk = req.read(CHUNK)
if not chunk:
break
fp.write(chunk)
bar.update(len(chunk))
LOG.debug("Download complete, file %s saved locally" % file_dst)
except (HTTPError, RequestException, Exception) as ex:
raise ex
def donwload_image(self, image_name, dst_path, imgsrv_url):
"""Download QCOW and XML files required by libvirt to import
as local disk in qemu-kvm
:param image_name: file name
:type image_name: str
:param dst_path: local directory where the downloaded files are saved
:type dst_path: str
:param imgsrv_url: base URL of the image service
:type imgsrv_url: str
:return qcow_path, xml_path
:rtype qcow_path, xml_path
"""
# Image service base url
BASE_URL = imgsrv_url.strip('/')
# find image in imagesrv
URL_FIND = BASE_URL + API_FIND + image_name
try:
resp = get(URL_FIND)
if 'error' in resp.json():
raise Exception(resp.json()['error'],
resp.json()['url_received'])
except (RequestException, Exception) as ex:
LOG.error(ex)
URL_XML = BASE_URL + API_GET + resp.json()['xml']
URL_QCOW = BASE_URL + API_GET + resp.json()['qcow']
DST_XML = dst_path + '/' + resp.json()['xml']
DST_QCOW = dst_path + '/' + resp.json()['qcow']
LOG.info('Download Windows libvirt files')
self.download(URL_XML, DST_XML, resp.json()['xml'])
self.download(URL_QCOW, DST_QCOW, resp.json()['qcow'])
# TODO: improve the errors, it doesnt make sense now
if not exists(DST_XML):
LOG.error('file %s not found', DST_XML)
sys.exit(1)
# TODO: improve the errors, it doesnt make sense now
if not exists(DST_QCOW):
LOG.error('file %s not found', DST_QCOW)
sys.exit(1)
return DST_QCOW, DST_XML
@staticmethod
def update_vm_xml(xml_path, qcow_path, elem, userdir):
"""Read VM definition XML file and update content of some childreen
with data from resources.yaml file
:param image_name:
:type str
:param dst_path:
:type str
:param userdir:
:type str
:return qcow_path, xml_path
:rtype str, str
"""
LOG.debug("Parse VM XML descriptor %s" % xml_path)
# load xml
domain = ET.parse(xml_path)
xml = domain.getroot()
# parse elements from resources.yaml
if xml.find('name') is not None:
name = xml.find('name')
name.text = str(elem['name'])
if xml.find('memory') is not None:
memory = xml.find('memory')
memory.text = str(elem['memory'])
if xml.find('vcpu') is not None:
vcpu = xml.find('vcpu')
vcpu.text = str(elem['vcpu'])
if xml.find('devices') is not None:
devices = xml.find('devices')
disk = devices.find('disk')
source = disk.find('source')
source.attrib['file'] = qcow_path
# save temporary VM definition file to be used for creation
# user_dir + elem-name -- hidden file
_xml = join(userdir, LIBVIRT_OUTPUT)
domain.write(_xml)
# Load xml object in memory
fp = open(_xml, "r")
_xml_obj = fp.read()
fp.close()
LOG.debug("Parse completed, file %s is ready" % xml_path)
return _xml_obj
@staticmethod
def create_vm_virtinstall(vm, fatal=True):
""" provision a new virtual machine to the host using virt-install
cli
:param vm
:type obj
command line:
virt-install --connect qemu:///system
--name myVM
--ram 4000
--vcpus=1
--os-type=windows
--disk path=/tmp/windows_2012.qcow,device=disk,bus=virtio,format=qcow2
--vnc
--noautoconsole
--import
"""
LOG.debug("Creating your vm %s" % vm['name'])
cmd = ("virt-install"
" --connect qemu:///system"
" --name " + str(vm['name']) +
" --ram " + str(vm['memory']) +
" --vcpus " + str(vm['vcpu']) +
" --os-type=windows"
" --disk path=" + str(vm['disk_source']) +
",device=disk,bus=virtio,format=qcow2"
" --vnc"
" --noautoconsole"
" --import"
)
LOG.debug(cmd)
rs = subprocess_call(cmd, stdout=PIPE, stderr=PIPE)
if rs == 0:
LOG.error(rs['stderr'])
raise SystemExit(1)
LOG.info("%s provisioned" % vm['name'])
@staticmethod
def create_vm(conn, xml_path):
"""Define a new domain in Libvirt, creating new Virtual Machine
:param conn: Libvirt connection
:type conn: object
:param xml_path: full path for XML with VM definition
:type xml_path: str
:return True
:rtype Boolean
"""
LOG.debug("VM creation, defining new domain in libvirt")
try:
conn.defineXMLFlags(xml_path)
LOG.debug("VM creation, complete")
return True
except (SystemExit, libvirtError) as ex:
LOG.error(ex)
raise SystemExit(1)
@staticmethod
def find_vm_by_name(conn, vm_name):
"""Find VM or domain in Libvirt
:param conn: Libvirt connection
:type conn: object
:param vm_name: name of virtual machine or domain
:type vm_name: str
:return vm: Virtual Machine
:rtype vm: object
"""
try:
vm = conn.lookupByName(vm_name)
LOG.debug("VM %s found" % vm_name)
except Exception:
vm = None
LOG.debug("VM %s doesn't exist" % vm_name)
return vm
def is_running(self, vm):
"""Check if VM state is running
:param vm: virtual machine
:type vm: object
:return True|False
:rtype Boolean
"""
vm_state = self.args.states.get(vm.info()[0], vm.info()[0])
if 'running' in vm_state:
return True
else:
return False
def reboot_vm(self, vm):
"""Reboot virtual machine on libvirt
:param vm: virtual machine
:type vm: object
"""
vm.reboot()
@retry(Exception, tries=3, delay=5)
def start_vm(self, vm):
"""Start virtual machine instance on Libvirt
:param vm: virtual machine
:type vm: object
"""
if self.is_running(vm):
LOG.debug("VM %s is running" % vm.name())
return True
try:
vm.create()
LOG.debug("Importing VM %s" % vm)
# the raise exception is to force the vm state be checked again
raise Exception
except libvirt.libvirtError as ex:
raise ex
@retry(Exception, tries=3, delay=5)
def stop_vm(self, vm):
"""Stop virtual machine instance on Libvirt
:param vm: virtual machine
:type vm: object
"""
if not self.is_running(vm):
return True
try:
vm.destroy()
LOG.debug("VM %s stopped" % vm.name())
raise Exception
except libvirt.libvirtError as ex:
raise ex
@staticmethod
def delete_vm(conn, vm, flag=None):
""" """
# TODO: PAWS-84 flag to delete VM during teardown
try:
vm.undefineFlags(1)
LOG.debug("VM %s deleted" % vm.name())
if flag:
storage_pools = conn.listAllStoragePools()
for pool in storage_pools:
stgvols = pool.listVolumes()
LOG.error(stgvols)
return True
except (libvirt.libvirtError, Exception) as ex:
LOG.error(ex)
@retry(Exception, tries=5, delay=30)
def get_ipv4(self, vm):
"""Get IP V4 from Windows running as Virtual Machine in Libvirt
QEMU-KVM provider.
:param vm: virtual machine
:type vm: domain object
:return addr['addr']: IP address V4
:rtype ipv4: str
"""
try:
ifaces = vm.interfaceAddresses(
libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE)
if not ifaces:
raise Exception("waiting for network interface")
LOG.debug("{0:10} {1:20} {2:12} {3}".
format("Interface", "MAC address", "Protocol",
"Address"))
def toIPAddrType(addrType):
if addrType == libvirt.VIR_IP_ADDR_TYPE_IPV4:
return "ipv4"
elif addrType == libvirt.VIR_IP_ADDR_TYPE_IPV6:
return "ipv6"
for (name, val) in ifaces.iteritems():
if val['addrs']:
for addr in val['addrs']:
LOG.debug("{0:10} {1:19}".format(name, val['hwaddr'])),
LOG.debug("{0:12} {1}/{2} ".
format(toIPAddrType(addr['type']),
addr['addr'], addr['prefix'])),
else:
LOG.debug("{0:10} {1:19}".format(name, val['hwaddr'])),
LOG.debug("{0:12} {1}".format("N/A", "N/A")),
except Exception as ex:
raise ex
return addr['addr']
def get_vm_info(self, conn, elem):
"""Get virtual machine info
:param conn: Libvirt connection
:type conn: object
:param elem: resource definition from resources.yaml
:type elem: dict
:return vm_info: relevant info to PAWS for a given virtual machine
:rtype dict
"""
LOG.info("Retrieving info from %s" % elem['name'])
vm = self.find_vm_by_name(conn, elem['name'])
# in case VM doesnt exist
if not vm:
return False
vm_info = {}
vm_info['id'] = vm.ID()
vm_info['name'] = vm.name()
vm_info['uuid'] = vm.UUIDString()
vm_info['os_type'] = vm.OSType()
vm_info['state'] = self.args.states.get(vm.info()[0], vm.info()[0])
vm_info['max_memory'] = str(vm.info()[1])
vm_info['used_memory'] = str(vm.info()[2])
vm_info['vcpu'] = str(vm.info()[3])
# Determine if the vm has a persistent configuration
# which means it will still exist after shutting down
vm_info['persistent'] = vm.isPersistent()
vm_info['autostart'] = vm.autostart()
if self.is_running(vm):
ip = self.get_ipv4(vm)
else:
ip = None
vm_info['ip'] = ip
vm_info['win_username'] = elem['win_username']
vm_info['win_password'] = elem['win_password']
vm_info['provider'] = elem['provider']
# read VM xml definition or descriptor
xmldesc = vm.XMLDesc(0)
xml = ET.fromstring(xmldesc)
if xml.find('devices') is not None:
devices = xml.find('devices')
disk = devices.find('disk')
source = disk.find('source')
vm_info['disk_source'] = source.attrib['file']
LOG.debug("Loaded VM Info for %s" % elem['name'])
return vm_info
def generate_resources_paws(self, conn):
"""Generate or Update resources.paws file.
If the resources.paws file already exists, it is neither deleted nor
blindly appended to. Any element declared in resources.paws that comes
from a provider other than libvirt is preserved, and the new libvirt
data is appended to the file.
:param conn: Libvirt connection
:type conn: object
"""
LOG.debug("Generating %s" % self.args.resources_paws_file)
vms = []
for res in self.args.resources:
if self.vm_exist(conn, res['name']):
vm_info = self.get_vm_info(conn, res)
if vm_info:
vms.append(vm_info)
# Write resources.paws
if len(vms) > 0:
if exists(self.args.resources_paws_file):
res_paws = file_mgmt('r', self.args.resources_paws_file)
for x in res_paws['resources']:
if x['provider'] != self.args.name:
vms.append(x)
self.args.resources_paws = {'resources': vms}
file_mgmt(
'w',
self.args.resources_paws_file,
self.args.resources_paws
)
LOG.debug("Successfully created %s", self.args.resources_paws_file)
|
gpl-3.0
| -1,914,747,923,806,665,700 | 32.667118 | 79 | 0.562661 | false |
iamforeverme/kshop
|
kshop/settings.py
|
1
|
3397
|
"""
Django settings for kshop project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tws*($*n#%^l^nz$=v4onq7b$2z*6*mj(o4jlawr^esc7w70y+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'goods.apps.GoodsConfig'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'kshop',
'USER': 'kshop_user',
'PASSWORD': 'use_kshop',
'HOST': 'postgres',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# for uploaded products' images
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploaded_files')
MEDIA_URL = '/files/'
|
mit
| 1,380,343,952,064,162,800 | 25.138462 | 91 | 0.684722 | false |
tgalal/python-axolotl
|
axolotl/protocol/prekeywhispermessage.py
|
1
|
4082
|
# -*- coding: utf-8 -*-
from google.protobuf.message import DecodeError
from .ciphertextmessage import CiphertextMessage
from ..util.byteutil import ByteUtil
from ..ecc.curve import Curve
from ..identitykey import IdentityKey
from .whispermessage import WhisperMessage
from ..invalidversionexception import InvalidVersionException
from ..invalidmessageexception import InvalidMessageException
from ..legacymessageexception import LegacyMessageException
from ..invalidkeyexception import InvalidKeyException
from . import whisperprotos_pb2 as whisperprotos
class PreKeyWhisperMessage(CiphertextMessage):
def __init__(self, messageVersion=None, registrationId=None, preKeyId=None,
signedPreKeyId=None, ecPublicBaseKey=None, identityKey=None,
whisperMessage=None, serialized=None):
if serialized:
try:
self.version = ByteUtil.highBitsToInt(serialized[0])
if self.version > CiphertextMessage.CURRENT_VERSION:
raise InvalidVersionException("Unknown version %s" % self.version)
if self.version < CiphertextMessage.CURRENT_VERSION:
raise LegacyMessageException("Legacy version: %s" % self.version)
preKeyWhisperMessage = whisperprotos.PreKeyWhisperMessage()
preKeyWhisperMessage.ParseFromString(serialized[1:])
if preKeyWhisperMessage.signedPreKeyId is None or \
not preKeyWhisperMessage.baseKey or \
not preKeyWhisperMessage.identityKey or \
not preKeyWhisperMessage.message:
raise InvalidMessageException("Incomplete message")
self.serialized = serialized
self.registrationId = preKeyWhisperMessage.registrationId
self.preKeyId = preKeyWhisperMessage.preKeyId
if preKeyWhisperMessage.signedPreKeyId is not None:
self.signedPreKeyId = preKeyWhisperMessage.signedPreKeyId
else:
self.signedPreKeyId = -1
self.baseKey = Curve.decodePoint(bytearray(preKeyWhisperMessage.baseKey), 0)
self.identityKey = IdentityKey(Curve.decodePoint(bytearray(preKeyWhisperMessage.identityKey), 0))
self.message = WhisperMessage(serialized=preKeyWhisperMessage.message)
except (InvalidKeyException, LegacyMessageException, DecodeError) as e:
raise InvalidMessageException(e)
else:
self.version = messageVersion
self.registrationId = registrationId
self.preKeyId = preKeyId
self.signedPreKeyId = signedPreKeyId
self.baseKey = ecPublicBaseKey
self.identityKey = identityKey
self.message = whisperMessage
builder = whisperprotos.PreKeyWhisperMessage()
builder.signedPreKeyId = signedPreKeyId
builder.baseKey = ecPublicBaseKey.serialize()
builder.identityKey = identityKey.serialize()
builder.message = whisperMessage.serialize()
builder.registrationId = registrationId
if preKeyId is not None:
builder.preKeyId = preKeyId
versionBytes = ByteUtil.intsToByteHighAndLow(self.version, self.__class__.CURRENT_VERSION)
messageBytes = builder.SerializeToString()
self.serialized = bytes(ByteUtil.combine(versionBytes, messageBytes))
def getMessageVersion(self):
return self.version
def getIdentityKey(self):
return self.identityKey
def getRegistrationId(self):
return self.registrationId
def getPreKeyId(self):
return self.preKeyId
def getSignedPreKeyId(self):
return self.signedPreKeyId
def getBaseKey(self):
return self.baseKey
def getWhisperMessage(self):
return self.message
def serialize(self):
return self.serialized
def getType(self):
return CiphertextMessage.PREKEY_TYPE
|
gpl-3.0
| 1,577,542,224,139,526,400 | 39.019608 | 113 | 0.6683 | false |
mferenca/HMS-ecommerce
|
ecommerce/extensions/payment/utils.py
|
1
|
1405
|
from django.utils.translation import ugettext_lazy as _
def middle_truncate(string, chars):
"""Truncate the provided string, if necessary.
Cuts excess characters from the middle of the string and replaces
them with a string indicating that truncation has occurred.
Arguments:
string (unicode or str): The string to be truncated.
chars (int): The character limit for the truncated string.
Returns:
Unicode: The truncated string, of length less than or equal to `chars`.
If no truncation was required, the original string is returned.
Raises:
ValueError: If the provided character limit is less than the length of
the truncation indicator.
"""
if len(string) <= chars:
return string
# Translators: This is a string placed in the middle of a truncated string
# to indicate that truncation has occurred. For example, if a title may only
# be at most 11 characters long, "A Very Long Title" (17 characters) would be
# truncated to "A Ve...itle".
indicator = _('...')
indicator_length = len(indicator)
if chars < indicator_length:
raise ValueError
slice_size = (chars - indicator_length) // 2
start, end = string[:slice_size], string[-slice_size:]
truncated = u'{start}{indicator}{end}'.format(start=start, indicator=indicator, end=end)
return truncated
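# Editor's sketch (illustrative): the docstring example spelled out. With an
# 11-character limit, the 17-character title keeps 4 characters from each end
# around the truncation indicator.
def _middle_truncate_example():
    return middle_truncate(u'A Very Long Title', 11)  # -> u'A Ve...itle'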
|
agpl-3.0
| 6,131,410,970,237,403,000 | 35.025641 | 92 | 0.679715 | false |
SacNaturalFoods/django-pm
|
helpdesk/settings.py
|
1
|
6941
|
"""
Default settings for django-helpdesk.
"""
from django.conf import settings
settings.ADMIN_MEDIA_PREFIX = '/admin/'
# required for social-auth
settings.AUTHENTICATION_BACKENDS = (
'social_auth.backends.google.GoogleOAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
# django-wysiwyg
settings.DJANGO_WYSIWYG_FLAVOR = 'ckeditor'
settings.DJANGO_WYSIWYG_MEDIA_URL = "%s/ckeditor/" % settings.STATIC_URL
settings.TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'helpdesk.context_processors.saved_searches',
)
# TODO: remove references to these settings elsewhere
HAS_TAGGIT_SUPPORT = True
HAS_TAGGING_SUPPORT = False
DEFAULT_USER_SETTINGS = getattr(settings, 'HELPDESK_DEFAULT_SETTINGS', None)
if not isinstance(DEFAULT_USER_SETTINGS, dict):
DEFAULT_USER_SETTINGS = {
'use_email_as_submitter': True,
'email_on_ticket_assign': True,
'email_on_ticket_change': True,
'login_view_ticketlist': True,
'email_on_ticket_apichange': True,
'tickets_per_page': 25
}
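# Editor's note (illustrative): these defaults can be overridden from the
# Django project's settings module, e.g.
# HELPDESK_DEFAULT_SETTINGS = {
#     'use_email_as_submitter': True,
#     'email_on_ticket_assign': False,
#     'email_on_ticket_change': True,
#     'login_view_ticketlist': True,
#     'email_on_ticket_apichange': True,
#     'tickets_per_page': 50,
# }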
''' generic options - visible on all pages '''
# redirect to login page instead of the default homepage when users visits "/"?
HELPDESK_REDIRECT_TO_LOGIN_BY_DEFAULT = getattr(settings, 'HELPDESK_REDIRECT_TO_LOGIN_BY_DEFAULT', False)
# customize helpdesk name on a few pages, i.e., your organization.
HELPDESK_PREPEND_ORG_NAME = getattr(settings, 'HELPDESK_PREPEND_ORG_NAME', False)
# show knowledgebase links?
HELPDESK_KB_ENABLED = getattr(settings, 'HELPDESK_KB_ENABLED', True)
# show knowledgebase links on staff view?
HELPDESK_KB_ENABLED_STAFF = getattr(settings, 'HELPDESK_KB_ENABLED_STAFF', True)
# show extended navigation by default, to all users, irrespective of staff status?
HELPDESK_NAVIGATION_ENABLED = getattr(settings, 'HELPDESK_NAVIGATION_ENABLED', True)
# show 'stats' link in navigation bar?
HELPDESK_NAVIGATION_STATS_ENABLED = getattr(settings, 'HELPDESK_NAVIGATION_STATS_ENABLED', True)
# set this to an email address inside your organization and a footer below
# the 'Powered by django-helpdesk' will be shown, telling the user whom to contact
# in case they have technical problems.
HELPDESK_SUPPORT_PERSON = getattr(settings, 'HELPDESK_SUPPORT_PERSON', False)
# show dropdown list of languages that ticket comments can be translated into?
HELPDESK_TRANSLATE_TICKET_COMMENTS = getattr(settings, 'HELPDESK_TRANSLATE_TICKET_COMMENTS', False)
# list of languages to offer. if set to false, all default google translate languages will be shown.
HELPDESK_TRANSLATE_TICKET_COMMENTS_LANG = getattr(settings, 'HELPDESK_TRANSLATE_TICKET_COMMENTS_LANG', ["en", "de", "fr", "it", "ru"])
# show link to 'change password' on 'User Settings' page?
HELPDESK_SHOW_CHANGE_PASSWORD = getattr(settings, 'HELPDESK_SHOW_CHANGE_PASSWORD', False)
# allow user to override default layout for 'followups' - work in progress.
HELPDESK_FOLLOWUP_MOD = getattr(settings, 'HELPDESK_FOLLOWUP_MOD', True)
# include or exclude the ticket description from it's initial follow-up on creation
HELPDESK_INCLUDE_DESCRIPTION_IN_FOLLOWUP = getattr(settings, 'HELPDESK_INCLUDE_DESCRIPTION_IN_FOLLOWUP', False)
# show custom welcome message in dashboard?
HELPDESK_CUSTOM_WELCOME = getattr(settings, 'HELPDESK_CUSTOM_WELCOME', True)
''' options for public pages '''
# show 'view a ticket' section on public page?
HELPDESK_VIEW_A_TICKET_PUBLIC = getattr(settings, 'HELPDESK_VIEW_A_TICKET_PUBLIC', False)
# show 'submit a ticket' section on public page?
HELPDESK_SUBMIT_A_TICKET_PUBLIC = getattr(settings, 'HELPDESK_SUBMIT_A_TICKET_PUBLIC', False)
''' options for update_ticket views '''
# allow non-staff users to interact with tickets? this will also change how 'staff_member_required'
# in staff.py will be defined.
HELPDESK_ALLOW_NON_STAFF_TICKET_UPDATE = getattr(settings, 'HELPDESK_ALLOW_NON_STAFF_TICKET_UPDATE', True)
# show edit buttons in ticket follow ups.
HELPDESK_SHOW_EDIT_BUTTON_FOLLOW_UP = getattr(settings, 'HELPDESK_SHOW_EDIT_BUTTON_FOLLOW_UP', True)
# show ticket edit button on top of ticket description.
HELPDESK_SHOW_EDIT_BUTTON_TICKET_TOP = getattr(settings, 'HELPDESK_SHOW_EDIT_BUTTON_TICKET_TOP', True)
# show ticket delete button on top of ticket description.
HELPDESK_SHOW_DELETE_BUTTON_TICKET_TOP = getattr(settings, 'HELPDESK_SHOW_DELETE_BUTTON_TICKET_TOP', True)
# show hold / unhold button on top of ticket description.
# TODO: remove this feature, it seems useless
HELPDESK_SHOW_HOLD_BUTTON_TICKET_TOP = getattr(settings, 'HELPDESK_SHOW_HOLD_BUTTON_TICKET_TOP', False)
# make all updates public by default? this will hide the 'is this update public' checkbox
HELPDESK_UPDATE_PUBLIC_DEFAULT = getattr(settings, 'HELPDESK_UPDATE_PUBLIC_DEFAULT', True)
# only show staff users in ticket owner drop-downs
HELPDESK_STAFF_ONLY_TICKET_OWNERS = getattr(settings, 'HELPDESK_STAFF_ONLY_TICKET_OWNERS', True)
# only show staff users in ticket cc drop-down
HELPDESK_STAFF_ONLY_TICKET_CC = getattr(settings, 'HELPDESK_STAFF_ONLY_TICKET_CC', True)
''' options for staff.create_ticket view '''
# hide the 'assigned to' / 'Case owner' field from the 'create_ticket' view?
HELPDESK_CREATE_TICKET_HIDE_ASSIGNED_TO = getattr(settings, 'HELPDESK_CREATE_TICKET_HIDE_ASSIGNED_TO', False)
''' options for dashboard '''
# show delete button next to unassigned tickets
HELPDESK_DASHBOARD_SHOW_DELETE_UNASSIGNED = getattr(settings, 'HELPDESK_DASHBOARD_SHOW_DELETE_UNASSIGNED', True)
# hide empty queues in dashboard overview?
HELPDESK_DASHBOARD_HIDE_EMPTY_QUEUES = getattr(settings, 'HELPDESK_DASHBOARD_HIDE_EMPTY_QUEUES', True)
''' options for footer '''
# show 'API' link at bottom of page
HELPDESK_FOOTER_SHOW_API_LINK = getattr(settings, 'HELPDESK_FOOTER_SHOW_API_LINK', True)
# show / hide 'change language' link at bottom of page
HELPDESK_FOOTER_SHOW_CHANGE_LANGUAGE_LINK = getattr(settings, 'HELPDESK_FOOTER_SHOW_CHANGE_LANGUAGE_LINK', False)
''' email options '''
# default Queue email submission settings
QUEUE_EMAIL_BOX_TYPE = getattr(settings, 'QUEUE_EMAIL_BOX_TYPE', None)
QUEUE_EMAIL_BOX_SSL = getattr(settings, 'QUEUE_EMAIL_BOX_SSL', None)
QUEUE_EMAIL_BOX_HOST = getattr(settings, 'QUEUE_EMAIL_BOX_HOST', None)
QUEUE_EMAIL_BOX_USER = getattr(settings, 'QUEUE_EMAIL_BOX_USER', None)
QUEUE_EMAIL_BOX_PASSWORD = getattr(settings, 'QUEUE_EMAIL_BOX_PASSWORD', None)
''' calendar options '''
# these settings determine whether a calendar will be integrated with the helpdesk
# create or update an event in your calendar based on ticket.due_date
HELPDESK_UPDATE_CALENDAR = getattr(settings, 'HELPDESK_UPDATE_CALENDAR', False)
# specify the calendar to update - selects a python module to run the update code
HELPDESK_CALENDAR = getattr(settings, 'HELPDESK_CALENDAR', None)
# media options
PM_LOGO_URL = getattr(settings, 'PM_LOGO_URL', None)
|
agpl-3.0
| 7,715,549,365,510,107,000 | 39.829412 | 134 | 0.751477 | false |
mrunge/openstack_horizon
|
openstack_horizon/dashboards/admin/volumes/urls.py
|
1
|
1703
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_horizon.dashboards.admin.volumes.snapshots \
import urls as snapshot_urls
from openstack_horizon.dashboards.admin.volumes import views
from openstack_horizon.dashboards.admin.volumes.volume_types \
import urls as volume_types_urls
from openstack_horizon.dashboards.admin.volumes.volumes \
import urls as volumes_urls
urlpatterns = patterns(
'',
url(r'^$',
views.IndexView.as_view(),
name='index'),
url(r'^\?tab=volumes_group_tabs__snapshots_tab$',
views.IndexView.as_view(),
name='snapshots_tab'),
url(r'^\?tab=volumes_group_tabs__volumes_tab$',
views.IndexView.as_view(),
name='volumes_tab'),
url(r'^\?tab=volumes_group_tabs__volume_types_tab$',
views.IndexView.as_view(),
name='volume_types_tab'),
url(r'',
include(volumes_urls, namespace='volumes')),
url(r'volume_types/',
include(volume_types_urls, namespace='volume_types')),
url(r'snapshots/',
include(snapshot_urls, namespace='snapshots')),
)
|
apache-2.0
| -3,214,202,331,074,112,000 | 36.844444 | 75 | 0.70229 | false |
wright-group/WrightTools
|
WrightTools/artists/_colors.py
|
1
|
13487
|
"""Colormaps."""
# --- import --------------------------------------------------------------------------------------
import collections
import copy
import numpy as np
from numpy import r_
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
import matplotlib.gridspec as grd
from ._turbo import turbo
# --- define -------------------------------------------------------------------------------------
__all__ = [
"colormaps",
"get_color_cycle",
"grayify_cmap",
"overline_colors",
"plot_colormap_components",
]
# --- functions ----------------------------------------------------------------------------------
def make_cubehelix(name="WrightTools", gamma=0.5, s=0.25, r=-1, h=1.3, reverse=False, darkest=0.7):
"""Define cubehelix type colorbars.
Look `here`__ for more information.
__ http://arxiv.org/abs/1108.5083
Parameters
----------
name : string (optional)
Name of new cmap. Default is WrightTools.
gamma : number (optional)
Intensity factor. Default is 0.5
s : number (optional)
Start color factor. Default is 0.25
r : number (optional)
Number and direction of rotations. Default is -1
h : number (optional)
Hue factor. Default is 1.3
reverse : boolean (optional)
Toggle reversal of output colormap. By default (Reverse = False),
colormap goes from light to dark.
darkest : number (optional)
Default is 0.7
Returns
-------
matplotlib.colors.LinearSegmentedColormap
See Also
--------
plot_colormap_components
Displays RGB components of colormaps.
"""
rr = 0.213 / 0.30
rg = 0.715 / 0.99
rb = 0.072 / 0.11
def get_color_function(p0, p1):
def color(x):
# Calculate amplitude and angle of deviation from the black to
# white diagonal in the plane of constant perceived intensity.
xg = darkest * x ** gamma
lum = 1 - xg # starts at 1
if reverse:
lum = lum[::-1]
a = lum.copy()
a[lum < 0.5] = h * lum[lum < 0.5] / 2.0
a[lum >= 0.5] = h * (1 - lum[lum >= 0.5]) / 2.0
phi = 2 * np.pi * (s / 3 + r * x)
out = lum + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return out
return color
rgb_dict = {
"red": get_color_function(-0.14861 * rr, 1.78277 * rr),
"green": get_color_function(-0.29227 * rg, -0.90649 * rg),
"blue": get_color_function(1.97294 * rb, 0.0),
}
cmap = matplotlib.colors.LinearSegmentedColormap(name, rgb_dict)
return cmap
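# Editor's sketch (illustrative, not part of the original module): build a
# reversed cubehelix variant and inspect its RGB components; the name
# "wright_r" and the darkest value are arbitrary.
def _cubehelix_example():
    cmap = make_cubehelix(name="wright_r", reverse=True, darkest=0.9)
    plot_colormap_components(cmap)
    return cmap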
def make_colormap(seq, name="CustomMap", plot=False):
"""Generate a LinearSegmentedColormap.
Parameters
----------
seq : list of tuples
A sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
name : string (optional)
A name for the colormap
plot : boolean (optional)
Use to generate a plot of the colormap (Default is False).
Returns
-------
matplotlib.colors.LinearSegmentedColormap
`Source`__
__ http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {"red": [], "green": [], "blue": []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict["red"].append([item, r1, r2])
cdict["green"].append([item, g1, g2])
cdict["blue"].append([item, b1, b2])
cmap = mplcolors.LinearSegmentedColormap(name, cdict)
if plot:
plot_colormap_components(cmap)
return cmap
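# Editor's sketch (illustrative): the seq format described above alternates
# RGB tuples with the float positions where the blend changes -- here a
# white -> blue -> black map that switches at 0.5.
def _make_colormap_example():
    c = mplcolors.ColorConverter().to_rgb
    return make_colormap([c("white"), c("blue"), 0.5, c("blue"), c("black")],
                         name="white_blue_black")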
def nm_to_rgb(nm):
"""Convert a wavelength to corresponding RGB values [0.0-1.0].
Parameters
----------
nm : int or float
The wavelength of light.
Returns
-------
List of [R,G,B] values between 0 and 1
`original code`__
__ http://www.physics.sfasu.edu/astro/color/spectra.html
"""
w = int(nm)
# color ---------------------------------------------------------------------------------------
if w >= 380 and w < 440:
R = -(w - 440.0) / (440.0 - 350.0)
G = 0.0
B = 1.0
elif w >= 440 and w < 490:
R = 0.0
G = (w - 440.0) / (490.0 - 440.0)
B = 1.0
elif w >= 490 and w < 510:
R = 0.0
G = 1.0
B = -(w - 510.0) / (510.0 - 490.0)
elif w >= 510 and w < 580:
R = (w - 510.0) / (580.0 - 510.0)
G = 1.0
B = 0.0
elif w >= 580 and w < 645:
R = 1.0
G = -(w - 645.0) / (645.0 - 580.0)
B = 0.0
elif w >= 645 and w <= 780:
R = 1.0
G = 0.0
B = 0.0
else:
R = 0.0
G = 0.0
B = 0.0
# intensity correction ------------------------------------------------------------------------
if w >= 380 and w < 420:
SSS = 0.3 + 0.7 * (w - 350) / (420 - 350)
elif w >= 420 and w <= 700:
SSS = 1.0
elif w > 700 and w <= 780:
SSS = 0.3 + 0.7 * (780 - w) / (780 - 700)
else:
SSS = 0.0
SSS *= 255
return [
float(int(SSS * R) / 256.0),
float(int(SSS * G) / 256.0),
float(int(SSS * B) / 256.0),
]
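# Illustrative behaviour sketch (not part of the original module):
#
#     nm_to_rgb(532)  # green laser line -> roughly [0.31, 1.0, 0.0]
#     nm_to_rgb(800)  # outside the visible range -> [0.0, 0.0, 0.0]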
def plot_colormap_components(cmap):
"""Plot the components of a given colormap."""
from ._helpers import set_ax_labels # recursive import protection
plt.figure(figsize=[8, 4])
gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05)
# colorbar
ax = plt.subplot(gs[0])
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0.0, vmax=1.0)
ax.set_title(cmap.name, fontsize=20)
ax.set_axis_off()
# components
ax = plt.subplot(gs[1])
x = np.arange(cmap.N)
colors = cmap(x)
r = colors[:, 0]
g = colors[:, 1]
b = colors[:, 2]
RGB_weight = [0.299, 0.587, 0.114]
k = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
r.clip(0, 1, out=r)
g.clip(0, 1, out=g)
b.clip(0, 1, out=b)
xi = np.linspace(0, 1, x.size)
plt.plot(xi, r, "r", linewidth=5, alpha=0.6)
plt.plot(xi, g, "g", linewidth=5, alpha=0.6)
plt.plot(xi, b, "b", linewidth=5, alpha=0.6)
plt.plot(xi, k, "k", linewidth=5, alpha=0.6)
ax.set_xlim(0, 1)
ax.set_ylim(-0.1, 1.1)
set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity")
# grayified colorbar
cmap = grayify_cmap(cmap)
ax = plt.subplot(gs[2])
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0.0, vmax=1.0)
ax.set_axis_off()
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap.
`Source`__
__ https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
return mplcolors.LinearSegmentedColormap.from_list(cmap.name + "_grayscale", colors, cmap.N)
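# Illustrative usage sketch (not part of the original module): accepts either a
# registered matplotlib colormap name or a colormap instance.
#
#     grayify_cmap("viridis")
#     grayify_cmap(cubehelix)  # the WrightTools cubehelix defined below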
def get_color_cycle(n, cmap="rainbow", rotations=3):
"""Get a list of RGBA colors following a colormap.
Useful for plotting lots of elements, keeping the color of each unique.
Parameters
----------
n : integer
The number of colors to return.
cmap : string (optional)
The colormap to use in the cycle. Default is rainbow.
rotations : integer (optional)
The number of times to repeat the colormap over the cycle. Default is 3.
Returns
-------
list
List of RGBA lists.
"""
cmap = colormaps[cmap]
if np.mod(n, rotations) == 0:
per = np.floor_divide(n, rotations)
else:
per = np.floor_divide(n, rotations) + 1
vals = list(np.linspace(0, 1, per))
vals = vals * rotations
vals = vals[:n]
out = cmap(vals)
return out
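# Illustrative usage sketch (not part of the original module): ten distinct line
# colors that sweep the rainbow map three times.
#
#     line_colors = get_color_cycle(10, cmap="rainbow", rotations=3)
#     for y, color in zip(curves, line_colors):  # `curves` is hypothetical data
#         plt.plot(y, color=color)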
# --- color maps ----------------------------------------------------------------------------------
cubehelix = make_cubehelix()
experimental = [
"#FFFFFF",
"#0000FF",
"#0080FF",
"#00FFFF",
"#00FF00",
"#FFFF00",
"#FF8000",
"#FF0000",
"#881111",
]
greenscale = ["#000000", "#00FF00"]  # black, green
greyscale = ["#FFFFFF", "#000000"]  # white, black
invisible = ["#FFFFFF", "#FFFFFF"]  # white, white
# isoluminant colorbar based on the research of Kindlmann et al.
# http://dx.doi.org/10.1109/VISUAL.2002.1183788
c = mplcolors.ColorConverter().to_rgb
isoluminant1 = make_colormap(
[
c(r_[1.000, 1.000, 1.000]),
c(r_[0.847, 0.057, 0.057]),
1 / 6.0,
c(r_[0.847, 0.057, 0.057]),
c(r_[0.527, 0.527, 0.000]),
2 / 6.0,
c(r_[0.527, 0.527, 0.000]),
c(r_[0.000, 0.592, 0.000]),
3 / 6.0,
c(r_[0.000, 0.592, 0.000]),
c(r_[0.000, 0.559, 0.559]),
4 / 6.0,
c(r_[0.000, 0.559, 0.559]),
c(r_[0.316, 0.316, 0.991]),
5 / 6.0,
c(r_[0.316, 0.316, 0.991]),
c(r_[0.718, 0.000, 0.718]),
],
name="isoluminant`",
)
isoluminant2 = make_colormap(
[
c(r_[1.000, 1.000, 1.000]),
c(r_[0.718, 0.000, 0.718]),
1 / 6.0,
c(r_[0.718, 0.000, 0.718]),
c(r_[0.316, 0.316, 0.991]),
2 / 6.0,
c(r_[0.316, 0.316, 0.991]),
c(r_[0.000, 0.559, 0.559]),
3 / 6.0,
c(r_[0.000, 0.559, 0.559]),
c(r_[0.000, 0.592, 0.000]),
4 / 6.0,
c(r_[0.000, 0.592, 0.000]),
c(r_[0.527, 0.527, 0.000]),
5 / 6.0,
c(r_[0.527, 0.527, 0.000]),
c(r_[0.847, 0.057, 0.057]),
],
name="isoluminant2",
)
isoluminant3 = make_colormap(
[
c(r_[1.000, 1.000, 1.000]),
c(r_[0.316, 0.316, 0.991]),
1 / 5.0,
c(r_[0.316, 0.316, 0.991]),
c(r_[0.000, 0.559, 0.559]),
2 / 5.0,
c(r_[0.000, 0.559, 0.559]),
c(r_[0.000, 0.592, 0.000]),
3 / 5.0,
c(r_[0.000, 0.592, 0.000]),
c(r_[0.527, 0.527, 0.000]),
4 / 5.0,
c(r_[0.527, 0.527, 0.000]),
c(r_[0.847, 0.057, 0.057]),
],
name="isoluminant3",
)
signed = [
"#0000FF", # blue
"#002AFF",
"#0055FF",
"#007FFF",
"#00AAFF",
"#00D4FF",
"#00FFFF",
"#FFFFFF", # white
"#FFFF00",
"#FFD400",
"#FFAA00",
"#FF7F00",
"#FF5500",
"#FF2A00",
"#FF0000", # red
]
signed_old = [
"#0000FF", # blue
"#00BBFF", # blue-aqua
"#00FFFF", # aqua
"#FFFFFF", # white
"#FFFF00", # yellow
"#FFBB00", # orange
"#FF0000", # red
]
skyebar = [
"#FFFFFF", # white
"#000000", # black
"#0000FF", # blue
"#00FFFF", # cyan
"#64FF00", # light green
"#FFFF00", # yellow
"#FF8000", # orange
"#FF0000", # red
"#800000", # dark red
]
skyebar_d = [
"#000000", # black
"#0000FF", # blue
"#00FFFF", # cyan
"#64FF00", # light green
"#FFFF00", # yellow
"#FF8000", # orange
"#FF0000", # red
"#800000", # dark red
]
skyebar_i = [
"#000000", # black
"#FFFFFF", # white
"#0000FF", # blue
"#00FFFF", # cyan
"#64FF00", # light green
"#FFFF00", # yellow
"#FF8000", # orange
"#FF0000", # red
"#800000", # dark red
]
wright = ["#FFFFFF", "#0000FF", "#00FFFF", "#00FF00", "#FFFF00", "#FF0000", "#881111"]
class cmapdict(dict):
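    # Lazily falls back to matplotlib's registered colormaps for unknown keys.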
def __getitem__(self, key):
if key in self:
return self.get(key)
self[key] = plt.get_cmap(key)
return self.get(key)
colormaps = cmapdict()
colormaps["cubehelix"] = copy.copy(plt.get_cmap("cubehelix_r"))
colormaps["default"] = cubehelix
colormaps["signed"] = copy.copy(plt.get_cmap("bwr"))
colormaps["greenscale"] = mplcolors.LinearSegmentedColormap.from_list("greenscale", greenscale)
colormaps["greyscale"] = mplcolors.LinearSegmentedColormap.from_list("greyscale", greyscale)
colormaps["invisible"] = mplcolors.LinearSegmentedColormap.from_list("invisible", invisible)
colormaps["isoluminant1"] = isoluminant1
colormaps["isoluminant2"] = isoluminant2
colormaps["isoluminant3"] = isoluminant3
colormaps["signed_old"] = mplcolors.LinearSegmentedColormap.from_list("signed", signed_old)
colormaps["skyebar1"] = mplcolors.LinearSegmentedColormap.from_list("skyebar", skyebar)
colormaps["skyebar2"] = mplcolors.LinearSegmentedColormap.from_list("skyebar dark", skyebar_d)
colormaps["skyebar3"] = mplcolors.LinearSegmentedColormap.from_list("skyebar inverted", skyebar_i)
colormaps["turbo"] = turbo
colormaps["wright"] = mplcolors.LinearSegmentedColormap.from_list("wright", wright)
# enforce grey as 'bad' value for colormaps
for cmap in colormaps.values():
cmap.set_bad([0.75] * 3, 1)
# enforce under and over for default colormap
colormaps["default"].set_under([0.50] * 3, 1)
colormaps["default"].set_over("m")
# enforce under and over for signed colormap
colormaps["signed"].set_under("c")
colormaps["signed"].set_over("m")
# a nice set of line colors
overline_colors = ["#CCFF00", "#FE4EDA", "#FF6600", "#00FFBF", "#00B7EB"]
|
mit
| -2,167,874,850,872,179,700 | 26.637295 | 99 | 0.527619 | false |
simphony/tornado-webapi
|
tornadowebapi/traitlets.py
|
1
|
3181
|
"""Our traits must be able to deal with Absent values, for two reasons.
First, the fact that we don't specify an optional value does not imply that
the resulting resource will have a default.
Second, when we do modification (PATCH) operations, we only specify the values
we want to change.
In practice, this means that all traits are optional. Mandatory entries
are only enforced when creating new resources or setting from scratch."""
import traitlets as _traitlets
HasTraits = _traitlets.HasTraits
TraitError = _traitlets.TraitError
Absent = _traitlets.Sentinel("Absent", "tornadowebapi.traitlets")
class Int(_traitlets.Int):
"""An int trait, with support for lack of specified value"""
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Unicode(_traitlets.Unicode):
default_value = Absent
def info(self):
qualifiers = []
if self.metadata.get("strip", False):
qualifiers.append("strip")
if not self.metadata.get("allow_empty", True):
qualifiers.append("not empty")
text = ", ".join(qualifiers)
if len(text):
return self.info_text + "("+text+")"
else:
return self.info_text
def validate(self, obj, value):
if value == Absent:
return value
value = super().validate(obj, value)
if self.metadata.get("strip", False):
value = value.strip()
if not self.metadata.get("allow_empty", True) and len(value) == 0:
self.error(obj, value)
return value
class Label(Unicode):
"""A label is a string that is not none and is automatically
stripped"""
def __init__(self):
super().__init__(allow_empty=False, strip=True)
class Enum(_traitlets.Enum):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Bool(_traitlets.Bool):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Float(_traitlets.Float):
default_value = Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class List(_traitlets.List):
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class Dict(_traitlets.Dict):
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
class OneOf(_traitlets.Instance):
"""Marks a one to one relationship with a resource or resourcefragment."""
def make_dynamic_default(self):
return Absent
def validate(self, obj, value):
if value == Absent:
return value
return super().validate(obj, value)
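# Illustrative usage sketch (not part of the original module); the resource-like
# class below is hypothetical.
#
#     class SketchResource(HasTraits):
#         name = Label()
#         count = Int()
#
#     res = SketchResource()
#     res.name is Absent   # unspecified traits stay Absent instead of a default
#     res.count = 3        # ordinary traitlets validation still applies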
|
bsd-3-clause
| 8,428,902,971,515,318,000 | 24.047244 | 78 | 0.624646 | false |
mosegontar/billybot
|
billybot/message_handler.py
|
1
|
6213
|
import json
import abc
from copy import deepcopy
from .config import slack_attachment
from eng_join import join
class SlackMessageHandler(metaclass=abc.ABCMeta):
"""Abstract base class from which all Slack message handlers derive"""
message_dictionary = dict()
message_dictionary['NONE'] = ''
message_dictionary['RESOLVED'] = "Here's what I found {} :) Let me know"\
" if there is anything else I can do."
message_dictionary['RESULTS'] = "Okay, here's what I found {}:"
message_dictionary['CLARIFY'] = "Which one did you mean?"
message_dictionary['NO_RESULTS'] = "Ugh, I couldn't find anything {}:("
def __init__(self):
self.messages = []
self.attachment_data = {'title': None, 'title_link': None,
'fields': [], 'text': None}
def get_message(self):
"""Returns formatted message to caller."""
self._prepare_message()
return self._make_reply()
def _make_reply(self):
"""Package messages as list of dicts and return packaged reply
Each message dict item contains two key, value pairs
'text': a string containing the message
'attachments': None if no attachment else a dict of attachment fields
"""
attachment = self._format_attachment(deepcopy(slack_attachment))
reply = [{'text': self.messages.pop(0),
'attachments': attachment}]
while self.messages:
next_reply = {'text': self.messages.pop(0),
'attachments': None}
reply.append(next_reply)
return reply
def _prepare_message(self):
"""Format message and return reply and attachments."""
if self.error:
self._set_error_msg()
elif self.pending:
self._set_unresolved_query_msg()
else:
self._set_resolved_query_msg()
def _create_attachment_fields(self, proposed_fields):
"""Set proposed_fields to the Slack attachment fields format"""
fields = []
for title, value in proposed_fields:
if value:
fields.append(dict([('title', title),
('value', value),
('short', True)]))
return fields
def _format_attachment(self, attachment):
"""Set attachment fields and return Slack attachment as JSON"""
for key, value in self.attachment_data.items():
if key in attachment.keys():
if key == 'text' and type(value) == list:
value = self._make_list_string(value)
attachment[key] = value
return json.dumps([attachment])
def _make_list_string(self, item_list):
"""Turn a list of items into an enumerated string"""
string_list = '\n'.join(['{}. {}'.format(i+1, item)
for i, item in enumerate(item_list)])
return string_list
def _format_msg(self, base, joiner='', string=''):
"""Format message strings"""
try:
_int = int(string)
format_string = ''
except:
_int = False
format_string = ' '.join([joiner, string])
return base.format(format_string).strip()
@abc.abstractmethod
def _set_error_msg(self):
"""Create error message."""
pass
@abc.abstractmethod
def _set_unresolved_query_msg(self):
"""Create message requesting further query of interim results."""
pass
@abc.abstractmethod
def _set_resolved_query_msg(self):
"""Create message for resolved query."""
pass
class ContactQueryMessageHandler(SlackMessageHandler):
def __init__(self, query, pending, error,
summary=None, data=None, results=None):
super().__init__()
self.msg_dict = SlackMessageHandler.message_dictionary
self.query = query
self.pending = pending
self.error = error
self.member_summary = summary
self.member_data = data
self.search_results = results
def _set_error_msg(self):
"""Set error message and append to messages."""
error_message = self._format_msg(base=self.msg_dict[self.error],
joiner='for',
string=self.query)
self.messages.append(error_message)
def _set_unresolved_query_msg(self):
"""Set unresolved query msg and append msg and attachment to messages"""
primary_reply = self._format_msg(base=self.msg_dict['RESULTS'],
joiner='for',
string=self.query)
secondary_reply = self.msg_dict['CLARIFY']
self.attachment_data['text'] = [res[0] for res in self.search_results]
self.messages.extend([primary_reply, secondary_reply])
def _set_resolved_query_msg(self):
"""Set resolved query msg and append msg and attachment to messages"""
twitter_handle = self.member_data.get('twitter_id')
if twitter_handle:
twitter_url = 'twitter.com/{}'.format(twitter_handle)
else:
twitter_url = None
        # prepare the remaining contact fields for the attachment
phone_number = self.member_data.get('phone')
office_locale = self.member_data.get('office')
contact_form = self.member_data.get('contact_form')
_fields = [('Twitter', twitter_url),
('Phone', phone_number),
('Office', office_locale),
('Contact Form', contact_form)]
self.attachment_data['fields'] = self._create_attachment_fields(_fields)
self.attachment_data['title'] = self.member_summary
self.attachment_data['title_link'] = self.member_data['website']
primary_reply = self._format_msg(base=self.msg_dict['RESOLVED'],
joiner='for',
string=self.query)
secondary_reply = self.msg_dict['NONE']
self.messages.extend([primary_reply, secondary_reply])
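# Illustrative usage sketch (not part of the original module); the query and
# contact details below are hypothetical.
#
#     handler = ContactQueryMessageHandler(
#         query="Jane Doe", pending=False, error=None,
#         summary="Sen. Jane Doe (D-XX)",
#         data={"twitter_id": "janedoe", "phone": "202-555-0100",
#               "office": "1 Example Office Building", "contact_form": None,
#               "website": "https://example.senate.gov"},
#     )
#     reply = handler.get_message()  # list of {'text': ..., 'attachments': ...} dicts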
|
mit
| -6,864,756,672,610,262,000 | 31.025773 | 80 | 0.560599 | false |
pantheon-systems/etl-framework
|
etl_framework/config_mixins/tests/test_postgresql_drop_table_statement_mixin.py
|
1
|
1112
|
"""test cases for PostgreSqlCreateTableStatementMixin"""
import re
import unittest
from etl_framework.config_mixins.postgresql_drop_table_statement_mixin import \
PostgreSqlDropTableStatementMixin
class PostgreSqlDropTableStatementMixinTestCases(unittest.TestCase):
"""TestCases"""
def test_create_drop_table_statement_output_is_string(self):
"""
        tests create_drop_table_statement method
        NOTE we need more unittests to fully cover every branch of logic
for this method
"""
table = "test_table"
expected_statement = \
"""
DROP TABLE IF EXISTS test_table
"""
fields, statement = \
PostgreSqlDropTableStatementMixin.create_drop_table_statement(
table=table,
statement_string=True,
)
# Ignore superfluous whitespace
expected_statement = re.sub(r'\s+', ' ', expected_statement).strip()
statement = re.sub(r'\s+', ' ', statement).strip()
self.assertEqual(statement, expected_statement)
self.assertEqual(fields, [])
|
mit
| 2,961,109,704,062,054,400 | 29.054054 | 79 | 0.640288 | false |
lonvia/Nominatim
|
utils/analyse_indexing.py
|
1
|
4536
|
#!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim.
# Copyright (C) 2020 Sarah Hoffmann
"""
Script for analysing the indexing process.
The script enables detailed logging for nested statements and then
runs the indexing process for the given object. Detailed 'EXPLAIN ANALYSE'
information is printed for each executed query in the trigger. The
transaction is then rolled back, so that no actual changes to the database
happen. It also disables logging into the system log, so that the
log files are not cluttered.
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError
import psycopg2
import getpass
import re
class Analyser(object):
def __init__(self, options):
password = None
if options.password_prompt:
password = getpass.getpass("Database password: ")
self.options = options
self.conn = psycopg2.connect(dbname=options.dbname,
user=options.user,
password=password,
host=options.host,
port=options.port)
def run(self):
c = self.conn.cursor()
if self.options.placeid:
place_id = self.options.placeid
else:
if self.options.rank:
c.execute(f"""select place_id from placex
where rank_address = {self.options.rank}
and linked_place_id is null
limit 1""")
objinfo = f"rank {self.options.rank}"
if self.options.osmid:
osm_type = self.options.osmid[0].upper()
if osm_type not in ('N', 'W', 'R'):
raise RuntimeError("OSM ID must be of form <N|W|R><id>")
try:
osm_id = int(self.options.osmid[1:])
except ValueError:
raise RuntimeError("OSM ID must be of form <N|W|R><id>")
c.execute(f"""SELECT place_id FROM placex
WHERE osm_type = '{osm_type}' AND osm_id = {osm_id}""")
objinfo = f"OSM object {self.options.osmid}"
if c.rowcount < 1:
raise RuntimeError(f"Cannot find a place for {objinfo}.")
place_id = c.fetchone()[0]
c.execute(f"""update placex set indexed_status = 2 where
place_id = {place_id}""")
c.execute("""SET auto_explain.log_min_duration = '0';
SET auto_explain.log_analyze = 'true';
SET auto_explain.log_nested_statements = 'true';
LOAD 'auto_explain';
SET client_min_messages = LOG;
SET log_min_messages = FATAL""");
c.execute(f"""update placex set indexed_status = 0 where
place_id = {place_id}""")
c.close() # automatic rollback
for l in self.conn.notices:
print(l)
if __name__ == '__main__':
def h(s):
        return re.sub(r"\s\s+", " ", s)
p = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
group = p.add_mutually_exclusive_group(required=True)
group.add_argument('--rank', dest='rank', type=int,
help='Analyse indexing of the given address rank')
group.add_argument('--osm-id', dest='osmid', type=str,
help='Analyse indexing of the given OSM object')
group.add_argument('--place-id', dest='placeid', type=int,
help='Analyse indexing of the given Nominatim object')
p.add_argument('-d', '--database',
dest='dbname', action='store', default='nominatim',
help='Name of the PostgreSQL database to connect to.')
p.add_argument('-U', '--username',
dest='user', action='store',
help='PostgreSQL user name.')
p.add_argument('-W', '--password',
dest='password_prompt', action='store_true',
help='Force password prompt.')
p.add_argument('-H', '--host',
dest='host', action='store',
help='PostgreSQL server hostname or socket location.')
p.add_argument('-P', '--port',
dest='port', action='store',
help='PostgreSQL server port')
Analyser(p.parse_args()).run()
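# Illustrative invocations (not part of the original script); object IDs and
# connection details are hypothetical:
#
#     ./utils/analyse_indexing.py --osm-id W1234567 -d nominatim -U www-data
#     ./utils/analyse_indexing.py --rank 30 -W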
|
gpl-2.0
| 641,761,913,534,099,100 | 37.117647 | 85 | 0.540123 | false |
JudiciaryPag/TopicServer
|
server/mockdb.py
|
1
|
20883
|
#!/usr/bin/env python
"""
A simple topic-article memory-resident database.
mockdb.py
The database consists of messages recorded against topics and subscribers.
Subscribers collect messages posted on topics they've subscribed to.
This module is part of a larger service that includes an HTTP server process
and a command-line client utility that can be used to post and retrieve
articles.
The implementation is robust and complete but may appear relatively crude.
Functionality takes first place with performance second. Consequently the
structures employed may not offer the fastest solution but, as always,
a complex working system is better when it evolves from a simple working
system. Therefore, depending on performance requirements, further work
may be needed to optimise the data structures and their use.
The following significant structures are managed in this module:
DbResult
--------
This is a namedtuple returned by all the public methods in this module.
It contains 'message' and 'error' fields. The 'message' field is
used to pass subscription messages back to the caller by 'get_next_message()'.
If there are no further unread messages for a subscriber the 'message'
field will be 'None'.
Message
-------
This named tuple is used to record posted messages on each topic
that remain unread by at least one subscriber. The namedtuple contains
'text' and 'subscribers' fields. The message is placed in the 'text' field
and 'subscribers' is a list of subscribers who have not yet
read the message. As messages are read by a subscriber their name is
removed frm the list. When the list is empty the message is removed form the
database.
MockDb._topics
--------------
This object member contains the list of topics that have at least one
subscriber. When all subscribers un-subscribe from a topic the topic is removed
from the list.
MockDb._subscribers
-------------------
This object member contains the list of subscribers that have a subscription
to at least one topic. When a subscriber un-subscribes from all topics
they are removed from the list.
MockDb._topic_subscribers
-------------------------
This is a dictionary subscribers indexed by topic. When a message is
posted for a topic this list is copied to the message.subscribers field.
When a topic has no subscribers it is removed from the dictionary.
MockDb._topic_messages
----------------------
This is a dictionary of messages indexed by topic. Messages are held
and released in creation order. A message remains against a topic
until all the subscribers have read it. When there are no more messages
for a topic the topic is removed from the dictionary.
May 2015
"""
# Import _standard_ modules we're dependent upon
from collections import namedtuple
import ConfigParser
import string
from types import StringType, IntType
from twisted.python import log
# Module version
# and brief revision history (latest first)
__version__ = '1.3'
# 1.3 Use of topicserver.cfg file
# 1.2 Use of Twisted logging service
# 1.1 Changes to satisfy PEP8 (style guide)
# 1.0 Initial release
# The MockDb class method return structure (named tuple).
# An instance of this structure is returned by every method.
DbResult = namedtuple('DbResult', 'message error')
# A Message.
# A message consists of a body of text
# and a list of subscribers. The list of subscribers is a list of
# subscribers that a) were interested in the article's topic when
# the message was added and b) have not read the message.
# When a Message has no subscribers then it can assume to have
# been read by everyone and can be removed from the topic.
Message = namedtuple('Message', 'text subscribers')
# Our configuration is located in an appropriately named section
# in a Python configuration file.
CONFIG_FILE = 'server/topicserver.cfg'
OUR_CONFIG_SECTION = 'mockdb'
# -----------------------------------------------------------------------------
# _check_string
# -----------------------------------------------------------------------------
def _check_string(test_string, length):
"""A general method to check the validity of strings. The string length
is checked along with its content, which must only contain acceptable
characters (printable ASCII) excluding '/' or '\'.
True is returned if the string is acceptable.
test_string -- The string to check
length -- The maximum length permitted
"""
assert type(test_string) is StringType
assert type(length) is IntType
# Too long or empty?
if len(test_string) > length or len(test_string) == 0:
return
# Does the supplied string contain whitespace?
for ch in test_string:
if ch in string.whitespace:
return
elif ch in '/\\':
return
# OK if we get here
return True
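# Illustrative behaviour sketch (not part of the original module):
#
#     _check_string('weather', 16)    # -> True
#     _check_string('two words', 16)  # -> None (whitespace is rejected)
#     _check_string('a/b', 16)        # -> None (path separators are rejected)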
# -----------------------------------------------------------------------------
# MockDb
# -----------------------------------------------------------------------------
class MockDb:
"""The topic-message database. A very simple service that allows
contributors and subscribers to post and receive short text messages
against a topic.
Methods return a `DbResult` namedtuple which contains a message
or an informative error, in the form of a string.
The implicit API contract conditions require that all method arguments
are of type string. Passing in any other type will result in a run-time
error. It is the responsibility of the caller to ensure that this
requirement is met.
This class is not thread safe.
"""
# -------------------------------------------------------------------------
def __init__(self):
"""Object initialiser. Setup empty topics, subscribers
and topic messages.
"""
# Read our configuration
self._read_configuration()
# The list of topics that have at least one subscriber
self._topics = []
# The list of subscribers to at least one topic
self._subscribers = []
# A dictionary of subscribers for each topic
self._topic_subscribers = {}
# A dictionary of messages for each topic
self._topic_messages = {}
# The number of unread messages (across all topics)
# For diagnostics/testing
self._messages = 0
# -------------------------------------------------------------------------
def _read_configuration(self):
"""Reads our configuration from file.
"""
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
self.max_subscribers = config.\
getint(OUR_CONFIG_SECTION, 'MAX_SUBSCRIBERS')
self.max_topics = config.\
getint(OUR_CONFIG_SECTION, 'MAX_TOPICS')
self.max_per_topic_messages = config.\
getint(OUR_CONFIG_SECTION, 'MAX_PER_TOPIC_MESSAGES')
self.max_subscriber_name_length = config.\
getint(OUR_CONFIG_SECTION, 'MAX_SUBSCRIBER_NAME_LENGTH')
self.max_topic_name_length = config.\
getint(OUR_CONFIG_SECTION, 'MAX_TOPIC_NAME_LENGTH')
self.max_message_length = config.\
getint(OUR_CONFIG_SECTION, 'MAX_MESSAGE_LENGTH')
# -------------------------------------------------------------------------
def subscribe(self, topic, subscriber):
"""Adds a subscriber to a topic returning a DbResult namedtuple.
If the subscriber or topic limit has been reached an error is returned.
An error is also returned if the length or content of the subscriber
and topic strings is found to be unacceptable.
topic -- The subscription topic
subscriber -- The subscriber, a string
"""
# Assert _API contract_ conditions...
assert type(topic) is StringType
assert type(subscriber) is StringType
# Check field content
if not _check_string(subscriber, self.max_subscriber_name_length):
return DbResult(None, 'The subscriber string is invalid')
if not _check_string(topic, self.max_topic_name_length):
return DbResult(None, 'The topic string is invalid')
        # Entry conditions have been validated.
# Log
prior_num_subscribers = len(self._subscribers)
prior_num_topics = len(self._topics)
log.msg('Prior subscribers/topics'
' {:d}/{:d}'.format(prior_num_subscribers,
prior_num_topics))
# Check subscriber and topic capacity
if (prior_num_subscribers >= self.max_subscribers and
subscriber not in self._subscribers):
return DbResult(None, 'The subscriber limit has been reached')
if (prior_num_topics >= self.max_topics and
topic not in self._topics):
return DbResult(None, 'The topic limit has been reached')
# Remember this subscriber and topic (if not known)
if subscriber not in self._subscribers:
self._subscribers.append(subscriber)
if topic not in self._topics:
# A new topic
self._topics.append(topic)
# And initialise the topic message list
self._topic_messages[topic] = []
# Extend the list of topic listeners
num_topic_subscribers = 0
if topic not in self._topic_subscribers:
# This is the first subscriber for the topic.
# Add a topic and its first subscriber.
self._topic_subscribers[topic] = [subscriber]
num_topic_subscribers = 1
elif subscriber not in self._topic_subscribers[topic]:
# Topic exists, and this is a new subscriber
self._topic_subscribers[topic].append(subscriber)
num_topic_subscribers = len(self._topic_subscribers[topic])
# Log changes
changed = False
if num_topic_subscribers:
changed = True
log.msg('New topic subscriber'
' (total={:d})'.format(num_topic_subscribers))
new_num_subscribers = len(self._subscribers)
new_num_topics = len(self._topics)
if new_num_subscribers > prior_num_subscribers or\
new_num_topics > prior_num_topics:
changed = True
log.msg('New subscribers/topics'
' {:d}/{:d}'.format(new_num_subscribers,
new_num_topics))
if not changed:
log.msg('Nothing changed')
# Success
return DbResult(None, None)
# -------------------------------------------------------------------------
def unsubscribe(self, topic, subscriber):
"""Removes a subscriber from a topic returning a DbResult namedtuple.
It is an error to un-subscribe from a topic that the subscriber
was not subscribed to.
topic -- The topic the subscriber was subscribed to
subscriber -- The subscriber, a string
"""
# Assert _API contract_ conditions...
assert type(topic) is StringType
assert type(subscriber) is StringType
# Check field content
if not _check_string(subscriber, self.max_subscriber_name_length):
return DbResult(None, 'The subscriber string is invalid')
if not _check_string(topic, self.max_topic_name_length):
return DbResult(None, 'The topic string is invalid')
# If the subscriber is not in the topic list, this is an error.
if (topic not in self._topics or
subscriber not in self._topic_subscribers[topic]):
return DbResult(None, 'The subscription does not exist')
        # Clean up the database for this subscriber.
# Remove the subscriber from the topic list.
self._topic_subscribers[topic].remove(subscriber)
# Log
remaining_subscribers = len(self._topic_subscribers[topic])
log.msg('Removed topic subscriber'
' ({:d} remaining subscribers)'.format(remaining_subscribers))
# Forget the subscriber if the subscriber isn't in any other topic
subscribed_to_other_topics = False
for known_topic in self._topics:
if subscriber in self._topic_subscribers[known_topic]:
subscribed_to_other_topics = True
break
if not subscribed_to_other_topics:
self._subscribers.remove(subscriber)
log.msg('Removed subscriber'
' (not subscribed to any other topics)')
# Get the topic messages in order to remove the subscriber from
# any remaining messages in the topic. Then, if the message has no
# other subscribers, remove the message.
#
# We do this by copying messages and recreating the topics messages...
# Probably expensive for topics with a lot of messages!
# A list comprehension approach might be more compact?
topic_messages = self._topic_messages[topic][:]
remaining_topic_messages = 0
if topic_messages:
# Empty the topic of messages
self._topic_messages[topic] = []
# Put back messages that still have subscribers
# other than this one...
for message in topic_messages:
if subscriber in message.subscribers:
message.subscribers.remove(subscriber)
if message.subscribers:
# Message still has subscribers
# so put it back in the topic
self._topic_messages[topic].append(message)
                        remaining_topic_messages += 1
else:
# Message removed from the topic.
# Decrement the total message count...
assert self._messages > 0
self._messages -= 1
# Log...
if self._messages:
if self._messages > 1:
log.msg("{:d} remaining messages".format(self._messages))
else:
log.msg("1 remaining message")
log.msg("{:d} in topic)".format(remaining_topic_messages))
else:
log.msg("No more messages (in any topic)")
# If the topic has no subscribers then remove it and
# its list of messages and remove the topic from the list of topics
if not self._topic_subscribers[topic]:
self._topic_subscribers.pop(topic)
self._topic_messages.pop(topic)
self._topics.remove(topic)
log.msg('Removed topic (no more subscribers)')
# Success if we get here
return DbResult(None, None)
# -------------------------------------------------------------------------
def put_message(self, topic, message):
"""Puts a message on a topic stream. It is an error to place
a message on a topic that does not exist. A topic will not exist
if it has now current subscribers.
If successful an empty DbResult is returned.
topic -- The topic the message will be attached to
message -- The message
"""
# Assert _API contract_ conditions...
assert type(topic) is StringType
assert type(message) is StringType
# Check field content
if not _check_string(topic, self.max_topic_name_length):
return DbResult(None, 'The topic string is invalid')
# Message can contain anything, limited only by length
# and must have some content!
if (len(message) > self.max_message_length or
len(message) == 0):
return DbResult(None, 'The message string is invalid')
        # If the topic does not exist there's no point in
        # storing the message. Only existing subscribers
# (and therefore existing topics) will see messages anyway.
if topic not in self._topics:
return DbResult(None, 'Topic has no subscribers')
# Do not exceed the maximum unread message count per topic.
if len(self._topic_messages[topic]) >= self.max_per_topic_messages:
return DbResult(None, 'Topic unread message limit reached')
# Attach the message to the list of messages on a topic
# along with the existing list of topic subscribers.
topic_subscribers = self._topic_subscribers[topic]
        # The topic must have at least one subscriber if we get here...
assert topic_subscribers
topic_message = Message(message, topic_subscribers[:])
self._topic_messages[topic].append(topic_message)
# Count
self._messages += 1
# Log...
num_topic_messages = len(self._topic_messages[topic])
plural = ''
if self._messages > 2:
plural = 's'
log.msg('{:d} message{} held'
' ({:d} in topic)'.format(self._messages,
plural,
num_topic_messages))
# Success if we get here
return DbResult(None, None)
# -------------------------------------------------------------------------
def get_next_message(self, topic, subscriber):
"""Returns the next message for the named topic for this subscriber.
If no more messages await this subscriber an empty DbResult is
returned.
topic -- The topic
subscriber -- The subscriber
"""
# Assert _API contract_ conditions...
assert type(topic) is StringType
assert type(subscriber) is StringType
# Check field content
if not _check_string(subscriber, self.max_subscriber_name_length):
return DbResult(None, 'The subscriber string is invalid')
if not _check_string(topic, self.max_topic_name_length):
return DbResult(None, 'The topic string is invalid')
# If the subscriber or topic does not exist then
# return a suitable error.
if (subscriber not in self._subscribers or
topic not in self._topics):
return DbResult(None, 'The subscription does not exist')
        # Iterate through the topic's messages and return the first one
        # this subscriber has not yet read, removing the subscriber
        # from the message.
for messageId in range(len(self._topic_messages[topic])):
message = self._topic_messages[topic][messageId]
if subscriber in message.subscribers:
# Remove the subscriber from the list of subscribers
# (this prevents the message being returned again)
message.subscribers.remove(subscriber)
# And put it back into the topic array
# or remove if no further subscribers
if message.subscribers:
self._topic_messages[topic][messageId] = message
else:
self._topic_messages[topic].pop(messageId)
# And count
assert self._messages > 0
self._messages -= 1
left = len(self._topic_messages[topic])
log.msg('Got message ({:d} left in topic)'.format(left))
return DbResult(message.text, None)
# No more messages for this subscriber if we get here
log.msg('No messages for subscriber'
' ({:d} left in total)'.format(self._messages))
return DbResult(None, None)
# -------------------------------------------------------------------------
def clear(self):
"""Clears the database. Used by the unit-test framework."""
self._topics = []
self._subscribers = []
self._topic_subscribers = {}
self._topic_messages = {}
self._messages = 0
# -------------------------------------------------------------------------
def __repr__(self):
"""A simplified string representation fo this class.
For debug & test purposes. It return a string with a summary of
the object's state including the list of subscribers for each topic
and a count of the number of messages still to be read on each topic.
"""
us = 'MockDb/'
us += 'topics=' + str(len(self._topics))
us += ' subscribers=' + str(len(self._subscribers))
us += ' messages=' + str(self._messages)
# Subscribers to topics...
for topic in self._topics:
us += " topic['%s'].subscribers=%s" % \
(topic, self._topic_subscribers[topic])
# messages in the topics...
for topic in self._topics:
us += " topic['%s'].messages=%d" % \
(topic, len(self._topic_messages[topic]))
us += '/'
return us
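# Illustrative usage sketch (not part of the original module); assumes the
# configuration file referenced by CONFIG_FILE is present. Topic and subscriber
# names are hypothetical.
#
#     db = MockDb()
#     db.subscribe('weather', 'alice')
#     db.put_message('weather', 'rain later')
#     db.get_next_message('weather', 'alice')  # DbResult(message='rain later', error=None)
#     db.get_next_message('weather', 'alice')  # DbResult(message=None, error=None)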
|
apache-2.0
| 1,159,775,142,815,923,200 | 38.777143 | 79 | 0.599914 | false |
ashishbaghudana/mthesis-ashish
|
resources/tees/ExampleBuilders/EntityExampleBuilder.py
|
1
|
30545
|
"""
Trigger examples
"""
__version__ = "$Revision: 1.34 $"
import sys, os
thisPath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(thisPath,"..")))
from ExampleBuilder import ExampleBuilder
import Utils.Libraries.PorterStemmer as PorterStemmer
from Core.IdSet import IdSet
import Core.ExampleUtils as ExampleUtils
#from Core.Gazetteer import Gazetteer
from FeatureBuilders.RELFeatureBuilder import RELFeatureBuilder
from FeatureBuilders.WordNetFeatureBuilder import WordNetFeatureBuilder
from FeatureBuilders.GiulianoFeatureBuilder import GiulianoFeatureBuilder
from FeatureBuilders.DrugFeatureBuilder import DrugFeatureBuilder
import PhraseTriggerExampleBuilder
import Utils.InteractionXML.ResolveEPITriggerTypes
import Utils.Range as Range
class EntityExampleBuilder(ExampleBuilder):
def __init__(self, style=None, classSet=None, featureSet=None, gazetteerFileName=None, skiplist=None):
if classSet == None:
classSet = IdSet(1)
if featureSet == None:
featureSet = IdSet()
ExampleBuilder.__init__(self, classSet, featureSet)
assert( classSet.getId("neg") == 1 )
#gazetteerFileName="/usr/share/biotext/GeniaChallenge/SharedTaskTriggerTest/gazetteer-train"
if gazetteerFileName!=None:
self.gazetteer=Gazetteer.loadGztr(gazetteerFileName)
print >> sys.stderr, "Loaded gazetteer from",gazetteerFileName
else:
print >> sys.stderr, "No gazetteer loaded"
self.gazetteer=None
self._setDefaultParameters(["rel_features", "wordnet", "bb_features", "giuliano",
"epi_merge_negated", "limit_merged_types", "genia_task1",
"names", "build_for_nameless", "skip_for_nameless",
"pos_only", "all_tokens", "pos_pairs", "linear_ngrams",
"phospho", "drugbank_features", "ddi13_features", "metamap"])
self.styles = self.getParameters(style)
# if "selftrain_group" in self.styles:
# self.selfTrainGroups = set()
# if "selftrain_group-1" in self.styles:
# self.selfTrainGroups.add("-1")
# if "selftrain_group0" in self.styles:
# self.selfTrainGroups.add("0")
# if "selftrain_group1" in self.styles:
# self.selfTrainGroups.add("1")
# if "selftrain_group2" in self.styles:
# self.selfTrainGroups.add("2")
# if "selftrain_group3" in self.styles:
# self.selfTrainGroups.add("3")
# print >> sys.stderr, "Self-train-groups:", self.selfTrainGroups
self.skiplist = set()
if skiplist != None:
f = open(skiplist, "rt")
for line in f.readlines():
self.skiplist.add(line.strip())
f.close()
if self.styles["rel_features"]:
self.relFeatureBuilder = RELFeatureBuilder(featureSet)
if self.styles["wordnet"]:
self.wordNetFeatureBuilder = WordNetFeatureBuilder(featureSet)
if self.styles["bb_features"]:
self.bacteriaTokens = PhraseTriggerExampleBuilder.getBacteriaTokens()
#self.bacteriaTokens = PhraseTriggerExampleBuilder.getBacteriaTokens(PhraseTriggerExampleBuilder.getBacteriaNames())
if self.styles["giuliano"]:
self.giulianoFeatureBuilder = GiulianoFeatureBuilder(featureSet)
if self.styles["drugbank_features"]:
self.drugFeatureBuilder = DrugFeatureBuilder(featureSet)
def getMergedEntityType(self, entities):
"""
If a single token belongs to multiple entities of different types,
a new, composite type is defined. This type is the alphabetically
ordered types of these entities joined with '---'.
"""
types = set()
entityIds = set()
for entity in entities:
if entity.get("given") == "True" and self.styles["all_tokens"]:
continue
if entity.get("type") == "Entity" and self.styles["genia_task1"]:
continue
if self.styles["epi_merge_negated"]:
types.add(Utils.InteractionXML.ResolveEPITriggerTypes.getEPIBaseType(entity.get("type")))
entityIds.add(entity.get("id"))
else:
types.add(entity.get("type"))
entityIds.add(entity.get("id"))
types = list(types)
types.sort()
typeString = ""
for type in types:
#if type == "Protein" and "all_tokens" in self.styles:
# continue
if typeString != "":
typeString += "---"
typeString += type
if typeString == "":
return "neg", None
idString = "/".join(sorted(list(entityIds)))
if self.styles["limit_merged_types"]:
if typeString.find("---") != -1:
if typeString == "Gene_expression---Positive_regulation":
return typeString, idString
else:
return typeString.split("---")[0], idString # ids partially incorrect
else:
return typeString, idString
return typeString, idString
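    # Illustrative sketch (not part of the original module): if a head token
    # belongs to entities of types "Binding" and "Positive_regulation", the merged
    # class name becomes "Binding---Positive_regulation" (unless the
    # limit_merged_types style collapses it back to a single type).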
def getMetaMapFeatures(self, token, sentenceGraph, features):
analyses = sentenceGraph.sentenceElement.find("analyses")
if analyses == None:
return
metamap = analyses.find("metamap")
if metamap == None:
return
tokenOffset = Range.charOffsetToSingleTuple(token.get("charOffset"))
skipAttr = set(["charOffset", "text"])
for phrase in metamap.findall("phrase"):
phraseOffset = Range.charOffsetToSingleTuple(phrase.get("charOffset"))
if Range.overlap(tokenOffset, phraseOffset):
attr = phrase.attrib
attrNames = sorted(attr.keys())
for attrName in attrNames:
if attrName in skipAttr:
continue
elif attrName == "score":
features["_metamap_score"] = 0.001 * abs(int(attr[attrName]))
else:
attrValues = attr[attrName].split(",")
for attrValue in attrValues:
features["_metamap_"+attrName+"_"+attrValue.replace(" ", "-")] = 1
def getTokenFeatures(self, token, sentenceGraph):
"""
Returns a list of features based on the attributes of a token.
These can be used to define more complex features.
"""
# These features are cached when this method is first called
# for a token.
if self.tokenFeatures.has_key(token):
return self.tokenFeatures[token], self.tokenFeatureWeights[token]
tokTxt=sentenceGraph.getTokenText(token)
features = {}
features["_txt_"+tokTxt]=1
features["_POS_"+token.get("POS")]=1
if sentenceGraph.tokenIsName[token] and not self.styles["names"]:
features["_given"]=1
for entity in sentenceGraph.tokenIsEntityHead[token]:
if entity.get("given") == "True":
features["_annType_"+entity.get("type")]=1
if self.styles["metamap"]:
self.getMetaMapFeatures(token, sentenceGraph, features)
# # Filip's gazetteer based features (can be used separately from exclude_gazetteer)
# if "gazetteer_features" in self.styles:
# tokTxtLower = tokTxt.lower()
# if "stem_gazetteer" in self.styles:
# tokTxtLower = PorterStemmer.stem(tokTxtLower)
# if self.gazetteer and tokTxtLower in self.gazetteer:
# for label,weight in self.gazetteer[tokTxtLower].items():
# features["_knownLabel_"+label]=weight # 1 performs slightly worse
## BANNER features
#if sentenceGraph.entityHintsByToken.has_key(token):
# features["BANNER-entity"] = 1
# Wordnet features
#if "wordnet" in self.styles:
# for wordNetFeature in self.wordNetFeatureBuilder.getTokenFeatures(tokTxt, token.get("POS")):
# features["_WN_"+wordNetFeature] = 1
self.tokenFeatures[token] = sorted(features.keys())
self.tokenFeatureWeights[token] = features
return self.tokenFeatures[token], self.tokenFeatureWeights[token]
def buildLinearOrderFeatures(self,sentenceGraph,index,tag,features):
"""
Linear features are built by marking token features with a tag
that defines their relative position in the linear order.
"""
tag = "linear_"+tag
tokenFeatures, tokenFeatureWeights = self.getTokenFeatures(sentenceGraph.tokens[index], sentenceGraph)
for tokenFeature in tokenFeatures:
features[self.featureSet.getId(tag+tokenFeature)] = tokenFeatureWeights[tokenFeature]
def buildLinearNGram(self, i, j, sentenceGraph, features):
ngram = "ngram"
for index in range(i, j+1):
ngram += "_" + sentenceGraph.getTokenText(sentenceGraph.tokens[index]).lower()
features[self.featureSet.getId(ngram)] = 1
def buildExamplesFromGraph(self, sentenceGraph, outfile, goldGraph=None, structureAnalyzer=None):
"""
Build one example for each token of the sentence
"""
if sentenceGraph.sentenceElement.get("origId") in self.skiplist:
print >> sys.stderr, "Skipping sentence", sentenceGraph.sentenceElement.get("origId")
return 0 #[]
#examples = []
exampleIndex = 0
self.tokenFeatures = {}
self.tokenFeatureWeights = {}
# determine (manually or automatically) the setting for whether sentences with no given entities should be skipped
buildForNameless = False
if structureAnalyzer and not structureAnalyzer.hasGroupClass("GIVEN", "ENTITY"): # no given entities points to no separate NER program being used
buildForNameless = True
if self.styles["build_for_nameless"]: # manually force the setting
buildForNameless = True
if self.styles["skip_for_nameless"]: # manually force the setting
buildForNameless = False
# determine whether sentences with no given entities should be skipped
namedEntityHeadTokens = []
if not self.styles["names"]:
namedEntityCount = 0
for entity in sentenceGraph.entities:
if entity.get("given") == "True": # known data which can be used for features
namedEntityCount += 1
namedEntityCountFeature = "nameCount_" + str(namedEntityCount)
# NOTE!!! This will change the number of examples and omit
# all triggers (positive and negative) from sentences which
# have no NE:s, possibly giving a too-optimistic performance
# value. Such sentences can still have triggers from intersentence
# interactions, but as such events cannot be recovered anyway,
# looking for these triggers would be pointless.
if namedEntityCount == 0 and not buildForNameless: # no names, no need for triggers
return 0 #[]
if self.styles["pos_pairs"]:
namedEntityHeadTokens = self.getNamedEntityHeadTokens(sentenceGraph)
else:
for key in sentenceGraph.tokenIsName.keys():
sentenceGraph.tokenIsName[key] = False
bagOfWords = {}
for token in sentenceGraph.tokens:
text = "bow_" + token.get("text")
if not bagOfWords.has_key(text):
bagOfWords[text] = 0
bagOfWords[text] += 1
if sentenceGraph.tokenIsName[token]:
text = "ne_" + text
if not bagOfWords.has_key(text):
bagOfWords[text] = 0
bagOfWords[text] += 1
bowFeatures = {}
for k in sorted(bagOfWords.keys()):
bowFeatures[self.featureSet.getId(k)] = bagOfWords[k]
self.inEdgesByToken = {}
self.outEdgesByToken = {}
self.edgeSetByToken = {}
for token in sentenceGraph.tokens:
#inEdges = sentenceGraph.dependencyGraph.in_edges(token, data=True)
#fixedInEdges = []
#for edge in inEdges:
# fixedInEdges.append( (edge[0], edge[1], edge[2]["element"]) )
#inEdges = fixedInEdges
inEdges = sentenceGraph.dependencyGraph.getInEdges(token)
#inEdges.sort(compareDependencyEdgesById)
self.inEdgesByToken[token] = inEdges
#outEdges = sentenceGraph.dependencyGraph.out_edges(token, data=True)
#fixedOutEdges = []
#for edge in outEdges:
# fixedOutEdges.append( (edge[0], edge[1], edge[2]["element"]) )
#outEdges = fixedOutEdges
outEdges = sentenceGraph.dependencyGraph.getOutEdges(token)
#outEdges.sort(compareDependencyEdgesById)
self.outEdgesByToken[token] = outEdges
self.edgeSetByToken[token] = set(inEdges + outEdges)
for i in range(len(sentenceGraph.tokens)):
token = sentenceGraph.tokens[i]
# CLASS
if len(sentenceGraph.tokenIsEntityHead[token]) > 0:
categoryName, entityIds = self.getMergedEntityType(sentenceGraph.tokenIsEntityHead[token])
else:
categoryName, entityIds = "neg", None
self.exampleStats.beginExample(categoryName)
# Recognize only non-named entities (i.e. interaction words)
if sentenceGraph.tokenIsName[token] and not self.styles["names"] and not self.styles["all_tokens"]:
self.exampleStats.filter("name")
self.exampleStats.endExample()
continue
# if "selftrain_limits" in self.styles:
# # any predicted entity not part of the self-training set causes example to be rejected
# filtered = False
# for entity in sentenceGraph.tokenIsEntityHead[token]:
# if entity.get("selftrain") == "False":
# self.exampleStats.filter("selftrain_limits")
# self.exampleStats.endExample()
# filtered = True
# break
# if filtered:
# continue
# if "selftrain_group" in self.styles:
# # any predicted entity not part of the self-training set causes example to be rejected
# filtered = False
# for entity in sentenceGraph.tokenIsEntityHead[token]:
# if entity.get("selftraingroup") not in self.selfTrainGroups:
# self.exampleStats.filter("selftrain_group")
# self.exampleStats.endExample()
# filtered = True
# break
# if filtered:
# continue
if self.styles["pos_only"] and categoryName == "neg":
self.exampleStats.filter("pos_only")
self.exampleStats.endExample()
continue
category = self.classSet.getId(categoryName)
if category == None:
self.exampleStats.filter("undefined_class")
self.exampleStats.endExample()
continue
tokenText = token.get("text").lower()
# if "stem_gazetteer" in self.styles:
# tokenText = PorterStemmer.stem(tokenText)
# if ("exclude_gazetteer" in self.styles) and self.gazetteer and tokenText not in self.gazetteer:
# features = {}
# features[self.featureSet.getId("exclude_gazetteer")] = 1
# extra = {"xtype":"token","t":token.get("id"),"excluded":"True"}
# if entityIds != None:
# extra["goldIds"] = entityIds
# #examples.append( (sentenceGraph.getSentenceId()+".x"+str(exampleIndex),category,features,extra) )
# ExampleUtils.appendExamples([(sentenceGraph.getSentenceId()+".x"+str(exampleIndex),category,features,extra)], outfile)
# exampleIndex += 1
# continue
# FEATURES
features = {}
if not self.styles["names"]:
features[self.featureSet.getId(namedEntityCountFeature)] = 1
#for k,v in bagOfWords.iteritems():
# features[self.featureSet.getId(k)] = v
# pre-calculate bow _features_
features.update(bowFeatures)
# for j in range(len(sentenceGraph.tokens)):
# text = "bow_" + sentenceGraph.tokens[j].get("text")
# if j < i:
# features[self.featureSet.getId("bf_" + text)] = 1
# elif j > i:
# features[self.featureSet.getId("af_" + text)] = 1
# Main features
text = token.get("text")
features[self.featureSet.getId("txt_"+text)] = 1
features[self.featureSet.getId("POS_"+token.get("POS"))] = 1
stem = PorterStemmer.stem(text)
features[self.featureSet.getId("stem_"+stem)] = 1
features[self.featureSet.getId("nonstem_"+text[len(stem):])] = 1
# Normalized versions of the string (if same as non-normalized, overlap without effect)
normalizedText = text.replace("-","").replace("/","").replace(",","").replace("\\","").replace(" ","").lower()
if normalizedText == "bound": # should be for all irregular verbs
normalizedText = "bind"
features[self.featureSet.getId("txt_"+normalizedText)] = 1
norStem = PorterStemmer.stem(normalizedText)
features[self.featureSet.getId("stem_"+norStem)] = 1
features[self.featureSet.getId("nonstem_"+normalizedText[len(norStem):])] = 1
## Subspan features
#textLower = text.lower()
#for i in range(1, len(textLower)):
# features[self.featureSet.getId("subspanbegin"+str(i)+"_"+textLower[0:i])] = 1
# features[self.featureSet.getId("subspanend"+str(i)+"_"+textLower[-i:])] = 1
# Substring features
for string in text.split("-"):
stringLower = string.lower()
features[self.featureSet.getId("substring_"+stringLower)] = 1
features[self.featureSet.getId("substringstem_"+PorterStemmer.stem(stringLower))] = 1
# Linear order features
for index in [-3,-2,-1,1,2,3]:
if i + index > 0 and i + index < len(sentenceGraph.tokens):
self.buildLinearOrderFeatures(sentenceGraph, i + index, str(index), features)
# Linear n-grams
if self.styles["linear_ngrams"]:
self.buildLinearNGram(max(0, i-1), i, sentenceGraph, features)
self.buildLinearNGram(max(0, i-2), i, sentenceGraph, features)
if self.styles["phospho"]:
if text.find("hospho") != -1:
features[self.featureSet.getId("phospho_found")] = 1
features[self.featureSet.getId("begin_"+text[0:2].lower())] = 1
features[self.featureSet.getId("begin_"+text[0:3].lower())] = 1
if self.styles["bb_features"]:
if text.lower() in self.bacteriaTokens:
features[self.featureSet.getId("lpsnBacToken")] = 1
# Content
if i > 0 and text[0].isalpha() and text[0].isupper():
features[self.featureSet.getId("upper_case_start")] = 1
for j in range(len(text)):
if j > 0 and text[j].isalpha() and text[j].isupper():
features[self.featureSet.getId("upper_case_middle")] = 1
# numbers and special characters
if text[j].isdigit():
features[self.featureSet.getId("has_digits")] = 1
if j > 0 and text[j-1] == "-":
features[self.featureSet.getId("has_hyphenated_digit")] = 1
elif text[j] == "-":
features[self.featureSet.getId("has_hyphen")] = 1
elif text[j] == "/":
features[self.featureSet.getId("has_fslash")] = 1
elif text[j] == "\\":
features[self.featureSet.getId("has_bslash")] = 1
# duplets
if j > 0:
features[self.featureSet.getId("dt_"+text[j-1:j+1].lower())] = 1
# triplets
if j > 1:
features[self.featureSet.getId("tt_"+text[j-2:j+1].lower())] = 1
                    # quadruplets (don't work, slight decrease (0.5 pp) on f-score)
#if j > 2:
# features[self.featureSet.getId("qt_"+text[j-3:j+1].lower())] = 1
# Attached edges (Hanging in and out edges)
t1InEdges = self.inEdgesByToken[token]
for edge in t1InEdges:
edgeType = edge[2].get("type")
features[self.featureSet.getId("t1HIn_"+edgeType)] = 1
features[self.featureSet.getId("t1HIn_"+edge[0].get("POS"))] = 1
features[self.featureSet.getId("t1HIn_"+edgeType+"_"+edge[0].get("POS"))] = 1
tokenText = sentenceGraph.getTokenText(edge[0])
features[self.featureSet.getId("t1HIn_"+tokenText)] = 1
features[self.featureSet.getId("t1HIn_"+edgeType+"_"+tokenText)] = 1
tokenStem = PorterStemmer.stem(tokenText)
features[self.featureSet.getId("t1HIn_"+tokenStem)] = 1
features[self.featureSet.getId("t1HIn_"+edgeType+"_"+tokenStem)] = 1
features[self.featureSet.getId("t1HIn_"+norStem+"_"+edgeType+"_"+tokenStem)] = 1
t1OutEdges = self.outEdgesByToken[token]
for edge in t1OutEdges:
edgeType = edge[2].get("type")
features[self.featureSet.getId("t1HOut_"+edgeType)] = 1
features[self.featureSet.getId("t1HOut_"+edge[1].get("POS"))] = 1
features[self.featureSet.getId("t1HOut_"+edgeType+"_"+edge[1].get("POS"))] = 1
tokenText = sentenceGraph.getTokenText(edge[1])
features[self.featureSet.getId("t1HOut_"+tokenText)] = 1
features[self.featureSet.getId("t1HOut_"+edgeType+"_"+tokenText)] = 1
tokenStem = PorterStemmer.stem(tokenText)
features[self.featureSet.getId("t1HOut_"+tokenStem)] = 1
features[self.featureSet.getId("t1HOut_"+edgeType+"_"+tokenStem)] = 1
features[self.featureSet.getId("t1HOut_"+norStem+"_"+edgeType+"_"+tokenStem)] = 1
# REL features
if self.styles["rel_features"]:
self.relFeatureBuilder.setFeatureVector(features)
self.relFeatureBuilder.buildAllFeatures(sentenceGraph.tokens, i)
self.relFeatureBuilder.setFeatureVector(None)
# DDI13 features
if self.styles["ddi13_features"]:
for index in range(len(normalizedText)):
features[self.featureSet.getId("ddi13_fromstart" + str(index) + "_" + normalizedText[:index+1])] = 1
features[self.featureSet.getId("ddi13_fromend" + str(index) + "_" + normalizedText[index:])] = 1
if self.styles["drugbank_features"]:
self.drugFeatureBuilder.setFeatureVector(features)
self.drugFeatureBuilder.tag = "ddi_"
self.drugFeatureBuilder.buildDrugFeatures(token)
self.drugFeatureBuilder.setFeatureVector(None)
#self.wordNetFeatureBuilder.getTokenFeatures("show", "VBP")
#tokTxt = token.get("text")
#tokPOS = token.get("POS")
#wordNetFeatures = []
#wordNetFeatures = self.wordNetFeatureBuilder.getTokenFeatures(tokTxt, tokPOS)
#self.wordNetFeatureBuilder.getTokenFeatures(tokTxt, tokPOS)
if self.styles["wordnet"]:
tokTxt = token.get("text")
tokPOS = token.get("POS")
wordNetFeatures = self.wordNetFeatureBuilder.getTokenFeatures(tokTxt, tokPOS)
for wordNetFeature in wordNetFeatures:
#print wordNetFeature,
features[self.featureSet.getId("WN_"+wordNetFeature)] = 1
#print
if self.styles["giuliano"]:
self.giulianoFeatureBuilder.setFeatureVector(features)
self.giulianoFeatureBuilder.buildTriggerFeatures(token, sentenceGraph)
self.giulianoFeatureBuilder.setFeatureVector(None)
extra = {"xtype":"token","t":token.get("id")}
if self.styles["bb_features"]:
extra["trigex"] = "bb" # Request trigger extension in ExampleWriter
if self.styles["epi_merge_negated"]:
extra["unmergeneg"] = "epi" # Request trigger type unmerging
if entityIds != None:
extra["goldIds"] = entityIds # The entities to which this example corresponds
#examples.append( (sentenceGraph.getSentenceId()+".x"+str(exampleIndex),category,features,extra) )
# chains
self.buildChains(token, sentenceGraph, features)
if self.styles["pos_pairs"]:
self.buildPOSPairs(token, namedEntityHeadTokens, features)
example = (sentenceGraph.getSentenceId()+".x"+str(exampleIndex), category, features, extra)
ExampleUtils.appendExamples([example], outfile)
exampleIndex += 1
self.exampleStats.endExample()
#return examples
return exampleIndex
def buildChains(self,token,sentenceGraph,features,depthLeft=3,chain="",visited=None):
if depthLeft == 0:
return
strDepthLeft = "dist_" + str(depthLeft)
if visited == None:
visited = set()
inEdges = self.inEdgesByToken[token]
outEdges = self.outEdgesByToken[token]
edgeSet = visited.union(self.edgeSetByToken[token])
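        # Note on the features emitted below: the two loops walk the dependency graph
        # outward from the candidate token for up to depthLeft hops. Each hop adds an
        # edge-type feature, a chain feature accumulating direction-tagged edge types
        # ("-frw_" / "-rev_"), and the visited token's features prefixed with the
        # remaining depth ("dist_<n>").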
for edge in inEdges:
if not edge in visited:
edgeType = edge[2].get("type")
features[self.featureSet.getId("dep_"+strDepthLeft+edgeType)] = 1
nextToken = edge[0]
tokenFeatures, tokenWeights = self.getTokenFeatures(nextToken, sentenceGraph)
for tokenFeature in tokenFeatures:
features[self.featureSet.getId(strDepthLeft + tokenFeature)] = tokenWeights[tokenFeature]
# for entity in sentenceGraph.tokenIsEntityHead[nextToken]:
# if entity.get("given") == "True":
# features[self.featureSet.getId("name_dist_"+strDepthLeft)] = 1
# features[self.featureSet.getId("name_dist_"+strDepthLeft+entity.get("type"))] = 1
# features[self.featureSet.getId("POS_dist_"+strDepthLeft+nextToken.get("POS"))] = 1
# tokenText = sentenceGraph.getTokenText(nextToken)
# features[self.featureSet.getId("text_dist_"+strDepthLeft+tokenText)] = 1
if sentenceGraph.tokenIsName[nextToken] and not self.styles["names"]:
features[self.featureSet.getId("name_chain_dist_"+strDepthLeft+chain+"-frw_"+edgeType)] = 1
features[self.featureSet.getId("chain_dist_"+strDepthLeft+chain+"-frw_"+edgeType)] = 1
self.buildChains(nextToken,sentenceGraph,features,depthLeft-1,chain+"-frw_"+edgeType,edgeSet)
for edge in outEdges:
if not edge in visited:
edgeType = edge[2].get("type")
features[self.featureSet.getId("dep_dist_"+strDepthLeft+edgeType)] = 1
nextToken = edge[1]
tokenFeatures, tokenWeights = self.getTokenFeatures(nextToken, sentenceGraph)
for tokenFeature in tokenFeatures:
features[self.featureSet.getId(strDepthLeft + tokenFeature)] = tokenWeights[tokenFeature]
# for entity in sentenceGraph.tokenIsEntityHead[nextToken]:
# if entity.get("given") == "True":
# features[self.featureSet.getId("name_dist_"+strDepthLeft)] = 1
# features[self.featureSet.getId("name_dist_"+strDepthLeft+entity.get("type"))] = 1
# features[self.featureSet.getId("POS_dist_"+strDepthLeft+nextToken.get("POS"))] = 1
# tokenText = sentenceGraph.getTokenText(nextToken)
# features[self.featureSet.getId("text_dist_"+strDepthLeft+tokenText)] = 1
if sentenceGraph.tokenIsName[nextToken] and not self.styles["names"]:
features[self.featureSet.getId("name_chain_dist_"+strDepthLeft+chain+"-rev_"+edgeType)] = 1
features[self.featureSet.getId("chain_dist_"+strDepthLeft+chain+"-rev_"+edgeType)] = 1
self.buildChains(nextToken,sentenceGraph,features,depthLeft-1,chain+"-rev_"+edgeType,edgeSet)
def getNamedEntityHeadTokens(self, sentenceGraph):
headTokens = []
for entity in sentenceGraph.entities:
if entity.get("given") == "True": # known data which can be used for features
headTokens.append(sentenceGraph.entityHeadTokenByEntity[entity])
return headTokens
def buildPOSPairs(self, token, namedEntityHeadTokens, features):
tokenPOS = token.get("POS")
assert tokenPOS != None
for headToken in namedEntityHeadTokens:
headPOS = headToken.get("POS")
features[self.featureSet.getId("POS_pair_NE_"+tokenPOS+"-"+headPOS)] = 1
|
mit
| 6,282,562,362,146,071,000 | 50.596284 | 153 | 0.577018 | false |
stefantalpalaru/generic_celery_task
|
tests/common.py
|
1
|
1389
|
import subprocess
import time
import os
redis_process = None
celery_process = None
DEVNULL = open(os.devnull, 'w')
def setup_redis():
global redis_process
if redis_process is not None:
return
redis_process = subprocess.Popen(['redis-server', '--port', '6389', '--bind', '127.0.0.1', '--logfile', ''], stdout=DEVNULL, stderr=DEVNULL)
time.sleep(1)
def teardown_redis():
global redis_process
redis_process.terminate()
timeout = 5 # seconds
seconds_passed = 0
start = time.time()
while redis_process.poll() is None and seconds_passed < timeout:
seconds_passed = time.time() - start
if redis_process.poll() is None:
redis_process.kill()
redis_process.wait()
redis_process = None
def setup_celery():
global celery_process
if celery_process is not None:
return
celery_process = subprocess.Popen(['celery', 'worker', '-A', 'celeryapp', '-Ofair'], stdout=DEVNULL, stderr=DEVNULL)
time.sleep(2)
def teardown_celery():
global celery_process
celery_process.terminate()
timeout = 5 # seconds
seconds_passed = 0
start = time.time()
while celery_process.poll() is None and seconds_passed < timeout:
seconds_passed = time.time() - start
if celery_process.poll() is None:
celery_process.kill()
celery_process.wait()
celery_process = None
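# Example wiring (illustrative sketch; the module-level hook names below are
# assumptions about how a test suite would bind these helpers):
#
#   def setup_module():
#       setup_redis()
#       setup_celery()
#
#   def teardown_module():
#       teardown_celery()
#       teardown_redis()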
|
bsd-3-clause
| 5,390,091,021,668,461,000 | 27.346939 | 144 | 0.647228 | false |
Parallel-in-Time/pySDC
|
pySDC/implementations/problem_classes/HeatEquation_2D_FD_periodic.py
|
1
|
5118
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import cg
from pySDC.core.Errors import ParameterError, ProblemError
from pySDC.core.Problem import ptype
from pySDC.implementations.datatype_classes.mesh import mesh
# noinspection PyUnusedLocal
class heat2d_periodic(ptype):
"""
Example implementing the unforced 2D heat equation with periodic BCs in [0,1]^2,
discretized using central finite differences
Attributes:
A: second-order FD discretization of the 2D laplace operator
dx: distance between two spatial nodes (here: being the same in both dimensions)
"""
def __init__(self, problem_params, dtype_u=mesh, dtype_f=mesh):
"""
Initialization routine
Args:
problem_params (dict): custom parameters for the example
dtype_u: mesh data type (will be passed parent class)
dtype_f: mesh data type (will be passed parent class)
"""
# these parameters will be used later, so assert their existence
essential_keys = ['nvars', 'nu', 'freq']
for key in essential_keys:
if key not in problem_params:
msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
raise ParameterError(msg)
# make sure parameters have the correct form
if problem_params['freq'] % 2 != 0:
raise ProblemError('need even number of frequencies due to periodic BCs')
if len(problem_params['nvars']) != 2:
raise ProblemError('this is a 2d example, got %s' % problem_params['nvars'])
if problem_params['nvars'][0] != problem_params['nvars'][1]:
raise ProblemError('need a square domain, got %s' % problem_params['nvars'])
if problem_params['nvars'][0] % 2 != 0:
raise ProblemError('the setup requires nvars = 2^p per dimension')
# invoke super init, passing number of dofs, dtype_u and dtype_f
super(heat2d_periodic, self).__init__(init=(problem_params['nvars'], None, np.dtype('float64')),
dtype_u=dtype_u, dtype_f=dtype_f, params=problem_params)
# compute dx (equal in both dimensions) and get discretization matrix A
self.dx = 1.0 / self.params.nvars[0]
self.A = self.__get_A(self.params.nvars, self.params.nu, self.dx)
@staticmethod
def __get_A(N, nu, dx):
"""
Helper function to assemble FD matrix A in sparse format
Args:
N (list): number of dofs
nu (float): diffusion coefficient
dx (float): distance between two spatial nodes
Returns:
scipy.sparse.csc_matrix: matrix A in CSC format
"""
stencil = [1, -2, 1]
zero_pos = 2
dstencil = np.concatenate((stencil, np.delete(stencil, zero_pos - 1)))
offsets = np.concatenate(([N[0] - i - 1 for i in reversed(range(zero_pos - 1))],
[i - zero_pos + 1 for i in range(zero_pos - 1, len(stencil))]))
doffsets = np.concatenate((offsets, np.delete(offsets, zero_pos - 1) - N[0]))
A = sp.diags(dstencil, doffsets, shape=(N[0], N[0]), format='csc')
# stencil = [1, -2, 1]
# A = sp.diags(stencil, [-1, 0, 1], shape=(N[0], N[0]), format='csc')
A = sp.kron(A, sp.eye(N[0])) + sp.kron(sp.eye(N[1]), A)
A *= nu / (dx ** 2)
return A
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
f = self.dtype_f(self.init)
f[:] = self.A.dot(u.flatten()).reshape(self.params.nvars)
return f
def solve_system(self, rhs, factor, u0, t):
"""
Simple linear solver for (I-factor*A)u = rhs
Args:
rhs (dtype_f): right-hand side for the linear system
factor (float): abbrev. for the local stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver
t (float): current time (e.g. for time-dependent BCs)
Returns:
dtype_u: solution as mesh
"""
me = self.dtype_u(self.init)
me[:] = cg(sp.eye(self.params.nvars[0] * self.params.nvars[1], format='csc') - factor * self.A,
rhs.flatten(), x0=u0.flatten(), tol=1E-12)[0].reshape(self.params.nvars)
return me
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
me = self.dtype_u(self.init)
xvalues = np.array([i * self.dx for i in range(self.params.nvars[0])])
xv, yv = np.meshgrid(xvalues, xvalues)
me[:] = np.sin(np.pi * self.params.freq * xv) * np.sin(np.pi * self.params.freq * yv) * \
np.exp(-t * self.params.nu * 2 * (np.pi * self.params.freq) ** 2)
return me
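# Example instantiation (illustrative sketch; the parameter values are assumptions
# chosen to satisfy the checks in __init__: a square nvars with 2^p points per
# dimension and an even freq):
#
#   prob = heat2d_periodic(problem_params={'nvars': (16, 16), 'nu': 0.1, 'freq': 2})
#   u0 = prob.u_exact(0.0)       # exact solution at t=0, usable as initial condition
#   f0 = prob.eval_f(u0, 0.0)    # right-hand side at t=0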
|
bsd-2-clause
| 1,987,082,503,088,301,600 | 36.357664 | 104 | 0.574248 | false |
smashwilson/swiftest
|
swiftest/client.py
|
1
|
2894
|
import requests
from .exception import ProtocolError
from .account import Account
from .container import Container
class Client:
"""
The main entry point into Swiftest.
A Client mediates communications with OpenStack Object Storage API
endpoints, remembers your authentication token throughout the
session, and provides access to other objects through methods like
account() or container().
"""
def __init__(self, endpoint, username, auth_key):
"""
Construct a ready-to-use Client.
Authenticate to the specified OpenStack endpoint. Remember the generated
token and storage URL.
"""
self.endpoint = endpoint
self.username = username
auth_headers = {'X-Auth-User': username, 'X-Auth-Key': auth_key}
auth_response = requests.get(self.endpoint, headers=auth_headers)
auth_response.raise_for_status()
# Read the storage URL and auth token from the response.
self.storage_url = auth_response.headers['X-Storage-Url']
self.auth_token = auth_response.headers['X-Auth-Token']
def account(self):
"""
Access metadata about your account.
"""
return Account(self)
def container(self, name):
"""
Access a Container within this account by name.
If no container with this name exists, a NullContainer will be
returned instead.
"""
try:
return Container(self, name)
except requests.HTTPError as e:
if e.response.status_code == 404:
return NullContainer(self, name)
else:
raise
def container_names(self):
"""
List the names of Containers available in this account.
"""
names = self._call(requests.get, '').text.split("\n")
return [name for name in names if name.strip()]
def containers(self):
"""
Generate each existing Container.
"""
for name in self.container_names():
yield self.container(name)
def _call(self, method, path, accept_status=[], **kwargs):
"""
Perform an HTTP request against the storage endpoint.
Always include the auth token as a header and add "path" to the storage_url.
"""
extra = kwargs
if 'headers' in extra:
extra['headers']['X-Auth-Token'] = self.auth_token
else:
extra['headers'] = {'X-Auth-Token': self.auth_token}
r = method(self.storage_url + path, **extra)
if r.status_code not in accept_status:
r.raise_for_status()
return r
def __repr__(self):
cli_str = "<Client(endpoint='{}',username='{}',auth_key={})>"
return cli_str.format(self.endpoint,
self.username,
"...")
|
mit
| 4,992,642,017,786,282,000 | 29.145833 | 84 | 0.587422 | false |
dferguso/IGT4SAR
|
MissingPersonForm.py
|
1
|
6940
|
#-------------------------------------------------------------------------------
# Name: MissingPersomForm.py
#
# Purpose: Create Missing Person Flyer from data stored in the Subject
# Information data layer within MapSAR
#
# Author: Don Ferguson
#
# Created: 12/12/2011
# Copyright: (c) Don Ferguson 2011
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The GNU General Public License can be found at
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import arcpy
from datetime import datetime
#workspc = arcpy.GetParameterAsText(0)
output = arcpy.GetParameterAsText(0)
#arcpy.env.workspace = workspc
arcpy.env.overwriteOutput = "True"
fc3="Incident_Information"
fc2="Lead Agency"
rows = arcpy.SearchCursor(fc3)
row = rows.next()
arcpy.AddMessage("Get Incident Info")
while row:
# you need to insert correct field names in your getvalue function
LeadAgency = row.getValue("Lead_Agency")
where2 = '"Lead_Agency" = ' + "'" + LeadAgency + "'"
arcpy.AddMessage(where2)
rows2 = arcpy.SearchCursor(fc2, where2)
row2 = rows2.next()
Phone = 'none'
email = 'none'
while row2:
# you need to insert correct field names in your getvalue function
Phone = row2.getValue("Lead_Phone")
if Phone == 'none':
Phone = " "
arcpy.AddWarning("No Phone number provided for Lead Agency")
email = row2.getValue("E_Mail")
if email == 'none':
email = " "
arcpy.AddWarning("No e-mail address provided for Lead Agency")
row2 = rows2.next()
del rows2
del row2
row = rows.next()
del rows
del row
Callback = "If you have information please call: " + str(LeadAgency) + " at phone: " + str(Phone) + " or e-mail:" + str(email)
fc1="Subject_Information"
rows = arcpy.SearchCursor(fc1)
row = rows.next()
while row:
# you need to insert correct field names in your getvalue function
try:
Subject_Name = row.getValue("Name")
if len(Subject_Name) == 0:
arcpy.AddWarning('Need to provide a Subject Name')
except:
Subject_Name = " "
arcpy.AddWarning('Need to provide a Subject Name')
try:
fDate = row.getValue("Date_Seen")
Date_Seen = fDate.strftime("%m/%d/%Y")
except:
Date_Seen = " "
try:
fTime = row.getValue("Time_Seen")
except:
fTime = " "
Where_Last = row.getValue("WhereLastSeen")
Age = row.getValue("Age")
Gender = row.getValue("Gender")
Race = row.getValue("Race")
try:
Height1 = (row.getValue("Height"))/12.0
feet = int(Height1)
inches = int((Height1 - feet)*12.0)
fInches = "%1.0f" %inches
Height = str(feet) + " ft " + fInches +" in"
except:
Height = "NA"
Weight = row.getValue("Weight")
Build = row.getValue("Build")
Complex = row.getValue("Complexion")
Hair = row.getValue("Hair")
Eyes = row.getValue("Eyes")
Other = row.getValue("Other")
Shirt = row.getValue("Shirt")
Pants = row.getValue("Pants")
Jacket = row.getValue("Jacket")
Hat = row.getValue("Hat")
Footwear = row.getValue("Footwear")
Info = row.getValue("Info")
try:
QRCode = row.getValue("QRCode")
except:
QRCode = " "
filename = output + "/" + str(Subject_Name) + ".fdf"
txt= open (filename, "w")
txt.write("%FDF-1.2\n")
txt.write("%????\n")
txt.write("1 0 obj<</FDF<</F(MissingPersonForm.pdf)/Fields 2 0 R>>>>\n")
txt.write("endobj\n")
txt.write("2 0 obj[\n")
txt.write ("\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Name[0])/V(" + str(Subject_Name) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPFAge[0])/V(" + str(Age) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPFSex[0])/V(" + str(Gender) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Location[0])/V(" + str(Where_Last) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_TimeMissing[0])/V(" + fTime + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_DateMissing[0])/V(" + str(Date_Seen) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Race[0])/V(" + str(Race) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Height[0])/V(" + Height + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Weight[0])/V(" + str(Weight) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Build[0])/V(" + str(Build) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Complex[0])/V(" + str(Complex) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_HairColor[0])/V(" + str(Hair) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_EyeColor[0])/V(" + str(Eyes) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherPhy[0])/V(" + str(Other) + ")>>\n")
#txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherPhy[1])/V(" + str(Incident_Name) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_ShirtClothing[0])/V(" + str(Shirt) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_PantsClothing[0])/V(" + str(Pants) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_JacketClothing[0])/V(" + str(Jacket) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_HatClothing[0])/V(" + str(Hat) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_FootClothing[0])/V(" + str(Footwear) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherInfo[0])/V(" + str(Info) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_CallNumber[0])/V(" + str(Callback) + ")>>\n")
#txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].ImageField1[0])/V(" + str(Incident_Name) + ")>>\n")
txt.write("]\n")
txt.write("endobj\n")
txt.write("trailer\n")
txt.write("<</Root 1 0 R>>\n")
txt.write("%%EO\n")
txt.close ()
row = rows.next()
del rows
del row
#arcpy.DeleteFeatures_management(fc3)
|
gpl-3.0
| 9,069,429,812,889,086,000 | 39.590643 | 126 | 0.594092 | false |
openstack/octavia
|
octavia/controller/queue/v1/consumer.py
|
1
|
2251
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cotyledon
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from octavia.common import rpc
from octavia.controller.queue.v1 import endpoints
LOG = logging.getLogger(__name__)
class ConsumerService(cotyledon.Service):
def __init__(self, worker_id, conf):
super().__init__(worker_id)
self.conf = conf
self.topic = conf.oslo_messaging.topic
self.server = conf.host
self.endpoints = []
self.access_policy = dispatcher.DefaultRPCAccessPolicy
self.message_listener = None
def run(self):
LOG.info('Starting consumer...')
target = messaging.Target(topic=self.topic, server=self.server,
fanout=False)
self.endpoints = [endpoints.Endpoints()]
self.message_listener = rpc.get_server(
target, self.endpoints,
executor='threading',
access_policy=self.access_policy
)
self.message_listener.start()
def terminate(self):
if self.message_listener:
LOG.info('Stopping consumer...')
self.message_listener.stop()
LOG.info('Consumer successfully stopped. Waiting for final '
'messages to be processed...')
self.message_listener.wait()
if self.endpoints:
LOG.info('Shutting down endpoint worker executors...')
for e in self.endpoints:
try:
e.worker.executor.shutdown()
except AttributeError:
pass
super().terminate()
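# Example wiring (illustrative sketch; Octavia normally launches this service through
# its own entry points, so the names below are assumptions):
#
#   import cotyledon
#   from oslo_config import cfg
#
#   sm = cotyledon.ServiceManager()
#   sm.add(ConsumerService, workers=1, args=(cfg.CONF,))
#   sm.run()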
|
apache-2.0
| -881,828,057,490,526,600 | 34.171875 | 78 | 0.631719 | false |
luci/recipes-py
|
unittests/config_test.py
|
2
|
4512
|
#!/usr/bin/env vpython
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import test_env
from recipe_engine import config
from PB.recipe_engine import doc
d = doc.Doc
class TestConfigGroupSchema(test_env.RecipeEngineUnitTest):
def testNewReturnsConfigGroup(self):
schema = config.ConfigGroupSchema(test=config.Single(int))
self.assertIsInstance(schema.new(test=3), config.ConfigGroup)
def testCallCallsNew(self):
schema = config.ConfigGroupSchema(test=config.Single(int))
sentinel = object()
schema.new = lambda *args, **kwargs: sentinel
self.assertEqual(schema(test=3), sentinel)
def testMustHaveTypeMap(self):
with self.assertRaises(ValueError):
config.ConfigGroupSchema()
class TestEnum(test_env.RecipeEngineUnitTest):
def testEnum(self):
schema = config.ConfigGroupSchema(test=config.Enum('foo', 'bar'))
self.assertIsInstance(schema.new(test='foo'), config.ConfigGroup)
def testMustBeOneOf(self):
schema = config.ConfigGroupSchema(test=config.Enum('foo', 'bar'))
with self.assertRaises(ValueError):
schema.new(test='baz')
class TestSchemaProto(test_env.RecipeEngineUnitTest):
def test_config_group(self):
cg = config.ConfigGroup(
combo=config.Single((int, float), empty_val=20),
other=config.List(str),
field=config.Single((str, type(None))),
)
self.assertEqual(
cg.schema_proto(),
d.Schema(struct=d.Schema.Struct(type_map={
'combo': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.NUMBER],
required=True,
default_json='20',
)),
'other': d.Schema(list=d.Schema.List(
inner_type=[d.Schema.STRING],
)),
'field': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.STRING, d.Schema.NULL],
required=True,
default_json='null',
)),
})))
def test_config_group_schema(self):
cg = config.ConfigGroupSchema(
combo=config.Single((int, float), empty_val=20),
other=config.List(str),
field=config.Single((str, type(None))),
)
self.assertEqual(
cg.schema_proto(),
d.Schema(struct=d.Schema.Struct(type_map={
'combo': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.NUMBER],
required=True,
default_json='20',
)),
'other': d.Schema(list=d.Schema.List(
inner_type=[d.Schema.STRING],
)),
'field': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.STRING, d.Schema.NULL],
required=True,
default_json='null',
)),
})))
def test_config_list(self):
cl = config.ConfigList(lambda: config.ConfigGroup(
a = config.Single(bool),
b = config.Single(dict),
))
self.assertEqual(
cl.schema_proto(),
d.Schema(sequence=d.Schema.Sequence(
inner_type=d.Schema(struct=d.Schema.Struct(type_map={
'a': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.BOOLEAN],
required=True,
default_json='null',
)),
'b': d.Schema(single=d.Schema.Single(
inner_type=[d.Schema.OBJECT],
required=True,
default_json='null',
))
}))
))
)
def test_dict(self):
cd = config.Dict(value_type=list)
self.assertEqual(
cd.schema_proto(),
d.Schema(dict=d.Schema.Dict(
value_type=[d.Schema.ARRAY],
))
)
def test_set(self):
cd = config.Set(str)
self.assertEqual(
cd.schema_proto(),
d.Schema(set=d.Schema.Set(
inner_type=[d.Schema.STRING],
))
)
def test_list(self):
cd = config.List((int, type(None)))
self.assertEqual(
cd.schema_proto(),
d.Schema(list=d.Schema.List(
inner_type=[d.Schema.NUMBER, d.Schema.NULL],
))
)
def test_static(self):
cd = config.Static("hello")
self.assertEqual(
cd.schema_proto(),
d.Schema(static=d.Schema.Static(
default_json='"hello"',
))
)
def test_enum(self):
cd = config.Enum("hello", "world")
self.assertEqual(
cd.schema_proto(),
d.Schema(enum=d.Schema.Enum(
values_json=[
'"hello"',
'"world"',
],
required=True,
))
)
if __name__ == '__main__':
test_env.main()
|
apache-2.0
| 6,331,560,203,424,972,000 | 25.541176 | 75 | 0.598183 | false |
oaubert/advene
|
lib/advene/gui/plugins/kinect.py
|
1
|
5659
|
#
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2011-2017 Olivier Aubert <[email protected]>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import time
from gettext import gettext as _
from gi.repository import Gtk
import advene.core.config as config
from advene.gui.views import AdhocView
name="Kinect view/control plugin"
try:
import pytrack
except ImportError:
pytrack = None
def register(controller):
if pytrack is not None:
controller.register_viewclass(KinectController)
class KinectController(AdhocView):
view_name = _("Kinect Controller")
view_id = 'kinect'
tooltip=_("Kinect control interface")
def __init__(self, controller=None, uri=None, parameters=None):
super(KinectController, self).__init__(controller=controller)
self.close_on_package_load = False
self.contextual_actions = []
self.controller = controller
self.registered_rules = []
self.mode = ''
# When was the last seek done ?
self.last_seek = 0
self.seek_threshold = .5
if pytrack.pytrack_init(config.data.advenefile('Sample-Tracking.xml')):
self.log("Problem when initializing Kinect controller")
@pytrack.callback_function
def callback(*p):
self.callback(*p)
self.callbacks = pytrack.CallbacksT()
for (n, t) in self.callbacks._fields_:
setattr(self.callbacks, n, callback)
pytrack.pytrack_set_callbacks(self.callbacks)
# Load options
opt, arg = self.load_parameters(parameters)
self.options.update(opt)
self.widget = self.build_widget()
pytrack.pytrack_start_loop()
def handle_rate_control(self, fx):
if fx > .9:
rate = 8.0
elif fx > .8:
rate = 4.0
elif fx > .7:
rate = 2.0
elif fx > .4:
rate = 1.0
elif fx > .3:
rate = .5
elif fx > .2:
rate = .1
else:
rate = 1 / config.data.preferences['default-fps']
if self.controller.player.get_rate() != rate:
self.action.set_text("Set rate %.2f" % rate)
self.controller.queue_action(self.controller.player.set_rate, rate)
def handle_seek_control(self, fx):
t = time.time()
if t - self.last_seek < self.seek_threshold:
return
self.last_seek = t
if fx > .8:
seek = config.data.preferences['second-time-increment']
elif fx > .6:
seek = config.data.preferences['time-increment']
elif fx < .2:
seek = -config.data.preferences['second-time-increment']
elif fx < .4:
seek = -config.data.preferences['time-increment']
else:
seek =0
if seek:
self.action.set_text("Seek %d" % seek)
self.controller.queue_action(self.controller.update_status, "seek_relative", seek)
def handle_mode_selection(self, fx):
if fx < .3:
mode = 'rate'
elif fx > .7:
mode = 'seek'
else:
mode = 'none'
if mode != self.mode:
self.set_mode(mode)
def set_mode(self, mode):
if mode != self.mode:
if self.mode in self.mode_buttons:
self.mode_buttons[self.mode].set_sensitive(False)
self.mode = mode
self.mode_buttons[self.mode].set_sensitive(True)
self.action.set_text("Mode: %s" % mode)
def callback(self, event, fx, fy, ix, iy, d):
#self.log("Kinect: %s (%f, %f) (%d, %d) %d" % (event, fx, fy, ix, iy, d))
self.label.set_text("%s (%f, %f) (%d, %d) %d" % (event, fx, fy, ix, iy, d))
if event == 'push' and d == 5:
# Any direction
self.action.set_text("Play/pause")
self.controller.update_status('pause')
elif event == 'move':
if .5 < fy < .9:
# Control zone
if self.mode == 'rate':
self.handle_rate_control(fx)
elif self.mode == 'seek':
self.handle_seek_control(fx)
elif fy < .3:
# Mode selection
self.handle_mode_selection(fx)
def build_widget(self):
vbox=Gtk.VBox()
vbox.add(Gtk.Label(label="Kinect control"))
hb = Gtk.HBox()
self.mode_buttons = {
'rate': Gtk.Button("Rate"),
'none': Gtk.Button("Disabled"),
'seek': Gtk.Button("Seek"),
}
for k in ('rate', 'none', 'seek'):
hb.add(self.mode_buttons[k])
self.mode_buttons[k].set_sensitive(False)
vbox.pack_start(hb, False, True, 0)
self.label = Gtk.Label(label="Perform a Wave to start.")
vbox.add(self.label)
self.action = Gtk.Label(label="No action")
vbox.add(self.action)
# FIXME
#self.slider = Gtk.
vbox.show_all()
self.set_mode('none')
return vbox
|
gpl-2.0
| 8,219,196,242,009,655,000 | 32.288235 | 94 | 0.569712 | false |
dploeger/disclaimr
|
disclaimr/query_cache.py
|
1
|
2728
|
""" A global cache for milter LDAP queries
"""
import datetime
class QueryCache(object):
""" A global cache for milter LDAP queries
"""
cache = {}
""" The cache """
@staticmethod
def get(directory_server, query):
""" Return a cached query
:param directory_server: The directory server, that runs the query
:param query: The query itself
:return: The query or None if it wasn't cached or has timed out
"""
if directory_server.id not in QueryCache.cache or\
query not in QueryCache.cache[directory_server.id]:
# That item isn't cached
return None
# Check, if the item has timed out
now = datetime.datetime.now()
then = QueryCache.cache[directory_server.id][query]["timestamp"]
timeout = QueryCache.cache[directory_server.id]["_timeout"]
if (now-then).total_seconds() > timeout:
return None
# Store the item
return QueryCache.cache[directory_server.id][query]["data"]
@staticmethod
def set(directory_server, query, data):
""" Add a query to the cache
:param directory_server: The directory server, that runs the query
:param query: The query itself
:param data: The data returned from the query
"""
now = datetime.datetime.now()
if directory_server.id not in QueryCache.cache:
# Create a basic directory server cache item and store the
# timeout value
QueryCache.cache[directory_server.id] = {
"_timeout": directory_server.cache_timeout
}
# Add the item to the cache
QueryCache.cache[directory_server.id][query] = {
"timestamp": now,
"data": data
}
@staticmethod
def flush():
""" Walk through the cache and remove timed out values
"""
now = datetime.datetime.now()
for directory_server_id in list(QueryCache.cache):
timeout = QueryCache.cache[directory_server_id]["_timeout"]
for query in list(QueryCache.cache[directory_server_id]):
if query == "_timeout":
continue
then = QueryCache.cache[directory_server_id][query]["timestamp"]
if (now-then).total_seconds() > timeout:
# The cache item has timed out. Remove it.
del(QueryCache.cache[directory_server_id][query])
if len(QueryCache.cache[directory_server_id]) == 1:
# There are no cache items left. Remove the directory server.
del(QueryCache.cache[directory_server_id])
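# Example usage (illustrative sketch; `directory_server` stands for any object exposing
# the `.id` and `.cache_timeout` attributes used above, and `run_ldap_query` is a
# hypothetical helper):
#
#   data = QueryCache.get(directory_server, query)
#   if data is None:
#       data = run_ldap_query(directory_server, query)
#       QueryCache.set(directory_server, query, data)
#   QueryCache.flush()   # drop timed-out entries, e.g. from a periodic task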
|
mit
| 3,392,218,367,358,194,700 | 24.990476 | 80 | 0.583578 | false |
pombredanne/parakeet
|
parakeet/builder/builder.py
|
2
|
3544
|
from ..ndtypes import make_array_type, TupleT, IntT, FnT, ClosureT, increase_rank
from ..syntax import ArrayView, Struct, Expr, ParFor, IndexMap, UntypedFn, TypedFn
from ..syntax.helpers import zero_i64, get_types, one
from call_builder import CallBuilder
from adverb_builder import AdverbBuilder
class Builder(AdverbBuilder, CallBuilder):
def create_output_array(self, fn, inner_args,
outer_shape = (),
name = "output"):
if isinstance(outer_shape, (list, tuple)):
outer_shape = self.tuple(outer_shape)
try:
inner_shape_tuple = self.call_shape(fn, inner_args)
except:
print "Shape inference failed when calling %s with %s" % (fn, inner_args)
import sys
print "Error %s ==> %s" % (sys.exc_info()[:2])
raise
shape = self.concat_tuples(outer_shape, inner_shape_tuple)
closure_args = self.closure_elts(fn)
fn = self.get_fn(fn)
if isinstance(fn, UntypedFn):
from .. type_inference import infer_return_type
arg_types = get_types(tuple(closure_args) + tuple(inner_args))
return_type = infer_return_type(fn , arg_types)
else:
assert isinstance(fn, TypedFn), "Unexpected function %s" % fn
return_type = self.return_type(fn)
elt_t = self.elt_type(return_type)
if len(shape.type.elt_types) > 0:
return self.alloc_array(elt_t, shape, name)
else:
return self.fresh_var(elt_t, name)
def any_eq(self, tup, target_elt):
elts = self.tuple_elts(tup)
is_eq = self.eq(elts[0], target_elt)
for elt in elts[1:]:
is_eq = self.or_(is_eq, self.eq(elt, target_elt))
return is_eq
def parfor(self, fn, bounds):
assert isinstance(bounds, Expr)
assert isinstance(bounds.type, (TupleT, IntT))
assert isinstance(fn, Expr)
assert isinstance(fn.type, (FnT, ClosureT))
self.blocks += [ParFor(fn = fn, bounds = bounds)]
def imap(self, fn, bounds):
assert isinstance(bounds, Expr), "Expected imap bounds to be expression, got %s" % bounds
if isinstance(bounds.type, TupleT):
tup = bounds
ndims = len(bounds.type.elt_types)
else:
assert isinstance(bounds.type, IntT), \
"Expected imap bounds to be tuple or int, got %s : %s" % (bounds, bounds.type)
tup = self.tuple([bounds])
ndims = 1
assert isinstance(fn, Expr), "Expected imap function to be expression, got %s" % (fn,)
assert isinstance(fn.type, (FnT, ClosureT)), \
"Expected imap function to have a function type but got %s : %s" % (fn, fn.type)
elt_type = self.return_type(fn)
result_type = increase_rank(elt_type, ndims)
return IndexMap(fn = fn, shape = tup, type = result_type)
def ravel(self, x, explicit_struct = False):
# TODO: Check the strides to see if any element is equal to 1
# otherwise do an array_copy
assert self.is_array(x)
if x.type.rank == 1:
return x
nelts = self.nelts(x, explicit_struct = explicit_struct)
assert isinstance(nelts, Expr)
shape = self.tuple((nelts,), 'shape', explicit_struct = explicit_struct)
strides = self.tuple((self.int(1),), "strides", explicit_struct = explicit_struct)
data = self.attr(x, 'data', 'data_ptr')
offset = self.attr(x, 'offset')
t = make_array_type(x.type.elt_type, 1)
if explicit_struct:
return Struct(args = (data, shape, strides, offset, nelts), type = t)
else:
return ArrayView(data, shape, strides, offset, nelts, type = t)
|
bsd-3-clause
| -5,627,328,395,972,491,000 | 36.315789 | 93 | 0.633747 | false |
cassiopaixao/simmycloud
|
simmycloud/statistics_fields/vms_fields.py
|
1
|
2152
|
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2013 Cassio Paixao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
from core.statistics_manager import StatisticsField
class VMNameField(StatisticsField):
def value(self, vm):
return vm.name
class VMStretchField(StatisticsField):
def value(self, vm):
current_timestamp = self._config.simulation_info.current_timestamp
submit_timestamp = self._config.resource_manager.get_vm_allocation_data(vm.name).submit_time
requested_processing_time = self._config.resource_manager.get_vm_allocation_data(vm.name).process_time
return float(current_timestamp - submit_timestamp) / requested_processing_time
class VMSubmitTimestampField(StatisticsField):
def value(self, vm):
return self._config.resource_manager.get_vm_allocation_data(vm.name).submit_time
class VMFinishTimestampField(StatisticsField):
def value(self, vm):
return self._config.simulation_info.current_timestamp
|
mit
| -3,563,730,692,121,011,700 | 47.909091 | 110 | 0.707714 | false |
binarybana/samcnet
|
exps/mpm_play.py
|
1
|
12504
|
import os
import sys
import tempfile
import yaml
import zlib
import numpy as np
import simplejson as js
import subprocess as sb
from time import time,sleep
from os import path
from scipy.stats.mstats import mquantiles
try:
from sklearn.lda import LDA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.feature_selection import SelectKBest, f_classif
import samcnet.mh as mh
from samcnet.mixturepoisson import *
from samcnet.lori import *
from samcnet.data import *
from samcnet.calibrate import *
except ImportError as e:
sys.exit("Make sure LD_LIBRARY_PATH is set correctly and that the build"+\
" directory is populated by waf.\n\n %s" % str(e))
if 'WORKHASH' in os.environ:
try:
server = os.environ['SERVER']
except:
sys.exit("ERROR in worker: Need SERVER environment variable defined.")
if 'PARAM' in os.environ:
params = yaml.load(os.environ['PARAM'])
else:
params = {}
iters = setv(params, 'iters', int(1e4), int)
num_feat = setv(params, 'num_feat', 2, int)
seed = setv(params, 'seed', np.random.randint(10**8), int)
rseed = setv(params, 'rseed', np.random.randint(10**8), int)
Ntrn = setv(params, 'Ntrn', 20, int)
Ntst = setv(params, 'Ntst', 3000, int)
mu0 = setv(params, 'mu0', 0.0, float)
mu1 = setv(params, 'mu1', 0.6, float)
sigma0 = setv(params, 'sigma0', 0.2, float)
sigma1 = setv(params, 'sigma1', 0.6, float)
kappa = setv(params, 'kappa', 30.0, float)
lowd = setv(params, 'lowd', 9.0, float)
highd = setv(params, 'highd', 11.0, float)
num_gen_feat = setv(params, 'num_gen_feat', 20, int)
mumove = setv(params, 'mumove', 0.08, float)
lammove = setv(params, 'lammove', 0.01, float)
priorkappa = setv(params, 'priorkappa', 150, int)
burn = setv(params, 'burn', 3000, int)
thin = setv(params, 'thin', 40, int)
numlam = setv(params, 'numlam', 40, int)
output = {}
output['errors'] = {}
errors = output['errors']
np.seterr(all='ignore') # Careful with this
rseed = np.random.randint(10**8)
sel, rawdata, normdata = get_data(data_jk, params)
norm_trn_data = normdata.loc[sel['trn'], sel['feats']]
norm_tst_data = normdata.loc[sel['tst'], sel['feats']]
tst_data = rawdata.loc[sel['tst'], sel['feats']]
t1 = time()
#################### CLASSIFICATION ################
########################################
########################################
########################################
sklda = LDA()
skknn = KNN(3, warn_on_equidistant=False)
sksvm = SVC()
sklda.fit(norm_trn_data, sel['trnl'])
skknn.fit(norm_trn_data, sel['trnl'])
sksvm.fit(norm_trn_data, sel['trnl'])
errors['lda'] = (1-sklda.score(norm_tst_data, sel['tstl']))
errors['knn'] = (1-skknn.score(norm_tst_data, sel['tstl']))
errors['svm'] = (1-sksvm.score(norm_tst_data, sel['tstl']))
print("skLDA error: %f" % errors['lda'])
print("skKNN error: %f" % errors['knn'])
print("skSVM error: %f" % errors['svm'])
bayes0 = GaussianBayes(np.zeros(num_feat), 1, kappa,
np.eye(num_feat)*(kappa-1-num_feat),
normdata.loc[sel['trn0'], sel['feats']])
bayes1 = GaussianBayes(np.zeros(num_feat), 1, kappa,
np.eye(num_feat)*(kappa-1-num_feat),
normdata.loc[sel['trn1'], sel['feats']])
# Gaussian Analytic
gc = GaussianCls(bayes0, bayes1)
errors['gauss'] = gc.approx_error_data(norm_tst_data, sel['tstl'])
print("Gaussian Analytic error: %f" % errors['gauss'])
########################################
########################################
########################################
########################################
########################################
# MPM Model
dist0 = MPMDist(rawdata.loc[sel['trn0'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove)
dist1 = MPMDist(rawdata.loc[sel['trn1'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove)
mpm = MPMCls(dist0, dist1)
mhmc = mh.MHRun(mpm, burn=burn, thin=thin)
mhmc.sample(iters,verbose=False)
errors['mpm'] = mpm.approx_error_data(mhmc.db, tst_data, sel['tstl'],numlam=numlam)
print("MPM Sampler error: %f" % errors['mpm'])
output['acceptance'] = float(mhmc.accept_loc)/mhmc.total_loc
########################################
########################################
########################################
########################################
########################################
# Calibrated MPM Model
p0, p1 = calibrate(rawdata, sel, params)
dist0 = MPMDist(rawdata.loc[sel['trn0'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove,**p0)
dist1 = MPMDist(rawdata.loc[sel['trn1'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove,**p1)
mpmc = MPMCls(dist0, dist1)
mhmcc = mh.MHRun(mpmc, burn=burn, thin=thin)
mhmcc.sample(iters,verbose=False)
errors['mpmc_calib'] = mpmc.approx_error_data(mhmcc.db, tst_data, sel['tstl'],numlam=numlam)
print("mpmc Calibrated error: %f" % errors['mpmc_calib'])
output['acceptance_calib'] = float(mhmcc.accept_loc)/mhmcc.total_loc
########################################
########################################
########################################
########################################
########################################
priorsigma = np.ones(4)*0.1
pm0 = np.ones(4) * mu0
pm1 = np.ones(4) * mu1
dist0 = MPMDist(rawdata.loc[sel['trn0'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove,
priormu=pm0,priorsigma=priorsigma)
dist1 = MPMDist(rawdata.loc[sel['trn1'],sel['feats']],priorkappa=priorkappa,
lammove=lammove,mumove=mumove,
priormu=pm1,priorsigma=priorsigma)
#dist0 = MPMDist(rawdata.loc[sel['trn0'],sel['feats']],kmax=1,priorkappa=200,
#lammove=0.01,mumove=0.08,#S=S0,kappa=kappa,
#priormu=pm0,priorsigma=priorsigma, usedata=ud)
#dist1 = MPMDist(rawdata.loc[sel['trn0'],sel['feats']],kmax=1,priorkappa=200,
#lammove=0.01,mumove=0.08,#S=S1,kappa=kappa,
#priormu=pm1, priorsigma=priorsigma, usedata=ud)
mpmp = MPMCls(dist0, dist1)
mhmcp = mh.MHRun(mpmp, burn=burn, thin=thin)
mhmcp.sample(iters,verbose=False)
errors['mpm_prior'] = mpmp.approx_error_data(mhmcp.db, tst_data, sel['tstl'],numlam=numlam)
print("MPM prior Sampler error: %f" % errors['mpm_prior'])
output['acceptance_prior'] = float(mhmcp.accept_loc)/mhmcp.total_loc
########################################
########################################
########################################
########################################
import pylab as p
n,gext,grid = get_grid_data(np.vstack(( rawdata.loc[sel['trn0'],sel['feats']],
rawdata.loc[sel['trn1'],sel['feats']])), positive=True)
def myplot(ax,g,data,sel,gext):
data0 = data.loc[sel['trn0'], sel['feats']]
data1 = data.loc[sel['trn1'], sel['feats']]
ax.plot(data0.iloc[:,0], data0.iloc[:,1], 'g.',label='0', alpha=0.5)
ax.plot(data1.iloc[:,0], data1.iloc[:,1], 'r.',label='1', alpha=0.5)
ax.legend(fontsize=8, loc='best')
im = ax.imshow(g, extent=gext, aspect=1.0, origin='lower')
p.colorbar(im,ax=ax)
ax.contour(g, [0.0], extent=gext, aspect=1.0, origin='lower', cmap = p.cm.gray)
def plot_all(n, gext, grid, data0, data1, g0, g1, gavg):
Z = np.exp(g0)+np.exp(g1)
eg0 = np.exp(g0)/Z
eg1 = np.exp(g1)/Z
err = np.minimum(eg0,eg1)
err = err.reshape(-1,n)
lx,hx,ly,hy = gext
asp = float(hx-lx) / (hy-ly)
alp = 1.0
ms = 8
p.figure()
p.subplot(2,2,1)
p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
p.legend(fontsize=8, loc='best')
#p.contour(gavg, extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
#p.contour(gavg, [0.0], extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
#p.imshow(gavg, extent=gext, aspect=1, origin='lower')
#p.imshow(g0.reshape(-1,n), extent=gext, aspect=asp, origin='lower')
#p.colorbar()
p.contour(g0.reshape(-1,n), extent=gext, aspect=asp, origin='lower', cmap = p.cm.Greens)
p.subplot(2,2,2)
p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
p.legend(fontsize=8, loc='best')
#p.contour(g0.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Greens)
#p.contour(g1.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Reds)
#p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
#p.imshow((g1-g0).reshape(-1,n), extent=gext, aspect=1, origin='lower')
#p.imshow(g1.reshape(-1,n), extent=gext, aspect=asp, origin='lower')
#p.colorbar()
p.contour(g1.reshape(-1,n), extent=gext, aspect=asp, origin='lower', cmap = p.cm.Reds)
p.subplot(2,2,3)
p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
p.legend(fontsize=8, loc='best')
#p.imshow(err, extent=gext, origin='lower', aspect=asp)
#p.colorbar()
p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
#p.contour(eg0.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Greens)
#p.contour(eg1.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Reds)
p.subplot(2,2,4)
p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms)
p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms)
p.legend(fontsize=8, loc='best')
p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
CS = p.contour(err, [0.4, 0.3, 0.2, 0.1, 0.05], extent=gext, aspect=asp, origin='lower')
p.clabel(CS, inline=1, fontsize=10, aspect=asp)
p.show()
def plot_concise(n, gext, grid, data0, data1, g0, g1, gavg):
p.figure()
Z = np.exp(g0)+np.exp(g1)
eg0 = np.exp(g0)/Z
eg1 = np.exp(g1)/Z
err = np.minimum(eg0,eg1)
err = err.reshape(-1,n)
ms=8
lx,hx,ly,hy = gext
asp = float(hx-lx) / (hy-ly)
p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms)
p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms)
p.legend(fontsize=8, loc='best')
cont = (g0.max() + g1.max()) / 2.0 - 0.6
p.contour(g0.reshape(-1,n), [cont], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
p.contour(g1.reshape(-1,n), [cont], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
p.imshow(err, extent=gext, origin='lower', aspect=asp, alpha=0.4, cmap = p.cm.Reds)
p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray, linewidth=15.0)
CS = p.contour(err, [0.4, 0.3, 0.2, 0.1, 0.05], extent=gext, aspect=asp, origin='lower')
p.clabel(CS, inline=1, fontsize=10, aspect=asp)
p.show()
##def jitter(x):
##rand = np.random.rand
##n = x.shape[0]
##return (x.T + rand(n)).T
#def jitter(x):
#rand = np.random.rand
#return x + rand(*x.shape)-0.5
p.close("all")
gavg = mpm.calc_gavg(mhmc.db, grid, numlam=numlam).reshape(-1,n)
myplot(p.subplot(3,1,1),gavg,rawdata,sel,gext)
gavgc = mpmc.calc_gavg(mhmcc.db, grid, numlam=numlam).reshape(-1,n)
myplot(p.subplot(3,1,2),gavgc,rawdata,sel,gext)
gavgp = mpmp.calc_gavg(mhmcp.db, grid, numlam=numlam).reshape(-1,n)
myplot(p.subplot(3,1,3),gavgp,rawdata,sel,gext)
p.show()
#g0 = mpm1.dist0.calc_db_g(mhmc1.db, mhmc1.db.root.object.dist0, grid)
#g1 = mpm1.dist1.calc_db_g(mhmc1.db, mhmc1.db.root.object.dist1, grid)
##myplot(p.subplot(3,1,3),err.reshape(-1,n),jitter(tst_data0),jitter(tst_data1),gext)
#plot_all(n, gext, grid, trn_data0, trn_data1, g0,g1,gavg)
#plot_concise(n, gext, grid, trn_data0, trn_data1, g0,g1,gavg)
##n,gext,grid = get_grid_data(np.vstack(( norm_trn_data0, norm_trn_data1 )), positive=False)
##myplot(p.subplot(3,1,3),sksvm.decision_function(grid).reshape(-1,n),norm_trn_data0,norm_trn_data1,gext)
#p.figure()
#myplot(p.subplot(1,1,1),gavg,jitter(tst_data0),jitter(tst_data1),gext)
#p.axis(gext)
#mpm1.dist0.plot_traces(mhmc1.db, '/object/dist0', ['sigma'])
output['seed'] = seed
output['time'] = time()-t1
if 'WORKHASH' in os.environ:
import zmq
ctx = zmq.Context()
socket = ctx.socket(zmq.REQ)
socket.connect('tcp://'+server+':7000')
wiredata = zlib.compress(js.dumps(output))
socket.send(os.environ['WORKHASH'], zmq.SNDMORE)
socket.send(wiredata)
socket.recv()
socket.close()
ctx.term()
#mhmc.clean_db()
|
mit
| 3,789,207,219,876,480,000 | 38.56962 | 118 | 0.601168 | false |
explosion/spaCy
|
spacy/lang/mk/lemmatizer.py
|
2
|
1718
|
from typing import List
from collections import OrderedDict
from ...pipeline import Lemmatizer
from ...tokens import Token
class MacedonianLemmatizer(Lemmatizer):
def rule_lemmatize(self, token: Token) -> List[str]:
string = token.text
univ_pos = token.pos_.lower()
if univ_pos in ("", "eol", "space"):
return [string.lower()]
if string[-3:] == "јќи":
string = string[:-3]
univ_pos = "verb"
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
if not any(
(
index_table.get(univ_pos),
exc_table.get(univ_pos),
rules_table.get(univ_pos),
)
):
if univ_pos == "propn":
return [string]
else:
return [string.lower()]
index = index_table.get(univ_pos, {})
exceptions = exc_table.get(univ_pos, {})
rules = rules_table.get(univ_pos, [])
orig = string
string = string.lower()
forms = []
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
continue
if form in index or not form.isalpha():
forms.append(form)
forms = list(OrderedDict.fromkeys(forms))
for form in exceptions.get(string, []):
if form not in forms:
forms.insert(0, form)
if not forms:
forms.append(orig)
return forms
|
mit
| -7,816,183,188,029,771,000 | 28.568966 | 63 | 0.506706 | false |
hcrlab/access_teleop
|
limb_manipulation/src/access_gripper.py
|
1
|
2276
|
#!/usr/bin/python
import rospy
from ezgripper_libs.ezgripper_interface import EZGripper
from access_teleop_msgs.msg import EzgripperAccess
class EZGripperAccess(object):
"""
Controls for SAKE gripper
"""
def __init__(self, gripper_names):
self.ezgripper_left = EZGripper(gripper_names[0])
# For multiple grippers
# if len(gripper_names) > 1:
# self.ezgripper_right = EZGripper(gripper_names[1])
# else:
# self.ezgripper_right = None
self.last_command_end_time = rospy.get_rostime()
def start(self):
        rospy.Subscriber("/ezgripper_access", EzgripperAccess, self.access_callback)
self.pub = rospy.Publisher("/ezgripper_access_status", EzgripperAccess, queue_size=1)
def access_callback(self, data):
if (rospy.get_rostime() - self.last_command_end_time).to_sec() > 0.2:
# This check should flush all messages accumulated during command execution
# and avoid executing it again.
gripper = self.ezgripper_left
if data.type == "h_close": # hard close
gripper.hard_close()
self.last_command_end_time = rospy.get_rostime()
if data.type == "s_close": # soft close
gripper.soft_close()
self.last_command_end_time = rospy.get_rostime()
if data.type == "open": # open
gripper.open()
self.last_command_end_time = rospy.get_rostime()
if data.type == "release": # release
gripper.release()
self.last_command_end_time = rospy.get_rostime()
if data.type == "calibrate": # calibrate
gripper.calibrate()
self.last_command_end_time = rospy.get_rostime()
if len(data.type) > 2:
commands = data.type.split(" ") # [percentage open, effort]
if len(commands) == 2:
open_percentage = int(commands[0])
effort = int(commands[1])
# 0% (close) ---> 100% (open)
# >0 (min effort) ---> 100 (max effort)
if 0 <= open_percentage <= 100 and 0 < effort <= 100:
gripper.goto_position(open_percentage, effort)
self.last_command_end_time = rospy.get_rostime()
self.pub.publish(data)
if __name__ == "__main__":
rospy.init_node("access_ezgripper")
rospy.sleep(0.5)
gripper_names = rospy.get_param('~grippers')
ezgripper_access = EZGripperAccess(gripper_names)
ezgripper_access.start()
rospy.sleep(0.5)
rospy.spin()
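    # Example command messages (illustrative; the values follow the cases handled in
    # access_callback above):
    #   rostopic pub -1 /ezgripper_access access_teleop_msgs/EzgripperAccess "type: 'calibrate'"
    #   rostopic pub -1 /ezgripper_access access_teleop_msgs/EzgripperAccess "type: 'open'"
    #   rostopic pub -1 /ezgripper_access access_teleop_msgs/EzgripperAccess "type: '50 30'"  # 50% open at 30% effort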
|
mit
| -5,262,220,617,798,118,000 | 28.558442 | 90 | 0.668717 | false |
buaabyl/pycurl-win32
|
tests/ssh_key_cb_test.py
|
1
|
2467
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import nose
import unittest
import pycurl
from . import util
sftp_server = 'sftp://web.sourceforge.net'
@nose.plugins.attrib.attr('online')
@nose.plugins.attrib.attr('ssh')
class SshKeyCbTest(unittest.TestCase):
'''This test requires Internet access.'''
def setUp(self):
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.URL, sftp_server)
self.curl.setopt(pycurl.VERBOSE, True)
def tearDown(self):
self.curl.close()
@util.min_libcurl(7, 19, 6)
def test_keyfunction(self):
# with keyfunction returning ok
def keyfunction(known_key, found_key, match):
return pycurl.KHSTAT_FINE
self.curl.setopt(pycurl.SSH_KNOWNHOSTS, '.known_hosts')
self.curl.setopt(pycurl.SSH_KEYFUNCTION, keyfunction)
try:
self.curl.perform()
self.fail('should have raised')
except pycurl.error as e:
self.assertEqual(pycurl.E_LOGIN_DENIED, e.args[0])
# with keyfunction returning not ok
def keyfunction(known_key, found_key, match):
return pycurl.KHSTAT_REJECT
self.curl.setopt(pycurl.SSH_KNOWNHOSTS, '.known_hosts')
self.curl.setopt(pycurl.SSH_KEYFUNCTION, keyfunction)
try:
self.curl.perform()
self.fail('should have raised')
except pycurl.error as e:
self.assertEqual(pycurl.E_PEER_FAILED_VERIFICATION, e.args[0])
@util.min_libcurl(7, 19, 6)
def test_keyfunction_bogus_return(self):
def keyfunction(known_key, found_key, match):
return 'bogus'
self.curl.setopt(pycurl.SSH_KNOWNHOSTS, '.known_hosts')
self.curl.setopt(pycurl.SSH_KEYFUNCTION, keyfunction)
try:
self.curl.perform()
self.fail('should have raised')
except pycurl.error as e:
self.assertEqual(pycurl.E_PEER_FAILED_VERIFICATION, e.args[0])
@nose.plugins.attrib.attr('ssh')
class SshKeyCbUnsetTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.URL, sftp_server)
self.curl.setopt(pycurl.VERBOSE, True)
@util.min_libcurl(7, 19, 6)
def test_keyfunction_none(self):
self.curl.setopt(pycurl.SSH_KEYFUNCTION, None)
@util.min_libcurl(7, 19, 6)
def test_keyfunction_unset(self):
self.curl.unsetopt(pycurl.SSH_KEYFUNCTION)
|
lgpl-2.1
| 2,064,823,395,666,767,400 | 28.369048 | 74 | 0.634374 | false |
duncanmmacleod/dqsegdb
|
server/src/Constants.py
|
2
|
3105
|
# Copyright (C) 2014-2020 Syracuse University, European Gravitational Observatory, and Christopher Newport University. Written by Ryan Fisher and Gary Hemming. See the NOTICE file distributed with this work for additional information regarding copyright ownership.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
DQSEGDB Python Server
Constants class file
'''
class ConstantsHandle():
##########################
# API version constants #
########################
api_version = '2.1.17'
##############################
# ODBC connection constants #
############################
# If connecting via DSN, add DSN string ('DSN=[DSN_NAME];UID=[USER];PWD=[PASSWORD]'). Otherwise, set to None.
odbc_dsn = 'DSN=DQSEGDB' # /etc/odbc.ini
# If DSN is set to None, use these constants to connect via direct string, add required variables below. Each should be set to None if not being used.
odbc_driver = None # ODBC Driver to be used to make connection.
odbc_host = None # Host on which database is found.
odbc_db = None # Database name.
odbc_user = None # Database connection user.
odbc_pass = None # Database connection user password.
odbc_socket = None # Socket used when connecting to database.
######################
# Request constants #
####################
segment_requests = ["active", "known"] # Types of requests available in segment retrieval.
metadata_requests = ["metadata", "insert_history"] # Types of requests available in metadata retrieval.
######################
# Logging constants #
####################
log_file_location = '/opt/dqsegdb/python_server/logs/' # Log-file write directory.
#############################
# HTTP(S) & GRID constants #
###########################
use_https = True # False = Use HTTP; True = Use HTTPS.
grid_map_get_file = '/etc/grid-security/grid-mapfile' # Grid Map file used in authentication.
grid_map_put_patch_file = '/etc/grid-security/grid-mapfile-insert' # Grid Map file used in authorisation.
#################################
# Sub-second segment constants #
###############################
use_sub_second_segments = True
segment_requests = ["active", "known"] # Types of requests available in segment retrieval.
###################
# Time constants #
#################
gps_leap_secs = 17
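# Illustrative usage sketch (added; not part of the original file). The module
# name "Constants" and the pyodbc call are assumptions for illustration only:
#   from Constants import ConstantsHandle
#   import pyodbc
#   constants = ConstantsHandle()
#   if constants.odbc_dsn is not None:
#       connection = pyodbc.connect(constants.odbc_dsn)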
|
gpl-3.0
| 2,271,712,665,630,871,300 | 34.701149 | 265 | 0.606119 | false |
YoannDupont/SEM
|
sem/modules/evaluate.py
|
1
|
17700
|
"""
file: chunking_evaluate.py
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import sys
import codecs
from sem.IO.columnIO import Reader
from sem.storage import Tag, Annotation, Document
from sem.storage.annotation import annotation_from_sentence
import sem.importers
CORRECT = "correct output"
TYPE_ERROR = "type error"
BOUNDARY_ERROR = "boundary error"
TYPE_AND_BOUNDARY_ERROR = "type+boundary error"
NOISE_ERROR = "noise error"
SILENCE_ERROR = "silence error"
ERRORS_KINDS = [TYPE_ERROR, BOUNDARY_ERROR, TYPE_AND_BOUNDARY_ERROR, NOISE_ERROR, SILENCE_ERROR]
OUTPUT_KINDS = [CORRECT] + ERRORS_KINDS
def mean(numbers):
return float(sum(numbers)) / len(numbers)
def precision(d):
numerator = float(len(d[CORRECT]))
denominator = float(len(d[CORRECT]+d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]+d[NOISE_ERROR]))
if denominator == 0.0:
return 0.0
else:
return numerator / denominator
def recall(d):
numerator = float(len(d[CORRECT]))
denominator = len(d[CORRECT]+d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]+d[SILENCE_ERROR])
if denominator == 0.0:
return 0.0
else:
return numerator / denominator
def undergeneration(d):
numerator = float(len(d[SILENCE_ERROR]))
denominator = float(len(d[CORRECT]+d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]+d[SILENCE_ERROR]))
if denominator == 0.0:
return 0.0
else:
return numerator / denominator
def overgeneration(d):
numerator = float(len(d[NOISE_ERROR]))
denominator = float(len(d[CORRECT]+d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]+d[NOISE_ERROR]))
if denominator == 0.0:
return 0.0
else:
return numerator / denominator
def substitution(d):
numerator = float(len(d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]))
denominator = float(len(d[CORRECT]+d[TYPE_ERROR]+d[BOUNDARY_ERROR]+d[TYPE_AND_BOUNDARY_ERROR]))
if denominator == 0.0:
return 0.0
else:
return numerator / denominator
def fscore(P, R, beta=1.0):
return ((1+(beta**2))*P*R / (((beta**2)*P)+R) if P+R != 0 else 0.0)
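# Worked example (added for illustration; not part of the original module):
# with 8 correct outputs, 2 noise errors, 1 silence error and no other error
# kinds, precision = 8 / 10 = 0.8, recall = 8 / 9 ~ 0.889 and
# fscore(0.8, 0.889) = 2 * 0.8 * 0.889 / (0.8 + 0.889) ~ 0.842.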
def get_diff(content, gold, guess, error_kind, context_size=20):
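    # Added note: this helper renders a word-diff style context snippet where
    # "{+...+}" marks tags present only in the system output (guess) and
    # "[-...-]" marks tags present only in the reference annotation (gold).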
if error_kind == TYPE_ERROR:
diff = content[gold.lb - context_size : gold.lb]
diff += u"{{+<{0}>+}} ".format(guess.value)
diff += u"[-<{0}>-] ".format(gold.value)
diff += content[gold.lb : gold.ub]
diff += u" [-</{0}>-]".format(gold.value)
diff += u" {{+</{0}>+}}".format(guess.value)
diff += content[gold.ub : gold.ub + context_size]
elif error_kind == BOUNDARY_ERROR:
if gold.lb == guess.lb:
diff = content[gold.lb - context_size : gold.lb]
diff += u"<{0}> ".format(gold.value)
gold_min = gold.ub < guess.ub
diff += content[gold.lb : min(gold.ub, guess.ub)]
diff += (u" [-</{0}>-]".format(gold.value) if min(gold.ub, guess.ub) else u" {{+</{0}>+}}".format(guess.value))
diff += content[min(gold.ub, guess.ub) : max(gold.ub, guess.ub)]
diff += (u" {{+</{0}>+}}".format(guess.value) if min(gold.ub, guess.ub) else u" [-</{0}>-]".format(gold.value))
diff += content[max(gold.ub, guess.ub) : max(gold.ub, guess.ub) + context_size]
else:
gold_min = gold.lb < guess.lb
diff = content[min(gold.lb, guess.lb) - context_size : min(gold.lb, guess.lb)]
diff += (u"[-<{0}>-] ".format(gold.value) if gold_min else u"{{+<{0}>+}} ".format(guess.value))
diff += content[min(gold.lb, guess.lb) : max(gold.lb, guess.lb)]
diff += (u"{{+<{0}>+}} ".format(guess.value) if gold_min else u"[-<{0}>-] ".format(gold.value))
diff += content[max(gold.lb, guess.lb) : gold.ub]
diff += u" </{0}>".format(gold.value)
diff += content[gold.ub : gold.ub + context_size]
elif error_kind == TYPE_AND_BOUNDARY_ERROR:
min_lb = (gold if gold.lb < guess.lb else (gold if gold.lb == guess.lb and gold.ub > guess.ub else guess))
max_lb = (gold if min_lb == guess else guess)
min_ub = (gold if gold.ub < guess.ub else (gold if gold.ub == guess.ub and gold.lb > guess.lb else guess))
max_ub = (gold if min_ub == guess else guess)
diff = content[min_lb.lb - context_size : min_lb.lb]
if min_lb == gold:
diff += u"[-<{0}>-] ".format(gold.value)
diff += content[min_lb.lb : max_lb.lb]
diff += u"{{+<{0}>+}} ".format(guess.value)
else:
diff += u"{{+<{0}>+}} ".format(guess.value)
diff += content[min_lb.lb : max_lb.lb]
diff += u"[-<{0}>-] ".format(gold.value)
diff += content[max_lb.lb : min_ub.ub]
if min_ub == gold:
diff += u" [-</{0}>-]".format(gold.value)
diff += content[min_ub.ub : max_ub.ub]
diff += u" {{+</{0}>+}}".format(guess.value)
else:
diff += u" {{+</{0}>+}}".format(guess.value)
diff += content[min_ub.ub : max_ub.ub]
diff += u" [-</{0}>-]".format(gold.value)
diff += content[max_ub.ub : max_ub.ub + context_size]
elif error_kind == NOISE_ERROR:
diff = content[guess.lb - context_size : guess.lb]
diff += u"{{+<{0}>+}} ".format(guess.value)
diff += content[guess.lb : guess.ub]
diff += u" {{+</{0}>+}}".format(guess.value)
diff += content[guess.ub : guess.ub + context_size]
elif error_kind == SILENCE_ERROR:
diff = content[gold.lb - context_size : gold.lb]
diff += u"[-<{0}>-] ".format(gold.value)
diff += content[gold.lb : gold.ub]
diff += u" [-</{0}>-]".format(gold.value)
diff += content[gold.ub : gold.ub + context_size]
else:
raise ValueError("Unknown error kind: {0}".format(error_kind))
return diff.replace("\r", "").replace("\n", " ").replace('"','\\"')
def main(args):
infile = args.infile
reference_column = args.reference_column
tagging_column = args.tagging_column
ienc = args.ienc or args.enc
oenc = args.oenc or args.enc
verbose = args.verbose
input_format = args.input_format
reference_file = args.reference_file
annotation_name = args.annotation_name
dump = args.dump
context_size = args.context_size
counts = {}
prf = {}
if input_format == "conll":
if reference_file:
print(u"reference_file not handled for CoNLL files")
L = []
R = []
keys = None
nth = -1
for n_line, p in Reader(infile, ienc).line_iter():
nth += 1
keys = keys or range(len(p[0]))
L.extend(annotation_from_sentence(p, column=reference_column, shift=n_line-nth))
R.extend(annotation_from_sentence(p, column=tagging_column, shift=n_line-nth))
document = sem.importers.conll_file(infile, keys, keys[0], encoding=ienc)
L = Annotation("", annotations=L, reference=document.segmentation("tokens")).get_reference_annotations()
R = Annotation("", annotations=R, reference=document.segmentation("tokens")).get_reference_annotations()
elif input_format == "brat":
document = sem.importers.brat_file(reference_file)
L = document.annotation("NER").get_reference_annotations()
R = sem.importers.brat_file(infile).annotation("NER").get_reference_annotations()
elif input_format in ("sem", "SEM"):
document = Document.from_xml(reference_file)
system = Document.from_xml(infile)
common_annotations = set(document.annotations.keys()) & set(system.annotations.keys())
if len(common_annotations) == 1 and annotation_name is None:
annotation_name = list(common_annotations)[0]
if annotation_name is None:
raise RuntimeError("Could not find an annotation set to evaluate: please provide one")
L = document.annotation(annotation_name).get_reference_annotations()
R = system.annotation(annotation_name).get_reference_annotations()
else:
raise RuntimeError("format not handled: {0}".format(input_format))
len_ref = len(L)
len_tag = len(R)
d = {CORRECT:[], TYPE_ERROR:[], BOUNDARY_ERROR:[], TYPE_AND_BOUNDARY_ERROR:[], SILENCE_ERROR:[], NOISE_ERROR:[]}
# first pass, removing correct
i = 0
while i < len(L):
LR = L[i]
j = 0
while j < len(R):
RR = R[j]
if LR == RR:
del L[i]
del R[j]
i -= 1
d[CORRECT].append([LR, RR])
break
j += 1
i += 1
# second pass, typing errors
i = 0
while i < len(L):
LR = L[i]
j = 0
while j < len(R):
RR = R[j]
if LR.value != RR.value and LR.lb == RR.lb and LR.ub == RR.ub:
                del L[i]
                del R[j]
                i -= 1
                d[TYPE_ERROR].append([LR, RR])
break
j += 1
i += 1
# third pass, boundary errors
i = 0
while i < len(L):
LR = L[i]
j = 0
while j < len(R):
RR = R[j]
if LR.value == RR.value and ((LR.lb != RR.lb and LR.ub == RR.ub) or (LR.lb == RR.lb and LR.ub != RR.ub)):
del L[i]
del R[j]
i -= 1
d[BOUNDARY_ERROR].append([LR, RR])
break
j += 1
i += 1
# fourth pass, both type and boundary errors
i = 0
while i < len(L):
LR = L[i]
j = 0
while j < len(R):
RR = R[j]
            if LR.value != RR.value and ((LR.lb != RR.lb and LR.ub == RR.ub) or (LR.lb == RR.lb and LR.ub != RR.ub)):
del L[i]
del R[j]
i -= 1
d[TYPE_AND_BOUNDARY_ERROR].append([LR, RR])
break
j += 1
i += 1
d[SILENCE_ERROR] = L[:]
d[NOISE_ERROR] = R[:]
entities = set()
for l in d.values():
for e in l:
try:
l,r = e
entities.add(l.value)
entities.add(r.value)
except:
entities.add(e.value)
with codecs.open(dump, "w", "utf-8") as O:
O.write(u"error kind\treference entity\toutput entity\tdiff\n")
for error_kind in (TYPE_ERROR, BOUNDARY_ERROR, TYPE_AND_BOUNDARY_ERROR, NOISE_ERROR, SILENCE_ERROR):
for ex in d[error_kind]:
if error_kind == NOISE_ERROR:
gold = None
guess = ex
elif error_kind == SILENCE_ERROR:
gold = ex
guess = None
else:
gold = ex[0]
guess = ex[1]
gold_str = (u"{0}:{1}".format(gold.value, document.content[gold.lb : gold.ub]) if gold else "").replace("\r", "").replace("\n", " ")
guess_str = (u"{0}:{1}".format(guess.value, document.content[guess.lb : guess.ub]) if guess else "").replace("\r", "").replace("\n", " ")
diff = get_diff(document.content, gold, guess, error_kind, context_size=context_size)
O.write(u"{0}\t{1}\t{2}\t{3}\n".format(error_kind, gold_str, guess_str, diff))
counts = {}
for entity in entities:
sub_d = {}
sub_d[CORRECT] = [m for m in d[CORRECT] if m[0].value == entity]
sub_d[TYPE_ERROR] = [m for m in d[TYPE_ERROR] if m[0].value == entity or m[1].value == entity]
sub_d[BOUNDARY_ERROR] = [m for m in d[BOUNDARY_ERROR] if m[0].value == entity or m[1].value == entity]
sub_d[TYPE_AND_BOUNDARY_ERROR] = [m for m in d[TYPE_AND_BOUNDARY_ERROR] if m[0].value == entity or m[1].value == entity]
sub_d[NOISE_ERROR] = [m for m in d[NOISE_ERROR] if m.value == entity]
sub_d[SILENCE_ERROR] = [m for m in d[SILENCE_ERROR] if m.value == entity]
counts[entity] = sub_d
# basic counts
print(u"entity\tmeasure\tvalue")
for entity in sorted(entities):
for kind in OUTPUT_KINDS:
print(u"{0}\t{1}\t{2}".format(entity, kind, len(counts[entity][kind])))
print(u"global\treference\t{0}".format(len_ref))
print(u"global\ttagging\t{0}".format(len_tag))
for kind in OUTPUT_KINDS:
print(u"global\t{0}\t{1}".format(kind, len(d[kind])))
# P R F
precisions = []
recalls = []
print()
print(u"entity\tmeasure\tvalue")
for entity in sorted(entities):
precisions.append(precision(counts[entity]))
recalls.append(recall(counts[entity]))
print(u"{0}\tprecision\t{1:.4f}".format(entity, precisions[-1]))
print(u"{0}\trecall\t{1:.4f}".format(entity, recalls[-1]))
print(u"{0}\tfscore\t{1:.4f}".format(entity, fscore(precision(counts[entity]), recall(counts[entity]))))
print(u"global\tprecision\t{0:.4f}".format(precision(d)))
print(u"global\trecall\t{0:.4f}".format(recall(d)))
print(u"global\tfscore\t{0:.4f}".format(fscore(precision(d), recall(d))))
print(u"global\tmacro-precision\t{0:.4f}".format(mean(precisions)))
print(u"global\tmacro-recall\t{0:.4f}".format(mean(recalls)))
print(u"global\tmacro-fscore\t{0:.4f}".format(fscore(mean(precisions), mean(recalls))))
# over/under generation, substitution
print()
print(u"entity\tmeasure\tvalue")
for entity in sorted(entities):
print(u"{0}\tundergeneration\t{1:.4f}".format(entity, undergeneration(counts[entity])))
print(u"{0}\tovergeneration\t{1:.4f}".format(entity, overgeneration(counts[entity])))
print(u"{0}\tsubstitution\t{1:.4f}".format(entity, substitution(counts[entity])))
print(u"global\tundergeneration\t{0:.4f}".format(undergeneration(d)))
print(u"global\tovergeneration\t{0:.4f}".format(overgeneration(d)))
print(u"global\tsubstitution\t{0:.4f}".format(substitution(d)))
import os.path
import sem
_subparsers = sem.argument_subparsers
parser = _subparsers.add_parser(os.path.splitext(os.path.basename(__file__))[0], description="Get F1-score for tagging using the IOB scheme.")
parser.add_argument("infile",
help="The input file (CoNLL format)")
parser.add_argument("-r", "--reference-column", dest="reference_column", type=int, default=-2,
help="Column for reference output (default: %(default)s)")
parser.add_argument("-t", "--tagging-column", dest="tagging_column", type=int, default=-1,
help="Column for CRF output (default: %(default)s)")
parser.add_argument("-f", "--format", dest="input_format", default="conll",
help="The input format (default: %(default)s)")
parser.add_argument("-a", "--annotation-name", dest="annotation_name",
help="The annotation name, useful for some formats, like SEM.")
parser.add_argument("-c", "--reference-file", dest="reference_file",
help="The comparing file")
parser.add_argument("--input-encoding", dest="ienc",
help="Encoding of the input (default: utf-8)")
parser.add_argument("--output-encoding", dest="oenc",
help="Encoding of the input (default: utf-8)")
parser.add_argument("-e", "--encoding", dest="enc", default="utf-8",
help="Encoding of both the input and the output (default: utf-8)")
parser.add_argument("-d", "--dump", dest="dump", default=os.devnull,
help="File where to dump errors (default: redirect to devnull)")
parser.add_argument("-s", "--context-size", dest="context_size", type=int, default=30,
help="context size (default: %(default)s)")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="Writes feedback during process (default: no output)")
|
mit
| 4,337,289,300,954,572,000 | 43.923858 | 158 | 0.588305 | false |
pothosware/SoapyBladeRF
|
self_test.py
|
1
|
1972
|
import SoapySDR
from SoapySDR import * #SOAPY_SDR_* constants
import numpy as np
if __name__ == "__main__":
bladerf = SoapySDR.Device(dict(driver="bladerf"))
    print(bladerf)
for i in range(5):
print(" Make rx stream #%d"%i)
rxStream = bladerf.setupStream(SOAPY_SDR_RX, SOAPY_SDR_CF32, [0])
for j in range(5):
lastTimeNs = 0
numSampsTotal = 10000
print(" Activate, get %d samples, Deactivate #%d"%(numSampsTotal, j))
bladerf.activateStream(rxStream, SOAPY_SDR_END_BURST, 0, numSampsTotal)
buff = np.array([0]*1024, np.complex64)
while numSampsTotal != 0:
sr = bladerf.readStream(rxStream, [buff], buff.size)
assert(sr.ret > 0)
numSampsTotal -= sr.ret
if not (sr.timeNs > lastTimeNs):
print("Fail %s, %d"%(sr, numSampsTotal))
assert(sr.timeNs > lastTimeNs)
lastTimeNs = sr.timeNs
bladerf.deactivateStream(rxStream)
bladerf.closeStream(rxStream)
for i in range(5):
print(" Make tx stream #%d"%i)
txStream = bladerf.setupStream(SOAPY_SDR_TX, SOAPY_SDR_CF32, [0])
for j in range(5):
numSampsTotal = 10000
print(" Activate, send %d samples, Deactivate #%d"%(numSampsTotal, j))
bladerf.activateStream(txStream)
buff = np.array([0]*1024, np.complex64)
while numSampsTotal != 0:
size = min(buff.size, numSampsTotal)
flags = 0
#if size == numSampsTotal: flags |= SOAPY_SDR_END_BURST
sr = bladerf.writeStream(txStream, [buff], size, flags)
if not (sr.ret > 0): print("Fail %s, %d"%(sr, numSampsTotal))
assert(sr.ret > 0)
numSampsTotal -= sr.ret
bladerf.deactivateStream(txStream)
bladerf.closeStream(txStream)
|
lgpl-2.1
| 2,158,470,047,602,105,000 | 41.869565 | 85 | 0.552738 | false |
madhusudancs/pytask
|
pytask/taskapp/admin.py
|
1
|
1277
|
#!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
__authors__ = [
'"Madhusudan.C.S" <[email protected]>',
'"Nishanth Amuluru" <[email protected]>',
]
from django.contrib import admin
from pytask.taskapp.models import Task, TaskComment, TaskClaim,\
WorkReport, ReportComment, PyntRequest,\
TextBook
admin.site.register(Task)
admin.site.register(TaskComment)
admin.site.register(TextBook)
admin.site.register(WorkReport)
admin.site.register(TaskClaim)
admin.site.register(ReportComment)
admin.site.register(PyntRequest)
|
agpl-3.0
| -8,011,213,779,084,535,000 | 31.74359 | 74 | 0.720439 | false |
escattone/kuma
|
kuma/core/email_utils.py
|
1
|
2082
|
import logging
from functools import wraps
from django.conf import settings
from django.template.loader import render_to_string
from django.test import RequestFactory
from django.utils import translation
log = logging.getLogger("kuma.core.email")
def safe_translation(f):
"""Call `f` which has first argument `locale`. If `f` raises an
exception indicative of a bad localization of a string, try again in
`settings.WIKI_DEFAULT_LANGUAGE`.
NB: This means `f` will be called up to two times!
"""
@wraps(f)
def wrapper(locale, *args, **kwargs):
try:
with translation.override(locale):
return f(locale, *args, **kwargs)
except (TypeError, KeyError, ValueError, IndexError) as e:
# Types of errors, and examples.
#
# TypeError: Not enough arguments for string
# '%s %s %s' % ('foo', 'bar')
# KeyError: Bad variable name
# '%(Foo)s' % {'foo': 10} or '{Foo}'.format(foo=10')
# ValueError: Incomplete Format, or bad format string.
# '%(foo)a' or '%(foo)' or '{foo'
# IndexError: Not enough arguments for .format() style string.
# '{0} {1}'.format(42)
log.error('Bad translation in locale "%s": %s', locale, e)
with translation.override(settings.WIKI_DEFAULT_LANGUAGE):
return f(settings.WIKI_DEFAULT_LANGUAGE, *args, **kwargs)
return wrapper
def render_email(template, context):
"""Renders a template in the currently set locale.
Falls back to WIKI_DEFAULT_LANGUAGE in case of error.
"""
@safe_translation
def _render(locale):
"""Render an email in the given locale.
Because of safe_translation decorator, if this fails,
the function will be run again in English.
"""
req = RequestFactory().get("/")
req.META = {}
req.LANGUAGE_CODE = locale
return render_to_string(template, context, request=req)
return _render(translation.get_language())
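# Illustrative usage sketch (added; the template path and context value are
# assumptions, not part of this module):
#   from django.utils import translation
#   with translation.override("fr"):
#       body = render_email("emails/confirm.ltxt", {"username": "jane"})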
|
mpl-2.0
| -5,253,210,990,913,891,000 | 31.53125 | 74 | 0.611912 | false |
nadapapa/raspberry-scripts
|
dropboxUploader.py
|
1
|
1755
|
#!/usr/bin/env python3
import dropbox
import os
import configparser
script_path = "/home/pi/raspberry-scripts/"
config = configparser.ConfigParser()
config.read(script_path + "config.ini")
app_key = config["DROPBOX"]["app_key"]
app_secret = config["DROPBOX"]["app_secret"]
def dropboxAuth():
accessTokenFileOverwrite = open(script_path + "accessToken.txt", "w+")
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(app_key, app_secret)
# Have the user sign in and authorize this token
authorize_url = flow.start()
print('1. Go to: ' + authorize_url)
print('2. Click "Allow" (you might have to log in first)')
print('3. Copy the authorization code.')
code = input("Enter the authorization code here: ").strip()
try:
# This will fail if the user enters an invalid authorization code
access_token, user_id = flow.finish(code)
accessTokenFileOverwrite.write(access_token)
except:
print("failed authorization, restart")
accessTokenFileOverwrite.close()
os.remove(script_path + "accessToken.txt")
accessTokenFileOverwrite.close()
def dropboxUpload(fileToUpload):
if not os.path.isfile(script_path + "accessToken.txt"):
dropboxAuth()
# get access token from file
accessTokenFileRead = open(script_path + "accessToken.txt", "r")
access_token = accessTokenFileRead.read().rstrip()
accessTokenFileRead.close()
# make client
client = dropbox.client.DropboxClient(access_token)
# upload file
fileToUploadObject = open(fileToUpload, "rb")
response = client.put_file(fileToUpload, fileToUploadObject)
fileToUploadObject.close()
if __name__ == "__main__":
import sys
sys.exit(dropboxUpload(sys.argv[1]))
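# Example invocation (added for illustration; the file path is an assumption):
#   python3 dropboxUploader.py /home/pi/photos/snapshot.jpg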
|
mit
| 4,767,238,827,218,026,000 | 29.258621 | 74 | 0.689459 | false |
praekelt/diamondash
|
diamondash/widgets/widget/tests/test_widget.py
|
1
|
1879
|
from twisted.trial import unittest
from diamondash.widgets.widget import WidgetConfig
from diamondash.config import ConfigError
class WidgetConfigTestCase(unittest.TestCase):
def test_parsing(self):
config = WidgetConfig({
'name': u'Test Widget',
'title': 'Test Widget',
'width': 4
})
self.assertEqual(config['name'], 'test-widget')
self.assertEqual(config['title'], 'Test Widget')
self.assertEqual(config['width'], 4)
def test_parsing_for_no_name(self):
"""
Should raise an exception when given widget dashboard config without a
name key.
"""
self.assertRaises(ConfigError, WidgetConfig, {})
def test_parsing_for_no_title(self):
"""
Should set the widget title to the name passed into the config if no
title is in the config.
"""
config = {'name': u'Test Widget'}
parsed_config = WidgetConfig(config)
self.assertEqual(parsed_config['title'], 'Test Widget')
def test_parsing_for_no_width(self):
"""
Should set the widget width to the minimum column span if no width
value is given.
"""
config = WidgetConfig({
'name': u'Test Widget',
'title': u'Test Widget'
})
self.assertEqual(config['width'], 3)
def test_parse_width(self):
"""
        Should clamp the passed-in width value between the minimum and the
        maximum widget column span.
"""
self.assertEqual(WidgetConfig.parse_width(0), 3)
self.assertEqual(WidgetConfig.parse_width(3), 3)
self.assertEqual(WidgetConfig.parse_width(4), 4)
self.assertEqual(WidgetConfig.parse_width(11), 11)
self.assertEqual(WidgetConfig.parse_width(12), 12)
self.assertEqual(WidgetConfig.parse_width(13), 12)
|
bsd-3-clause
| -6,620,913,646,141,220,000 | 32.553571 | 78 | 0.612028 | false |
pebble/spacel-provision
|
src/test/provision/app/test_base_decorator.py
|
1
|
1033
|
import json
from test import BaseSpaceAppTest
from test.provision import normalize_cf
class BaseTemplateDecoratorTest(BaseSpaceAppTest):
def setUp(self):
super(BaseTemplateDecoratorTest, self).setUp()
self.user_data_params = []
self.resources = {
'Lc': {
'Properties': {
'UserData': {
'Fn::Base64': {
'Fn::Join': [
'', self.user_data_params
]
}
}
}
}
}
self.parameters = {}
self.template = {
'Parameters': self.parameters,
'Resources': self.resources
}
def _user_data(self):
user_data_params = normalize_cf(self.user_data_params)
user_data_text = ''.join(user_data_params)
try:
return json.loads(user_data_text)
except ValueError:
return user_data_text
|
mit
| 5,880,192,451,664,297,000 | 27.694444 | 62 | 0.46273 | false |
flyingdisc/harmonic-stochastic-sound-transformation
|
hpsTransformations_function2.py
|
1
|
6947
|
"""
This is modified version of sms-tools' hpsTransformations_function.py
Original code: https://github.com/MTG/sms-tools
Original code by Xavier Serra et.al.
This file should be placed inside sms-tools/software/transformations-interface/
"""
# function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):
"""
Analyze a sound with the harmonic plus stochastic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
stocf: decimation factor used for the stochastic approximation
returns inputFile: input file name; fs: sampling rate of input file,
hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the harmonic plus stochastic model of the whole sound
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
# synthesize the harmonic plus stochastic model without original phases
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'
UF.wavwrite(y,fs, outputFile)
"""
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot spectrogram stochastic compoment
plt.subplot(3,1,2)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
"""
return inputFile, fs, hfreq, hmag, mYst
def transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]),
freqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1,
timeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])):
"""
transform the analysis values returned by the analysis function and synthesize the sound
inputFile: name of input file
fs: sampling rate of input file
hfreq, hmag: harmonic frequencies and magnitudes
mYst: stochastic residual
freqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling)
freqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching)
timbrePreservation: 1 preserves original timbre, 0 it does not
timeScaling: time scaling factors, in time-value pairs
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# frequency scaling of the harmonics
hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
# time scaling the sound
yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling)
# synthesis from the trasformed hps representation
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
return y
"""
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav'
UF.wavwrite(y,fs, outputFile)
"""
"""
# create figure to plot
plt.figure(figsize=(12, 6))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram of transformed stochastic compoment
plt.subplot(2,1,1)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
if (yhfreq.shape[1] > 0):
harms = yhfreq*np.less(yhfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
"""
if __name__ == "__main__":
# analysis
inputFile, fs, hfreq, hmag, mYst = analysis()
# transformation and synthesis
transformation_synthesis(inputFile, fs, hfreq, hmag, mYst)
plt.show(block=False)
|
mit
| 9,086,991,966,123,679,000 | 33.735 | 147 | 0.700014 | false |
schrockntemp/graphscaletemp
|
test/test_graphql_grapple.py
|
1
|
5739
|
import pytest
from graphscale.grapple.grapple_parser import parse_grapple, print_graphql_defs
def test_basic_type():
graphql = """type Test { name: String }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'name': GraphQLField(type=GraphQLString),
},
)
"""
def test_non_pythonic_name():
graphql = """type Test { longName: String }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'longName': GraphQLField(
type=GraphQLString,
resolver=lambda obj, args, *_: obj.long_name(*args),
),
},
)
"""
def test_nonnullable_type():
graphql = """type Test { name: String! }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'name': GraphQLField(type=req(GraphQLString)),
},
)
"""
def test_list_type():
graphql = """type Test { names: [String] }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'names': GraphQLField(type=list_of(GraphQLString)),
},
)
"""
def test_list_of_reqs():
graphql = """type Test { names: [String!] }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'names': GraphQLField(type=list_of(req(GraphQLString))),
},
)
"""
def test_req_list():
graphql = """type Test { names: [String]! }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'names': GraphQLField(type=req(list_of(GraphQLString))),
},
)
"""
def test_req_list_of_reqs():
graphql = """type Test { names: [String!]! }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'names': GraphQLField(type=req(list_of(req(GraphQLString)))),
},
)
"""
def test_double_list():
graphql = """type Test { matrix: [[String]] }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'matrix': GraphQLField(type=list_of(list_of(GraphQLString))),
},
)
"""
def test_ref_to_self():
graphql = """type Test { other: Test }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'other': GraphQLField(type=GraphQLTest.type()),
},
)
"""
def test_args():
graphql = """type Test { relatives(skip: Int, take: Int) : [Test] }"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLTest(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Test',
fields=lambda: {
'relatives': GraphQLField(
type=list_of(GraphQLTest.type()),
args={
'skip': GraphQLArgument(type=GraphQLInt),
'take': GraphQLArgument(type=GraphQLInt),
},
),
},
)
"""
def test_enum():
graphql = """
type Hospital {
status: HospitalStatus
reqStatus: HospitalStatus!
}
enum HospitalStatus {
AS_SUBMITTED
}
"""
result = print_graphql_defs(parse_grapple(graphql))
assert result == """class GraphQLHospital(GrappleType):
@staticmethod
def create_type():
return GraphQLObjectType(
name='Hospital',
fields=lambda: {
'status': GraphQLField(
type=GraphQLHospitalStatus.type(),
resolver=lambda obj, args, *_: obj.status(*args).name if obj.status(*args) else None,
),
'reqStatus': GraphQLField(
type=req(GraphQLHospitalStatus.type()),
resolver=lambda obj, args, *_: obj.req_status(*args).name if obj.req_status(*args) else None,
),
},
)
class GraphQLHospitalStatus(GrappleType):
@staticmethod
def create_type():
return GraphQLEnumType(
name='HospitalStatus',
values={
'AS_SUBMITTED': GraphQLEnumValue(),
},
)
"""
|
mit
| 1,272,046,684,306,629,400 | 29.205263 | 113 | 0.546959 | false |
nschloe/meshio
|
tests/test_abaqus.py
|
1
|
2388
|
import pathlib
import tempfile
import numpy as np
import pytest
import meshio
from . import helpers
@pytest.mark.parametrize(
"mesh",
[
helpers.empty_mesh,
helpers.tri_mesh,
helpers.triangle6_mesh,
helpers.quad_mesh,
helpers.quad8_mesh,
helpers.tri_quad_mesh,
helpers.tet_mesh,
helpers.tet10_mesh,
helpers.hex_mesh,
helpers.hex20_mesh,
],
)
def test(mesh):
def writer(*args, **kwargs):
return meshio.abaqus.write(*args, **kwargs)
helpers.write_read(writer, meshio.abaqus.read, mesh, 1.0e-15)
@pytest.mark.parametrize(
"filename, ref_sum, ref_num_cells, ref_num_cell_sets",
[
("UUea.inp", 4950.0, 50, 10),
("nle1xf3c.inp", 32.215275528, 12, 3),
("element_elset.inp", 6.0, 2, 3),
("wInclude_main.inp", 1.5, 2, 0),
],
)
def test_reference_file(filename, ref_sum, ref_num_cells, ref_num_cell_sets):
this_dir = pathlib.Path(__file__).resolve().parent
filename = this_dir / "meshes" / "abaqus" / filename
mesh = meshio.read(filename)
assert np.isclose(np.sum(mesh.points), ref_sum)
assert sum([len(cells.data) for cells in mesh.cells]) == ref_num_cells
assert len(mesh.cell_sets) == ref_num_cell_sets
def test_elset():
points = np.array(
[[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.5, 0.0], [0.0, 0.5, 0.0]]
)
cells = [
("triangle", np.array([[0, 1, 2]])),
("triangle", np.array([[0, 1, 3]])),
]
cell_sets = {
"right": [np.array([0]), np.array([])],
"left": [np.array([]), np.array([1])],
}
mesh_ref = meshio.Mesh(points, cells, cell_sets=cell_sets)
with tempfile.TemporaryDirectory() as temp_dir:
filepath = pathlib.Path(temp_dir) / "test.inp"
meshio.abaqus.write(filepath, mesh_ref)
mesh = meshio.abaqus.read(filepath)
assert np.allclose(mesh_ref.points, mesh.points)
assert len(mesh_ref.cells) == len(mesh.cells)
for ic, cell in enumerate(mesh_ref.cells):
assert cell.type == mesh.cells[ic].type
assert np.allclose(cell.data, mesh.cells[ic].data)
assert sorted(mesh_ref.cell_sets.keys()) == sorted(mesh.cell_sets.keys())
for k, v in mesh_ref.cell_sets.items():
for ic in range(len(mesh_ref.cells)):
assert np.allclose(v[ic], mesh.cell_sets[k][ic])
|
mit
| 6,534,547,558,376,211,000 | 27.771084 | 77 | 0.592546 | false |
daniel-de-vries/OpenLEGO
|
openlego/docs/_utils/docutil.py
|
1
|
25945
|
"""
A collection of functions for modifying source code that is embedded into the Sphinx documentation.
"""
import sys
import os
import re
import tokenize
import importlib
import inspect
import sqlite3
import subprocess
import tempfile
import numpy as np
from six import StringIO, PY3
from six.moves import cStringIO as cStringIO
from redbaron import RedBaron
sqlite_file = 'feature_docs_unit_test_db.sqlite' # name of the sqlite database file
table_name = 'feature_unit_tests' # name of the table to be queried
def remove_docstrings(source):
"""
Return 'source' minus docstrings.
Parameters
----------
source : str
Original source code.
Returns
-------
str
Source with docstrings removed.
"""
io_obj = StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
# ltext = tok[4] # in original code but not used here
# The following two conditionals preserve indentation.
# This is necessary because we're not using tokenize.untokenize()
# (because it spits out code with copious amounts of oddly-placed
# whitespace).
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# This series of conditionals removes docstrings:
if token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
# Note regarding NEWLINE vs NL: The tokenize module
# differentiates between newlines that start a new statement
                # and newlines inside of operators such as parens, brackets,
# and curly braces. Newlines inside of operators are
# NEWLINE and newlines that start new code are NL.
# Catch whole-module docstrings:
if start_col > 0:
# Unlabelled indentation means we're inside an operator
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out
def remove_redbaron_node(node, index):
"""
Utility function for removing a node using RedBaron.
RedBaron has some problems with modifying code lines that run across
multiple lines. ( It is mentioned somewhere online but cannot seem to
find it now. )
RedBaron throws an Exception but when you check, it seems like it does
what you asked it to do. So, for now, we ignore the Exception.
"""
try:
node.value.remove(node.value[index])
except Exception as e: # no choice but to catch the general Exception
if str(e).startswith('It appears that you have indentation in your CommaList'):
pass
else:
raise
def replace_asserts_with_prints(source_code):
"""
Replace asserts with print statements.
Using RedBaron, replace some assert calls with print statements that print the actual
value given in the asserts.
Depending on the calls, the actual value can be the first or second
argument.
"""
rb = RedBaron(source_code) # convert to RedBaron internal structure
for assert_type in ['assertAlmostEqual', 'assertLess', 'assertGreater', 'assertEqual',
'assertEqualArrays', 'assertTrue', 'assertFalse']:
assert_nodes = rb.findAll("NameNode", value=assert_type)
for assert_node in assert_nodes:
assert_node = assert_node.parent
remove_redbaron_node(assert_node, 0) # remove 'self' from the call
assert_node.value[0].replace('print')
if assert_type not in ['assertTrue', 'assertFalse']:
remove_redbaron_node(assert_node.value[1], 1) # remove the expected value argument
assert_nodes = rb.findAll("NameNode", value='assert_rel_error')
for assert_node in assert_nodes:
assert_node = assert_node.parent
# If relative error tolerance is specified, there are 4 arguments
if len(assert_node.value[1]) == 4:
remove_redbaron_node(assert_node.value[1], -1) # remove the relative error tolerance
remove_redbaron_node(assert_node.value[1], -1) # remove the expected value
remove_redbaron_node(assert_node.value[1], 0) # remove the first argument which is
# the TestCase
assert_node.value[0].replace("print")
assert_nodes = rb.findAll("NameNode", value='assert_almost_equal')
for assert_node in assert_nodes:
assert_node = assert_node.parent
# If relative error tolerance is specified, there are 3 arguments
if len(assert_node.value[1]) == 3:
remove_redbaron_node(assert_node.value[1], -1) # remove the relative error tolerance
remove_redbaron_node(assert_node.value[1], -1) # remove the expected value
assert_node.value[0].replace("print")
source_code_with_prints = rb.dumps() # get back the string representation of the code
return source_code_with_prints
def get_method_body(method_code):
    '''Use the RedBaron module to get the body of a method.
    The definition signature line is intentionally excluded.
    '''
    method_code = '\n' + method_code  # For some reason RedBaron has problems with this
# if it does not start with an empty line
rb = RedBaron(method_code)
def_node = rb.findAll("DefNode")[0] # Look for the 'def' node. Should only be one!
def_node.value.decrease_indentation(8) # so that the code is all the way to the left
return def_node.value.dumps()
def remove_initial_empty_lines_from_source(source):
"""
Some initial empty lines were added to keep RedBaron happy.
Need to strip these out before we pass the source code to the
directive for including source code into feature doc files.
"""
idx = re.search(r'\S', source, re.MULTILINE).start()
return source[idx:]
def get_source_code_of_class_or_method(class_or_method_path, remove_docstring=True):
"""
Return source code as a text string.
Parameters
----------
class_or_method_path : str
Package path to the class or function.
remove_docstring : bool
Set to False to keep docstrings in the text.
"""
# First, assume module path since we want to support loading a full module as well.
try:
module = importlib.import_module(class_or_method_path)
source = inspect.getsource(module)
except ImportError:
# Second, assume class and see if it works
try:
module_path = '.'.join(class_or_method_path.split('.')[:-1])
module_with_class = importlib.import_module(module_path)
class_name = class_or_method_path.split('.')[-1]
cls = getattr(module_with_class, class_name)
source = inspect.getsource(cls)
except ImportError:
# else assume it is a path to a method
module_path = '.'.join(class_or_method_path.split('.')[:-2])
module_with_method = importlib.import_module(module_path)
class_name = class_or_method_path.split('.')[-2]
method_name = class_or_method_path.split('.')[-1]
cls = getattr(module_with_method, class_name)
meth = getattr(cls, method_name)
source = inspect.getsource(meth)
# Remove docstring from source code
if remove_docstring:
source = remove_docstrings(source)
return source
def get_test_source_code_for_feature(feature_name):
'''The function to be called from the custom Sphinx directive code
that includes relevant unit test code(s).
It gets the test source from the unit tests that have been
marked to indicate that they are associated with the "feature_name"'''
# get the:
#
# 1. title of the test
# 2. test source code
# 3. output of running the test
#
# from from the database that was created during an earlier
# phase of the doc build process using the
# devtools/create_feature_docs_unit_test_db.py script
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
cur.execute('SELECT title, unit_test_source, run_outputs FROM {tn} WHERE feature="{fn}"'.
format(tn=table_name, fn=feature_name))
all_rows = cur.fetchall()
conn.close()
test_source_code_for_feature = []
# Loop through all the unit tests that are relevant to this feature name
for title, unit_test_source, run_outputs in all_rows:
# add to the list that will be returned
test_source_code_for_feature.append((title, unit_test_source, run_outputs))
return test_source_code_for_feature
def remove_raise_skip_tests(source):
"""
Remove from the code any raise unittest.SkipTest lines since we don't want those in
what the user sees.
"""
rb = RedBaron(source)
raise_nodes = rb.findAll("RaiseNode")
for rn in raise_nodes:
# only the raise for SkipTest
if rn.value[:2].dumps() == 'unittestSkipTest':
rn.parent.value.remove(rn)
return rb.dumps()
def remove_leading_trailing_whitespace_lines(src):
"""
Remove any trailing whitespace lines.
Parameters
----------
src : str
Input code.
Returns
-------
str
Code with trailing whitespace lines removed.
"""
lines = src.splitlines()
non_whitespace_lines = []
for i, l in enumerate( lines ):
if l and not l.isspace():
non_whitespace_lines.append(i)
imin = min(non_whitespace_lines)
imax = max(non_whitespace_lines)
return '\n'.join(lines[imin: imax+1])
def split_source_into_input_blocks(src):
"""
Split source into blocks; the splits occur at prints.
Parameters
----------
src : str
Input code.
Returns
-------
list
List of input code sections.
"""
rb = RedBaron(src)
in_code_blocks = []
in_code_block = []
# group code until the first print, then repeat
for r in rb:
line = r.dumps()
if not line.endswith('\n'):
line += '\n'
in_code_block.append(line)
if r.type == 'print' or \
( len(r.value) == 3 and \
(r.type, r.value[0].type, r.value[1].type, r.value[2].type) == \
('atomtrailers', 'name', 'name', 'call') and \
r.value[1].value in ['run_model', 'run_driver', 'setup'] ):
# stop and make an input code block
in_code_block = ''.join(in_code_block)
in_code_block = remove_leading_trailing_whitespace_lines(in_code_block)
in_code_blocks.append(in_code_block)
in_code_block = []
if in_code_block: # If anything left over
in_code_blocks.append(''.join(in_code_block))
return in_code_blocks
def insert_output_start_stop_indicators(src):
"""
Insert identifier strings so that output can be segregated from input.
Parameters
----------
src : str
String containing input and output lines.
Returns
-------
str
String with output demarked.
"""
rb = RedBaron(src)
src_with_out_start_stop_indicators = []
input_block_number = 0
for r in rb:
line = r.dumps()
# not sure why some lines from RedBaron do not have newlines
if not line.endswith('\n'):
line += '\n'
if r.type == 'print':
# src_with_out_start_stop_indicators += 'print("<<<<<{}")'.format(input_block_number) + '\n'
src_with_out_start_stop_indicators.append(line)
src_with_out_start_stop_indicators.append('print(">>>>>{}")\n'.format(input_block_number))
elif len(r.value) == 3 and \
(r.type, r.value[0].type, r.value[1].type, r.value[2].type) == \
('atomtrailers', 'name', 'name', 'call') and \
r.value[1].value in ['run_model', 'run_driver', 'setup']:
src_with_out_start_stop_indicators.append(line)
src_with_out_start_stop_indicators.append('print(">>>>>{}")\n'.format(input_block_number))
else:
src_with_out_start_stop_indicators.append(line)
input_block_number += 1
return ''.join(src_with_out_start_stop_indicators)
def clean_up_empty_output_blocks(input_blocks, output_blocks):
"""Some of the blocks do not generate output. We only want to have
input blocks that have outputs.
"""
new_input_blocks = []
new_output_blocks = []
current_in_block = ''
for in_block, out_block in zip(input_blocks, output_blocks):
if current_in_block and not current_in_block.endswith('\n'):
current_in_block += '\n'
current_in_block += in_block
if out_block:
new_input_blocks.append(current_in_block)
new_output_blocks.append(out_block)
current_in_block = ''
# if there was no output, return the one input block and empty output block
if current_in_block:
new_input_blocks.append(current_in_block)
new_output_blocks.append('')
return new_input_blocks, new_output_blocks
def extract_output_blocks(run_output):
"""
Identify and extract outputs from source.
Parameters
----------
run_output : str
Source code with outputs.
Returns
-------
list of str
List containing output text blocks.
"""
output_blocks = []
# Look for start and end lines that look like this:
# <<<<<4
# >>>>>4
output_block = []
for line in run_output.splitlines():
if line.startswith('>>>>>'):
output_blocks.append('\n'.join(output_block))
output_block = []
else:
output_block.append(line)
if output_block:
output_blocks.append('\n'.join(output_block))
return output_blocks
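# Illustrative example (added): for run output "a\n>>>>>0\nb\n>>>>>1\n",
# extract_output_blocks(run_output) returns ['a', 'b'].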
def globals_for_imports(src):
"""
Generate text that creates a global for each imported class, method, or module.
It appears that sphinx royally screws up something in python, so that when exec-ing
code with imports, they aren't always available inside of classes or methods. This
can be solved by issuing a global for each class, method, or module.
Parameters
----------
src : str
Source code to be tested.
Returns
-------
str
New code string with global statements
"""
# HACK: A test had problems loading this specific user-defined class under exec+sphinx, so
# hacking it in.
new_txt = ['from __future__ import print_function',
'global Sub',
'global ImplSimple']
continuation = False
for line in src.split('\n'):
if continuation or 'import ' in line:
if continuation:
tail = line
elif ' as ' in line:
tail = line.split(' as ')[1]
else:
tail = line.split('import ')[1]
if ', \\' in tail:
continuation = True
tail = tail.replace(', \\', '')
else:
continuation = False
modules = tail.split(',')
for module in modules:
new_txt.append('global %s' % module.strip())
return '\n'.join(new_txt)
def get_test_src(method_path):
"""
Return desired source code for a single feature after testing it.
Used by embed_test.
1. Get the source code for a unit test method
2. Replace the asserts with prints -> source_minus_docstrings_with_prints_cleaned
3. Split source_minus_docstrings_with_prints_cleaned up into groups of "In" blocks -> input_blocks
4. Insert extra print statements into source_minus_docstrings_with_prints_cleaned
to indicate start and end of print Out blocks -> source_with_output_start_stop_indicators
5. Run the test using source_with_out_start_stop_indicators -> run_outputs
6. Extract from run_outputs, the Out blocks -> output_blocks
7. Return source_minus_docstrings_with_prints_cleaned, input_blocks, output_blocks, skipped, failed
Parameters
----------
method_path : str
        Module hierarchy path to the test.
Returns
-------
str
Cleaned source code, ready for inclusion in doc.
str
Reason that the test failed or was skipped.
list of str
List of input code blocks
list of str
List of Python output blocks
bool
True if test was skipped
bool
True if test failed
"""
#----------------------------------------------------------
# 1. Get the source code for a unit test method.
#----------------------------------------------------------
module_path = '.'.join(method_path.split('.')[:-2])
class_name = method_path.split('.')[-2]
method_name = method_path.split('.')[-1]
test_module = importlib.import_module(module_path)
cls = getattr(test_module, class_name)
try:
import mpi4py
except ImportError:
use_mpi = False
else:
N_PROCS = getattr(cls, 'N_PROCS', 1)
use_mpi = N_PROCS > 1
meth = getattr(cls, method_name)
class_source_code = inspect.getsource(cls)
# Directly manipulating function text to strip header and remove leading whitespace.
# Should be faster than redbaron
method_source_code = inspect.getsource(meth)
meth_lines = method_source_code.split('\n')
counter = 0
past_header = False
new_lines = []
for line in meth_lines:
if not past_header:
n1 = len(line)
newline = line.lstrip()
n2 = len(newline)
tab = n1-n2
if counter == 0:
first_len = tab
elif n1 == 0:
continue
if tab == first_len:
counter += 1
newline = line[tab:]
else:
past_header = True
else:
newline = line[tab:]
# exclude 'global' directives, not needed the way we are running things
if not newline.startswith("global "):
new_lines.append(newline)
method_source = '\n'.join(new_lines[counter:])
# Remove docstring from source code
source_minus_docstrings = remove_docstrings(method_source)
#-----------------------------------------------------------------------------------
# 2. Replace the asserts with prints -> source_minus_docstrings_with_prints_cleaned
#-----------------------------------------------------------------------------------
# Replace some of the asserts with prints of the actual values
# This calls RedBaron
source_minus_docstrings_with_prints = replace_asserts_with_prints(source_minus_docstrings)
# remove raise SkipTest lines
# We decided to leave them in for now
# source_minus_docstrings_with_prints = remove_raise_skip_tests(source_minus_docstrings_with_prints)
# Remove the initial empty lines
source_minus_docstrings_with_prints_cleaned = remove_initial_empty_lines_from_source(
source_minus_docstrings_with_prints)
#-----------------------------------------------------------------------------------
# 4. Insert extra print statements into source_minus_docstrings_with_prints_cleaned
# to indicate start and end of print Out blocks -> source_with_output_start_stop_indicators
#-----------------------------------------------------------------------------------
source_with_output_start_stop_indicators = insert_output_start_stop_indicators( source_minus_docstrings_with_prints_cleaned )
#-----------------------------------------------------------------------------------
# 5. Run the test using source_with_out_start_stop_indicators -> run_outputs
#-----------------------------------------------------------------------------------
# Get all the pieces of code needed to run the unit test method
global_imports = globals_for_imports(method_source)
teardown_source_code = get_method_body(inspect.getsource(getattr(cls, 'tearDown')))
code_to_run = '\n'.join([global_imports,
source_with_output_start_stop_indicators,
teardown_source_code])
skipped = False
failed = False
try:
# Use Subprocess if we are under MPI.
if use_mpi:
# Write it to a file so we can run it.
fd, code_to_run_path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as tmp:
tmp.write(code_to_run)
tmp.close()
env = os.environ.copy()
env['USE_PROC_FILES'] = '1'
p = subprocess.Popen(['mpirun', '-n', str(N_PROCS), 'python', code_to_run_path],
env=env)
p.wait()
multi_out_blocks = []
for i in range(N_PROCS):
with open('%d.out' % i) as f:
multi_out_blocks.append(extract_output_blocks(f.read()))
os.remove('%d.out' % i)
output_blocks = []
for i in range(len(multi_out_blocks[0])):
output_blocks.append('\n'.join(["(rank %d) %s" % (j, m[i]) for j, m in enumerate(multi_out_blocks) if m[i]]))
# Just Exec the code for serial tests.
else:
stdout = sys.stdout
stderr = sys.stderr
strout = cStringIO()
sys.stdout = strout
sys.stderr = strout
#
# # Hacking, but trying to make sure we capture the check config messages.
# from openmdao.utils.logger_utils import _loggers, get_logger
# if 'check_config' not in _loggers:
# get_logger('check_config', use_format=True)
# if 'check_partials' not in _loggers:
# get_logger('check_partials')
# if 'check_totals' not in _loggers:
# get_logger('check_totals')
#
# _loggers['check_config']['logger'].handlers[0].stream = strout
# _loggers['check_partials']['logger'].handlers[0].stream = strout
# _loggers['check_totals']['logger'].handlers[0].stream = strout
# We need more precision from numpy
save_opts = np.get_printoptions()
np.set_printoptions(precision=8)
exec(code_to_run, {})
np.set_printoptions(precision=save_opts['precision'])
run_outputs = strout.getvalue()
except subprocess.CalledProcessError as e:
# Get a traceback.
if 'raise unittest.SkipTest' in e.output.decode('utf-8'):
reason_for_skip = e.output.splitlines()[-1][len('unittest.case.SkipTest: '):]
run_outputs = reason_for_skip
skipped = True
else:
run_outputs = "Running of embedded test {} in docs failed due to: \n\n{}".format(method_path, e.output.decode('utf-8'))
failed = True
except Exception as err:
if 'SkipTest' in code_to_run:
txt1 = code_to_run.split('SkipTest(')[1]
run_outputs = txt1.split(')')[0]
skipped = True
else:
msg = "Running of embedded test {} in docs failed due to: \n\n{}"
run_outputs = msg.format(method_path, str(err))
failed = True
finally:
if use_mpi:
os.remove(code_to_run_path)
else:
sys.stdout = stdout
sys.stderr = stderr
if PY3 and not use_mpi and not isinstance(run_outputs, str):
run_outputs = "".join(map(chr, run_outputs)) # in Python 3, run_outputs is of type bytes!
if not skipped and not failed:
#####################
### 3. Split source_minus_docstrings_with_prints_cleaned up into groups of "In" blocks -> input_blocks ###
#####################
input_blocks = split_source_into_input_blocks(source_minus_docstrings_with_prints_cleaned)
#####################
### 6. Extract from run_outputs, the Out blocks -> output_blocks ###
#####################
if not use_mpi:
output_blocks = extract_output_blocks(run_outputs)
            # Need to deal with the cases when there is no output block for a given input block
# Merge an input block with the previous block and throw away the output block
input_blocks, output_blocks = clean_up_empty_output_blocks(input_blocks, output_blocks)
skipped_failed_output = None
else:
input_blocks = output_blocks = None
skipped_failed_output = run_outputs
return source_minus_docstrings_with_prints_cleaned, skipped_failed_output, input_blocks, output_blocks, skipped, failed
|
apache-2.0
| -1,197,391,333,084,861,000 | 35.08484 | 131 | 0.583928 | false |
jaredlunde/cargo-orm
|
unit_tests/fields/Double.py
|
1
|
1916
|
#!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
from cargo.fields import Double
from unit_tests.fields.Numeric import TestNumeric, TestEncNumeric
from unit_tests import configure


class TestDouble(TestNumeric):
'''
value: value to populate the field with
not_null: bool() True if the field cannot be Null
primary: bool() True if this field is the primary key in your table
unique: bool() True if this field is a unique index in your table
index: bool() True if this field is a plain index in your table, that is,
not unique or primary
default: default value to set the field to
validation: callable() custom validation plugin, must return True if the
field validates, and False if it does not
decimal_places: int() maximum digit precision
'''
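    # Illustrative sketch only (an assumption, not part of the original suite): given the
    # options documented above, a field might hypothetically be declared as
    #   field = Double(not_null=True, index=True, default=0.0, decimal_places=6)
    # and its attributes (field.decimal_places, field.default, ...) asserted against the
    # values passed in.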
@property
def base(self):
return self.orm.float8
def test_init_(self):
base = Double()
self.assertEqual(base.value, base.empty)
self.assertIsNone(base.primary)
self.assertIsNone(base.unique)
self.assertIsNone(base.index)
self.assertIsNone(base.default)
self.assertIsNone(base.not_null)
self.assertEqual(base.minval, base.MINVAL)
self.assertEqual(base.maxval, base.MAXVAL)
self.assertEqual(base.decimal_places, 15)
def test_type_name(self):
self.assertEqual(self.base.type_name, 'double precision')
self.assertEqual(self.base_array.type_name, 'double precision[]')


class TestEncDouble(TestDouble, TestEncNumeric):
@property
def base(self):
return self.orm.enc_float8
def test_init(self):
pass
def test_type_name(self):
self.assertEqual(self.base.type_name, 'text')
self.assertEqual(self.base_array.type_name, 'text[]')


if __name__ == '__main__':
# Unit test
configure.run_tests(TestDouble, TestEncDouble, failfast=True, verbosity=2)
|
mit
| -1,712,506,100,909,295,600 | 31.474576 | 78 | 0.674322 | false |
ignamv/PlanarProcess
|
hole.py
|
1
|
1711
|
from matplotlib import pyplot as plt
import shapely.geometry
from geometry_helpers import *
gap = 0.00
original_coords = [ (0,0), (3,0), (3, 1.5-gap),
(2,1.5-gap), (2,1), (1,1), (1,2), (2.2,2), (2.2,1.5+gap),
(2.8,1.5+gap), (2.8,3), (0,3)]
original_segments = map(shapely.geometry.LineString, zip(original_coords[:-1],
original_coords[1:]))
# Split segments at every intersection
all_coords = []
for ii, segmentA in enumerate(original_segments):
all_coords.append(original_coords[ii])
for jj, coordB in reversed(list(enumerate(original_coords))):
if segmentA.contains(shapely.geometry.Point(coordB)) and \
coordB not in segmentA.coords:
# Split this segment
all_coords.append(coordB)
segmentA = shapely.geometry.LineString([coordB,
segmentA.coords[1]])
all_coords.append(segmentA.coords[1])
print(all_coords)
# Separate interior from exterior
processed = []
# Lists of coords left to process
pending = [(0, len(all_coords))]
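# Walk the coordinate list; whenever a coordinate reappears later in the current span,
# the enclosed sub-span is queued as its own ring (a hole) and the walk jumps past it,
# so nested rings end up as separate entries in `processed`.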
while pending:
first, last = pending.pop()
current = []
ii = first
while ii < last:
coordA = all_coords[ii]
current.append(coordA)
for jj in range(last-1, ii, -1):
if coordA == all_coords[jj]:
pending.append((ii+1, jj-1))
ii = jj
break
ii += 1
processed.append(current)
print(processed)
#plot_geometryref(GeometryReference(holey))
from itertools import cycle
colors = cycle('rgbcmyk')
for p in processed:
plt.plot(*zip(*p), linestyle='solid', marker='x', color=next(colors))
#plt.plot(*zip(*exterior), linestyle='solid', marker='o', color='b')
plt.show()
|
gpl-3.0
| -7,490,323,570,187,449,000 | 32.54902 | 78 | 0.618936 | false |